Columns:
id: int64 (values 0 – 190k)
prompt: string (length 21 – 13.4M characters)
docstring: string (length 1 – 12k characters)
17,915
from typing import List from packaging import version from sklearn.metrics import f1_score import datasets from datasets.config import PY_VERSION try: from jiwer import transforms as tr _jiwer_available = True except ImportError: _jiwer_available = False def wer_and_cer(preds, labels, concatenate_texts, config_name): try: from jiwer import compute_measures except ImportError: raise ValueError( f"jiwer has to be installed in order to apply the wer metric for {config_name}." "You can install it via `pip install jiwer`." ) if concatenate_texts: wer = compute_measures(labels, preds)["wer"] cer = compute_measures(labels, preds, truth_transform=cer_transform, hypothesis_transform=cer_transform)["wer"] return {"wer": wer, "cer": cer} else: def compute_score(preds, labels, score_type="wer"): incorrect = 0 total = 0 for prediction, reference in zip(preds, labels): if score_type == "wer": measures = compute_measures(reference, prediction) elif score_type == "cer": measures = compute_measures( reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total return {"wer": compute_score(preds, labels, "wer"), "cer": compute_score(preds, labels, "cer")}
null
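The row above references `cer_transform` without defining it. In the upstream `datasets` CER/xtreme_s metrics it is typically composed from `jiwer` transforms; the exact composition below is an assumption based on those metrics (jiwer >= 2.3), not taken from this row:

from jiwer import transforms as tr

# Character-level transform so compute_measures counts character edits
# (CER) instead of word edits (WER).
cer_transform = tr.Compose(
    [
        tr.RemoveMultipleSpaces(),
        tr.Strip(),
        tr.ReduceToSingleSentence(""),
        tr.ReduceToListOfListOfChars(),
    ]
)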
17,916
import argparse import json import re import string import sys import numpy as np def compute_precision_recall(predictions, ground_truths, qa_id): tp, fp, fn = 0, 0, 0 substr_ok = "Parties" in qa_id # first check if ground truth is empty if len(ground_truths) == 0: if len(predictions) > 0: fp += len(predictions) # false positive for each one else: for ground_truth in ground_truths: assert len(ground_truth) > 0 # check if there is a match match_found = False for pred in predictions: if substr_ok: is_match = get_jaccard(pred, ground_truth) >= IOU_THRESH or ground_truth in pred else: is_match = get_jaccard(pred, ground_truth) >= IOU_THRESH if is_match: match_found = True if match_found: tp += 1 else: fn += 1 # now also get any fps by looping through preds for pred in predictions: # Check if there's a match. if so, don't count (don't want to double count based on the above) # but if there's no match, then this is a false positive. # (Note: we get the true positives in the above loop instead of this loop so that we don't double count # multiple predictions that are matched with the same answer.) match_found = False for ground_truth in ground_truths: assert len(ground_truth) > 0 if substr_ok: is_match = get_jaccard(pred, ground_truth) >= IOU_THRESH or ground_truth in pred else: is_match = get_jaccard(pred, ground_truth) >= IOU_THRESH if is_match: match_found = True if not match_found: fp += 1 precision = tp / (tp + fp) if tp + fp > 0 else np.nan recall = tp / (tp + fn) if tp + fn > 0 else np.nan return precision, recall def get_aupr(precisions, recalls): processed_precisions = process_precisions(precisions) aupr = np.trapz(processed_precisions, recalls) if np.isnan(aupr): return 0 return aupr def get_prec_at_recall(precisions, recalls, recall_thresh): """Assumes recalls are sorted in increasing order""" processed_precisions = process_precisions(precisions) prec_at_recall = 0 for prec, recall in zip(processed_precisions, recalls): if recall >= recall_thresh: prec_at_recall = prec break return prec_at_recall def exact_match_score(prediction, ground_truth): return normalize_answer(prediction) == normalize_answer(ground_truth) def metric_max_over_ground_truths(metric_fn, predictions, ground_truths): score = 0 for pred in predictions: for ground_truth in ground_truths: score = metric_fn(pred, ground_truth) if score == 1: # break the loop when one prediction matches the ground truth break if score == 1: break return score def evaluate(dataset, predictions): f1 = exact_match = total = 0 precisions = [] recalls = [] for article in dataset: for paragraph in article["paragraphs"]: for qa in paragraph["qas"]: total += 1 if qa["id"] not in predictions: message = "Unanswered question " + qa["id"] + " will receive score 0." 
print(message, file=sys.stderr) continue ground_truths = [x["text"] for x in qa["answers"]] prediction = predictions[qa["id"]] precision, recall = compute_precision_recall(prediction, ground_truths, qa["id"]) precisions.append(precision) recalls.append(recall) if precision == 0 and recall == 0: f1 += 0 else: f1 += 2 * (precision * recall) / (precision + recall) exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths) precisions = [x for _, x in sorted(zip(recalls, precisions))] recalls.sort() f1 = 100.0 * f1 / total exact_match = 100.0 * exact_match / total aupr = get_aupr(precisions, recalls) prec_at_90_recall = get_prec_at_recall(precisions, recalls, recall_thresh=0.9) prec_at_80_recall = get_prec_at_recall(precisions, recalls, recall_thresh=0.8) return { "exact_match": exact_match, "f1": f1, "aupr": aupr, "prec_at_80_recall": prec_at_80_recall, "prec_at_90_recall": prec_at_90_recall, }
null
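`compute_precision_recall` above relies on `get_jaccard`, `IOU_THRESH`, `process_precisions`, and `normalize_answer`, none of which appear in the snippet. A minimal sketch of the Jaccard helper, assuming CUAD-style token-level overlap; the 0.5 threshold and the normalization details are assumptions:

import re
import string

IOU_THRESH = 0.5  # assumed overlap threshold

def normalize_answer(s):
    # Lowercase, drop punctuation and articles, collapse whitespace (SQuAD-style).
    s = s.lower()
    s = "".join(ch for ch in s if ch not in set(string.punctuation))
    s = re.sub(r"\b(a|an|the)\b", " ", s)
    return " ".join(s.split())

def get_jaccard(prediction, ground_truth):
    # Token-level Jaccard overlap between two normalized spans.
    pred_tokens = set(normalize_answer(prediction).split())
    gt_tokens = set(normalize_answer(ground_truth).split())
    if not pred_tokens and not gt_tokens:
        return 1.0
    return len(pred_tokens & gt_tokens) / len(pred_tokens | gt_tokens)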
17,917
import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import f1_score import datasets def simple_accuracy(preds, labels): return float((preds == labels).mean()) def acc_and_f1(preds, labels): acc = simple_accuracy(preds, labels) f1 = float(f1_score(y_true=labels, y_pred=preds)) return { "accuracy": acc, "f1": f1, }
null
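A quick sanity check of the helpers above (binary labels, so the default `f1_score` average applies):

preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])
print(acc_and_f1(preds, labels))  # {'accuracy': 0.75, 'f1': 0.666...}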
17,918
import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import f1_score import datasets def precision_at_10(en_sentvecs, in_sentvecs): en_sentvecs = np.array(en_sentvecs) in_sentvecs = np.array(in_sentvecs) n = en_sentvecs.shape[0] # mean centering en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0) in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0) sim = cdist(en_sentvecs, in_sentvecs, "cosine") actual = np.array(range(n)) preds = sim.argsort(axis=1)[:, :10] matches = np.any(preds == actual[:, None], axis=1) return float(matches.mean())
null
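A small usage sketch for `precision_at_10`: with near-identical embedding lists, almost every English sentence should retrieve its own index within the top 10 cosine neighbours.

rng = np.random.default_rng(0)
en = rng.normal(size=(50, 16))
other = en + 0.01 * rng.normal(size=(50, 16))  # slightly perturbed copies
print(precision_at_10(en.tolist(), other.tolist()))  # expected to be 1.0 or very close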
17,919
import argparse import collections import json import os import re import string import sys import numpy as np def parse_args(): parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.") parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.") parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.") parser.add_argument( "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)." ) parser.add_argument( "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer." ) parser.add_argument( "--na-prob-thresh", "-t", type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).', ) parser.add_argument( "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory." ) parser.add_argument("--verbose", "-v", action="store_true") if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args()
null
17,920
import argparse import collections import json import os import re import string import sys import numpy as np def make_qid_to_has_ans(dataset): qid_to_has_ans = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"]) return qid_to_has_ans
null
17,921
import argparse import collections import json import os import re import string import sys import numpy as np def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): return ARTICLES_REGEX.sub(" ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def compute_exact(a_gold, a_pred): return int(normalize_answer(a_gold) == normalize_answer(a_pred)) def compute_f1(a_gold, a_pred): gold_toks = get_tokens(a_gold) pred_toks = get_tokens(a_pred) common = collections.Counter(gold_toks) & collections.Counter(pred_toks) num_same = sum(common.values()) if len(gold_toks) == 0 or len(pred_toks) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks) if num_same == 0: return 0 precision = 1.0 * num_same / len(pred_toks) recall = 1.0 * num_same / len(gold_toks) f1 = (2 * precision * recall) / (precision + recall) return f1 def get_raw_scores(dataset, preds): exact_scores = {} f1_scores = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: qid = qa["id"] gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)] if not gold_answers: # For unanswerable questions, only correct answer is empty string gold_answers = [""] if qid not in preds: print(f"Missing prediction for {qid}") continue a_pred = preds[qid] # Take max over all gold answers exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers) f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers) return exact_scores, f1_scores
null
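`normalize_answer` and `compute_f1` above depend on `ARTICLES_REGEX` and `get_tokens`, which are defined elsewhere in the official SQuAD 2.0 evaluation script; they are roughly:

ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()

# e.g. compute_f1("the cat sat on the mat", "a cat sat") -> 0.666... (2 shared tokens)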
17,922
import argparse import collections import json import os import re import string import sys import numpy as np def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh): new_scores = {} for qid, s in scores.items(): pred_na = na_probs[qid] > na_prob_thresh if pred_na: new_scores[qid] = float(not qid_to_has_ans[qid]) else: new_scores[qid] = s return new_scores
null
17,923
import argparse import collections import json import os import re import string import sys import numpy as np def make_eval_dict(exact_scores, f1_scores, qid_list=None): if not qid_list: total = len(exact_scores) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values()) / total), ("f1", 100.0 * sum(f1_scores.values()) / total), ("total", total), ] ) else: total = len(qid_list) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total), ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total), ("total", total), ] )
null
17,924
import argparse import collections import json import os import re import string import sys import numpy as np def merge_eval(main_eval, new_eval, prefix): for k in new_eval: main_eval[f"{prefix}_{k}"] = new_eval[k] def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None): qid_list = sorted(na_probs, key=lambda k: na_probs[k]) true_pos = 0.0 cur_p = 1.0 cur_r = 0.0 precisions = [1.0] recalls = [0.0] avg_prec = 0.0 for i, qid in enumerate(qid_list): if qid_to_has_ans[qid]: true_pos += scores[qid] cur_p = true_pos / float(i + 1) cur_r = true_pos / float(num_true_pos) if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(cur_p) recalls.append(cur_r) if out_image: plot_pr_curve(precisions, recalls, out_image, title) return {"ap": 100.0 * avg_prec} def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir): if out_image_dir and not os.path.exists(out_image_dir): os.makedirs(out_image_dir) num_true_pos = sum(1 for v in qid_to_has_ans.values() if v) if num_true_pos == 0: return pr_exact = make_precision_recall_eval( exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score", ) pr_f1 = make_precision_recall_eval( f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score", ) oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()} pr_oracle = make_precision_recall_eval( oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", ) merge_eval(main_eval, pr_exact, "pr_exact") merge_eval(main_eval, pr_f1, "pr_f1") merge_eval(main_eval, pr_oracle, "pr_oracle")
null
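`make_precision_recall_eval` calls `plot_pr_curve`, which is not part of this row; in the official script it is a small matplotlib helper, imported lazily with the Agg backend, along these lines (the same import is also assumed by `histogram_na_prob` in the next row):

import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt

def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()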
17,925
import argparse import collections import json import os import re import string import sys import numpy as np def histogram_na_prob(na_probs, qid_list, image_dir, name): if not qid_list: return x = [na_probs[k] for k in qid_list] weights = np.ones_like(x) / float(len(x)) plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0)) plt.xlabel("Model probability of no-answer") plt.ylabel("Proportion of dataset") plt.title(f"Histogram of no-answer probability: {name}") plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png")) plt.clf()
null
17,926
import argparse import collections import json import os import re import string import sys import numpy as np def find_best_thresh(preds, scores, na_probs, qid_to_has_ans): def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans) best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans) main_eval["best_exact"] = best_exact main_eval["best_exact_thresh"] = exact_thresh main_eval["best_f1"] = best_f1 main_eval["best_f1_thresh"] = f1_thresh
null
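The body of `find_best_thresh` is elided in this row; the standard SQuAD 2.0 implementation that `find_all_best_thresh` expects looks roughly like this (a reconstruction, not copied from the row):

def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    # Start from the score obtained by predicting "" for every question.
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for qid in qid_list:
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            diff = -1 if preds[qid] else 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh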
17,927
import argparse import json import re import string import sys from collections import Counter def f1_score(prediction, ground_truth): prediction_tokens = normalize_answer(prediction).split() ground_truth_tokens = normalize_answer(ground_truth).split() common = Counter(prediction_tokens) & Counter(ground_truth_tokens) num_same = sum(common.values()) if num_same == 0: return 0 precision = 1.0 * num_same / len(prediction_tokens) recall = 1.0 * num_same / len(ground_truth_tokens) f1 = (2 * precision * recall) / (precision + recall) return f1 def exact_match_score(prediction, ground_truth): return normalize_answer(prediction) == normalize_answer(ground_truth) def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): scores_for_ground_truths = [] for ground_truth in ground_truths: score = metric_fn(prediction, ground_truth) scores_for_ground_truths.append(score) return max(scores_for_ground_truths) def evaluate(dataset, predictions): f1 = exact_match = total = 0 correct_ids = [] for passage in dataset: for qa in passage["qas"]: total += 1 if qa["id"] not in predictions: message = f'Unanswered question {qa["id"]} will receive score 0.' print(message, file=sys.stderr) continue ground_truths = [x["text"] for x in qa["answers"]] prediction = predictions[qa["id"]] _exact_match = metric_max_over_ground_truths(exact_match_score, prediction, ground_truths) if int(_exact_match) == 1: correct_ids.append(qa["id"]) exact_match += _exact_match f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths) exact_match = exact_match / total f1 = f1 / total return {"exact_match": exact_match, "f1": f1}, correct_ids
null
17,928
from sklearn.metrics import f1_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record def simple_accuracy(preds, labels): return float((preds == labels).mean()) def acc_and_f1(preds, labels, f1_avg="binary"): acc = simple_accuracy(preds, labels) f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg)) return { "accuracy": acc, "f1": f1, }
null
17,929
from sklearn.metrics import f1_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record The provided code snippet includes necessary dependencies for implementing the `evaluate_multirc` function. Write a Python function `def evaluate_multirc(ids_preds, labels)` to solve the following problem: Computes F1 score and Exact Match for MultiRC predictions. Here is the function: def evaluate_multirc(ids_preds, labels): """ Computes F1 score and Exact Match for MultiRC predictions. """ question_map = {} for id_pred, label in zip(ids_preds, labels): question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}' pred = id_pred["prediction"] if question_id in question_map: question_map[question_id].append((pred, label)) else: question_map[question_id] = [(pred, label)] f1s, ems = [], [] for question, preds_labels in question_map.items(): question_preds, question_labels = zip(*preds_labels) f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro") f1s.append(f1) em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels)) ems.append(em) f1_m = float(sum(f1s) / len(f1s)) em = sum(ems) / len(ems) f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds])) return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
Computes F1 score and Exact Match for MultiRC predictions.
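A toy call to `evaluate_multirc`, with two answer candidates for one question and a single candidate for another (predictions and labels are illustrative only):

ids_preds = [
    {"idx": {"paragraph": 0, "question": 0, "answer": 0}, "prediction": 1},
    {"idx": {"paragraph": 0, "question": 0, "answer": 1}, "prediction": 0},
    {"idx": {"paragraph": 0, "question": 1, "answer": 0}, "prediction": 1},
]
labels = [1, 0, 0]
print(evaluate_multirc(ids_preds, labels))
# approximately {'exact_match': 0.5, 'f1_m': 0.5, 'f1_a': 0.667}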
17,930
from typing import Dict, Optional import numpy as np import datasets def total_intersect_and_union( results, gt_seg_maps, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False, ): """Calculate Total Intersection and Union, by calculating `intersect_and_union` for each (predicted, ground truth) pair. Args: results (`ndarray`): List of prediction segmentation maps, each of shape (height, width). gt_seg_maps (`ndarray`): List of ground truth segmentation maps, each of shape (height, width). num_labels (`int`): Number of categories. ignore_index (`int`): Index that will be ignored during evaluation. label_map (`dict`, *optional*): Mapping old labels to new labels. The parameter will work only when label is str. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: total_area_intersect (`ndarray`): The intersection of prediction and ground truth histogram on all classes. total_area_union (`ndarray`): The union of prediction and ground truth histogram on all classes. total_area_pred_label (`ndarray`): The prediction histogram on all classes. total_area_label (`ndarray`): The ground truth histogram on all classes. """ total_area_intersect = np.zeros((num_labels,), dtype=np.float64) total_area_union = np.zeros((num_labels,), dtype=np.float64) total_area_pred_label = np.zeros((num_labels,), dtype=np.float64) total_area_label = np.zeros((num_labels,), dtype=np.float64) for result, gt_seg_map in zip(results, gt_seg_maps): area_intersect, area_union, area_pred_label, area_label = intersect_and_union( result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label The provided code snippet includes necessary dependencies for implementing the `mean_iou` function. Write a Python function `def mean_iou( results, gt_seg_maps, num_labels, ignore_index: bool, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False, )` to solve the following problem: Calculate Mean Intersection and Union (mIoU). Args: results (`ndarray`): List of prediction segmentation maps, each of shape (height, width). gt_seg_maps (`ndarray`): List of ground truth segmentation maps, each of shape (height, width). num_labels (`int`): Number of categories. ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): Mapping old labels to new labels. The parameter will work only when label is str. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). 
- *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. Here is the function: def mean_iou( results, gt_seg_maps, num_labels, ignore_index: bool, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False, ): """Calculate Mean Intersection and Union (mIoU). Args: results (`ndarray`): List of prediction segmentation maps, each of shape (height, width). gt_seg_maps (`ndarray`): List of ground truth segmentation maps, each of shape (height, width). num_labels (`int`): Number of categories. ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): Mapping old labels to new labels. The parameter will work only when label is str. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. """ total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union( results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels ) # compute metrics metrics = {} all_acc = total_area_intersect.sum() / total_area_label.sum() iou = total_area_intersect / total_area_union acc = total_area_intersect / total_area_label metrics["mean_iou"] = np.nanmean(iou) metrics["mean_accuracy"] = np.nanmean(acc) metrics["overall_accuracy"] = all_acc metrics["per_category_iou"] = iou metrics["per_category_accuracy"] = acc if nan_to_num is not None: metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()} return metrics
Calculate Mean Intersection and Union (mIoU). Args: results (`ndarray`): List of prediction segmentation maps, each of shape (height, width). gt_seg_maps (`ndarray`): List of ground truth segmentation maps, each of shape (height, width). num_labels (`int`): Number of categories. ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): Mapping old labels to new labels. The parameter will work only when label is str. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU.
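`mean_iou` and `total_intersect_and_union` both call `intersect_and_union`, which is not shown in this row. A sketch of the per-image helper following the same histogram-based approach (the details are a reconstruction), plus a toy check:

def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    pred_label = pred_label[mask]
    label = label[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label

# toy check: one 2x2 segmentation map, 2 labels, no ignored pixels
pred = [np.array([[0, 1], [1, 1]])]
gt = [np.array([[0, 1], [0, 1]])]
print(mean_iou(pred, gt, num_labels=2, ignore_index=255))  # mean_iou ~ 0.583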
17,931
from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets def SARIngram(sgrams, cgrams, rgramslist, numref): rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams] rgramcounter = Counter(rgramsall) sgramcounter = Counter(sgrams) sgramcounter_rep = Counter() for sgram, scount in sgramcounter.items(): sgramcounter_rep[sgram] = scount * numref cgramcounter = Counter(cgrams) cgramcounter_rep = Counter() for cgram, ccount in cgramcounter.items(): cgramcounter_rep[cgram] = ccount * numref # KEEP keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep keepgramcountergood_rep = keepgramcounter_rep & rgramcounter keepgramcounterall_rep = sgramcounter_rep & rgramcounter keeptmpscore1 = 0 keeptmpscore2 = 0 for keepgram in keepgramcountergood_rep: keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscore2 += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. keepscore_precision = 1 keepscore_recall = 1 if len(keepgramcounter_rep) > 0: keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep) if len(keepgramcounterall_rep) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values()) keepscore = 0 if keepscore_precision > 0 or keepscore_recall > 0: keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION delgramcounter_rep = sgramcounter_rep - cgramcounter_rep delgramcountergood_rep = delgramcounter_rep - rgramcounter delgramcounterall_rep = sgramcounter_rep - rgramcounter deltmpscore1 = 0 deltmpscore2 = 0 for delgram in delgramcountergood_rep: deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. delscore_precision = 1 if len(delgramcounter_rep) > 0: delscore_precision = deltmpscore1 / len(delgramcounter_rep) # ADDITION addgramcounter = set(cgramcounter) - set(sgramcounter) addgramcountergood = set(addgramcounter) & set(rgramcounter) addgramcounterall = set(rgramcounter) - set(sgramcounter) addtmpscore = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
addscore_precision = 1 addscore_recall = 1 if len(addgramcounter) > 0: addscore_precision = addtmpscore / len(addgramcounter) if len(addgramcounterall) > 0: addscore_recall = addtmpscore / len(addgramcounterall) addscore = 0 if addscore_precision > 0 or addscore_recall > 0: addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def SARIsent(ssent, csent, rsents): numref = len(rsents) s1grams = ssent.split(" ") c1grams = csent.split(" ") s2grams = [] c2grams = [] s3grams = [] c3grams = [] s4grams = [] c4grams = [] r1gramslist = [] r2gramslist = [] r3gramslist = [] r4gramslist = [] for rsent in rsents: r1grams = rsent.split(" ") r2grams = [] r3grams = [] r4grams = [] r1gramslist.append(r1grams) for i in range(0, len(r1grams) - 1): if i < len(r1grams) - 1: r2gram = r1grams[i] + " " + r1grams[i + 1] r2grams.append(r2gram) if i < len(r1grams) - 2: r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] r3grams.append(r3gram) if i < len(r1grams) - 3: r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3] r4grams.append(r4gram) r2gramslist.append(r2grams) r3gramslist.append(r3grams) r4gramslist.append(r4grams) for i in range(0, len(s1grams) - 1): if i < len(s1grams) - 1: s2gram = s1grams[i] + " " + s1grams[i + 1] s2grams.append(s2gram) if i < len(s1grams) - 2: s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] s3grams.append(s3gram) if i < len(s1grams) - 3: s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3] s4grams.append(s4gram) for i in range(0, len(c1grams) - 1): if i < len(c1grams) - 1: c2gram = c1grams[i] + " " + c1grams[i + 1] c2grams.append(c2gram) if i < len(c1grams) - 2: c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] c3grams.append(c3gram) if i < len(c1grams) - 3: c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3] c4grams.append(c4gram) (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref) (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref) (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref) (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref) avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4 avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4 avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4 finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore
null
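A minimal call to `SARIsent` with one source sentence, one system output, and two references (pre-tokenized, space-separated, as the function expects):

source = "about 95 species are currently accepted ."
prediction = "about 95 species are currently known ."
references = [
    "about 95 species are currently known .",
    "about 95 species are now accepted .",
]
print(SARIsent(source, prediction, references))  # a float in [0, 1]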
17,932
from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True): # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: sentence = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__).major >= 2: normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence) else: normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence) elif tokenizer == "moses": normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False) elif tokenizer == "penn": normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True) else: normalized_sent = sentence if not return_str: normalized_sent = normalized_sent.split() return normalized_sent
null
17,935
import functools from contextlib import contextmanager import bert_score from packaging import version import datasets def filter_logging_context(): def filter_log(record): return False if "This IS expected if you are initializing" in record.msg else True logger = datasets.utils.logging.get_logger("transformers.modeling_utils") logger.addFilter(filter_log) try: yield finally: logger.removeFilter(filter_log)
null
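`filter_logging_context` uses `yield` inside `try/finally` and `contextmanager` is imported above, so it is presumably meant to be a context manager; the decorator appears to have been dropped when the snippet was extracted. Intended usage would be roughly:

# Re-attach the (apparently stripped) decorator, then wrap model loading so the
# "This IS expected if you are initializing" messages are filtered only inside the block.
filter_logging_context = contextmanager(filter_logging_context)

with filter_logging_context():
    pass  # e.g. instantiate the bert_score scorer here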
17,936
import argparse import json import re import string import sys from collections import Counter def f1_score(prediction, ground_truth): prediction_tokens = normalize_answer(prediction).split() ground_truth_tokens = normalize_answer(ground_truth).split() common = Counter(prediction_tokens) & Counter(ground_truth_tokens) num_same = sum(common.values()) if num_same == 0: return 0 precision = 1.0 * num_same / len(prediction_tokens) recall = 1.0 * num_same / len(ground_truth_tokens) f1 = (2 * precision * recall) / (precision + recall) return f1 def exact_match_score(prediction, ground_truth): return normalize_answer(prediction) == normalize_answer(ground_truth) def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): scores_for_ground_truths = [] for ground_truth in ground_truths: score = metric_fn(prediction, ground_truth) scores_for_ground_truths.append(score) return max(scores_for_ground_truths) def evaluate(dataset, predictions): f1 = exact_match = total = 0 for article in dataset: for paragraph in article["paragraphs"]: for qa in paragraph["qas"]: total += 1 if qa["id"] not in predictions: message = "Unanswered question " + qa["id"] + " will receive score 0." print(message, file=sys.stderr) continue ground_truths = [x["text"] for x in qa["answers"]] prediction = predictions[qa["id"]] exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths) f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths) exact_match = 100.0 * exact_match / total f1 = 100.0 * f1 / total return {"exact_match": exact_match, "f1": f1}
null
17,937
import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness The provided code snippet includes necessary dependencies for implementing the `estimate_pass_at_k` function. Write a Python function `def estimate_pass_at_k(num_samples, num_correct, k)` to solve the following problem: Estimates pass@k of each problem and returns them in an array. Here is the function: def estimate_pass_at_k(num_samples, num_correct, k): """Estimates pass@k of each problem and returns them in an array.""" def estimator(n: int, c: int, k: int) -> float: """Calculates 1 - comb(n - c, k) / comb(n, k).""" if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)) if isinstance(num_samples, int): num_samples_it = itertools.repeat(num_samples, len(num_correct)) else: assert len(num_samples) == len(num_correct) num_samples_it = iter(num_samples) return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
Estimates pass@k of each problem and returns them in an array.
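For example, with 10 generated samples per problem, 2 correct for the first problem and 10 for the second:

num_samples = [10, 10]
num_correct = [2, 10]
print(estimate_pass_at_k(num_samples, num_correct, k=1))  # [0.2 1. ]
print(estimate_pass_at_k(num_samples, num_correct, k=5))  # [~0.778 1. ]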
17,938
import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def unsafe_execute(check_program, result, timeout): with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil rmtree = shutil.rmtree rmdir = os.rmdir chdir = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: exec_globals = {} with swallow_io(): with time_limit(timeout): exec(check_program, exec_globals) result.append("passed") except TimeoutException: result.append("timed out") except BaseException as e: result.append(f"failed: {e}") # Needed for cleaning up. shutil.rmtree = rmtree os.rmdir = rmdir os.chdir = chdir The provided code snippet includes necessary dependencies for implementing the `check_correctness` function. Write a Python function `def check_correctness(check_program, timeout, task_id, completion_id)` to solve the following problem: Evaluates the functional correctness of a completion by running the test suite provided in the problem. :param completion_id: an optional completion ID so we can match the results later even if execution finishes asynchronously. Here is the function: def check_correctness(check_program, timeout, task_id, completion_id): """ Evaluates the functional correctness of a completion by running the test suite provided in the problem. :param completion_id: an optional completion ID so we can match the results later even if execution finishes asynchronously. """ manager = multiprocessing.Manager() result = manager.list() p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout)) p.start() p.join(timeout=timeout + 1) if p.is_alive(): p.kill() if not result: result.append("timed out") return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, }
Evaluates the functional correctness of a completion by running the test suite provided in the problem. :param completion_id: an optional completion ID so we can match the results later even if execution finishes asynchronously.
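A usage sketch. Note that `unsafe_execute` above also depends on `create_tempdir`, `reliability_guard`, `swallow_io`, `time_limit`, and `TimeoutException` from the full `code_eval` execution module, so this only behaves as expected once those helpers are available:

program = (
    "def add(a, b):\n"
    "    return a + b\n"
    "assert add(1, 2) == 3\n"
)
result = check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0)
print(result)  # expected: {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}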
17,939
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger class ConvertCommand(BaseDatasetsCLICommand): def register_subcommand(parser: ArgumentParser): """ Register this command to argparse so it's available for the datasets-cli Args: parser: Root parser to register command-specific arguments """ train_parser = parser.add_parser( "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", ) train_parser.add_argument( "--tfds_path", type=str, required=True, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", ) train_parser.add_argument( "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder." ) train_parser.set_defaults(func=convert_command_factory) def __init__(self, tfds_path: str, datasets_directory: str, *args): self._logger = get_logger("datasets-cli/converting") self._tfds_path = tfds_path self._datasets_directory = datasets_directory def run(self): if os.path.isdir(self._tfds_path): abs_tfds_path = os.path.abspath(self._tfds_path) elif os.path.isfile(self._tfds_path): abs_tfds_path = os.path.dirname(self._tfds_path) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path.") abs_datasets_path = os.path.abspath(self._datasets_directory) self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}") utils_files = [] with_manual_update = [] imports_to_builder_map = {} if os.path.isdir(self._tfds_path): file_names = os.listdir(abs_tfds_path) else: file_names = [os.path.basename(self._tfds_path)] for f_name in file_names: self._logger.info(f"Looking at file {f_name}") input_file = os.path.join(abs_tfds_path, f_name) output_file = os.path.join(abs_datasets_path, f_name) if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file") continue with open(input_file, encoding="utf-8") as f: lines = f.readlines() out_lines = [] is_builder = False needs_manual_update = False tfds_imports = [] for line in lines: out_line = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: out_line = "import datasets\n" elif "import tensorflow" in out_line: # order is important here out_line = "" continue elif "from absl import logging" in out_line: out_line = "from datasets import logging\n" elif "getLogger" in out_line: out_line = out_line.replace("getLogger", "get_logger") elif any(expression in out_line for expression in TO_HIGHLIGHT): needs_manual_update = True to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT)) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n") out_lines.append(out_line) out_lines.append(HIGHLIGHT_MESSAGE_POST) continue else: for pattern, replacement in TO_CONVERT: out_line = re.sub(pattern, replacement, out_line) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line) tfds_imports.extend(imp.strip() for imp in match.group(1).split(",")) out_line = "from . import " + match.group(1) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f"Error converting {out_line.strip()}") if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: is_builder = True out_lines.append(out_line) if is_builder or "wmt" in f_name: # We create a new directory for each dataset dir_name = f_name.replace(".py", "") output_dir = os.path.join(abs_datasets_path, dir_name) output_file = os.path.join(output_dir, f_name) os.makedirs(output_dir, exist_ok=True) self._logger.info(f"Adding directory {output_dir}") imports_to_builder_map.update({imp: output_dir for imp in tfds_imports}) else: # Utilities will be moved at the end utils_files.append(output_file) if needs_manual_update: with_manual_update.append(output_file) with open(output_file, "w", encoding="utf-8") as f: f.writelines(out_lines) self._logger.info(f"Converted in {output_file}") for utils_file in utils_files: try: f_name = os.path.basename(utils_file) dest_folder = imports_to_builder_map[f_name.replace(".py", "")] self._logger.info(f"Moving {dest_folder} to {utils_file}") shutil.copy(utils_file, dest_folder) except KeyError: self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.") if with_manual_update: for file_path in with_manual_update: self._logger.warning( f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." ) The provided code snippet includes necessary dependencies for implementing the `convert_command_factory` function. Write a Python function `def convert_command_factory(args: Namespace)` to solve the following problem: Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint. Returns: ConvertCommand Here is the function: def convert_command_factory(args: Namespace): """ Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint. Returns: ConvertCommand """ return ConvertCommand(args.tfds_path, args.datasets_directory)
Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint. Returns: ConvertCommand
17,940
import fnmatch import json import os import shutil import tempfile import xml.etree.ElementTree as ET from argparse import ArgumentParser from pathlib import Path from typing import Optional from datasets import config from datasets.commands import BaseDatasetsCLICommand from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.download.mock_download_manager import MockDownloadManager from datasets.load import dataset_module_factory, import_main_class from datasets.utils.deprecation_utils import deprecated from datasets.utils.logging import get_logger, set_verbosity_warning from datasets.utils.py_utils import map_nested class DummyDataCommand(BaseDatasetsCLICommand): def register_subcommand(parser: ArgumentParser): test_parser = parser.add_parser("dummy_data", help="Generate dummy data.") test_parser.add_argument("--auto_generate", action="store_true", help="Automatically generate dummy data") test_parser.add_argument( "--n_lines", type=int, default=5, help="Number of lines or samples to keep when auto-generating dummy data" ) test_parser.add_argument( "--json_field", type=str, default=None, help="Optional, json field to read the data from when auto-generating dummy data. In the json data files, this field must point to a list of samples as json objects (ex: the 'data' field for squad-like files)", ) test_parser.add_argument( "--xml_tag", type=str, default=None, help="Optional, xml tag name of the samples inside the xml files when auto-generating dummy data.", ) test_parser.add_argument( "--match_text_files", type=str, default=None, help="Optional, a comma separated list of file patterns that looks for line-by-line text files other than *.txt or *.csv. Example: --match_text_files *.label", ) test_parser.add_argument( "--keep_uncompressed", action="store_true", help="Whether to leave the dummy data folders uncompressed when auto-generating dummy data. Useful for debugging for to do manual adjustements before compressing.", ) test_parser.add_argument( "--cache_dir", type=str, default=None, help="Cache directory to download and cache files when auto-generating dummy data", ) test_parser.add_argument( "--encoding", type=str, default=None, help=f"Encoding to use when auto-generating dummy data. 
Defaults to {DEFAULT_ENCODING}", ) test_parser.add_argument("path_to_dataset", type=str, help="Path to the dataset (example: ./datasets/squad)") test_parser.set_defaults(func=dummy_data_command_factory) def __init__( self, path_to_dataset: str, auto_generate: bool, n_lines: int, json_field: Optional[str], xml_tag: Optional[str], match_text_files: Optional[str], keep_uncompressed: bool, cache_dir: Optional[str], encoding: Optional[str], ): self._path_to_dataset = path_to_dataset if os.path.isdir(path_to_dataset): self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-1] else: self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-2] cache_dir = os.path.expanduser(cache_dir or config.HF_DATASETS_CACHE) self._auto_generate = auto_generate self._n_lines = n_lines self._json_field = json_field self._xml_tag = xml_tag self._match_text_files = match_text_files self._keep_uncompressed = keep_uncompressed self._cache_dir = cache_dir self._encoding = encoding def run(self): set_verbosity_warning() dataset_module = dataset_module_factory(self._path_to_dataset) builder_cls = import_main_class(dataset_module.module_path) # use `None` as config if no configs builder_configs = builder_cls.BUILDER_CONFIGS or [None] auto_generate_results = [] with tempfile.TemporaryDirectory() as tmp_dir: for builder_config in builder_configs: config_name = builder_config.name if builder_config else None dataset_builder = builder_cls(config_name=config_name, hash=dataset_module.hash, cache_dir=tmp_dir) version = builder_config.version if builder_config else dataset_builder.config.version mock_dl_manager = MockDownloadManager( dataset_name=self._dataset_name, config=builder_config, version=version, use_local_dummy_data=True, load_existing_dummy_data=False, ) if self._auto_generate: auto_generate_results.append( self._autogenerate_dummy_data( dataset_builder=dataset_builder, mock_dl_manager=mock_dl_manager, keep_uncompressed=self._keep_uncompressed, ) ) else: self._print_dummy_data_instructions( dataset_builder=dataset_builder, mock_dl_manager=mock_dl_manager ) if self._auto_generate and not self._keep_uncompressed: if all(auto_generate_results): print(f"Automatic dummy data generation succeeded for all configs of '{self._path_to_dataset}'") else: print(f"Automatic dummy data generation failed for some configs of '{self._path_to_dataset}'") def _autogenerate_dummy_data(self, dataset_builder, mock_dl_manager, keep_uncompressed) -> Optional[bool]: dl_cache_dir = ( os.path.join(self._cache_dir, config.DOWNLOADED_DATASETS_DIR) if self._cache_dir else config.DOWNLOADED_DATASETS_PATH ) download_config = DownloadConfig(cache_dir=dl_cache_dir) dl_manager = DummyDataGeneratorDownloadManager( dataset_name=self._dataset_name, mock_download_manager=mock_dl_manager, download_config=download_config ) dataset_builder._split_generators(dl_manager) mock_dl_manager.load_existing_dummy_data = False # don't use real dummy data dl_manager.auto_generate_dummy_data_folder( n_lines=self._n_lines, json_field=self._json_field, xml_tag=self._xml_tag, match_text_files=self._match_text_files, encoding=self._encoding, ) if not keep_uncompressed: path_do_dataset = os.path.join(mock_dl_manager.datasets_scripts_dir, mock_dl_manager.dataset_name) dl_manager.compress_autogenerated_dummy_data(path_do_dataset) # now test that the dummy_data.zip file actually works mock_dl_manager.load_existing_dummy_data = True # use real dummy data n_examples_per_split = {} os.makedirs(dataset_builder._cache_dir, exist_ok=True) try: 
split_generators = dataset_builder._split_generators(mock_dl_manager) for split_generator in split_generators: dataset_builder._prepare_split(split_generator, check_duplicate_keys=False) n_examples_per_split[split_generator.name] = split_generator.split_info.num_examples except OSError as e: logger.error( f"Failed to load dummy data for config '{dataset_builder.config.name}''.\nOriginal error:\n" + str(e) ) return False else: if all(n_examples > 0 for n_examples in n_examples_per_split.values()): logger.warning( f"Dummy data generation done and dummy data test succeeded for config '{dataset_builder.config.name}''." ) return True else: empty_splits = [ split_name for split_name in n_examples_per_split if n_examples_per_split[split_name] == 0 ] logger.warning( f"Dummy data generation done but dummy data test failed since splits {empty_splits} have 0 examples for config '{dataset_builder.config.name}''." ) return False else: generated_dummy_data_dir = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder) logger.info( f"Dummy data generated in directory '{generated_dummy_data_dir}' but kept uncompressed. " "Please compress this directory into a zip file to use it for dummy data tests." ) def _print_dummy_data_instructions(self, dataset_builder, mock_dl_manager): dummy_data_folder = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder) logger.info(f"Creating dummy folder structure for {dummy_data_folder}... ") os.makedirs(dummy_data_folder, exist_ok=True) try: generator_splits = dataset_builder._split_generators(mock_dl_manager) except FileNotFoundError as e: print( f"Dataset {self._dataset_name} with config {mock_dl_manager.config} seems to already open files in the method `_split_generators(...)`. You might consider to instead only open files in the method `_generate_examples(...)` instead. If this is not possible the dummy data has to be created with less guidance. Make sure you create the file {e.filename}." ) files_to_create = set() split_names = [] dummy_file_name = mock_dl_manager.dummy_file_name for split in generator_splits: logger.info(f"Collecting dummy data file paths to create for {split.name}") split_names.append(split.name) gen_kwargs = split.gen_kwargs generator = dataset_builder._generate_examples(**gen_kwargs) try: dummy_data_guidance_print = "\n" + 30 * "=" + "DUMMY DATA INSTRUCTIONS" + 30 * "=" + "\n" config_string = ( f"config {mock_dl_manager.config.name} of " if mock_dl_manager.config is not None else "" ) dummy_data_guidance_print += ( "- In order to create the dummy data for " + config_string + f"{self._dataset_name}, please go into the folder '{dummy_data_folder}' with `cd {dummy_data_folder}` . \n\n" ) # trigger generate function for key, record in generator: pass dummy_data_guidance_print += f"- It appears that the function `_generate_examples(...)` expects one or more files in the folder {dummy_file_name} using the function `glob.glob(...)`. In this case, please refer to the `_generate_examples(...)` method to see under which filename the dummy data files should be created. \n\n" except FileNotFoundError as e: files_to_create.add(e.filename) split_names = ", ".join(split_names) if len(files_to_create) > 0: # no glob.glob(...) in `_generate_examples(...)` if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name: dummy_data_guidance_print += f"- Please create a single dummy data file called '{next(iter(files_to_create))}' from the folder '{dummy_data_folder}'. 
Make sure that the dummy data file provides at least one example for the split(s) '{split_names}' \n\n" files_string = dummy_file_name else: files_string = ", ".join(files_to_create) dummy_data_guidance_print += f"- Please create the following dummy data files '{files_string}' from the folder '{dummy_data_folder}'\n\n" dummy_data_guidance_print += f"- For each of the splits '{split_names}', make sure that one or more of the dummy data files provide at least one example \n\n" dummy_data_guidance_print += f"- If the method `_generate_examples(...)` includes multiple `open()` statements, you might have to create other files in addition to '{files_string}'. In this case please refer to the `_generate_examples(...)` method \n\n" if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name: dummy_data_guidance_print += f"- After the dummy data file is created, it should be zipped to '{dummy_file_name}.zip' with the command `zip {dummy_file_name}.zip {dummy_file_name}` \n\n" dummy_data_guidance_print += ( f"- You can now delete the file '{dummy_file_name}' with the command `rm {dummy_file_name}` \n\n" ) dummy_data_guidance_print += f"- To get the file '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n" else: dummy_data_guidance_print += f"- After all dummy data files are created, they should be zipped recursively to '{dummy_file_name}.zip' with the command `zip -r {dummy_file_name}.zip {dummy_file_name}/` \n\n" dummy_data_guidance_print += ( f"- You can now delete the folder '{dummy_file_name}' with the command `rm -r {dummy_file_name}` \n\n" ) dummy_data_guidance_print += f"- To get the folder '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n" dummy_data_guidance_print += ( f"- Make sure you have created the file '{dummy_file_name}.zip' in '{dummy_data_folder}' \n" ) dummy_data_guidance_print += 83 * "=" + "\n" print(dummy_data_guidance_print) def dummy_data_command_factory(args): return DummyDataCommand( args.path_to_dataset, args.auto_generate, args.n_lines, args.json_field, args.xml_tag, args.match_text_files, args.keep_uncompressed, args.cache_dir, args.encoding, )
null
17,941
import platform from argparse import ArgumentParser import fsspec import huggingface_hub import pandas import pyarrow from datasets import __version__ as version from datasets.commands import BaseDatasetsCLICommand class EnvironmentCommand(BaseDatasetsCLICommand): def register_subcommand(parser: ArgumentParser): def run(self): def format_dict(d): def info_command_factory(_): return EnvironmentCommand()
null
17,942
from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def parse_unknown_args(unknown_args): return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
null
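`parse_unknown_args` pairs up leftover argparse flags with their (string) values, e.g.:

extra = parse_unknown_args(["--data_dir", "./data", "--num_proc", "4"])
print(extra)  # {'data_dir': './data', 'num_proc': '4'}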
17,943
import os from argparse import ArgumentParser from pathlib import Path from shutil import copyfile from typing import List from datasets import config from datasets.builder import DatasetBuilder from datasets.commands import BaseDatasetsCLICommand from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadMode from datasets.load import dataset_module_factory, import_main_class from datasets.utils.deprecation_utils import deprecated from datasets.utils.info_utils import VerificationMode class RunBeamCommand(BaseDatasetsCLICommand): def register_subcommand(parser: ArgumentParser): run_beam_parser = parser.add_parser("run_beam", help="Run a Beam dataset processing pipeline") run_beam_parser.add_argument("dataset", type=str, help="Name of the dataset to download") run_beam_parser.add_argument("--name", type=str, default=None, help="Dataset config name") run_beam_parser.add_argument( "--cache_dir", type=str, default=None, help="Cache directory where the datasets are stored", ) run_beam_parser.add_argument( "--beam_pipeline_options", type=str, default="", help="Beam pipeline options, separated by commas. Example:: `--beam_pipeline_options=job_name=my-job,project=my-project`", ) run_beam_parser.add_argument( "--data_dir", type=str, default=None, help="Can be used to specify a manual directory to get the files from", ) run_beam_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations") run_beam_parser.add_argument("--save_info", action="store_true", help="Save the dataset infos file") run_beam_parser.add_argument( "--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks" ) run_beam_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload") # aliases run_beam_parser.add_argument("--save_infos", action="store_true", help="alias for save_info") run_beam_parser.set_defaults(func=run_beam_command_factory) def __init__( self, dataset: str, name: str, cache_dir: str, beam_pipeline_options: str, data_dir: str, all_configs: bool, save_infos: bool, ignore_verifications: bool, force_redownload: bool, **config_kwargs, ): self._dataset = dataset self._name = name self._cache_dir = cache_dir self._beam_pipeline_options = beam_pipeline_options self._data_dir = data_dir self._all_configs = all_configs self._save_infos = save_infos self._ignore_verifications = ignore_verifications self._force_redownload = force_redownload self._config_kwargs = config_kwargs def run(self): import apache_beam as beam if self._name is not None and self._all_configs: print("Both parameters `name` and `all_configs` can't be used at once.") exit(1) path, config_name = self._dataset, self._name dataset_module = dataset_module_factory(path) builder_cls = import_main_class(dataset_module.module_path) builders: List[DatasetBuilder] = [] if self._beam_pipeline_options: beam_options = beam.options.pipeline_options.PipelineOptions( flags=[f"--{opt.strip()}" for opt in self._beam_pipeline_options.split(",") if opt] ) else: beam_options = None if self._all_configs and len(builder_cls.BUILDER_CONFIGS) > 0: for builder_config in builder_cls.BUILDER_CONFIGS: builders.append( builder_cls( config_name=builder_config.name, data_dir=self._data_dir, hash=dataset_module.hash, beam_options=beam_options, cache_dir=self._cache_dir, base_path=dataset_module.builder_kwargs.get("base_path"), ) ) else: builders.append( builder_cls( config_name=config_name, data_dir=self._data_dir, 
beam_options=beam_options, cache_dir=self._cache_dir, base_path=dataset_module.builder_kwargs.get("base_path"), **self._config_kwargs, ) ) for builder in builders: builder.download_and_prepare( download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS if not self._force_redownload else DownloadMode.FORCE_REDOWNLOAD, download_config=DownloadConfig(cache_dir=config.DOWNLOADED_DATASETS_PATH), verification_mode=VerificationMode.NO_CHECKS if self._ignore_verifications else VerificationMode.ALL_CHECKS, ) if self._save_infos: builder._save_infos() print("Apache beam run successful.") # If save_infos=True, the dataset infos file is created next to the loaded module file. # Let's move it to the original directory of the dataset script, to allow the user to # upload them on S3 at the same time afterwards. if self._save_infos: dataset_infos_path = os.path.join(builder_cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME) name = Path(path).name + ".py" combined_path = os.path.join(path, name) if os.path.isfile(path): dataset_dir = os.path.dirname(path) elif os.path.isfile(combined_path): dataset_dir = path else: # in case of a remote dataset print(f"Dataset Infos file saved at {dataset_infos_path}") exit(1) # Move datasetinfo back to the user user_dataset_infos_path = os.path.join(dataset_dir, config.DATASETDICT_INFOS_FILENAME) copyfile(dataset_infos_path, user_dataset_infos_path) print(f"Dataset Infos file saved at {user_dataset_infos_path}") def run_beam_command_factory(args, **kwargs): return RunBeamCommand( args.dataset, args.name, args.cache_dir, args.beam_pipeline_options, args.data_dir, args.all_configs, args.save_info or args.save_infos, args.ignore_verifications, args.force_redownload, **kwargs, )
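For illustration, the comma-separated `--beam_pipeline_options` string used in `RunBeamCommand.run` above is split on commas and each entry is prefixed with `--` before being handed to `PipelineOptions`. The standalone helper below is a minimal sketch of just that parsing step; the helper name is hypothetical and the option values come from the command's own help text:

```py
# Hypothetical helper mirroring the flag-building logic in RunBeamCommand.run.
def beam_flags_from_cli_string(beam_pipeline_options: str) -> list:
    # "job_name=my-job,project=my-project" -> ["--job_name=my-job", "--project=my-project"]
    return [f"--{opt.strip()}" for opt in beam_pipeline_options.split(",") if opt]


assert beam_flags_from_cli_string("job_name=my-job,project=my-project") == [
    "--job_name=my-job",
    "--project=my-project",
]
```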
null
17,944
import copy import math import os import re import shutil from dataclasses import dataclass from functools import partial from pathlib import Path from typing import TYPE_CHECKING, List, Optional, Union import pyarrow as pa import pyarrow.parquet as pq from tqdm.contrib.concurrent import thread_map from .download.download_config import DownloadConfig from .naming import _split_re, filenames_for_dataset_split from .table import InMemoryTable, MemoryMappedTable, Table, concat_tables from .utils import logging from .utils import tqdm as hf_tqdm from .utils.deprecation_utils import deprecated from .utils.file_utils import cached_path class FileInstructions: """The file instructions associated with a split ReadInstruction. Attributes: num_examples: `int`, The total number of examples file_instructions: List[dict(filename, skip, take)], the files information. The filenames contains the relative path, not absolute. skip/take indicates which example read in the file: `ds.slice(skip, take)` """ num_examples: int file_instructions: List[dict] class ReadInstruction: """Reading instruction for a dataset. Examples:: # The following lines are equivalent: ds = datasets.load_dataset('mnist', split='test[:33%]') ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec('test[:33%]')) ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction('test', to=33, unit='%')) ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction( 'test', from_=0, to=33, unit='%')) # The following lines are equivalent: ds = datasets.load_dataset('mnist', split='test[:33%]+train[1:-1]') ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec( 'test[:33%]+train[1:-1]')) ds = datasets.load_dataset('mnist', split=( datasets.ReadInstruction('test', to=33, unit='%') + datasets.ReadInstruction('train', from_=1, to=-1, unit='abs'))) # The following lines are equivalent: ds = datasets.load_dataset('mnist', split='test[:33%](pct1_dropremainder)') ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec( 'test[:33%](pct1_dropremainder)')) ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction( 'test', from_=0, to=33, unit='%', rounding="pct1_dropremainder")) # 10-fold validation: tests = datasets.load_dataset( 'mnist', [datasets.ReadInstruction('train', from_=k, to=k+10, unit='%') for k in range(0, 100, 10)]) trains = datasets.load_dataset( 'mnist', [datasets.ReadInstruction('train', to=k, unit='%') + datasets.ReadInstruction('train', from_=k+10, unit='%') for k in range(0, 100, 10)]) """ def _init(self, relative_instructions): # Private initializer. self._relative_instructions = relative_instructions def _read_instruction_from_relative_instructions(cls, relative_instructions): """Returns ReadInstruction obj initialized with relative_instructions.""" # Use __new__ to bypass __init__ used by public API and not conveniant here. result = cls.__new__(cls) result._init(relative_instructions) # pylint: disable=protected-access return result def __init__(self, split_name, rounding=None, from_=None, to=None, unit=None): """Initialize ReadInstruction. Args: split_name (str): name of the split to read. Eg: 'train'. rounding (str, optional): The rounding behaviour to use when percent slicing is used. Ignored when slicing with absolute indices. Possible values: - 'closest' (default): The specified percentages are rounded to the closest value. Use this if you want specified percents to be as much exact as possible. 
- 'pct1_dropremainder': the specified percentages are treated as multiple of 1%. Use this option if you want consistency. Eg: len(5%) == 5 * len(1%). Using this option, one might not be able to use the full set of examples, if the number of those is not a multiple of 100. from_ (int): to (int): alternative way of specifying slicing boundaries. If any of {from_, to, unit} argument is used, slicing cannot be specified as string. unit (str): optional, one of: '%': to set the slicing unit as percents of the split size. 'abs': to set the slicing unit as absolute numbers. """ # This constructor is not always called. See factory method # `_read_instruction_from_relative_instructions`. Common init instructions # MUST be placed in the _init method. self._init([_RelativeInstruction(split_name, from_, to, unit, rounding)]) def from_spec(cls, spec): """Creates a `ReadInstruction` instance out of a string spec. Args: spec (`str`): Split(s) + optional slice(s) to read + optional rounding if percents are used as the slicing unit. A slice can be specified, using absolute numbers (`int`) or percentages (`int`). Examples: ``` test: test split. test + validation: test split + validation split. test[10:]: test split, minus its first 10 records. test[:10%]: first 10% records of test split. test[:20%](pct1_dropremainder): first 10% records, rounded with the pct1_dropremainder rounding. test[:-5%]+train[40%:60%]: first 95% of test + middle 20% of train. ``` Returns: ReadInstruction instance. """ spec = str(spec) # Need to convert to str in case of NamedSplit instance. subs = _ADDITION_SEP_RE.split(spec) if not subs: raise ValueError(f"No instructions could be built out of {spec}") instruction = _str_to_read_instruction(subs[0]) return sum((_str_to_read_instruction(sub) for sub in subs[1:]), instruction) def to_spec(self): rel_instr_specs = [] for rel_instr in self._relative_instructions: rel_instr_spec = rel_instr.splitname if rel_instr.from_ is not None or rel_instr.to is not None: from_ = rel_instr.from_ to = rel_instr.to unit = rel_instr.unit rounding = rel_instr.rounding unit = unit if unit == "%" else "" from_ = str(from_) + unit if from_ is not None else "" to = str(to) + unit if to is not None else "" slice_str = f"[{from_}:{to}]" rounding_str = ( f"({rounding})" if unit == "%" and rounding is not None and rounding != "closest" else "" ) rel_instr_spec += slice_str + rounding_str rel_instr_specs.append(rel_instr_spec) return "+".join(rel_instr_specs) def __add__(self, other): """Returns a new ReadInstruction obj, result of appending other to self.""" if not isinstance(other, ReadInstruction): msg = "ReadInstruction can only be added to another ReadInstruction obj." raise TypeError(msg) self_ris = self._relative_instructions other_ris = other._relative_instructions # pylint: disable=protected-access if ( self_ris[0].unit != "abs" and other_ris[0].unit != "abs" and self._relative_instructions[0].rounding != other_ris[0].rounding ): raise ValueError("It is forbidden to sum ReadInstruction instances with different rounding values.") return self._read_instruction_from_relative_instructions(self_ris + other_ris) def __str__(self): return self.to_spec() def __repr__(self): return f"ReadInstruction({self._relative_instructions})" def to_absolute(self, name2len): """Translate instruction into a list of absolute instructions. Those absolute instructions are then to be added together. Args: name2len (`dict`): Associating split names to number of examples. 
Returns: list of _AbsoluteInstruction instances (corresponds to the + in spec). """ return [_rel_to_abs_instr(rel_instr, name2len) for rel_instr in self._relative_instructions] def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None): prefix = filename_prefix_for_split(dataset_name, split) prefix = os.path.join(path, prefix) if shard_lengths: num_shards = len(shard_lengths) filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)] if filetype_suffix: filenames = [filename + f".{filetype_suffix}" for filename in filenames] return filenames else: filename = prefix if filetype_suffix: filename += f".{filetype_suffix}" return [filename] The provided code snippet includes necessary dependencies for implementing the `make_file_instructions` function. Write a Python function `def make_file_instructions( name: str, split_infos: List["SplitInfo"], instruction: Union[str, "ReadInstruction"], filetype_suffix: Optional[str] = None, prefix_path: Optional[str] = None, ) -> FileInstructions` to solve the following problem: Returns instructions of the split dict. Args: name (`str`): Name of the dataset. split_infos (`list` of `[SplitInfo]`): Dataset splits information. instruction ([`ReadInstruction`] or `str`): Reading instruction for a dataset. filetype_suffix (`str`, *optional*): Suffix of dataset files, e.g. 'arrow' or 'parquet'. prefix_path (`str`, *optional*): Prefix of dataset files, e.g. directory name. Returns: [`FileInstructions`] Here is the function: def make_file_instructions( name: str, split_infos: List["SplitInfo"], instruction: Union[str, "ReadInstruction"], filetype_suffix: Optional[str] = None, prefix_path: Optional[str] = None, ) -> FileInstructions: """Returns instructions of the split dict. Args: name (`str`): Name of the dataset. split_infos (`list` of `[SplitInfo]`): Dataset splits information. instruction ([`ReadInstruction`] or `str`): Reading instruction for a dataset. filetype_suffix (`str`, *optional*): Suffix of dataset files, e.g. 'arrow' or 'parquet'. prefix_path (`str`, *optional*): Prefix of dataset files, e.g. directory name. 
Returns: [`FileInstructions`] """ if not isinstance(name, str): raise TypeError(f"Expected str 'name', but got: {type(name).__name__}") elif not name: raise ValueError("Expected non-empty str 'name'") name2len = {info.name: info.num_examples for info in split_infos} name2shard_lengths = {info.name: info.shard_lengths for info in split_infos} name2filenames = { info.name: filenames_for_dataset_split( path=prefix_path, dataset_name=name, split=info.name, filetype_suffix=filetype_suffix, shard_lengths=name2shard_lengths[info.name], ) for info in split_infos } if not isinstance(instruction, ReadInstruction): instruction = ReadInstruction.from_spec(instruction) # Create the absolute instruction (per split) absolute_instructions = instruction.to_absolute(name2len) # For each split, return the files instruction (skip/take) file_instructions = [] num_examples = 0 for abs_instr in absolute_instructions: split_length = name2len[abs_instr.splitname] filenames = name2filenames[abs_instr.splitname] shard_lengths = name2shard_lengths[abs_instr.splitname] from_ = 0 if abs_instr.from_ is None else abs_instr.from_ to = split_length if abs_instr.to is None else abs_instr.to if shard_lengths is None: # not sharded for filename in filenames: take = to - from_ if take == 0: continue num_examples += take file_instructions.append({"filename": filename, "skip": from_, "take": take}) else: # sharded index_start = 0 # Beginning (included) of moving window. index_end = 0 # End (excluded) of moving window. for filename, shard_length in zip(filenames, shard_lengths): index_end += shard_length if from_ < index_end and to > index_start: # There is something to take. skip = from_ - index_start if from_ > index_start else 0 take = to - index_start - skip if to < index_end else -1 if take == 0: continue file_instructions.append({"filename": filename, "skip": skip, "take": take}) num_examples += shard_length - skip if take == -1 else take index_start += shard_length return FileInstructions( num_examples=num_examples, file_instructions=file_instructions, )
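A hedged usage sketch of `make_file_instructions` for a fictional sharded dataset. The dataset name, cache path, and shard sizes are invented; the sketch assumes `SplitInfo` and `FileInstructions` behave as the dataclasses they are in the actual `datasets` library (with a `shard_lengths` field on `SplitInfo`) and that the shard filenames follow the `<prefix>-00000-of-0000N.<suffix>` pattern produced by the helper above:

```py
from datasets import SplitInfo  # assumed: public dataclass with name/num_examples/shard_lengths

split_infos = [SplitInfo(name="train", num_examples=100, shard_lengths=[60, 40])]
fi = make_file_instructions(
    name="demo",
    split_infos=split_infos,
    instruction="train[:50%]",   # first 50% of the 100 train examples
    filetype_suffix="arrow",
    prefix_path="/cache/demo",
)
print(fi.num_examples)       # 50
print(fi.file_instructions)  # one dict for shard 0, e.g. [{'filename': '.../demo-train-00000-of-00002.arrow', 'skip': 0, 'take': 50}]
```

The 50 requested rows fit entirely in the first shard (60 examples), so the second shard is never listed in the instructions.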
Returns instructions of the split dict. Args: name (`str`): Name of the dataset. split_infos (`list` of `[SplitInfo]`): Dataset splits information. instruction ([`ReadInstruction`] or `str`): Reading instruction for a dataset. filetype_suffix (`str`, *optional*): Suffix of dataset files, e.g. 'arrow' or 'parquet'. prefix_path (`str`, *optional*): Prefix of dataset files, e.g. directory name. Returns: [`FileInstructions`]
17,945
import copy import math import os import re import shutil from dataclasses import dataclass from functools import partial from pathlib import Path from typing import TYPE_CHECKING, List, Optional, Union import pyarrow as pa import pyarrow.parquet as pq from tqdm.contrib.concurrent import thread_map from .download.download_config import DownloadConfig from .naming import _split_re, filenames_for_dataset_split from .table import InMemoryTable, MemoryMappedTable, Table, concat_tables from .utils import logging from .utils import tqdm as hf_tqdm from .utils.deprecation_utils import deprecated from .utils.file_utils import cached_path _SUB_SPEC_RE = re.compile( rf""" ^ (?P<split>{_split_re[1:-1]}) (\[ ((?P<from>-?\d+) (?P<from_pct>%)?)? : ((?P<to>-?\d+) (?P<to_pct>%)?)? \])?(\((?P<rounding>[^\)]*)\))? $ """, # remove ^ and $ re.X, ) class ReadInstruction: """Reading instruction for a dataset. Examples:: # The following lines are equivalent: ds = datasets.load_dataset('mnist', split='test[:33%]') ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec('test[:33%]')) ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction('test', to=33, unit='%')) ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction( 'test', from_=0, to=33, unit='%')) # The following lines are equivalent: ds = datasets.load_dataset('mnist', split='test[:33%]+train[1:-1]') ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec( 'test[:33%]+train[1:-1]')) ds = datasets.load_dataset('mnist', split=( datasets.ReadInstruction('test', to=33, unit='%') + datasets.ReadInstruction('train', from_=1, to=-1, unit='abs'))) # The following lines are equivalent: ds = datasets.load_dataset('mnist', split='test[:33%](pct1_dropremainder)') ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec( 'test[:33%](pct1_dropremainder)')) ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction( 'test', from_=0, to=33, unit='%', rounding="pct1_dropremainder")) # 10-fold validation: tests = datasets.load_dataset( 'mnist', [datasets.ReadInstruction('train', from_=k, to=k+10, unit='%') for k in range(0, 100, 10)]) trains = datasets.load_dataset( 'mnist', [datasets.ReadInstruction('train', to=k, unit='%') + datasets.ReadInstruction('train', from_=k+10, unit='%') for k in range(0, 100, 10)]) """ def _init(self, relative_instructions): # Private initializer. self._relative_instructions = relative_instructions def _read_instruction_from_relative_instructions(cls, relative_instructions): """Returns ReadInstruction obj initialized with relative_instructions.""" # Use __new__ to bypass __init__ used by public API and not conveniant here. result = cls.__new__(cls) result._init(relative_instructions) # pylint: disable=protected-access return result def __init__(self, split_name, rounding=None, from_=None, to=None, unit=None): """Initialize ReadInstruction. Args: split_name (str): name of the split to read. Eg: 'train'. rounding (str, optional): The rounding behaviour to use when percent slicing is used. Ignored when slicing with absolute indices. Possible values: - 'closest' (default): The specified percentages are rounded to the closest value. Use this if you want specified percents to be as much exact as possible. - 'pct1_dropremainder': the specified percentages are treated as multiple of 1%. Use this option if you want consistency. Eg: len(5%) == 5 * len(1%). 
Using this option, one might not be able to use the full set of examples, if the number of those is not a multiple of 100. from_ (int): to (int): alternative way of specifying slicing boundaries. If any of {from_, to, unit} argument is used, slicing cannot be specified as string. unit (str): optional, one of: '%': to set the slicing unit as percents of the split size. 'abs': to set the slicing unit as absolute numbers. """ # This constructor is not always called. See factory method # `_read_instruction_from_relative_instructions`. Common init instructions # MUST be placed in the _init method. self._init([_RelativeInstruction(split_name, from_, to, unit, rounding)]) def from_spec(cls, spec): """Creates a `ReadInstruction` instance out of a string spec. Args: spec (`str`): Split(s) + optional slice(s) to read + optional rounding if percents are used as the slicing unit. A slice can be specified, using absolute numbers (`int`) or percentages (`int`). Examples: ``` test: test split. test + validation: test split + validation split. test[10:]: test split, minus its first 10 records. test[:10%]: first 10% records of test split. test[:20%](pct1_dropremainder): first 10% records, rounded with the pct1_dropremainder rounding. test[:-5%]+train[40%:60%]: first 95% of test + middle 20% of train. ``` Returns: ReadInstruction instance. """ spec = str(spec) # Need to convert to str in case of NamedSplit instance. subs = _ADDITION_SEP_RE.split(spec) if not subs: raise ValueError(f"No instructions could be built out of {spec}") instruction = _str_to_read_instruction(subs[0]) return sum((_str_to_read_instruction(sub) for sub in subs[1:]), instruction) def to_spec(self): rel_instr_specs = [] for rel_instr in self._relative_instructions: rel_instr_spec = rel_instr.splitname if rel_instr.from_ is not None or rel_instr.to is not None: from_ = rel_instr.from_ to = rel_instr.to unit = rel_instr.unit rounding = rel_instr.rounding unit = unit if unit == "%" else "" from_ = str(from_) + unit if from_ is not None else "" to = str(to) + unit if to is not None else "" slice_str = f"[{from_}:{to}]" rounding_str = ( f"({rounding})" if unit == "%" and rounding is not None and rounding != "closest" else "" ) rel_instr_spec += slice_str + rounding_str rel_instr_specs.append(rel_instr_spec) return "+".join(rel_instr_specs) def __add__(self, other): """Returns a new ReadInstruction obj, result of appending other to self.""" if not isinstance(other, ReadInstruction): msg = "ReadInstruction can only be added to another ReadInstruction obj." raise TypeError(msg) self_ris = self._relative_instructions other_ris = other._relative_instructions # pylint: disable=protected-access if ( self_ris[0].unit != "abs" and other_ris[0].unit != "abs" and self._relative_instructions[0].rounding != other_ris[0].rounding ): raise ValueError("It is forbidden to sum ReadInstruction instances with different rounding values.") return self._read_instruction_from_relative_instructions(self_ris + other_ris) def __str__(self): return self.to_spec() def __repr__(self): return f"ReadInstruction({self._relative_instructions})" def to_absolute(self, name2len): """Translate instruction into a list of absolute instructions. Those absolute instructions are then to be added together. Args: name2len (`dict`): Associating split names to number of examples. Returns: list of _AbsoluteInstruction instances (corresponds to the + in spec). 
""" return [_rel_to_abs_instr(rel_instr, name2len) for rel_instr in self._relative_instructions] The provided code snippet includes necessary dependencies for implementing the `_str_to_read_instruction` function. Write a Python function `def _str_to_read_instruction(spec)` to solve the following problem: Returns ReadInstruction for given string. Here is the function: def _str_to_read_instruction(spec): """Returns ReadInstruction for given string.""" res = _SUB_SPEC_RE.match(spec) if not res: raise ValueError(f"Unrecognized instruction format: {spec}") unit = "%" if res.group("from_pct") or res.group("to_pct") else "abs" return ReadInstruction( split_name=res.group("split"), rounding=res.group("rounding"), from_=int(res.group("from")) if res.group("from") else None, to=int(res.group("to")) if res.group("to") else None, unit=unit, )
Returns ReadInstruction for given string.
17,946
import copy
import math
import os
import re
import shutil
from dataclasses import dataclass
from functools import partial
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Union

import pyarrow as pa
import pyarrow.parquet as pq
from tqdm.contrib.concurrent import thread_map

from .download.download_config import DownloadConfig
from .naming import _split_re, filenames_for_dataset_split
from .table import InMemoryTable, MemoryMappedTable, Table, concat_tables
from .utils import logging
from .utils import tqdm as hf_tqdm
from .utils.deprecation_utils import deprecated
from .utils.file_utils import cached_path


@dataclass
class _AbsoluteInstruction:
    """A machine friendly slice: defined absolute positive boundaries."""

    splitname: str
    from_: int  # uint (starting index).
    to: int  # uint (ending index).


def _pct_to_abs_pct1(boundary, num_examples):
    # Using math.trunc here, since -99.5% should give -99%, not -100%.
    if num_examples < 100:
        msg = (
            'Using "pct1_dropremainder" rounding on a split with less than 100 '
            "elements is forbidden: it always results in an empty dataset."
        )
        raise ValueError(msg)
    return boundary * math.trunc(num_examples / 100.0)


def _pct_to_abs_closest(boundary, num_examples):
    return int(round(boundary * num_examples / 100.0))

The provided code snippet includes necessary dependencies for implementing the `_rel_to_abs_instr` function. Write a Python function `def _rel_to_abs_instr(rel_instr, name2len)` to solve the following problem:
Returns _AbsoluteInstruction instance for given RelativeInstruction. Args: rel_instr: RelativeInstruction instance. name2len: dict {split_name: num_examples}.
Here is the function:
def _rel_to_abs_instr(rel_instr, name2len):
    """Returns _AbsoluteInstruction instance for given RelativeInstruction.

    Args:
        rel_instr: RelativeInstruction instance.
        name2len: dict {split_name: num_examples}.
    """
    pct_to_abs = _pct_to_abs_closest if rel_instr.rounding == "closest" else _pct_to_abs_pct1
    split = rel_instr.splitname
    if split not in name2len:
        raise ValueError(f'Unknown split "{split}". Should be one of {list(name2len)}.')
    num_examples = name2len[split]
    from_ = rel_instr.from_
    to = rel_instr.to
    if rel_instr.unit == "%":
        from_ = 0 if from_ is None else pct_to_abs(from_, num_examples)
        to = num_examples if to is None else pct_to_abs(to, num_examples)
    else:
        from_ = 0 if from_ is None else from_
        to = num_examples if to is None else to
    if from_ < 0:
        from_ = max(num_examples + from_, 0)
    if to < 0:
        to = max(num_examples + to, 0)
    from_ = min(from_, num_examples)
    to = min(to, num_examples)
    return _AbsoluteInstruction(split, from_, to)
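A small, hypothetical check of the boundary arithmetic above. `_RelativeInstruction` is not part of this snippet, so a namedtuple with the same field names stands in for it; the split name and sizes are made up, and the output assumes `_AbsoluteInstruction` is constructible positionally, as it is in the actual library:

```py
from collections import namedtuple

# Stand-in for the real _RelativeInstruction dataclass (same field names only).
RelStub = namedtuple("RelStub", ["splitname", "from_", "to", "unit", "rounding"])

name2len = {"train": 100}

# Absolute slicing with a negative end index: train[10:-10] -> examples 10..90.
abs_instr = _rel_to_abs_instr(RelStub("train", 10, -10, "abs", None), name2len)
print(abs_instr.from_, abs_instr.to)  # 10 90

# Percent slicing with "closest" rounding: train[:33%] -> examples 0..33.
abs_instr = _rel_to_abs_instr(RelStub("train", None, 33, "%", "closest"), name2len)
print(abs_instr.from_, abs_instr.to)  # 0 33
```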
Returns _AbsoluteInstruction instance for given RelativeInstruction. Args: rel_instr: RelativeInstruction instance. name2len: dict {split_name: num_examples}.
17,947
import importlib import inspect from functools import wraps from typing import TYPE_CHECKING, Optional from .download.download_config import DownloadConfig from .download.streaming_download_manager import ( xbasename, xdirname, xet_parse, xexists, xgetsize, xglob, xgzip_open, xisdir, xisfile, xjoin, xlistdir, xnumpy_load, xopen, xpandas_read_csv, xpandas_read_excel, xPath, xpyarrow_parquet_read_table, xrelpath, xsio_loadmat, xsplit, xsplitext, xwalk, xxml_dom_minidom_parse, ) from .utils.logging import get_logger from .utils.patching import patch_submodule from .utils.py_utils import get_imports, lock_importable_file def extend_module_for_streaming(module_path, download_config: Optional[DownloadConfig] = None): """Extend the module to support streaming. We patch some functions in the module to use `fsspec` to support data streaming: - We use `fsspec.open` to open and read remote files. We patch the module function: - `open` - We use the "::" hop separator to join paths and navigate remote compressed/archive files. We patch the module functions: - `os.path.join` - `pathlib.Path.joinpath` and `pathlib.Path.__truediv__` (called when using the "/" operator) The patched functions are replaced with custom functions defined to work with the :class:`~download.streaming_download_manager.StreamingDownloadManager`. Args: module_path: Path to the module to be extended. download_config : mainly use use_auth_token or storage_options to support different platforms and auth types. """ module = importlib.import_module(module_path) # TODO(QL): always update the module to add subsequent new authentication without removing old ones if hasattr(module, "_patched_for_streaming") and module._patched_for_streaming: if isinstance(module._patched_for_streaming, DownloadConfig): module._patched_for_streaming.token = download_config.token module._patched_for_streaming.storage_options = download_config.storage_options return def wrap_auth(function): def wrapper(*args, **kwargs): return function(*args, download_config=download_config, **kwargs) wrapper._decorator_name_ = "wrap_auth" return wrapper # open files in a streaming fashion patch_submodule(module, "open", wrap_auth(xopen)).start() patch_submodule(module, "os.listdir", wrap_auth(xlistdir)).start() patch_submodule(module, "os.walk", wrap_auth(xwalk)).start() patch_submodule(module, "glob.glob", wrap_auth(xglob)).start() # allow to navigate in remote zip files patch_submodule(module, "os.path.join", xjoin).start() patch_submodule(module, "os.path.dirname", xdirname).start() patch_submodule(module, "os.path.basename", xbasename).start() patch_submodule(module, "os.path.relpath", xrelpath).start() patch_submodule(module, "os.path.split", xsplit).start() patch_submodule(module, "os.path.splitext", xsplitext).start() # allow checks on paths patch_submodule(module, "os.path.exists", wrap_auth(xexists)).start() patch_submodule(module, "os.path.isdir", wrap_auth(xisdir)).start() patch_submodule(module, "os.path.isfile", wrap_auth(xisfile)).start() patch_submodule(module, "os.path.getsize", wrap_auth(xgetsize)).start() patch_submodule(module, "pathlib.Path", xPath).start() # file readers patch_submodule(module, "gzip.open", wrap_auth(xgzip_open)).start() patch_submodule(module, "numpy.load", wrap_auth(xnumpy_load)).start() patch_submodule(module, "pandas.read_csv", wrap_auth(xpandas_read_csv), attrs=["__version__"]).start() patch_submodule(module, "pandas.read_excel", wrap_auth(xpandas_read_excel), attrs=["__version__"]).start() patch_submodule(module, "scipy.io.loadmat", 
wrap_auth(xsio_loadmat), attrs=["__version__"]).start() patch_submodule(module, "xml.etree.ElementTree.parse", wrap_auth(xet_parse)).start() patch_submodule(module, "xml.dom.minidom.parse", wrap_auth(xxml_dom_minidom_parse)).start() # pyarrow: do not patch pyarrow attribute in packaged modules if not module.__name__.startswith("datasets.packaged_modules."): patch_submodule(module, "pyarrow.parquet.read_table", wrap_auth(xpyarrow_parquet_read_table)).start() module._patched_for_streaming = download_config import inspect class DownloadConfig: """Configuration for our cached path manager. Attributes: cache_dir (`str` or `Path`, *optional*): Specify a cache directory to save the file to (overwrite the default cache dir). force_download (`bool`, defaults to `False`): If `True`, re-dowload the file even if it's already cached in the cache dir. resume_download (`bool`, defaults to `False`): If `True`, resume the download if an incompletely received file is found. proxies (`dict`, *optional*): user_agent (`str`, *optional*): Optional string or dict that will be appended to the user-agent on remote requests. extract_compressed_file (`bool`, defaults to `False`): If `True` and the path point to a zip or tar file, extract the compressed file in a folder along the archive. force_extract (`bool`, defaults to `False`): If `True` when `extract_compressed_file` is `True` and the archive was already extracted, re-extract the archive and override the folder where it was extracted. delete_extracted (`bool`, defaults to `False`): Whether to delete (or keep) the extracted files. use_etag (`bool`, defaults to `True`): Whether to use the ETag HTTP response header to validate the cached files. num_proc (`int`, *optional*): The number of processes to launch to download the files in parallel. max_retries (`int`, default to `1`): The number of times to retry an HTTP request if it fails. token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`. use_auth_token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`. <Deprecated version="2.14.0"> `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0. </Deprecated> ignore_url_params (`bool`, defaults to `False`): Whether to strip all query parameters and fragments from the download URL before using it for caching the file. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the dataset file-system backend, if any. download_desc (`str`, *optional*): A description to be displayed alongside with the progress bar while downloading the files. 
""" cache_dir: Optional[Union[str, Path]] = None force_download: bool = False resume_download: bool = False local_files_only: bool = False proxies: Optional[Dict] = None user_agent: Optional[str] = None extract_compressed_file: bool = False force_extract: bool = False delete_extracted: bool = False use_etag: bool = True num_proc: Optional[int] = None max_retries: int = 1 token: Optional[Union[str, bool]] = None use_auth_token: InitVar[Optional[Union[str, bool]]] = "deprecated" ignore_url_params: bool = False storage_options: Dict[str, Any] = field(default_factory=dict) download_desc: Optional[str] = None def __post_init__(self, use_auth_token): if use_auth_token != "deprecated": warnings.warn( "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'token={use_auth_token}' instead.", FutureWarning, ) self.token = use_auth_token if "hf" not in self.storage_options: self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT} def copy(self) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()}) def __setattr__(self, name, value): if name == "token" and getattr(self, "storage_options", None) is not None: if "hf" not in self.storage_options: self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT} elif getattr(self.storage_options["hf"], "token", None) is None: self.storage_options["hf"]["token"] = value super().__setattr__(name, value) def lock_importable_file(importable_local_file: str) -> FileLock: # Check the directory with a unique name in our dataset folder # path is: ./datasets/dataset_name/hash_from_code/script.py # we use a hash as subdirectory_name to be able to have multiple versions of a dataset/metric processing file together importable_directory_path = str(Path(importable_local_file).resolve().parent.parent) lock_path = importable_directory_path + ".lock" return FileLock(lock_path) def get_imports(file_path: str) -> Tuple[str, str, str, str]: """Find whether we should import or clone additional files for a given processing script. And list the import. We allow: - library dependencies, - local dependencies and - external dependencies whose url is specified with a comment starting from "# From:' followed by the raw url to a file, an archive or a github repository. external dependencies will be downloaded (and extracted if needed in the dataset folder). We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script. Note that only direct import in the dataset processing script will be handled We don't recursively explore the additional import to download further files. 
Example:: import tensorflow import .c4_utils import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset """ lines = [] with open(file_path, encoding="utf-8") as f: lines.extend(f.readlines()) logger.debug(f"Checking {file_path} for additional imports.") imports: List[Tuple[str, str, str, Optional[str]]] = [] is_in_docstring = False for line in lines: docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line) if len(docstr_start_match) == 1: # flip True <=> False only if doctstring # starts at line without finishing is_in_docstring = not is_in_docstring if is_in_docstring: # import statements in doctstrings should # not be added as required dependencies continue match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE) if match is None: match = re.match( r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE, ) if match is None: continue if match.group(1): # The import starts with a '.', we will download the relevant file if any(imp[1] == match.group(2) for imp in imports): # We already have this import continue if match.group(3): # The import has a comment with 'From:', we'll retrieve it from the given url url_path = match.group(3) url_path, sub_directory = _convert_github_url(url_path) imports.append(("external", match.group(2), url_path, sub_directory)) elif match.group(2): # The import should be at the same place as the file imports.append(("internal", match.group(2), match.group(2), None)) else: if match.group(3): # The import has a comment with `From: git+https:...`, asks user to pip install from git. url_path = match.group(3) imports.append(("library", match.group(2), url_path, None)) else: imports.append(("library", match.group(2), match.group(2), None)) return imports class DatasetBuilder: """Abstract base class for all datasets. `DatasetBuilder` has 3 key methods: - [`DatasetBuilder.info`]: Documents the dataset, including feature names, types, shapes, version, splits, citation, etc. - [`DatasetBuilder.download_and_prepare`]: Downloads the source data and writes it to disk. - [`DatasetBuilder.as_dataset`]: Generates a [`Dataset`]. Some `DatasetBuilder`s expose multiple variants of the dataset by defining a [`BuilderConfig`] subclass and accepting a config object (or name) on construction. Configurable datasets expose a pre-defined set of configurations in [`DatasetBuilder.builder_configs`]. Args: cache_dir (`str`, *optional*): Directory to cache data. Defaults to `"~/.cache/huggingface/datasets"`. dataset_name (`str`, *optional*): Name of the dataset, if different from the builder name. Useful for packaged builders like csv, imagefolder, audiofolder, etc. to reflect the difference between datasets that use the same packaged builder. config_name (`str`, *optional*): Name of the dataset configuration. It affects the data generated on disk. Different configurations will have their own subdirectories and versions. If not provided, the default configuration is used (if it exists). <Added version="2.3.0"> Parameter `name` was renamed to `config_name`. </Added> hash (`str`, *optional*): Hash specific to the dataset code. Used to update the caching directory when the dataset loading script code is updated (to avoid reusing old data). The typical caching directory (defined in `self._relative_data_dir`) is `name/version/hash/`. 
base_path (`str`, *optional*): Base path for relative paths that are used to download files. This can be a remote URL. features ([`Features`], *optional*): Features types to use with this dataset. It can be used to change the [`Features`] types of a dataset, for example. token (`str` or `bool`, *optional*): String or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, will get token from `"~/.huggingface"`. repo_id (`str`, *optional*): ID of the dataset repository. Used to distinguish builders with the same name but not coming from the same namespace, for example "squad" and "lhoestq/squad" repo IDs. In the latter, the builder name would be "lhoestq___squad". data_files (`str` or `Sequence` or `Mapping`, *optional*): Path(s) to source data file(s). For builders like "csv" or "json" that need the user to specify data files. They can be either local or remote files. For convenience, you can use a `DataFilesDict`. data_dir (`str`, *optional*): Path to directory containing source data file(s). Use only if `data_files` is not passed, in which case it is equivalent to passing `os.path.join(data_dir, "**")` as `data_files`. For builders that require manual download, it must be the path to the local directory containing the manually downloaded data. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the dataset file-system backend, if any. writer_batch_size (`int`, *optional*): Batch size used by the ArrowWriter. It defines the number of samples that are kept in memory before writing them and also the length of the arrow chunks. None means that the ArrowWriter will use its default value. name (`str`): Configuration name for the dataset. <Deprecated version="2.3.0"> Use `config_name` instead. </Deprecated> **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the corresponding builder configuration class, set on the class attribute [`DatasetBuilder.BUILDER_CONFIG_CLASS`]. The builder configuration class is [`BuilderConfig`] or a subclass of it. """ # Default version VERSION = None # Default version set in BuilderConfig # Class for the builder config. BUILDER_CONFIG_CLASS = BuilderConfig # Named configurations that modify the data generated by download_and_prepare. 
BUILDER_CONFIGS = [] # Optional default config name to be used when name is None DEFAULT_CONFIG_NAME = None # Default batch size used by the ArrowWriter # It defines the number of samples that are kept in memory before writing them # and also the length of the arrow chunks # None means that the ArrowWriter will use its default value DEFAULT_WRITER_BATCH_SIZE = None def __init__( self, cache_dir: Optional[str] = None, dataset_name: Optional[str] = None, config_name: Optional[str] = None, hash: Optional[str] = None, base_path: Optional[str] = None, info: Optional[DatasetInfo] = None, features: Optional[Features] = None, token: Optional[Union[bool, str]] = None, use_auth_token="deprecated", repo_id: Optional[str] = None, data_files: Optional[Union[str, list, dict, DataFilesDict]] = None, data_dir: Optional[str] = None, storage_options: Optional[dict] = None, writer_batch_size: Optional[int] = None, name="deprecated", **config_kwargs, ): if use_auth_token != "deprecated": warnings.warn( "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'token={use_auth_token}' instead.", FutureWarning, ) token = use_auth_token if name != "deprecated": warnings.warn( "Parameter 'name' was renamed to 'config_name' in version 2.3.0 and will be removed in 3.0.0.", category=FutureWarning, ) config_name = name # DatasetBuilder name self.name: str = camelcase_to_snakecase(self.__module__.split(".")[-1]) self.hash: Optional[str] = hash self.base_path = base_path self.token = token # For backwards compatibility (e.g. if accessed in a dataset script) self.use_auth_token = token self.repo_id = repo_id self.storage_options = storage_options or {} self.dataset_name = camelcase_to_snakecase(dataset_name) if dataset_name else self.name self._writer_batch_size = writer_batch_size or self.DEFAULT_WRITER_BATCH_SIZE if data_files is not None and not isinstance(data_files, DataFilesDict): data_files = DataFilesDict.from_patterns( sanitize_patterns(data_files), base_path=base_path, download_config=DownloadConfig(token=token, storage_options=self.storage_options), ) # Prepare config: DatasetConfig contains name, version and description but can be extended by each dataset if "features" in inspect.signature(self.BUILDER_CONFIG_CLASS.__init__).parameters and features is not None: config_kwargs["features"] = features if data_files is not None: config_kwargs["data_files"] = data_files if data_dir is not None: config_kwargs["data_dir"] = data_dir self.config, self.config_id = self._create_builder_config( config_name=config_name, custom_features=features, **config_kwargs, ) # prepare info: DatasetInfo are a standardized dataclass across all datasets # Prefill datasetinfo if info is None: # TODO FOR PACKAGED MODULES IT IMPORTS DATA FROM src/packaged_modules which doesn't make sense info = self.get_exported_dataset_info() info.update(self._info()) info.builder_name = self.name info.dataset_name = self.dataset_name info.config_name = self.config.name info.version = self.config.version self.info = info # update info with user specified infos if features is not None: self.info.features = features # Prepare data dirs: # cache_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing) self._cache_dir_root = str(cache_dir or config.HF_DATASETS_CACHE) self._cache_dir_root = ( self._cache_dir_root if is_remote_url(self._cache_dir_root) else os.path.expanduser(self._cache_dir_root) ) self._cache_downloaded_dir = ( 
posixpath.join(self._cache_dir_root, config.DOWNLOADED_DATASETS_DIR) if cache_dir else str(config.DOWNLOADED_DATASETS_PATH) ) self._cache_downloaded_dir = ( self._cache_downloaded_dir if is_remote_url(self._cache_downloaded_dir) else os.path.expanduser(self._cache_downloaded_dir) ) # In case there exists a legacy cache directory self._legacy_relative_data_dir = None self._cache_dir = self._build_cache_dir() if not is_remote_url(self._cache_dir_root): os.makedirs(self._cache_dir_root, exist_ok=True) lock_path = os.path.join( self._cache_dir_root, Path(self._cache_dir).as_posix().replace("/", "_") + ".lock" ) with FileLock(lock_path): if os.path.exists(self._cache_dir): # check if data exist if len(os.listdir(self._cache_dir)) > 0: if os.path.exists(os.path.join(self._cache_dir, config.DATASET_INFO_FILENAME)): logger.info("Overwrite dataset info from restored data version if exists.") self.info = DatasetInfo.from_directory(self._cache_dir) else: # dir exists but no data, remove the empty dir as data aren't available anymore logger.warning( f"Old caching folder {self._cache_dir} for dataset {self.dataset_name} exists but no data were found. Removing it. " ) os.rmdir(self._cache_dir) # Store in the cache by default unless the user specifies a custom output_dir to download_and_prepare self._output_dir = self._cache_dir self._fs: fsspec.AbstractFileSystem = fsspec.filesystem("file") # Set download manager self.dl_manager = None # Set to True by "datasets-cli test" to generate file checksums for (deprecated) dataset_infos.json independently of verification_mode value. self._record_infos = False # Set in `.download_and_prepare` once the format of the generated dataset is known self._file_format = None # Enable streaming (e.g. it patches "open" to work with remote files) extend_dataset_builder_for_streaming(self) def __getstate__(self): return self.__dict__ def __setstate__(self, d): self.__dict__ = d # Re-enable streaming, since patched functions are not kept when pickling extend_dataset_builder_for_streaming(self) # Must be set for datasets that use 'data_dir' functionality - the ones # that require users to do additional steps to download the data # (this is usually due to some external regulations / rules). # This field should contain a string with user instructions, including # the list of files that should be present. It will be # displayed in the dataset documentation. 
def manual_download_instructions(self) -> Optional[str]: return None def _check_legacy_cache(self) -> Optional[str]: """Check for the old cache directory template {cache_dir}/{namespace}___{builder_name} from 2.13""" if ( self.__module__.startswith("datasets.") and not is_remote_url(self._cache_dir_root) and self.config.name == "default" ): from .packaged_modules import _PACKAGED_DATASETS_MODULES namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None config_name = self.repo_id.replace("/", "--") if self.repo_id is not None else self.dataset_name config_id = config_name + self.config_id[len(self.config.name) :] hash = _PACKAGED_DATASETS_MODULES.get(self.name, "missing")[1] legacy_relative_data_dir = posixpath.join( self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}", config_id, "0.0.0", hash, ) legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir) if os.path.isdir(legacy_cache_dir): return legacy_relative_data_dir def _check_legacy_cache2(self, dataset_module: "DatasetModule") -> Optional[str]: """Check for the old cache directory template {cache_dir}/{namespace}___{dataset_name}/{config_name}-xxx from 2.14 and 2.15""" if self.__module__.startswith("datasets.") and not is_remote_url(self._cache_dir_root): from .packaged_modules import _PACKAGED_DATASETS_MODULES from .utils._dill import Pickler def update_hash_with_config_parameters(hash: str, config_parameters: dict) -> str: """ Used to update hash of packaged modules which is used for creating unique cache directories to reflect different config parameters which are passed in metadata from readme. """ params_to_exclude = {"config_name", "version", "description"} params_to_add_to_hash = { param: value for param, value in sorted(config_parameters.items()) if param not in params_to_exclude } m = Hasher() m.update(hash) m.update(params_to_add_to_hash) return m.hexdigest() namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None with patch.object(Pickler, "_legacy_no_dict_keys_sorting", True): config_id = self.config.name + "-" + Hasher.hash({"data_files": self.config.data_files}) hash = _PACKAGED_DATASETS_MODULES.get(self.name, "missing")[1] if ( dataset_module.builder_configs_parameters.metadata_configs and self.config.name in dataset_module.builder_configs_parameters.metadata_configs ): hash = update_hash_with_config_parameters( hash, dataset_module.builder_configs_parameters.metadata_configs[self.config.name] ) legacy_relative_data_dir = posixpath.join( self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}", config_id, "0.0.0", hash, ) legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir) if os.path.isdir(legacy_cache_dir): return legacy_relative_data_dir def get_all_exported_dataset_infos(cls) -> DatasetInfosDict: """Empty dict if doesn't exist Example: ```py >>> from datasets import load_dataset_builder >>> ds_builder = load_dataset_builder('rotten_tomatoes') >>> ds_builder.get_all_exported_dataset_infos() {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. 
This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)} ``` """ return DatasetInfosDict.from_directory(cls.get_imported_module_dir()) def get_exported_dataset_info(self) -> DatasetInfo: """Empty `DatasetInfo` if doesn't exist Example: ```py >>> from datasets import load_dataset_builder >>> ds_builder = load_dataset_builder('rotten_tomatoes') >>> ds_builder.get_exported_dataset_info() DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. 
This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231) ``` """ return self.get_all_exported_dataset_infos().get(self.config.name, DatasetInfo()) def _create_builder_config( self, config_name=None, custom_features=None, **config_kwargs ) -> Tuple[BuilderConfig, str]: """Create and validate BuilderConfig object as well as a unique config id for this config. Raises ValueError if there are multiple builder configs and config_name and DEFAULT_CONFIG_NAME are None. config_kwargs override the defaults kwargs in config """ builder_config = None # try default config if config_name is None and self.BUILDER_CONFIGS: if self.DEFAULT_CONFIG_NAME is not None: builder_config = self.builder_configs.get(self.DEFAULT_CONFIG_NAME) logger.info(f"No config specified, defaulting to: {self.dataset_name}/{builder_config.name}") else: if len(self.BUILDER_CONFIGS) > 1: if not config_kwargs: example_of_usage = f"load_dataset('{self.dataset_name}', '{self.BUILDER_CONFIGS[0].name}')" raise ValueError( "Config name is missing." f"\nPlease pick one among the available configs: {list(self.builder_configs.keys())}" + f"\nExample of usage:\n\t`{example_of_usage}`" ) else: builder_config = self.BUILDER_CONFIGS[0] logger.info( f"No config specified, defaulting to the single config: {self.dataset_name}/{builder_config.name}" ) # try to get config by name if isinstance(config_name, str): builder_config = self.builder_configs.get(config_name) if builder_config is None and self.BUILDER_CONFIGS: raise ValueError( f"BuilderConfig '{config_name}' not found. 
Available: {list(self.builder_configs.keys())}" ) # if not using an existing config, then create a new config on the fly if not builder_config: if config_name is not None: config_kwargs["name"] = config_name elif self.DEFAULT_CONFIG_NAME and not config_kwargs: # Use DEFAULT_CONFIG_NAME only if no config_kwargs are passed config_kwargs["name"] = self.DEFAULT_CONFIG_NAME if "version" not in config_kwargs and hasattr(self, "VERSION") and self.VERSION: config_kwargs["version"] = self.VERSION builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs) # otherwise use the config_kwargs to overwrite the attributes else: builder_config = copy.deepcopy(builder_config) if config_kwargs else builder_config for key, value in config_kwargs.items(): if value is not None: if not hasattr(builder_config, key): raise ValueError(f"BuilderConfig {builder_config} doesn't have a '{key}' key.") setattr(builder_config, key, value) if not builder_config.name: raise ValueError(f"BuilderConfig must have a name, got {builder_config.name}") # resolve data files if needed builder_config._resolve_data_files( base_path=self.base_path, download_config=DownloadConfig(token=self.token, storage_options=self.storage_options), ) # compute the config id that is going to be used for caching config_id = builder_config.create_config_id( config_kwargs, custom_features=custom_features, ) is_custom = (config_id not in self.builder_configs) and config_id != "default" if is_custom: logger.info(f"Using custom data configuration {config_id}") else: if ( builder_config.name in self.builder_configs and builder_config != self.builder_configs[builder_config.name] ): raise ValueError( "Cannot name a custom BuilderConfig the same as an available " f"BuilderConfig. Change the name. Available BuilderConfigs: {list(self.builder_configs.keys())}" ) if not builder_config.version: raise ValueError(f"BuilderConfig {builder_config.name} must have a version") return builder_config, config_id def builder_configs(cls) -> Dict[str, BuilderConfig]: """Dictionary of pre-defined configurations for this builder class.""" configs = {config.name: config for config in cls.BUILDER_CONFIGS} if len(configs) != len(cls.BUILDER_CONFIGS): names = [config.name for config in cls.BUILDER_CONFIGS] raise ValueError(f"Names in BUILDER_CONFIGS must not be duplicated. Got {names}") return configs def cache_dir(self): return self._cache_dir def _use_legacy_cache_dir_if_possible(self, dataset_module: "DatasetModule"): # Check for the legacy cache directory template (datasets<3.0.0) self._legacy_relative_data_dir = ( self._check_legacy_cache2(dataset_module) or self._check_legacy_cache() or None ) self._cache_dir = self._build_cache_dir() self._output_dir = self._cache_dir def _relative_data_dir(self, with_version=True, with_hash=True) -> str: """Relative path of this dataset in cache_dir: Will be: self.dataset_name/self.config.version/self.hash/ or if a repo_id with a namespace has been specified: self.namespace___self.dataset_name/self.config.version/self.hash/ If any of these element is missing or if ``with_version=False`` the corresponding subfolders are dropped. 
""" if self._legacy_relative_data_dir is not None and with_version and with_hash: return self._legacy_relative_data_dir namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None builder_data_dir = self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}" builder_data_dir = posixpath.join(builder_data_dir, self.config_id) if with_version: builder_data_dir = posixpath.join(builder_data_dir, str(self.config.version)) if with_hash and self.hash and isinstance(self.hash, str): builder_data_dir = posixpath.join(builder_data_dir, self.hash) return builder_data_dir def _build_cache_dir(self): """Return the data directory for the current version.""" builder_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=False)) version_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=True)) def _other_versions_on_disk(): """Returns previous versions on disk.""" if not os.path.exists(builder_data_dir): return [] version_dirnames = [] for dir_name in os.listdir(builder_data_dir): try: version_dirnames.append((utils.Version(dir_name), dir_name)) except ValueError: # Invalid version (ex: incomplete data dir) pass version_dirnames.sort(reverse=True) return version_dirnames # Check and warn if other versions exist if not is_remote_url(builder_data_dir): version_dirs = _other_versions_on_disk() if version_dirs: other_version = version_dirs[0][0] if other_version != self.config.version: warn_msg = ( f"Found a different version {str(other_version)} of dataset {self.dataset_name} in " f"cache_dir {self._cache_dir_root}. Using currently defined version " f"{str(self.config.version)}." ) logger.warning(warn_msg) return version_data_dir def _info(self) -> DatasetInfo: """Construct the DatasetInfo object. See `DatasetInfo` for details. Warning: This function is only called once and the result is cached for all following .info() calls. Returns: info: (DatasetInfo) The dataset information """ raise NotImplementedError def get_imported_module_dir(cls): """Return the path of the module of this class or subclass.""" return os.path.dirname(inspect.getfile(inspect.getmodule(cls))) def _rename(self, src: str, dst: str): rename(self._fs, src, dst) def download_and_prepare( self, output_dir: Optional[str] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, verification_mode: Optional[Union[VerificationMode, str]] = None, ignore_verifications="deprecated", try_from_hf_gcs="deprecated", dl_manager: Optional[DownloadManager] = None, base_path: Optional[str] = None, use_auth_token="deprecated", file_format: str = "arrow", max_shard_size: Optional[Union[int, str]] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, **download_and_prepare_kwargs, ): """Downloads and prepares dataset for reading. Args: output_dir (`str`, *optional*): Output directory for the dataset. Default to this builder's `cache_dir`, which is inside `~/.cache/huggingface/datasets` by default. <Added version="2.5.0"/> download_config (`DownloadConfig`, *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, *optional*): Select the download/generate mode, default to `REUSE_DATASET_IF_EXISTS`. verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`): Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...). 
<Added version="2.9.1"/> ignore_verifications (`bool`, defaults to `False`): Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...). <Deprecated version="2.9.1"> `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0. Please use `verification_mode` instead. </Deprecated> try_from_hf_gcs (`bool`): If `True`, it will try to download the already prepared dataset from the HF Google cloud storage. <Deprecated version="2.16.0"> `try_from_hf_gcs` was deprecated in version 2.16.0 and will be removed in 3.0.0. Host the processed files on the Hugging Face Hub instead. </Deprecated> dl_manager (`DownloadManager`, *optional*): Specific `DownloadManger` to use. base_path (`str`, *optional*): Base path for relative paths that are used to download files. This can be a remote url. If not specified, the value of the `base_path` attribute (`self.base_path`) will be used instead. use_auth_token (`Union[str, bool]`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If True, or not specified, will get token from ~/.huggingface. <Deprecated version="2.7.1"> Pass `use_auth_token` to `load_dataset_builder` instead. </Deprecated> file_format (`str`, *optional*): Format of the data files in which the dataset will be written. Supported formats: "arrow", "parquet". Default to "arrow" format. If the format is "parquet", then image and audio data are embedded into the Parquet files instead of pointing to local files. <Added version="2.5.0"/> max_shard_size (`Union[str, int]`, *optional*): Maximum number of bytes written per shard, default is "500MB". The size is based on uncompressed data size, so in practice your shard files may be smaller than `max_shard_size` thanks to Parquet compression for example. <Added version="2.5.0"/> num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. Multiprocessing is disabled by default. <Added version="2.7.0"/> storage_options (`dict`, *optional*): Key/value pairs to be passed on to the caching file-system backend, if any. <Added version="2.5.0"/> **download_and_prepare_kwargs (additional keyword arguments): Keyword arguments. 
Example: Download and prepare the dataset as Arrow files that can be loaded as a Dataset using `builder.as_dataset()`: ```py >>> from datasets import load_dataset_builder >>> builder = load_dataset_builder("rotten_tomatoes") >>> builder.download_and_prepare() ``` Download and prepare the dataset as sharded Parquet files locally: ```py >>> from datasets import load_dataset_builder >>> builder = load_dataset_builder("rotten_tomatoes") >>> builder.download_and_prepare("./output_dir", file_format="parquet") ``` Download and prepare the dataset as sharded Parquet files in a cloud storage: ```py >>> from datasets import load_dataset_builder >>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key} >>> builder = load_dataset_builder("rotten_tomatoes") >>> builder.download_and_prepare("s3://my-bucket/my_rotten_tomatoes", storage_options=storage_options, file_format="parquet") ``` """ if ignore_verifications != "deprecated": verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS warnings.warn( "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.", FutureWarning, ) if use_auth_token != "deprecated": warnings.warn( "'use_auth_token' was deprecated in version 2.7.1 and will be removed in 3.0.0. Pass `token` to `load_dataset_builder` instead.", FutureWarning, ) token = use_auth_token else: token = self.token if try_from_hf_gcs != "deprecated": warnings.warn( "'try_from_hf_gcs' was deprecated in version 2.16.0 and will be removed in 3.0.0.", FutureWarning, ) else: try_from_hf_gcs = False output_dir = output_dir if output_dir is not None else self._cache_dir # output_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing) fs, output_dir = url_to_fs(output_dir, **(storage_options or {})) self._fs = fs self._output_dir = output_dir if not is_remote_filesystem(self._fs) else self._fs.unstrip_protocol(output_dir) download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS) base_path = base_path if base_path is not None else self.base_path if file_format is not None and file_format not in ["arrow", "parquet"]: raise ValueError(f"Unsupported file_format: {file_format}. Expected 'arrow' or 'parquet'") self._file_format = file_format if self._fs._strip_protocol(self._output_dir) == "": # We don't support the root directory, because it has no dirname, # and we need a dirname to use a <dirname>.incomplete directory # when the dataset is being written raise RuntimeError( f"Unable to download and prepare the dataset at the root {self._output_dir}. " f"Please specify a subdirectory, e.g. 
'{self._output_dir + self.dataset_name}'" ) if dl_manager is None: if download_config is None: download_config = DownloadConfig( cache_dir=self._cache_downloaded_dir, force_download=download_mode == DownloadMode.FORCE_REDOWNLOAD, force_extract=download_mode == DownloadMode.FORCE_REDOWNLOAD, use_etag=False, num_proc=num_proc, token=token, storage_options=self.storage_options, ) # We don't use etag for data files to speed up the process dl_manager = DownloadManager( dataset_name=self.dataset_name, download_config=download_config, data_dir=self.config.data_dir, base_path=base_path, record_checksums=(self._record_infos or verification_mode == VerificationMode.ALL_CHECKS), ) is_local = not is_remote_filesystem(self._fs) if ( isinstance(dl_manager, MockDownloadManager) or not is_local or file_format != "arrow" or max_shard_size is not None ): try_from_hf_gcs = False self.dl_manager = dl_manager # Prevent parallel local disk operations if is_local: # Create parent directory of the output_dir to put the lock file in there Path(self._output_dir).parent.mkdir(parents=True, exist_ok=True) lock_path = self._output_dir + "_builder.lock" # File locking only with local paths; no file locking on GCS or S3 with FileLock(lock_path) if is_local else contextlib.nullcontext(): # Check if the data already exists data_exists = self._fs.exists(posixpath.join(self._output_dir, config.DATASET_INFO_FILENAME)) if data_exists and download_mode == DownloadMode.REUSE_DATASET_IF_EXISTS: logger.info(f"Found cached dataset {self.dataset_name} ({self._output_dir})") # We need to update the info in case some splits were added in the meantime # for example when calling load_dataset from multiple workers. self.info = self._load_info() self.download_post_processing_resources(dl_manager) return logger.info(f"Generating dataset {self.dataset_name} ({self._output_dir})") if is_local: # if cache dir is local, check for available space if not has_sufficient_disk_space( self.info.size_in_bytes or 0, directory=Path(self._output_dir).parent ): raise OSError( f"Not enough disk space. Needed: {size_str(self.info.size_in_bytes or 0)} (download: {size_str(self.info.download_size or 0)}, generated: {size_str(self.info.dataset_size or 0)}, post-processed: {size_str(self.info.post_processing_size or 0)})" ) def incomplete_dir(dirname): """Create temporary dir for dirname and rename on exit.""" if not is_local: self._fs.makedirs(dirname, exist_ok=True) yield dirname else: tmp_dir = dirname + ".incomplete" os.makedirs(tmp_dir, exist_ok=True) try: yield tmp_dir if os.path.isdir(dirname): shutil.rmtree(dirname) # LocalFileSystem.mv does copy + rm, it is more efficient to simply rename a local directory shutil.move(tmp_dir, dirname) finally: if os.path.exists(tmp_dir): shutil.rmtree(tmp_dir) # Print is intentional: we want this to always go to stdout so user has # information needed to cancel download/preparation if needed. # This comes right before the progress bar. if self.info.size_in_bytes: logger.info( f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} " f"(download: {size_str(self.info.download_size)}, generated: {size_str(self.info.dataset_size)}, " f"post-processed: {size_str(self.info.post_processing_size)}, " f"total: {size_str(self.info.size_in_bytes)}) to {self._output_dir}..." 
) else: _dest = self._fs._strip_protocol(self._output_dir) if is_local else self._output_dir logger.info(f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} to {_dest}...") self._check_manual_download(dl_manager) # Create a tmp dir and rename to self._output_dir on successful exit. with incomplete_dir(self._output_dir) as tmp_output_dir: # Temporarily assign _output_dir to tmp_data_dir to avoid having to forward # it to every sub function. with temporary_assignment(self, "_output_dir", tmp_output_dir): # Try to download the already prepared dataset files downloaded_from_gcs = False if try_from_hf_gcs: try: self._download_prepared_from_hf_gcs(dl_manager.download_config) downloaded_from_gcs = True except (DatasetNotOnHfGcsError, MissingFilesOnHfGcsError): logger.info("Dataset not on Hf google storage. Downloading and preparing it from source") except ConnectionError: logger.warning("HF google storage unreachable. Downloading and preparing it from source") if not downloaded_from_gcs: prepare_split_kwargs = {"file_format": file_format} if max_shard_size is not None: prepare_split_kwargs["max_shard_size"] = max_shard_size if num_proc is not None: prepare_split_kwargs["num_proc"] = num_proc self._download_and_prepare( dl_manager=dl_manager, verification_mode=verification_mode, **prepare_split_kwargs, **download_and_prepare_kwargs, ) # Sync info self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values()) self.info.download_checksums = dl_manager.get_recorded_sizes_checksums() self.info.size_in_bytes = self.info.dataset_size + self.info.download_size # Save info self._save_info() # Download post processing resources self.download_post_processing_resources(dl_manager) logger.info( f"Dataset {self.dataset_name} downloaded and prepared to {self._output_dir}. " f"Subsequent calls will reuse this data." ) def _check_manual_download(self, dl_manager): if self.manual_download_instructions is not None and dl_manager.manual_dir is None: raise ManualDownloadError( textwrap.dedent( f"""\ The dataset {self.dataset_name} with config {self.config.name} requires manual data. 
Please follow the manual download instructions: {self.manual_download_instructions} Manual data can be loaded with: datasets.load_dataset("{self.dataset_name}", data_dir="<path/to/manual/data>")""" ) ) def _download_prepared_from_hf_gcs(self, download_config: DownloadConfig): relative_data_dir = self._relative_data_dir(with_version=True, with_hash=False) reader = ArrowReader(self._output_dir, self.info) # use reader instructions to download the right files reader.download_from_hf_gcs(download_config, relative_data_dir) downloaded_info = DatasetInfo.from_directory(self._output_dir) self.info.update(downloaded_info) # download post processing resources remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/") for split in self.info.splits: for resource_file_name in self._post_processing_resources(split).values(): if os.sep in resource_file_name: raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}") try: resource_path = cached_path(remote_cache_dir + "/" + resource_file_name) shutil.move(resource_path, os.path.join(self._output_dir, resource_file_name)) except ConnectionError: logger.info(f"Couldn't download resourse file {resource_file_name} from Hf google storage.") logger.info("Dataset downloaded from Hf google storage.") def _download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs): """Downloads and prepares dataset for reading. This is the internal implementation to overwrite called when user calls `download_and_prepare`. It should download all required data and generate the pre-processed datasets files. Args: dl_manager ([`DownloadManager`]): `DownloadManager` used to download and cache data. verification_mode ([`VerificationMode`]): if `ALL_CHECKS`, perform all the verifications including checksums. if `BASIC_CHECKS`, do not perform checksums, only perform split tests. if `NO_CHECKS`, do not perform any verification. prepare_split_kwargs: Additional options, such as `file_format`, `max_shard_size` """ # Generating data for all splits split_dict = SplitDict(dataset_name=self.dataset_name) split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs) split_generators = self._split_generators(dl_manager, **split_generators_kwargs) # Checksums verification if verification_mode == VerificationMode.ALL_CHECKS and dl_manager.record_checksums: verify_checksums( self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), "dataset source files" ) # Build splits for split_generator in split_generators: if str(split_generator.split_info.name).lower() == "all": raise ValueError( "`all` is a special split keyword corresponding to the " "union of all splits, so cannot be used as key in " "._split_generator()." ) logger.info(f"Generating {split_generator.split_info.name} split") split_dict.add(split_generator.split_info) try: # Prepare split will record examples associated to the split self._prepare_split(split_generator, **prepare_split_kwargs) except OSError as e: raise OSError( "Cannot find data file. 
" + (self.manual_download_instructions or "") + "\nOriginal error:\n" + str(e) ) from None # If check_duplicates is set to True , then except DuplicatedKeysError except DuplicatedKeysError as e: raise DuplicatedKeysError( e.key, e.duplicate_key_indices, fix_msg=f"To avoid duplicate keys, please fix the dataset script {self.name}.py", ) from None dl_manager.manage_extracted_files() if verification_mode == VerificationMode.BASIC_CHECKS or verification_mode == VerificationMode.ALL_CHECKS: verify_splits(self.info.splits, split_dict) # Update the info object with the splits. self.info.splits = split_dict self.info.download_size = dl_manager.downloaded_size def download_post_processing_resources(self, dl_manager): for split in self.info.splits or []: for resource_name, resource_file_name in self._post_processing_resources(split).items(): if not not is_remote_filesystem(self._fs): raise NotImplementedError(f"Post processing is not supported on filesystem {self._fs}") if os.sep in resource_file_name: raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}") resource_path = os.path.join(self._output_dir, resource_file_name) if not os.path.exists(resource_path): downloaded_resource_path = self._download_post_processing_resources( split, resource_name, dl_manager ) if downloaded_resource_path: logger.info(f"Downloaded post-processing resource {resource_name} as {resource_file_name}") shutil.move(downloaded_resource_path, resource_path) def _load_info(self) -> DatasetInfo: return DatasetInfo.from_directory(self._output_dir, storage_options=self._fs.storage_options) def _save_info(self): file_lock = ( FileLock(self._output_dir + "_info.lock") if not is_remote_filesystem(self._fs) else contextlib.nullcontext() ) with file_lock: self.info.write_to_directory(self._output_dir, storage_options=self._fs.storage_options) def _save_infos(self): file_lock = ( FileLock(self._output_dir + "_infos.lock") if not is_remote_filesystem(self._fs) else contextlib.nullcontext() ) with file_lock: DatasetInfosDict(**{self.config.name: self.info}).write_to_directory(self.get_imported_module_dir()) def _make_split_generators_kwargs(self, prepare_split_kwargs): """Get kwargs for `self._split_generators()` from `prepare_split_kwargs`.""" del prepare_split_kwargs return {} def as_dataset( self, split: Optional[Split] = None, run_post_process=True, verification_mode: Optional[Union[VerificationMode, str]] = None, ignore_verifications="deprecated", in_memory=False, ) -> Union[Dataset, DatasetDict]: """Return a Dataset for the specified split. Args: split (`datasets.Split`): Which subset of the data to return. run_post_process (`bool`, defaults to `True`): Whether to run post-processing dataset transforms and/or add indexes. verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`): Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...). <Added version="2.9.1"/> ignore_verifications (`bool`, defaults to `False`): Whether to ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...). <Deprecated version="2.9.1"> `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0. Please use `verification_mode` instead. </Deprecated> in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. 
Returns: datasets.Dataset Example: ```py >>> from datasets import load_dataset_builder >>> builder = load_dataset_builder('rotten_tomatoes') >>> builder.download_and_prepare() >>> ds = builder.as_dataset(split='train') >>> ds Dataset({ features: ['text', 'label'], num_rows: 8530 }) ``` """ if ignore_verifications != "deprecated": verification_mode = verification_mode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS warnings.warn( "'ignore_verifications' was deprecated in favor of 'verification' in version 2.9.1 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.", FutureWarning, ) if self._file_format is not None and self._file_format != "arrow": raise FileFormatError('Loading a dataset not written in the "arrow" format is not supported.') if is_remote_filesystem(self._fs): raise NotImplementedError(f"Loading a dataset cached in a {type(self._fs).__name__} is not supported.") if not os.path.exists(self._output_dir): raise FileNotFoundError( f"Dataset {self.dataset_name}: could not find data in {self._output_dir}. Please make sure to call " "builder.download_and_prepare(), or use " "datasets.load_dataset() before trying to access the Dataset object." ) logger.debug(f'Constructing Dataset for split {split or ", ".join(self.info.splits)}, from {self._output_dir}') # By default, return all splits if split is None: split = {s: s for s in self.info.splits} verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS) # Create a dataset for each of the given splits datasets = map_nested( partial( self._build_single_dataset, run_post_process=run_post_process, verification_mode=verification_mode, in_memory=in_memory, ), split, map_tuple=True, disable_tqdm=True, ) if isinstance(datasets, dict): datasets = DatasetDict(datasets) return datasets def _build_single_dataset( self, split: Union[str, ReadInstruction, Split], run_post_process: bool, verification_mode: VerificationMode, in_memory: bool = False, ): """as_dataset for a single split.""" if not isinstance(split, ReadInstruction): split = str(split) if split == "all": split = "+".join(self.info.splits.keys()) split = Split(split) # Build base dataset ds = self._as_dataset( split=split, in_memory=in_memory, ) if run_post_process: for resource_file_name in self._post_processing_resources(split).values(): if os.sep in resource_file_name: raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}") resources_paths = { resource_name: os.path.join(self._output_dir, resource_file_name) for resource_name, resource_file_name in self._post_processing_resources(split).items() } post_processed = self._post_process(ds, resources_paths) if post_processed is not None: ds = post_processed recorded_checksums = {} record_checksums = False for resource_name, resource_path in resources_paths.items(): size_checksum = get_size_checksum_dict(resource_path) recorded_checksums[resource_name] = size_checksum if verification_mode == VerificationMode.ALL_CHECKS and record_checksums: if self.info.post_processed is None or self.info.post_processed.resources_checksums is None: expected_checksums = None else: expected_checksums = self.info.post_processed.resources_checksums.get(split) verify_checksums(expected_checksums, recorded_checksums, "post processing resources") if self.info.post_processed is None: self.info.post_processed = PostProcessedInfo() if self.info.post_processed.resources_checksums is None: 
self.info.post_processed.resources_checksums = {} self.info.post_processed.resources_checksums[str(split)] = recorded_checksums self.info.post_processing_size = sum( checksums_dict["num_bytes"] for split_checksums_dicts in self.info.post_processed.resources_checksums.values() for checksums_dict in split_checksums_dicts.values() ) if self.info.dataset_size is not None and self.info.download_size is not None: self.info.size_in_bytes = ( self.info.dataset_size + self.info.download_size + self.info.post_processing_size ) self._save_info() ds._info.post_processed = self.info.post_processed ds._info.post_processing_size = self.info.post_processing_size ds._info.size_in_bytes = self.info.size_in_bytes if self.info.post_processed.features is not None: if self.info.post_processed.features.type != ds.features.type: raise ValueError( f"Post-processed features info don't match the dataset:\nGot\n{self.info.post_processed.features}\nbut expected something like\n{ds.features}" ) else: ds.info.features = self.info.post_processed.features return ds def _as_dataset(self, split: Union[ReadInstruction, Split] = Split.TRAIN, in_memory: bool = False) -> Dataset: """Constructs a `Dataset`. This is the internal implementation to overwrite called when user calls `as_dataset`. It should read the pre-processed datasets files and generate the `Dataset` object. Args: split (`datasets.Split`): which subset of the data to read. in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. Returns: `Dataset` """ cache_dir = self._fs._strip_protocol(self._output_dir) dataset_name = self.dataset_name if self._check_legacy_cache(): dataset_name = self.name dataset_kwargs = ArrowReader(cache_dir, self.info).read( name=dataset_name, instructions=split, split_infos=self.info.splits.values(), in_memory=in_memory, ) fingerprint = self._get_dataset_fingerprint(split) return Dataset(fingerprint=fingerprint, **dataset_kwargs) def _get_dataset_fingerprint(self, split: Union[ReadInstruction, Split]) -> str: """The dataset fingerprint is the hash of the relative directory dataset_name/config_name/version/hash, as well as the split specs.""" hasher = Hasher() hasher.update(Path(self._relative_data_dir()).as_posix()) hasher.update(str(split)) # for example: train, train+test, train[:10%], test[:33%](pct1_dropremainder) fingerprint = hasher.hexdigest() return fingerprint def as_streaming_dataset( self, split: Optional[str] = None, base_path: Optional[str] = None, ) -> Union[Dict[str, IterableDataset], IterableDataset]: if is_remote_filesystem(self._fs): raise NotImplementedError( f"Loading a streaming dataset cached in a {type(self._fs).__name__} is not supported yet." ) dl_manager = StreamingDownloadManager( base_path=base_path or self.base_path, download_config=DownloadConfig(token=self.token, storage_options=self.storage_options), dataset_name=self.dataset_name, data_dir=self.config.data_dir, ) self._check_manual_download(dl_manager) splits_generators = {sg.name: sg for sg in self._split_generators(dl_manager)} # By default, return all splits if split is None: splits_generator = splits_generators elif split in splits_generators: splits_generator = splits_generators[split] else: raise ValueError(f"Bad split: {split}. 
Available splits: {list(splits_generators)}") # Create a dataset for each of the given splits datasets = map_nested( self._as_streaming_dataset_single, splits_generator, map_tuple=True, ) if isinstance(datasets, dict): datasets = IterableDatasetDict(datasets) return datasets def _as_streaming_dataset_single( self, splits_generator, ) -> IterableDataset: ex_iterable = self._get_examples_iterable_for_split(splits_generator) # add auth to be able to access and decode audio/image files from private repositories. token_per_repo_id = {self.repo_id: self.token} if self.repo_id else {} return IterableDataset( ex_iterable, info=self.info, split=splits_generator.name, token_per_repo_id=token_per_repo_id ) def _post_process(self, dataset: Dataset, resources_paths: Mapping[str, str]) -> Optional[Dataset]: """Run dataset transforms or add indexes""" return None def _post_processing_resources(self, split: str) -> Dict[str, str]: """Mapping resource_name -> resource_file_name""" return {} def _download_post_processing_resources( self, split: str, resource_name: str, dl_manager: DownloadManager ) -> Optional[str]: """Download the resource using the download manager and return the downloaded path.""" return None def _split_generators(self, dl_manager: DownloadManager): """Specify feature dictionary generators and dataset splits. This function returns a list of `SplitGenerator`s defining how to generate data and what splits to use. Example: return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={'file': 'train_data.zip'}, ), datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={'file': 'test_data.zip'}, ), ] The above code will first call `_generate_examples(file='train_data.zip')` to write the train data, then `_generate_examples(file='test_data.zip')` to write the test data. Datasets are typically split into different subsets to be used at various stages of training and evaluation. Note that for datasets without a `VALIDATION` split, you can use a fraction of the `TRAIN` data for evaluation as you iterate on your model so as not to overfit to the `TEST` data. For downloads and extractions, use the given `download_manager`. Note that the `DownloadManager` caches downloads, so it is fine to have each generator attempt to download the source data. A good practice is to download all data in this function, and then distribute the relevant parts to each split with the `gen_kwargs` argument Args: dl_manager (`DownloadManager`): Download manager to download the data Returns: `list<SplitGenerator>`. """ raise NotImplementedError() def _prepare_split( self, split_generator: SplitGenerator, file_format: str = "arrow", max_shard_size: Optional[Union[str, int]] = None, num_proc: Optional[int] = None, **kwargs, ): """Generate the examples and record them on disk. Args: split_generator (`SplitGenerator`): Split generator to process file_format (`str`, *optional*): format of the data files in which the dataset will be written. Supported formats: "arrow", "parquet". Default to "arrow" format. max_shard_size (`Union[str, int]`, *optional*): Maximum number of bytes written per shard, default is "500MB". The size is based on uncompressed data size, so in practice your shard files may be smaller than `max_shard_size` thanks to Parquet compression for example. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. Multiprocessing is disabled by default. 
<Added version="2.7.0"/> **kwargs: Additional kwargs forwarded from _download_and_prepare (ex: beam pipeline) """ raise NotImplementedError() def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable: """Generate the examples on the fly. Args: split_generator (`SplitGenerator`): Split generator to process """ raise NotImplementedError() The provided code snippet includes necessary dependencies for implementing the `extend_dataset_builder_for_streaming` function. Write a Python function `def extend_dataset_builder_for_streaming(builder: "DatasetBuilder")` to solve the following problem: Extend the dataset builder module and the modules imported by it to support streaming. Args: builder (:class:`DatasetBuilder`): Dataset builder instance. Here is the function: def extend_dataset_builder_for_streaming(builder: "DatasetBuilder"): """Extend the dataset builder module and the modules imported by it to support streaming. Args: builder (:class:`DatasetBuilder`): Dataset builder instance. """ # this extends the open and os.path.join functions for data streaming download_config = DownloadConfig(storage_options=builder.storage_options, token=builder.token) extend_module_for_streaming(builder.__module__, download_config=download_config) # if needed, we also have to extend additional internal imports (like wmt14 -> wmt_utils) if not builder.__module__.startswith("datasets."): # check that it's not a packaged builder like csv importable_file = inspect.getfile(builder.__class__) with lock_importable_file(importable_file): for imports in get_imports(importable_file): if imports[0] == "internal": internal_import_name = imports[1] internal_module_name = ".".join(builder.__module__.split(".")[:-1] + [internal_import_name]) extend_module_for_streaming(internal_module_name, download_config=download_config) # builders can inherit from other builders that might use streaming functionality # (for example, ImageFolder and AudioFolder inherit from FolderBuilder which implements examples generation) # but these parents builders are not patched automatically as they are not instantiated, so we patch them here from .builder import DatasetBuilder parent_builder_modules = [ cls.__module__ for cls in type(builder).__mro__[1:] # make sure it's not the same module we've already patched if issubclass(cls, DatasetBuilder) and cls.__module__ != DatasetBuilder.__module__ ] # check it's not a standard builder from datasets.builder for module in parent_builder_modules: extend_module_for_streaming(module, download_config=download_config)
Extend the dataset builder module and the modules imported by it to support streaming. Args: builder (:class:`DatasetBuilder`): Dataset builder instance.
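A hedged usage sketch (not part of the entry above) of how this patching might be exercised by hand. The import path `datasets.streaming` and the `"rotten_tomatoes"` dataset name are assumptions for illustration; in the library, `load_dataset_builder` is expected to apply this patching itself, so the explicit call mainly matters for a hand-constructed builder.

```py
# Sketch: patch a builder's module(s) so file access goes through the
# streaming-aware wrappers, then iterate a split lazily without a full download.
# Assumes the public `load_dataset_builder` API shown in the docstrings above.
from datasets import load_dataset_builder
from datasets.streaming import extend_dataset_builder_for_streaming

builder = load_dataset_builder("rotten_tomatoes")
extend_dataset_builder_for_streaming(builder)  # patches open/os.path.join in the builder's module(s)

streamed = builder.as_streaming_dataset(split="train")
print(next(iter(streamed)))  # first example, fetched on the fly
```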
17,948
import contextlib import copy import fnmatch import itertools import json import math import os import posixpath import re import shutil import sys import tempfile import time import warnings import weakref from collections import Counter from collections.abc import Mapping from copy import deepcopy from functools import partial, wraps from io import BytesIO from math import ceil, floor from pathlib import Path from random import sample from typing import ( TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union, overload, ) from typing import Sequence as Sequence_ import fsspec import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc from fsspec.core import url_to_fs from huggingface_hub import CommitInfo, CommitOperationAdd, CommitOperationDelete, DatasetCard, DatasetCardData, HfApi from multiprocess import Pool from tqdm.contrib.concurrent import thread_map from . import config from .arrow_reader import ArrowReader from .arrow_writer import ArrowWriter, OptimizedTypedSequence from .data_files import sanitize_patterns from .download.streaming_download_manager import xgetsize from .features import Audio, ClassLabel, Features, Image, Sequence, Value from .features.features import ( FeatureType, _align_features, _check_if_features_can_be_aligned, generate_from_arrow_type, pandas_types_mapper, require_decoding, ) from .filesystems import is_remote_filesystem from .fingerprint import ( fingerprint_transform, format_kwargs_for_fingerprint, format_transform_for_fingerprint, generate_fingerprint, generate_random_fingerprint, get_temporary_cache_files_directory, is_caching_enabled, maybe_register_dataset_for_temp_dir_deletion, update_fingerprint, validate_fingerprint, ) from .formatting import format_table, get_format_type_from_alias, get_formatter, query_table from .formatting.formatting import LazyDict, _is_range_contiguous from .info import DatasetInfo, DatasetInfosDict from .naming import _split_re from .search import IndexableMixin from .splits import NamedSplit, Split, SplitDict, SplitInfo from .table import ( InMemoryTable, MemoryMappedTable, Table, _memory_mapped_record_batch_reader_from_file, cast_array_to_feature, concat_tables, embed_table_storage, list_table_cache_files, table_cast, table_iter, table_visitor, ) from .tasks import TaskTemplate from .utils import logging from .utils import tqdm as hf_tqdm from .utils.deprecation_utils import deprecated from .utils.file_utils import estimate_dataset_size from .utils.hub import list_files_info, preupload_lfs_files from .utils.info_utils import is_small_dataset from .utils.metadata import MetadataConfigs from .utils.py_utils import ( Literal, asdict, convert_file_size_to_int, glob_pattern_to_regex, iflatmap_unordered, string_to_dict, unique_values, ) from .utils.stratify import stratified_shuffle_split_generate_indices from .utils.tf_utils import dataset_to_tf, minimal_tf_collate_fn, multiprocess_dataset_to_tf from .utils.typing import ListLike, PathLike The provided code snippet includes necessary dependencies for implementing the `transmit_format` function. 
Write a Python function `def transmit_format(func)` to solve the following problem: Wrapper for dataset transforms that recreate a new Dataset to transmit the format of the original dataset to the new dataset Here is the function: def transmit_format(func): """Wrapper for dataset transforms that recreate a new Dataset to transmit the format of the original dataset to the new dataset""" @wraps(func) def wrapper(*args, **kwargs): if args: self: "Dataset" = args[0] args = args[1:] else: self: "Dataset" = kwargs.pop("self") # don't use self.format since it returns a list of columns for 'columns' even if self_format_columns is None unformatted_columns = set(self.column_names) - set(self._format_columns or []) self_format = { "type": self._format_type, "format_kwargs": self._format_kwargs, "columns": self._format_columns, "output_all_columns": self._output_all_columns, } # apply actual function out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] # re-apply format to the output for dataset in datasets: new_format = self_format.copy() if new_format["columns"] is not None: # new formatted columns = (columns - previously unformatted columns) # sort the columns to have a deterministic list of columns that we can compare with `out_format` new_format["columns"] = sorted(set(dataset.column_names) - unformatted_columns) out_format = { "type": dataset._format_type, "format_kwargs": dataset._format_kwargs, "columns": sorted(dataset._format_columns) if dataset._format_columns is not None else None, "output_all_columns": dataset._output_all_columns, } if out_format != new_format: fingerprint = dataset._fingerprint dataset.set_format(**new_format) dataset._fingerprint = fingerprint return out wrapper._decorator_name_ = "transmit_format" return wrapper
Wrapper for dataset transforms that recreate a new Dataset to transmit the format of the original dataset to the new dataset
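A minimal sketch of the observable effect of this wrapper, assuming `Dataset.select` (or the helpers it delegates to) is among the transforms wrapped with `@transmit_format`, as in the library source this entry is drawn from.

```py
# Sketch: a format set on the original Dataset is carried over to the Dataset
# returned by a wrapped transform, restricted to the still-existing columns.
from datasets import Dataset

ds = Dataset.from_dict({"x": [1, 2, 3], "y": ["a", "b", "c"]})
ds.set_format(type="numpy", columns=["x"])

subset = ds.select([0, 2])        # returns a new Dataset object
print(subset.format["type"])      # "numpy"  -- format transmitted by the wrapper
print(subset.format["columns"])   # ["x"]    -- only previously formatted columns stay formatted
```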
17,949
import contextlib import copy import fnmatch import itertools import json import math import os import posixpath import re import shutil import sys import tempfile import time import warnings import weakref from collections import Counter from collections.abc import Mapping from copy import deepcopy from functools import partial, wraps from io import BytesIO from math import ceil, floor from pathlib import Path from random import sample from typing import ( TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union, overload, ) from typing import Sequence as Sequence_ import fsspec import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc from fsspec.core import url_to_fs from huggingface_hub import CommitInfo, CommitOperationAdd, CommitOperationDelete, DatasetCard, DatasetCardData, HfApi from multiprocess import Pool from tqdm.contrib.concurrent import thread_map from . import config from .arrow_reader import ArrowReader from .arrow_writer import ArrowWriter, OptimizedTypedSequence from .data_files import sanitize_patterns from .download.streaming_download_manager import xgetsize from .features import Audio, ClassLabel, Features, Image, Sequence, Value from .features.features import ( FeatureType, _align_features, _check_if_features_can_be_aligned, generate_from_arrow_type, pandas_types_mapper, require_decoding, ) from .filesystems import is_remote_filesystem from .fingerprint import ( fingerprint_transform, format_kwargs_for_fingerprint, format_transform_for_fingerprint, generate_fingerprint, generate_random_fingerprint, get_temporary_cache_files_directory, is_caching_enabled, maybe_register_dataset_for_temp_dir_deletion, update_fingerprint, validate_fingerprint, ) from .formatting import format_table, get_format_type_from_alias, get_formatter, query_table from .formatting.formatting import LazyDict, _is_range_contiguous from .info import DatasetInfo, DatasetInfosDict from .naming import _split_re from .search import IndexableMixin from .splits import NamedSplit, Split, SplitDict, SplitInfo from .table import ( InMemoryTable, MemoryMappedTable, Table, _memory_mapped_record_batch_reader_from_file, cast_array_to_feature, concat_tables, embed_table_storage, list_table_cache_files, table_cast, table_iter, table_visitor, ) from .tasks import TaskTemplate from .utils import logging from .utils import tqdm as hf_tqdm from .utils.deprecation_utils import deprecated from .utils.file_utils import estimate_dataset_size from .utils.hub import list_files_info, preupload_lfs_files from .utils.info_utils import is_small_dataset from .utils.metadata import MetadataConfigs from .utils.py_utils import ( Literal, asdict, convert_file_size_to_int, glob_pattern_to_regex, iflatmap_unordered, string_to_dict, unique_values, ) from .utils.stratify import stratified_shuffle_split_generate_indices from .utils.tf_utils import dataset_to_tf, minimal_tf_collate_fn, multiprocess_dataset_to_tf from .utils.typing import ListLike, PathLike The provided code snippet includes necessary dependencies for implementing the `transmit_tasks` function. 
Write a Python function `def transmit_tasks(func)` to solve the following problem: Wrapper for dataset transforms that recreate a new Dataset to transmit the task templates of the original dataset to the new dataset Here is the function: def transmit_tasks(func): """Wrapper for dataset transforms that recreate a new Dataset to transmit the task templates of the original dataset to the new dataset""" @wraps(func) def wrapper(*args, **kwargs): if args: self: "Dataset" = args[0] args = args[1:] else: self: "Dataset" = kwargs.pop("self") # apply actual function out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] for dataset in datasets: # Remove task templates if a column mapping of the template is no longer valid if self.info.task_templates is not None: dataset.info.task_templates = [ template for template in self.info.task_templates if all( dataset._info.features.get(k) == self._info.features.get(k) for k in template.column_mapping.keys() ) ] return out wrapper._decorator_name_ = "transmit_tasks" return wrapper
Wrapper for dataset transforms that recreate a new Dataset to transmit the task templates of the original dataset to the new dataset
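A hedged sketch of the behaviour described above: a task template whose `column_mapping` no longer matches the new Dataset's features is dropped. It assumes `remove_columns` is among the transforms wrapped with `@transmit_tasks` and that the (legacy) `datasets.tasks.TextClassification` template API is available in the installed version.

```py
# Sketch: removing a column referenced by a task template's column_mapping
# causes the wrapper to drop that template from the new Dataset's info.
from datasets import ClassLabel, Dataset, Features, Value
from datasets.tasks import TextClassification  # legacy task-template API (assumption)

features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
ds = Dataset.from_dict({"text": ["good", "bad"], "label": [1, 0]}, features=features)
ds.info.task_templates = [TextClassification(text_column="text", label_column="label")]

trimmed = ds.remove_columns("label")   # "label" no longer matches the template's column_mapping
print(trimmed.info.task_templates)     # [] -- the stale template was dropped by the wrapper
```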
17,950
import contextlib import copy import fnmatch import itertools import json import math import os import posixpath import re import shutil import sys import tempfile import time import warnings import weakref from collections import Counter from collections.abc import Mapping from copy import deepcopy from functools import partial, wraps from io import BytesIO from math import ceil, floor from pathlib import Path from random import sample from typing import ( TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union, overload, ) from typing import Sequence as Sequence_ import fsspec import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc from fsspec.core import url_to_fs from huggingface_hub import CommitInfo, CommitOperationAdd, CommitOperationDelete, DatasetCard, DatasetCardData, HfApi from multiprocess import Pool from tqdm.contrib.concurrent import thread_map from . import config from .arrow_reader import ArrowReader from .arrow_writer import ArrowWriter, OptimizedTypedSequence from .data_files import sanitize_patterns from .download.streaming_download_manager import xgetsize from .features import Audio, ClassLabel, Features, Image, Sequence, Value from .features.features import ( FeatureType, _align_features, _check_if_features_can_be_aligned, generate_from_arrow_type, pandas_types_mapper, require_decoding, ) from .filesystems import is_remote_filesystem from .fingerprint import ( fingerprint_transform, format_kwargs_for_fingerprint, format_transform_for_fingerprint, generate_fingerprint, generate_random_fingerprint, get_temporary_cache_files_directory, is_caching_enabled, maybe_register_dataset_for_temp_dir_deletion, update_fingerprint, validate_fingerprint, ) from .formatting import format_table, get_format_type_from_alias, get_formatter, query_table from .formatting.formatting import LazyDict, _is_range_contiguous from .info import DatasetInfo, DatasetInfosDict from .naming import _split_re from .search import IndexableMixin from .splits import NamedSplit, Split, SplitDict, SplitInfo from .table import ( InMemoryTable, MemoryMappedTable, Table, _memory_mapped_record_batch_reader_from_file, cast_array_to_feature, concat_tables, embed_table_storage, list_table_cache_files, table_cast, table_iter, table_visitor, ) from .tasks import TaskTemplate from .utils import logging from .utils import tqdm as hf_tqdm from .utils.deprecation_utils import deprecated from .utils.file_utils import estimate_dataset_size from .utils.hub import list_files_info, preupload_lfs_files from .utils.info_utils import is_small_dataset from .utils.metadata import MetadataConfigs from .utils.py_utils import ( Literal, asdict, convert_file_size_to_int, glob_pattern_to_regex, iflatmap_unordered, string_to_dict, unique_values, ) from .utils.stratify import stratified_shuffle_split_generate_indices from .utils.tf_utils import dataset_to_tf, minimal_tf_collate_fn, multiprocess_dataset_to_tf from .utils.typing import ListLike, PathLike class Table(IndexedTableMixin): """ Wraps a pyarrow Table by using composition. This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`. It implements all the basic attributes/methods of the pyarrow Table class except the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column, append_column, remove_column, set_column, rename_columns` and `drop`. The implementation of these methods differs for the subclasses. 
""" def __init__(self, table: pa.Table): super().__init__(table) self.table = table def __deepcopy__(self, memo: dict): # arrow tables are immutable, so there's no need to copy self.table # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason # by adding it to the memo, self.table won't be copied memo[id(self.table)] = self.table # same for the recordbatches used by the index memo[id(self._batches)] = list(self._batches) return _deepcopy(self, memo) def validate(self, *args, **kwargs): """ Perform validation checks. An exception is raised if validation fails. By default only cheap validation checks are run. Pass `full=True` for thorough validation checks (potentially `O(n)`). Args: full (`bool`, defaults to `False`): If `True`, run expensive checks, otherwise cheap checks only. Raises: `pa.lib.ArrowInvalid`: if validation fails """ return self.table.validate(*args, **kwargs) def equals(self, *args, **kwargs): """ Check if contents of two tables are equal. Args: other ([`~datasets.table.Table`]): Table to compare against. check_metadata `bool`, defaults to `False`): Whether schema metadata equality should be checked as well. Returns: `bool` """ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args) kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs} return self.table.equals(*args, **kwargs) def to_batches(self, *args, **kwargs): """ Convert Table to list of (contiguous) `RecordBatch` objects. Args: max_chunksize (`int`, defaults to `None`): Maximum size for `RecordBatch` chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `List[pyarrow.RecordBatch]` """ return self.table.to_batches(*args, **kwargs) def to_pydict(self, *args, **kwargs): """ Convert the Table to a `dict` or `OrderedDict`. Returns: `dict` """ return self.table.to_pydict(*args, **kwargs) def to_pylist(self, *args, **kwargs): """ Convert the Table to a list Returns: `list` """ return self.table.to_pylist(*args, **kwargs) def to_pandas(self, *args, **kwargs): """ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate. Args: memory_pool (`MemoryPool`, defaults to `None`): Arrow MemoryPool to use for allocations. Uses the default memory pool is not passed. strings_to_categorical (`bool`, defaults to `False`): Encode string (UTF8) and binary types to `pandas.Categorical`. categories (`list`, defaults to `empty`): List of fields that should be returned as `pandas.Categorical`. Only applies to table-like data structures. zero_copy_only (`bool`, defaults to `False`): Raise an `ArrowException` if this function call would require copying the underlying data. integer_object_nulls (`bool`, defaults to `False`): Cast integers with nulls to objects. date_as_object (`bool`, defaults to `True`): Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype. timestamp_as_object (`bool`, defaults to `False`): Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is useful if you have timestamps that don't fit in the normal date range of nanosecond timestamps (1678 CE-2262 CE). If `False`, all timestamps are converted to `datetime64[ns]` dtype. use_threads (`bool`, defaults to `True`): Whether to parallelize the conversion using multiple threads. deduplicate_objects (`bool`, defaults to `False`): Do not create multiple copies Python objects when created, to save on memory use. Conversion will be slower. 
ignore_metadata (`bool`, defaults to `False`): If `True`, do not use the 'pandas' metadata to reconstruct the DataFrame index, if present. safe (`bool`, defaults to `True`): For certain data types, a cast is needed in order to store the data in a pandas DataFrame or Series (e.g. timestamps are always stored as nanoseconds in pandas). This option controls whether it is a safe cast or not. split_blocks (`bool`, defaults to `False`): If `True`, generate one internal "block" for each column when creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this can temporarily reduce memory note that various pandas operations can trigger "consolidation" which may balloon memory use. self_destruct (`bool`, defaults to `False`): EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow memory while converting the Arrow object to pandas. If you use the object after calling `to_pandas` with this option it will crash your program. types_mapper (`function`, defaults to `None`): A function mapping a pyarrow DataType to a pandas `ExtensionDtype`. This can be used to override the default pandas type for conversion of built-in pyarrow types or in absence of `pandas_metadata` in the Table schema. The function receives a pyarrow DataType and is expected to return a pandas `ExtensionDtype` or `None` if the default conversion should be used for that type. If you have a dictionary mapping, you can pass `dict.get` as function. Returns: `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object """ return self.table.to_pandas(*args, **kwargs) def to_string(self, *args, **kwargs): return self.table.to_string(*args, **kwargs) def to_reader(self, max_chunksize: Optional[int] = None): """ Convert the Table to a RecordBatchReader. Note that this method is zero-copy, it merely exposes the same data under a different API. Args: max_chunksize (`int`, defaults to `None`) Maximum size for RecordBatch chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `pyarrow.RecordBatchReader` """ return self.table.to_reader(max_chunksize=max_chunksize) def field(self, *args, **kwargs): """ Select a schema field by its column name or numeric index. Args: i (`Union[int, str]`): The index or name of the field to retrieve. Returns: `pyarrow.Field` """ return self.table.field(*args, **kwargs) def column(self, *args, **kwargs): """ Select a column by its column name, or numeric index. Args: i (`Union[int, str]`): The index or name of the column to retrieve. Returns: `pyarrow.ChunkedArray` """ return self.table.column(*args, **kwargs) def itercolumns(self, *args, **kwargs): """ Iterator over all columns in their numerical order. Yields: `pyarrow.ChunkedArray` """ return self.table.itercolumns(*args, **kwargs) def schema(self): """ Schema of the table and its columns. Returns: `pyarrow.Schema` """ return self.table.schema def columns(self): """ List of all columns in numerical order. Returns: `List[pa.ChunkedArray]` """ return self.table.columns def num_columns(self): """ Number of columns in this table. Returns: int """ return self.table.num_columns def num_rows(self): """ Number of rows in this table. Due to the definition of a table, all columns have the same number of rows. Returns: int """ return self.table.num_rows def shape(self): """ Dimensions of the table: (#rows, #columns). Returns: `(int, int)`: Number of rows and number of columns. 
""" return self.table.shape def nbytes(self): """ Total number of bytes consumed by the elements of the table. """ return self.table.nbytes def column_names(self): """ Names of the table's columns. """ return self.table.column_names def __eq__(self, other): return self.equals(other) def __getitem__(self, i): return self.table[i] def __len__(self): return len(self.table) def __repr__(self): return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__) def __str__(self): return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__) def slice(self, *args, **kwargs): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ raise NotImplementedError() def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ raise NotImplementedError() def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ raise NotImplementedError() def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ raise NotImplementedError() def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. 
""" raise NotImplementedError() def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ raise NotImplementedError() def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ raise NotImplementedError() def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: `datasets.table.Table`: table with only a subset of the columns """ raise NotImplementedError() class InMemoryTable(TableBlock): """ The table is said in-memory when it is loaded into the user's RAM. Pickling it does copy all the data using memory. Its implementation is simple and uses the underlying pyarrow Table methods directly. This is different from the `MemoryMapped` table, for which pickling doesn't copy all the data in memory. For a `MemoryMapped`, unpickling instead reloads the table from the disk. `InMemoryTable` must be used when data fit in memory, while `MemoryMapped` are reserved for data bigger than memory or when you want the memory footprint of your application to stay low. """ def from_file(cls, filename: str): table = _in_memory_arrow_table_from_file(filename) return cls(table) def from_buffer(cls, buffer: pa.Buffer): table = _in_memory_arrow_table_from_buffer(buffer) return cls(table) def from_pandas(cls, *args, **kwargs): """ Convert pandas.DataFrame to an Arrow Table. The column types in the resulting Arrow Table are inferred from the dtypes of the pandas.Series in the DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the case of `object`, we need to guess the datatype by looking at the Python objects in this Series. Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only contains `None/nan` objects, the type is set to null. This behavior can be avoided by constructing an explicit schema and passing it to this function. Args: df (`pandas.DataFrame`): schema (`pyarrow.Schema`, *optional*): The expected schema of the Arrow Table. This can be used to indicate the type of columns if we cannot infer it automatically. If passed, the output will have exactly this schema. Columns specified in the schema that are not found in the DataFrame columns or its index will raise an error. Additional columns or index levels in the DataFrame which are not specified in the schema will be ignored. preserve_index (`bool`, *optional*): Whether to store the index as an additional column in the resulting `Table`. The default of None will store the index as a column, except for RangeIndex which is stored as metadata only. 
Use `preserve_index=True` to force it to be stored as a column. nthreads (`int`, defaults to `None` (may use up to system CPU count threads)) If greater than 1, convert columns to Arrow in parallel using indicated number of threads. columns (`List[str]`, *optional*): List of column to be converted. If `None`, use all columns. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions, Returns: `datasets.table.Table`: Examples: ```python >>> import pandas as pd >>> import pyarrow as pa >>> df = pd.DataFrame({ ... 'int': [1, 2], ... 'str': ['a', 'b'] ... }) >>> pa.Table.from_pandas(df) <pyarrow.lib.Table object at 0x7f05d1fb1b40> ``` """ return cls(pa.Table.from_pandas(*args, **kwargs)) def from_arrays(cls, *args, **kwargs): """ Construct a Table from Arrow arrays. Args: arrays (`List[Union[pyarrow.Array, pyarrow.ChunkedArray]]`): Equal-length arrays that should form the table. names (`List[str]`, *optional*): Names for the table columns. If not passed, schema must be passed. schema (`Schema`, defaults to `None`): Schema for the created table. If not passed, names must be passed. metadata (`Union[dict, Mapping]`, defaults to `None`): Optional metadata for the schema (if inferred). Returns: `datasets.table.Table` """ return cls(pa.Table.from_arrays(*args, **kwargs)) def from_pydict(cls, *args, **kwargs): """ Construct a Table from Arrow arrays or columns. Args: mapping (`Union[dict, Mapping]`): A mapping of strings to Arrays or Python lists. schema (`Schema`, defaults to `None`): If not passed, will be inferred from the Mapping values metadata (`Union[dict, Mapping]`, defaults to `None`): Optional metadata for the schema (if inferred). Returns: `datasets.table.Table` """ return cls(pa.Table.from_pydict(*args, **kwargs)) def from_pylist(cls, mapping, *args, **kwargs): """ Construct a Table from list of rows / dictionaries. Args: mapping (`List[dict]`): A mapping of strings to row values. schema (`Schema`, defaults to `None`): If not passed, will be inferred from the Mapping values metadata (`Union[dict, Mapping]`, defaults to `None`): Optional metadata for the schema (if inferred). Returns: `datasets.table.Table` """ return cls(pa.Table.from_pylist(mapping, *args, **kwargs)) def from_batches(cls, *args, **kwargs): """ Construct a Table from a sequence or iterator of Arrow `RecordBatches`. Args: batches (`Union[Sequence[pyarrow.RecordBatch], Iterator[pyarrow.RecordBatch]]`): Sequence of `RecordBatch` to be converted, all schemas must be equal. schema (`Schema`, defaults to `None`): If not passed, will be inferred from the first `RecordBatch`. Returns: `datasets.table.Table`: """ return cls(pa.Table.from_batches(*args, **kwargs)) def slice(self, offset=0, length=None): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ # Use fast slicing here return InMemoryTable(self.fast_slice(offset=offset, length=length)) def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ return InMemoryTable(self.table.filter(*args, **kwargs)) def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. 
Returns: `datasets.table.Table` """ return InMemoryTable(table_flatten(self.table, *args, **kwargs)) def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ return InMemoryTable(self.table.combine_chunks(*args, **kwargs)) def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ return InMemoryTable(table_cast(self.table, *args, **kwargs)) def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be `None`, which deletes any existing metadata). Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ return InMemoryTable(self.table.replace_schema_metadata(*args, **kwargs)) def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ return InMemoryTable(self.table.add_column(*args, **kwargs)) def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ return InMemoryTable(self.table.append_column(*args, **kwargs)) def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ return InMemoryTable(self.table.remove_column(*args, **kwargs)) def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ return InMemoryTable(self.table.set_column(*args, **kwargs)) def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ return InMemoryTable(self.table.rename_columns(*args, **kwargs)) def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ return InMemoryTable(self.table.drop(*args, **kwargs)) def select(self, *args, **kwargs): """ Select columns of the table. 
Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved. """ return InMemoryTable(self.table.select(*args, **kwargs)) The provided code snippet includes necessary dependencies for implementing the `_check_table` function. Write a Python function `def _check_table(table) -> Table` to solve the following problem: We check the table type to make sure it's an instance of :class:`datasets.table.Table` Here is the function: def _check_table(table) -> Table: """We check the table type to make sure it's an instance of :class:`datasets.table.Table`""" if isinstance(table, pa.Table): # for a pyarrow table, we can just consider it as a in-memory table # this is here for backward compatibility return InMemoryTable(table) elif isinstance(table, Table): return table else: raise TypeError(f"Expected a pyarrow.Table or a datasets.table.Table object, but got {table}.")
We check the table type to make sure it's an instance of :class:`datasets.table.Table`
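A brief usage sketch of the check above: a raw `pyarrow.Table` is wrapped into an `InMemoryTable`, while an existing `datasets.table.Table` instance is returned unchanged. This assumes the `datasets` library is installed and that the private `_check_table` helper shown above is in scope; the toy column name is illustrative.

```python
import pyarrow as pa
from datasets.table import InMemoryTable

pa_table = pa.table({"id": [1, 2, 3]})     # raw pyarrow table
wrapped = InMemoryTable(pa_table)          # already a datasets.table.Table subclass

print(type(_check_table(pa_table)).__name__)   # InMemoryTable (wrapped for backward compatibility)
print(_check_table(wrapped) is wrapped)        # True (passed through unchanged)
```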
17,951
import contextlib import copy import fnmatch import itertools import json import math import os import posixpath import re import shutil import sys import tempfile import time import warnings import weakref from collections import Counter from collections.abc import Mapping from copy import deepcopy from functools import partial, wraps from io import BytesIO from math import ceil, floor from pathlib import Path from random import sample from typing import ( TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union, overload, ) from typing import Sequence as Sequence_ import fsspec import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc from fsspec.core import url_to_fs from huggingface_hub import CommitInfo, CommitOperationAdd, CommitOperationDelete, DatasetCard, DatasetCardData, HfApi from multiprocess import Pool from tqdm.contrib.concurrent import thread_map from . import config from .arrow_reader import ArrowReader from .arrow_writer import ArrowWriter, OptimizedTypedSequence from .data_files import sanitize_patterns from .download.streaming_download_manager import xgetsize from .features import Audio, ClassLabel, Features, Image, Sequence, Value from .features.features import ( FeatureType, _align_features, _check_if_features_can_be_aligned, generate_from_arrow_type, pandas_types_mapper, require_decoding, ) from .filesystems import is_remote_filesystem from .fingerprint import ( fingerprint_transform, format_kwargs_for_fingerprint, format_transform_for_fingerprint, generate_fingerprint, generate_random_fingerprint, get_temporary_cache_files_directory, is_caching_enabled, maybe_register_dataset_for_temp_dir_deletion, update_fingerprint, validate_fingerprint, ) from .formatting import format_table, get_format_type_from_alias, get_formatter, query_table from .formatting.formatting import LazyDict, _is_range_contiguous from .info import DatasetInfo, DatasetInfosDict from .naming import _split_re from .search import IndexableMixin from .splits import NamedSplit, Split, SplitDict, SplitInfo from .table import ( InMemoryTable, MemoryMappedTable, Table, _memory_mapped_record_batch_reader_from_file, cast_array_to_feature, concat_tables, embed_table_storage, list_table_cache_files, table_cast, table_iter, table_visitor, ) from .tasks import TaskTemplate from .utils import logging from .utils import tqdm as hf_tqdm from .utils.deprecation_utils import deprecated from .utils.file_utils import estimate_dataset_size from .utils.hub import list_files_info, preupload_lfs_files from .utils.info_utils import is_small_dataset from .utils.metadata import MetadataConfigs from .utils.py_utils import ( Literal, asdict, convert_file_size_to_int, glob_pattern_to_regex, iflatmap_unordered, string_to_dict, unique_values, ) from .utils.stratify import stratified_shuffle_split_generate_indices from .utils.tf_utils import dataset_to_tf, minimal_tf_collate_fn, multiprocess_dataset_to_tf from .utils.typing import ListLike, PathLike def _check_valid_indices_value(index, size): if (index < 0 and index + size < 0) or (index >= size): raise IndexError(f"Index {index} out of range for dataset of size {size}.")
null
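A minimal sketch of the same bounds check, standard library only; the sample sizes are illustrative. It shows that negative indices are accepted as long as they resolve to a valid position (`index + size >= 0`).

```python
def check_valid_indices_value(index: int, size: int) -> None:
    # Negative indices are allowed when they wrap back into range; non-negative
    # indices must be strictly smaller than the dataset size.
    if (index < 0 and index + size < 0) or (index >= size):
        raise IndexError(f"Index {index} out of range for dataset of size {size}.")

check_valid_indices_value(5, 10)    # ok
check_valid_indices_value(-3, 10)   # ok, resolves to position 7
try:
    check_valid_indices_value(-11, 10)
except IndexError as err:
    print(err)  # Index -11 out of range for dataset of size 10
```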
17,952
import contextlib import copy import fnmatch import itertools import json import math import os import posixpath import re import shutil import sys import tempfile import time import warnings import weakref from collections import Counter from collections.abc import Mapping from copy import deepcopy from functools import partial, wraps from io import BytesIO from math import ceil, floor from pathlib import Path from random import sample from typing import ( TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union, overload, ) from typing import Sequence as Sequence_ import fsspec import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc from fsspec.core import url_to_fs from huggingface_hub import CommitInfo, CommitOperationAdd, CommitOperationDelete, DatasetCard, DatasetCardData, HfApi from multiprocess import Pool from tqdm.contrib.concurrent import thread_map from . import config from .arrow_reader import ArrowReader from .arrow_writer import ArrowWriter, OptimizedTypedSequence from .data_files import sanitize_patterns from .download.streaming_download_manager import xgetsize from .features import Audio, ClassLabel, Features, Image, Sequence, Value from .features.features import ( FeatureType, _align_features, _check_if_features_can_be_aligned, generate_from_arrow_type, pandas_types_mapper, require_decoding, ) from .filesystems import is_remote_filesystem from .fingerprint import ( fingerprint_transform, format_kwargs_for_fingerprint, format_transform_for_fingerprint, generate_fingerprint, generate_random_fingerprint, get_temporary_cache_files_directory, is_caching_enabled, maybe_register_dataset_for_temp_dir_deletion, update_fingerprint, validate_fingerprint, ) from .formatting import format_table, get_format_type_from_alias, get_formatter, query_table from .formatting.formatting import LazyDict, _is_range_contiguous from .info import DatasetInfo, DatasetInfosDict from .naming import _split_re from .search import IndexableMixin from .splits import NamedSplit, Split, SplitDict, SplitInfo from .table import ( InMemoryTable, MemoryMappedTable, Table, _memory_mapped_record_batch_reader_from_file, cast_array_to_feature, concat_tables, embed_table_storage, list_table_cache_files, table_cast, table_iter, table_visitor, ) from .tasks import TaskTemplate from .utils import logging from .utils import tqdm as hf_tqdm from .utils.deprecation_utils import deprecated from .utils.file_utils import estimate_dataset_size from .utils.hub import list_files_info, preupload_lfs_files from .utils.info_utils import is_small_dataset from .utils.metadata import MetadataConfigs from .utils.py_utils import ( Literal, asdict, convert_file_size_to_int, glob_pattern_to_regex, iflatmap_unordered, string_to_dict, unique_values, ) from .utils.stratify import stratified_shuffle_split_generate_indices from .utils.tf_utils import dataset_to_tf, minimal_tf_collate_fn, multiprocess_dataset_to_tf from .utils.typing import ListLike, PathLike class Table(IndexedTableMixin): """ Wraps a pyarrow Table by using composition. This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`. It implements all the basic attributes/methods of the pyarrow Table class except the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column, append_column, remove_column, set_column, rename_columns` and `drop`. The implementation of these methods differs for the subclasses. 
""" def __init__(self, table: pa.Table): super().__init__(table) self.table = table def __deepcopy__(self, memo: dict): # arrow tables are immutable, so there's no need to copy self.table # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason # by adding it to the memo, self.table won't be copied memo[id(self.table)] = self.table # same for the recordbatches used by the index memo[id(self._batches)] = list(self._batches) return _deepcopy(self, memo) def validate(self, *args, **kwargs): """ Perform validation checks. An exception is raised if validation fails. By default only cheap validation checks are run. Pass `full=True` for thorough validation checks (potentially `O(n)`). Args: full (`bool`, defaults to `False`): If `True`, run expensive checks, otherwise cheap checks only. Raises: `pa.lib.ArrowInvalid`: if validation fails """ return self.table.validate(*args, **kwargs) def equals(self, *args, **kwargs): """ Check if contents of two tables are equal. Args: other ([`~datasets.table.Table`]): Table to compare against. check_metadata `bool`, defaults to `False`): Whether schema metadata equality should be checked as well. Returns: `bool` """ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args) kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs} return self.table.equals(*args, **kwargs) def to_batches(self, *args, **kwargs): """ Convert Table to list of (contiguous) `RecordBatch` objects. Args: max_chunksize (`int`, defaults to `None`): Maximum size for `RecordBatch` chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `List[pyarrow.RecordBatch]` """ return self.table.to_batches(*args, **kwargs) def to_pydict(self, *args, **kwargs): """ Convert the Table to a `dict` or `OrderedDict`. Returns: `dict` """ return self.table.to_pydict(*args, **kwargs) def to_pylist(self, *args, **kwargs): """ Convert the Table to a list Returns: `list` """ return self.table.to_pylist(*args, **kwargs) def to_pandas(self, *args, **kwargs): """ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate. Args: memory_pool (`MemoryPool`, defaults to `None`): Arrow MemoryPool to use for allocations. Uses the default memory pool is not passed. strings_to_categorical (`bool`, defaults to `False`): Encode string (UTF8) and binary types to `pandas.Categorical`. categories (`list`, defaults to `empty`): List of fields that should be returned as `pandas.Categorical`. Only applies to table-like data structures. zero_copy_only (`bool`, defaults to `False`): Raise an `ArrowException` if this function call would require copying the underlying data. integer_object_nulls (`bool`, defaults to `False`): Cast integers with nulls to objects. date_as_object (`bool`, defaults to `True`): Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype. timestamp_as_object (`bool`, defaults to `False`): Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is useful if you have timestamps that don't fit in the normal date range of nanosecond timestamps (1678 CE-2262 CE). If `False`, all timestamps are converted to `datetime64[ns]` dtype. use_threads (`bool`, defaults to `True`): Whether to parallelize the conversion using multiple threads. deduplicate_objects (`bool`, defaults to `False`): Do not create multiple copies Python objects when created, to save on memory use. Conversion will be slower. 
ignore_metadata (`bool`, defaults to `False`): If `True`, do not use the 'pandas' metadata to reconstruct the DataFrame index, if present. safe (`bool`, defaults to `True`): For certain data types, a cast is needed in order to store the data in a pandas DataFrame or Series (e.g. timestamps are always stored as nanoseconds in pandas). This option controls whether it is a safe cast or not. split_blocks (`bool`, defaults to `False`): If `True`, generate one internal "block" for each column when creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this can temporarily reduce memory note that various pandas operations can trigger "consolidation" which may balloon memory use. self_destruct (`bool`, defaults to `False`): EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow memory while converting the Arrow object to pandas. If you use the object after calling `to_pandas` with this option it will crash your program. types_mapper (`function`, defaults to `None`): A function mapping a pyarrow DataType to a pandas `ExtensionDtype`. This can be used to override the default pandas type for conversion of built-in pyarrow types or in absence of `pandas_metadata` in the Table schema. The function receives a pyarrow DataType and is expected to return a pandas `ExtensionDtype` or `None` if the default conversion should be used for that type. If you have a dictionary mapping, you can pass `dict.get` as function. Returns: `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object """ return self.table.to_pandas(*args, **kwargs) def to_string(self, *args, **kwargs): return self.table.to_string(*args, **kwargs) def to_reader(self, max_chunksize: Optional[int] = None): """ Convert the Table to a RecordBatchReader. Note that this method is zero-copy, it merely exposes the same data under a different API. Args: max_chunksize (`int`, defaults to `None`) Maximum size for RecordBatch chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `pyarrow.RecordBatchReader` """ return self.table.to_reader(max_chunksize=max_chunksize) def field(self, *args, **kwargs): """ Select a schema field by its column name or numeric index. Args: i (`Union[int, str]`): The index or name of the field to retrieve. Returns: `pyarrow.Field` """ return self.table.field(*args, **kwargs) def column(self, *args, **kwargs): """ Select a column by its column name, or numeric index. Args: i (`Union[int, str]`): The index or name of the column to retrieve. Returns: `pyarrow.ChunkedArray` """ return self.table.column(*args, **kwargs) def itercolumns(self, *args, **kwargs): """ Iterator over all columns in their numerical order. Yields: `pyarrow.ChunkedArray` """ return self.table.itercolumns(*args, **kwargs) def schema(self): """ Schema of the table and its columns. Returns: `pyarrow.Schema` """ return self.table.schema def columns(self): """ List of all columns in numerical order. Returns: `List[pa.ChunkedArray]` """ return self.table.columns def num_columns(self): """ Number of columns in this table. Returns: int """ return self.table.num_columns def num_rows(self): """ Number of rows in this table. Due to the definition of a table, all columns have the same number of rows. Returns: int """ return self.table.num_rows def shape(self): """ Dimensions of the table: (#rows, #columns). Returns: `(int, int)`: Number of rows and number of columns. 
""" return self.table.shape def nbytes(self): """ Total number of bytes consumed by the elements of the table. """ return self.table.nbytes def column_names(self): """ Names of the table's columns. """ return self.table.column_names def __eq__(self, other): return self.equals(other) def __getitem__(self, i): return self.table[i] def __len__(self): return len(self.table) def __repr__(self): return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__) def __str__(self): return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__) def slice(self, *args, **kwargs): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ raise NotImplementedError() def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ raise NotImplementedError() def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ raise NotImplementedError() def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ raise NotImplementedError() def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. 
""" raise NotImplementedError() def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ raise NotImplementedError() def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ raise NotImplementedError() def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: `datasets.table.Table`: table with only a subset of the columns """ raise NotImplementedError() def get_indices_from_mask_function( function: Callable, batched: bool, with_indices: bool, with_rank: bool, input_columns: Optional[Union[str, List[str]]], indices_mapping: Optional[Table] = None, *args, **fn_kwargs, ): if batched: # we extract indices and rank from args *inputs, indices, rank = args additional_args = () if with_indices: additional_args += (indices,) if with_rank: additional_args += (rank,) mask = function(*inputs, *additional_args, **fn_kwargs) else: # we get batched data (to do less look-ups) but `function` only accepts one example # therefore we need to call `function` on each example of the batch to get the mask *inputs, indices, rank = args mask = [] if input_columns is None: # inputs only contains a batch of examples batch: dict = inputs[0] num_examples = len(batch[next(iter(batch.keys()))]) for i in range(num_examples): example = {key: batch[key][i] for key in batch} additional_args = () if with_indices: additional_args += (indices[i],) if with_rank: additional_args += (rank,) mask.append(function(example, *additional_args, **fn_kwargs)) else: # inputs is a list of columns columns: List[List] = inputs num_examples = len(columns[0]) for i in range(num_examples): input = [column[i] for column in columns] additional_args = () if with_indices: additional_args += (indices[i],) if with_rank: additional_args += (rank,) mask.append(function(*input, *additional_args, **fn_kwargs)) indices_array = [i for i, to_keep in zip(indices, mask) if to_keep] if indices_mapping is not None: indices_array = pa.array(indices_array, type=pa.uint64()) indices_array = indices_mapping.column(0).take(indices_array) indices_array = indices_array.to_pylist() return {"indices": indices_array}
null
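A toy illustration of the mask-to-indices step performed above: a batched predicate produces a boolean mask, the surviving positions are kept, and they are optionally remapped through a pre-existing indices mapping (as happens after a prior selection). Only `pyarrow` is used; the texts and index values are made up.

```python
import pyarrow as pa

batch = {"text": ["short", "a much longer example", "mid"]}
indices = [0, 1, 2]

mask = [len(t) > 4 for t in batch["text"]]               # batched mask function
kept = [i for i, keep in zip(indices, mask) if keep]     # [0, 1]

# Remap through an existing indices mapping, e.g. left over from a previous selection.
indices_mapping = pa.table({"indices": pa.array([10, 11, 12], type=pa.uint64())})
remapped = indices_mapping.column(0).take(pa.array(kept, type=pa.uint64())).to_pylist()
print({"indices": remapped})   # {'indices': [10, 11]}
```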
17,953
import copy import itertools import sys import warnings from collections import Counter from copy import deepcopy from dataclasses import dataclass from functools import partial from itertools import cycle, islice from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union import fsspec.asyn import numpy as np import pyarrow as pa from . import config from .arrow_dataset import Dataset, DatasetInfoMixin from .features import Features from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter from .info import DatasetInfo from .splits import NamedSplit from .table import cast_table_to_features, read_schema_from_file, table_cast from .utils.logging import get_logger from .utils.py_utils import Literal from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs def identity_func(x): return x
null
17,954
import copy import itertools import sys import warnings from collections import Counter from copy import deepcopy from dataclasses import dataclass from functools import partial from itertools import cycle, islice from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union import fsspec.asyn import numpy as np import pyarrow as pa from . import config from .arrow_dataset import Dataset, DatasetInfoMixin from .features import Features from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter from .info import DatasetInfo from .splits import NamedSplit from .table import cast_table_to_features, read_schema_from_file, table_cast from .utils.logging import get_logger from .utils.py_utils import Literal from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs def _rename_columns_fn(example: Dict, column_mapping: Dict[str, str]): if any(col not in example for col in column_mapping): raise ValueError( f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(column_mapping) - set(example)} are not in the dataset." ) if any(col in example for col in column_mapping.values()): raise ValueError( f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(example) - set(column_mapping.values())} are already in the dataset." ) return { new_column_name: example[original_column_name] for original_column_name, new_column_name in column_mapping.items() }
null
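A per-example sketch of the rename step, standard library only; the column names are illustrative. The helper only emits the renamed columns, and when it is used with `map` the old names are typically dropped separately via `remove_columns`.

```python
example = {"text": "hello", "label": 0}
column_mapping = {"text": "sentence"}

renamed = {
    new_name: example[old_name]
    for old_name, new_name in column_mapping.items()
}
print(renamed)   # {'sentence': 'hello'}
```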
17,955
import copy import itertools import sys import warnings from collections import Counter from copy import deepcopy from dataclasses import dataclass from functools import partial from itertools import cycle, islice from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union import fsspec.asyn import numpy as np import pyarrow as pa from . import config from .arrow_dataset import Dataset, DatasetInfoMixin from .features import Features from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter from .info import DatasetInfo from .splits import NamedSplit from .table import cast_table_to_features, read_schema_from_file, table_cast from .utils.logging import get_logger from .utils.py_utils import Literal from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs def add_column_fn(example: Dict, idx: int, name: str, column: List[Dict]): if name in example: raise ValueError(f"Error when adding {name}: column {name} is already in the dataset.") return {name: column[idx]}
null
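A tiny worked example of the per-example column addition, standard library only; the column name and values are hypothetical. Each example picks the value at its own index from the provided column.

```python
column = ["cat", "dog", "bird"]   # hypothetical new column, one value per example
example, idx = {"text": "woof"}, 1

print({**example, "animal": column[idx]})   # {'text': 'woof', 'animal': 'dog'}
```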
17,956
import copy import itertools import sys import warnings from collections import Counter from copy import deepcopy from dataclasses import dataclass from functools import partial from itertools import cycle, islice from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union import fsspec.asyn import numpy as np import pyarrow as pa from . import config from .arrow_dataset import Dataset, DatasetInfoMixin from .features import Features from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter from .info import DatasetInfo from .splits import NamedSplit from .table import cast_table_to_features, read_schema_from_file, table_cast from .utils.logging import get_logger from .utils.py_utils import Literal from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs def table_cast(table: pa.Table, schema: pa.Schema): def _infer_features_from_batch(batch: Dict[str, list], try_features: Optional[Features] = None) -> Features: pa_table = pa.Table.from_pydict(batch) if try_features is not None: try: pa_table = table_cast(pa_table, pa.schema(try_features.type)) except (TypeError, pa.ArrowInvalid, pa.ArrowNotImplementedError): pass return Features.from_arrow_schema(pa_table.schema)
null
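A usage sketch of schema inference from a plain batch dict, assuming the `datasets` library is installed; the batch contents are illustrative.

```python
import pyarrow as pa
from datasets import Features

batch = {"id": [1, 2], "text": ["a", "b"]}
schema = pa.Table.from_pydict(batch).schema
print(Features.from_arrow_schema(schema))
# e.g. {'id': Value(dtype='int64', id=None), 'text': Value(dtype='string', id=None)}
```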
17,957
import copy import itertools import sys import warnings from collections import Counter from copy import deepcopy from dataclasses import dataclass from functools import partial from itertools import cycle, islice from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union import fsspec.asyn import numpy as np import pyarrow as pa from . import config from .arrow_dataset import Dataset, DatasetInfoMixin from .features import Features from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter from .info import DatasetInfo from .splits import NamedSplit from .table import cast_table_to_features, read_schema_from_file, table_cast from .utils.logging import get_logger from .utils.py_utils import Literal from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs def _examples_to_batch(examples: List[Dict[str, Any]]) -> Dict[str, list]: # we order the columns by order of appearance # to do so, we use a dict as an ordered set cols = {col: None for example in examples for col in example} # when an example is missing a column, we set the value to None with .get() arrays = [[example.get(col) for example in examples] for col in cols] return dict(zip(cols, arrays))
null
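A worked example of the examples-to-batch conversion above, standard library only. Column order follows first appearance, and missing values become `None`.

```python
examples = [{"a": 1, "b": "x"}, {"a": 2}, {"b": "z", "a": 3}]

cols = {col: None for example in examples for col in example}   # ordered "set" of columns
batch = {col: [example.get(col) for example in examples] for col in cols}
print(batch)   # {'a': [1, 2, 3], 'b': ['x', None, 'z']}
```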
17,958
import copy import itertools import sys import warnings from collections import Counter from copy import deepcopy from dataclasses import dataclass from functools import partial from itertools import cycle, islice from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union import fsspec.asyn import numpy as np import pyarrow as pa from . import config from .arrow_dataset import Dataset, DatasetInfoMixin from .features import Features from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter from .info import DatasetInfo from .splits import NamedSplit from .table import cast_table_to_features, read_schema_from_file, table_cast from .utils.logging import get_logger from .utils.py_utils import Literal from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs The provided code snippet includes necessary dependencies for implementing the `_batch_to_examples` function. Write a Python function `def _batch_to_examples(batch: Dict[str, list]) -> List[Dict[str, Any]]` to solve the following problem: Convert a batch (dict of examples) to examples list Here is the function: def _batch_to_examples(batch: Dict[str, list]) -> List[Dict[str, Any]]: """Convert a batch (dict of examples) to examples list""" n_examples = len(batch[next(iter(batch))]) for i in range(n_examples): yield {col: array[i] for col, array in batch.items()}
Convert a batch (dict of examples) to examples list
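The inverse conversion, shown on the batch produced in the previous record; standard library only.

```python
batch = {"a": [1, 2, 3], "b": ["x", None, "z"]}

n_examples = len(batch[next(iter(batch))])
examples = [{col: array[i] for col, array in batch.items()} for i in range(n_examples)]
print(examples)   # [{'a': 1, 'b': 'x'}, {'a': 2, 'b': None}, {'a': 3, 'b': 'z'}]
```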
17,959
import copy import itertools import sys import warnings from collections import Counter from copy import deepcopy from dataclasses import dataclass from functools import partial from itertools import cycle, islice from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union import fsspec.asyn import numpy as np import pyarrow as pa from . import config from .arrow_dataset import Dataset, DatasetInfoMixin from .features import Features from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter from .info import DatasetInfo from .splits import NamedSplit from .table import cast_table_to_features, read_schema_from_file, table_cast from .utils.logging import get_logger from .utils.py_utils import Literal from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs Key = Union[int, str] def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any: """ Cast numpy/pytorch/tensorflow/pandas objects to python lists. It works recursively. If `optimize_list_casting` is True, To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted. If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same. This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example. Args: obj: the object (nested struct) to cast only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays. Indeed Arrow only support converting 1-dimensional array values. optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted and if it doesn't, not checking the rest of the list elements. Returns: casted_obj: the casted object """ return _cast_to_python_objects( obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] The provided code snippet includes necessary dependencies for implementing the `_convert_to_arrow` function. Write a Python function `def _convert_to_arrow( iterable: Iterable[Tuple[Key, dict]], batch_size: int, drop_last_batch: bool = False, ) -> Iterator[Tuple[Key, pa.Table]]` to solve the following problem: Convert and group examples in Arrow tables of size `batch_size`. Args: iterable (`Iterable[Tuple[Key, dict]]`): An examples iterable containing tuples (example_key, example) of type (int/str, dict) batch_size (`Optional[int]`): Size of each sub-table to yield. If None or <= 0, yields the full table. drop_last_batch (`bool`, defaults to `False`): Drop the last batch if it is smaller than `batch_size`. Here is the function: def _convert_to_arrow( iterable: Iterable[Tuple[Key, dict]], batch_size: int, drop_last_batch: bool = False, ) -> Iterator[Tuple[Key, pa.Table]]: """Convert and group examples in Arrow tables of size `batch_size`. 
Args: iterable (`Iterable[Tuple[Key, dict]]`): An examples iterable containing tuples (example_key, example) of type (int/str, dict) batch_size (`Optional[int]`): Size of each sub-table to yield. If None or <= 0, yields the full table. drop_last_batch (`bool`, defaults to `False`): Drop the last batch if it is smaller than `batch_size`. """ if batch_size is None or batch_size <= 0: yield ( "all", pa.Table.from_pylist(cast_to_python_objects([example for _, example in iterable], only_1d_for_numpy=True)), ) return iterator = iter(iterable) for key, example in iterator: iterator_batch = islice(iterator, batch_size - 1) key_examples_list = [(key, example)] + list(iterator_batch) if len(key_examples_list) < batch_size and drop_last_batch: return keys, examples = zip(*key_examples_list) new_key = "_".join(str(key) for key in keys) yield new_key, pa.Table.from_pylist(cast_to_python_objects(examples, only_1d_for_numpy=True))
Convert and group examples in Arrow tables of size `batch_size`. Args: iterable (`Iterable[Tuple[Key, dict]]`): An examples iterable containing tuples (example_key, example) of type (int/str, dict) batch_size (`Optional[int]`): Size of each sub-table to yield. If None or <= 0, yields the full table. drop_last_batch (`bool`, defaults to `False`): Drop the last batch if it is smaller than `batch_size`.
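A simplified sketch of the grouping loop used above, with only `pyarrow` and the standard library; the generator and batch size are made up. It shows how consecutive examples are pulled from a single iterator, packed into Arrow tables, and keyed by the joined example keys.

```python
from itertools import islice

import pyarrow as pa

def examples():
    for i in range(5):
        yield str(i), {"value": i}

batch_size = 2
iterator = iter(examples())
for key, example in iterator:
    group = [(key, example)] + list(islice(iterator, batch_size - 1))
    keys, rows = zip(*group)
    print("_".join(keys), pa.Table.from_pylist(list(rows)).to_pydict())
# 0_1 {'value': [0, 1]}
# 2_3 {'value': [2, 3]}
# 4 {'value': [4]}
```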
17,960
import copy import itertools import sys import warnings from collections import Counter from copy import deepcopy from dataclasses import dataclass from functools import partial from itertools import cycle, islice from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union import fsspec.asyn import numpy as np import pyarrow as pa from . import config from .arrow_dataset import Dataset, DatasetInfoMixin from .features import Features from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter from .info import DatasetInfo from .splits import NamedSplit from .table import cast_table_to_features, read_schema_from_file, table_cast from .utils.logging import get_logger from .utils.py_utils import Literal from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs Key = Union[int, str] The provided code snippet includes necessary dependencies for implementing the `_batch_arrow_tables` function. Write a Python function `def _batch_arrow_tables( iterable: Iterable[Tuple[Key, pa.Table]], batch_size: Optional[int], drop_last_batch: bool = False, ) -> Iterator[Tuple[Key, pa.Table]]` to solve the following problem: Iterate over sub-tables of size `batch_size`. Args: iterable (`Iterable[Tuple[Key, pa.Table]]`): A tables iterable containing tuples (table_key, table) of type (int/str, pa.Table) batch_size (`Optional[int]`): Size of each sub-table to yield. If None or <= 0, yields the full table. drop_last_batch (`bool`, defaults to `False`): Drop the last batch if it is smaller than `batch_size`. Here is the function: def _batch_arrow_tables( iterable: Iterable[Tuple[Key, pa.Table]], batch_size: Optional[int], drop_last_batch: bool = False, ) -> Iterator[Tuple[Key, pa.Table]]: """Iterate over sub-tables of size `batch_size`. Args: iterable (`Iterable[Tuple[Key, pa.Table]]`): A tables iterable containing tuples (table_key, table) of type (int/str, pa.Table) batch_size (`Optional[int]`): Size of each sub-table to yield. If None or <= 0, yields the full table. drop_last_batch (`bool`, defaults to `False`): Drop the last batch if it is smaller than `batch_size`. 
""" if batch_size is None or batch_size <= 0: yield "all", pa.concat_tables([pa_table for _, pa_table in iterable]) return keys_buffer = [] chunks_buffer = [] chunks_buffer_size = 0 for key, pa_table in iterable: for chunk in pa_table.to_reader(max_chunksize=batch_size): if len(chunk) == 0: continue elif chunks_buffer_size + len(chunk) < batch_size: keys_buffer.append(key) chunks_buffer.append(chunk) chunks_buffer_size += len(chunk) continue elif chunks_buffer_size + len(chunk) == batch_size: keys_buffer.append(key) chunks_buffer.append(chunk) new_key = "_".join(str(_key) for _key in keys_buffer) yield new_key, pa.Table.from_batches(chunks_buffer) keys_buffer = [] chunks_buffer = [] chunks_buffer_size = 0 else: cropped_chunk_length = batch_size - chunks_buffer_size keys_buffer.append(f"{key}[:{cropped_chunk_length}]") chunks_buffer.append(chunk.slice(0, cropped_chunk_length)) new_key = "_".join(str(_key) for _key in keys_buffer) yield new_key, pa.Table.from_batches(chunks_buffer) keys_buffer = [f"{key}[{cropped_chunk_length}:]"] chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)] chunks_buffer_size = len(chunk) - cropped_chunk_length if not drop_last_batch and chunks_buffer: new_key = "_".join(str(_key) for _key in keys_buffer) yield new_key, pa.Table.from_batches(chunks_buffer)
Iterate over sub-tables of size `batch_size`. Args: iterable (`Iterable[Tuple[Key, pa.Table]]`): A tables iterable containing tuples (table_key, table) of type (int/str, pa.Table) batch_size (`Optional[int]`): Size of each sub-table to yield. If None or <= 0, yields the full table. drop_last_batch (`bool`, defaults to `False`): Drop the last batch if it is smaller than `batch_size`.
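A simplified re-batching sketch using only `pyarrow`; the input keys and tables are illustrative, and unlike the full implementation it does not crop an oversized chunk, so the data is chosen to divide evenly. It shows how record batches from different input tables are stitched together to reach the target row count.

```python
import pyarrow as pa

tables = [("t0", pa.table({"x": [0, 1, 2]})), ("t1", pa.table({"x": [3]}))]
batch_size = 2

buffer, buffered_rows = [], 0
for key, pa_table in tables:
    for chunk in pa_table.to_reader(max_chunksize=batch_size):
        buffer.append(chunk)
        buffered_rows += len(chunk)
        if buffered_rows >= batch_size:
            print(pa.Table.from_batches(buffer).to_pydict())
            buffer, buffered_rows = [], 0
if buffer:
    print(pa.Table.from_batches(buffer).to_pydict())
# {'x': [0, 1]}
# {'x': [2, 3]}   <- stitched from the tail of t0 and the whole of t1
```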
17,961
import copy import itertools import sys import warnings from collections import Counter from copy import deepcopy from dataclasses import dataclass from functools import partial from itertools import cycle, islice from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union import fsspec.asyn import numpy as np import pyarrow as pa from . import config from .arrow_dataset import Dataset, DatasetInfoMixin from .features import Features from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter from .info import DatasetInfo from .splits import NamedSplit from .table import cast_table_to_features, read_schema_from_file, table_cast from .utils.logging import get_logger from .utils.py_utils import Literal from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs def _apply_feature_types_on_example( example: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]] ) -> dict: example = dict(example) # add missing columns for column_name in features: if column_name not in example: example[column_name] = None # we encode the example for ClassLabel feature types for example encoded_example = features.encode_example(example) # Decode example for Audio feature, e.g. decoded_example = features.decode_example(encoded_example, token_per_repo_id=token_per_repo_id) return decoded_example
null
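A minimal sketch of the fill-then-encode part of the helper, assuming the `datasets` library is installed; the feature names and values are illustrative, and the decoding step for features such as `Audio` or `Image` is omitted.

```python
from datasets import ClassLabel, Features, Value

features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})

example = {"text": "great movie"}                          # "label" is missing
example.update({col: None for col in features if col not in example})
print(features.encode_example(example))                    # {'text': 'great movie', 'label': None}

print(features.encode_example({"text": "awful", "label": "neg"}))
# {'text': 'awful', 'label': 0}
```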
17,962
import copy import itertools import sys import warnings from collections import Counter from copy import deepcopy from dataclasses import dataclass from functools import partial from itertools import cycle, islice from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union import fsspec.asyn import numpy as np import pyarrow as pa from . import config from .arrow_dataset import Dataset, DatasetInfoMixin from .features import Features from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter from .info import DatasetInfo from .splits import NamedSplit from .table import cast_table_to_features, read_schema_from_file, table_cast from .utils.logging import get_logger from .utils.py_utils import Literal from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs def _apply_feature_types_on_batch( batch: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]] ) -> dict: batch = dict(batch) # add missing columns n_examples = len(batch[next(iter(batch))]) for column_name in features: if column_name not in batch: batch[column_name] = [None] * n_examples # we encode the batch for ClassLabel feature types for example encoded_batch = features.encode_batch(batch) # Decode batch for Audio feature, e.g. decoded_batch = features.decode_batch(encoded_batch, token_per_repo_id=token_per_repo_id) return decoded_batch
null
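The batched counterpart, again assuming the `datasets` library is installed; missing columns are padded with one `None` per example before encoding.

```python
from datasets import ClassLabel, Features, Value

features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})

batch = {"text": ["good", "bad"]}                          # "label" column is missing
n_examples = len(batch[next(iter(batch))])
batch.update({col: [None] * n_examples for col in features if col not in batch})
print(features.encode_batch(batch))    # {'text': ['good', 'bad'], 'label': [None, None]}

print(features.encode_batch({"text": ["good", "bad"], "label": ["pos", "neg"]}))
# {'text': ['good', 'bad'], 'label': [1, 0]}
```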
17,963
import copy import itertools import sys import warnings from collections import Counter from copy import deepcopy from dataclasses import dataclass from functools import partial from itertools import cycle, islice from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union import fsspec.asyn import numpy as np import pyarrow as pa from . import config from .arrow_dataset import Dataset, DatasetInfoMixin from .features import Features from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter from .info import DatasetInfo from .splits import NamedSplit from .table import cast_table_to_features, read_schema_from_file, table_cast from .utils.logging import get_logger from .utils.py_utils import Literal from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs class IterableDataset(DatasetInfoMixin): """A Dataset backed by an iterable.""" def __init__( self, ex_iterable: _BaseExamplesIterable, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, formatting: Optional[FormattingConfig] = None, shuffling: Optional[ShufflingConfig] = None, distributed: Optional[DistributedConfig] = None, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None, format_type="deprecated", ): if distributed and distributed.world_size > 1 and shuffling and shuffling._original_seed is None: raise RuntimeError( "The dataset doesn't have a fixed random seed across nodes to shuffle and split the list of dataset shards by node. " "Please pass e.g. `seed=42` in `.shuffle()` to make all the nodes use the same seed. " ) if format_type != "deprecated": warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. " help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead." 
warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2) formatting = FormattingConfig(format_type=format_type) info = info.copy() if info is not None else DatasetInfo() DatasetInfoMixin.__init__(self, info=info, split=split) self._ex_iterable = ex_iterable self._formatting = formatting self._shuffling = shuffling self._distributed = distributed self._epoch = 0 self._token_per_repo_id: Dict[str, Union[str, bool, None]] = token_per_repo_id or {} _maybe_add_torch_iterable_dataset_parent_class(self.__class__) def __repr__(self): return f"IterableDataset({{\n features: {list(self._info.features.keys()) if self._info.features is not None else 'Unknown'},\n n_shards: {self.n_shards}\n}})" def __getstate__(self): return self.__dict__ def __setstate__(self, d): self.__dict__ = d # Re-add torch iterable dataset as a parent class, since dynamically added parent classes are not kept when pickling _maybe_add_torch_iterable_dataset_parent_class(self.__class__) def _head(self, n=5): return _examples_to_batch(list(self.take(n))) def _effective_generator(self): if self._shuffling and self._epoch == 0: return self._shuffling.generator elif self._shuffling: # Create effective seed using self._epoch (we subtract in order to avoir overflow in long_scalars) effective_seed = deepcopy(self._shuffling.generator).integers(0, 1 << 63) - self._epoch effective_seed = (1 << 63) + effective_seed if effective_seed < 0 else effective_seed return np.random.default_rng(effective_seed) else: raise ValueError("This dataset is not shuffled") def n_shards(self) -> int: if self._distributed and self._ex_iterable.n_shards % self._distributed.world_size == 0: return self._ex_iterable.n_shards // self._distributed.world_size return self._ex_iterable.n_shards def _iter_pytorch(self): ex_iterable = self._prepare_ex_iterable_for_iteration() # Fix for fsspec when using multiprocess to avoid hanging in the ML training loop. (only required for fsspec >= 0.9.0) # See https://github.com/fsspec/gcsfs/issues/379 fsspec.asyn.reset_lock() # check if there aren't too many workers import torch.utils.data worker_info = torch.utils.data.get_worker_info() if self._is_main_process() and ex_iterable.n_shards < worker_info.num_workers: logger.warning( f"Too many dataloader workers: {worker_info.num_workers} (max is dataset.n_shards={ex_iterable.n_shards}). " f"Stopping {worker_info.num_workers - ex_iterable.n_shards} dataloader workers." ) logger.info( f"To parallelize data loading, we give each process some shards (or data sources) to process. " f"Therefore it's unnecessary to have a number of workers greater than dataset.n_shards={ex_iterable.n_shards}. " f"To enable more parallelism, please split the dataset in more files than {ex_iterable.n_shards}." ) # split workload _log_prefix = f"node#{self._distributed.rank} " if self._distributed else "" shards_indices = ex_iterable.split_shard_indices_by_worker(worker_info.id, worker_info.num_workers) if shards_indices: logger.debug( f"{_log_prefix}dataloader worker#{worker_info.id}, ': Starting to iterate over {len(shards_indices)}/{ex_iterable.n_shards} shards." 
) ex_iterable = ex_iterable.shard_data_sources(worker_id=worker_info.id, num_workers=worker_info.num_workers) if self._formatting: formatter = get_formatter(self._formatting.format_type, features=self.features) format_dict = ( formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects ) else: format_dict = None if self._formatting and (ex_iterable.iter_arrow or self._formatting == "arrow"): if ex_iterable.iter_arrow: iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1) else: iterator = _convert_to_arrow(ex_iterable, batch_size=1) for key, pa_table in iterator: yield formatter.format_row(pa_table) return else: for key, example in ex_iterable: if self.features: # `IterableDataset` automatically fills missing columns with None. # This is done with `_apply_feature_types_on_example`. example = _apply_feature_types_on_example( example, self.features, token_per_repo_id=self._token_per_repo_id ) yield format_dict(example) if format_dict else example logger.debug( f"{_log_prefix}dataloader worker#{worker_info.id}, ': Finished iterating over {len(shards_indices)}/{ex_iterable.n_shards} shards." ) else: logger.debug( f"{_log_prefix}dataloader worker#{worker_info.id}, ': Stopping... Number of dataset shards < num_workers ({ex_iterable.n_shards}<{worker_info.num_workers})." ) def _is_main_process(self): if self._distributed and self._distributed.rank > 0: return False if "torch" in sys.modules: import torch.utils.data worker_info = torch.utils.data.get_worker_info() if worker_info is not None and worker_info.id > 0: return False return True def _prepare_ex_iterable_for_iteration(self) -> _BaseExamplesIterable: if self._shuffling: ex_iterable = self._ex_iterable.shuffle_data_sources(self._effective_generator()) else: ex_iterable = self._ex_iterable if self._distributed: rank = self._distributed.rank world_size = self._distributed.world_size if ex_iterable.n_shards % world_size == 0: if self._is_main_process(): n_shards_per_node = ex_iterable.n_shards // world_size plural = "s" if n_shards_per_node > 1 else "" logger.info( f"Assigning {n_shards_per_node} shard{plural} (or data source{plural}) of the dataset to each node." ) ex_iterable = ex_iterable.shard_data_sources(rank, world_size) else: if self._is_main_process(): logger.info( f"Assigning 1 out of {world_size} examples of the dataset to each node. The others are skipped during the iteration." ) logger.info( f"It is more optimized to distribute the dataset shards (or data sources) across nodes. " f"You can do that by using a dataset with number of shards that is a factor of world_size={world_size}. 
" f"The current dataset has {ex_iterable.n_shards} which is not a factor of {world_size}" ) ex_iterable = StepExamplesIterable(ex_iterable, step=world_size, offset=rank) return ex_iterable def __iter__(self): if "torch" in sys.modules: import torch.utils.data worker_info = torch.utils.data.get_worker_info() if isinstance(self, torch.utils.data.IterableDataset) and worker_info is not None: # We're a torch.utils.data.IterableDataset in a PyTorch worker process yield from self._iter_pytorch() return ex_iterable = self._prepare_ex_iterable_for_iteration() if self._formatting: formatter = get_formatter(self._formatting.format_type, features=self.features) format_dict = ( formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects ) else: format_dict = None if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"): if ex_iterable.iter_arrow: iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1) else: iterator = _convert_to_arrow(ex_iterable, batch_size=1) for key, pa_table in iterator: yield formatter.format_row(pa_table) return for key, example in ex_iterable: if self.features: # `IterableDataset` automatically fills missing columns with None. # This is done with `_apply_feature_types_on_example`. example = _apply_feature_types_on_example( example, self.features, token_per_repo_id=self._token_per_repo_id ) yield format_dict(example) if format_dict else example def iter(self, batch_size: int, drop_last_batch: bool = False): """Iterate through the batches of size `batch_size`. Args: batch_size (:obj:`int`): size of each batch to yield. drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be dropped """ if self._formatting: formatter = get_formatter(self._formatting.format_type, features=self.features) format_dict = ( formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects ) else: format_dict = None ex_iterable = self._prepare_ex_iterable_for_iteration() if self._formatting and (ex_iterable.iter_arrow or self._formatting == "arrow"): if ex_iterable.iter_arrow: iterator = _batch_arrow_tables( ex_iterable.iter_arrow(), batch_size=batch_size, drop_last_batch=drop_last_batch ) else: iterator = _convert_to_arrow(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch) for key, pa_table in iterator: yield formatter.format_batch(pa_table) return iterator = iter(ex_iterable) for key, example in iterator: # If batched, first build the batch examples = [example] + [example for key, example in islice(iterator, batch_size - 1)] if drop_last_batch and len(examples) < batch_size: # ignore last batch return batch = _examples_to_batch(examples) if self.features: # `IterableDataset` automatically fills missing columns with None. # This is done with `_apply_feature_types_on_batch`. batch = _apply_feature_types_on_batch(batch, self.features, token_per_repo_id=self._token_per_repo_id) yield format_dict(batch) if format_dict else batch def from_generator( generator: Callable, features: Optional[Features] = None, gen_kwargs: Optional[dict] = None, ) -> "IterableDataset": """Create an Iterable Dataset from a generator. Args: generator (`Callable`): A generator function that `yields` examples. features (`Features`, *optional*): Dataset features. gen_kwargs(`dict`, *optional*): Keyword arguments to be passed to the `generator` callable. You can define a sharded iterable dataset by passing the list of shards in `gen_kwargs`. 
This can be used to improve shuffling and when iterating over the dataset with multiple workers. Returns: `IterableDataset` Example: ```py >>> def gen(): ... yield {"text": "Good", "label": 0} ... yield {"text": "Bad", "label": 1} ... >>> ds = IterableDataset.from_generator(gen) ``` ```py >>> def gen(shards): ... for shard in shards: ... with open(shard) as f: ... for line in f: ... yield {"line": line} ... >>> shards = [f"data{i}.txt" for i in range(32)] >>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards}) >>> ds = ds.shuffle(seed=42, buffer_size=10_000) # shuffles the shards order + uses a shuffle buffer >>> from torch.utils.data import DataLoader >>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4) # give each worker a subset of 32/4=8 shards ``` """ from .io.generator import GeneratorDatasetInputStream return GeneratorDatasetInputStream( generator=generator, features=features, gen_kwargs=gen_kwargs, streaming=True, ).read() def from_spark( df: "pyspark.sql.DataFrame", split: Optional[NamedSplit] = None, features: Optional[Features] = None, **kwargs, ) -> "IterableDataset": """Create an IterableDataset from Spark DataFrame. The dataset is streamed to the driver in batches. Args: df (`pyspark.sql.DataFrame`): The DataFrame containing the desired data. split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. Returns: [`IterableDataset`] Example: ```py >>> df = spark.createDataFrame( >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]], >>> columns=["id", "name"], >>> ) >>> ds = IterableDataset.from_spark(df) ``` """ from .io.spark import SparkDatasetReader if sys.platform == "win32": raise EnvironmentError("IterableDataset.from_spark is not currently supported on Windows") return SparkDatasetReader( df, split=split, features=features, streaming=True, **kwargs, ).read() def from_file(filename: str) -> "IterableDataset": """Instantiate a IterableDataset from Arrow table at filename. Args: filename (`str`): File name of the dataset. Returns: [`IterableDataset`] """ pa_table_schema = read_schema_from_file(filename) inferred_features = Features.from_arrow_schema(pa_table_schema) ex_iterable = ArrowExamplesIterable(Dataset._generate_tables_from_cache_file, kwargs={"filename": filename}) return IterableDataset(ex_iterable=ex_iterable, info=DatasetInfo(features=inferred_features)) def with_format( self, type: Optional[str] = None, ) -> "IterableDataset": """ Return a dataset with the specified format. Supported formats: "arrow", or None for regular python objects. The other formats are currently not implemented. 
Args: type (`str`, optional, default None): if set to "torch", the returned dataset will be a subclass of torch.utils.data.IterableDataset to be used in a DataLoader """ type = get_format_type_from_alias(type) # TODO(QL): add format_kwargs # TODO(QL): add format_columns and return_all_columns # TODO(QL): add pandas format return IterableDataset( ex_iterable=self._ex_iterable, info=self._info.copy(), split=self._split, formatting=FormattingConfig(format_type=type), shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def map( self, function: Optional[Callable] = None, with_indices: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[Union[str, List[str]]] = None, features: Optional[Features] = None, fn_kwargs: Optional[dict] = None, ) -> "IterableDataset": """ Apply a function to all the examples in the iterable dataset (individually or in batches) and update them. If your function returns a column that already exists, then it overwrites it. The function is applied on-the-fly on the examples when iterating over the dataset. You can specify whether the function should be batched or not with the `batched` parameter: - If batched is `False`, then the function takes 1 example in and should return 1 example. An example is a dictionary, e.g. `{"text": "Hello there !"}`. - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples. A batch is a dictionary, e.g. a batch of 1 example is {"text": ["Hello there !"]}. - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples. Note that the last batch may have less than `n` examples. A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`. Args: function (`Callable`, *optional*, defaults to `None`): Function applied on-the-fly on the examples when you iterate on the dataset. It must have one of the following signatures: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True` - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True` For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. If no function is provided, default to identity function: `lambda x: x`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`. 
`batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to `function`. drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`[List[str]]`, *optional*, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. features (`[Features]`, *optional*, defaults to `None`): Feature types of the resulting dataset. fn_kwargs (`Dict`, *optional*, default `None`): Keyword arguments to be passed to `function`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> def add_prefix(example): ... example["text"] = "Review: " + example["text"] ... return example >>> ds = ds.map(add_prefix) >>> list(ds.take(3)) [{'label': 1, 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'Review: effective but too-tepid biopic'}] ``` """ if isinstance(input_columns, str): input_columns = [input_columns] if isinstance(remove_columns, str): remove_columns = [remove_columns] if function is None: function = identity_func if fn_kwargs is None: fn_kwargs = {} ex_iterable = MappedExamplesIterable( TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id) if self._info.features is not None else self._ex_iterable, function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, remove_columns=remove_columns, fn_kwargs=fn_kwargs, formatting=self._formatting, ) info = self.info.copy() info.features = features return IterableDataset( ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def filter( self, function: Optional[Callable] = None, with_indices=False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, fn_kwargs: Optional[dict] = None, ) -> "IterableDataset": """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function. The filtering is done on-the-fly when iterating over the dataset. Args: function (`Callable`): Callable with one of the following signatures: - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False` - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False` - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True` - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True` If no function is provided, defaults to an always True function: `lambda x: True`. 
with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`. input_columns (`str` or `List[str]`, *optional*): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, default `1000`): Number of examples per batch provided to `function` if `batched=True`. fn_kwargs (`Dict`, *optional*, default `None`): Keyword arguments to be passed to `function`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> ds = ds.filter(lambda x: x["label"] == 0) >>> list(ds.take(3)) [{'label': 0, 'movie_review': 'simplistic , silly and tedious .'}, {'label': 0, 'movie_review': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."}, {'label': 0, 'movie_review': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}] ``` """ if isinstance(input_columns, str): input_columns = [input_columns] # TODO(QL): keep the features (right now if we keep it it would call decode_example again on an already decoded example) info = copy.deepcopy(self._info) info.features = None # We need the examples to be decoded for certain feature types like Image or Audio, so we use TypedExamplesIterable here ex_iterable = FilteredExamplesIterable( TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id) if self._info.features is not None else self._ex_iterable, function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs, formatting=self._formatting, ) return IterableDataset( ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def shuffle( self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000 ) -> "IterableDataset": """ Randomly shuffles the elements of this dataset. This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer, replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or equal to the full size of the dataset is required. For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will initially select a random element from only the first 1000 elements in the buffer. Once an element is selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element, maintaining the 1000 element buffer. If the dataset is made of several shards, it also does shuffle the order of the shards. However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`] then the order of the shards is kept unchanged. Args: seed (`int`, *optional*, defaults to `None`): Random seed that will be used to shuffle the dataset. It is used to sample from the shuffle buffer and also to shuffle the data shards. generator (`numpy.random.Generator`, *optional*): Numpy random Generator to use to compute the permutation of the dataset rows. 
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). buffer_size (`int`, defaults to `1000`): Size of the buffer. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> list(ds.take(3)) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}] >>> shuffled_ds = ds.shuffle(seed=42) >>> list(shuffled_ds.take(3)) [{'label': 1, 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."}, {'label': 1, 'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'}, {'label': 1, 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}] ``` """ if generator is None: generator = np.random.default_rng(seed) else: generator = deepcopy(generator) shuffling = ShufflingConfig(generator=generator, _original_seed=seed) return IterableDataset( ex_iterable=BufferShuffledExamplesIterable( self._ex_iterable, buffer_size=buffer_size, generator=generator ).shuffle_data_sources(generator), info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=shuffling, distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def set_epoch(self, epoch: int): self._epoch = epoch def skip(self, n) -> "IterableDataset": """ Create a new [`IterableDataset`] that skips the first `n` elements. Args: n (`int`): Number of elements to skip. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> list(ds.take(3)) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}] >>> ds = ds.skip(1) >>> list(ds.take(3)) [{'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . 
tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}, {'label': 1, 'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}] ``` """ ex_iterable = SkipExamplesIterable(self._ex_iterable, n) return IterableDataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def take(self, n) -> "IterableDataset": """ Create a new [`IterableDataset`] with only the first `n` elements. Args: n (`int`): Number of elements to take. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> small_ds = ds.take(2) >>> list(small_ds) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}] ``` """ ex_iterable = TakeExamplesIterable(self._ex_iterable, n) return IterableDataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def column_names(self) -> Optional[List[str]]: """Names of the columns in the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation", streaming=True) >>> ds.column_names ['text', 'label'] ``` """ return list(self._info.features.keys()) if self._info.features is not None else None def add_column(self, name: str, column: Union[list, np.array]) -> "IterableDataset": """Add column to Dataset. Args: name (str): Column name. column (list or np.array): Column data to be added. Returns: `IterableDataset` """ return self.map(partial(add_column_fn, name=name, column=column), with_indices=True) def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDataset": """ Rename a column in the dataset, and move the features associated to the original column under the new column name. Args: original_column_name (`str`): Name of the column to rename. new_column_name (`str`): New name for the column. Returns: `IterableDataset`: A copy of the dataset with a renamed column. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} >>> ds = ds.rename_column("text", "movie_review") >>> next(iter(ds)) {'label': 1, 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ return self.rename_columns({original_column_name: new_column_name}) def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDataset": """ Rename several columns in the dataset, and move the features associated to the original columns under the new column names. Args: column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names Returns: `IterableDataset`: A copy of the dataset with renamed columns """ original_features = self._info.features.copy() if self._info.features else None ds_iterable = self.map( partial(_rename_columns_fn, column_mapping=column_mapping), remove_columns=list(column_mapping) ) if original_features is not None: ds_iterable._info.features = Features( { column_mapping[col] if col in column_mapping.keys() else col: feature for col, feature in original_features.items() } ) # check that it's still valid, especially with regard to task templates try: ds_iterable._info.copy() except ValueError: ds_iterable._info.task_templates = None return ds_iterable def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset": """ Remove one or several column(s) in the dataset and the features associated to them. The removal is done on-the-fly on the examples when iterating over the dataset. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to remove. Returns: `IterableDataset`: A copy of the dataset object without the columns to remove. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1} >>> ds = ds.remove_columns("label") >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ original_features = self._info.features.copy() if self._info.features else None ds_iterable = self.map(remove_columns=column_names) if original_features is not None: ds_iterable._info.features = original_features.copy() for col, _ in original_features.items(): if col in column_names: del ds_iterable._info.features[col] # check that it's still valid, especially with regard to task templates try: ds_iterable._info.copy() except ValueError: ds_iterable._info.task_templates = None return ds_iterable def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset": """Select one or several column(s) in the dataset and the features associated to them. The selection is done on-the-fly on the examples when iterating over the dataset. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to select. 
Returns: `IterableDataset`: A copy of the dataset object with selected columns. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1} >>> ds = ds.select_columns("text") >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ if isinstance(column_names, str): column_names = [column_names] if self._info: info = copy.deepcopy(self._info) if self._info.features is not None: missing_columns = set(column_names) - set(self._info.features.keys()) if missing_columns: raise ValueError( f"Column name {list(missing_columns)} not in the " "dataset. Columns in the dataset: " f"{list(self._info.features.keys())}." ) info.features = Features({c: info.features[c] for c in column_names}) # check that it's still valid, especially with regard to task templates try: info.copy() except ValueError: info.task_templates = None ex_iterable = SelectColumnsIterable(self._ex_iterable, column_names) return IterableDataset( ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=self._shuffling, distributed=self._distributed, token_per_repo_id=self._token_per_repo_id, ) def cast_column(self, column: str, feature: FeatureType) -> "IterableDataset": """Cast column to feature for decoding. Args: column (`str`): Column name. feature (`Feature`): Target feature. Returns: `IterableDataset` Example: ```py >>> from datasets import load_dataset, Audio >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train", streaming=True) >>> ds.features {'audio': Audio(sampling_rate=8000, mono=True, decode=True, id=None), 'english_transcription': Value(dtype='string', id=None), 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None), 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None), 'path': Value(dtype='string', id=None), 'transcription': Value(dtype='string', id=None)} >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000)) >>> ds.features {'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None), 'english_transcription': Value(dtype='string', id=None), 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None), 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None), 'path': Value(dtype='string', id=None), 'transcription': Value(dtype='string', id=None)} ``` """ info = self._info.copy() info.features[column] = feature # check that it's still valid, especially with regard to task templates try: info.copy() except ValueError: info.task_templates = None return 
IterableDataset( ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def cast( self, features: Features, ) -> "IterableDataset": """ Cast the dataset to a new set of features. Args: features ([`Features`]): New features to cast the dataset to. The name of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset. Returns: `IterableDataset`: A copy of the dataset with casted features. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> ds.features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> new_features = ds.features.copy() >>> new_features["label"] = ClassLabel(names=["bad", "good"]) >>> new_features["text"] = Value("large_string") >>> ds = ds.cast(new_features) >>> ds.features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='large_string', id=None)} ``` """ info = self._info.copy() info.features = features # check that it's still valid, especially with regard to task templates try: info.copy() except ValueError: info.task_templates = None return IterableDataset( ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def _step(self, step: int, offset: int) -> "IterableDataset": ex_iterable = StepExamplesIterable(self._ex_iterable, step=step, offset=offset) return IterableDataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def _resolve_features(self): if self.features is not None: return self elif isinstance(self._ex_iterable, TypedExamplesIterable): features = self._ex_iterable.features else: features = _infer_features_from_batch(self.with_format(None)._head()) info = self.info.copy() info.features = features return IterableDataset( ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) The provided code snippet includes necessary dependencies for implementing the `_maybe_add_torch_iterable_dataset_parent_class` function. Write a Python function `def _maybe_add_torch_iterable_dataset_parent_class(cls)` to solve the following problem: Add torch.utils.data.IterableDataset as a parent class if 'torch' is available Here is the function: def _maybe_add_torch_iterable_dataset_parent_class(cls): """Add torch.utils.data.IterableDataset as a parent class if 'torch' is available""" if config.TORCH_AVAILABLE: import torch.utils.data if torch.utils.data.IterableDataset not in cls.__bases__: cls.__bases__ += (torch.utils.data.IterableDataset,)
Add torch.utils.data.IterableDataset as a parent class if 'torch' is available
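A minimal, torch-free sketch of the same base-class-injection pattern may help here; the names below (`_Root`, `ExtraParent`, `MyIterable`) are purely illustrative and not part of the library. Note that CPython does not allow reassigning `__bases__` on a class whose only base is `object`, hence the `_Root` placeholder in this sketch.
```py
class _Root:
    """Dummy root class so that __bases__ can be reassigned later."""
    pass


class ExtraParent:
    def tag(self):
        return "extra"


class MyIterable(_Root):
    def __iter__(self):
        yield from range(3)


def _maybe_add_extra_parent_class(cls):
    # Same pattern as the record above: append the parent class only if it
    # is not already among the bases, so repeated calls are harmless.
    if ExtraParent not in cls.__bases__:
        cls.__bases__ += (ExtraParent,)


_maybe_add_extra_parent_class(MyIterable)
_maybe_add_extra_parent_class(MyIterable)  # no-op on the second call

obj = MyIterable()
print(isinstance(obj, ExtraParent))  # True: the parent was added at runtime
print(list(obj), obj.tag())          # [0, 1, 2] extra
```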
17,964
import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging class ParallelBackendConfig: backend_name = None def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func): num_proc = num_proc if num_proc <= len(iterable) else len(iterable) split_kwds = [] # We organize the splits ourselve (contiguous splits) for index in range(num_proc): div = len(iterable) // num_proc mod = len(iterable) % num_proc start = div * index + min(index, mod) end = start + div + (1 if index < mod else 0) split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc)) if len(iterable) != sum(len(i[1]) for i in split_kwds): raise ValueError( f"Error dividing inputs iterable among processes. " f"Total number of objects {len(iterable)}, " f"length: {sum(len(i[1]) for i in split_kwds)}" ) logger.info( f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}" ) initargs, initializer = None, None if not disable_tqdm: initargs, initializer = (RLock(),), tqdm.set_lock with Pool(num_proc, initargs=initargs, initializer=initializer) as pool: mapped = pool.map(single_map_nested_func, split_kwds) logger.info(f"Finished {num_proc} processes") mapped = [obj for proc_res in mapped for obj in proc_res] logger.info(f"Unpacked {len(mapped)} objects") return mapped def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func): # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib, # and it requires monkey-patching joblib internal classes which is subject to change import joblib with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc): return joblib.Parallel()( joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable ) The provided code snippet includes necessary dependencies for implementing the `parallel_map` function. Write a Python function `def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)` to solve the following problem: **Experimental.** Apply a function to iterable elements in parallel, where the implementation uses either multiprocessing.Pool or joblib for parallelization. Args: function (`Callable[[Any], Any]`): Function to be applied to `iterable`. iterable (`list`, `tuple` or `np.ndarray`): Iterable elements to apply function to. num_proc (`int`): Number of processes (if no backend specified) or jobs (using joblib). types (`tuple`): Additional types (besides `dict` values) to apply `function` recursively to their elements. disable_tqdm (`bool`): Whether to disable the tqdm progressbar. desc (`str`): Prefix for the tqdm progressbar. single_map_nested_func (`Callable`): Map function that applies `function` to an element from `iterable`. Takes a tuple of function, data_struct, types, rank, disable_tqdm, desc as input, where data_struct is an element of `iterable`, and `rank` is used for progress bar. Here is the function: def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func): """ **Experimental.** Apply a function to iterable elements in parallel, where the implementation uses either multiprocessing.Pool or joblib for parallelization. Args: function (`Callable[[Any], Any]`): Function to be applied to `iterable`. 
iterable (`list`, `tuple` or `np.ndarray`): Iterable elements to apply function to. num_proc (`int`): Number of processes (if no backend specified) or jobs (using joblib). types (`tuple`): Additional types (besides `dict` values) to apply `function` recursively to their elements. disable_tqdm (`bool`): Whether to disable the tqdm progressbar. desc (`str`): Prefix for the tqdm progressbar. single_map_nested_func (`Callable`): Map function that applies `function` to an element from `iterable`. Takes a tuple of function, data_struct, types, rank, disable_tqdm, desc as input, where data_struct is an element of `iterable`, and `rank` is used for progress bar. """ if ParallelBackendConfig.backend_name is None: return _map_with_multiprocessing_pool( function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func ) return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
**Experimental.** Apply a function to iterable elements in parallel, where the implementation uses either multiprocessing.Pool or joblib for parallelization. Args: function (`Callable[[Any], Any]`): Function to be applied to `iterable`. iterable (`list`, `tuple` or `np.ndarray`): Iterable elements to apply function to. num_proc (`int`): Number of processes (if no backend specified) or jobs (using joblib). types (`tuple`): Additional types (besides `dict` values) to apply `function` recursively to their elements. disable_tqdm (`bool`): Whether to disable the tqdm progressbar. desc (`str`): Prefix for the tqdm progressbar. single_map_nested_func (`Callable`): Map function that applies `function` to an element from `iterable`. Takes a tuple of function, data_struct, types, rank, disable_tqdm, desc as input, where data_struct is an element of `iterable`, and `rank` is used for progress bar.
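The multiprocessing path above splits the input into contiguous slices before handing them to the pool. A self-contained sketch of just that slicing arithmetic (the helper name `contiguous_splits` is ours, not the library's):
```py
# Sketch of the contiguous-split arithmetic used by _map_with_multiprocessing_pool.
def contiguous_splits(iterable, num_proc):
    # Never use more processes than there are items.
    num_proc = min(num_proc, len(iterable))
    splits = []
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        # The first `mod` slices get one extra element each.
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        splits.append(iterable[start:end])
    # Mirrors the ValueError guard above: no element may be lost or duplicated.
    assert sum(len(s) for s in splits) == len(iterable)
    return splits


print(contiguous_splits(list(range(10)), 3))
# [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
```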
17,965
from typing import Union
from huggingface_hub.utils import insecure_hashlib

class InvalidKeyError(Exception):
    """Raises an error when given key is of invalid datatype."""

    def __init__(self, hash_data):
        self.prefix = "\nFAILURE TO GENERATE DATASET: Invalid key type detected"
        self.err_msg = f"\nFound Key {hash_data} of type {type(hash_data)}"
        self.suffix = "\nKeys should be either str, int or bytes type"
        super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")

The provided code snippet includes necessary dependencies for implementing the `_as_bytes` function. Write a Python function `def _as_bytes(hash_data: Union[str, int, bytes]) -> bytes` to solve the following problem:
Returns the input hash_data in its bytes form Args: hash_data: the hash salt/key to be converted to bytes
Here is the function:
def _as_bytes(hash_data: Union[str, int, bytes]) -> bytes:
    """
    Returns the input hash_data in its bytes form

    Args:
        hash_data: the hash salt/key to be converted to bytes
    """
    if isinstance(hash_data, bytes):
        # Data already in bytes, return it as is
        return hash_data
    elif isinstance(hash_data, str):
        # We keep the data as it is for it to be later encoded to UTF-8
        # However replace `\\` with `/` for Windows compatibility
        hash_data = hash_data.replace("\\", "/")
    elif isinstance(hash_data, int):
        hash_data = str(hash_data)
    else:
        # If data is not of the required type, raise error
        raise InvalidKeyError(hash_data)
    return hash_data.encode("utf-8")
Returns the input hash_data in its bytes form Args: hash_data: the hash salt/key to be converted to bytes
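Assuming the `_as_bytes` and `InvalidKeyError` definitions above are in scope, the behaviour on the three accepted key types can be illustrated with made-up key values:
```py
print(_as_bytes(b"raw-key"))       # b'raw-key'        -> bytes pass through unchanged
print(_as_bytes(r"dir\file.txt"))  # b'dir/file.txt'   -> backslashes normalized, then UTF-8 encoded
print(_as_bytes(42))               # b'42'             -> ints go through str() first
try:
    _as_bytes(3.14)                # any other type is rejected
except InvalidKeyError as err:
    print(type(err).__name__)      # InvalidKeyError
```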
17,966
import inspect import os import random import shutil import tempfile import weakref from functools import wraps from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import xxhash from . import config from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH from .utils._dill import dumps from .utils.deprecation_utils import deprecated from .utils.logging import get_logger _TEMP_DIR_FOR_TEMP_CACHE_FILES: Optional["_TempCacheDir"] = None _DATASETS_WITH_TABLE_IN_TEMP_DIR: Optional[weakref.WeakSet] = None The provided code snippet includes necessary dependencies for implementing the `maybe_register_dataset_for_temp_dir_deletion` function. Write a Python function `def maybe_register_dataset_for_temp_dir_deletion(dataset)` to solve the following problem: This function registers the datasets that have cache files in _TEMP_DIR_FOR_TEMP_CACHE_FILES in order to properly delete them before deleting the temporary directory. The temporary directory _TEMP_DIR_FOR_TEMP_CACHE_FILES is used when caching is disabled. Here is the function: def maybe_register_dataset_for_temp_dir_deletion(dataset): """ This function registers the datasets that have cache files in _TEMP_DIR_FOR_TEMP_CACHE_FILES in order to properly delete them before deleting the temporary directory. The temporary directory _TEMP_DIR_FOR_TEMP_CACHE_FILES is used when caching is disabled. """ if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None: return global _DATASETS_WITH_TABLE_IN_TEMP_DIR if _DATASETS_WITH_TABLE_IN_TEMP_DIR is None: _DATASETS_WITH_TABLE_IN_TEMP_DIR = weakref.WeakSet() if any( Path(_TEMP_DIR_FOR_TEMP_CACHE_FILES.name) in Path(cache_file["filename"]).parents for cache_file in dataset.cache_files ): _DATASETS_WITH_TABLE_IN_TEMP_DIR.add(dataset)
This function registers the datasets that have cache files in _TEMP_DIR_FOR_TEMP_CACHE_FILES in order to properly delete them before deleting the temporary directory. The temporary directory _TEMP_DIR_FOR_TEMP_CACHE_FILES is used when caching is disabled.
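The registration relies on a module-level `weakref.WeakSet`, so entries disappear automatically once a dataset object is garbage collected. A self-contained sketch of that pattern with made-up stand-ins (`FakeDataset`, `_REGISTRY`, the temp path) rather than the real `Dataset` class:
```py
import gc
import weakref
from pathlib import Path

_TEMP_DIR = Path("/tmp/hf_datasets_tmp")  # hypothetical temporary cache directory
_REGISTRY = weakref.WeakSet()             # stand-in for _DATASETS_WITH_TABLE_IN_TEMP_DIR


class FakeDataset:
    """Illustrative stand-in exposing the same `cache_files` shape as `Dataset`."""

    def __init__(self, cache_files):
        self.cache_files = cache_files


def maybe_register(dataset):
    # Same parent-path test as above: register only if at least one cache file
    # actually lives under the temporary directory.
    if any(_TEMP_DIR in Path(f["filename"]).parents for f in dataset.cache_files):
        _REGISTRY.add(dataset)


ds = FakeDataset([{"filename": str(_TEMP_DIR / "cache-1234.arrow")}])
maybe_register(ds)
print(len(_REGISTRY))  # 1

del ds
gc.collect()
print(len(_REGISTRY))  # 0: the weak reference vanished with the object
```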
17,967
import inspect import os import random import shutil import tempfile import weakref from functools import wraps from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import xxhash from . import config from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH from .utils._dill import dumps from .utils.deprecation_utils import deprecated from .utils.logging import get_logger _DATASETS_WITH_TABLE_IN_TEMP_DIR: Optional[weakref.WeakSet] = None def get_datasets_with_cache_file_in_temp_dir(): return list(_DATASETS_WITH_TABLE_IN_TEMP_DIR) if _DATASETS_WITH_TABLE_IN_TEMP_DIR is not None else []
null
17,968
import inspect import os import random import shutil import tempfile import weakref from functools import wraps from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import xxhash from . import config from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH from .utils._dill import dumps from .utils.deprecation_utils import deprecated from .utils.logging import get_logger _CACHING_ENABLED = True The provided code snippet includes necessary dependencies for implementing the `enable_caching` function. Write a Python function `def enable_caching()` to solve the following problem: When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows to reload an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use the `download_mode` parameter in [`~datasets.load_dataset`]. Here is the function: def enable_caching(): """ When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows to reload an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use the `download_mode` parameter in [`~datasets.load_dataset`]. """ global _CACHING_ENABLED _CACHING_ENABLED = True
When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows to reload an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use the `download_mode` parameter in [`~datasets.load_dataset`].
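These toggles are also re-exported at the top level of the `datasets` package (in the versions this code comes from), so a typical session-level switch looks like this:
```py
from datasets import disable_caching, enable_caching, is_caching_enabled

print(is_caching_enabled())  # True by default

disable_caching()            # cache files now go to a temporary directory with random names
print(is_caching_enabled())  # False

enable_caching()             # restore fingerprint-based cache reuse
print(is_caching_enabled())  # True
```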
17,969
import inspect import os import random import shutil import tempfile import weakref from functools import wraps from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import xxhash from . import config from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH from .utils._dill import dumps from .utils.deprecation_utils import deprecated from .utils.logging import get_logger _CACHING_ENABLED = True The provided code snippet includes necessary dependencies for implementing the `disable_caching` function. Write a Python function `def disable_caching()` to solve the following problem: When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows to reload an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use the `download_mode` parameter in [`~datasets.load_dataset`]. Here is the function: def disable_caching(): """ When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows to reload an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use the `download_mode` parameter in [`~datasets.load_dataset`]. """ global _CACHING_ENABLED _CACHING_ENABLED = False
When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows to reload an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use the `download_mode` parameter in [`~datasets.load_dataset`].
17,970
import inspect import os import random import shutil import tempfile import weakref from functools import wraps from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import xxhash from . import config from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH from .utils._dill import dumps from .utils.deprecation_utils import deprecated from .utils.logging import get_logger _CACHING_ENABLED = True The provided code snippet includes necessary dependencies for implementing the `set_caching_enabled` function. Write a Python function `def set_caching_enabled(boolean: bool)` to solve the following problem: When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows to reload an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use :func:`datasets.Dataset.save_to_disk` to save a transformed dataset or it will be deleted when session closes - caching doesn't affect :func:`datasets.load_dataset`. If you want to regenerate a dataset from scratch you should use the ``download_mode`` parameter in :func:`datasets.load_dataset`. Here is the function: def set_caching_enabled(boolean: bool): """ When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows to reload an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use :func:`datasets.Dataset.save_to_disk` to save a transformed dataset or it will be deleted when session closes - caching doesn't affect :func:`datasets.load_dataset`. If you want to regenerate a dataset from scratch you should use the ``download_mode`` parameter in :func:`datasets.load_dataset`. """ global _CACHING_ENABLED _CACHING_ENABLED = bool(boolean)
When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows to reload an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use :func:`datasets.Dataset.save_to_disk` to save a transformed dataset or it will be deleted when session closes - caching doesn't affect :func:`datasets.load_dataset`. If you want to regenerate a dataset from scratch you should use the ``download_mode`` parameter in :func:`datasets.load_dataset`.
17,971
import inspect import os import random import shutil import tempfile import weakref from functools import wraps from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import xxhash from . import config from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH from .utils._dill import dumps from .utils.deprecation_utils import deprecated from .utils.logging import get_logger _CACHING_ENABLED = True The provided code snippet includes necessary dependencies for implementing the `is_caching_enabled` function. Write a Python function `def is_caching_enabled() -> bool` to solve the following problem: When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows to reload an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use [`~datasets.Dataset.save_to_disk`]] to save a transformed dataset or it will be deleted when session closes - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use the `download_mode` parameter in [`~datasets.load_dataset`]. Here is the function: def is_caching_enabled() -> bool: """ When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows to reload an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use [`~datasets.Dataset.save_to_disk`]] to save a transformed dataset or it will be deleted when session closes - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use the `download_mode` parameter in [`~datasets.load_dataset`]. """ global _CACHING_ENABLED return bool(_CACHING_ENABLED)
When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows to reload an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use [`~datasets.Dataset.save_to_disk`]] to save a transformed dataset or it will be deleted when session closes - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use the `download_mode` parameter in [`~datasets.load_dataset`].
17,972
import inspect
import os
import random
import shutil
import tempfile
import weakref
from functools import wraps
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import xxhash

from . import config
from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH
from .utils._dill import dumps
from .utils.deprecation_utils import deprecated
from .utils.logging import get_logger

_TEMP_DIR_FOR_TEMP_CACHE_FILES: Optional["_TempCacheDir"] = None

class _TempCacheDir:
    """
    A temporary directory for storing cached Arrow files with a cleanup that frees references to the Arrow files
    before deleting the directory itself to avoid permission errors on Windows.
    """

    def __init__(self):
        self.name = tempfile.mkdtemp(prefix=config.TEMP_CACHE_DIR_PREFIX)
        self._finalizer = weakref.finalize(self, self._cleanup)

    def _cleanup(self):
        for dset in get_datasets_with_cache_file_in_temp_dir():
            dset.__del__()
        if os.path.exists(self.name):
            try:
                shutil.rmtree(self.name)
            except Exception as e:
                raise OSError(
                    f"An error occurred while trying to delete temporary cache directory {self.name}. Please delete it manually."
                ) from e

    def cleanup(self):
        if self._finalizer.detach():
            self._cleanup()

The provided code snippet includes necessary dependencies for implementing the `get_temporary_cache_files_directory` function. Write a Python function `def get_temporary_cache_files_directory() -> str` to solve the following problem:
Return a directory that is deleted when session closes.
Here is the function:
def get_temporary_cache_files_directory() -> str:
    """Return a directory that is deleted when session closes."""
    global _TEMP_DIR_FOR_TEMP_CACHE_FILES
    if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None:
        _TEMP_DIR_FOR_TEMP_CACHE_FILES = _TempCacheDir()
    return _TEMP_DIR_FOR_TEMP_CACHE_FILES.name
Return a directory that is deleted when session closes.
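The `_TempCacheDir` shown above ties the directory's removal to object lifetime via `weakref.finalize`. A simplified, self-contained sketch of that pattern (it passes the path as an argument to the finalizer instead of using a bound method, and skips the dataset bookkeeping):
```py
import shutil
import tempfile
import weakref


class TempCacheDirSketch:
    """Simplified stand-in for _TempCacheDir."""

    def __init__(self, prefix="hf_tmp_cache_"):
        self.name = tempfile.mkdtemp(prefix=prefix)
        # The finalizer runs when the object is garbage collected or at
        # interpreter shutdown, whichever happens first.
        self._finalizer = weakref.finalize(self, self._remove, self.name)

    @staticmethod
    def _remove(name):
        shutil.rmtree(name, ignore_errors=True)

    def cleanup(self):
        # detach() returns the callback arguments only once, so an explicit
        # cleanup and the finalizer never both try to delete the directory.
        if self._finalizer.detach():
            self._remove(self.name)


tmp = TempCacheDirSketch()
print(tmp.name)  # a fresh directory under the system temp location
tmp.cleanup()    # removed immediately instead of waiting for garbage collection
```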
17,973
import inspect import os import random import shutil import tempfile import weakref from functools import wraps from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import xxhash from . import config from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH from .utils._dill import dumps from .utils.deprecation_utils import deprecated from .utils.logging import get_logger class Hasher: """Hasher that accepts python objects as inputs.""" dispatch: Dict = {} def __init__(self): self.m = xxhash.xxh64() def hash_bytes(cls, value: Union[bytes, List[bytes]]) -> str: value = [value] if isinstance(value, bytes) else value m = xxhash.xxh64() for x in value: m.update(x) return m.hexdigest() def hash_default(cls, value: Any) -> str: return cls.hash(value) def hash(cls, value: Any) -> str: return cls.hash_bytes(dumps(value)) def update(self, value: Any) -> None: header_for_update = f"=={type(value)}==" value_for_update = self.hash(value) self.m.update(header_for_update.encode("utf8")) self.m.update(value_for_update.encode("utf-8")) def hexdigest(self) -> str: return self.m.hexdigest() def hashregister(*types): def proxy(func): for t in types: Hasher.dispatch[t] = func return func return proxy
null
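For the record above (which has no docstring), a short usage sketch of `Hasher` may help; it assumes the class is importable from `datasets.fingerprint`, which is where this code lives in the version shown:
```py
from datasets.fingerprint import Hasher

cfg_a = {"lr": 1e-3, "layers": [128, 64]}
cfg_b = {"lr": 1e-3, "layers": [128, 64]}
cfg_c = {"lr": 1e-4, "layers": [128, 64]}

# Equal Python objects serialize to the same bytes and therefore hash equally.
print(Hasher.hash(cfg_a) == Hasher.hash(cfg_b))  # True
print(Hasher.hash(cfg_a) == Hasher.hash(cfg_c))  # False

# Incremental hashing, the same way update_fingerprint combines a transform
# name with its arguments.
h = Hasher()
h.update("map")              # transform identifier
h.update({"batched": True})  # transform arguments
print(h.hexdigest())         # xxhash64 hex digest of the accumulated state
```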
17,974
import inspect import os import random import shutil import tempfile import weakref from functools import wraps from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import xxhash from . import config from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH from .utils._dill import dumps from .utils.deprecation_utils import deprecated from .utils.logging import get_logger class Hasher: """Hasher that accepts python objects as inputs.""" dispatch: Dict = {} def __init__(self): self.m = xxhash.xxh64() def hash_bytes(cls, value: Union[bytes, List[bytes]]) -> str: value = [value] if isinstance(value, bytes) else value m = xxhash.xxh64() for x in value: m.update(x) return m.hexdigest() def hash_default(cls, value: Any) -> str: return cls.hash(value) def hash(cls, value: Any) -> str: return cls.hash_bytes(dumps(value)) def update(self, value: Any) -> None: header_for_update = f"=={type(value)}==" value_for_update = self.hash(value) self.m.update(header_for_update.encode("utf8")) self.m.update(value_for_update.encode("utf-8")) def hexdigest(self) -> str: return self.m.hexdigest() def generate_fingerprint(dataset: "Dataset") -> str: state = dataset.__dict__ hasher = Hasher() for key in sorted(state): if key == "_fingerprint": continue hasher.update(key) hasher.update(state[key]) # hash data files last modification timestamps as well for cache_file in dataset.cache_files: hasher.update(os.path.getmtime(cache_file["filename"])) return hasher.hexdigest()
null
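As a rough illustration of the `generate_fingerprint` idea, the sketch below streams the sorted state keys/values and the cache files' modification times into a single xxhash digest, so a change to either produces a new fingerprint. `fingerprint_state` is a hypothetical helper, not the `datasets` API, and `repr()` stands in for the library's pickling-based `Hasher.update`, which can digest arbitrary objects.

```py
import os
from typing import Any, Dict, Iterable

import xxhash


def fingerprint_state(state: Dict[str, Any], cache_files: Iterable[str]) -> str:
    m = xxhash.xxh64()
    for key in sorted(state):
        if key == "_fingerprint":  # never hash the fingerprint itself
            continue
        m.update(key.encode("utf-8"))
        # repr() is a simplification; the real Hasher pickles the value.
        m.update(repr(state[key]).encode("utf-8"))
    # Include each cache file's last-modification time so that on-disk
    # changes also invalidate the fingerprint.
    for path in cache_files:
        m.update(str(os.path.getmtime(path)).encode("utf-8"))
    return m.hexdigest()


# Example (assuming "train.arrow" exists on disk):
# fingerprint_state({"_data": "table", "_split": "train"}, ["train.arrow"])
```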
17,975
import inspect import os import random import shutil import tempfile import weakref from functools import wraps from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import xxhash from . import config from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH from .utils._dill import dumps from .utils.deprecation_utils import deprecated from .utils.logging import get_logger def update_fingerprint(fingerprint, transform, transform_args): global fingerprint_warnings hasher = Hasher() hasher.update(fingerprint) try: hasher.update(transform) except: # noqa various errors might raise here from pickle or dill if _CACHING_ENABLED: if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False): logger.warning( f"Transform {transform} couldn't be hashed properly, a random hash was used instead. " "Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. " "If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. " "This warning is only showed once. Subsequent hashing failures won't be showed." ) fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True else: logger.info(f"Transform {transform} couldn't be hashed properly, a random hash was used instead.") else: logger.info( f"Transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled." ) return generate_random_fingerprint() for key in sorted(transform_args): hasher.update(key) try: hasher.update(transform_args[key]) except: # noqa various errors might raise here from pickle or dill if _CACHING_ENABLED: if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False): logger.warning( f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. " "Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. " "If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. " "This warning is only showed once. Subsequent hashing failures won't be showed." ) fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True else: logger.info( f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead." ) else: logger.info( f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled." ) return generate_random_fingerprint() return hasher.hexdigest() def validate_fingerprint(fingerprint: str, max_length=64): """ Make sure the fingerprint is a non-empty string that is not longer that max_length=64 by default, so that the fingerprint can be used to name cache files without issues. """ if not isinstance(fingerprint, str) or not fingerprint: raise ValueError(f"Invalid fingerprint '{fingerprint}': it should be a non-empty string.") for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH: if invalid_char in fingerprint: raise ValueError( f"Invalid fingerprint. Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{fingerprint}'. " f"They could create issues when creating cache files." 
) if len(fingerprint) > max_length: raise ValueError( f"Invalid fingerprint. Maximum lenth is {max_length} but '{fingerprint}' has length {len(fingerprint)}." "It could create issues when creating cache files." ) def format_transform_for_fingerprint(func: Callable, version: Optional[str] = None) -> str: """ Format a transform to the format that will be used to update the fingerprint. """ transform = f"{func.__module__}.{func.__qualname__}" if version is not None: transform += f"@{version}" return transform def format_kwargs_for_fingerprint( func: Callable, args: Tuple, kwargs: Dict[str, Any], use_kwargs: Optional[List[str]] = None, ignore_kwargs: Optional[List[str]] = None, randomized_function: bool = False, ) -> Dict[str, Any]: """ Format the kwargs of a transform to the format that will be used to update the fingerprint. """ kwargs_for_fingerprint = kwargs.copy() if args: params = [p.name for p in inspect.signature(func).parameters.values() if p != p.VAR_KEYWORD] args = args[1:] # assume the first argument is the dataset params = params[1:] kwargs_for_fingerprint.update(zip(params, args)) else: del kwargs_for_fingerprint[ next(iter(inspect.signature(func).parameters)) ] # assume the first key is the dataset # keep the right kwargs to be hashed to generate the fingerprint if use_kwargs: kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k in use_kwargs} if ignore_kwargs: kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k not in ignore_kwargs} if randomized_function: # randomized functions have `seed` and `generator` parameters if kwargs_for_fingerprint.get("seed") is None and kwargs_for_fingerprint.get("generator") is None: _, seed, pos, *_ = np.random.get_state() seed = seed[pos] if pos < 624 else seed[0] kwargs_for_fingerprint["generator"] = np.random.default_rng(seed) # remove kwargs that are the default values default_values = { p.name: p.default for p in inspect.signature(func).parameters.values() if p.default != inspect._empty } for default_varname, default_value in default_values.items(): if default_varname in kwargs_for_fingerprint and kwargs_for_fingerprint[default_varname] == default_value: kwargs_for_fingerprint.pop(default_varname) return kwargs_for_fingerprint import inspect class Dataset(DatasetInfoMixin, IndexableMixin, TensorflowDatasetMixin): """A Dataset backed by an Arrow table.""" def __init__( self, arrow_table: Table, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_table: Optional[Table] = None, fingerprint: Optional[str] = None, ): info = info.copy() if info is not None else DatasetInfo() DatasetInfoMixin.__init__(self, info=info, split=split) IndexableMixin.__init__(self) self._data: Table = _check_table(arrow_table) self._indices: Optional[Table] = _check_table(indices_table) if indices_table is not None else None maybe_register_dataset_for_temp_dir_deletion(self) self._format_type: Optional[str] = None self._format_kwargs: dict = {} self._format_columns: Optional[list] = None self._output_all_columns: bool = False self._fingerprint: str = fingerprint # Read metadata if self._data.schema.metadata is not None and b"huggingface" in self._data.schema.metadata: metadata = json.loads(self._data.schema.metadata[b"huggingface"].decode()) if ( "fingerprint" in metadata and self._fingerprint is None ): # try to load fingerprint from the arrow file metadata self._fingerprint = metadata["fingerprint"] # Infer features if None inferred_features = 
Features.from_arrow_schema(arrow_table.schema) if self.info.features is None: self.info.features = inferred_features else: # make sure the nested columns are in the right order try: self.info.features = self.info.features.reorder_fields_as(inferred_features) except ValueError as e: raise ValueError( f"{e}\nThe 'source' features come from dataset_info.json, and the 'target' ones are those of the dataset arrow file." ) # Infer fingerprint if None if self._fingerprint is None: self._fingerprint = generate_fingerprint(self) # Sanity checks if self._info.features is None: raise ValueError("Features can't be None in a Dataset object") if self._fingerprint is None: raise ValueError("Fingerprint can't be None in a Dataset object") if self.info.features.type != inferred_features.type: raise ValueError( f"External features info don't match the dataset:\nGot\n{self.info.features}\nwith type\n{self.info.features.type}\n\nbut expected something like\n{inferred_features}\nwith type\n{inferred_features.type}" ) if self._indices is not None: if not pa.types.is_unsigned_integer(self._indices.column(0).type): raise ValueError( f"indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0).type}" ) _check_column_names(self._data.column_names) self._data = update_metadata_with_features(self._data, self._info.features) def features(self) -> Features: features = super().features if features is None: # this is already checked in __init__ raise ValueError("Features can't be None in a Dataset object") return features def from_file( cls, filename: str, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_filename: Optional[str] = None, in_memory: bool = False, ) -> "Dataset": """Instantiate a Dataset backed by an Arrow table at filename. Args: filename (`str`): File name of the dataset. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. indices_filename (`str`, *optional*): File names of the indices. in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. Returns: [`Dataset`] """ table = ArrowReader.read_table(filename, in_memory=in_memory) if indices_filename is not None: indices_pa_table = ArrowReader.read_table(indices_filename, in_memory=in_memory) else: indices_pa_table = None return cls( arrow_table=table, info=info, split=split, indices_table=indices_pa_table, ) def from_buffer( cls, buffer: pa.Buffer, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_buffer: Optional[pa.Buffer] = None, ) -> "Dataset": """Instantiate a Dataset backed by an Arrow buffer. Args: buffer (`pyarrow.Buffer`): Arrow buffer. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. indices_buffer (`pyarrow.Buffer`, *optional*): Indices Arrow buffer. Returns: [`Dataset`] """ table = InMemoryTable.from_buffer(buffer) if indices_buffer is not None: indices_table = InMemoryTable.from_buffer(buffer) else: indices_table = None return cls(table, info=info, split=split, indices_table=indices_table) def from_pandas( cls, df: pd.DataFrame, features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, preserve_index: Optional[bool] = None, ) -> "Dataset": """ Convert `pandas.DataFrame` to a `pyarrow.Table` to create a [`Dataset`]. 
The column types in the resulting Arrow Table are inferred from the dtypes of the `pandas.Series` in the DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the case of `object`, we need to guess the datatype by looking at the Python objects in this Series. Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only contains `None/nan` objects, the type is set to `null`. This behavior can be avoided by constructing explicit features and passing it to this function. Args: df (`pandas.DataFrame`): Dataframe that contains the dataset. features ([`Features`], *optional*): Dataset features. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. preserve_index (`bool`, *optional*): Whether to store the index as an additional column in the resulting Dataset. The default of `None` will store the index as a column, except for `RangeIndex` which is stored as metadata only. Use `preserve_index=True` to force it to be stored as a column. Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_pandas(df) ``` """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None if info is None: info = DatasetInfo() info.features = features table = InMemoryTable.from_pandas( df=df, preserve_index=preserve_index, ) if features is not None: # more expensive cast than InMemoryTable.from_pandas(..., schema=features.arrow_schema) # needed to support the str to Audio conversion for instance table = table.cast(features.arrow_schema) return cls(table, info=info, split=split) def from_polars( cls, df: "pl.DataFrame", features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, ) -> "Dataset": """ Collect the underlying arrow arrays in an Arrow Table. This operation is mostly zero copy. Data types that do copy: * CategoricalType Args: df (`polars.DataFrame`): DataFrame to convert to Arrow Table features (`Features`, optional): Dataset features. info (`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (`NamedSplit`, optional): Name of the dataset split. Examples: ```py >>> ds = Dataset.from_polars(df) ``` """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None if info is None: info = DatasetInfo() info.features = features table = InMemoryTable(df.to_arrow()) if features is not None: # more expensive cast than InMemoryTable.from_polars(..., schema=features.arrow_schema) # needed to support the str to Audio conversion for instance table = table.cast(features.arrow_schema) return cls(table, info=info, split=split) def from_dict( cls, mapping: dict, features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, ) -> "Dataset": """ Convert `dict` to a `pyarrow.Table` to create a [`Dataset`]. 
Args: mapping (`Mapping`): Mapping of strings to Arrays or Python lists. features ([`Features`], *optional*): Dataset features. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. Returns: [`Dataset`] """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None arrow_typed_mapping = {} for col, data in mapping.items(): if isinstance(data, (pa.Array, pa.ChunkedArray)): data = cast_array_to_feature(data, features[col]) if features is not None else data else: data = OptimizedTypedSequence( features.encode_column(data, col) if features is not None else data, type=features[col] if features is not None else None, col=col, ) arrow_typed_mapping[col] = data mapping = arrow_typed_mapping pa_table = InMemoryTable.from_pydict(mapping=mapping) if info is None: info = DatasetInfo() info.features = features if info.features is None: info.features = Features( { col: generate_from_arrow_type(data.type) if isinstance(data, (pa.Array, pa.ChunkedArray)) else data.get_inferred_type() for col, data in mapping.items() } ) return cls(pa_table, info=info, split=split) def from_list( cls, mapping: List[dict], features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, ) -> "Dataset": """ Convert a list of dicts to a `pyarrow.Table` to create a [`Dataset`]`. Note that the keys of the first entry will be used to determine the dataset columns, regardless of what is passed to features. Args: mapping (`List[dict]`): A list of mappings of strings to row values. features (`Features`, optional): Dataset features. info (`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (`NamedSplit`, optional): Name of the dataset split. Returns: [`Dataset`] """ # for simplicity and consistency wrt OptimizedTypedSequence we do not use InMemoryTable.from_pylist here mapping = {k: [r.get(k) for r in mapping] for k in mapping[0]} if mapping else {} return cls.from_dict(mapping, features, info, split) def from_csv( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from CSV file(s). Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the CSV file(s). split ([`NamedSplit`], *optional*): Split name to be assigned to the dataset. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. <Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`pandas.read_csv`]. 
Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_csv('path/to/dataset.csv') ``` """ # Dynamic import to avoid circular dependency from .io.csv import CsvDatasetReader return CsvDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, num_proc=num_proc, **kwargs, ).read() def from_generator( generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs, ): """Create a Dataset from a generator. Args: generator (:`Callable`): A generator function that `yields` examples. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. gen_kwargs(`dict`, *optional*): Keyword arguments to be passed to the `generator` callable. You can define a sharded dataset by passing the list of shards in `gen_kwargs` and setting `num_proc` greater than 1. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. If `num_proc` is greater than one, then all list values in `gen_kwargs` must be the same length. These values will be split between calls to the generator. The number of shards will be the minimum of the shortest list in `gen_kwargs` and `num_proc`. <Added version="2.7.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to :[`GeneratorConfig`]. Returns: [`Dataset`] Example: ```py >>> def gen(): ... yield {"text": "Good", "label": 0} ... yield {"text": "Bad", "label": 1} ... >>> ds = Dataset.from_generator(gen) ``` ```py >>> def gen(shards): ... for shard in shards: ... with open(shard) as f: ... for line in f: ... yield {"line": line} ... >>> shards = [f"data{i}.txt" for i in range(32)] >>> ds = Dataset.from_generator(gen, gen_kwargs={"shards": shards}) ``` """ from .io.generator import GeneratorDatasetInputStream return GeneratorDatasetInputStream( generator=generator, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, gen_kwargs=gen_kwargs, num_proc=num_proc, **kwargs, ).read() def from_json( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from JSON or JSON Lines file(s). Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the JSON or JSON Lines file(s). split ([`NamedSplit`], *optional*): Split name to be assigned to the dataset. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. field (`str`, *optional*): Field name of the JSON file where the dataset is contained in. num_proc (`int`, *optional* defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. <Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`JsonConfig`]. 
Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_json('path/to/dataset.json') ``` """ # Dynamic import to avoid circular dependency from .io.json import JsonDatasetReader return JsonDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, field=field, num_proc=num_proc, **kwargs, ).read() def from_parquet( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, columns: Optional[List[str]] = None, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from Parquet file(s). Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the Parquet file(s). split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. columns (`List[str]`, *optional*): If not `None`, only these columns will be read from the file. A column name may be a prefix of a nested field, e.g. 'a' will select 'a.b', 'a.c', and 'a.d.e'. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. <Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`ParquetConfig`]. Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_parquet('path/to/dataset.parquet') ``` """ # Dynamic import to avoid circular dependency from .io.parquet import ParquetDatasetReader return ParquetDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, columns=columns, num_proc=num_proc, **kwargs, ).read() def from_text( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from text file(s). Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the text file(s). split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. <Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`TextConfig`]. 
Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_text('path/to/dataset.txt') ``` """ # Dynamic import to avoid circular dependency from .io.text import TextDatasetReader return TextDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, num_proc=num_proc, **kwargs, ).read() def from_spark( df: "pyspark.sql.DataFrame", split: Optional[NamedSplit] = None, features: Optional[Features] = None, keep_in_memory: bool = False, cache_dir: str = None, working_dir: str = None, load_from_cache_file: bool = True, **kwargs, ): """Create a Dataset from Spark DataFrame. Dataset downloading is distributed over Spark workers. Args: df (`pyspark.sql.DataFrame`): The DataFrame containing the desired data. split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. When using a multi-node Spark cluster, the cache_dir must be accessible to both workers and the driver. keep_in_memory (`bool`): Whether to copy the data in-memory. working_dir (`str`, *optional*) Intermediate directory for each Spark worker to write data to before moving it to `cache_dir`. Setting a non-NFS intermediate directory may improve performance. load_from_cache_file (`bool`): Whether to load the dataset from the cache if possible. Returns: [`Dataset`] Example: ```py >>> df = spark.createDataFrame( >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]], >>> columns=["id", "name"], >>> ) >>> ds = Dataset.from_spark(df) ``` """ # Dynamic import to avoid circular dependency from .io.spark import SparkDatasetReader if sys.platform == "win32": raise EnvironmentError("Dataset.from_spark is not currently supported on Windows") return SparkDatasetReader( df, split=split, features=features, streaming=False, cache_dir=cache_dir, keep_in_memory=keep_in_memory, working_dir=working_dir, load_from_cache_file=load_from_cache_file, **kwargs, ).read() def from_sql( sql: Union[str, "sqlalchemy.sql.Selectable"], con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ): """Create Dataset from SQL query or database table. Args: sql (`str` or `sqlalchemy.sql.Selectable`): SQL query to be executed or a table name. con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Connection`): A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) used to instantiate a database connection or a SQLite3/SQLAlchemy connection object. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. **kwargs (additional keyword arguments): Keyword arguments to be passed to [`SqlConfig`]. 
Returns: [`Dataset`] Example: ```py >>> # Fetch a database table >>> ds = Dataset.from_sql("test_data", "postgres:///db_name") >>> # Execute a SQL query on the table >>> ds = Dataset.from_sql("SELECT sentence FROM test_data", "postgres:///db_name") >>> # Use a Selectable object to specify the query >>> from sqlalchemy import select, text >>> stmt = select([text("sentence")]).select_from(text("test_data")) >>> ds = Dataset.from_sql(stmt, "postgres:///db_name") ``` <Tip> The returned dataset can only be cached if `con` is specified as URI string. </Tip> """ from .io.sql import SqlDatasetReader return SqlDatasetReader( sql, con, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs, ).read() def __setstate__(self, state): self.__dict__.update(state) maybe_register_dataset_for_temp_dir_deletion(self) return self def __del__(self): if hasattr(self, "_data"): del self._data if hasattr(self, "_indices"): del self._indices def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables self.__del__() def save_to_disk( self, dataset_path: PathLike, fs="deprecated", max_shard_size: Optional[Union[str, int]] = None, num_shards: Optional[int] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, ): """ Saves a dataset to a dataset directory, or in a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`. For [`Image`] and [`Audio`] data: All the Image() and Audio() data are stored in the arrow files. If you want to store paths or urls, please use the Value("string") type. Args: dataset_path (`str`): Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`) of the dataset directory where the dataset will be saved to. fs (`fsspec.spec.AbstractFileSystem`, *optional*): Instance of the remote filesystem where the dataset will be saved to. <Deprecated version="2.8.0"> `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0. Please use `storage_options` instead, e.g. `storage_options=fs.storage_options` </Deprecated> max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit (like `"50MB"`). num_shards (`int`, *optional*): Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`. <Added version="2.8.0"/> num_proc (`int`, *optional*): Number of processes when downloading and generating the dataset locally. Multiprocessing is disabled by default. <Added version="2.8.0"/> storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.8.0"/> Example: ```py >>> ds.save_to_disk("path/to/dataset/directory") >>> ds.save_to_disk("path/to/dataset/directory", max_shard_size="1GB") >>> ds.save_to_disk("path/to/dataset/directory", num_shards=1024) ``` """ if max_shard_size is not None and num_shards is not None: raise ValueError( "Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both." 
) if fs != "deprecated": warnings.warn( "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n" "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", FutureWarning, ) storage_options = fs.storage_options if self.list_indexes(): raise ValueError("please remove all the indexes using `dataset.drop_index` before saving a dataset") if num_shards is None: dataset_nbytes = self._estimate_nbytes() max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE) num_shards = int(dataset_nbytes / max_shard_size) + 1 num_shards = max(num_shards, num_proc or 1) num_proc = num_proc if num_proc is not None else 1 num_shards = num_shards if num_shards is not None else num_proc fs: fsspec.AbstractFileSystem fs, _ = url_to_fs(dataset_path, **(storage_options or {})) if not is_remote_filesystem(fs): parent_cache_files_paths = { Path(cache_filename["filename"]).resolve().parent for cache_filename in self.cache_files } # Check that the dataset doesn't overwrite iself. It can cause a permission error on Windows and a segfault on linux. if Path(dataset_path).expanduser().resolve() in parent_cache_files_paths: raise PermissionError( f"Tried to overwrite {Path(dataset_path).expanduser().resolve()} but a dataset can't overwrite itself." ) fs.makedirs(dataset_path, exist_ok=True) # Get json serializable state state = { key: self.__dict__[key] for key in [ "_fingerprint", "_format_columns", "_format_kwargs", "_format_type", "_output_all_columns", ] } state["_split"] = str(self.split) if self.split is not None else self.split state["_data_files"] = [ {"filename": f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"} for shard_idx in range(num_shards) ] for k in state["_format_kwargs"].keys(): try: json.dumps(state["_format_kwargs"][k]) except TypeError as e: raise TypeError( str(e) + f"\nThe format kwargs must be JSON serializable, but key '{k}' isn't." 
) from None # Get json serializable dataset info dataset_info = asdict(self._info) shards_done = 0 pbar = hf_tqdm( unit=" examples", total=len(self), desc=f"Saving the dataset ({shards_done}/{num_shards} shards)", ) kwargs_per_job = ( { "job_id": shard_idx, "shard": self.shard(num_shards=num_shards, index=shard_idx, contiguous=True), "fpath": posixpath.join(dataset_path, f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"), "storage_options": storage_options, } for shard_idx in range(num_shards) ) shard_lengths = [None] * num_shards shard_sizes = [None] * num_shards if num_proc > 1: with Pool(num_proc) as pool: with pbar: for job_id, done, content in iflatmap_unordered( pool, Dataset._save_to_disk_single, kwargs_iterable=kwargs_per_job ): if done: shards_done += 1 pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)") logger.debug(f"Finished writing shard number {job_id} of {num_shards}.") shard_lengths[job_id], shard_sizes[job_id] = content else: pbar.update(content) else: with pbar: for kwargs in kwargs_per_job: for job_id, done, content in Dataset._save_to_disk_single(**kwargs): if done: shards_done += 1 pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)") logger.debug(f"Finished writing shard number {job_id} of {num_shards}.") shard_lengths[job_id], shard_sizes[job_id] = content else: pbar.update(content) with fs.open( posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME), "w", encoding="utf-8" ) as state_file: json.dump(state, state_file, indent=2, sort_keys=True) with fs.open( posixpath.join(dataset_path, config.DATASET_INFO_FILENAME), "w", encoding="utf-8" ) as dataset_info_file: # Sort only the first level of keys, or we might shuffle fields of nested features if we use sort_keys=True sorted_keys_dataset_info = {key: dataset_info[key] for key in sorted(dataset_info)} json.dump(sorted_keys_dataset_info, dataset_info_file, indent=2) def _save_to_disk_single(job_id: int, shard: "Dataset", fpath: str, storage_options: Optional[dict]): batch_size = config.DEFAULT_MAX_BATCH_SIZE num_examples_progress_update = 0 writer = ArrowWriter( features=shard.features, path=fpath, storage_options=storage_options, embed_local_files=True, ) try: _time = time.time() for pa_table in shard.with_format("arrow").iter(batch_size): writer.write_table(pa_table) num_examples_progress_update += len(pa_table) if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: _time = time.time() yield job_id, False, num_examples_progress_update num_examples_progress_update = 0 finally: yield job_id, False, num_examples_progress_update num_examples, num_bytes = writer.finalize() writer.close() yield job_id, True, (num_examples, num_bytes) def _build_local_temp_path(uri_or_path: str) -> Path: """ Builds and returns a Path concatenating a local temporary dir with the dir path (or absolute/relative path extracted from the uri) passed. Args: uri_or_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`) to concatenate. 
Returns: :class:`Path`: the concatenated path (temp dir + path) """ src_dataset_path = Path(uri_or_path) tmp_dir = get_temporary_cache_files_directory() return Path(tmp_dir, src_dataset_path.relative_to(src_dataset_path.anchor)) def load_from_disk( dataset_path: str, fs="deprecated", keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None, ) -> "Dataset": """ Loads a dataset that was previously saved using [`save_to_disk`] from a dataset directory, or from a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`. Args: dataset_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3//my-bucket/dataset/train"`) of the dataset directory where the dataset will be loaded from. fs (`fsspec.spec.AbstractFileSystem`, *optional*): Instance of the remote filesystem where the dataset will be saved to. <Deprecated version="2.8.0"> `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0. Please use `storage_options` instead, e.g. `storage_options=fs.storage_options` </Deprecated> keep_in_memory (`bool`, defaults to `None`): Whether to copy the dataset in-memory. If `None`, the dataset will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the [improve performance](../cache#improve-performance) section. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.8.0"/> Returns: [`Dataset`] or [`DatasetDict`]: - If `dataset_path` is a path of a dataset directory, the dataset requested. - If `dataset_path` is a path of a dataset dict directory, a `datasets.DatasetDict` with each split. Example: ```py >>> ds = load_from_disk("path/to/dataset/directory") ``` """ if fs != "deprecated": warnings.warn( "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n" "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", FutureWarning, ) storage_options = fs.storage_options fs: fsspec.AbstractFileSystem fs, dataset_path = url_to_fs(dataset_path, **(storage_options or {})) dest_dataset_path = dataset_path dataset_dict_json_path = posixpath.join(dest_dataset_path, config.DATASETDICT_JSON_FILENAME) dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME) dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME) dataset_dict_is_file = fs.isfile(dataset_dict_json_path) dataset_info_is_file = fs.isfile(dataset_info_path) dataset_state_is_file = fs.isfile(dataset_state_json_path) if not dataset_info_is_file and not dataset_state_is_file: if dataset_dict_is_file: raise FileNotFoundError( f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead." ) raise FileNotFoundError( f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object but provided path is not a `Dataset`." ) if not dataset_info_is_file: if dataset_dict_is_file: raise FileNotFoundError( f"No such file: '{dataset_info_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead." ) raise FileNotFoundError( f"No such file: '{dataset_info_path}'. 
Expected to load a `Dataset` object but provided path is not a `Dataset`." ) if not dataset_state_is_file: if dataset_dict_is_file: raise FileNotFoundError( f"No such file: '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead." ) raise FileNotFoundError( f"No such file: '{dataset_state_json_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`." ) # copies file from filesystem if it is remote filesystem to local filesystem and modifies dataset_path to temp directory containing local copies if is_remote_filesystem(fs): src_dataset_path = dest_dataset_path dest_dataset_path = Dataset._build_local_temp_path(src_dataset_path) fs.download(src_dataset_path, dest_dataset_path.as_posix(), recursive=True) dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME) dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME) with open(dataset_state_json_path, encoding="utf-8") as state_file: state = json.load(state_file) with open(dataset_info_path, encoding="utf-8") as dataset_info_file: dataset_info = DatasetInfo.from_dict(json.load(dataset_info_file)) dataset_size = estimate_dataset_size( Path(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"] ) keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(dataset_size) table_cls = InMemoryTable if keep_in_memory else MemoryMappedTable arrow_table = concat_tables( thread_map( table_cls.from_file, [posixpath.join(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"]], tqdm_class=hf_tqdm, desc="Loading dataset from disk", # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached disable=len(state["_data_files"]) <= 16 or None, ) ) split = state["_split"] split = Split(split) if split is not None else split dataset = Dataset( arrow_table=arrow_table, info=dataset_info, split=split, fingerprint=state["_fingerprint"], ) format = { "type": state["_format_type"], "format_kwargs": state["_format_kwargs"], "columns": state["_format_columns"], "output_all_columns": state["_output_all_columns"], } dataset = dataset.with_format(**format) return dataset def data(self) -> Table: """The Apache Arrow table backing the dataset. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.data MemoryMappedTable text: string label: int64 ---- text: [["compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .","the soundtrack alone is worth the price of admission .","rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .","beneath the film's obvious determination to shock at any cost lies considerable skill and determination , backed by sheer nerve .","bielinsky is a filmmaker of impressive talent .","so beautifully acted and directed , it's clear that washington most certainly has a new career ahead of him if he so chooses .","a visual spectacle full of stunning images and effects .","a gentle and engrossing character study .","it's enough to watch huppert scheming , with her small , intelligent eyes as steady as any noir villain , and to enjoy the perfectly pitched web of tension that chabrol spins .","an engrossing portrait of uncompromising artists trying to create something original against the backdrop of a corporate music industry that only seems to care about the bottom line .",...,"ultimately , jane learns her place as a girl , softens up and loses some of the intensity that made her an interesting character to begin with .","ah-nuld's action hero days might be over .","it's clear why deuces wild , which was shot two years ago , has been gathering dust on mgm's shelf .","feels like nothing quite so much as a middle-aged moviemaker's attempt to surround himself with beautiful , half-naked women .","when the precise nature of matthew's predicament finally comes into sharp focus , the revelation fails to justify the build-up .","this picture is murder by numbers , and as easy to be bored by as your abc's , despite a few whopping shootouts .","hilarious musical comedy though stymied by accents thick as mud .","if you are into splatter movies , then you will probably have a reasonably good time with the salton sea .","a dull , simple-minded and stereotypical tale of drugs , death and mind-numbing indifference on the inner-city streets .","the feature-length stretch . . . strains the show's concept ."]] label: [[1,1,1,1,1,1,1,1,1,1,...,0,0,0,0,0,0,0,0,0,0]] ``` """ return self._data def cache_files(self) -> List[dict]: """The cache files containing the Apache Arrow table backing the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.cache_files [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}] ``` """ cache_files = list_table_cache_files(self._data) if self._indices is not None: cache_files += list_table_cache_files(self._indices) return [{"filename": cache_filename} for cache_filename in cache_files] def num_columns(self) -> int: """Number of columns in the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.num_columns 2 ``` """ return self._data.num_columns def num_rows(self) -> int: """Number of rows in the dataset (same as [`Dataset.__len__`]). 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.num_rows 1066 ``` """ if self._indices is not None: return self._indices.num_rows return self._data.num_rows def column_names(self) -> List[str]: """Names of the columns in the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.column_names ['text', 'label'] ``` """ return self._data.column_names def shape(self) -> Tuple[int, int]: """Shape of the dataset (number of columns, number of rows). Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.shape (1066, 2) ``` """ if self._indices is not None: return (self._indices.num_rows, self._data.num_columns) return self._data.shape def unique(self, column: str) -> List: """Return a list of the unique elements in a column. This is implemented in the low-level backend and as such, very fast. Args: column (`str`): Column name (list all the column names with [`~datasets.Dataset.column_names`]). Returns: `list`: List of unique elements in the given column. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.unique('label') [1, 0] ``` """ if column not in self._data.column_names: raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).") if self._indices is not None and self._indices.num_rows != self._data.num_rows: dataset = self.flatten_indices() else: dataset = self return dataset._data.column(column).unique().to_pylist() def class_encode_column(self, column: str, include_nulls: bool = False) -> "Dataset": """Casts the given column as [`~datasets.features.ClassLabel`] and updates the table. Args: column (`str`): The name of the column to cast (list all the column names with [`~datasets.Dataset.column_names`]) include_nulls (`bool`, defaults to `False`): Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label. <Added version="1.14.2"/> Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("boolq", split="validation") >>> ds.features {'answer': Value(dtype='bool', id=None), 'passage': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None)} >>> ds = ds.class_encode_column('answer') >>> ds.features {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None), 'passage': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None)} ``` """ # Sanity checks if column not in self._data.column_names: raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).") src_feat = self._info.features[column] if not isinstance(src_feat, Value): raise ValueError( f"Class encoding is only supported for {Value.__name__} column, and column {column} is {type(src_feat).__name__}." 
) if src_feat.dtype != "string" or (include_nulls and None in self.unique(column)): def stringify_column(batch): batch[column] = [ str(sample) if include_nulls or sample is not None else None for sample in batch[column] ] return batch dset = self.map( stringify_column, batched=True, desc="Stringifying the column", ) else: dset = self # Create the new feature class_names = sorted(str(sample) for sample in dset.unique(column) if include_nulls or sample is not None) dst_feat = ClassLabel(names=class_names) def cast_to_class_labels(batch): batch[column] = [ dst_feat.str2int(str(sample)) if include_nulls or sample is not None else None for sample in batch[column] ] return batch new_features = dset.features.copy() new_features[column] = dst_feat dset = dset.map( cast_to_class_labels, batched=True, features=new_features, desc="Casting to class labels", ) return dset def flatten(self, new_fingerprint: Optional[str] = None, max_depth=16) -> "Dataset": """Flatten the table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset with flattened columns. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad", split="train") >>> ds.features {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} >>> ds.flatten() Dataset({ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], num_rows: 87599 }) ``` """ dataset = copy.deepcopy(self) for depth in range(1, max_depth): if any(isinstance(field.type, pa.StructType) for field in dataset._data.schema): dataset._data = dataset._data.flatten() else: break dataset.info.features = self._info.features.flatten(max_depth=max_depth) dataset.info.features = Features({col: dataset.info.features[col] for col in dataset.data.column_names}) dataset._data = update_metadata_with_features(dataset._data, dataset.features) logger.info(f'Flattened dataset from depth {depth} to depth {1 if depth + 1 < max_depth else "unknown"}.') dataset._fingerprint = new_fingerprint return dataset def cast( self, features: Features, batch_size: Optional[int] = 1000, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, num_proc: Optional[int] = None, ) -> "Dataset": """ Cast the dataset to a new set of features. Args: features ([`Features`]): New features to cast the dataset to. The name of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. `str` <-> `ClassLabel` you should use [`~datasets.Dataset.map`] to update the Dataset. batch_size (`int`, defaults to `1000`): Number of examples per batch provided to cast. If `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to cast. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. 
load_from_cache_file (`bool`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`str`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running [`~datasets.Dataset.map`]. num_proc (`int`, *optional*, defaults to `None`): Number of processes for multiprocessing. By default it doesn't use multiprocessing. Returns: [`Dataset`]: A copy of the dataset with casted features. Example: ```py >>> from datasets import load_dataset, ClassLabel, Value >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> new_features = ds.features.copy() >>> new_features['label'] = ClassLabel(names=['bad', 'good']) >>> new_features['text'] = Value('large_string') >>> ds = ds.cast(new_features) >>> ds.features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='large_string', id=None)} ``` """ if sorted(features) != sorted(self._data.column_names): raise ValueError( f"The columns in features ({list(features)}) must be identical " f"as the columns in the dataset: {self._data.column_names}" ) schema = features.arrow_schema format = self.format dataset = self.with_format("arrow") # capture the PyArrow version here to make the lambda serializable on Windows dataset = dataset.map( partial(table_cast, schema=schema), batched=True, batch_size=batch_size, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, num_proc=num_proc, features=features, desc="Casting the dataset", ) dataset = dataset.with_format(**format) return dataset def cast_column(self, column: str, feature: FeatureType, new_fingerprint: Optional[str] = None) -> "Dataset": """Cast column to feature for decoding. Args: column (`str`): Column name. feature (`FeatureType`): Target feature. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. 
Returns: [`Dataset`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good'])) >>> ds.features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='string', id=None)} ``` """ if hasattr(feature, "decode_example"): dataset = copy.deepcopy(self) dataset._info.features[column] = feature dataset._fingerprint = new_fingerprint dataset._data = dataset._data.cast(dataset.features.arrow_schema) dataset._data = update_metadata_with_features(dataset._data, dataset.features) return dataset else: features = self.features features[column] = feature return self.cast(features) def remove_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset": """ Remove one or several column(s) in the dataset and the features associated to them. You can also remove a column using [`~datasets.Dataset.map`] with `remove_columns` but the present method is in-place (doesn't copy the data to a new dataset) and is thus faster. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to remove. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset object without the columns to remove. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.remove_columns('label') Dataset({ features: ['text'], num_rows: 1066 }) >>> ds.remove_columns(column_names=ds.column_names) # Removing all the columns returns an empty dataset with the `num_rows` property set to 0 Dataset({ features: [], num_rows: 0 }) ``` """ dataset = copy.deepcopy(self) if isinstance(column_names, str): column_names = [column_names] missing_columns = set(column_names) - set(self._data.column_names) if missing_columns: raise ValueError( f"Column name {list(missing_columns)} not in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" ) for column_name in column_names: del dataset._info.features[column_name] dataset._data = dataset._data.drop(column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset def rename_column( self, original_column_name: str, new_column_name: str, new_fingerprint: Optional[str] = None ) -> "Dataset": """ Rename a column in the dataset, and move the features associated to the original column under the new column name. Args: original_column_name (`str`): Name of the column to rename. new_column_name (`str`): New name for the column. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset with a renamed column. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.rename_column('label', 'label_new') Dataset({ features: ['text', 'label_new'], num_rows: 1066 }) ``` """ dataset = copy.deepcopy(self) if original_column_name not in dataset._data.column_names: raise ValueError( f"Original column name {original_column_name} not in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" ) if new_column_name in dataset._data.column_names: raise ValueError( f"New column name {new_column_name} already in the dataset. " f"Please choose a column name which is not already in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" ) if not new_column_name: raise ValueError("New column name is empty.") def rename(columns): return [new_column_name if col == original_column_name else col for col in columns] new_column_names = rename(self._data.column_names) if self._format_columns is not None: dataset._format_columns = rename(self._format_columns) dataset._info.features = Features( { new_column_name if col == original_column_name else col: feature for col, feature in self._info.features.items() } ) dataset._data = dataset._data.rename_columns(new_column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset def rename_columns(self, column_mapping: Dict[str, str], new_fingerprint: Optional[str] = None) -> "Dataset": """ Rename several columns in the dataset, and move the features associated to the original columns under the new column names. Args: column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset with renamed columns Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'}) Dataset({ features: ['text_new', 'label_new'], num_rows: 1066 }) ``` """ dataset = copy.deepcopy(self) extra_columns = set(column_mapping.keys()) - set(dataset.column_names) if extra_columns: raise ValueError( f"Original column names {extra_columns} not in the dataset. 
" f"Current columns in the dataset: {dataset._data.column_names}" ) number_of_duplicates_in_new_columns = len(column_mapping.values()) - len(set(column_mapping.values())) if number_of_duplicates_in_new_columns != 0: raise ValueError( "New column names must all be different, but this column mapping " f"has {number_of_duplicates_in_new_columns} duplicates" ) empty_new_columns = [new_col for new_col in column_mapping.values() if not new_col] if empty_new_columns: raise ValueError(f"New column names {empty_new_columns} are empty.") def rename(columns): return [column_mapping[col] if col in column_mapping else col for col in columns] new_column_names = rename(self._data.column_names) if self._format_columns is not None: dataset._format_columns = rename(self._format_columns) dataset._info.features = Features( { column_mapping[col] if col in column_mapping else col: feature for col, feature in (self._info.features or {}).items() } ) dataset._data = dataset._data.rename_columns(new_column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset def select_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset": """Select one or several column(s) in the dataset and the features associated to them. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to keep. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset object which only consists of selected columns. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.select_columns(['text']) Dataset({ features: ['text'], num_rows: 1066 }) ``` """ if isinstance(column_names, str): column_names = [column_names] missing_columns = set(column_names) - set(self._data.column_names) if missing_columns: raise ValueError( f"Column name {list(missing_columns)} not in the " "dataset. Current columns in the dataset: " f"{self._data.column_names}." ) dataset = copy.deepcopy(self) dataset._data = dataset._data.select(column_names) dataset._info.features = Features({col: self._info.features[col] for col in dataset._data.column_names}) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset def __len__(self): """Number of rows in the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.__len__ <bound method Dataset.__len__ of Dataset({ features: ['text', 'label'], num_rows: 1066 })> ``` """ return self.num_rows def __iter__(self): """Iterate through the examples. If a formatting is set with :meth:`Dataset.set_format` rows will be returned with the selected format. 
""" if self._indices is None: # Fast iteration # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch) format_kwargs = self._format_kwargs if self._format_kwargs is not None else {} formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs) batch_size = config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER for pa_subtable in table_iter(self.data, batch_size=batch_size): for i in range(pa_subtable.num_rows): pa_subtable_ex = pa_subtable.slice(i, 1) formatted_output = format_table( pa_subtable_ex, 0, formatter=formatter, format_columns=self._format_columns, output_all_columns=self._output_all_columns, ) yield formatted_output else: for i in range(self.num_rows): yield self._getitem( i, ) def iter(self, batch_size: int, drop_last_batch: bool = False): """Iterate through the batches of size `batch_size`. If a formatting is set with [`~datasets.Dataset.set_format`] rows will be returned with the selected format. Args: batch_size (:obj:`int`): size of each batch to yield. drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be dropped """ if self._indices is None: # Fast iteration # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch) format_kwargs = self._format_kwargs if self._format_kwargs is not None else {} formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs) for pa_subtable in table_iter(self.data, batch_size=batch_size, drop_last_batch=drop_last_batch): formatted_batch = format_table( pa_subtable, range(pa_subtable.num_rows), formatter=formatter, format_columns=self._format_columns, output_all_columns=self._output_all_columns, ) yield formatted_batch else: num_rows = self.num_rows if not drop_last_batch else self.num_rows // batch_size * batch_size for i in range(0, num_rows, batch_size): yield self._getitem( slice(i, i + batch_size), ) def __repr__(self): return f"Dataset({{\n features: {list(self._info.features.keys())},\n num_rows: {self.num_rows}\n}})" def format(self): return { "type": self._format_type, "format_kwargs": self._format_kwargs, "columns": self.column_names if self._format_columns is None else self._format_columns, "output_all_columns": self._output_all_columns, } def formatted_as( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """To be used in a `with` statement. Set `__getitem__` return format (type and columns). Args: type (`str`, *optional*): Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__`` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. `None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. 
""" old_format_type = self._format_type old_format_kwargs = self._format_kwargs old_format_columns = self._format_columns old_output_all_columns = self._output_all_columns try: self.set_format(type, columns, output_all_columns, **format_kwargs) yield finally: self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs) def set_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly. The format `type` (for example "numpy") is used to format batches when using `__getitem__`. It's also possible to use custom transforms for formatting using [`~datasets.Dataset.set_transform`]. Args: type (`str`, *optional*): Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. `None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. It is possible to call [`~datasets.Dataset.map`] after calling `set_format`. Since `map` may add new columns, then the list of formatted columns gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted as: ``` new formatted columns = (all columns - previously unformatted columns) ``` Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) >>> ds.set_format(type='numpy', columns=['text', 'label']) >>> ds.format {'type': 'numpy', 'format_kwargs': {}, 'columns': ['text', 'label'], 'output_all_columns': False} ``` """ format_kwargs.update(format_kwargs.pop("format_kwargs", {})) # allow to use self.set_format(**self.format) # Check that the format_type and format_kwargs are valid and make it possible to have a Formatter type = get_format_type_from_alias(type) get_formatter(type, features=self._info.features, **format_kwargs) # Check filter column if isinstance(columns, str): columns = [columns] if isinstance(columns, tuple): columns = list(columns) if columns is not None: missing_columns = set(columns) - set(self._data.column_names) if missing_columns: raise ValueError( f"Columns {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) if columns is not None: columns = columns.copy() # Ensures modifications made to the list after this call don't cause bugs self._format_type = type self._format_kwargs = format_kwargs self._format_columns = columns self._output_all_columns = output_all_columns logger.debug( "Set __getitem__(key) output type to %s for %s columns " " (when key is int or slice) and %s output other (un-formatted) columns.", "python objects" if type is None else type, "no" if columns is None else str(columns), "do" if output_all_columns else "don't", ) def reset_format(self): """Reset `__getitem__` return format to python objects and all columns. 
Same as `self.set_format()` Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) >>> ds.set_format(type='numpy', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) >>> ds.format {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], 'format_kwargs': {}, 'output_all_columns': False, 'type': 'numpy'} >>> ds.reset_format() >>> ds.format {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], 'format_kwargs': {}, 'output_all_columns': False, 'type': None} ``` """ self.set_format() def set_transform( self, transform: Optional[Callable], columns: Optional[List] = None, output_all_columns: bool = False, ): """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called. As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`]. Args: transform (`Callable`, *optional*): User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`]. A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch. This function is applied right before returning the objects in `__getitem__`. columns (`List[str]`, *optional*): Columns to format in the output. If specified, then the input batch of the transform only contains those columns. output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). If set to True, then the other un-formatted columns are kept with the output of the transform. Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') >>> def encode(batch): ... return tokenizer(batch['text'], padding=True, truncation=True, return_tensors='pt') >>> ds.set_transform(encode) >>> ds[0] {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), 'input_ids': tensor([ 101, 29353, 2135, 15102, 1996, 9428, 20868, 2890, 8663, 6895, 20470, 2571, 3663, 2090, 4603, 3017, 3008, 1998, 2037, 24211, 5637, 1998, 11690, 2336, 1012, 102]), 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])} ``` """ self.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform) def with_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly. The format `type` (for example "numpy") is used to format batches when using `__getitem__`. It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`]. Contrary to [`~datasets.Dataset.set_format`], `with_format` returns a new [`Dataset`] object. Args: type (`str`, *optional*): Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. 
`None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) >>> ds.format {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], 'format_kwargs': {}, 'output_all_columns': False, 'type': None} >>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) >>> ds.format {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], 'format_kwargs': {}, 'output_all_columns': False, 'type': 'tensorflow'} ``` """ dataset = copy.deepcopy(self) dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs) return dataset def with_transform( self, transform: Optional[Callable], columns: Optional[List] = None, output_all_columns: bool = False, ): """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called. As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`]. Contrary to [`~datasets.Dataset.set_transform`], `with_transform` returns a new [`Dataset`] object. Args: transform (`Callable`, `optional`): User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`]. A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch. This function is applied right before returning the objects in `__getitem__`. columns (`List[str]`, `optional`): Columns to format in the output. If specified, then the input batch of the transform only contains those columns. output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). If set to `True`, then the other un-formatted columns are kept with the output of the transform. Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> def encode(example): ... 
return tokenizer(example["text"], padding=True, truncation=True, return_tensors='pt') >>> ds = ds.with_transform(encode) >>> ds[0] {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), 'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617, 1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105, 1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102]), 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])} ``` """ dataset = copy.deepcopy(self) dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns) return dataset def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "Dataset": """ Prepare a dataset for the given task by casting the dataset's [`Features`] to standardized column names and types as detailed in [`datasets.tasks`](./task_templates). Casts [`datasets.DatasetInfo.features`] according to a task-specific schema. Intended for single-use only, so all task templates are removed from [`datasets.DatasetInfo.task_templates`] after casting. Args: task (`Union[str, TaskTemplate]`): The task to prepare the dataset for during training and evaluation. If `str`, supported tasks include: - `"text-classification"` - `"question-answering"` If [`TaskTemplate`], must be one of the task templates in [`datasets.tasks`](./task_templates). id (`int`, defaults to `0`): The id required to unambiguously identify the task template when multiple task templates of the same type are supported. """ # TODO(lewtun): Add support for casting nested features like answers.text and answers.answer_start in SQuAD if isinstance(task, str): tasks = [template.task for template in (self.info.task_templates or [])] compatible_templates = [template for template in (self.info.task_templates or []) if template.task == task] if not compatible_templates: raise ValueError( f"Task {task} is not compatible with this dataset! Available tasks: {list(unique_values(tasks))}" ) if not 0 <= id < len(compatible_templates): templates_list_str = "\n".join( f"- `{idx}` for task {template}" for idx, template in enumerate(compatible_templates) ) raise ValueError( f"Id {id} for task {task} is not in a valid range. Supported ids:\n{templates_list_str}" ) template = compatible_templates[id] elif isinstance(task, TaskTemplate): template = task else: raise ValueError( f"Expected a `str` or `datasets.TaskTemplate` object but got task {task} with type {type(task)}." 
) template = template.align_with_features(self.info.features) column_mapping = template.column_mapping columns_to_drop = [column for column in self.column_names if column not in column_mapping] dataset = self.remove_columns(columns_to_drop) dataset = dataset.rename_columns(column_mapping) # We found a template so now flush `DatasetInfo` to skip the template update in `DatasetInfo.__post_init__` dataset.info.task_templates = None dataset = dataset.cast(features=template.features) return dataset def _getitem(self, key: Union[int, slice, str, ListLike[int]], **kwargs) -> Union[Dict, List]: """ Can be used to index columns (by string names) or rows (by integer, slice, or list-like of integer indices) """ if isinstance(key, bool): raise TypeError("dataset index must be int, str, slice or collection of int, not bool") format_type = kwargs["format_type"] if "format_type" in kwargs else self._format_type format_columns = kwargs["format_columns"] if "format_columns" in kwargs else self._format_columns output_all_columns = ( kwargs["output_all_columns"] if "output_all_columns" in kwargs else self._output_all_columns ) format_kwargs = kwargs["format_kwargs"] if "format_kwargs" in kwargs else self._format_kwargs format_kwargs = format_kwargs if format_kwargs is not None else {} formatter = get_formatter(format_type, features=self._info.features, **format_kwargs) pa_subtable = query_table(self._data, key, indices=self._indices) formatted_output = format_table( pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns ) return formatted_output def __getitem__(self, key: Union[int, slice, Iterable[int]]) -> Dict: # noqa: F811 ... def __getitem__(self, key: str) -> List: # noqa: F811 ... def __getitem__(self, key): # noqa: F811 """Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools).""" return self._getitem(key) def __getitems__(self, keys: List) -> List: """Can be used to get a batch using a list of integers indices.""" batch = self.__getitem__(keys) n_examples = len(batch[next(iter(batch))]) return [{col: array[i] for col, array in batch.items()} for i in range(n_examples)] def cleanup_cache_files(self) -> int: """Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one. Be careful when running this command that no other process is currently using other cache files. Returns: `int`: Number of removed files. 
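You can inspect [`~datasets.Dataset.cache_files`] beforehand to see which files are currently in use and will therefore be kept (an illustrative sketch; the path shown is hypothetical):

```py
>>> ds.cache_files
[{'filename': '/path/to/cache/rotten_tomatoes/validation/dataset.arrow'}]
>>> removed = ds.cleanup_cache_files()  # removes the other "cache-*.arrow" files in that directory
```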
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.cleanup_cache_files() 10 ``` """ current_cache_files = [os.path.abspath(cache_file["filename"]) for cache_file in self.cache_files] if not current_cache_files: return 0 cache_directory = os.path.dirname(current_cache_files[0]) logger.info(f"Listing files in {cache_directory}") files: List[str] = os.listdir(cache_directory) files_to_remove = [] for f_name in files: full_name = os.path.abspath(os.path.join(cache_directory, f_name)) if f_name.startswith("cache-") and f_name.endswith(".arrow"): if full_name in current_cache_files: logger.info(f"Keeping currently used cache file at {full_name}") continue files_to_remove.append(full_name) for file_path in files_to_remove: logger.info(f"Removing {file_path}") os.remove(file_path) return len(files_to_remove) def _get_cache_file_path(self, fingerprint): if is_caching_enabled() and self.cache_files: cache_file_name = "cache-" + fingerprint + ".arrow" cache_directory = os.path.dirname(self.cache_files[0]["filename"]) else: cache_file_name = "cache-" + generate_random_fingerprint() + ".arrow" cache_directory = get_temporary_cache_files_directory() cache_file_path = os.path.join(cache_directory, cache_file_name) return cache_file_path def map( self, function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[Union[str, List[str]]] = None, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, fn_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, suffix_template: str = "_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str] = None, desc: Optional[str] = None, ) -> "Dataset": """ Apply a function to all the examples in the table (individually or in batches) and update the table. If your function returns a column that already exists, then it overwrites it. You can specify whether the function should be batched or not with the `batched` parameter: - If batched is `False`, then the function takes 1 example in and should return 1 example. An example is a dictionary, e.g. `{"text": "Hello there !"}`. - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples. A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`. - If batched is `True` and `batch_size` is `n > 1`, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples. Note that the last batch may have less than `n` examples. A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`. 
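For instance, assuming `ds` is a text dataset like the one in the Example section below (an illustrative sketch; the lambda functions are hypothetical):

```py
>>> # batched=False: one example in, one example out
>>> ds = ds.map(lambda example: {"length": len(example["text"])})
>>> # batched=True: a batch in, a batch of a possibly different size out.
>>> # Here each text is split into words; the original columns are removed so that
>>> # every returned column has the same, new number of rows.
>>> words = ds.map(
...     lambda batch: {"word": [w for text in batch["text"] for w in text.split()]},
...     batched=True,
...     remove_columns=ds.column_names,
... )
```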
Args: function (`Callable`): Function with one of the following signatures: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. If no function is provided, default to identity function: `lambda x: x`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (`bool`, defaults to `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`. If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`. drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`str`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file instead of the automatically generated one. disable_nullable (`bool`, defaults to `False`): Disallow null values in the table. fn_kwargs (`Dict`, *optional*, defaults to `None`): Keyword arguments to be passed to `function`. num_proc (`int`, *optional*, defaults to `None`): Max number of processes when generating cache. 
Already cached shards are loaded sequentially. suffix_template (`str`): If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each. Defaults to `"_{rank:05d}_of_{num_proc:05d}"`. For example, if `cache_file_name` is "processed.arrow", then for `rank=1` and `num_proc=4`, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix. new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. desc (`str`, *optional*, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while mapping examples. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> def add_prefix(example): ... example["text"] = "Review: " + example["text"] ... return example >>> ds = ds.map(add_prefix) >>> ds[0:3]["text"] ['Review: compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .', 'Review: the soundtrack alone is worth the price of admission .', 'Review: rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .'] # process a batch of examples >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True) # set number of processors >>> ds = ds.map(add_prefix, num_proc=4) ``` """ if keep_in_memory and cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `cache_file_name` but not both.") if num_proc is not None and num_proc <= 0: raise ValueError("num_proc must be an integer > 0.") # If the array is empty we do nothing (but we make sure to handle an empty indices mapping and remove the requested columns anyway) if len(self) == 0: if self._indices is not None: # empty indices mapping self = Dataset( self.data.slice(0, 0), info=self.info.copy(), split=self.split, fingerprint=new_fingerprint, ) if remove_columns: return self.remove_columns(remove_columns) else: return self if function is None: function = lambda x: x # noqa: E731 if isinstance(input_columns, str): input_columns = [input_columns] if input_columns is not None: missing_columns = set(input_columns) - set(self._data.column_names) if missing_columns: raise ValueError( f"Input column {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) if isinstance(remove_columns, str): remove_columns = [remove_columns] if remove_columns is not None: missing_columns = set(remove_columns) - set(self._data.column_names) if missing_columns: raise ValueError( f"Column to remove {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() if fn_kwargs is None: fn_kwargs = {} if num_proc is not None and num_proc > len(self): num_proc = len(self) logger.warning( f"num_proc must be <= {len(self)}. Reducing num_proc to {num_proc} for dataset of size {len(self)}." 
) dataset_kwargs = { "shard": self, "function": function, "with_indices": with_indices, "with_rank": with_rank, "input_columns": input_columns, "batched": batched, "batch_size": batch_size, "drop_last_batch": drop_last_batch, "remove_columns": remove_columns, "keep_in_memory": keep_in_memory, "writer_batch_size": writer_batch_size, "features": features, "disable_nullable": disable_nullable, "fn_kwargs": fn_kwargs, } if new_fingerprint is None: # we create a unique hash from the function, # current dataset file and the mapping args transform = format_transform_for_fingerprint(Dataset._map_single) kwargs_for_fingerprint = format_kwargs_for_fingerprint(Dataset._map_single, (), dataset_kwargs) kwargs_for_fingerprint["fingerprint_name"] = "new_fingerprint" new_fingerprint = update_fingerprint(self._fingerprint, transform, kwargs_for_fingerprint) else: validate_fingerprint(new_fingerprint) dataset_kwargs["new_fingerprint"] = new_fingerprint if self.cache_files: if cache_file_name is None: cache_file_name = self._get_cache_file_path(new_fingerprint) dataset_kwargs["cache_file_name"] = cache_file_name def load_processed_shard_from_cache(shard_kwargs): """Load a processed shard from cache if it exists, otherwise throw an error.""" shard = shard_kwargs["shard"] # Check if we've already cached this computation (indexed by a hash) if shard_kwargs["cache_file_name"] is not None: if os.path.exists(shard_kwargs["cache_file_name"]) and load_from_cache_file: info = shard.info.copy() info.features = features info.task_templates = None return Dataset.from_file(shard_kwargs["cache_file_name"], info=info, split=shard.split) raise NonExistentDatasetError num_shards = num_proc if num_proc is not None else 1 if batched and drop_last_batch: pbar_total = len(self) // num_shards // batch_size * num_shards * batch_size else: pbar_total = len(self) shards_done = 0 if num_proc is None or num_proc == 1: transformed_dataset = None try: transformed_dataset = load_processed_shard_from_cache(dataset_kwargs) logger.info(f"Loading cached processed dataset at {dataset_kwargs['cache_file_name']}") except NonExistentDatasetError: pass if transformed_dataset is None: with hf_tqdm( unit=" examples", total=pbar_total, desc=desc or "Map", ) as pbar: for rank, done, content in Dataset._map_single(**dataset_kwargs): if done: shards_done += 1 logger.debug(f"Finished processing shard number {rank} of {num_shards}.") transformed_dataset = content else: pbar.update(content) assert transformed_dataset is not None, "Failed to retrieve the result from map" # update fingerprint if the dataset changed if transformed_dataset._fingerprint != self._fingerprint: transformed_dataset._fingerprint = new_fingerprint return transformed_dataset else: def format_cache_file_name( cache_file_name: Optional[str], rank: Union[int, Literal["*"]], # noqa: F722 ) -> Optional[str]: if not cache_file_name: return cache_file_name sep = cache_file_name.rindex(".") base_name, extension = cache_file_name[:sep], cache_file_name[sep:] if isinstance(rank, int): cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension logger.info(f"Process #{rank} will write at {cache_file_name}") else: cache_file_name = ( base_name + suffix_template.replace("{rank:05d}", "{rank}").format(rank=rank, num_proc=num_proc) + extension ) return cache_file_name def format_new_fingerprint(new_fingerprint: str, rank: int) -> str: new_fingerprint = new_fingerprint + suffix_template.format(rank=rank, num_proc=num_proc) validate_fingerprint(new_fingerprint) 
return new_fingerprint prev_env = deepcopy(os.environ) # check if parallelism if off # from https://github.com/huggingface/tokenizers/blob/bb668bc439dc34389b71dbb8ce0c597f15707b53/tokenizers/src/utils/parallelism.rs#L22 if prev_env.get("TOKENIZERS_PARALLELISM", "false").lower() not in ( "", "off", "false", "f", "no", "n", "0", ): logger.warning("Setting TOKENIZERS_PARALLELISM=false for forked processes.") os.environ["TOKENIZERS_PARALLELISM"] = "false" shards = [ self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory) for rank in range(num_proc) ] kwargs_per_job = [ { **dataset_kwargs, "shard": shards[rank], "cache_file_name": format_cache_file_name(cache_file_name, rank), "rank": rank, "offset": sum(len(s) for s in shards[:rank]), "new_fingerprint": format_new_fingerprint(new_fingerprint, rank), } for rank in range(num_shards) ] transformed_shards = [None] * num_shards for rank in range(num_shards): try: transformed_shards[rank] = load_processed_shard_from_cache(kwargs_per_job[rank]) kwargs_per_job[rank] = None except NonExistentDatasetError: pass kwargs_per_job = [kwargs for kwargs in kwargs_per_job if kwargs is not None] # We try to create a pool with as many workers as dataset not yet cached. if kwargs_per_job: if len(kwargs_per_job) < num_shards: logger.info( f"Reprocessing {len(kwargs_per_job)}/{num_shards} shards because some of them were missing from the cache." ) with Pool(len(kwargs_per_job)) as pool: os.environ = prev_env logger.info(f"Spawning {num_proc} processes") with hf_tqdm( unit=" examples", total=pbar_total, desc=(desc or "Map") + f" (num_proc={num_proc})", ) as pbar: for rank, done, content in iflatmap_unordered( pool, Dataset._map_single, kwargs_iterable=kwargs_per_job ): if done: shards_done += 1 logger.debug(f"Finished processing shard number {rank} of {num_shards}.") transformed_shards[rank] = content else: pbar.update(content) # Avoids PermissionError on Windows (the error: https://github.com/huggingface/datasets/actions/runs/4026734820/jobs/6921621805) for kwargs in kwargs_per_job: del kwargs["shard"] else: logger.info(f"Loading cached processed dataset at {format_cache_file_name(cache_file_name, '*')}") assert ( None not in transformed_shards ), f"Failed to retrieve results from map: result list {transformed_shards} still contains None - at least one worker failed to return its results" logger.info(f"Concatenating {num_proc} shards") result = _concatenate_map_style_datasets(transformed_shards) # update fingerprint if the dataset changed if any( transformed_shard._fingerprint != shard._fingerprint for transformed_shard, shard in zip(transformed_shards, shards) ): result._fingerprint = new_fingerprint else: result._fingerprint = self._fingerprint return result def _map_single( shard: "Dataset", function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[List[str]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[List[str]] = None, keep_in_memory: bool = False, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, fn_kwargs: Optional[dict] = None, new_fingerprint: Optional[str] = None, rank: Optional[int] = None, offset: int = 0, ) -> Iterable[Tuple[int, bool, Union[int, "Dataset"]]]: """Apply a function to all the elements in the table (individually or in batches) and update the table (if function does update 
examples). Args: shard (`datasets.Dataset`): Dataset to map the transform on. function (`Callable`): with one of the following signature: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. If no function is provided, default to identity function: lambda x: x with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`Optional[List[str]]`, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function` batch_size (`int`, optional, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True` `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function` drop_last_batch (`bool`, default: `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. cache_file_name (`str`, optional, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file instead of the automatically generated one. disable_nullable (`bool`, defaults to `False`): Disallow null values in the table. fn_kwargs (`Dict`, optional, defaults to `None`): Keyword arguments to be passed to `function` new_fingerprint (`str`, optional, defaults to `None`): the new fingerprint of the dataset after transform. 
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments rank: (`int`, optional, defaults to `None`): If specified, this is the process rank when doing multiprocessing offset: (`int`, defaults to 0): If specified, this is an offset applied to the indices passed to `function` if `with_indices=True`. """ if fn_kwargs is None: fn_kwargs = {} # If we do batch computation but no batch size is provided, default to the full dataset if batched and (batch_size is None or batch_size <= 0): batch_size = shard.num_rows # We set this variable to True after processing the first example/batch in # `apply_function_on_filtered_inputs` if the map function returns a dict. # If set to False, no new arrow table will be created update_data = None format_kwargs = shard._format_kwargs.copy() # Lazy formatting is only available for the default format (None/python) if not input_columns and shard._format_type is None: format_kwargs["lazy"] = True input_formatter = get_formatter( shard._format_type, features=shard.features, **format_kwargs, ) class NumExamplesMismatchError(Exception): pass def validate_function_output(processed_inputs, indices): """Validate output of the map function.""" if processed_inputs is not None and not isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame)): raise TypeError( f"Provided `function` which is applied to all elements of table returns a variable of type {type(processed_inputs)}. Make sure provided `function` returns a variable of type `dict` (or a pyarrow table) to update the dataset or `None` if you are only interested in side effects." ) elif isinstance(indices, list) and isinstance(processed_inputs, Mapping): allowed_batch_return_types = (list, np.ndarray, pd.Series) if config.POLARS_AVAILABLE and "polars" in sys.modules: import polars as pl allowed_batch_return_types += (pl.Series, pl.DataFrame) if config.TF_AVAILABLE and "tensorflow" in sys.modules: import tensorflow as tf allowed_batch_return_types += (tf.Tensor,) if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch allowed_batch_return_types += (torch.Tensor,) if config.JAX_AVAILABLE and "jax" in sys.modules: import jax.numpy as jnp allowed_batch_return_types += (jnp.ndarray,) all_dict_values_are_lists = all( isinstance(value, allowed_batch_return_types) for value in processed_inputs.values() ) if all_dict_values_are_lists is False: raise TypeError( f"Provided `function` which is applied to all elements of table returns a `dict` of types {[type(x) for x in processed_inputs.values()]}. When using `batched=True`, make sure provided `function` returns a `dict` of types like `{allowed_batch_return_types}`." 
) def apply_function_on_filtered_inputs(pa_inputs, indices, check_same_num_examples=False, offset=0): """Utility to apply the function on a selection of columns.""" nonlocal update_data inputs = format_table( pa_inputs, 0 if not batched else range(pa_inputs.num_rows), format_columns=input_columns, formatter=input_formatter, ) fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns] if offset == 0: effective_indices = indices else: effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset additional_args = () if with_indices: additional_args += (effective_indices,) if with_rank: additional_args += (rank,) processed_inputs = function(*fn_args, *additional_args, **fn_kwargs) if isinstance(processed_inputs, LazyDict): processed_inputs = { k: v for k, v in processed_inputs.data.items() if k not in processed_inputs.keys_to_format } returned_lazy_dict = True else: returned_lazy_dict = False if update_data is None: # Check if the function returns updated examples update_data = isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame)) validate_function_output(processed_inputs, indices) if not update_data: return None # Nothing to update, let's move on if shard._format_type or input_columns: # TODO(QL, MS): ideally the behavior should be the same even if the dataset is formatted (may require major release) inputs_to_merge = dict(zip(pa_inputs.column_names, pa_inputs.itercolumns())) elif isinstance(inputs, LazyDict): inputs_to_merge = { k: (v if k not in inputs.keys_to_format else pa_inputs[k]) for k, v in inputs.data.items() } else: inputs_to_merge = inputs if remove_columns is not None: for column in remove_columns: # `function` can modify input in-place causing column to be already removed. if column in inputs_to_merge: inputs_to_merge.pop(column) if returned_lazy_dict and column in processed_inputs: processed_inputs.pop(column) if check_same_num_examples: input_num_examples = len(pa_inputs) processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))]) if input_num_examples != processed_inputs_num_examples: raise NumExamplesMismatchError() if isinstance(inputs, Mapping) and isinstance(processed_inputs, Mapping): # The .map() transform *updates* the dataset: # the output dictionary contains both the the input data and the output data. # The output dictionary may contain Arrow values from `inputs_to_merge` so that we can re-write them efficiently. 
return {**inputs_to_merge, **processed_inputs} else: return processed_inputs def init_buffer_and_writer(): # Prepare output buffer and batched writer in memory or on file if we update the table writer_features = features if writer_features is None: writer_features = shard.features update_features = True else: update_features = False if keep_in_memory or cache_file_name is None: buf_writer = pa.BufferOutputStream() tmp_file = None writer = ArrowWriter( features=writer_features, stream=buf_writer, writer_batch_size=writer_batch_size, update_features=update_features, fingerprint=new_fingerprint, disable_nullable=disable_nullable, ) else: buf_writer = None logger.info(f"Caching processed dataset at {cache_file_name}") tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(cache_file_name), delete=False) writer = ArrowWriter( features=writer_features, path=tmp_file.name, writer_batch_size=writer_batch_size, update_features=update_features, fingerprint=new_fingerprint, disable_nullable=disable_nullable, ) return buf_writer, writer, tmp_file num_examples_progress_update = 0 # If `update_data` is True after processing the first example/batch, initalize these resources with `init_buffer_and_writer` buf_writer, writer, tmp_file = None, None, None # Check if Polars is available and import it if so if config.POLARS_AVAILABLE and "polars" in sys.modules: import polars as pl # Optionally initialize the writer as a context manager with contextlib.ExitStack() as stack: try: arrow_formatted_shard = shard.with_format("arrow") # Loop over single examples or batches and write to buffer/file if examples are to be updated if not batched: shard_iterable = enumerate(arrow_formatted_shard) else: num_rows = len(shard) if not drop_last_batch else len(shard) // batch_size * batch_size shard_iterable = zip( range(0, num_rows, batch_size), arrow_formatted_shard.iter(batch_size, drop_last_batch=drop_last_batch), ) if not batched: _time = time.time() for i, example in shard_iterable: example = apply_function_on_filtered_inputs(example, i, offset=offset) if update_data: if i == 0: buf_writer, writer, tmp_file = init_buffer_and_writer() stack.enter_context(writer) if isinstance(example, pa.Table): writer.write_row(example) elif isinstance(example, pd.DataFrame): writer.write_row(pa.Table.from_pandas(example)) elif ( config.POLARS_AVAILABLE and "polars" in sys.modules and isinstance(example, pl.DataFrame) ): writer.write_row(example.to_arrow()) else: writer.write(example) num_examples_progress_update += 1 if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: _time = time.time() yield rank, False, num_examples_progress_update num_examples_progress_update = 0 else: _time = time.time() for i, batch in shard_iterable: num_examples_in_batch = len(batch) indices = list( range(*(slice(i, i + batch_size).indices(shard.num_rows))) ) # Something simpler? try: batch = apply_function_on_filtered_inputs( batch, indices, check_same_num_examples=len(shard.list_indexes()) > 0, offset=offset, ) except NumExamplesMismatchError: raise DatasetTransformationNotAllowedError( "Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index() to remove your index and then re-add it." 
) from None if update_data: if i == 0: buf_writer, writer, tmp_file = init_buffer_and_writer() stack.enter_context(writer) if isinstance(batch, pa.Table): writer.write_table(batch) elif isinstance(batch, pd.DataFrame): writer.write_table(pa.Table.from_pandas(batch)) elif ( config.POLARS_AVAILABLE and "polars" in sys.modules and isinstance(batch, pl.DataFrame) ): writer.write_table(batch.to_arrow()) else: writer.write_batch(batch) num_examples_progress_update += num_examples_in_batch if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: _time = time.time() yield rank, False, num_examples_progress_update num_examples_progress_update = 0 if update_data and writer is not None: writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file except (Exception, KeyboardInterrupt): yield rank, False, num_examples_progress_update if update_data: if writer is not None: writer.finalize() if tmp_file is not None: tmp_file.close() if os.path.exists(tmp_file.name): os.remove(tmp_file.name) raise yield rank, False, num_examples_progress_update if update_data and tmp_file is not None: tmp_file.close() shutil.move(tmp_file.name, cache_file_name) umask = os.umask(0o666) os.umask(umask) os.chmod(cache_file_name, 0o666 & ~umask) if update_data: # Create new Dataset from buffer or file info = shard.info.copy() info.features = writer._features info.task_templates = None if buf_writer is None: yield rank, True, Dataset.from_file(cache_file_name, info=info, split=shard.split) else: yield rank, True, Dataset.from_buffer(buf_writer.getvalue(), info=info, split=shard.split) else: yield rank, True, shard inplace=False, ignore_kwargs=["load_from_cache_file", "cache_file_name", "desc"], version="2.0.1" ) def filter( self, function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, fn_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, suffix_template: str = "_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str] = None, desc: Optional[str] = None, ) -> "Dataset": """Apply a filter function to all the elements in the table in batches and update the table so that the dataset only includes examples according to the filter function. Args: function (`Callable`): Callable with one of the following signatures: - `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) If no function is provided, defaults to an always `True` function: `lambda x: True`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (`bool`, defaults to `False`): Provide process rank to `function`. 
Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`str` or `List[str]`, *optional*): The columns to be passed into `function` as positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched = True`. If `batched = False`, one example per batch is passed to `function`. If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`str`, *optional*): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. fn_kwargs (`dict`, *optional*): Keyword arguments to be passed to `function`. num_proc (`int`, *optional*): Number of processes for multiprocessing. By default it doesn't use multiprocessing. suffix_template (`str`): If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each. For example, if `cache_file_name` is `"processed.arrow"`, then for `rank = 1` and `num_proc = 4`, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix (default `_{rank:05d}_of_{num_proc:05d}`). new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. desc (`str`, *optional*, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while filtering examples. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.filter(lambda x: x["label"] == 1) Dataset({ features: ['text', 'label'], num_rows: 533 }) ``` """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.filter` on a dataset with attached indexes is not allowed. 
You can first run `.drop_index() to remove your index and then re-add it.`" ) if function is None: function = lambda x: True # noqa: E731 if len(self) == 0: return self indices = self.map( function=partial( get_indices_from_mask_function, function, batched, with_indices, with_rank, input_columns, self._indices, ), with_indices=True, with_rank=True, features=Features({"indices": Value("uint64")}), batched=True, batch_size=batch_size, remove_columns=self.column_names, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, fn_kwargs=fn_kwargs, num_proc=num_proc, suffix_template=suffix_template, new_fingerprint=new_fingerprint, input_columns=input_columns, desc=desc or "Filter", ) new_dataset = copy.deepcopy(self) new_dataset._indices = indices.data new_dataset._fingerprint = new_fingerprint return new_dataset def flatten_indices( self, keep_in_memory: bool = False, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, num_proc: Optional[int] = None, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create and cache a new Dataset by flattening the indices mapping. Args: keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. cache_file_name (`str`, *optional*, default `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. features (`Optional[datasets.Features]`, defaults to `None`): Use a specific [`Features`] to store the cache file instead of the automatically generated one. disable_nullable (`bool`, defaults to `False`): Allow null values in the table. num_proc (`int`, optional, default `None`): Max number of processes when generating cache. Already cached shards are loaded sequentially new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments """ return self.map( batched=True, # for speed keep_in_memory=keep_in_memory, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, features=features, disable_nullable=disable_nullable, new_fingerprint=new_fingerprint, desc="Flattening the indices", num_proc=num_proc, ) def _new_dataset_with_indices( self, indices_cache_file_name: Optional[str] = None, indices_buffer: Optional[pa.Buffer] = None, fingerprint: Optional[str] = None, ) -> "Dataset": """Return a new Dataset obtained by adding indices (provided in indices_cache_file_name or in a buffer) to the current Dataset. 
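An indices mapping is typically produced when non-contiguous rows are selected (a minimal sketch; contiguous ranges are sliced directly and need no mapping):

```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> picked = ds.select([5, 3, 1])  # non-contiguous indices -> an indices mapping is attached
>>> picked._indices is not None
True
```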
""" if indices_cache_file_name is None and indices_buffer is None: raise ValueError("At least one of indices_cache_file_name or indices_buffer must be provided.") if fingerprint is None: raise ValueError("please specify a fingerprint for the dataset with indices") if indices_cache_file_name is not None: indices_table = MemoryMappedTable.from_file(indices_cache_file_name) else: indices_table = InMemoryTable.from_buffer(indices_buffer) # Return new Dataset object # don't forget to copy the objects return Dataset( self._data, info=self.info.copy(), split=self.split, indices_table=indices_table, fingerprint=fingerprint, ) def select( self, indices: Iterable, keep_in_memory: bool = False, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new dataset with rows selected following the list/array of indices. Args: indices (`range`, `list`, `iterable`, `ndarray` or `Series`): Range, list or 1D-array of integer indices for indexing. If the indices correspond to a contiguous range, the Arrow table is simply sliced. However passing a list of indices that are not contiguous creates indices mapping, which is much less efficient, but still faster than recreating an Arrow table made of the requested rows. keep_in_memory (`bool`, defaults to `False`): Keep the indices mapping in memory instead of writing it to a cache file. indices_cache_file_name (`str`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the indices mapping instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.select(range(4)) Dataset({ features: ['text', 'label'], num_rows: 4 }) ``` """ if keep_in_memory and indices_cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.") if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." 
) # If the array is empty we do nothing if len(self) == 0: return self # If indices is a PyArrow array, we convert to NumPy if isinstance(indices, (pa.Array, pa.ChunkedArray)): indices = indices.to_numpy().astype(np.int64) # Convert generator objects to lists if isinstance(indices, Iterator): indices = list(indices) # If the indices are contiguous, simply slice the arrow table if isinstance(indices, range): if _is_range_contiguous(indices) and indices.start >= 0: start, length = indices.start, indices.stop - indices.start return self._select_contiguous(start, length, new_fingerprint=new_fingerprint) else: try: start = next(iter(indices)) except StopIteration: # if `indices` is an empty iterable, we return an empty dataset return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint) if start >= 0: counter_from_start = itertools.count(start=start) if all(i == j for i, j in zip(indices, counter_from_start)): length = next(counter_from_start) - start return self._select_contiguous(start, length, new_fingerprint=new_fingerprint) # If not contiguous, we need to create a new indices mapping return self._select_with_indices_mapping( indices, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=new_fingerprint, ) def _select_contiguous( self, start: int, length: int, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new dataset with rows from a contiguous slice of data. The slice is defined by that start index and its length. Args: start (`int`): start index. length (`int`): length of the slice to select. new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds._select_contiguous(0, 4) Dataset({ features: ['text', 'label'], num_rows: 4 }) ``` """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self _check_valid_indices_value(start, len(self)) _check_valid_indices_value(start + length - 1, len(self)) if self._indices is None or length == 0: return Dataset( self.data.slice(start, length), info=self.info.copy(), split=self.split, fingerprint=new_fingerprint, ) else: return Dataset( self.data, info=self.info.copy(), split=self.split, indices_table=self._indices.slice(start, length), fingerprint=new_fingerprint, ) def _select_with_indices_mapping( self, indices: Iterable, keep_in_memory: bool = False, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new dataset with rows selected following the list/array of indices. The new dataset is made by creating a new indices mapping on top of the main arrow table. Args: indices (sequence, iterable, range, ndarray or Series): List or 1D-array of integer indices for indexing. keep_in_memory (`bool`, default `False`): Keep the indices mapping in memory instead of writing it to a cache file. indices_cache_file_name (`str`, optional, default `None`): Provide the name of a path for the cache file. 
It is used to store the indices mapping instead of the automatically generated cache file name. writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds._select_with_indices_mapping(range(4)) Dataset({ features: ['text', 'label'], num_rows: 4 }) ``` """ if keep_in_memory and indices_cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.") if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self # Prepare the writer for our indices arrow table if keep_in_memory or indices_cache_file_name is None: buf_writer = pa.BufferOutputStream() tmp_file = None writer = ArrowWriter( stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices" ) else: buf_writer = None logger.info(f"Caching indices mapping at {indices_cache_file_name}") tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(indices_cache_file_name), delete=False) writer = ArrowWriter( path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices" ) indices = indices if isinstance(indices, list) else list(indices) size = len(self) if indices: _check_valid_indices_value(int(max(indices)), size=size) _check_valid_indices_value(int(min(indices)), size=size) else: return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint) indices_array = pa.array(indices, type=pa.uint64()) # Check if we need to convert indices if self._indices is not None: indices_array = self._indices.column(0).take(indices_array) indices_table = pa.Table.from_arrays([indices_array], names=["indices"]) with writer: try: writer.write_table(indices_table) writer.finalize() # close_stream=bool(buf_writer is None)) We only close if we are writing in a file except (Exception, KeyboardInterrupt): if tmp_file is not None: tmp_file.close() if os.path.exists(tmp_file.name): os.remove(tmp_file.name) raise if tmp_file is not None: tmp_file.close() shutil.move(tmp_file.name, indices_cache_file_name) umask = os.umask(0o666) os.umask(umask) os.chmod(indices_cache_file_name, 0o666 & ~umask) # Return new Dataset object if buf_writer is None: return self._new_dataset_with_indices( indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint ) else: return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint) def sort( self, column_names: Union[str, Sequence_[str]], reverse: Union[bool, Sequence_[bool]] = False, kind="deprecated", null_placement: str = "at_end", keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: 
Optional[str] = None, ) -> "Dataset": """Create a new dataset sorted according to a single or multiple columns. Args: column_names (`Union[str, Sequence[str]]`): Column name(s) to sort by. reverse (`Union[bool, Sequence[bool]]`, defaults to `False`): If `True`, sort by descending order rather than ascending. If a single bool is provided, the value is applied to the sorting of all column names. Otherwise a list of bools with the same length and order as column_names must be provided. kind (`str`, *optional*): Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`, The default is `quicksort`. Note that both `stable` and `mergesort` use `timsort` under the covers and, in general, the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility. <Deprecated version="2.8.0"> `kind` was deprecated in version 2.10.0 and will be removed in 3.0.0. </Deprecated> null_placement (`str`, defaults to `at_end`): Put `None` values at the beginning if `at_start` or `first` or at the end if `at_end` or `last` <Added version="1.14.2"/> keep_in_memory (`bool`, defaults to `False`): Keep the sorted indices in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the sorted indices can be identified, use it instead of recomputing. indices_cache_file_name (`str`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the sorted indices instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. Higher value gives smaller cache files, lower value consume less temporary memory. new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset('rotten_tomatoes', split='validation') >>> ds['label'][:10] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] >>> sorted_ds = ds.sort('label') >>> sorted_ds['label'][:10] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] >>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False]) >>> another_sorted_ds['label'][:10] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ``` """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self # Deprecation warning if kind != "deprecated": warnings.warn( "'kind' was deprecated in version 2.10.0 and will be removed in 3.0.0.", category=FutureWarning, ) # Check proper format of and for duplicates in column_names if isinstance(column_names, str): column_names = [column_names] # Check proper format and length of reverse if not isinstance(reverse, bool): if len(reverse) != len(column_names): raise ValueError( "Parameter 'reverse' should be either a boolean or a list of booleans with the same length as 'column_names'." ) else: reverse = [reverse] * len(column_names) # Check whether column name(s) exist in dataset for column in column_names: if not isinstance(column, str) or column not in self._data.column_names: raise ValueError( f"Column '{column}' not found in the dataset. 
Please provide a column selected in: {self._data.column_names}"
                )

        # Change null_placement to conform to pyarrow's sort_indices() while ensuring backwards compatibility
        if null_placement not in ["at_start", "at_end"]:
            if null_placement == "first":
                null_placement = "at_start"
            elif null_placement == "last":
                null_placement = "at_end"
            else:
                raise ValueError(
                    f"null_placement '{null_placement}' is an invalid parameter value. Must be either 'last', 'at_end', 'first' or 'at_start'."
                )

        load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()

        # Check if we've already cached this computation (indexed by a hash)
        if self.cache_files:
            if indices_cache_file_name is None:
                # we create a unique hash from the function, current dataset file and the mapping args
                indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
            if os.path.exists(indices_cache_file_name) and load_from_cache_file:
                logger.info(f"Loading cached sorted indices for dataset at {indices_cache_file_name}")
                return self._new_dataset_with_indices(
                    fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
                )

        sort_table = query_table(
            table=self._data,
            key=slice(0, len(self)),
            indices=self._indices,
        )

        sort_keys = [
            (col, "ascending" if not col_reverse else "descending") for col, col_reverse in zip(column_names, reverse)
        ]

        indices = pc.sort_indices(sort_table, sort_keys=sort_keys, null_placement=null_placement)

        return self.select(
            indices=indices,
            keep_in_memory=keep_in_memory,
            indices_cache_file_name=indices_cache_file_name,
            writer_batch_size=writer_batch_size,
            new_fingerprint=new_fingerprint,
        )

    @transmit_format
    @fingerprint_transform(
        inplace=False, randomized_function=True, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"]
    )
    def shuffle(
        self,
        seed: Optional[int] = None,
        generator: Optional[np.random.Generator] = None,
        keep_in_memory: bool = False,
        load_from_cache_file: Optional[bool] = None,
        indices_cache_file_name: Optional[str] = None,
        writer_batch_size: Optional[int] = 1000,
        new_fingerprint: Optional[str] = None,
    ) -> "Dataset":
        """Create a new Dataset where the rows are shuffled.

        Currently shuffling uses numpy random generators.
        You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64).

        Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping.
        However as soon as your [`Dataset`] has an indices mapping, the speed can become 10x slower.
        This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore.
        To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping.
        This may take a lot of time depending on the size of your dataset though:

        ```python
        my_dataset[0]  # fast
        my_dataset = my_dataset.shuffle(seed=42)
        my_dataset[0]  # up to 10x slower
        my_dataset = my_dataset.flatten_indices()  # rewrite the shuffled dataset on disk as contiguous chunks of data
        my_dataset[0]  # fast again
        ```

        In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`].
It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal: ```python my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=128) for example in enumerate(my_iterable_dataset): # fast pass shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100) for example in enumerate(shuffled_iterable_dataset): # as fast as before pass ``` Args: seed (`int`, *optional*): A seed to initialize the default BitGenerator if `generator=None`. If `None`, then fresh, unpredictable entropy will be pulled from the OS. If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state. generator (`numpy.random.Generator`, *optional*): Numpy random Generator to use to compute the permutation of the dataset rows. If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). keep_in_memory (`bool`, default `False`): Keep the shuffled indices in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the shuffled indices can be identified, use it instead of recomputing. indices_cache_file_name (`str`, *optional*): Provide the name of a path for the cache file. It is used to store the shuffled indices instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds['label'][:10] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # set a seed >>> shuffled_ds = ds.shuffle(seed=42) >>> shuffled_ds['label'][:10] [1, 0, 1, 1, 0, 0, 0, 0, 0, 0] ``` """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self if keep_in_memory and indices_cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.") if seed is not None and generator is not None: raise ValueError("Both `seed` and `generator` were provided. 
Please specify just one of them.")
        if generator is not None and not isinstance(generator, np.random.Generator):
            raise ValueError("The provided generator must be an instance of numpy.random.Generator")

        load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()

        if generator is None:
            if seed is None:
                _, seed, pos, *_ = np.random.get_state()
                seed = seed[pos] if pos < 624 else seed[0]
                _ = np.random.random()  # do 1 step of rng
            generator = np.random.default_rng(seed)

        # Check if we've already cached this computation (indexed by a hash)
        if self.cache_files:
            if indices_cache_file_name is None:
                # we create a unique hash from the function, current dataset file and the mapping args
                indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
            if os.path.exists(indices_cache_file_name) and load_from_cache_file:
                logger.info(f"Loading cached shuffled indices for dataset at {indices_cache_file_name}")
                return self._new_dataset_with_indices(
                    fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
                )

        permutation = generator.permutation(len(self))

        return self.select(
            indices=permutation,
            keep_in_memory=keep_in_memory,
            indices_cache_file_name=indices_cache_file_name if not keep_in_memory else None,
            writer_batch_size=writer_batch_size,
            new_fingerprint=new_fingerprint,
        )

    @transmit_format
    @fingerprint_transform(
        inplace=False,
        randomized_function=True,
        fingerprint_names=["train_new_fingerprint", "test_new_fingerprint"],
        ignore_kwargs=["load_from_cache_file", "train_indices_cache_file_name", "test_indices_cache_file_name"],
    )
    def train_test_split(
        self,
        test_size: Union[float, int, None] = None,
        train_size: Union[float, int, None] = None,
        shuffle: bool = True,
        stratify_by_column: Optional[str] = None,
        seed: Optional[int] = None,
        generator: Optional[np.random.Generator] = None,
        keep_in_memory: bool = False,
        load_from_cache_file: Optional[bool] = None,
        train_indices_cache_file_name: Optional[str] = None,
        test_indices_cache_file_name: Optional[str] = None,
        writer_batch_size: Optional[int] = 1000,
        train_new_fingerprint: Optional[str] = None,
        test_new_fingerprint: Optional[str] = None,
    ) -> "DatasetDict":
        """Return a dictionary ([`datasets.DatasetDict`]) with two random train and test subsets (`train` and `test` `Dataset` splits).
        Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`.

        This method is similar to scikit-learn `train_test_split`.

        Args:
            test_size (`int` or `float`, *optional*):
                Size of the test split.
                If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the test split.
                If `int`, represents the absolute number of test samples.
                If `None`, the value is set to the complement of the train size.
                If `train_size` is also `None`, it will be set to `0.25`.
            train_size (`int` or `float`, *optional*):
                Size of the train split.
                If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the train split.
                If `int`, represents the absolute number of train samples.
                If `None`, the value is automatically set to the complement of the test size.
            shuffle (`bool`, *optional*, defaults to `True`):
                Whether or not to shuffle the data before splitting.
            stratify_by_column (`str`, *optional*, defaults to `None`):
                The column name of labels to be used to perform stratified split of data.
            seed (`int`, *optional*):
                A seed to initialize the default BitGenerator if `generator=None`.
                If `None`, then fresh, unpredictable entropy will be pulled from the OS.
If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state. generator (`numpy.random.Generator`, *optional*): Numpy random Generator to use to compute the permutation of the dataset rows. If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). keep_in_memory (`bool`, defaults to `False`): Keep the splits indices in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the splits indices can be identified, use it instead of recomputing. train_cache_file_name (`str`, *optional*): Provide the name of a path for the cache file. It is used to store the train split indices instead of the automatically generated cache file name. test_cache_file_name (`str`, *optional*): Provide the name of a path for the cache file. It is used to store the test split indices instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. train_new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the train set after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments test_new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the test set after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds = ds.train_test_split(test_size=0.2, shuffle=True) DatasetDict({ train: Dataset({ features: ['text', 'label'], num_rows: 852 }) test: Dataset({ features: ['text', 'label'], num_rows: 214 }) }) # set a seed >>> ds = ds.train_test_split(test_size=0.2, seed=42) # stratified split >>> ds = load_dataset("imdb",split="train") Dataset({ features: ['text', 'label'], num_rows: 25000 }) >>> ds = ds.train_test_split(test_size=0.2, stratify_by_column="label") DatasetDict({ train: Dataset({ features: ['text', 'label'], num_rows: 20000 }) test: Dataset({ features: ['text', 'label'], num_rows: 5000 }) }) ``` """ from .dataset_dict import DatasetDict # import here because of circular dependency if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.train_test_split` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return DatasetDict({"train": self, "test": self}) if test_size is None and train_size is None: test_size = 0.25 # Safety checks similar to scikit-learn's ones. 
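        # For example (illustrative numbers only, not taken from any real dataset): with n_samples=1000,
        # test_size=0.2 and train_size=None, the checks below pass and later n_test = ceil(0.2 * 1000) = 200
        # and n_train = 1000 - 200 = 800.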
# (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750) n_samples = len(self) if ( isinstance(test_size, int) and (test_size >= n_samples or test_size <= 0) or isinstance(test_size, float) and (test_size <= 0 or test_size >= 1) ): raise ValueError( f"test_size={test_size} should be either positive and smaller " f"than the number of samples {n_samples} or a float in the (0, 1) range" ) if ( isinstance(train_size, int) and (train_size >= n_samples or train_size <= 0) or isinstance(train_size, float) and (train_size <= 0 or train_size >= 1) ): raise ValueError( f"train_size={train_size} should be either positive and smaller " f"than the number of samples {n_samples} or a float in the (0, 1) range" ) if train_size is not None and not isinstance(train_size, (int, float)): raise ValueError(f"Invalid value for train_size: {train_size} of type {type(train_size)}") if test_size is not None and not isinstance(test_size, (int, float)): raise ValueError(f"Invalid value for test_size: {test_size} of type {type(test_size)}") if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1: raise ValueError( f"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)" " range. Reduce test_size and/or train_size." ) if isinstance(test_size, float): n_test = ceil(test_size * n_samples) elif isinstance(test_size, int): n_test = float(test_size) if isinstance(train_size, float): n_train = floor(train_size * n_samples) elif isinstance(train_size, int): n_train = float(train_size) if train_size is None: n_train = n_samples - n_test elif test_size is None: n_test = n_samples - n_train if n_train + n_test > n_samples: raise ValueError( f"The sum of train_size and test_size = {n_train + n_test}, " "should be smaller than the number of " f"samples {n_samples}. Reduce test_size and/or " "train_size." ) n_train, n_test = int(n_train), int(n_test) if n_train == 0: raise ValueError( f"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the " "resulting train set will be empty. Adjust any of the " "aforementioned parameters." 
) load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() if generator is None and shuffle is True: if seed is None: _, seed, pos, *_ = np.random.get_state() seed = seed[pos] if pos < 624 else seed[0] _ = np.random.random() # do 1 step of rng generator = np.random.default_rng(seed) # Check if we've already cached this computation (indexed by a hash) if self.cache_files: if train_indices_cache_file_name is None or test_indices_cache_file_name is None: # we create a unique hash from the function, current dataset file and the mapping args if train_indices_cache_file_name is None: train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint) if test_indices_cache_file_name is None: test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint) if ( os.path.exists(train_indices_cache_file_name) and os.path.exists(test_indices_cache_file_name) and load_from_cache_file ): logger.info( f"Loading cached split indices for dataset at {train_indices_cache_file_name} and {test_indices_cache_file_name}" ) return DatasetDict( { "train": self._new_dataset_with_indices( fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name ), "test": self._new_dataset_with_indices( fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name ), } ) if not shuffle: if stratify_by_column is not None: raise ValueError("Stratified train/test split is not implemented for `shuffle=False`") train_indices = np.arange(n_train) test_indices = np.arange(n_train, n_train + n_test) else: # stratified partition if stratify_by_column is not None: if stratify_by_column not in self._info.features.keys(): raise ValueError(f"Key {stratify_by_column} not found in {self._info.features.keys()}") if not isinstance(self._info.features[stratify_by_column], ClassLabel): raise ValueError( f"Stratifying by column is only supported for {ClassLabel.__name__} column, and column {stratify_by_column} is {type(self._info.features[stratify_by_column]).__name__}." ) try: train_indices, test_indices = next( stratified_shuffle_split_generate_indices( self.with_format("numpy")[stratify_by_column], n_train, n_test, rng=generator ) ) except Exception as error: if str(error) == "Minimum class count error": raise ValueError( f"The least populated class in {stratify_by_column} column has only 1" " member, which is too few. The minimum" " number of groups for any class cannot" " be less than 2." ) else: raise error # random partition else: permutation = generator.permutation(len(self)) test_indices = permutation[:n_test] train_indices = permutation[n_test : (n_test + n_train)] train_split = self.select( indices=train_indices, keep_in_memory=keep_in_memory, indices_cache_file_name=train_indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=train_new_fingerprint, ) test_split = self.select( indices=test_indices, keep_in_memory=keep_in_memory, indices_cache_file_name=test_indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=test_new_fingerprint, ) return DatasetDict({"train": train_split, "test": test_split}) def shard( self, num_shards: int, index: int, contiguous: bool = False, keep_in_memory: bool = False, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, ) -> "Dataset": """Return the `index`-nth shard from dataset split into `num_shards` pieces. This shards deterministically. 
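        For instance (illustrative numbers only), a dataset with 10 rows split with `num_shards=2` gives shard
        `index=0` the rows `0, 2, 4, 6, 8`, or the contiguous block of rows `0-4` when `contiguous=True`.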
`dset.shard(n, i)` will contain all elements of dset whose index mod `n = i`. `dset.shard(n, i, contiguous=True)` will instead split dset into contiguous chunks, so it can be easily concatenated back together after processing. If `n % i == l`, then the first `l` shards will have length `(n // i) + 1`, and the remaining shards will have length `(n // i)`. `datasets.concatenate([dset.shard(n, i, contiguous=True) for i in range(n)])` will return a dataset with the same order as the original. Be sure to shard before using any randomizing operator (such as `shuffle`). It is best if the shard operator is used early in the dataset pipeline. Args: num_shards (`int`): How many shards to split the dataset into. index (`int`): Which shard to select and return. contiguous: (`bool`, defaults to `False`): Whether to select contiguous blocks of indices for shards. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. indices_cache_file_name (`str`, *optional*): Provide the name of a path for the cache file. It is used to store the indices of each shard instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds Dataset({ features: ['text', 'label'], num_rows: 1066 }) >>> ds.shard(num_shards=2, index=0) Dataset({ features: ['text', 'label'], num_rows: 533 }) ``` """ if not 0 <= index < num_shards: raise ValueError("index should be in [0, num_shards-1]") if contiguous: div = len(self) // num_shards mod = len(self) % num_shards start = div * index + min(index, mod) end = start + div + (1 if index < mod else 0) indices = range(start, end) else: indices = np.arange(index, len(self), num_shards) return self.select( indices=indices, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, ) def export( self, filename: str, format: str = "tfrecord", ): """Writes the Arrow dataset to a TFRecord file. The dataset must already be in tensorflow format. The records will be written with keys from `dataset._format_columns`. Args: filename (`str`): The filename, including the `.tfrecord` extension, to write to. format (`str`, optional, default `"tfrecord"`): The type of output file. Currently this is a no-op, as TFRecords are the only option. This enables a more flexible function signature later. 
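
        Example (a minimal sketch, not an officially documented snippet; it requires TensorFlow, assumes the
        dataset has already been put in NumPy format, and uses a hypothetical output path):

        ```py
        >>> ds.set_format("numpy")
        >>> ds.export("my_dataset.tfrecord")
        ```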
""" try: import tensorflow as tf # noqa: F401 except ImportError: logger.error("Tensorflow needs to be installed to be able to return Tensorflow tensors.") # From https://www.tensorflow.org/tutorials/load_data/tfrecord def _bytes_feature(values): """Returns a bytes_list from a list of string / byte.""" return tf.train.Feature(bytes_list=tf.train.BytesList(value=values)) def _float_feature(values): """Returns a float_list from a list of float / double.""" return tf.train.Feature(float_list=tf.train.FloatList(value=values)) def _int64_feature(values): """Returns an int64_list from a list of bool / enum / int / uint.""" return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def _feature(values: Union[float, int, str, np.ndarray, list]) -> "tf.train.Feature": """Typechecks `values` and returns the corresponding tf.train.Feature.""" if isinstance(values, list): if values and isinstance(values[0], str): return _bytes_feature([v.encode() for v in values]) else: raise ValueError(f"values={values} is empty or contains items that cannot be serialized") elif isinstance(values, np.ndarray): if values.dtype == np.dtype(float): return _float_feature(values) elif values.dtype == np.int64: return _int64_feature(values) elif values.dtype == np.dtype(str) or ( values.dtype == np.dtype(object) and len(values) > 0 and isinstance(values[0], str) ): return _bytes_feature([v.encode() for v in values]) else: raise ValueError( f"values={values} is empty or is an np.ndarray with items of dtype {values[0].dtype}, which cannot be serialized" ) elif hasattr(values, "dtype"): if np.issubdtype(values.dtype, np.floating): return _float_feature([values.item()]) elif np.issubdtype(values.dtype, np.integer): return _int64_feature([values.item()]) elif np.issubdtype(values.dtype, str): return _bytes_feature([values.item().encode()]) else: raise ValueError(f"values={values} has dtype {values.dtype}, which cannot be serialized") else: raise ValueError(f"values={values} are not numpy objects or strings, and so cannot be serialized") def serialize_example(ex): feature = {key: _feature(value) for key, value in ex.items()} example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def tf_serialize_example(ex): tf_string = tf.py_function(serialize_example, (ex,), tf.string) return tf.reshape(tf_string, ()) def generator(): for ex in self: yield serialize_example(ex) if self._format_type != "numpy": raise ValueError("Dataset format must be numpy before exporting") if not filename.endswith(".tfrecord"): raise ValueError("filename {filename} must end with .tfrecord") tf_dataset = tf.data.Dataset.from_generator(generator, output_types=tf.string, output_shapes=()) writer = tf.data.experimental.TFRecordWriter(filename) logger.info(f"Writing TFRecord to {filename}") writer.write(tf_dataset) logger.info(f"Finished writing TFRecord to {filename}") self = None # delete the dataset reference used by tf_dataset def to_csv( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, **to_csv_kwargs, ) -> int: """Exports the dataset to csv Args: path_or_buf (`PathLike` or `FileOrBuffer`): Either a path to a file (e.g. `file.csv`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.csv`), or a BinaryIO, where the dataset will be saved to in the specified format. batch_size (`int`, *optional*): Size of the batch to load in memory and write at once. 
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. num_proc (`int`, *optional*): Number of processes for multiprocessing. By default it doesn't use multiprocessing. `batch_size` in this case defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default value if you have sufficient compute power. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.19.0"/> **to_csv_kwargs (additional keyword arguments): Parameters to pass to pandas's [`pandas.DataFrame.to_csv`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html). <Changed version="2.10.0"> Now, `index` defaults to `False` if not specified. If you would like to write the index, pass `index=True` and also set a name for the index column by passing `index_label`. </Changed> Returns: `int`: The number of characters or bytes written. Example: ```py >>> ds.to_csv("path/to/dataset/directory") ``` """ # Dynamic import to avoid circular dependency from .io.csv import CsvDatasetWriter return CsvDatasetWriter( self, path_or_buf, batch_size=batch_size, num_proc=num_proc, storage_options=storage_options, **to_csv_kwargs, ).write() def to_dict(self, batch_size: Optional[int] = None, batched="deprecated") -> Union[dict, Iterator[dict]]: """Returns the dataset as a Python dict. Can also return a generator for large datasets. Args: batched (`bool`): Set to `True` to return a generator that yields the dataset as batches of `batch_size` rows. Defaults to `False` (returns the whole datasets once). <Deprecated version="2.11.0"> Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead. </Deprecated> batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`. Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. Returns: `dict` or `Iterator[dict]` Example: ```py >>> ds.to_dict() ``` """ if batched != "deprecated": warnings.warn( "'batched' was deprecated in version 2.11.0 and will be removed in version 3.0.0. Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead.", FutureWarning, ) else: batched = False if not batched: return query_table( table=self._data, key=slice(0, len(self)), indices=self._indices, ).to_pydict() else: batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE return ( query_table( table=self._data, key=slice(offset, offset + batch_size), indices=self._indices, ).to_pydict() for offset in range(0, len(self), batch_size) ) def to_list(self) -> list: """Returns the dataset as a Python list. Returns: `list` Example: ```py >>> ds.to_list() ``` """ return query_table( table=self._data, key=slice(0, len(self)), indices=self._indices, ).to_pylist() def to_json( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, **to_json_kwargs, ) -> int: """Export the dataset to JSON Lines or JSON. Args: path_or_buf (`PathLike` or `FileOrBuffer`): Either a path to a file (e.g. `file.json`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.json`), or a BinaryIO, where the dataset will be saved to in the specified format. batch_size (`int`, *optional*): Size of the batch to load in memory and write at once. Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. num_proc (`int`, *optional*): Number of processes for multiprocessing. By default it doesn't use multiprocessing. 
`batch_size` in this case defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default value if you have sufficient compute power. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.19.0"/> **to_json_kwargs (additional keyword arguments): Parameters to pass to pandas's [`pandas.DataFrame.to_json`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html). <Changed version="2.11.0"> Now, `index` defaults to `False` if `orient` is `"split"` or `"table"`. If you would like to write the index, pass `index=True`. </Changed> Returns: `int`: The number of characters or bytes written. Example: ```py >>> ds.to_json("path/to/dataset/directory") ``` """ # Dynamic import to avoid circular dependency from .io.json import JsonDatasetWriter return JsonDatasetWriter( self, path_or_buf, batch_size=batch_size, num_proc=num_proc, storage_options=storage_options, **to_json_kwargs, ).write() def to_pandas( self, batch_size: Optional[int] = None, batched: bool = False ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: """Returns the dataset as a `pandas.DataFrame`. Can also return a generator for large datasets. Args: batched (`bool`): Set to `True` to return a generator that yields the dataset as batches of `batch_size` rows. Defaults to `False` (returns the whole datasets once). batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`. Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. Returns: `pandas.DataFrame` or `Iterator[pandas.DataFrame]` Example: ```py >>> ds.to_pandas() ``` """ if not batched: return query_table( table=self._data, key=slice(0, len(self)), indices=self._indices, ).to_pandas(types_mapper=pandas_types_mapper) else: batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE return ( query_table( table=self._data, key=slice(offset, offset + batch_size), indices=self._indices, ).to_pandas(types_mapper=pandas_types_mapper) for offset in range(0, len(self), batch_size) ) def to_polars( self, batch_size: Optional[int] = None, batched: bool = False, schema_overrides: Optional[dict] = None, rechunk: bool = True, ) -> Union["pl.DataFrame", Iterator["pl.DataFrame"]]: """Returns the dataset as a `polars.DataFrame`. Can also return a generator for large datasets. Args: batched (`bool`): Set to `True` to return a generator that yields the dataset as batches of `batch_size` rows. Defaults to `False` (returns the whole datasets once). batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`. Defaults to `genomicsml.datasets.config.DEFAULT_MAX_BATCH_SIZE`. schema_overrides (`dict`, *optional*): Support type specification or override of one or more columns; note that any dtypes inferred from the schema param will be overridden. rechunk (`bool`): Make sure that all data is in contiguous memory. Defaults to `True`. 
Returns: `polars.DataFrame` or `Iterator[polars.DataFrame]` Example: ```py >>> ds.to_polars() ``` """ if config.POLARS_AVAILABLE: import polars as pl if not batched: return pl.from_arrow( query_table( table=self._data, key=slice(0, len(self)), indices=self._indices if self._indices is not None else None, ), schema_overrides=schema_overrides, rechunk=rechunk, ) else: batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE return ( pl.from_arrow( query_table( table=self._data, key=slice(offset, offset + batch_size), indices=self._indices if self._indices is not None else None, ), schema_overrides=schema_overrides, rechunk=rechunk, ) for offset in range(0, len(self), batch_size) ) else: raise ValueError("Polars needs to be installed to be able to return Polars dataframes.") def to_parquet( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, storage_options: Optional[dict] = None, **parquet_writer_kwargs, ) -> int: """Exports the dataset to parquet Args: path_or_buf (`PathLike` or `FileOrBuffer`): Either a path to a file (e.g. `file.parquet`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.parquet`), or a BinaryIO, where the dataset will be saved to in the specified format. batch_size (`int`, *optional*): Size of the batch to load in memory and write at once. Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.19.0"/> **parquet_writer_kwargs (additional keyword arguments): Parameters to pass to PyArrow's `pyarrow.parquet.ParquetWriter`. Returns: `int`: The number of characters or bytes written. Example: ```py >>> ds.to_parquet("path/to/dataset/directory") ``` """ # Dynamic import to avoid circular dependency from .io.parquet import ParquetDatasetWriter return ParquetDatasetWriter( self, path_or_buf, batch_size=batch_size, storage_options=storage_options, **parquet_writer_kwargs ).write() def to_sql( self, name: str, con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], batch_size: Optional[int] = None, **sql_writer_kwargs, ) -> int: """Exports the dataset to a SQL database. Args: name (`str`): Name of SQL table. con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Connection`): A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) or a SQLite3/SQLAlchemy connection object used to write to a database. batch_size (`int`, *optional*): Size of the batch to load in memory and write at once. Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. **sql_writer_kwargs (additional keyword arguments): Parameters to pass to pandas's [`pandas.DataFrame.to_sql`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_sql.html). <Changed version="2.11.0"> Now, `index` defaults to `False` if not specified. If you would like to write the index, pass `index=True` and also set a name for the index column by passing `index_label`. </Changed> Returns: `int`: The number of records written. Example: ```py >>> # con provided as a connection URI string >>> ds.to_sql("data", "sqlite:///my_own_db.sql") >>> # con provided as a sqlite3 connection object >>> import sqlite3 >>> con = sqlite3.connect("my_own_db.sql") >>> with con: ... 
ds.to_sql("data", con) ``` """ # Dynamic import to avoid circular dependency from .io.sql import SqlDatasetWriter return SqlDatasetWriter(self, name, con, batch_size=batch_size, **sql_writer_kwargs).write() def _estimate_nbytes(self) -> int: dataset_nbytes = self.data.nbytes # Find decodable columns, because if there are any, we need to # adjust the dataset size computation (needed for sharding) to account for possible external files decodable_columns = [ k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True) ] if decodable_columns: # Approximate the space needed to store the bytes from the external files by analyzing the first 1000 examples extra_nbytes = 0 def extra_nbytes_visitor(array, feature): nonlocal extra_nbytes if isinstance(feature, (Audio, Image)): for x in array.to_pylist(): if x is not None and x["bytes"] is None and x["path"] is not None: size = xgetsize(x["path"]) extra_nbytes += size extra_nbytes -= array.field("path").nbytes table = self.with_format("arrow")[:1000] table_visitor(table, extra_nbytes_visitor) extra_nbytes = extra_nbytes * len(self.data) / len(table) dataset_nbytes = dataset_nbytes + extra_nbytes if self._indices is not None: dataset_nbytes = dataset_nbytes * len(self._indices) / len(self.data) return dataset_nbytes def _generate_tables_from_shards(shards: List["Dataset"], batch_size: int): for shard_idx, shard in enumerate(shards): for pa_table in shard.with_format("arrow").iter(batch_size): yield shard_idx, pa_table def _generate_tables_from_cache_file(filename: str): for batch_idx, batch in enumerate(_memory_mapped_record_batch_reader_from_file(filename)): yield batch_idx, pa.Table.from_batches([batch]) def to_iterable_dataset(self, num_shards: Optional[int] = 1) -> "IterableDataset": """Get an [`datasets.IterableDataset`] from a map-style [`datasets.Dataset`]. This is equivalent to loading a dataset in streaming mode with [`datasets.load_dataset`], but much faster since the data is streamed from local files. Contrary to map-style datasets, iterable datasets are lazy and can only be iterated over (e.g. using a for loop). Since they are read sequentially in training loops, iterable datasets are much faster than map-style datasets. All the transformations applied to iterable datasets like filtering or processing are done on-the-fly when you start iterating over the dataset. Still, it is possible to shuffle an iterable dataset using [`datasets.IterableDataset.shuffle`]. This is a fast approximate shuffling that works best if you have multiple shards and if you specify a buffer size that is big enough. To get the best speed performance, make sure your dataset doesn't have an indices mapping. If this is the case, the data are not read contiguously, which can be slow sometimes. You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed before switching to an iterable dataset. Args: num_shards (`int`, default to `1`): Number of shards to define when instantiating the iterable dataset. This is especially useful for big datasets to be able to shuffle properly, and also to enable fast parallel loading using a PyTorch DataLoader or in distributed setups for example. Shards are defined using [`datasets.Dataset.shard`]: it simply slices the data without writing anything on disk. Returns: [`datasets.IterableDataset`] Example: Basic usage: ```python >>> ids = ds.to_iterable_dataset() >>> for example in ids: ... 
pass ``` With lazy filtering and processing: ```python >>> ids = ds.to_iterable_dataset() >>> ids = ids.filter(filter_fn).map(process_fn) # will filter and process on-the-fly when you start iterating over the iterable dataset >>> for example in ids: ... pass ``` With sharding to enable efficient shuffling: ```python >>> ids = ds.to_iterable_dataset(num_shards=64) # the dataset is split into 64 shards to be iterated over >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer for fast approximate shuffling when you start iterating >>> for example in ids: ... pass ``` With a PyTorch DataLoader: ```python >>> import torch >>> ids = ds.to_iterable_dataset(num_shards=64) >>> ids = ids.filter(filter_fn).map(process_fn) >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards to each worker to load, filter and process when you start iterating >>> for example in ids: ... pass ``` With a PyTorch DataLoader and shuffling: ```python >>> import torch >>> ids = ds.to_iterable_dataset(num_shards=64) >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from the shuffled list of shards to each worker when you start iterating >>> for example in ids: ... pass ``` In a distributed setup like PyTorch DDP with a PyTorch DataLoader and shuffling ```python >>> from datasets.distributed import split_dataset_by_node >>> ids = ds.to_iterable_dataset(num_shards=512) >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating >>> ids = split_dataset_by_node(ds, world_size=8, rank=0) # will keep only 512 / 8 = 64 shards from the shuffled lists of shards when you start iterating >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from this node's list of shards to each worker when you start iterating >>> for example in ids: ... pass ``` With shuffling and multiple epochs: ```python >>> ids = ds.to_iterable_dataset(num_shards=64) >>> ids = ids.shuffle(buffer_size=10_000, seed=42) # will shuffle the shards order and use a shuffle buffer when you start iterating >>> for epoch in range(n_epochs): ... ids.set_epoch(epoch) # will use effective_seed = seed + epoch to shuffle the shards and for the shuffle buffer when you start iterating ... for example in ids: ... pass ``` Feel free to also use [`IterableDataset.set_epoch`] when using a PyTorch DataLoader or in distributed setups. """ from .iterable_dataset import ArrowExamplesIterable, IterableDataset if self._format_type is not None: raise NotImplementedError( "Converting a formatted dataset to a formatted iterable dataset is not implemented yet. Please run `my_dataset = my_dataset.with_format(None)` before calling to_iterable_dataset" ) if num_shards > len(self): raise ValueError( f"Unable to shard a dataset of size {len(self)} into {num_shards} shards (the number of shards exceeds the number of samples)." ) if self._indices is not None: logger.info( "Converting an Arrow dataset to iterable but it has an indices mapping that can make it slower. " "You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed." 
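                # e.g. (illustrative): `ds = ds.shuffle(seed=42).flatten_indices()` before calling `to_iterable_dataset()`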
) shards = ( [copy.deepcopy(self)] if num_shards == 1 else [ self.shard(num_shards=num_shards, index=shard_idx, contiguous=True) for shard_idx in range(num_shards) ] ) ex_iterable = ArrowExamplesIterable( Dataset._generate_tables_from_shards, kwargs={"shards": shards, "batch_size": config.DEFAULT_MAX_BATCH_SIZE}, ) return IterableDataset(ex_iterable, info=DatasetInfo(features=self.features)) def _push_parquet_shards_to_hub( self, repo_id: str, data_dir: str = "data", split: Optional[str] = None, token: Optional[str] = None, revision: Optional[str] = None, create_pr: Optional[bool] = False, max_shard_size: Optional[Union[int, str]] = None, num_shards: Optional[int] = None, embed_external_files: bool = True, ) -> Tuple[str, str, int, int, List[str], int]: """Pushes the dataset shards as Parquet files to the hub. Returns: additions (`List[CommitOperation]`): list of the `CommitOperationAdd` of the uploaded shards uploaded_size (`int`): number of uploaded bytes to the repository dataset_nbytes (`int`): approximate size in bytes of the uploaded dataset afer uncompression """ # Find decodable columns, because if there are any, we need to: # embed the bytes from the files in the shards decodable_columns = ( [k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)] if embed_external_files else [] ) dataset_nbytes = self._estimate_nbytes() if num_shards is None: max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE) num_shards = int(dataset_nbytes / max_shard_size) + 1 num_shards = max(num_shards, 1) shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards)) if decodable_columns: def shards_with_embedded_external_files(shards): for shard in shards: format = shard.format shard = shard.with_format("arrow") shard = shard.map( embed_table_storage, batched=True, batch_size=1000, keep_in_memory=True, ) shard = shard.with_format(**format) yield shard shards = shards_with_embedded_external_files(shards) api = HfApi(endpoint=config.HF_ENDPOINT, token=token) uploaded_size = 0 additions = [] for index, shard in hf_tqdm( enumerate(shards), desc="Uploading the dataset shards", total=num_shards, ): shard_path_in_repo = f"{data_dir}/{split}-{index:05d}-of-{num_shards:05d}.parquet" buffer = BytesIO() shard.to_parquet(buffer) uploaded_size += buffer.tell() shard_addition = CommitOperationAdd(path_in_repo=shard_path_in_repo, path_or_fileobj=buffer) preupload_lfs_files( api, repo_id=repo_id, additions=[shard_addition], token=token, repo_type="dataset", revision=revision, create_pr=create_pr, ) additions.append(shard_addition) return additions, uploaded_size, dataset_nbytes def push_to_hub( self, repo_id: str, config_name: str = "default", set_default: Optional[bool] = None, split: Optional[str] = None, data_dir: Optional[str] = None, commit_message: Optional[str] = None, commit_description: Optional[str] = None, private: Optional[bool] = False, token: Optional[str] = None, revision: Optional[str] = None, branch="deprecated", create_pr: Optional[bool] = False, max_shard_size: Optional[Union[int, str]] = None, num_shards: Optional[int] = None, embed_external_files: bool = True, ) -> CommitInfo: """Pushes the dataset to the hub as a Parquet dataset. The dataset is pushed using HTTP requests and does not need to have neither git or git-lfs installed. The resulting Parquet files are self-contained by default. 
If your dataset contains [`Image`] or [`Audio`] data, the Parquet files will store the bytes of your images or audio files. You can disable this by setting `embed_external_files` to `False`. Args: repo_id (`str`): The ID of the repository to push to in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace of the logged-in user. config_name (`str`, defaults to "default"): The configuration name (or subset) of a dataset. Defaults to "default". set_default (`bool`, *optional*): Whether to set this configuration as the default one. Otherwise, the default configuration is the one named "default". split (`str`, *optional*): The name of the split that will be given to that dataset. Defaults to `self.split`. data_dir (`str`, *optional*): Directory name that will contain the uploaded data files. Defaults to the `config_name` if different from "default", else "data". <Added version="2.17.0"/> commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"Upload dataset"`. commit_description (`str`, *optional*): Description of the commit that will be created. Additionally, description of the PR if a PR is created (`create_pr` is True). <Added version="2.16.0"/> private (`bool`, *optional*, defaults to `False`): Whether the dataset repository should be set to private or not. Only affects repository creation: a repository that already exists will not be affected by that parameter. token (`str`, *optional*): An optional authentication token for the Hugging Face Hub. If no token is passed, will default to the token saved locally when logging in with `huggingface-cli login`. Will raise an error if no token is passed and the user is not logged-in. revision (`str`, *optional*): Branch to push the uploaded files to. Defaults to the `"main"` branch. <Added version="2.15.0"/> branch (`str`, *optional*): The git branch on which to push the dataset. This defaults to the default branch as specified in your repository, which defaults to `"main"`. <Deprecated version="2.15.0"> `branch` was deprecated in favor of `revision` in version 2.15.0 and will be removed in 3.0.0. </Deprecated> create_pr (`bool`, *optional*, defaults to `False`): Whether to create a PR with the uploaded files or directly commit. <Added version="2.15.0"/> max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). num_shards (`int`, *optional*): Number of shards to write. By default, the number of shards depends on `max_shard_size`. <Added version="2.8.0"/> embed_external_files (`bool`, defaults to `True`): Whether to embed file bytes in the shards. In particular, this will do the following before the push for the fields of type: - [`Audio`] and [`Image`]: remove local path information and embed file content in the Parquet files. Return: huggingface_hub.CommitInfo Example: ```python >>> dataset.push_to_hub("<organization>/<dataset_id>") >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True) >>> dataset.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB") >>> dataset.push_to_hub("<organization>/<dataset_id>", num_shards=1024) ``` If your dataset has multiple splits (e.g. 
train/validation/test): ```python >>> train_dataset.push_to_hub("<organization>/<dataset_id>", split="train") >>> val_dataset.push_to_hub("<organization>/<dataset_id>", split="validation") >>> # later >>> dataset = load_dataset("<organization>/<dataset_id>") >>> train_dataset = dataset["train"] >>> val_dataset = dataset["validation"] ``` If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages): ```python >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en") >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr") >>> # later >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en") >>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr") ``` """ if config_name == "data": raise ValueError("`config_name` cannot be 'data'. Please, choose another name for configuration.") if max_shard_size is not None and num_shards is not None: raise ValueError( "Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both." ) if split is None: split = str(self.split) if self.split is not None else "train" if not re.match(_split_re, split): raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.") if branch != "deprecated": warnings.warn( "'branch' was deprecated in favor of 'revision' in version 2.15.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'revision={branch}' instead.", FutureWarning, ) revision = branch api = HfApi(endpoint=config.HF_ENDPOINT, token=token) repo_url = api.create_repo( repo_id, token=token, repo_type="dataset", private=private, exist_ok=True, ) repo_id = repo_url.repo_id if revision is not None: api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True) if not data_dir: data_dir = config_name if config_name != "default" else "data" # for backward compatibility additions, uploaded_size, dataset_nbytes = self._push_parquet_shards_to_hub( repo_id=repo_id, data_dir=data_dir, split=split, token=token, revision=revision, max_shard_size=max_shard_size, num_shards=num_shards, create_pr=create_pr, embed_external_files=embed_external_files, ) # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern) # and delete old split shards (if they exist) repo_with_dataset_card, repo_with_dataset_infos = False, False deletions, deleted_size = [], 0 repo_splits = [] # use a list to keep the order of the splits repo_files_to_add = [addition.path_in_repo for addition in additions] for repo_file in list_files_info(api, repo_id=repo_id, revision=revision, repo_type="dataset", token=token): if repo_file.rfilename == config.REPOCARD_FILENAME: repo_with_dataset_card = True elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME: repo_with_dataset_infos = True elif ( repo_file.rfilename.startswith(f"{data_dir}/{split}-") and repo_file.rfilename not in repo_files_to_add ): deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename)) deleted_size += repo_file.size elif fnmatch.fnmatch( repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*") ): repo_split = string_to_dict( repo_file.rfilename, glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED), )["split"] if repo_split not in repo_splits: repo_splits.append(repo_split) organization, dataset_name = repo_id.split("/") if "/" in repo_id else (None, repo_id) info_to_dump = 
self.info.copy() info_to_dump.download_checksums = None info_to_dump.download_size = uploaded_size info_to_dump.dataset_size = dataset_nbytes info_to_dump.size_in_bytes = uploaded_size + dataset_nbytes info_to_dump.config_name = config_name info_to_dump.splits = SplitDict( {split: SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name)} ) # get the info from the README to update them if repo_with_dataset_card: dataset_card_path = api.hf_hub_download( repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision ) dataset_card = DatasetCard.load(Path(dataset_card_path)) dataset_card_data = dataset_card.data metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card_data) if dataset_infos and config_name in dataset_infos: repo_info = dataset_infos[config_name] else: repo_info = None # get the deprecated dataset_infos.json to update them elif repo_with_dataset_infos: dataset_card = None dataset_card_data = DatasetCardData() metadata_configs = MetadataConfigs() dataset_infos_path = api.hf_hub_download( repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision ) with open(dataset_infos_path, encoding="utf-8") as f: dataset_infos: dict = json.load(f) dataset_info = dataset_infos.get(config_name, None) if dataset_infos else None repo_info = DatasetInfo.from_dict(dataset_info) if dataset_info else None else: dataset_card = None dataset_card_data = DatasetCardData() metadata_configs = MetadataConfigs() repo_info = None # update the total info to dump from existing info if repo_info is not None: logger.info("Updating downloaded metadata with the new split.") if repo_info.splits and list(repo_info.splits) != [split]: if self._info.features != repo_info.features: raise ValueError( f"Features of the new split don't match the features of the existing splits on the hub: {self._info.features} != {repo_info.features}" ) if split in repo_info.splits: repo_info.download_size -= deleted_size repo_info.dataset_size -= repo_info.splits.get(split, SplitInfo()).num_bytes or 0 repo_info.download_checksums = None repo_info.download_size = (repo_info.download_size or 0) + uploaded_size repo_info.dataset_size = (repo_info.dataset_size or 0) + dataset_nbytes repo_info.size_in_bytes = repo_info.download_size + repo_info.dataset_size repo_info.splits.pop(split, None) repo_info.splits[split] = SplitInfo( split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name ) info_to_dump = repo_info # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed if not metadata_configs and repo_splits: default_metadata_configs_to_dump = { "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits] } MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data) # update the metadata configs if config_name in metadata_configs: metadata_config = metadata_configs[config_name] if "data_files" in metadata_config: data_files_to_dump = sanitize_patterns(metadata_config["data_files"]) else: data_files_to_dump = {} # add the new split data_files_to_dump[split] = [f"{data_dir}/{split}-*"] metadata_config_to_dump = { "data_files": [ { "split": _split, "path": _pattern[0] if len(_pattern) == 1 else _pattern, } for _split, _pattern in data_files_to_dump.items() ] } else: metadata_config_to_dump = {"data_files": [{"split": split, "path": 
f"{data_dir}/{split}-*"}]} if set_default and config_name != "default": if metadata_configs: default_config_name = metadata_configs.get_default_config_name() if default_config_name == "default": raise ValueError( "There exists a configuration named 'default'. To set a different configuration as default, " "rename the 'default' one first." ) else: _ = metadata_configs[default_config_name].pop("default") metadata_config_to_dump["default"] = True # push to the deprecated dataset_infos.json if repo_with_dataset_infos: dataset_infos_path = api.hf_hub_download( repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision ) with open(dataset_infos_path, encoding="utf-8") as f: dataset_infos: dict = json.load(f) dataset_infos[config_name] = asdict(info_to_dump) buffer = BytesIO() buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8")) additions.append( CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer) ) # push to README DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data) MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data) dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card additions.append( CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode()) ) commit_message = commit_message if commit_message is not None else "Upload dataset" if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT: commit_info = api.create_commit( repo_id, operations=additions + deletions, commit_message=commit_message, commit_description=commit_description, token=token, repo_type="dataset", revision=revision, create_pr=create_pr, ) else: logger.info( f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits." ) num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT) for i in range(0, num_commits): operations = additions[ i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT ] + (deletions if i == 0 else []) commit_info = api.create_commit( repo_id, operations=operations, commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})", commit_description=commit_description, token=token, repo_type="dataset", revision=revision, create_pr=create_pr, ) logger.info( f"Commit #{i+1} completed" + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "") + "." ) return commit_info def add_column(self, name: str, column: Union[list, np.array], new_fingerprint: str): """Add column to Dataset. <Added version="1.7"/> Args: name (`str`): Column name. column (`list` or `np.array`): Column data to be added. 
Returns: [`Dataset`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> more_text = ds["text"] >>> ds.add_column(name="text_2", column=more_text) Dataset({ features: ['text', 'label', 'text_2'], num_rows: 1066 }) ``` """ column_table = InMemoryTable.from_pydict({name: column}) _check_column_names(self._data.column_names + column_table.column_names) dataset = self.flatten_indices() if self._indices is not None else self # Concatenate tables horizontally table = concat_tables([dataset._data, column_table], axis=1) # Update features info = dataset.info.copy() info.features.update(Features.from_arrow_schema(column_table.schema)) table = update_metadata_with_features(table, info.features) return Dataset(table, info=info, split=self.split, indices_table=None, fingerprint=new_fingerprint) def add_faiss_index( self, column: str, index_name: Optional[str] = None, device: Optional[int] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, # noqa: F821 batch_size: int = 1000, train_size: Optional[int] = None, faiss_verbose: bool = False, dtype=np.float32, ): """Add a dense index using Faiss for fast retrieval. By default the index is done over the vectors of the specified column. You can specify `device` if you want to run it on GPU (`device` must be the GPU index). You can find more information about Faiss here: - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory) Args: column (`str`): The column of the vectors to add to the index. index_name (`str`, *optional*): The `index_name`/identifier of the index. This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`]. By default it corresponds to `column`. device (`Union[int, List[int]]`, *optional*): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs. If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU. string_factory (`str`, *optional*): This is passed to the index factory of Faiss to create the index. Default index class is `IndexFlat`. metric_type (`int`, *optional*): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`. custom_index (`faiss.Index`, *optional*): Custom Faiss index that you already have instantiated and configured for your needs. batch_size (`int`): Size of the batch to use while adding vectors to the `FaissIndex`. Default value is `1000`. <Added version="2.4.0"/> train_size (`int`, *optional*): If the index needs a training step, specifies how many vectors will be used to train the index. faiss_verbose (`bool`, defaults to `False`): Enable the verbosity of the Faiss index. dtype (`data-type`): The dtype of the numpy arrays that are indexed. Default is `np.float32`. 
Example: ```python >>> ds = datasets.load_dataset('crime_and_punish', split='train') >>> ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line']})) >>> ds_with_embeddings.add_faiss_index(column='embeddings') >>> # query >>> scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', embed('my new query'), k=10) >>> # save index >>> ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss') >>> ds = datasets.load_dataset('crime_and_punish', split='train') >>> # load index >>> ds.load_faiss_index('embeddings', 'my_index.faiss') >>> # query >>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10) ``` """ with self.formatted_as(type="numpy", columns=[column], dtype=dtype): super().add_faiss_index( column=column, index_name=index_name, device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose, ) return self def add_faiss_index_from_external_arrays( self, external_arrays: np.array, index_name: str, device: Optional[int] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, # noqa: F821 batch_size: int = 1000, train_size: Optional[int] = None, faiss_verbose: bool = False, dtype=np.float32, ): """Add a dense index using Faiss for fast retrieval. The index is created using the vectors of `external_arrays`. You can specify `device` if you want to run it on GPU (`device` must be the GPU index). You can find more information about Faiss here: - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory) Args: external_arrays (`np.array`): If you want to use arrays from outside the lib for the index, you can set `external_arrays`. It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`. index_name (`str`): The `index_name`/identifier of the index. This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`]. device (Optional `Union[int, List[int]]`, *optional*): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs. If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU. string_factory (`str`, *optional*): This is passed to the index factory of Faiss to create the index. Default index class is `IndexFlat`. metric_type (`int`, *optional*): Type of metric. Ex: `faiss.faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`. custom_index (`faiss.Index`, *optional*): Custom Faiss index that you already have instantiated and configured for your needs. batch_size (`int`, *optional*): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000. <Added version="2.4.0"/> train_size (`int`, *optional*): If the index needs a training step, specifies how many vectors will be used to train the index. faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index. dtype (`numpy.dtype`): The dtype of the numpy arrays that are indexed. Default is np.float32. 
""" super().add_faiss_index_from_external_arrays( external_arrays=external_arrays.astype(dtype), index_name=index_name, device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose, ) def add_elasticsearch_index( self, column: str, index_name: Optional[str] = None, host: Optional[str] = None, port: Optional[int] = None, es_client: Optional["elasticsearch.Elasticsearch"] = None, # noqa: F821 es_index_name: Optional[str] = None, es_index_config: Optional[dict] = None, ): """Add a text index using ElasticSearch for fast retrieval. This is done in-place. Args: column (`str`): The column of the documents to add to the index. index_name (`str`, *optional*): The `index_name`/identifier of the index. This is the index name that is used to call [`~Dataset.get_nearest_examples`] or [`Dataset.search`]. By default it corresponds to `column`. host (`str`, *optional*, defaults to `localhost`): Host of where ElasticSearch is running. port (`str`, *optional*, defaults to `9200`): Port of where ElasticSearch is running. es_client (`elasticsearch.Elasticsearch`, *optional*): The elasticsearch client used to create the index if host and port are `None`. es_index_name (`str`, *optional*): The elasticsearch index name used to create the index. es_index_config (`dict`, *optional*): The configuration of the elasticsearch index. Default config is: ``` { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, }, "mappings": { "properties": { "text": { "type": "text", "analyzer": "standard", "similarity": "BM25" }, } }, } ``` Example: ```python >>> es_client = elasticsearch.Elasticsearch() >>> ds = datasets.load_dataset('crime_and_punish', split='train') >>> ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name="my_es_index") >>> scores, retrieved_examples = ds.get_nearest_examples('line', 'my new query', k=10) ``` """ with self.formatted_as(type=None, columns=[column]): super().add_elasticsearch_index( column=column, index_name=index_name, host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config, ) return self def add_item(self, item: dict, new_fingerprint: str): """Add item to Dataset. <Added version="1.7"/> Args: item (`dict`): Item data to be added. 
Returns: [`Dataset`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> new_review = {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'} >>> ds = ds.add_item(new_review) >>> ds[-1] {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'} ``` """ item_table = InMemoryTable.from_pydict({k: [v] for k, v in item.items()}) # We don't call _check_if_features_can_be_aligned here so this cast is "unsafe" dset_features, item_features = _align_features( [self._info.features, Features.from_arrow_schema(item_table.schema)] ) # Cast to align the schemas of the tables and concatenate the tables table = concat_tables( [ self._data.cast(dset_features.arrow_schema) if self._info.features != dset_features else self._data, item_table.cast(item_features.arrow_schema), ] ) if self._indices is None: indices_table = None else: item_indices_array = pa.array([len(self._data)], type=pa.uint64()) item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=["indices"]) indices_table = concat_tables([self._indices, item_indices_table]) info = self.info.copy() info.features.update(item_features) table = update_metadata_with_features(table, info.features) return Dataset( table, info=info, split=self.split, indices_table=indices_table, fingerprint=new_fingerprint, ) def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "Dataset": """Align the dataset's label ID and label name mapping to match an input `label2id` mapping. This is useful when you want to ensure that a model's predicted labels are aligned with the dataset. The alignment in done using the lowercase label names. Args: label2id (`dict`): The label name to ID mapping to align the dataset with. label_column (`str`): The column name of labels to align on. Example: ```python >>> # dataset with mapping {'entailment': 0, 'neutral': 1, 'contradiction': 2} >>> ds = load_dataset("glue", "mnli", split="train") >>> # mapping to align with >>> label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2} >>> ds_aligned = ds.align_labels_with_mapping(label2id, "label") ``` """ # Sanity checks if label_column not in self._data.column_names: raise ValueError(f"Column ({label_column}) not in table columns ({self._data.column_names}).") label_feature = self._info.features[label_column] if not ( isinstance(label_feature, ClassLabel) or (isinstance(label_feature, Sequence) and isinstance(label_feature.feature, ClassLabel)) ): raise ValueError( f"Aligning labels with a mapping is only supported for {ClassLabel.__name__} column or {Sequence.__name__} column with the inner type {ClassLabel.__name__}, and column {label_feature} is of type {type(label_feature).__name__}." 
) # Sort input mapping by ID value to ensure the label names are aligned label2id = dict(sorted(label2id.items(), key=lambda item: item[1])) label_names = list(label2id.keys()) # Some label mappings use uppercase label names so we lowercase them during alignment label2id = {k.lower(): v for k, v in label2id.items()} int2str_function = ( label_feature.int2str if isinstance(label_feature, ClassLabel) else label_feature.feature.int2str ) if isinstance(label_feature, ClassLabel): def process_label_ids(batch): dset_label_names = [ int2str_function(label_id).lower() if label_id is not None else None for label_id in batch[label_column] ] batch[label_column] = [ label2id[label_name] if label_name is not None else None for label_name in dset_label_names ] return batch else: def process_label_ids(batch): dset_label_names = [ [int2str_function(label_id).lower() if label_id is not None else None for label_id in seq] for seq in batch[label_column] ] batch[label_column] = [ [label2id[label_name] if label_name is not None else None for label_name in seq] for seq in dset_label_names ] return batch features = self.features features[label_column] = ( ClassLabel(num_classes=len(label_names), names=label_names) if isinstance(label_feature, ClassLabel) else Sequence(ClassLabel(num_classes=len(label_names), names=label_names)) ) return self.map(process_label_ids, features=features, batched=True, desc="Aligning the labels") The provided code snippet includes necessary dependencies for implementing the `fingerprint_transform` function. Write a Python function `def fingerprint_transform( inplace: bool, use_kwargs: Optional[List[str]] = None, ignore_kwargs: Optional[List[str]] = None, fingerprint_names: Optional[List[str]] = None, randomized_function: bool = False, version: Optional[str] = None, )` to solve the following problem: Wrapper for dataset transforms to update the dataset fingerprint using ``update_fingerprint`` Args: inplace (:obj:`bool`): If inplace is True, the fingerprint of the dataset is updated inplace. Otherwise, a parameter "new_fingerprint" is passed to the wrapped method that should take care of setting the fingerprint of the returned Dataset. use_kwargs (:obj:`List[str]`, optional): optional white list of argument names to take into account to update the fingerprint to the wrapped method that should take care of setting the fingerprint of the returned Dataset. By default all the arguments are used. ignore_kwargs (:obj:`List[str]`, optional): optional black list of argument names to take into account to update the fingerprint. Note that ignore_kwargs prevails on use_kwargs. fingerprint_names (:obj:`List[str]`, optional, defaults to ["new_fingerprint"]): If the dataset transforms is not inplace and returns a DatasetDict, then it can require several fingerprints (one per dataset in the DatasetDict). By specifying fingerprint_names, one fingerprint named after each element of fingerprint_names is going to be passed. randomized_function (:obj:`bool`, defaults to False): If the dataset transform is random and has optional parameters "seed" and "generator", then you can set randomized_function to True. This way, even if users set "seed" and "generator" to None, then the fingerprint is going to be randomly generated depending on numpy's current state. In this case, the generator is set to np.random.default_rng(np.random.get_state()[1][0]). version (:obj:`str`, optional): version of the transform. The version is taken into account when computing the fingerprint. 
If a datase transform changes (or at least if the output data that are cached changes), then one should increase the version. If the version stays the same, then old cached data could be reused that are not compatible with the new transform. It should be in the format "MAJOR.MINOR.PATCH". Here is the function: def fingerprint_transform( inplace: bool, use_kwargs: Optional[List[str]] = None, ignore_kwargs: Optional[List[str]] = None, fingerprint_names: Optional[List[str]] = None, randomized_function: bool = False, version: Optional[str] = None, ): """ Wrapper for dataset transforms to update the dataset fingerprint using ``update_fingerprint`` Args: inplace (:obj:`bool`): If inplace is True, the fingerprint of the dataset is updated inplace. Otherwise, a parameter "new_fingerprint" is passed to the wrapped method that should take care of setting the fingerprint of the returned Dataset. use_kwargs (:obj:`List[str]`, optional): optional white list of argument names to take into account to update the fingerprint to the wrapped method that should take care of setting the fingerprint of the returned Dataset. By default all the arguments are used. ignore_kwargs (:obj:`List[str]`, optional): optional black list of argument names to take into account to update the fingerprint. Note that ignore_kwargs prevails on use_kwargs. fingerprint_names (:obj:`List[str]`, optional, defaults to ["new_fingerprint"]): If the dataset transforms is not inplace and returns a DatasetDict, then it can require several fingerprints (one per dataset in the DatasetDict). By specifying fingerprint_names, one fingerprint named after each element of fingerprint_names is going to be passed. randomized_function (:obj:`bool`, defaults to False): If the dataset transform is random and has optional parameters "seed" and "generator", then you can set randomized_function to True. This way, even if users set "seed" and "generator" to None, then the fingerprint is going to be randomly generated depending on numpy's current state. In this case, the generator is set to np.random.default_rng(np.random.get_state()[1][0]). version (:obj:`str`, optional): version of the transform. The version is taken into account when computing the fingerprint. If a datase transform changes (or at least if the output data that are cached changes), then one should increase the version. If the version stays the same, then old cached data could be reused that are not compatible with the new transform. It should be in the format "MAJOR.MINOR.PATCH". 
""" if use_kwargs is not None and not isinstance(use_kwargs, list): raise ValueError(f"use_kwargs is supposed to be a list, not {type(use_kwargs)}") if ignore_kwargs is not None and not isinstance(ignore_kwargs, list): raise ValueError(f"ignore_kwargs is supposed to be a list, not {type(use_kwargs)}") if inplace and fingerprint_names: raise ValueError("fingerprint_names are only used when inplace is False") fingerprint_names = fingerprint_names if fingerprint_names is not None else ["new_fingerprint"] def _fingerprint(func): if not inplace and not all(name in func.__code__.co_varnames for name in fingerprint_names): raise ValueError(f"function {func} is missing parameters {fingerprint_names} in signature") if randomized_function: # randomized function have seed and generator parameters if "seed" not in func.__code__.co_varnames: raise ValueError(f"'seed' must be in {func}'s signature") if "generator" not in func.__code__.co_varnames: raise ValueError(f"'generator' must be in {func}'s signature") # this call has to be outside the wrapper or since __qualname__ changes in multiprocessing transform = format_transform_for_fingerprint(func, version=version) @wraps(func) def wrapper(*args, **kwargs): kwargs_for_fingerprint = format_kwargs_for_fingerprint( func, args, kwargs, use_kwargs=use_kwargs, ignore_kwargs=ignore_kwargs, randomized_function=randomized_function, ) if args: dataset: Dataset = args[0] args = args[1:] else: dataset: Dataset = kwargs.pop(next(iter(inspect.signature(func).parameters))) # compute new_fingerprint and add it to the args of not in-place transforms if inplace: new_fingerprint = update_fingerprint(dataset._fingerprint, transform, kwargs_for_fingerprint) else: for fingerprint_name in fingerprint_names: # transforms like `train_test_split` have several hashes if kwargs.get(fingerprint_name) is None: kwargs_for_fingerprint["fingerprint_name"] = fingerprint_name kwargs[fingerprint_name] = update_fingerprint( dataset._fingerprint, transform, kwargs_for_fingerprint ) else: validate_fingerprint(kwargs[fingerprint_name]) # Call actual function out = func(dataset, *args, **kwargs) # Update fingerprint of in-place transforms + update in-place history of transforms if inplace: # update after calling func so that the fingerprint doesn't change if the function fails dataset._fingerprint = new_fingerprint return out wrapper._decorator_name_ = "fingerprint" return wrapper return _fingerprint
Wrapper for dataset transforms to update the dataset fingerprint using ``update_fingerprint`` Args: inplace (:obj:`bool`): If inplace is True, the fingerprint of the dataset is updated inplace. Otherwise, a parameter "new_fingerprint" is passed to the wrapped method, which should take care of setting the fingerprint of the returned Dataset. use_kwargs (:obj:`List[str]`, optional): optional whitelist of argument names to take into account when updating the fingerprint. By default all the arguments are used. ignore_kwargs (:obj:`List[str]`, optional): optional blacklist of argument names to ignore when updating the fingerprint. Note that ignore_kwargs prevails over use_kwargs. fingerprint_names (:obj:`List[str]`, optional, defaults to ["new_fingerprint"]): If the dataset transform is not inplace and returns a DatasetDict, then it can require several fingerprints (one per dataset in the DatasetDict). By specifying fingerprint_names, one fingerprint named after each element of fingerprint_names is passed to the wrapped method. randomized_function (:obj:`bool`, defaults to False): If the dataset transform is random and has optional parameters "seed" and "generator", then you can set randomized_function to True. This way, even if users set "seed" and "generator" to None, the fingerprint is still randomly generated, based on numpy's current state. In this case, the generator is set to np.random.default_rng(np.random.get_state()[1][0]). version (:obj:`str`, optional): version of the transform. The version is taken into account when computing the fingerprint. If a dataset transform changes (or at least if its cached output data changes), one should increase the version; if the version stays the same, old cached data that is not compatible with the new transform could be reused. It should be in the format "MAJOR.MINOR.PATCH".
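To make the behaviour above concrete, here is a minimal sketch of how `fingerprint_transform` is typically applied to a `Dataset` transform. It assumes the decorator can be imported from `datasets.fingerprint` (an internal module whose API may change between versions); `set_description` is a hypothetical toy transform, not a library method.

```python
# Minimal sketch: an in-place transform whose fingerprint bookkeeping is
# handled entirely by the decorator (internal API, subject to change).
from datasets import Dataset
from datasets.fingerprint import fingerprint_transform

@fingerprint_transform(inplace=True)
def set_description(dataset: Dataset, description: str):
    # Hypothetical toy transform: mutate the dataset info in place.
    dataset.info.description = description

ds = Dataset.from_dict({"text": ["a", "b", "c"]})
old_fingerprint = ds._fingerprint
set_description(ds, description="toy dataset")
# The decorator recomputed the fingerprint from the old one, the function
# and its kwargs, and assigned it after the call succeeded.
assert ds._fingerprint != old_fingerprint
```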
17,976
import copy import os from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from . import config from .utils.logging import get_logger def inject_arrow_table_documentation(arrow_table_method): def wrapper(fn): fn.__doc__ = arrow_table_method.__doc__ + (fn.__doc__ if fn.__doc__ is not None else "") fn.__doc__ = fn.__doc__.replace("pyarrow.Table", "Table") if hasattr(arrow_table_method, "__annotations__"): fn.__annotations__ = arrow_table_method.__annotations__ return fn return wrapper
null
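The decorator factory above exists so that the thin `Table` wrappers in `datasets.table` can reuse pyarrow's own documentation. A short sketch of the intended usage, assuming the function is importable from `datasets.table` (where the snippet places it); `table_slice` is a made-up wrapper used only for illustration.

```python
import pyarrow as pa
from datasets.table import inject_arrow_table_documentation

@inject_arrow_table_documentation(pa.Table.slice)
def table_slice(table: pa.Table, offset=0, length=None) -> pa.Table:
    # Thin wrapper: the decorator copies pa.Table.slice's docstring onto it,
    # rewriting "pyarrow.Table" to "Table" in the copied text.
    return table.slice(offset, length)

print((table_slice.__doc__ or "")[:60])  # begins with pyarrow's docstring for slice
```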
17,977
import copy import os from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from . import config from .utils.logging import get_logger class Table(IndexedTableMixin): """ Wraps a pyarrow Table by using composition. This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`. It implements all the basic attributes/methods of the pyarrow Table class except the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column, append_column, remove_column, set_column, rename_columns` and `drop`. The implementation of these methods differs for the subclasses. """ def __init__(self, table: pa.Table): super().__init__(table) self.table = table def __deepcopy__(self, memo: dict): # arrow tables are immutable, so there's no need to copy self.table # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason # by adding it to the memo, self.table won't be copied memo[id(self.table)] = self.table # same for the recordbatches used by the index memo[id(self._batches)] = list(self._batches) return _deepcopy(self, memo) def validate(self, *args, **kwargs): """ Perform validation checks. An exception is raised if validation fails. By default only cheap validation checks are run. Pass `full=True` for thorough validation checks (potentially `O(n)`). Args: full (`bool`, defaults to `False`): If `True`, run expensive checks, otherwise cheap checks only. Raises: `pa.lib.ArrowInvalid`: if validation fails """ return self.table.validate(*args, **kwargs) def equals(self, *args, **kwargs): """ Check if contents of two tables are equal. Args: other ([`~datasets.table.Table`]): Table to compare against. check_metadata `bool`, defaults to `False`): Whether schema metadata equality should be checked as well. Returns: `bool` """ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args) kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs} return self.table.equals(*args, **kwargs) def to_batches(self, *args, **kwargs): """ Convert Table to list of (contiguous) `RecordBatch` objects. Args: max_chunksize (`int`, defaults to `None`): Maximum size for `RecordBatch` chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `List[pyarrow.RecordBatch]` """ return self.table.to_batches(*args, **kwargs) def to_pydict(self, *args, **kwargs): """ Convert the Table to a `dict` or `OrderedDict`. Returns: `dict` """ return self.table.to_pydict(*args, **kwargs) def to_pylist(self, *args, **kwargs): """ Convert the Table to a list Returns: `list` """ return self.table.to_pylist(*args, **kwargs) def to_pandas(self, *args, **kwargs): """ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate. Args: memory_pool (`MemoryPool`, defaults to `None`): Arrow MemoryPool to use for allocations. Uses the default memory pool is not passed. strings_to_categorical (`bool`, defaults to `False`): Encode string (UTF8) and binary types to `pandas.Categorical`. categories (`list`, defaults to `empty`): List of fields that should be returned as `pandas.Categorical`. Only applies to table-like data structures. zero_copy_only (`bool`, defaults to `False`): Raise an `ArrowException` if this function call would require copying the underlying data. 
integer_object_nulls (`bool`, defaults to `False`): Cast integers with nulls to objects. date_as_object (`bool`, defaults to `True`): Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype. timestamp_as_object (`bool`, defaults to `False`): Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is useful if you have timestamps that don't fit in the normal date range of nanosecond timestamps (1678 CE-2262 CE). If `False`, all timestamps are converted to `datetime64[ns]` dtype. use_threads (`bool`, defaults to `True`): Whether to parallelize the conversion using multiple threads. deduplicate_objects (`bool`, defaults to `False`): Do not create multiple copies Python objects when created, to save on memory use. Conversion will be slower. ignore_metadata (`bool`, defaults to `False`): If `True`, do not use the 'pandas' metadata to reconstruct the DataFrame index, if present. safe (`bool`, defaults to `True`): For certain data types, a cast is needed in order to store the data in a pandas DataFrame or Series (e.g. timestamps are always stored as nanoseconds in pandas). This option controls whether it is a safe cast or not. split_blocks (`bool`, defaults to `False`): If `True`, generate one internal "block" for each column when creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this can temporarily reduce memory note that various pandas operations can trigger "consolidation" which may balloon memory use. self_destruct (`bool`, defaults to `False`): EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow memory while converting the Arrow object to pandas. If you use the object after calling `to_pandas` with this option it will crash your program. types_mapper (`function`, defaults to `None`): A function mapping a pyarrow DataType to a pandas `ExtensionDtype`. This can be used to override the default pandas type for conversion of built-in pyarrow types or in absence of `pandas_metadata` in the Table schema. The function receives a pyarrow DataType and is expected to return a pandas `ExtensionDtype` or `None` if the default conversion should be used for that type. If you have a dictionary mapping, you can pass `dict.get` as function. Returns: `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object """ return self.table.to_pandas(*args, **kwargs) def to_string(self, *args, **kwargs): return self.table.to_string(*args, **kwargs) def to_reader(self, max_chunksize: Optional[int] = None): """ Convert the Table to a RecordBatchReader. Note that this method is zero-copy, it merely exposes the same data under a different API. Args: max_chunksize (`int`, defaults to `None`) Maximum size for RecordBatch chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `pyarrow.RecordBatchReader` """ return self.table.to_reader(max_chunksize=max_chunksize) def field(self, *args, **kwargs): """ Select a schema field by its column name or numeric index. Args: i (`Union[int, str]`): The index or name of the field to retrieve. Returns: `pyarrow.Field` """ return self.table.field(*args, **kwargs) def column(self, *args, **kwargs): """ Select a column by its column name, or numeric index. Args: i (`Union[int, str]`): The index or name of the column to retrieve. Returns: `pyarrow.ChunkedArray` """ return self.table.column(*args, **kwargs) def itercolumns(self, *args, **kwargs): """ Iterator over all columns in their numerical order. 
Yields: `pyarrow.ChunkedArray` """ return self.table.itercolumns(*args, **kwargs) def schema(self): """ Schema of the table and its columns. Returns: `pyarrow.Schema` """ return self.table.schema def columns(self): """ List of all columns in numerical order. Returns: `List[pa.ChunkedArray]` """ return self.table.columns def num_columns(self): """ Number of columns in this table. Returns: int """ return self.table.num_columns def num_rows(self): """ Number of rows in this table. Due to the definition of a table, all columns have the same number of rows. Returns: int """ return self.table.num_rows def shape(self): """ Dimensions of the table: (#rows, #columns). Returns: `(int, int)`: Number of rows and number of columns. """ return self.table.shape def nbytes(self): """ Total number of bytes consumed by the elements of the table. """ return self.table.nbytes def column_names(self): """ Names of the table's columns. """ return self.table.column_names def __eq__(self, other): return self.equals(other) def __getitem__(self, i): return self.table[i] def __len__(self): return len(self.table) def __repr__(self): return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__) def __str__(self): return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__) def slice(self, *args, **kwargs): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ raise NotImplementedError() def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ raise NotImplementedError() def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ raise NotImplementedError() def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ raise NotImplementedError() def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. 
column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ raise NotImplementedError() def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ raise NotImplementedError() def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ raise NotImplementedError() def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: `datasets.table.Table`: table with only a subset of the columns """ raise NotImplementedError() def _in_memory_arrow_table_from_file(filename: str) -> pa.Table: in_memory_stream = pa.input_stream(filename) opened_stream = pa.ipc.open_stream(in_memory_stream) pa_table = opened_stream.read_all() return pa_table
null
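Since `_in_memory_arrow_table_from_file` is a private helper, the sketch below re-implements it under an assumed public name to show the round trip it expects: a file written with Arrow's streaming IPC format (`pa.ipc.new_stream`), read back fully into RAM. The file name `data.arrow` is just an example.

```python
import pyarrow as pa

def in_memory_arrow_table_from_file(filename: str) -> pa.Table:
    # Same logic as the private helper above: open the stream-format file
    # and materialize every record batch in memory.
    opened_stream = pa.ipc.open_stream(pa.input_stream(filename))
    return opened_stream.read_all()

table = pa.table({"text": ["a", "b", "c"], "label": [0, 1, 0]})
with pa.OSFile("data.arrow", "wb") as sink:
    with pa.ipc.new_stream(sink, table.schema) as writer:
        writer.write_table(table)

loaded = in_memory_arrow_table_from_file("data.arrow")
assert loaded.equals(table)
```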
17,978
import copy import os from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from . import config from .utils.logging import get_logger class Table(IndexedTableMixin): def __init__(self, table: pa.Table): def __deepcopy__(self, memo: dict): def validate(self, *args, **kwargs): def equals(self, *args, **kwargs): def to_batches(self, *args, **kwargs): def to_pydict(self, *args, **kwargs): def to_pylist(self, *args, **kwargs): def to_pandas(self, *args, **kwargs): def to_string(self, *args, **kwargs): def to_reader(self, max_chunksize: Optional[int] = None): def field(self, *args, **kwargs): def column(self, *args, **kwargs): def itercolumns(self, *args, **kwargs): def schema(self): def columns(self): def num_columns(self): def num_rows(self): def shape(self): def nbytes(self): def column_names(self): def __eq__(self, other): def __getitem__(self, i): def __len__(self): def __repr__(self): def __str__(self): def slice(self, *args, **kwargs): def filter(self, *args, **kwargs): def flatten(self, *args, **kwargs): def combine_chunks(self, *args, **kwargs): def cast(self, *args, **kwargs): def replace_schema_metadata(self, *args, **kwargs): def add_column(self, *args, **kwargs): def append_column(self, *args, **kwargs): def remove_column(self, *args, **kwargs): def set_column(self, *args, **kwargs): def rename_columns(self, *args, **kwargs): def drop(self, *args, **kwargs): def select(self, *args, **kwargs): def _in_memory_arrow_table_from_buffer(buffer: pa.Buffer) -> pa.Table: stream = pa.BufferReader(buffer) opened_stream = pa.ipc.open_stream(stream) table = opened_stream.read_all() return table
null
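`_in_memory_arrow_table_from_buffer` does the same thing for bytes that never touch disk. A minimal sketch, re-implemented here because the helper is private:

```python
import pyarrow as pa

def in_memory_arrow_table_from_buffer(buffer: pa.Buffer) -> pa.Table:
    # Read an Arrow IPC stream held entirely in a pa.Buffer.
    return pa.ipc.open_stream(pa.BufferReader(buffer)).read_all()

table = pa.table({"idx": [1, 2, 3]})
sink = pa.BufferOutputStream()
with pa.ipc.new_stream(sink, table.schema) as writer:
    writer.write_table(table)
buffer = sink.getvalue()  # pa.Buffer holding the streamed bytes

assert in_memory_arrow_table_from_buffer(buffer).equals(table)
```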
17,979
import copy import os from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from . import config from .utils.logging import get_logger The provided code snippet includes necessary dependencies for implementing the `read_schema_from_file` function. Write a Python function `def read_schema_from_file(filename: str) -> pa.Schema` to solve the following problem: Infer arrow table schema from file without loading whole file into memory. Usefull especially while having very big files. Here is the function: def read_schema_from_file(filename: str) -> pa.Schema: """ Infer arrow table schema from file without loading whole file into memory. Usefull especially while having very big files. """ with pa.memory_map(filename) as memory_mapped_stream: schema = pa.ipc.open_stream(memory_mapped_stream).schema return schema
Infer the Arrow table schema from a file without loading the whole file into memory. Especially useful for very big files.
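A short usage sketch for `read_schema_from_file`, assuming it is importable from `datasets.table` as in the snippet above; the path `data.arrow` and its contents are illustrative. Only the stream header is read, so no record batches are loaded.

```python
import pyarrow as pa
from datasets.table import read_schema_from_file

table = pa.table({"text": ["a", "b"], "label": [0, 1]})
with pa.OSFile("data.arrow", "wb") as sink:
    with pa.ipc.new_stream(sink, table.schema) as writer:
        writer.write_table(table)

schema = read_schema_from_file("data.arrow")
print(schema)  # text: string, label: int64 -- the table itself is never materialized
```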
17,980
import copy import os from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from . import config from .utils.logging import get_logger def _memory_mapped_record_batch_reader_from_file(filename: str) -> pa.RecordBatchStreamReader: memory_mapped_stream = pa.memory_map(filename) return pa.ipc.open_stream(memory_mapped_stream) class Table(IndexedTableMixin): """ Wraps a pyarrow Table by using composition. This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`. It implements all the basic attributes/methods of the pyarrow Table class except the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column, append_column, remove_column, set_column, rename_columns` and `drop`. The implementation of these methods differs for the subclasses. """ def __init__(self, table: pa.Table): super().__init__(table) self.table = table def __deepcopy__(self, memo: dict): # arrow tables are immutable, so there's no need to copy self.table # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason # by adding it to the memo, self.table won't be copied memo[id(self.table)] = self.table # same for the recordbatches used by the index memo[id(self._batches)] = list(self._batches) return _deepcopy(self, memo) def validate(self, *args, **kwargs): """ Perform validation checks. An exception is raised if validation fails. By default only cheap validation checks are run. Pass `full=True` for thorough validation checks (potentially `O(n)`). Args: full (`bool`, defaults to `False`): If `True`, run expensive checks, otherwise cheap checks only. Raises: `pa.lib.ArrowInvalid`: if validation fails """ return self.table.validate(*args, **kwargs) def equals(self, *args, **kwargs): """ Check if contents of two tables are equal. Args: other ([`~datasets.table.Table`]): Table to compare against. check_metadata `bool`, defaults to `False`): Whether schema metadata equality should be checked as well. Returns: `bool` """ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args) kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs} return self.table.equals(*args, **kwargs) def to_batches(self, *args, **kwargs): """ Convert Table to list of (contiguous) `RecordBatch` objects. Args: max_chunksize (`int`, defaults to `None`): Maximum size for `RecordBatch` chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `List[pyarrow.RecordBatch]` """ return self.table.to_batches(*args, **kwargs) def to_pydict(self, *args, **kwargs): """ Convert the Table to a `dict` or `OrderedDict`. Returns: `dict` """ return self.table.to_pydict(*args, **kwargs) def to_pylist(self, *args, **kwargs): """ Convert the Table to a list Returns: `list` """ return self.table.to_pylist(*args, **kwargs) def to_pandas(self, *args, **kwargs): """ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate. Args: memory_pool (`MemoryPool`, defaults to `None`): Arrow MemoryPool to use for allocations. Uses the default memory pool is not passed. strings_to_categorical (`bool`, defaults to `False`): Encode string (UTF8) and binary types to `pandas.Categorical`. categories (`list`, defaults to `empty`): List of fields that should be returned as `pandas.Categorical`. 
Only applies to table-like data structures. zero_copy_only (`bool`, defaults to `False`): Raise an `ArrowException` if this function call would require copying the underlying data. integer_object_nulls (`bool`, defaults to `False`): Cast integers with nulls to objects. date_as_object (`bool`, defaults to `True`): Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype. timestamp_as_object (`bool`, defaults to `False`): Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is useful if you have timestamps that don't fit in the normal date range of nanosecond timestamps (1678 CE-2262 CE). If `False`, all timestamps are converted to `datetime64[ns]` dtype. use_threads (`bool`, defaults to `True`): Whether to parallelize the conversion using multiple threads. deduplicate_objects (`bool`, defaults to `False`): Do not create multiple copies Python objects when created, to save on memory use. Conversion will be slower. ignore_metadata (`bool`, defaults to `False`): If `True`, do not use the 'pandas' metadata to reconstruct the DataFrame index, if present. safe (`bool`, defaults to `True`): For certain data types, a cast is needed in order to store the data in a pandas DataFrame or Series (e.g. timestamps are always stored as nanoseconds in pandas). This option controls whether it is a safe cast or not. split_blocks (`bool`, defaults to `False`): If `True`, generate one internal "block" for each column when creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this can temporarily reduce memory note that various pandas operations can trigger "consolidation" which may balloon memory use. self_destruct (`bool`, defaults to `False`): EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow memory while converting the Arrow object to pandas. If you use the object after calling `to_pandas` with this option it will crash your program. types_mapper (`function`, defaults to `None`): A function mapping a pyarrow DataType to a pandas `ExtensionDtype`. This can be used to override the default pandas type for conversion of built-in pyarrow types or in absence of `pandas_metadata` in the Table schema. The function receives a pyarrow DataType and is expected to return a pandas `ExtensionDtype` or `None` if the default conversion should be used for that type. If you have a dictionary mapping, you can pass `dict.get` as function. Returns: `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object """ return self.table.to_pandas(*args, **kwargs) def to_string(self, *args, **kwargs): return self.table.to_string(*args, **kwargs) def to_reader(self, max_chunksize: Optional[int] = None): """ Convert the Table to a RecordBatchReader. Note that this method is zero-copy, it merely exposes the same data under a different API. Args: max_chunksize (`int`, defaults to `None`) Maximum size for RecordBatch chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `pyarrow.RecordBatchReader` """ return self.table.to_reader(max_chunksize=max_chunksize) def field(self, *args, **kwargs): """ Select a schema field by its column name or numeric index. Args: i (`Union[int, str]`): The index or name of the field to retrieve. Returns: `pyarrow.Field` """ return self.table.field(*args, **kwargs) def column(self, *args, **kwargs): """ Select a column by its column name, or numeric index. Args: i (`Union[int, str]`): The index or name of the column to retrieve. 
Returns: `pyarrow.ChunkedArray` """ return self.table.column(*args, **kwargs) def itercolumns(self, *args, **kwargs): """ Iterator over all columns in their numerical order. Yields: `pyarrow.ChunkedArray` """ return self.table.itercolumns(*args, **kwargs) def schema(self): """ Schema of the table and its columns. Returns: `pyarrow.Schema` """ return self.table.schema def columns(self): """ List of all columns in numerical order. Returns: `List[pa.ChunkedArray]` """ return self.table.columns def num_columns(self): """ Number of columns in this table. Returns: int """ return self.table.num_columns def num_rows(self): """ Number of rows in this table. Due to the definition of a table, all columns have the same number of rows. Returns: int """ return self.table.num_rows def shape(self): """ Dimensions of the table: (#rows, #columns). Returns: `(int, int)`: Number of rows and number of columns. """ return self.table.shape def nbytes(self): """ Total number of bytes consumed by the elements of the table. """ return self.table.nbytes def column_names(self): """ Names of the table's columns. """ return self.table.column_names def __eq__(self, other): return self.equals(other) def __getitem__(self, i): return self.table[i] def __len__(self): return len(self.table) def __repr__(self): return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__) def __str__(self): return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__) def slice(self, *args, **kwargs): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ raise NotImplementedError() def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ raise NotImplementedError() def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ raise NotImplementedError() def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ raise NotImplementedError() def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. 
Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ raise NotImplementedError() def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ raise NotImplementedError() def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ raise NotImplementedError() def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: `datasets.table.Table`: table with only a subset of the columns """ raise NotImplementedError() def _memory_mapped_arrow_table_from_file(filename: str) -> pa.Table: opened_stream = _memory_mapped_record_batch_reader_from_file(filename) pa_table = opened_stream.read_all() return pa_table
null
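A brief, hedged usage sketch of `_memory_mapped_arrow_table_from_file` from the record above: it opens the file via `pa.memory_map`, so the returned table is backed by the file on disk rather than by process RAM. The file name `data.arrow` is hypothetical and is written first so the snippet is self-contained.

import pyarrow as pa

# Write a small Arrow IPC stream to disk (hypothetical file name).
with pa.OSFile("data.arrow", "wb") as sink:
    with pa.ipc.new_stream(sink, pa.schema([("col", pa.int64())])) as writer:
        writer.write_table(pa.table({"col": [1, 2, 3]}))

# Re-open it memory mapped: rows are read from the mapped file, not copied onto the heap.
table = _memory_mapped_arrow_table_from_file("data.arrow")
print(table.num_rows)  # 3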
17,981
import copy import os from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from . import config from .utils.logging import get_logger The provided code snippet includes necessary dependencies for implementing the `_deepcopy` function. Write a Python function `def _deepcopy(x, memo: dict)` to solve the following problem: deepcopy a regular class instance Here is the function: def _deepcopy(x, memo: dict): """deepcopy a regular class instance""" cls = x.__class__ result = cls.__new__(cls) memo[id(x)] = result for k, v in x.__dict__.items(): setattr(result, k, copy.deepcopy(v, memo)) return result
deepcopy a regular class instance
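A minimal usage sketch, assuming a throwaway `Holder` class defined here only for illustration: `_deepcopy` rebuilds the instance attribute by attribute and records the copy in the memo so later references to the same object reuse it.

import copy

class Holder:
    def __init__(self, values):
        self.values = values

original = Holder([1, 2, 3])
memo = {}
clone = _deepcopy(original, memo)

assert clone is not original and clone.values is not original.values
assert memo[id(original)] is clone  # the memo maps the source object to its copy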
17,982
import copy import os from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from . import config from .utils.logging import get_logger The provided code snippet includes necessary dependencies for implementing the `_interpolation_search` function. Write a Python function `def _interpolation_search(arr: List[int], x: int) -> int` to solve the following problem: Return the position i of a sorted array so that arr[i] <= x < arr[i+1] Args: arr (`List[int]`): non-empty sorted list of integers x (`int`): query Returns: `int`: the position i so that arr[i] <= x < arr[i+1] Raises: `IndexError`: if the array is empty or if the query is outside the array values Here is the function: def _interpolation_search(arr: List[int], x: int) -> int: """ Return the position i of a sorted array so that arr[i] <= x < arr[i+1] Args: arr (`List[int]`): non-empty sorted list of integers x (`int`): query Returns: `int`: the position i so that arr[i] <= x < arr[i+1] Raises: `IndexError`: if the array is empty or if the query is outside the array values """ i, j = 0, len(arr) - 1 while i < j and arr[i] <= x < arr[j]: k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i])) if arr[k] <= x < arr[k + 1]: return k elif arr[k] < x: i, j = k + 1, j else: i, j = i, k raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else 'none'}.")
Return the position i of a sorted array so that arr[i] <= x < arr[i+1] Args: arr (`List[int]`): non-empty sorted list of integers x (`int`): query Returns: `int`: the position i so that arr[i] <= x < arr[i+1] Raises: `IndexError`: if the array is empty or if the query is outside the array values
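A short, hypothetical example: given the cumulative row offsets of a few record batches, `_interpolation_search` returns the index of the batch whose row range contains the query.

offsets = [0, 100, 250, 400]  # cumulative row counts over three record batches

assert _interpolation_search(offsets, 0) == 0     # 0   <= 0   < 100
assert _interpolation_search(offsets, 120) == 1   # 100 <= 120 < 250
assert _interpolation_search(offsets, 399) == 2   # 250 <= 399 < 400
# A query at or beyond the last offset (e.g. 400) raises IndexError.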
17,983
import copy import os from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from . import config from .utils.logging import get_logger class Table(IndexedTableMixin): """ Wraps a pyarrow Table by using composition. This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`. It implements all the basic attributes/methods of the pyarrow Table class except the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column, append_column, remove_column, set_column, rename_columns` and `drop`. The implementation of these methods differs for the subclasses. """ def __init__(self, table: pa.Table): super().__init__(table) self.table = table def __deepcopy__(self, memo: dict): # arrow tables are immutable, so there's no need to copy self.table # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason # by adding it to the memo, self.table won't be copied memo[id(self.table)] = self.table # same for the recordbatches used by the index memo[id(self._batches)] = list(self._batches) return _deepcopy(self, memo) def validate(self, *args, **kwargs): """ Perform validation checks. An exception is raised if validation fails. By default only cheap validation checks are run. Pass `full=True` for thorough validation checks (potentially `O(n)`). Args: full (`bool`, defaults to `False`): If `True`, run expensive checks, otherwise cheap checks only. Raises: `pa.lib.ArrowInvalid`: if validation fails """ return self.table.validate(*args, **kwargs) def equals(self, *args, **kwargs): """ Check if contents of two tables are equal. Args: other ([`~datasets.table.Table`]): Table to compare against. check_metadata `bool`, defaults to `False`): Whether schema metadata equality should be checked as well. Returns: `bool` """ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args) kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs} return self.table.equals(*args, **kwargs) def to_batches(self, *args, **kwargs): """ Convert Table to list of (contiguous) `RecordBatch` objects. Args: max_chunksize (`int`, defaults to `None`): Maximum size for `RecordBatch` chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `List[pyarrow.RecordBatch]` """ return self.table.to_batches(*args, **kwargs) def to_pydict(self, *args, **kwargs): """ Convert the Table to a `dict` or `OrderedDict`. Returns: `dict` """ return self.table.to_pydict(*args, **kwargs) def to_pylist(self, *args, **kwargs): """ Convert the Table to a list Returns: `list` """ return self.table.to_pylist(*args, **kwargs) def to_pandas(self, *args, **kwargs): """ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate. Args: memory_pool (`MemoryPool`, defaults to `None`): Arrow MemoryPool to use for allocations. Uses the default memory pool is not passed. strings_to_categorical (`bool`, defaults to `False`): Encode string (UTF8) and binary types to `pandas.Categorical`. categories (`list`, defaults to `empty`): List of fields that should be returned as `pandas.Categorical`. Only applies to table-like data structures. zero_copy_only (`bool`, defaults to `False`): Raise an `ArrowException` if this function call would require copying the underlying data. 
integer_object_nulls (`bool`, defaults to `False`): Cast integers with nulls to objects. date_as_object (`bool`, defaults to `True`): Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype. timestamp_as_object (`bool`, defaults to `False`): Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is useful if you have timestamps that don't fit in the normal date range of nanosecond timestamps (1678 CE-2262 CE). If `False`, all timestamps are converted to `datetime64[ns]` dtype. use_threads (`bool`, defaults to `True`): Whether to parallelize the conversion using multiple threads. deduplicate_objects (`bool`, defaults to `False`): Do not create multiple copies Python objects when created, to save on memory use. Conversion will be slower. ignore_metadata (`bool`, defaults to `False`): If `True`, do not use the 'pandas' metadata to reconstruct the DataFrame index, if present. safe (`bool`, defaults to `True`): For certain data types, a cast is needed in order to store the data in a pandas DataFrame or Series (e.g. timestamps are always stored as nanoseconds in pandas). This option controls whether it is a safe cast or not. split_blocks (`bool`, defaults to `False`): If `True`, generate one internal "block" for each column when creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this can temporarily reduce memory note that various pandas operations can trigger "consolidation" which may balloon memory use. self_destruct (`bool`, defaults to `False`): EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow memory while converting the Arrow object to pandas. If you use the object after calling `to_pandas` with this option it will crash your program. types_mapper (`function`, defaults to `None`): A function mapping a pyarrow DataType to a pandas `ExtensionDtype`. This can be used to override the default pandas type for conversion of built-in pyarrow types or in absence of `pandas_metadata` in the Table schema. The function receives a pyarrow DataType and is expected to return a pandas `ExtensionDtype` or `None` if the default conversion should be used for that type. If you have a dictionary mapping, you can pass `dict.get` as function. Returns: `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object """ return self.table.to_pandas(*args, **kwargs) def to_string(self, *args, **kwargs): return self.table.to_string(*args, **kwargs) def to_reader(self, max_chunksize: Optional[int] = None): """ Convert the Table to a RecordBatchReader. Note that this method is zero-copy, it merely exposes the same data under a different API. Args: max_chunksize (`int`, defaults to `None`) Maximum size for RecordBatch chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `pyarrow.RecordBatchReader` """ return self.table.to_reader(max_chunksize=max_chunksize) def field(self, *args, **kwargs): """ Select a schema field by its column name or numeric index. Args: i (`Union[int, str]`): The index or name of the field to retrieve. Returns: `pyarrow.Field` """ return self.table.field(*args, **kwargs) def column(self, *args, **kwargs): """ Select a column by its column name, or numeric index. Args: i (`Union[int, str]`): The index or name of the column to retrieve. Returns: `pyarrow.ChunkedArray` """ return self.table.column(*args, **kwargs) def itercolumns(self, *args, **kwargs): """ Iterator over all columns in their numerical order. 
Yields: `pyarrow.ChunkedArray` """ return self.table.itercolumns(*args, **kwargs) def schema(self): """ Schema of the table and its columns. Returns: `pyarrow.Schema` """ return self.table.schema def columns(self): """ List of all columns in numerical order. Returns: `List[pa.ChunkedArray]` """ return self.table.columns def num_columns(self): """ Number of columns in this table. Returns: int """ return self.table.num_columns def num_rows(self): """ Number of rows in this table. Due to the definition of a table, all columns have the same number of rows. Returns: int """ return self.table.num_rows def shape(self): """ Dimensions of the table: (#rows, #columns). Returns: `(int, int)`: Number of rows and number of columns. """ return self.table.shape def nbytes(self): """ Total number of bytes consumed by the elements of the table. """ return self.table.nbytes def column_names(self): """ Names of the table's columns. """ return self.table.column_names def __eq__(self, other): return self.equals(other) def __getitem__(self, i): return self.table[i] def __len__(self): return len(self.table) def __repr__(self): return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__) def __str__(self): return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__) def slice(self, *args, **kwargs): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ raise NotImplementedError() def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ raise NotImplementedError() def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ raise NotImplementedError() def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ raise NotImplementedError() def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. 
column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ raise NotImplementedError() def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ raise NotImplementedError() def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ raise NotImplementedError() def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: `datasets.table.Table`: table with only a subset of the columns """ raise NotImplementedError() class MemoryMappedTable(TableBlock): """ The table is said memory mapped when it doesn't use the user's RAM but loads the data from the disk instead. Pickling it doesn't copy the data into memory. Instead, only the path to the memory mapped arrow file is pickled, as well as the list of transforms to "replay" when reloading the table from the disk. Its implementation requires to store an history of all the transforms that were applied to the underlying pyarrow Table, so that they can be "replayed" when reloading the Table from the disk. This is different from the `InMemoryTable` table, for which pickling does copy all the data in memory. `InMemoryTable` must be used when data fit in memory, while `MemoryMapped` are reserved for data bigger than memory or when you want the memory footprint of your application to stay low. 
""" def __init__(self, table: pa.Table, path: str, replays: Optional[List[Replay]] = None): super().__init__(table) self.path = os.path.abspath(path) self.replays: List[Replay] = replays if replays is not None else [] def from_file(cls, filename: str, replays=None): table = _memory_mapped_arrow_table_from_file(filename) table = cls._apply_replays(table, replays) return cls(table, filename, replays) def __getstate__(self): return {"path": self.path, "replays": self.replays} def __setstate__(self, state): path = state["path"] replays = state["replays"] table = _memory_mapped_arrow_table_from_file(path) table = self._apply_replays(table, replays) MemoryMappedTable.__init__(self, table, path=path, replays=replays) def _apply_replays(table: pa.Table, replays: Optional[List[Replay]] = None) -> pa.Table: if replays is not None: for name, args, kwargs in replays: if name == "cast": table = table_cast(table, *args, **kwargs) elif name == "flatten": table = table_flatten(table, *args, **kwargs) else: table = getattr(table, name)(*args, **kwargs) return table def _append_replay(self, replay: Replay) -> List[Replay]: replays = copy.deepcopy(self.replays) replays.append(replay) return replays def slice(self, offset=0, length=None): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ replay = ("slice", (offset, length), {}) replays = self._append_replay(replay) # Use fast slicing here return MemoryMappedTable(self.fast_slice(offset=offset, length=length), self.path, replays) def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ replay = ("filter", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.filter(*args, **kwargs), self.path, replays) def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ replay = ("flatten", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(table_flatten(self.table, *args, **kwargs), self.path, replays) def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the ChunkedArray of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ replay = ("combine_chunks", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.combine_chunks(*args, **kwargs), self.path, replays) def cast(self, *args, **kwargs): """ Cast table values to another schema Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. 
Returns: `datasets.table.Table` """ replay = ("cast", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(table_cast(self.table, *args, **kwargs), self.path, replays) def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata. Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ replay = ("replace_schema_metadata", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.replace_schema_metadata(*args, **kwargs), self.path, replays) def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ replay = ("add_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.add_column(*args, **kwargs), self.path, replays) def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ replay = ("append_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.append_column(*args, **kwargs), self.path, replays) def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ replay = ("remove_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.remove_column(*args, **kwargs), self.path, replays) def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ replay = ("set_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.set_column(*args, **kwargs), self.path, replays) def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ replay = ("rename_columns", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.rename_columns(*args, **kwargs), self.path, replays) def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. 
""" replay = ("drop", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.drop(*args, **kwargs), self.path, replays) def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved. """ replay = ("select", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.select(*args, **kwargs), self.path, replays) class ConcatenationTable(Table): """ The table comes from the concatenation of several tables called blocks. It enables concatenation on both axis 0 (append rows) and axis 1 (append columns). The underlying tables are called "blocks" and can be either `InMemoryTable` or `MemoryMappedTable` objects. This allows to combine tables that come from memory or that are memory mapped. When a `ConcatenationTable` is pickled, then each block is pickled: - the `InMemoryTable` objects are pickled by copying all the data in memory. - the MemoryMappedTable objects are pickled without copying the data into memory. Instead, only the path to the memory mapped arrow file is pickled, as well as the list of transforms to "replays" when reloading the table from the disk. Its implementation requires to store each block separately. The `blocks` attributes stores a list of list of blocks. The first axis concatenates the tables along the axis 0 (it appends rows), while the second axis concatenates tables along the axis 1 (it appends columns). If some columns are missing when concatenating on axis 0, they are filled with null values. This is done using `pyarrow.concat_tables(tables, promote=True)`. You can access the fully combined table by accessing the `ConcatenationTable.table` attribute, and the blocks by accessing the `ConcatenationTable.blocks` attribute. """ def __init__(self, table: pa.Table, blocks: List[List[TableBlock]]): super().__init__(table) self.blocks = blocks # Check that all the blocks have the right type. # Only InMemoryTable and MemoryMappedTable are allowed. for subtables in blocks: for subtable in subtables: if not isinstance(subtable, TableBlock): raise TypeError( "The blocks of a ConcatenationTable must be InMemoryTable or MemoryMappedTable objects" f", but got {subtable}." 
) def __getstate__(self): return {"blocks": self.blocks, "schema": self.table.schema} def __setstate__(self, state): blocks = state["blocks"] schema = state["schema"] table = self._concat_blocks_horizontally_and_vertically(blocks) if schema is not None and table.schema != schema: # We fix the columns by concatenating with an empty table with the right columns empty_table = pa.Table.from_batches([], schema=schema) # we set promote=True to fill missing columns with null values if config.PYARROW_VERSION.major < 14: table = pa.concat_tables([table, empty_table], promote=True) else: table = pa.concat_tables([table, empty_table], promote_options="default") ConcatenationTable.__init__(self, table, blocks=blocks) def _concat_blocks(blocks: List[Union[TableBlock, pa.Table]], axis: int = 0) -> pa.Table: pa_tables = [table.table if hasattr(table, "table") else table for table in blocks] if axis == 0: # we set promote=True to fill missing columns with null values if config.PYARROW_VERSION.major < 14: return pa.concat_tables(pa_tables, promote=True) else: return pa.concat_tables(pa_tables, promote_options="default") elif axis == 1: for i, table in enumerate(pa_tables): if i == 0: pa_table = table else: for name, col in zip(table.column_names, table.columns): pa_table = pa_table.append_column(name, col) return pa_table else: raise ValueError("'axis' must be either 0 or 1") def _concat_blocks_horizontally_and_vertically(cls, blocks: List[List[TableBlock]]) -> pa.Table: pa_tables_to_concat_vertically = [] for i, tables in enumerate(blocks): if not tables: continue pa_table_horizontally_concatenated = cls._concat_blocks(tables, axis=1) pa_tables_to_concat_vertically.append(pa_table_horizontally_concatenated) return cls._concat_blocks(pa_tables_to_concat_vertically, axis=0) def _merge_blocks(cls, blocks: TableBlockContainer, axis: Optional[int] = None) -> TableBlockContainer: if axis is not None: merged_blocks = [] for is_in_memory, block_group in groupby(blocks, key=lambda x: isinstance(x, InMemoryTable)): if is_in_memory: block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))] merged_blocks += list(block_group) else: # both merged_blocks = [cls._merge_blocks(row_block, axis=1) for row_block in blocks] if all(len(row_block) == 1 for row_block in merged_blocks): merged_blocks = cls._merge_blocks( [block for row_block in merged_blocks for block in row_block], axis=0 ) return merged_blocks def _consolidate_blocks(cls, blocks: TableBlockContainer) -> TableBlockContainer: if isinstance(blocks, TableBlock): return blocks elif isinstance(blocks[0], TableBlock): return cls._merge_blocks(blocks, axis=0) else: return cls._merge_blocks(blocks) def from_blocks(cls, blocks: TableBlockContainer) -> "ConcatenationTable": blocks = cls._consolidate_blocks(blocks) if isinstance(blocks, TableBlock): table = blocks return cls(table.table, [[table]]) elif isinstance(blocks[0], TableBlock): table = cls._concat_blocks(blocks, axis=0) blocks = [[t] for t in blocks] return cls(table, blocks) else: table = cls._concat_blocks_horizontally_and_vertically(blocks) return cls(table, blocks) def from_tables(cls, tables: List[Union[pa.Table, Table]], axis: int = 0) -> "ConcatenationTable": """Create `ConcatenationTable` from list of tables. Args: tables (list of `Table` or list of `pyarrow.Table`): List of tables. axis (`{0, 1}`, defaults to `0`, meaning over rows): Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns (horizontally). 
<Added version="1.6.0"/> """ def to_blocks(table: Union[pa.Table, Table]) -> List[List[TableBlock]]: if isinstance(table, pa.Table): return [[InMemoryTable(table)]] elif isinstance(table, ConcatenationTable): return copy.deepcopy(table.blocks) else: return [[table]] def _slice_row_block(row_block: List[TableBlock], length: int) -> Tuple[List[TableBlock], List[TableBlock]]: sliced = [table.slice(0, length) for table in row_block] remainder = [table.slice(length, len(row_block[0]) - length) for table in row_block] return sliced, remainder def _split_both_like( result: List[List[TableBlock]], blocks: List[List[TableBlock]] ) -> Tuple[List[List[TableBlock]], List[List[TableBlock]]]: """ Make sure each row_block contain the same num_rows to be able to concatenate them on axis=1. To do so, we modify both blocks sets to have the same row_blocks boundaries. For example, if `result` has 2 row_blocks of 3 rows and `blocks` has 3 row_blocks of 2 rows, we modify both to have 4 row_blocks of size 2, 1, 1 and 2: [ x x x | x x x ] + [ y y | y y | y y ] ----------------------------- = [ x x | x | x | x x ] [ y y | y | y | y y ] """ result, blocks = list(result), list(blocks) new_result, new_blocks = [], [] while result and blocks: # we slice the longest row block to save two row blocks of same length # and we replace the long row block by its remainder if necessary if len(result[0][0]) > len(blocks[0][0]): new_blocks.append(blocks[0]) sliced, result[0] = _slice_row_block(result[0], len(blocks.pop(0)[0])) new_result.append(sliced) elif len(result[0][0]) < len(blocks[0][0]): new_result.append(result[0]) sliced, blocks[0] = _slice_row_block(blocks[0], len(result.pop(0)[0])) new_blocks.append(sliced) else: new_result.append(result.pop(0)) new_blocks.append(blocks.pop(0)) if result or blocks: raise ValueError("Failed to concatenate on axis=1 because tables don't have the same number of rows") return new_result, new_blocks def _extend_blocks( result: List[List[TableBlock]], blocks: List[List[TableBlock]], axis: int = 0 ) -> List[List[TableBlock]]: if axis == 0: result.extend(blocks) elif axis == 1: # We make sure each row_block have the same num_rows result, blocks = _split_both_like(result, blocks) for i, row_block in enumerate(blocks): result[i].extend(row_block) return result blocks = to_blocks(tables[0]) for table in tables[1:]: table_blocks = to_blocks(table) blocks = _extend_blocks(blocks, table_blocks, axis=axis) return cls.from_blocks(blocks) def _slices(self): offset = 0 for tables in self.blocks: length = len(tables[0]) yield (offset, length) offset += length def slice(self, offset=0, length=None): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ table = self.table.slice(offset, length=length) length = length if length is not None else self.num_rows - offset blocks = [] for tables in self.blocks: n_rows = len(tables[0]) if length == 0: break elif n_rows <= offset: offset = offset - n_rows elif n_rows <= offset + length: blocks.append([t.slice(offset) for t in tables]) length, offset = length + offset - n_rows, 0 else: blocks.append([t.slice(offset, length) for t in tables]) length, offset = 0, 0 return ConcatenationTable(table, blocks) def filter(self, mask, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. 
""" table = self.table.filter(mask, *args, **kwargs) blocks = [] for (offset, length), tables in zip(self._slices, self.blocks): submask = mask.slice(offset, length) blocks.append([t.filter(submask, *args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ table = table_flatten(self.table, *args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.flatten(*args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ table = self.table.combine_chunks(*args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.combine_chunks(*args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def cast(self, target_schema, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ from .features import Features table = table_cast(self.table, target_schema, *args, **kwargs) target_features = Features.from_arrow_schema(target_schema) blocks = [] for subtables in self.blocks: new_tables = [] fields = list(target_schema) for subtable in subtables: subfields = [] for name in subtable.column_names: subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name))) subfeatures = Features({subfield.name: target_features[subfield.name] for subfield in subfields}) subschema = subfeatures.arrow_schema new_tables.append(subtable.cast(subschema, *args, **kwargs)) blocks.append(new_tables) return ConcatenationTable(table, blocks) def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be `None`, which deletes any existing metadata). Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ table = self.table.replace_schema_metadata(*args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.replace_schema_metadata(*args, **kwargs) for t in tables]) return ConcatenationTable(table, self.blocks) def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. 
Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, i, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ table = self.table.remove_column(i, *args, **kwargs) name = self.table.column_names[i] blocks = [] for tables in self.blocks: blocks.append( [ t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t for t in tables ] ) return ConcatenationTable(table, blocks) def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, names, *args, **kwargs): """ Create new table with columns renamed to provided names. """ table = self.table.rename_columns(names, *args, **kwargs) names = dict(zip(self.table.column_names, names)) blocks = [] for tables in self.blocks: blocks.append( [t.rename_columns([names[name] for name in t.column_names], *args, **kwargs) for t in tables] ) return ConcatenationTable(table, blocks) def drop(self, columns, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ table = self.table.drop(columns, *args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.drop([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def select(self, columns, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved. """ table = self.table.select(columns, *args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.select([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) The provided code snippet includes necessary dependencies for implementing the `list_table_cache_files` function. Write a Python function `def list_table_cache_files(table: Table) -> List[str]` to solve the following problem: Get the cache files that are loaded by the table. Cache file are used when parts of the table come from the disk via memory mapping. Returns: `List[str]`: A list of paths to the cache files loaded by the table. Here is the function: def list_table_cache_files(table: Table) -> List[str]: """ Get the cache files that are loaded by the table. Cache file are used when parts of the table come from the disk via memory mapping. Returns: `List[str]`: A list of paths to the cache files loaded by the table. 
""" if isinstance(table, ConcatenationTable): cache_files = [] for subtables in table.blocks: for subtable in subtables: cache_files += list_table_cache_files(subtable) return cache_files elif isinstance(table, MemoryMappedTable): return [table.path] else: return []
Get the cache files that are loaded by the table. Cache files are used when parts of the table come from the disk via memory mapping. Returns: `List[str]`: A list of paths to the cache files loaded by the table.
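A hedged sketch, reusing the hypothetical `data.arrow` stream from the earlier example: only memory-mapped blocks contribute cache files, and a `ConcatenationTable` reports the files of the blocks it wraps.

# Assumes the hypothetical "data.arrow" Arrow IPC stream already exists on disk.
mm_table = MemoryMappedTable.from_file("data.arrow")
concat = ConcatenationTable.from_tables([mm_table])

print(list_table_cache_files(mm_table))  # [<absolute path to data.arrow>]
print(list_table_cache_files(concat))    # the same single path, found inside the blocks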
17,984
import copy import os from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from . import config from .utils.logging import get_logger The provided code snippet includes necessary dependencies for implementing the `_wrap_for_chunked_arrays` function. Write a Python function `def _wrap_for_chunked_arrays(func)` to solve the following problem: Apply the function on each chunk of a `pyarrow.ChunkedArray`, or on the array directly Here is the function: def _wrap_for_chunked_arrays(func): """Apply the function on each chunk of a `pyarrow.ChunkedArray`, or on the array directly""" def wrapper(array, *args, **kwargs): if isinstance(array, pa.ChunkedArray): return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks]) else: return func(array, *args, **kwargs) return wrapper
Apply the function on each chunk of a `pyarrow.ChunkedArray`, or on the array directly
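A small illustration with a hypothetical `add_one` helper: a decorated function is called once per chunk for `ChunkedArray` inputs and once for a plain `Array`.

import pyarrow as pa
import pyarrow.compute as pc

@_wrap_for_chunked_arrays
def add_one(array):
    # Invoked per chunk for ChunkedArray inputs, once for a plain Array.
    return pc.add(array, 1)

chunked = pa.chunked_array([[1, 2], [3]])
print(add_one(chunked))         # ChunkedArray with chunks [2, 3] and [4]
print(add_one(pa.array([10])))  # Array [11]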
17,985
import copy import os from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from . import config from .utils.logging import get_logger class Table(IndexedTableMixin): """ Wraps a pyarrow Table by using composition. This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`. It implements all the basic attributes/methods of the pyarrow Table class except the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column, append_column, remove_column, set_column, rename_columns` and `drop`. The implementation of these methods differs for the subclasses. """ def __init__(self, table: pa.Table): super().__init__(table) self.table = table def __deepcopy__(self, memo: dict): # arrow tables are immutable, so there's no need to copy self.table # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason # by adding it to the memo, self.table won't be copied memo[id(self.table)] = self.table # same for the recordbatches used by the index memo[id(self._batches)] = list(self._batches) return _deepcopy(self, memo) def validate(self, *args, **kwargs): """ Perform validation checks. An exception is raised if validation fails. By default only cheap validation checks are run. Pass `full=True` for thorough validation checks (potentially `O(n)`). Args: full (`bool`, defaults to `False`): If `True`, run expensive checks, otherwise cheap checks only. Raises: `pa.lib.ArrowInvalid`: if validation fails """ return self.table.validate(*args, **kwargs) def equals(self, *args, **kwargs): """ Check if contents of two tables are equal. Args: other ([`~datasets.table.Table`]): Table to compare against. check_metadata `bool`, defaults to `False`): Whether schema metadata equality should be checked as well. Returns: `bool` """ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args) kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs} return self.table.equals(*args, **kwargs) def to_batches(self, *args, **kwargs): """ Convert Table to list of (contiguous) `RecordBatch` objects. Args: max_chunksize (`int`, defaults to `None`): Maximum size for `RecordBatch` chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `List[pyarrow.RecordBatch]` """ return self.table.to_batches(*args, **kwargs) def to_pydict(self, *args, **kwargs): """ Convert the Table to a `dict` or `OrderedDict`. Returns: `dict` """ return self.table.to_pydict(*args, **kwargs) def to_pylist(self, *args, **kwargs): """ Convert the Table to a list Returns: `list` """ return self.table.to_pylist(*args, **kwargs) def to_pandas(self, *args, **kwargs): """ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate. Args: memory_pool (`MemoryPool`, defaults to `None`): Arrow MemoryPool to use for allocations. Uses the default memory pool is not passed. strings_to_categorical (`bool`, defaults to `False`): Encode string (UTF8) and binary types to `pandas.Categorical`. categories (`list`, defaults to `empty`): List of fields that should be returned as `pandas.Categorical`. Only applies to table-like data structures. zero_copy_only (`bool`, defaults to `False`): Raise an `ArrowException` if this function call would require copying the underlying data. 
integer_object_nulls (`bool`, defaults to `False`): Cast integers with nulls to objects. date_as_object (`bool`, defaults to `True`): Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype. timestamp_as_object (`bool`, defaults to `False`): Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is useful if you have timestamps that don't fit in the normal date range of nanosecond timestamps (1678 CE-2262 CE). If `False`, all timestamps are converted to `datetime64[ns]` dtype. use_threads (`bool`, defaults to `True`): Whether to parallelize the conversion using multiple threads. deduplicate_objects (`bool`, defaults to `False`): Do not create multiple copies Python objects when created, to save on memory use. Conversion will be slower. ignore_metadata (`bool`, defaults to `False`): If `True`, do not use the 'pandas' metadata to reconstruct the DataFrame index, if present. safe (`bool`, defaults to `True`): For certain data types, a cast is needed in order to store the data in a pandas DataFrame or Series (e.g. timestamps are always stored as nanoseconds in pandas). This option controls whether it is a safe cast or not. split_blocks (`bool`, defaults to `False`): If `True`, generate one internal "block" for each column when creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this can temporarily reduce memory note that various pandas operations can trigger "consolidation" which may balloon memory use. self_destruct (`bool`, defaults to `False`): EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow memory while converting the Arrow object to pandas. If you use the object after calling `to_pandas` with this option it will crash your program. types_mapper (`function`, defaults to `None`): A function mapping a pyarrow DataType to a pandas `ExtensionDtype`. This can be used to override the default pandas type for conversion of built-in pyarrow types or in absence of `pandas_metadata` in the Table schema. The function receives a pyarrow DataType and is expected to return a pandas `ExtensionDtype` or `None` if the default conversion should be used for that type. If you have a dictionary mapping, you can pass `dict.get` as function. Returns: `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object """ return self.table.to_pandas(*args, **kwargs) def to_string(self, *args, **kwargs): return self.table.to_string(*args, **kwargs) def to_reader(self, max_chunksize: Optional[int] = None): """ Convert the Table to a RecordBatchReader. Note that this method is zero-copy, it merely exposes the same data under a different API. Args: max_chunksize (`int`, defaults to `None`) Maximum size for RecordBatch chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `pyarrow.RecordBatchReader` """ return self.table.to_reader(max_chunksize=max_chunksize) def field(self, *args, **kwargs): """ Select a schema field by its column name or numeric index. Args: i (`Union[int, str]`): The index or name of the field to retrieve. Returns: `pyarrow.Field` """ return self.table.field(*args, **kwargs) def column(self, *args, **kwargs): """ Select a column by its column name, or numeric index. Args: i (`Union[int, str]`): The index or name of the column to retrieve. Returns: `pyarrow.ChunkedArray` """ return self.table.column(*args, **kwargs) def itercolumns(self, *args, **kwargs): """ Iterator over all columns in their numerical order. 
Yields: `pyarrow.ChunkedArray` """ return self.table.itercolumns(*args, **kwargs) def schema(self): """ Schema of the table and its columns. Returns: `pyarrow.Schema` """ return self.table.schema def columns(self): """ List of all columns in numerical order. Returns: `List[pa.ChunkedArray]` """ return self.table.columns def num_columns(self): """ Number of columns in this table. Returns: int """ return self.table.num_columns def num_rows(self): """ Number of rows in this table. Due to the definition of a table, all columns have the same number of rows. Returns: int """ return self.table.num_rows def shape(self): """ Dimensions of the table: (#rows, #columns). Returns: `(int, int)`: Number of rows and number of columns. """ return self.table.shape def nbytes(self): """ Total number of bytes consumed by the elements of the table. """ return self.table.nbytes def column_names(self): """ Names of the table's columns. """ return self.table.column_names def __eq__(self, other): return self.equals(other) def __getitem__(self, i): return self.table[i] def __len__(self): return len(self.table) def __repr__(self): return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__) def __str__(self): return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__) def slice(self, *args, **kwargs): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ raise NotImplementedError() def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ raise NotImplementedError() def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ raise NotImplementedError() def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ raise NotImplementedError() def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. 
column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ raise NotImplementedError() def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ raise NotImplementedError() def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ raise NotImplementedError() def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: `datasets.table.Table`: table with only a subset of the columns """ raise NotImplementedError() def cast_array_to_feature(array: pa.Array, feature: "FeatureType", allow_number_to_str=True): """Cast an array to the arrow type that corresponds to the requested feature type. For custom features like [`Audio`] or [`Image`], it takes into account the "cast_storage" methods they defined to enable casting from other arrow types. Args: array (`pa.Array`): The PyArrow array to cast. feature (`datasets.features.FeatureType`): The target feature type. allow_number_to_str (`bool`, defaults to `True`): Whether to allow casting numbers to strings. Defaults to `True`. Raises: `pa.ArrowInvalidError`: if the arrow data casting fails `TypeError`: if the target type is not supported according, e.g. 
- if a field is missing - if casting from numbers to strings and `allow_number_to_str` is `False` Returns: array (`pyarrow.Array`): the casted array """ from .features.features import Sequence, get_nested_type _c = partial(cast_array_to_feature, allow_number_to_str=allow_number_to_str) if isinstance(array, pa.ExtensionArray): array = array.storage if hasattr(feature, "cast_storage"): return feature.cast_storage(array) elif pa.types.is_struct(array.type): # feature must be a dict or Sequence(subfeatures_dict) if isinstance(feature, Sequence) and isinstance(feature.feature, dict): feature = { name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items() } if isinstance(feature, dict) and {field.name for field in array.type} == set(feature): if array.type.num_fields == 0: return array arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()] return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null()) elif pa.types.is_list(array.type): # feature must be either [subfeature] or Sequence(subfeature) if isinstance(feature, list): casted_array_values = _c(array.values, feature[0]) if casted_array_values.type == array.values.type: return array else: # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) return pa.ListArray.from_arrays(array_offsets, casted_array_values) elif isinstance(feature, Sequence): if feature.length > -1: if _are_list_values_of_length(array, feature.length): if array.null_count > 0: # Ensure each null value in the array translates to [null] * pa_type.list_size in the array's values array array_type = array.type storage_type = _storage_type(array_type) if array_type != storage_type: # Temporarily convert to the storage type to support extension types in the slice operation array = array_cast(array, storage_type, allow_number_to_str=allow_number_to_str) array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True) array = array_cast(array, array_type, allow_number_to_str=allow_number_to_str) else: array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True) array_values = array.values casted_array_values = _c(array_values, feature.feature) if config.PYARROW_VERSION.major < 15: return pa.Array.from_buffers( pa.list_(casted_array_values.type, feature.length), len(array), [array.is_valid().buffers()[1]], children=[casted_array_values], ) else: return pa.FixedSizeListArray.from_arrays( casted_array_values, feature.length, mask=array.is_null() ) else: array_values = array.values[ array.offset * feature.length : (array.offset + len(array)) * feature.length ] return pa.FixedSizeListArray.from_arrays(_c(array_values, feature.feature), feature.length) else: casted_array_values = _c(array.values, feature.feature) if casted_array_values.type == array.values.type: return array else: # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) return pa.ListArray.from_arrays(array_offsets, casted_array_values) elif pa.types.is_fixed_size_list(array.type): # feature must be either [subfeature] or Sequence(subfeature) if isinstance(feature, list): array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature[0]), mask=array.is_null()) elif 
isinstance(feature, Sequence): if feature.length > -1: if feature.length == array.type.list_size: array_values = array.values[ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size ] casted_array_values = _c(array_values, feature.feature) if config.PYARROW_VERSION.major < 15: return pa.Array.from_buffers( pa.list_(casted_array_values.type, feature.length), len(array), [array.is_valid().buffers()[1]], children=[casted_array_values], ) else: return pa.FixedSizeListArray.from_arrays( casted_array_values, feature.length, mask=array.is_null() ) else: array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature.feature), mask=array.is_null()) if pa.types.is_null(array.type): return array_cast(array, get_nested_type(feature), allow_number_to_str=allow_number_to_str) elif not isinstance(feature, (Sequence, dict, list, tuple)): return array_cast(array, feature(), allow_number_to_str=allow_number_to_str) raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{feature}") class CastError(ValueError): """When it's not possible to cast an Arrow table to a specific schema or set of features""" def __init__(self, *args, table_column_names: List[str], requested_column_names: List[str]) -> None: super().__init__(*args) self.table_column_names = table_column_names self.requested_column_names = requested_column_names def __reduce__(self): # Fix unpickling: TypeError: __init__() missing 2 required keyword-only arguments: 'table_column_names' and 'requested_column_names' return partial( CastError, table_column_names=self.table_column_names, requested_column_names=self.requested_column_names ), () def details(self): new_columns = set(self.table_column_names) - set(self.requested_column_names) missing_columns = set(self.requested_column_names) - set(self.table_column_names) if new_columns and missing_columns: return f"there are {len(new_columns)} new columns ({', '.join(new_columns)}) and {len(missing_columns)} missing columns ({', '.join(missing_columns)})." elif new_columns: return f"there are {len(new_columns)} new columns ({new_columns})" else: return f"there are {len(missing_columns)} missing columns ({missing_columns})" The provided code snippet includes necessary dependencies for implementing the `cast_table_to_features` function. Write a Python function `def cast_table_to_features(table: pa.Table, features: "Features")` to solve the following problem: Cast a table to the arrow schema that corresponds to the requested features. Args: table (`pyarrow.Table`): PyArrow table to cast. features ([`Features`]): Target features. Returns: table (`pyarrow.Table`): the casted table Here is the function: def cast_table_to_features(table: pa.Table, features: "Features"): """Cast a table to the arrow schema that corresponds to the requested features. Args: table (`pyarrow.Table`): PyArrow table to cast. features ([`Features`]): Target features. Returns: table (`pyarrow.Table`): the casted table """ if sorted(table.column_names) != sorted(features): raise CastError( f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match", table_column_names=table.column_names, requested_column_names=list(features), ) arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()] return pa.Table.from_arrays(arrays, schema=features.arrow_schema)
Cast a table to the arrow schema that corresponds to the requested features.

Args:
    table (`pyarrow.Table`):
        PyArrow table to cast.
    features ([`Features`]):
        Target features.

Returns:
    table (`pyarrow.Table`): the casted table
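As a quick illustration of the row above, here is a minimal, hypothetical usage sketch. It assumes the `datasets` library (which exposes `cast_table_to_features` from `datasets.table`) and `pyarrow` are installed; the column names, feature types, and values are made up for the example.

```python
# Minimal sketch: cast a plain pyarrow Table to a datasets Features schema.
# Assumes `datasets` and `pyarrow` are installed; names and values are illustrative.
import pyarrow as pa
from datasets import ClassLabel, Features, Value
from datasets.table import cast_table_to_features

table = pa.table({"text": ["good", "bad"], "label": [1, 0]})
features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})

# Column names must match the feature names exactly; otherwise a CastError is raised.
casted = cast_table_to_features(table, features)
print(casted.schema)  # the schema now carries the Hugging Face features metadata
```

If the table had an extra or missing column, the call would raise the `CastError` described in the prompt above, with the mismatch summarized by `CastError.details()`.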
17,986
import copy import os from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from . import config from .utils.logging import get_logger class Table(IndexedTableMixin): """ Wraps a pyarrow Table by using composition. This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`. It implements all the basic attributes/methods of the pyarrow Table class except the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column, append_column, remove_column, set_column, rename_columns` and `drop`. The implementation of these methods differs for the subclasses. """ def __init__(self, table: pa.Table): super().__init__(table) self.table = table def __deepcopy__(self, memo: dict): # arrow tables are immutable, so there's no need to copy self.table # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason # by adding it to the memo, self.table won't be copied memo[id(self.table)] = self.table # same for the recordbatches used by the index memo[id(self._batches)] = list(self._batches) return _deepcopy(self, memo) def validate(self, *args, **kwargs): """ Perform validation checks. An exception is raised if validation fails. By default only cheap validation checks are run. Pass `full=True` for thorough validation checks (potentially `O(n)`). Args: full (`bool`, defaults to `False`): If `True`, run expensive checks, otherwise cheap checks only. Raises: `pa.lib.ArrowInvalid`: if validation fails """ return self.table.validate(*args, **kwargs) def equals(self, *args, **kwargs): """ Check if contents of two tables are equal. Args: other ([`~datasets.table.Table`]): Table to compare against. check_metadata `bool`, defaults to `False`): Whether schema metadata equality should be checked as well. Returns: `bool` """ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args) kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs} return self.table.equals(*args, **kwargs) def to_batches(self, *args, **kwargs): """ Convert Table to list of (contiguous) `RecordBatch` objects. Args: max_chunksize (`int`, defaults to `None`): Maximum size for `RecordBatch` chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `List[pyarrow.RecordBatch]` """ return self.table.to_batches(*args, **kwargs) def to_pydict(self, *args, **kwargs): """ Convert the Table to a `dict` or `OrderedDict`. Returns: `dict` """ return self.table.to_pydict(*args, **kwargs) def to_pylist(self, *args, **kwargs): """ Convert the Table to a list Returns: `list` """ return self.table.to_pylist(*args, **kwargs) def to_pandas(self, *args, **kwargs): """ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate. Args: memory_pool (`MemoryPool`, defaults to `None`): Arrow MemoryPool to use for allocations. Uses the default memory pool is not passed. strings_to_categorical (`bool`, defaults to `False`): Encode string (UTF8) and binary types to `pandas.Categorical`. categories (`list`, defaults to `empty`): List of fields that should be returned as `pandas.Categorical`. Only applies to table-like data structures. zero_copy_only (`bool`, defaults to `False`): Raise an `ArrowException` if this function call would require copying the underlying data. 
integer_object_nulls (`bool`, defaults to `False`): Cast integers with nulls to objects. date_as_object (`bool`, defaults to `True`): Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype. timestamp_as_object (`bool`, defaults to `False`): Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is useful if you have timestamps that don't fit in the normal date range of nanosecond timestamps (1678 CE-2262 CE). If `False`, all timestamps are converted to `datetime64[ns]` dtype. use_threads (`bool`, defaults to `True`): Whether to parallelize the conversion using multiple threads. deduplicate_objects (`bool`, defaults to `False`): Do not create multiple copies Python objects when created, to save on memory use. Conversion will be slower. ignore_metadata (`bool`, defaults to `False`): If `True`, do not use the 'pandas' metadata to reconstruct the DataFrame index, if present. safe (`bool`, defaults to `True`): For certain data types, a cast is needed in order to store the data in a pandas DataFrame or Series (e.g. timestamps are always stored as nanoseconds in pandas). This option controls whether it is a safe cast or not. split_blocks (`bool`, defaults to `False`): If `True`, generate one internal "block" for each column when creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this can temporarily reduce memory note that various pandas operations can trigger "consolidation" which may balloon memory use. self_destruct (`bool`, defaults to `False`): EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow memory while converting the Arrow object to pandas. If you use the object after calling `to_pandas` with this option it will crash your program. types_mapper (`function`, defaults to `None`): A function mapping a pyarrow DataType to a pandas `ExtensionDtype`. This can be used to override the default pandas type for conversion of built-in pyarrow types or in absence of `pandas_metadata` in the Table schema. The function receives a pyarrow DataType and is expected to return a pandas `ExtensionDtype` or `None` if the default conversion should be used for that type. If you have a dictionary mapping, you can pass `dict.get` as function. Returns: `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object """ return self.table.to_pandas(*args, **kwargs) def to_string(self, *args, **kwargs): return self.table.to_string(*args, **kwargs) def to_reader(self, max_chunksize: Optional[int] = None): """ Convert the Table to a RecordBatchReader. Note that this method is zero-copy, it merely exposes the same data under a different API. Args: max_chunksize (`int`, defaults to `None`) Maximum size for RecordBatch chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `pyarrow.RecordBatchReader` """ return self.table.to_reader(max_chunksize=max_chunksize) def field(self, *args, **kwargs): """ Select a schema field by its column name or numeric index. Args: i (`Union[int, str]`): The index or name of the field to retrieve. Returns: `pyarrow.Field` """ return self.table.field(*args, **kwargs) def column(self, *args, **kwargs): """ Select a column by its column name, or numeric index. Args: i (`Union[int, str]`): The index or name of the column to retrieve. Returns: `pyarrow.ChunkedArray` """ return self.table.column(*args, **kwargs) def itercolumns(self, *args, **kwargs): """ Iterator over all columns in their numerical order. 
Yields: `pyarrow.ChunkedArray` """ return self.table.itercolumns(*args, **kwargs) def schema(self): """ Schema of the table and its columns. Returns: `pyarrow.Schema` """ return self.table.schema def columns(self): """ List of all columns in numerical order. Returns: `List[pa.ChunkedArray]` """ return self.table.columns def num_columns(self): """ Number of columns in this table. Returns: int """ return self.table.num_columns def num_rows(self): """ Number of rows in this table. Due to the definition of a table, all columns have the same number of rows. Returns: int """ return self.table.num_rows def shape(self): """ Dimensions of the table: (#rows, #columns). Returns: `(int, int)`: Number of rows and number of columns. """ return self.table.shape def nbytes(self): """ Total number of bytes consumed by the elements of the table. """ return self.table.nbytes def column_names(self): """ Names of the table's columns. """ return self.table.column_names def __eq__(self, other): return self.equals(other) def __getitem__(self, i): return self.table[i] def __len__(self): return len(self.table) def __repr__(self): return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__) def __str__(self): return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__) def slice(self, *args, **kwargs): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ raise NotImplementedError() def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ raise NotImplementedError() def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ raise NotImplementedError() def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ raise NotImplementedError() def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. 
column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ raise NotImplementedError() def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ raise NotImplementedError() def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ raise NotImplementedError() def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: `datasets.table.Table`: table with only a subset of the columns """ raise NotImplementedError() def embed_array_storage(array: pa.Array, feature: "FeatureType"): """Embed data into an arrays's storage. For custom features like Audio or Image, it takes into account the "embed_storage" methods they define to embed external data (e.g. an image file) into an array. <Added version="2.4.0"/> Args: array (`pa.Array`): The PyArrow array in which to embed data. feature (`datasets.features.FeatureType`): Array features. Raises: `TypeError`: if the target type is not supported according, e.g. 
- if a field is missing Returns: array (`pyarrow.Array`): the casted array """ from .features import Sequence _e = embed_array_storage if isinstance(array, pa.ExtensionArray): array = array.storage if hasattr(feature, "embed_storage"): return feature.embed_storage(array) elif pa.types.is_struct(array.type): # feature must be a dict or Sequence(subfeatures_dict) if isinstance(feature, Sequence) and isinstance(feature.feature, dict): feature = { name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items() } if isinstance(feature, dict): arrays = [_e(array.field(name), subfeature) for name, subfeature in feature.items()] return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null()) elif pa.types.is_list(array.type): # feature must be either [subfeature] or Sequence(subfeature) # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) if isinstance(feature, list): return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature[0])) if isinstance(feature, Sequence) and feature.length == -1: return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature.feature)) elif pa.types.is_fixed_size_list(array.type): # feature must be Sequence(subfeature) if isinstance(feature, Sequence) and feature.length > -1: array_values = array.values[ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size ] embedded_array_values = _e(array_values, feature.feature) if config.PYARROW_VERSION.major < 15: return pa.Array.from_buffers( pa.list_(array_values.type, feature.length), len(array), [array.is_valid().buffers()[1]], children=[embedded_array_values], ) else: return pa.FixedSizeListArray.from_arrays(embedded_array_values, feature.length, mask=array.is_null()) if not isinstance(feature, (Sequence, dict, list, tuple)): return array raise TypeError(f"Couldn't embed array of type\n{array.type}\nwith\n{feature}") def require_storage_embed(feature: FeatureType) -> bool: """Check if a (possibly nested) feature requires embedding data into storage. Args: feature (FeatureType): the feature type to be checked Returns: :obj:`bool` """ if isinstance(feature, dict): return any(require_storage_cast(f) for f in feature.values()) elif isinstance(feature, (list, tuple)): return require_storage_cast(feature[0]) elif isinstance(feature, Sequence): return require_storage_cast(feature.feature) else: return hasattr(feature, "embed_storage") class Features(dict): """A special dictionary that defines the internal structure of a dataset. Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names, and values are the type of that column. `FieldType` can be one of the following: - a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`. - a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels associated to them and will be stored as integers in the dataset. - a python `dict` which specifies that the field is a nested field containing a mapping of sub-fields to sub-fields features. It's possible to have nested fields of nested fields in an arbitrary manner. - a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. 
The python `list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature type hosted in this list. <Tip> A [`~datasets.Sequence`] with a internal dictionary feature will be automatically converted into a dictionary of lists. This behavior is implemented to have a compatilbity layer with the TensorFlow Datasets library but may be un-wanted in some cases. If you don't want this behavior, you can use a python `list` instead of the [`~datasets.Sequence`]. </Tip> - a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays. - an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data. - an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data. - [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation. """ def __init__(*args, **kwargs): # self not in the signature to allow passing self as a kwarg if not args: raise TypeError("descriptor '__init__' of 'Features' object needs an argument") self, *args = args super(Features, self).__init__(*args, **kwargs) self._column_requires_decoding: Dict[str, bool] = { col: require_decoding(feature) for col, feature in self.items() } __setitem__ = keep_features_dicts_synced(dict.__setitem__) __delitem__ = keep_features_dicts_synced(dict.__delitem__) update = keep_features_dicts_synced(dict.update) setdefault = keep_features_dicts_synced(dict.setdefault) pop = keep_features_dicts_synced(dict.pop) popitem = keep_features_dicts_synced(dict.popitem) clear = keep_features_dicts_synced(dict.clear) def __reduce__(self): return Features, (dict(self),) def type(self): """ Features field types. Returns: :obj:`pyarrow.DataType` """ return get_nested_type(self) def arrow_schema(self): """ Features schema. Returns: :obj:`pyarrow.Schema` """ hf_metadata = {"info": {"features": self.to_dict()}} return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)}) def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features": """ Construct [`Features`] from Arrow Schema. It also checks the schema metadata for Hugging Face Datasets features. Non-nullable fields are not supported and set to nullable. Args: pa_schema (`pyarrow.Schema`): Arrow Schema. Returns: [`Features`] """ # try to load features from the arrow schema metadata metadata_features = Features() if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata: metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode()) if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None: metadata_features = Features.from_dict(metadata["info"]["features"]) metadata_features_schema = metadata_features.arrow_schema obj = { field.name: ( metadata_features[field.name] if field.name in metadata_features and metadata_features_schema.field(field.name) == field else generate_from_arrow_type(field.type) ) for field in pa_schema } return cls(**obj) def from_dict(cls, dic) -> "Features": """ Construct [`Features`] from dict. Regenerate the nested feature object from a deserialized dict. 
We use the `_type` key to infer the dataclass name of the feature `FieldType`. It allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to [`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that [`Value`] automatically performs. Args: dic (`dict[str, Any]`): Python dictionary. Returns: `Features` Example:: >>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}}) {'_type': Value(dtype='string', id=None)} """ obj = generate_from_dict(dic) return cls(**obj) def to_dict(self): return asdict(self) def _to_yaml_list(self) -> list: # we compute the YAML list from the dict representation that is used for JSON dump yaml_data = self.to_dict() def simplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") # # sequence: -> sequence: int32 # dtype: int32 -> # if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]: feature["sequence"] = feature["sequence"]["dtype"] # # sequence: -> sequence: # struct: -> - name: foo # - name: foo -> dtype: int32 # dtype: int32 -> # if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]: feature["sequence"] = feature["sequence"]["struct"] # # list: -> list: int32 # dtype: int32 -> # if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]: feature["list"] = feature["list"]["dtype"] # # list: -> list: # struct: -> - name: foo # - name: foo -> dtype: int32 # dtype: int32 -> # if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]: feature["list"] = feature["list"]["struct"] # # class_label: -> class_label: # names: -> names: # - negative -> '0': negative # - positive -> '1': positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list): # server-side requirement: keys must be strings feature["class_label"]["names"] = { str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"]) } return feature def to_yaml_inner(obj: Union[dict, list]) -> dict: if isinstance(obj, dict): _type = obj.pop("_type", None) if _type == "Sequence": _feature = obj.pop("feature") return simplify({"sequence": to_yaml_inner(_feature), **obj}) elif _type == "Value": return obj elif _type and not obj: return {"dtype": camelcase_to_snakecase(_type)} elif _type: return {"dtype": simplify({camelcase_to_snakecase(_type): obj})} else: return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]} elif isinstance(obj, list): return simplify({"list": simplify(to_yaml_inner(obj[0]))}) elif isinstance(obj, tuple): return to_yaml_inner(list(obj)) else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") def to_yaml_types(obj: dict) -> dict: if isinstance(obj, dict): return {k: to_yaml_types(v) for k, v in obj.items()} elif isinstance(obj, list): return [to_yaml_types(v) for v in obj] elif isinstance(obj, tuple): return to_yaml_types(list(obj)) else: return obj return to_yaml_types(to_yaml_inner(yaml_data)["struct"]) def _from_yaml_list(cls, yaml_data: list) -> "Features": yaml_data = 
copy.deepcopy(yaml_data) # we convert the list obtained from YAML data into the dict representation that is used for JSON dump def unsimplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") # # sequence: int32 -> sequence: # -> dtype: int32 # if isinstance(feature.get("sequence"), str): feature["sequence"] = {"dtype": feature["sequence"]} # # list: int32 -> list: # -> dtype: int32 # if isinstance(feature.get("list"), str): feature["list"] = {"dtype": feature["list"]} # # class_label: -> class_label: # names: -> names: # '0': negative -> - negative # '1': positive -> - positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict): label_ids = sorted(feature["class_label"]["names"], key=int) if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)): raise ValueError( f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing." ) feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids] return feature def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]: if isinstance(obj, dict): if not obj: return {} _type = next(iter(obj)) if _type == "sequence": _feature = unsimplify(obj).pop(_type) return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"} if _type == "list": return [from_yaml_inner(unsimplify(obj)[_type])] if _type == "struct": return from_yaml_inner(obj["struct"]) elif _type == "dtype": if isinstance(obj["dtype"], str): # e.g. int32, float64, string, audio, image try: Value(obj["dtype"]) return {**obj, "_type": "Value"} except ValueError: # e.g. Audio, Image, ArrayXD return {"_type": snakecase_to_camelcase(obj["dtype"])} else: return from_yaml_inner(obj["dtype"]) else: return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]} elif isinstance(obj, list): names = [_feature.pop("name") for _feature in obj] return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)} else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") return cls.from_dict(from_yaml_inner(yaml_data)) def encode_example(self, example): """ Encode example into a format for Arrow. Args: example (`dict[str, Any]`): Data in a Dataset row. Returns: `dict[str, Any]` """ example = cast_to_python_objects(example) return encode_nested_example(self, example) def encode_column(self, column, column_name: str): """ Encode column into a format for Arrow. Args: column (`list[Any]`): Data in a Dataset column. column_name (`str`): Dataset column name. Returns: `list[Any]` """ column = cast_to_python_objects(column) return [encode_nested_example(self[column_name], obj) for obj in column] def encode_batch(self, batch): """ Encode batch into a format for Arrow. Args: batch (`dict[str, list[Any]]`): Data in a Dataset batch. Returns: `dict[str, list[Any]]` """ encoded_batch = {} if set(batch) != set(self): raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}") for key, column in batch.items(): column = cast_to_python_objects(column) encoded_batch[key] = [encode_nested_example(self[key], obj) for obj in column] return encoded_batch def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode example with custom feature decoding. Args: example (`dict[str, Any]`): Dataset row data. 
token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary `repo_id (str) -> token (bool or str)`. Returns: `dict[str, Any]` """ return { column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id) if self._column_requires_decoding[column_name] else value for column_name, (feature, value) in zip_dict( {key: value for key, value in self.items() if key in example}, example ) } def decode_column(self, column: list, column_name: str): """Decode column with custom feature decoding. Args: column (`list[Any]`): Dataset column data. column_name (`str`): Dataset column name. Returns: `list[Any]` """ return ( [decode_nested_example(self[column_name], value) if value is not None else None for value in column] if self._column_requires_decoding[column_name] else column ) def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode batch with custom feature decoding. Args: batch (`dict[str, list[Any]]`): Dataset batch data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary repo_id (str) -> token (bool or str) Returns: `dict[str, list[Any]]` """ decoded_batch = {} for column_name, column in batch.items(): decoded_batch[column_name] = ( [ decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id) if value is not None else None for value in column ] if self._column_requires_decoding[column_name] else column ) return decoded_batch def copy(self) -> "Features": """ Make a deep copy of [`Features`]. Returns: [`Features`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> copy_of_features = ds.features.copy() >>> copy_of_features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} ``` """ return copy.deepcopy(self) def reorder_fields_as(self, other: "Features") -> "Features": """ Reorder Features fields to match the field order of other [`Features`]. The order of the fields is important since it matters for the underlying arrow data. Re-ordering the fields allows to make the underlying arrow data type match. Args: other ([`Features`]): The other [`Features`] to align with. 
Returns: [`Features`] Example:: >>> from datasets import Features, Sequence, Value >>> # let's say we have to features with a different order of nested fields (for a and b for example) >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})}) >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}}) >>> assert f1.type != f2.type >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but make the fields order match >>> f1.reorder_fields_as(f2) {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)} >>> assert f1.reorder_fields_as(f2).type == f2.type """ def recursive_reorder(source, target, stack=""): stack_position = " at " + stack[1:] if stack else "" if isinstance(target, Sequence): target = target.feature if isinstance(target, dict): target = {k: [v] for k, v in target.items()} else: target = [target] if isinstance(source, Sequence): source, id_, length = source.feature, source.id, source.length if isinstance(source, dict): source = {k: [v] for k, v in source.items()} reordered = recursive_reorder(source, target, stack) return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length) else: source = [source] reordered = recursive_reorder(source, target, stack) return Sequence(reordered[0], id=id_, length=length) elif isinstance(source, dict): if not isinstance(target, dict): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if sorted(source) != sorted(target): message = ( f"Keys mismatch: between {source} (source) and {target} (target).\n" f"{source.keys()-target.keys()} are missing from target " f"and {target.keys()-source.keys()} are missing from source" + stack_position ) raise ValueError(message) return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target} elif isinstance(source, list): if not isinstance(target, list): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if len(source) != len(target): raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position) return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))] else: return source return Features(recursive_reorder(self, other)) def flatten(self, max_depth=16) -> "Features": """Flatten the features. Every dictionary column is removed and is replaced by all the subfields it contains. The new fields are named by concatenating the name of the original column and the subfield name like this: `<original>.<subfield>`. If a column contains nested dictionaries, then all the lower-level subfields names are also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc. Returns: [`Features`]: The flattened features. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad", split="train") >>> ds.features.flatten() {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None), 'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} ``` """ for depth in range(1, max_depth): no_change = True flattened = self.copy() for column_name, subfeature in self.items(): if isinstance(subfeature, dict): no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()}) del flattened[column_name] elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict): no_change = False flattened.update( { f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v] for k, v in subfeature.feature.items() } ) del flattened[column_name] elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature: no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()}) del flattened[column_name] self = flattened if no_change: break return self The provided code snippet includes necessary dependencies for implementing the `embed_table_storage` function. Write a Python function `def embed_table_storage(table: pa.Table)` to solve the following problem: Embed external data into a table's storage. <Added version="2.4.0"/> Args: table (`pyarrow.Table`): PyArrow table in which to embed data. Returns: table (`pyarrow.Table`): the table with embedded data Here is the function: def embed_table_storage(table: pa.Table): """Embed external data into a table's storage. <Added version="2.4.0"/> Args: table (`pyarrow.Table`): PyArrow table in which to embed data. Returns: table (`pyarrow.Table`): the table with embedded data """ from .features.features import Features, require_storage_embed features = Features.from_arrow_schema(table.schema) arrays = [ embed_array_storage(table[name], feature) if require_storage_embed(feature) else table[name] for name, feature in features.items() ] return pa.Table.from_arrays(arrays, schema=features.arrow_schema)
Embed external data into a table's storage.

<Added version="2.4.0"/>

Args:
    table (`pyarrow.Table`):
        PyArrow table in which to embed data.

Returns:
    table (`pyarrow.Table`): the table with embedded data
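A similarly hedged sketch for `embed_table_storage` from the row above. It assumes `datasets`, `pyarrow`, and `Pillow` are installed; the file name `tiny.png` and the single `Image` column are purely illustrative. The point is that a table whose `Image` column only references a file path becomes self-contained once the file's bytes are embedded.

```python
# Minimal sketch: embed an externally referenced image file into the table's storage.
# Assumes `datasets`, `pyarrow`, and `Pillow` are installed; the file name is illustrative.
import pyarrow as pa
from PIL import Image as PILImage
from datasets import Features, Image
from datasets.table import embed_table_storage

PILImage.new("RGB", (4, 4)).save("tiny.png")  # create a throw-away image on disk
features = Features({"image": Image()})
table = pa.Table.from_pylist(
    [{"image": {"path": "tiny.png", "bytes": None}}], schema=features.arrow_schema
)

embedded = embed_table_storage(table)
# The "bytes" field is now populated, so the table no longer depends on tiny.png.
print(embedded.to_pylist()[0]["image"]["bytes"] is not None)
```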
17,987
import copy import os from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from . import config from .utils.logging import get_logger class Table(IndexedTableMixin): """ Wraps a pyarrow Table by using composition. This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`. It implements all the basic attributes/methods of the pyarrow Table class except the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column, append_column, remove_column, set_column, rename_columns` and `drop`. The implementation of these methods differs for the subclasses. """ def __init__(self, table: pa.Table): super().__init__(table) self.table = table def __deepcopy__(self, memo: dict): # arrow tables are immutable, so there's no need to copy self.table # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason # by adding it to the memo, self.table won't be copied memo[id(self.table)] = self.table # same for the recordbatches used by the index memo[id(self._batches)] = list(self._batches) return _deepcopy(self, memo) def validate(self, *args, **kwargs): """ Perform validation checks. An exception is raised if validation fails. By default only cheap validation checks are run. Pass `full=True` for thorough validation checks (potentially `O(n)`). Args: full (`bool`, defaults to `False`): If `True`, run expensive checks, otherwise cheap checks only. Raises: `pa.lib.ArrowInvalid`: if validation fails """ return self.table.validate(*args, **kwargs) def equals(self, *args, **kwargs): """ Check if contents of two tables are equal. Args: other ([`~datasets.table.Table`]): Table to compare against. check_metadata `bool`, defaults to `False`): Whether schema metadata equality should be checked as well. Returns: `bool` """ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args) kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs} return self.table.equals(*args, **kwargs) def to_batches(self, *args, **kwargs): """ Convert Table to list of (contiguous) `RecordBatch` objects. Args: max_chunksize (`int`, defaults to `None`): Maximum size for `RecordBatch` chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `List[pyarrow.RecordBatch]` """ return self.table.to_batches(*args, **kwargs) def to_pydict(self, *args, **kwargs): """ Convert the Table to a `dict` or `OrderedDict`. Returns: `dict` """ return self.table.to_pydict(*args, **kwargs) def to_pylist(self, *args, **kwargs): """ Convert the Table to a list Returns: `list` """ return self.table.to_pylist(*args, **kwargs) def to_pandas(self, *args, **kwargs): """ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate. Args: memory_pool (`MemoryPool`, defaults to `None`): Arrow MemoryPool to use for allocations. Uses the default memory pool is not passed. strings_to_categorical (`bool`, defaults to `False`): Encode string (UTF8) and binary types to `pandas.Categorical`. categories (`list`, defaults to `empty`): List of fields that should be returned as `pandas.Categorical`. Only applies to table-like data structures. zero_copy_only (`bool`, defaults to `False`): Raise an `ArrowException` if this function call would require copying the underlying data. 
integer_object_nulls (`bool`, defaults to `False`): Cast integers with nulls to objects. date_as_object (`bool`, defaults to `True`): Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype. timestamp_as_object (`bool`, defaults to `False`): Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is useful if you have timestamps that don't fit in the normal date range of nanosecond timestamps (1678 CE-2262 CE). If `False`, all timestamps are converted to `datetime64[ns]` dtype. use_threads (`bool`, defaults to `True`): Whether to parallelize the conversion using multiple threads. deduplicate_objects (`bool`, defaults to `False`): Do not create multiple copies Python objects when created, to save on memory use. Conversion will be slower. ignore_metadata (`bool`, defaults to `False`): If `True`, do not use the 'pandas' metadata to reconstruct the DataFrame index, if present. safe (`bool`, defaults to `True`): For certain data types, a cast is needed in order to store the data in a pandas DataFrame or Series (e.g. timestamps are always stored as nanoseconds in pandas). This option controls whether it is a safe cast or not. split_blocks (`bool`, defaults to `False`): If `True`, generate one internal "block" for each column when creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this can temporarily reduce memory note that various pandas operations can trigger "consolidation" which may balloon memory use. self_destruct (`bool`, defaults to `False`): EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow memory while converting the Arrow object to pandas. If you use the object after calling `to_pandas` with this option it will crash your program. types_mapper (`function`, defaults to `None`): A function mapping a pyarrow DataType to a pandas `ExtensionDtype`. This can be used to override the default pandas type for conversion of built-in pyarrow types or in absence of `pandas_metadata` in the Table schema. The function receives a pyarrow DataType and is expected to return a pandas `ExtensionDtype` or `None` if the default conversion should be used for that type. If you have a dictionary mapping, you can pass `dict.get` as function. Returns: `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object """ return self.table.to_pandas(*args, **kwargs) def to_string(self, *args, **kwargs): return self.table.to_string(*args, **kwargs) def to_reader(self, max_chunksize: Optional[int] = None): """ Convert the Table to a RecordBatchReader. Note that this method is zero-copy, it merely exposes the same data under a different API. Args: max_chunksize (`int`, defaults to `None`) Maximum size for RecordBatch chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `pyarrow.RecordBatchReader` """ return self.table.to_reader(max_chunksize=max_chunksize) def field(self, *args, **kwargs): """ Select a schema field by its column name or numeric index. Args: i (`Union[int, str]`): The index or name of the field to retrieve. Returns: `pyarrow.Field` """ return self.table.field(*args, **kwargs) def column(self, *args, **kwargs): """ Select a column by its column name, or numeric index. Args: i (`Union[int, str]`): The index or name of the column to retrieve. Returns: `pyarrow.ChunkedArray` """ return self.table.column(*args, **kwargs) def itercolumns(self, *args, **kwargs): """ Iterator over all columns in their numerical order. 
Yields: `pyarrow.ChunkedArray` """ return self.table.itercolumns(*args, **kwargs) def schema(self): """ Schema of the table and its columns. Returns: `pyarrow.Schema` """ return self.table.schema def columns(self): """ List of all columns in numerical order. Returns: `List[pa.ChunkedArray]` """ return self.table.columns def num_columns(self): """ Number of columns in this table. Returns: int """ return self.table.num_columns def num_rows(self): """ Number of rows in this table. Due to the definition of a table, all columns have the same number of rows. Returns: int """ return self.table.num_rows def shape(self): """ Dimensions of the table: (#rows, #columns). Returns: `(int, int)`: Number of rows and number of columns. """ return self.table.shape def nbytes(self): """ Total number of bytes consumed by the elements of the table. """ return self.table.nbytes def column_names(self): """ Names of the table's columns. """ return self.table.column_names def __eq__(self, other): return self.equals(other) def __getitem__(self, i): return self.table[i] def __len__(self): return len(self.table) def __repr__(self): return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__) def __str__(self): return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__) def slice(self, *args, **kwargs): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ raise NotImplementedError() def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ raise NotImplementedError() def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ raise NotImplementedError() def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ raise NotImplementedError() def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. 
column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ raise NotImplementedError() def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ raise NotImplementedError() def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ raise NotImplementedError() def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: `datasets.table.Table`: table with only a subset of the columns """ raise NotImplementedError() class Features(dict): """A special dictionary that defines the internal structure of a dataset. Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names, and values are the type of that column. `FieldType` can be one of the following: - a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`. - a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels associated to them and will be stored as integers in the dataset. - a python `dict` which specifies that the field is a nested field containing a mapping of sub-fields to sub-fields features. It's possible to have nested fields of nested fields in an arbitrary manner. - a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. The python `list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature type hosted in this list. <Tip> A [`~datasets.Sequence`] with a internal dictionary feature will be automatically converted into a dictionary of lists. This behavior is implemented to have a compatilbity layer with the TensorFlow Datasets library but may be un-wanted in some cases. If you don't want this behavior, you can use a python `list` instead of the [`~datasets.Sequence`]. </Tip> - a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays. 
- an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data. - an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data. - [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation. """ def __init__(*args, **kwargs): # self not in the signature to allow passing self as a kwarg if not args: raise TypeError("descriptor '__init__' of 'Features' object needs an argument") self, *args = args super(Features, self).__init__(*args, **kwargs) self._column_requires_decoding: Dict[str, bool] = { col: require_decoding(feature) for col, feature in self.items() } __setitem__ = keep_features_dicts_synced(dict.__setitem__) __delitem__ = keep_features_dicts_synced(dict.__delitem__) update = keep_features_dicts_synced(dict.update) setdefault = keep_features_dicts_synced(dict.setdefault) pop = keep_features_dicts_synced(dict.pop) popitem = keep_features_dicts_synced(dict.popitem) clear = keep_features_dicts_synced(dict.clear) def __reduce__(self): return Features, (dict(self),) def type(self): """ Features field types. Returns: :obj:`pyarrow.DataType` """ return get_nested_type(self) def arrow_schema(self): """ Features schema. Returns: :obj:`pyarrow.Schema` """ hf_metadata = {"info": {"features": self.to_dict()}} return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)}) def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features": """ Construct [`Features`] from Arrow Schema. It also checks the schema metadata for Hugging Face Datasets features. Non-nullable fields are not supported and set to nullable. Args: pa_schema (`pyarrow.Schema`): Arrow Schema. Returns: [`Features`] """ # try to load features from the arrow schema metadata metadata_features = Features() if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata: metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode()) if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None: metadata_features = Features.from_dict(metadata["info"]["features"]) metadata_features_schema = metadata_features.arrow_schema obj = { field.name: ( metadata_features[field.name] if field.name in metadata_features and metadata_features_schema.field(field.name) == field else generate_from_arrow_type(field.type) ) for field in pa_schema } return cls(**obj) def from_dict(cls, dic) -> "Features": """ Construct [`Features`] from dict. Regenerate the nested feature object from a deserialized dict. We use the `_type` key to infer the dataclass name of the feature `FieldType`. It allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to [`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that [`Value`] automatically performs. Args: dic (`dict[str, Any]`): Python dictionary. 
Returns: `Features` Example:: >>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}}) {'_type': Value(dtype='string', id=None)} """ obj = generate_from_dict(dic) return cls(**obj) def to_dict(self): return asdict(self) def _to_yaml_list(self) -> list: # we compute the YAML list from the dict representation that is used for JSON dump yaml_data = self.to_dict() def simplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") # # sequence: -> sequence: int32 # dtype: int32 -> # if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]: feature["sequence"] = feature["sequence"]["dtype"] # # sequence: -> sequence: # struct: -> - name: foo # - name: foo -> dtype: int32 # dtype: int32 -> # if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]: feature["sequence"] = feature["sequence"]["struct"] # # list: -> list: int32 # dtype: int32 -> # if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]: feature["list"] = feature["list"]["dtype"] # # list: -> list: # struct: -> - name: foo # - name: foo -> dtype: int32 # dtype: int32 -> # if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]: feature["list"] = feature["list"]["struct"] # # class_label: -> class_label: # names: -> names: # - negative -> '0': negative # - positive -> '1': positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list): # server-side requirement: keys must be strings feature["class_label"]["names"] = { str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"]) } return feature def to_yaml_inner(obj: Union[dict, list]) -> dict: if isinstance(obj, dict): _type = obj.pop("_type", None) if _type == "Sequence": _feature = obj.pop("feature") return simplify({"sequence": to_yaml_inner(_feature), **obj}) elif _type == "Value": return obj elif _type and not obj: return {"dtype": camelcase_to_snakecase(_type)} elif _type: return {"dtype": simplify({camelcase_to_snakecase(_type): obj})} else: return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]} elif isinstance(obj, list): return simplify({"list": simplify(to_yaml_inner(obj[0]))}) elif isinstance(obj, tuple): return to_yaml_inner(list(obj)) else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") def to_yaml_types(obj: dict) -> dict: if isinstance(obj, dict): return {k: to_yaml_types(v) for k, v in obj.items()} elif isinstance(obj, list): return [to_yaml_types(v) for v in obj] elif isinstance(obj, tuple): return to_yaml_types(list(obj)) else: return obj return to_yaml_types(to_yaml_inner(yaml_data)["struct"]) def _from_yaml_list(cls, yaml_data: list) -> "Features": yaml_data = copy.deepcopy(yaml_data) # we convert the list obtained from YAML data into the dict representation that is used for JSON dump def unsimplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") # # sequence: int32 -> sequence: # -> dtype: int32 # if isinstance(feature.get("sequence"), str): feature["sequence"] = {"dtype": feature["sequence"]} # # list: int32 -> list: # -> dtype: int32 # if isinstance(feature.get("list"), str): feature["list"] = {"dtype": feature["list"]} # # class_label: -> class_label: # names: -> names: # '0': negative -> - negative # '1': 
positive -> - positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict): label_ids = sorted(feature["class_label"]["names"], key=int) if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)): raise ValueError( f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing." ) feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids] return feature def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]: if isinstance(obj, dict): if not obj: return {} _type = next(iter(obj)) if _type == "sequence": _feature = unsimplify(obj).pop(_type) return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"} if _type == "list": return [from_yaml_inner(unsimplify(obj)[_type])] if _type == "struct": return from_yaml_inner(obj["struct"]) elif _type == "dtype": if isinstance(obj["dtype"], str): # e.g. int32, float64, string, audio, image try: Value(obj["dtype"]) return {**obj, "_type": "Value"} except ValueError: # e.g. Audio, Image, ArrayXD return {"_type": snakecase_to_camelcase(obj["dtype"])} else: return from_yaml_inner(obj["dtype"]) else: return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]} elif isinstance(obj, list): names = [_feature.pop("name") for _feature in obj] return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)} else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") return cls.from_dict(from_yaml_inner(yaml_data)) def encode_example(self, example): """ Encode example into a format for Arrow. Args: example (`dict[str, Any]`): Data in a Dataset row. Returns: `dict[str, Any]` """ example = cast_to_python_objects(example) return encode_nested_example(self, example) def encode_column(self, column, column_name: str): """ Encode column into a format for Arrow. Args: column (`list[Any]`): Data in a Dataset column. column_name (`str`): Dataset column name. Returns: `list[Any]` """ column = cast_to_python_objects(column) return [encode_nested_example(self[column_name], obj) for obj in column] def encode_batch(self, batch): """ Encode batch into a format for Arrow. Args: batch (`dict[str, list[Any]]`): Data in a Dataset batch. Returns: `dict[str, list[Any]]` """ encoded_batch = {} if set(batch) != set(self): raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}") for key, column in batch.items(): column = cast_to_python_objects(column) encoded_batch[key] = [encode_nested_example(self[key], obj) for obj in column] return encoded_batch def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode example with custom feature decoding. Args: example (`dict[str, Any]`): Dataset row data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary `repo_id (str) -> token (bool or str)`. Returns: `dict[str, Any]` """ return { column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id) if self._column_requires_decoding[column_name] else value for column_name, (feature, value) in zip_dict( {key: value for key, value in self.items() if key in example}, example ) } def decode_column(self, column: list, column_name: str): """Decode column with custom feature decoding. Args: column (`list[Any]`): Dataset column data. 
column_name (`str`): Dataset column name. Returns: `list[Any]` """ return ( [decode_nested_example(self[column_name], value) if value is not None else None for value in column] if self._column_requires_decoding[column_name] else column ) def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode batch with custom feature decoding. Args: batch (`dict[str, list[Any]]`): Dataset batch data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary repo_id (str) -> token (bool or str) Returns: `dict[str, list[Any]]` """ decoded_batch = {} for column_name, column in batch.items(): decoded_batch[column_name] = ( [ decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id) if value is not None else None for value in column ] if self._column_requires_decoding[column_name] else column ) return decoded_batch def copy(self) -> "Features": """ Make a deep copy of [`Features`]. Returns: [`Features`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> copy_of_features = ds.features.copy() >>> copy_of_features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} ``` """ return copy.deepcopy(self) def reorder_fields_as(self, other: "Features") -> "Features": """ Reorder Features fields to match the field order of other [`Features`]. The order of the fields is important since it matters for the underlying arrow data. Re-ordering the fields allows to make the underlying arrow data type match. Args: other ([`Features`]): The other [`Features`] to align with. Returns: [`Features`] Example:: >>> from datasets import Features, Sequence, Value >>> # let's say we have to features with a different order of nested fields (for a and b for example) >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})}) >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}}) >>> assert f1.type != f2.type >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but make the fields order match >>> f1.reorder_fields_as(f2) {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)} >>> assert f1.reorder_fields_as(f2).type == f2.type """ def recursive_reorder(source, target, stack=""): stack_position = " at " + stack[1:] if stack else "" if isinstance(target, Sequence): target = target.feature if isinstance(target, dict): target = {k: [v] for k, v in target.items()} else: target = [target] if isinstance(source, Sequence): source, id_, length = source.feature, source.id, source.length if isinstance(source, dict): source = {k: [v] for k, v in source.items()} reordered = recursive_reorder(source, target, stack) return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length) else: source = [source] reordered = recursive_reorder(source, target, stack) return Sequence(reordered[0], id=id_, length=length) elif isinstance(source, dict): if not isinstance(target, dict): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if sorted(source) != sorted(target): message = ( f"Keys mismatch: between {source} (source) and {target} (target).\n" f"{source.keys()-target.keys()} are missing from target " f"and {target.keys()-source.keys()} are missing from source" + 
stack_position ) raise ValueError(message) return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target} elif isinstance(source, list): if not isinstance(target, list): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if len(source) != len(target): raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position) return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))] else: return source return Features(recursive_reorder(self, other)) def flatten(self, max_depth=16) -> "Features": """Flatten the features. Every dictionary column is removed and is replaced by all the subfields it contains. The new fields are named by concatenating the name of the original column and the subfield name like this: `<original>.<subfield>`. If a column contains nested dictionaries, then all the lower-level subfields names are also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc. Returns: [`Features`]: The flattened features. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad", split="train") >>> ds.features.flatten() {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None), 'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} ``` """ for depth in range(1, max_depth): no_change = True flattened = self.copy() for column_name, subfeature in self.items(): if isinstance(subfeature, dict): no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()}) del flattened[column_name] elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict): no_change = False flattened.update( { f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v] for k, v in subfeature.feature.items() } ) del flattened[column_name] elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature: no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()}) del flattened[column_name] self = flattened if no_change: break return self The provided code snippet includes necessary dependencies for implementing the `table_flatten` function. Write a Python function `def table_flatten(table: pa.Table)` to solve the following problem: Improved version of `pa.Table.flatten`. It behaves as `pa.Table.flatten` in a sense it does 1-step flatten of the columns with a struct type into one column per struct field, but updates the metadata and skips decodable features unless the `decode` attribute of these features is set to False. Args: table (`pa.Table`): PyArrow table to flatten. Returns: `Table`: the flattened table Here is the function: def table_flatten(table: pa.Table): """Improved version of `pa.Table.flatten`. It behaves as `pa.Table.flatten` in a sense it does 1-step flatten of the columns with a struct type into one column per struct field, but updates the metadata and skips decodable features unless the `decode` attribute of these features is set to False. Args: table (`pa.Table`): PyArrow table to flatten. 
Returns: `Table`: the flattened table """ from .features import Features features = Features.from_arrow_schema(table.schema) if any(hasattr(subfeature, "flatten") and subfeature.flatten() == subfeature for subfeature in features.values()): flat_arrays = [] flat_column_names = [] for field in table.schema: array = table.column(field.name) subfeature = features[field.name] if pa.types.is_struct(field.type) and ( not hasattr(subfeature, "flatten") or subfeature.flatten() != subfeature ): flat_arrays.extend(array.flatten()) flat_column_names.extend([f"{field.name}.{subfield.name}" for subfield in field.type]) else: flat_arrays.append(array) flat_column_names.append(field.name) flat_table = pa.Table.from_arrays( flat_arrays, names=flat_column_names, ) else: flat_table = table.flatten() # Preserve complex types in the metadata flat_features = features.flatten(max_depth=2) flat_features = Features({column_name: flat_features[column_name] for column_name in flat_table.column_names}) return flat_table.replace_schema_metadata(flat_features.arrow_schema.metadata)
Improved version of `pa.Table.flatten`. It behaves like `pa.Table.flatten` in the sense that it does a one-step flatten of the columns with a struct type into one column per struct field, but it updates the metadata and skips decodable features unless the `decode` attribute of these features is set to False. Args: table (`pa.Table`): PyArrow table to flatten. Returns: `Table`: the flattened table
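A minimal usage sketch (not part of the dataset record above, and assuming the `datasets` library is installed) of the `table_flatten` helper described in this record, applied to a table with a struct column; the column names and data below are hypothetical.

```python
# Hypothetical example: flatten a struct column with datasets.table.table_flatten.
import pyarrow as pa
from datasets.table import table_flatten

table = pa.table(
    {
        "id": ["q1", "q2"],
        # A list of dicts is inferred by pyarrow as a struct<text: string, start: int64> column.
        "answers": [{"text": "Paris", "start": 3}, {"text": "Rome", "start": 7}],
    }
)

flat = table_flatten(table)
# The struct column is split into one column per field, and the Hugging Face
# feature metadata stored in the schema is updated to match.
print(flat.column_names)  # e.g. ['id', 'answers.text', 'answers.start']
```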
17,988
import copy import os from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from . import config from .utils.logging import get_logger class Table(IndexedTableMixin): """ Wraps a pyarrow Table by using composition. This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`. It implements all the basic attributes/methods of the pyarrow Table class except the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column, append_column, remove_column, set_column, rename_columns` and `drop`. The implementation of these methods differs for the subclasses. """ def __init__(self, table: pa.Table): super().__init__(table) self.table = table def __deepcopy__(self, memo: dict): # arrow tables are immutable, so there's no need to copy self.table # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason # by adding it to the memo, self.table won't be copied memo[id(self.table)] = self.table # same for the recordbatches used by the index memo[id(self._batches)] = list(self._batches) return _deepcopy(self, memo) def validate(self, *args, **kwargs): """ Perform validation checks. An exception is raised if validation fails. By default only cheap validation checks are run. Pass `full=True` for thorough validation checks (potentially `O(n)`). Args: full (`bool`, defaults to `False`): If `True`, run expensive checks, otherwise cheap checks only. Raises: `pa.lib.ArrowInvalid`: if validation fails """ return self.table.validate(*args, **kwargs) def equals(self, *args, **kwargs): """ Check if contents of two tables are equal. Args: other ([`~datasets.table.Table`]): Table to compare against. check_metadata `bool`, defaults to `False`): Whether schema metadata equality should be checked as well. Returns: `bool` """ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args) kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs} return self.table.equals(*args, **kwargs) def to_batches(self, *args, **kwargs): """ Convert Table to list of (contiguous) `RecordBatch` objects. Args: max_chunksize (`int`, defaults to `None`): Maximum size for `RecordBatch` chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `List[pyarrow.RecordBatch]` """ return self.table.to_batches(*args, **kwargs) def to_pydict(self, *args, **kwargs): """ Convert the Table to a `dict` or `OrderedDict`. Returns: `dict` """ return self.table.to_pydict(*args, **kwargs) def to_pylist(self, *args, **kwargs): """ Convert the Table to a list Returns: `list` """ return self.table.to_pylist(*args, **kwargs) def to_pandas(self, *args, **kwargs): """ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate. Args: memory_pool (`MemoryPool`, defaults to `None`): Arrow MemoryPool to use for allocations. Uses the default memory pool is not passed. strings_to_categorical (`bool`, defaults to `False`): Encode string (UTF8) and binary types to `pandas.Categorical`. categories (`list`, defaults to `empty`): List of fields that should be returned as `pandas.Categorical`. Only applies to table-like data structures. zero_copy_only (`bool`, defaults to `False`): Raise an `ArrowException` if this function call would require copying the underlying data. 
integer_object_nulls (`bool`, defaults to `False`): Cast integers with nulls to objects. date_as_object (`bool`, defaults to `True`): Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype. timestamp_as_object (`bool`, defaults to `False`): Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is useful if you have timestamps that don't fit in the normal date range of nanosecond timestamps (1678 CE-2262 CE). If `False`, all timestamps are converted to `datetime64[ns]` dtype. use_threads (`bool`, defaults to `True`): Whether to parallelize the conversion using multiple threads. deduplicate_objects (`bool`, defaults to `False`): Do not create multiple copies Python objects when created, to save on memory use. Conversion will be slower. ignore_metadata (`bool`, defaults to `False`): If `True`, do not use the 'pandas' metadata to reconstruct the DataFrame index, if present. safe (`bool`, defaults to `True`): For certain data types, a cast is needed in order to store the data in a pandas DataFrame or Series (e.g. timestamps are always stored as nanoseconds in pandas). This option controls whether it is a safe cast or not. split_blocks (`bool`, defaults to `False`): If `True`, generate one internal "block" for each column when creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this can temporarily reduce memory note that various pandas operations can trigger "consolidation" which may balloon memory use. self_destruct (`bool`, defaults to `False`): EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow memory while converting the Arrow object to pandas. If you use the object after calling `to_pandas` with this option it will crash your program. types_mapper (`function`, defaults to `None`): A function mapping a pyarrow DataType to a pandas `ExtensionDtype`. This can be used to override the default pandas type for conversion of built-in pyarrow types or in absence of `pandas_metadata` in the Table schema. The function receives a pyarrow DataType and is expected to return a pandas `ExtensionDtype` or `None` if the default conversion should be used for that type. If you have a dictionary mapping, you can pass `dict.get` as function. Returns: `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object """ return self.table.to_pandas(*args, **kwargs) def to_string(self, *args, **kwargs): return self.table.to_string(*args, **kwargs) def to_reader(self, max_chunksize: Optional[int] = None): """ Convert the Table to a RecordBatchReader. Note that this method is zero-copy, it merely exposes the same data under a different API. Args: max_chunksize (`int`, defaults to `None`) Maximum size for RecordBatch chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `pyarrow.RecordBatchReader` """ return self.table.to_reader(max_chunksize=max_chunksize) def field(self, *args, **kwargs): """ Select a schema field by its column name or numeric index. Args: i (`Union[int, str]`): The index or name of the field to retrieve. Returns: `pyarrow.Field` """ return self.table.field(*args, **kwargs) def column(self, *args, **kwargs): """ Select a column by its column name, or numeric index. Args: i (`Union[int, str]`): The index or name of the column to retrieve. Returns: `pyarrow.ChunkedArray` """ return self.table.column(*args, **kwargs) def itercolumns(self, *args, **kwargs): """ Iterator over all columns in their numerical order. 
Yields: `pyarrow.ChunkedArray` """ return self.table.itercolumns(*args, **kwargs) def schema(self): """ Schema of the table and its columns. Returns: `pyarrow.Schema` """ return self.table.schema def columns(self): """ List of all columns in numerical order. Returns: `List[pa.ChunkedArray]` """ return self.table.columns def num_columns(self): """ Number of columns in this table. Returns: int """ return self.table.num_columns def num_rows(self): """ Number of rows in this table. Due to the definition of a table, all columns have the same number of rows. Returns: int """ return self.table.num_rows def shape(self): """ Dimensions of the table: (#rows, #columns). Returns: `(int, int)`: Number of rows and number of columns. """ return self.table.shape def nbytes(self): """ Total number of bytes consumed by the elements of the table. """ return self.table.nbytes def column_names(self): """ Names of the table's columns. """ return self.table.column_names def __eq__(self, other): return self.equals(other) def __getitem__(self, i): return self.table[i] def __len__(self): return len(self.table) def __repr__(self): return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__) def __str__(self): return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__) def slice(self, *args, **kwargs): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ raise NotImplementedError() def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ raise NotImplementedError() def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ raise NotImplementedError() def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ raise NotImplementedError() def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. 
column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ raise NotImplementedError() def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ raise NotImplementedError() def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ raise NotImplementedError() def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: `datasets.table.Table`: table with only a subset of the columns """ raise NotImplementedError() class Sequence: """Construct a list of feature from a single type or a dict of types. Mostly here for compatiblity with tfds. Args: feature: A list of features of a single type or a dictionary of types. length (`int`): Length of the sequence. Example: ```py >>> from datasets import Features, Sequence, Value, ClassLabel >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})}) >>> features {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)} ``` """ feature: Any length: int = -1 id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "list" pa_type: ClassVar[Any] = None _type: str = field(default="Sequence", init=False, repr=False) class Features(dict): """A special dictionary that defines the internal structure of a dataset. Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names, and values are the type of that column. `FieldType` can be one of the following: - a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`. - a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels associated to them and will be stored as integers in the dataset. 
- a python `dict` which specifies that the field is a nested field containing a mapping of sub-fields to sub-fields features. It's possible to have nested fields of nested fields in an arbitrary manner. - a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. The python `list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature type hosted in this list. <Tip> A [`~datasets.Sequence`] with a internal dictionary feature will be automatically converted into a dictionary of lists. This behavior is implemented to have a compatilbity layer with the TensorFlow Datasets library but may be un-wanted in some cases. If you don't want this behavior, you can use a python `list` instead of the [`~datasets.Sequence`]. </Tip> - a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays. - an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data. - an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data. - [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation. """ def __init__(*args, **kwargs): # self not in the signature to allow passing self as a kwarg if not args: raise TypeError("descriptor '__init__' of 'Features' object needs an argument") self, *args = args super(Features, self).__init__(*args, **kwargs) self._column_requires_decoding: Dict[str, bool] = { col: require_decoding(feature) for col, feature in self.items() } __setitem__ = keep_features_dicts_synced(dict.__setitem__) __delitem__ = keep_features_dicts_synced(dict.__delitem__) update = keep_features_dicts_synced(dict.update) setdefault = keep_features_dicts_synced(dict.setdefault) pop = keep_features_dicts_synced(dict.pop) popitem = keep_features_dicts_synced(dict.popitem) clear = keep_features_dicts_synced(dict.clear) def __reduce__(self): return Features, (dict(self),) def type(self): """ Features field types. Returns: :obj:`pyarrow.DataType` """ return get_nested_type(self) def arrow_schema(self): """ Features schema. Returns: :obj:`pyarrow.Schema` """ hf_metadata = {"info": {"features": self.to_dict()}} return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)}) def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features": """ Construct [`Features`] from Arrow Schema. It also checks the schema metadata for Hugging Face Datasets features. Non-nullable fields are not supported and set to nullable. Args: pa_schema (`pyarrow.Schema`): Arrow Schema. 
Returns: [`Features`] """ # try to load features from the arrow schema metadata metadata_features = Features() if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata: metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode()) if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None: metadata_features = Features.from_dict(metadata["info"]["features"]) metadata_features_schema = metadata_features.arrow_schema obj = { field.name: ( metadata_features[field.name] if field.name in metadata_features and metadata_features_schema.field(field.name) == field else generate_from_arrow_type(field.type) ) for field in pa_schema } return cls(**obj) def from_dict(cls, dic) -> "Features": """ Construct [`Features`] from dict. Regenerate the nested feature object from a deserialized dict. We use the `_type` key to infer the dataclass name of the feature `FieldType`. It allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to [`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that [`Value`] automatically performs. Args: dic (`dict[str, Any]`): Python dictionary. Returns: `Features` Example:: >>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}}) {'_type': Value(dtype='string', id=None)} """ obj = generate_from_dict(dic) return cls(**obj) def to_dict(self): return asdict(self) def _to_yaml_list(self) -> list: # we compute the YAML list from the dict representation that is used for JSON dump yaml_data = self.to_dict() def simplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") # # sequence: -> sequence: int32 # dtype: int32 -> # if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]: feature["sequence"] = feature["sequence"]["dtype"] # # sequence: -> sequence: # struct: -> - name: foo # - name: foo -> dtype: int32 # dtype: int32 -> # if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]: feature["sequence"] = feature["sequence"]["struct"] # # list: -> list: int32 # dtype: int32 -> # if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]: feature["list"] = feature["list"]["dtype"] # # list: -> list: # struct: -> - name: foo # - name: foo -> dtype: int32 # dtype: int32 -> # if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]: feature["list"] = feature["list"]["struct"] # # class_label: -> class_label: # names: -> names: # - negative -> '0': negative # - positive -> '1': positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list): # server-side requirement: keys must be strings feature["class_label"]["names"] = { str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"]) } return feature def to_yaml_inner(obj: Union[dict, list]) -> dict: if isinstance(obj, dict): _type = obj.pop("_type", None) if _type == "Sequence": _feature = obj.pop("feature") return simplify({"sequence": to_yaml_inner(_feature), **obj}) elif _type == "Value": return obj elif 
_type and not obj: return {"dtype": camelcase_to_snakecase(_type)} elif _type: return {"dtype": simplify({camelcase_to_snakecase(_type): obj})} else: return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]} elif isinstance(obj, list): return simplify({"list": simplify(to_yaml_inner(obj[0]))}) elif isinstance(obj, tuple): return to_yaml_inner(list(obj)) else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") def to_yaml_types(obj: dict) -> dict: if isinstance(obj, dict): return {k: to_yaml_types(v) for k, v in obj.items()} elif isinstance(obj, list): return [to_yaml_types(v) for v in obj] elif isinstance(obj, tuple): return to_yaml_types(list(obj)) else: return obj return to_yaml_types(to_yaml_inner(yaml_data)["struct"]) def _from_yaml_list(cls, yaml_data: list) -> "Features": yaml_data = copy.deepcopy(yaml_data) # we convert the list obtained from YAML data into the dict representation that is used for JSON dump def unsimplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") # # sequence: int32 -> sequence: # -> dtype: int32 # if isinstance(feature.get("sequence"), str): feature["sequence"] = {"dtype": feature["sequence"]} # # list: int32 -> list: # -> dtype: int32 # if isinstance(feature.get("list"), str): feature["list"] = {"dtype": feature["list"]} # # class_label: -> class_label: # names: -> names: # '0': negative -> - negative # '1': positive -> - positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict): label_ids = sorted(feature["class_label"]["names"], key=int) if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)): raise ValueError( f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing." ) feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids] return feature def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]: if isinstance(obj, dict): if not obj: return {} _type = next(iter(obj)) if _type == "sequence": _feature = unsimplify(obj).pop(_type) return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"} if _type == "list": return [from_yaml_inner(unsimplify(obj)[_type])] if _type == "struct": return from_yaml_inner(obj["struct"]) elif _type == "dtype": if isinstance(obj["dtype"], str): # e.g. int32, float64, string, audio, image try: Value(obj["dtype"]) return {**obj, "_type": "Value"} except ValueError: # e.g. Audio, Image, ArrayXD return {"_type": snakecase_to_camelcase(obj["dtype"])} else: return from_yaml_inner(obj["dtype"]) else: return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]} elif isinstance(obj, list): names = [_feature.pop("name") for _feature in obj] return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)} else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") return cls.from_dict(from_yaml_inner(yaml_data)) def encode_example(self, example): """ Encode example into a format for Arrow. Args: example (`dict[str, Any]`): Data in a Dataset row. Returns: `dict[str, Any]` """ example = cast_to_python_objects(example) return encode_nested_example(self, example) def encode_column(self, column, column_name: str): """ Encode column into a format for Arrow. Args: column (`list[Any]`): Data in a Dataset column. 
column_name (`str`): Dataset column name. Returns: `list[Any]` """ column = cast_to_python_objects(column) return [encode_nested_example(self[column_name], obj) for obj in column] def encode_batch(self, batch): """ Encode batch into a format for Arrow. Args: batch (`dict[str, list[Any]]`): Data in a Dataset batch. Returns: `dict[str, list[Any]]` """ encoded_batch = {} if set(batch) != set(self): raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}") for key, column in batch.items(): column = cast_to_python_objects(column) encoded_batch[key] = [encode_nested_example(self[key], obj) for obj in column] return encoded_batch def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode example with custom feature decoding. Args: example (`dict[str, Any]`): Dataset row data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary `repo_id (str) -> token (bool or str)`. Returns: `dict[str, Any]` """ return { column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id) if self._column_requires_decoding[column_name] else value for column_name, (feature, value) in zip_dict( {key: value for key, value in self.items() if key in example}, example ) } def decode_column(self, column: list, column_name: str): """Decode column with custom feature decoding. Args: column (`list[Any]`): Dataset column data. column_name (`str`): Dataset column name. Returns: `list[Any]` """ return ( [decode_nested_example(self[column_name], value) if value is not None else None for value in column] if self._column_requires_decoding[column_name] else column ) def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode batch with custom feature decoding. Args: batch (`dict[str, list[Any]]`): Dataset batch data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary repo_id (str) -> token (bool or str) Returns: `dict[str, list[Any]]` """ decoded_batch = {} for column_name, column in batch.items(): decoded_batch[column_name] = ( [ decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id) if value is not None else None for value in column ] if self._column_requires_decoding[column_name] else column ) return decoded_batch def copy(self) -> "Features": """ Make a deep copy of [`Features`]. Returns: [`Features`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> copy_of_features = ds.features.copy() >>> copy_of_features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} ``` """ return copy.deepcopy(self) def reorder_fields_as(self, other: "Features") -> "Features": """ Reorder Features fields to match the field order of other [`Features`]. The order of the fields is important since it matters for the underlying arrow data. Re-ordering the fields allows to make the underlying arrow data type match. Args: other ([`Features`]): The other [`Features`] to align with. 
Returns: [`Features`] Example:: >>> from datasets import Features, Sequence, Value >>> # let's say we have to features with a different order of nested fields (for a and b for example) >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})}) >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}}) >>> assert f1.type != f2.type >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but make the fields order match >>> f1.reorder_fields_as(f2) {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)} >>> assert f1.reorder_fields_as(f2).type == f2.type """ def recursive_reorder(source, target, stack=""): stack_position = " at " + stack[1:] if stack else "" if isinstance(target, Sequence): target = target.feature if isinstance(target, dict): target = {k: [v] for k, v in target.items()} else: target = [target] if isinstance(source, Sequence): source, id_, length = source.feature, source.id, source.length if isinstance(source, dict): source = {k: [v] for k, v in source.items()} reordered = recursive_reorder(source, target, stack) return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length) else: source = [source] reordered = recursive_reorder(source, target, stack) return Sequence(reordered[0], id=id_, length=length) elif isinstance(source, dict): if not isinstance(target, dict): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if sorted(source) != sorted(target): message = ( f"Keys mismatch: between {source} (source) and {target} (target).\n" f"{source.keys()-target.keys()} are missing from target " f"and {target.keys()-source.keys()} are missing from source" + stack_position ) raise ValueError(message) return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target} elif isinstance(source, list): if not isinstance(target, list): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if len(source) != len(target): raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position) return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))] else: return source return Features(recursive_reorder(self, other)) def flatten(self, max_depth=16) -> "Features": """Flatten the features. Every dictionary column is removed and is replaced by all the subfields it contains. The new fields are named by concatenating the name of the original column and the subfield name like this: `<original>.<subfield>`. If a column contains nested dictionaries, then all the lower-level subfields names are also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc. Returns: [`Features`]: The flattened features. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad", split="train") >>> ds.features.flatten() {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None), 'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} ``` """ for depth in range(1, max_depth): no_change = True flattened = self.copy() for column_name, subfeature in self.items(): if isinstance(subfeature, dict): no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()}) del flattened[column_name] elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict): no_change = False flattened.update( { f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v] for k, v in subfeature.feature.items() } ) del flattened[column_name] elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature: no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()}) del flattened[column_name] self = flattened if no_change: break return self The provided code snippet includes necessary dependencies for implementing the `table_visitor` function. Write a Python function `def table_visitor(table: pa.Table, function: Callable[[pa.Array], None])` to solve the following problem: Visit all arrays in a table and apply a function to them. Args: table (`pyarrow.Table`): PyArrow table to visit. function (`Callable[[pa.Array], None]`): Function to apply to each array. Here is the function: def table_visitor(table: pa.Table, function: Callable[[pa.Array], None]): """Visit all arrays in a table and apply a function to them. Args: table (`pyarrow.Table`): PyArrow table to visit. function (`Callable[[pa.Array], None]`): Function to apply to each array. """ from .features import Features, Sequence features = Features.from_arrow_schema(table.schema) def _visit(array, feature): if isinstance(array, pa.ChunkedArray): for chunk in array.chunks: _visit(chunk, feature) else: if isinstance(array, pa.ExtensionArray): array = array.storage function(array, feature) if pa.types.is_struct(array.type) and not hasattr(feature, "cast_storage"): if isinstance(feature, Sequence) and isinstance(feature.feature, dict): feature = { name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items() } for name, subfeature in feature.items(): _visit(array.field(name), subfeature) elif pa.types.is_list(array.type): if isinstance(feature, list): _visit(array.values, feature[0]) elif isinstance(feature, Sequence): _visit(array.values, feature.feature) for name, feature in features.items(): _visit(table[name], feature)
Visit all arrays in a table and apply a function to them. Args: table (`pyarrow.Table`): PyArrow table to visit. function (`Callable[[pa.Array], None]`): Function to apply to each array.
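A minimal usage sketch (not part of the dataset record above, and assuming the `datasets` library is installed) of the `table_visitor` helper described in this record. As in the implementation above, the visiting function is called with both the array chunk and its matching feature; the table and collector below are hypothetical.

```python
# Hypothetical example: collect the Arrow type of every visited array.
import pyarrow as pa
from datasets.table import table_visitor

table = pa.table({"text": ["hello", "world"], "score": [0.1, 0.9]})

visited = []

def record_type(array, feature):
    # table_visitor calls this once per chunk of each column,
    # passing the chunk and the corresponding feature.
    visited.append((str(array.type), type(feature).__name__))

table_visitor(table, record_type)
print(visited)  # e.g. [('string', 'Value'), ('double', 'Value')]
```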
17,989
import copy import os from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from . import config from .utils.logging import get_logger class Table(IndexedTableMixin): """ Wraps a pyarrow Table by using composition. This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`. It implements all the basic attributes/methods of the pyarrow Table class except the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column, append_column, remove_column, set_column, rename_columns` and `drop`. The implementation of these methods differs for the subclasses. """ def __init__(self, table: pa.Table): super().__init__(table) self.table = table def __deepcopy__(self, memo: dict): # arrow tables are immutable, so there's no need to copy self.table # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason # by adding it to the memo, self.table won't be copied memo[id(self.table)] = self.table # same for the recordbatches used by the index memo[id(self._batches)] = list(self._batches) return _deepcopy(self, memo) def validate(self, *args, **kwargs): """ Perform validation checks. An exception is raised if validation fails. By default only cheap validation checks are run. Pass `full=True` for thorough validation checks (potentially `O(n)`). Args: full (`bool`, defaults to `False`): If `True`, run expensive checks, otherwise cheap checks only. Raises: `pa.lib.ArrowInvalid`: if validation fails """ return self.table.validate(*args, **kwargs) def equals(self, *args, **kwargs): """ Check if contents of two tables are equal. Args: other ([`~datasets.table.Table`]): Table to compare against. check_metadata `bool`, defaults to `False`): Whether schema metadata equality should be checked as well. Returns: `bool` """ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args) kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs} return self.table.equals(*args, **kwargs) def to_batches(self, *args, **kwargs): """ Convert Table to list of (contiguous) `RecordBatch` objects. Args: max_chunksize (`int`, defaults to `None`): Maximum size for `RecordBatch` chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `List[pyarrow.RecordBatch]` """ return self.table.to_batches(*args, **kwargs) def to_pydict(self, *args, **kwargs): """ Convert the Table to a `dict` or `OrderedDict`. Returns: `dict` """ return self.table.to_pydict(*args, **kwargs) def to_pylist(self, *args, **kwargs): """ Convert the Table to a list Returns: `list` """ return self.table.to_pylist(*args, **kwargs) def to_pandas(self, *args, **kwargs): """ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate. Args: memory_pool (`MemoryPool`, defaults to `None`): Arrow MemoryPool to use for allocations. Uses the default memory pool is not passed. strings_to_categorical (`bool`, defaults to `False`): Encode string (UTF8) and binary types to `pandas.Categorical`. categories (`list`, defaults to `empty`): List of fields that should be returned as `pandas.Categorical`. Only applies to table-like data structures. zero_copy_only (`bool`, defaults to `False`): Raise an `ArrowException` if this function call would require copying the underlying data. 
integer_object_nulls (`bool`, defaults to `False`): Cast integers with nulls to objects. date_as_object (`bool`, defaults to `True`): Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype. timestamp_as_object (`bool`, defaults to `False`): Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is useful if you have timestamps that don't fit in the normal date range of nanosecond timestamps (1678 CE-2262 CE). If `False`, all timestamps are converted to `datetime64[ns]` dtype. use_threads (`bool`, defaults to `True`): Whether to parallelize the conversion using multiple threads. deduplicate_objects (`bool`, defaults to `False`): Do not create multiple copies Python objects when created, to save on memory use. Conversion will be slower. ignore_metadata (`bool`, defaults to `False`): If `True`, do not use the 'pandas' metadata to reconstruct the DataFrame index, if present. safe (`bool`, defaults to `True`): For certain data types, a cast is needed in order to store the data in a pandas DataFrame or Series (e.g. timestamps are always stored as nanoseconds in pandas). This option controls whether it is a safe cast or not. split_blocks (`bool`, defaults to `False`): If `True`, generate one internal "block" for each column when creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this can temporarily reduce memory note that various pandas operations can trigger "consolidation" which may balloon memory use. self_destruct (`bool`, defaults to `False`): EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow memory while converting the Arrow object to pandas. If you use the object after calling `to_pandas` with this option it will crash your program. types_mapper (`function`, defaults to `None`): A function mapping a pyarrow DataType to a pandas `ExtensionDtype`. This can be used to override the default pandas type for conversion of built-in pyarrow types or in absence of `pandas_metadata` in the Table schema. The function receives a pyarrow DataType and is expected to return a pandas `ExtensionDtype` or `None` if the default conversion should be used for that type. If you have a dictionary mapping, you can pass `dict.get` as function. Returns: `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object """ return self.table.to_pandas(*args, **kwargs) def to_string(self, *args, **kwargs): return self.table.to_string(*args, **kwargs) def to_reader(self, max_chunksize: Optional[int] = None): """ Convert the Table to a RecordBatchReader. Note that this method is zero-copy, it merely exposes the same data under a different API. Args: max_chunksize (`int`, defaults to `None`) Maximum size for RecordBatch chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `pyarrow.RecordBatchReader` """ return self.table.to_reader(max_chunksize=max_chunksize) def field(self, *args, **kwargs): """ Select a schema field by its column name or numeric index. Args: i (`Union[int, str]`): The index or name of the field to retrieve. Returns: `pyarrow.Field` """ return self.table.field(*args, **kwargs) def column(self, *args, **kwargs): """ Select a column by its column name, or numeric index. Args: i (`Union[int, str]`): The index or name of the column to retrieve. Returns: `pyarrow.ChunkedArray` """ return self.table.column(*args, **kwargs) def itercolumns(self, *args, **kwargs): """ Iterator over all columns in their numerical order. 
Yields: `pyarrow.ChunkedArray` """ return self.table.itercolumns(*args, **kwargs) def schema(self): """ Schema of the table and its columns. Returns: `pyarrow.Schema` """ return self.table.schema def columns(self): """ List of all columns in numerical order. Returns: `List[pa.ChunkedArray]` """ return self.table.columns def num_columns(self): """ Number of columns in this table. Returns: int """ return self.table.num_columns def num_rows(self): """ Number of rows in this table. Due to the definition of a table, all columns have the same number of rows. Returns: int """ return self.table.num_rows def shape(self): """ Dimensions of the table: (#rows, #columns). Returns: `(int, int)`: Number of rows and number of columns. """ return self.table.shape def nbytes(self): """ Total number of bytes consumed by the elements of the table. """ return self.table.nbytes def column_names(self): """ Names of the table's columns. """ return self.table.column_names def __eq__(self, other): return self.equals(other) def __getitem__(self, i): return self.table[i] def __len__(self): return len(self.table) def __repr__(self): return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__) def __str__(self): return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__) def slice(self, *args, **kwargs): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ raise NotImplementedError() def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ raise NotImplementedError() def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ raise NotImplementedError() def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ raise NotImplementedError() def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. 
column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ raise NotImplementedError() def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ raise NotImplementedError() def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ raise NotImplementedError() def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: `datasets.table.Table`: table with only a subset of the columns """ raise NotImplementedError() The provided code snippet includes necessary dependencies for implementing the `table_iter` function. Write a Python function `def table_iter(table: Table, batch_size: int, drop_last_batch=False) -> Iterator[pa.Table]` to solve the following problem: Iterate over sub-tables of size `batch_size`. Args: table (`pyarrow.Table`): PyArrow table to iterate over. batch_size (`int`): Size of each sub-table to yield. drop_last_batch (`bool`, defaults to `False`): Drop the last batch if it is smaller than `batch_size`. Here is the function: def table_iter(table: Table, batch_size: int, drop_last_batch=False) -> Iterator[pa.Table]: """Iterate over sub-tables of size `batch_size`. Args: table (`pyarrow.Table`): PyArrow table to iterate over. batch_size (`int`): Size of each sub-table to yield. drop_last_batch (`bool`, defaults to `False`): Drop the last batch if it is smaller than `batch_size`. 
""" chunks_buffer = [] chunks_buffer_size = 0 for chunk in table.to_reader(max_chunksize=batch_size): if len(chunk) == 0: continue elif chunks_buffer_size + len(chunk) < batch_size: chunks_buffer.append(chunk) chunks_buffer_size += len(chunk) continue elif chunks_buffer_size + len(chunk) == batch_size: chunks_buffer.append(chunk) yield pa.Table.from_batches(chunks_buffer) chunks_buffer = [] chunks_buffer_size = 0 else: cropped_chunk_length = batch_size - chunks_buffer_size chunks_buffer.append(chunk.slice(0, cropped_chunk_length)) yield pa.Table.from_batches(chunks_buffer) chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)] chunks_buffer_size = len(chunk) - cropped_chunk_length if not drop_last_batch and chunks_buffer: yield pa.Table.from_batches(chunks_buffer)
Iterate over sub-tables of size `batch_size`. Args: table (`pyarrow.Table`): PyArrow table to iterate over. batch_size (`int`): Size of each sub-table to yield. drop_last_batch (`bool`, defaults to `False`): Drop the last batch if it is smaller than `batch_size`.
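To make the `table_iter` behaviour above concrete, here is a minimal usage sketch. It assumes the helper and the `InMemoryTable` wrapper are importable from `datasets.table` (the module shown above); the column names, row count, and batch size are illustrative only.

```py
import pyarrow as pa

from datasets.table import InMemoryTable, table_iter

# Build a small Arrow-backed table with 10 rows.
pa_table = pa.table({"id": list(range(10)), "text": [f"row {i}" for i in range(10)]})
table = InMemoryTable(pa_table)

# batch_size=4 over 10 rows yields sub-tables of 4, 4 and 2 rows.
for batch in table_iter(table, batch_size=4):
    print(len(batch), batch.column_names)

# With drop_last_batch=True the trailing 2-row batch is skipped, so only
# full batches of 4 rows are yielded.
for batch in table_iter(table, batch_size=4, drop_last_batch=True):
    assert len(batch) == 4
```

Because the helper slices and regroups record batches instead of copying rows, each yielded sub-table should remain a view over the original Arrow buffers.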
17,990
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal DatasetType = TypeVar("DatasetType", Dataset, IterableDataset) class Dataset(DatasetInfoMixin, IndexableMixin, TensorflowDatasetMixin): """A Dataset backed by an Arrow table.""" def __init__( self, arrow_table: Table, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_table: Optional[Table] = None, fingerprint: Optional[str] = None, ): info = info.copy() if info is not None else DatasetInfo() DatasetInfoMixin.__init__(self, info=info, split=split) IndexableMixin.__init__(self) self._data: Table = _check_table(arrow_table) self._indices: Optional[Table] = _check_table(indices_table) if indices_table is not None else None maybe_register_dataset_for_temp_dir_deletion(self) self._format_type: Optional[str] = None self._format_kwargs: dict = {} self._format_columns: Optional[list] = None self._output_all_columns: bool = False self._fingerprint: str = fingerprint # Read metadata if self._data.schema.metadata is not None and b"huggingface" in self._data.schema.metadata: metadata = json.loads(self._data.schema.metadata[b"huggingface"].decode()) if ( "fingerprint" in metadata and self._fingerprint is None ): # try to load fingerprint from the arrow file metadata self._fingerprint = metadata["fingerprint"] # Infer features if None inferred_features = Features.from_arrow_schema(arrow_table.schema) if self.info.features is None: self.info.features = inferred_features else: # make sure the nested columns are in the right order try: self.info.features = self.info.features.reorder_fields_as(inferred_features) except ValueError as e: raise ValueError( f"{e}\nThe 'source' features come from dataset_info.json, and the 'target' ones are those of the dataset arrow file." ) # Infer fingerprint if None if self._fingerprint is None: self._fingerprint = generate_fingerprint(self) # Sanity checks if self._info.features is None: raise ValueError("Features can't be None in a Dataset object") if self._fingerprint is None: raise ValueError("Fingerprint can't be None in a Dataset object") if self.info.features.type != inferred_features.type: raise ValueError( f"External features info don't match the dataset:\nGot\n{self.info.features}\nwith type\n{self.info.features.type}\n\nbut expected something like\n{inferred_features}\nwith type\n{inferred_features.type}" ) if self._indices is not None: if not pa.types.is_unsigned_integer(self._indices.column(0).type): raise ValueError( f"indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0).type}" ) _check_column_names(self._data.column_names) self._data = update_metadata_with_features(self._data, self._info.features) def features(self) -> Features: features = super().features if features is None: # this is already checked in __init__ raise ValueError("Features can't be None in a Dataset object") return features def from_file( cls, filename: str, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_filename: Optional[str] = None, in_memory: bool = False, ) -> "Dataset": """Instantiate a Dataset backed by an Arrow table at filename. 
Args: filename (`str`): File name of the dataset. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. indices_filename (`str`, *optional*): File names of the indices. in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. Returns: [`Dataset`] """ table = ArrowReader.read_table(filename, in_memory=in_memory) if indices_filename is not None: indices_pa_table = ArrowReader.read_table(indices_filename, in_memory=in_memory) else: indices_pa_table = None return cls( arrow_table=table, info=info, split=split, indices_table=indices_pa_table, ) def from_buffer( cls, buffer: pa.Buffer, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_buffer: Optional[pa.Buffer] = None, ) -> "Dataset": """Instantiate a Dataset backed by an Arrow buffer. Args: buffer (`pyarrow.Buffer`): Arrow buffer. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. indices_buffer (`pyarrow.Buffer`, *optional*): Indices Arrow buffer. Returns: [`Dataset`] """ table = InMemoryTable.from_buffer(buffer) if indices_buffer is not None: indices_table = InMemoryTable.from_buffer(buffer) else: indices_table = None return cls(table, info=info, split=split, indices_table=indices_table) def from_pandas( cls, df: pd.DataFrame, features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, preserve_index: Optional[bool] = None, ) -> "Dataset": """ Convert `pandas.DataFrame` to a `pyarrow.Table` to create a [`Dataset`]. The column types in the resulting Arrow Table are inferred from the dtypes of the `pandas.Series` in the DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the case of `object`, we need to guess the datatype by looking at the Python objects in this Series. Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only contains `None/nan` objects, the type is set to `null`. This behavior can be avoided by constructing explicit features and passing it to this function. Args: df (`pandas.DataFrame`): Dataframe that contains the dataset. features ([`Features`], *optional*): Dataset features. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. preserve_index (`bool`, *optional*): Whether to store the index as an additional column in the resulting Dataset. The default of `None` will store the index as a column, except for `RangeIndex` which is stored as metadata only. Use `preserve_index=True` to force it to be stored as a column. 
Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_pandas(df) ``` """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None if info is None: info = DatasetInfo() info.features = features table = InMemoryTable.from_pandas( df=df, preserve_index=preserve_index, ) if features is not None: # more expensive cast than InMemoryTable.from_pandas(..., schema=features.arrow_schema) # needed to support the str to Audio conversion for instance table = table.cast(features.arrow_schema) return cls(table, info=info, split=split) def from_polars( cls, df: "pl.DataFrame", features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, ) -> "Dataset": """ Collect the underlying arrow arrays in an Arrow Table. This operation is mostly zero copy. Data types that do copy: * CategoricalType Args: df (`polars.DataFrame`): DataFrame to convert to Arrow Table features (`Features`, optional): Dataset features. info (`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (`NamedSplit`, optional): Name of the dataset split. Examples: ```py >>> ds = Dataset.from_polars(df) ``` """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None if info is None: info = DatasetInfo() info.features = features table = InMemoryTable(df.to_arrow()) if features is not None: # more expensive cast than InMemoryTable.from_polars(..., schema=features.arrow_schema) # needed to support the str to Audio conversion for instance table = table.cast(features.arrow_schema) return cls(table, info=info, split=split) def from_dict( cls, mapping: dict, features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, ) -> "Dataset": """ Convert `dict` to a `pyarrow.Table` to create a [`Dataset`]. Args: mapping (`Mapping`): Mapping of strings to Arrays or Python lists. features ([`Features`], *optional*): Dataset features. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. 
Returns: [`Dataset`] """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None arrow_typed_mapping = {} for col, data in mapping.items(): if isinstance(data, (pa.Array, pa.ChunkedArray)): data = cast_array_to_feature(data, features[col]) if features is not None else data else: data = OptimizedTypedSequence( features.encode_column(data, col) if features is not None else data, type=features[col] if features is not None else None, col=col, ) arrow_typed_mapping[col] = data mapping = arrow_typed_mapping pa_table = InMemoryTable.from_pydict(mapping=mapping) if info is None: info = DatasetInfo() info.features = features if info.features is None: info.features = Features( { col: generate_from_arrow_type(data.type) if isinstance(data, (pa.Array, pa.ChunkedArray)) else data.get_inferred_type() for col, data in mapping.items() } ) return cls(pa_table, info=info, split=split) def from_list( cls, mapping: List[dict], features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, ) -> "Dataset": """ Convert a list of dicts to a `pyarrow.Table` to create a [`Dataset`]`. Note that the keys of the first entry will be used to determine the dataset columns, regardless of what is passed to features. Args: mapping (`List[dict]`): A list of mappings of strings to row values. features (`Features`, optional): Dataset features. info (`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (`NamedSplit`, optional): Name of the dataset split. Returns: [`Dataset`] """ # for simplicity and consistency wrt OptimizedTypedSequence we do not use InMemoryTable.from_pylist here mapping = {k: [r.get(k) for r in mapping] for k in mapping[0]} if mapping else {} return cls.from_dict(mapping, features, info, split) def from_csv( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from CSV file(s). Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the CSV file(s). split ([`NamedSplit`], *optional*): Split name to be assigned to the dataset. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. <Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`pandas.read_csv`]. 
Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_csv('path/to/dataset.csv') ``` """ # Dynamic import to avoid circular dependency from .io.csv import CsvDatasetReader return CsvDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, num_proc=num_proc, **kwargs, ).read() def from_generator( generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs, ): """Create a Dataset from a generator. Args: generator (:`Callable`): A generator function that `yields` examples. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. gen_kwargs(`dict`, *optional*): Keyword arguments to be passed to the `generator` callable. You can define a sharded dataset by passing the list of shards in `gen_kwargs` and setting `num_proc` greater than 1. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. If `num_proc` is greater than one, then all list values in `gen_kwargs` must be the same length. These values will be split between calls to the generator. The number of shards will be the minimum of the shortest list in `gen_kwargs` and `num_proc`. <Added version="2.7.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to :[`GeneratorConfig`]. Returns: [`Dataset`] Example: ```py >>> def gen(): ... yield {"text": "Good", "label": 0} ... yield {"text": "Bad", "label": 1} ... >>> ds = Dataset.from_generator(gen) ``` ```py >>> def gen(shards): ... for shard in shards: ... with open(shard) as f: ... for line in f: ... yield {"line": line} ... >>> shards = [f"data{i}.txt" for i in range(32)] >>> ds = Dataset.from_generator(gen, gen_kwargs={"shards": shards}) ``` """ from .io.generator import GeneratorDatasetInputStream return GeneratorDatasetInputStream( generator=generator, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, gen_kwargs=gen_kwargs, num_proc=num_proc, **kwargs, ).read() def from_json( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from JSON or JSON Lines file(s). Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the JSON or JSON Lines file(s). split ([`NamedSplit`], *optional*): Split name to be assigned to the dataset. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. field (`str`, *optional*): Field name of the JSON file where the dataset is contained in. num_proc (`int`, *optional* defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. <Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`JsonConfig`]. 
Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_json('path/to/dataset.json') ``` """ # Dynamic import to avoid circular dependency from .io.json import JsonDatasetReader return JsonDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, field=field, num_proc=num_proc, **kwargs, ).read() def from_parquet( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, columns: Optional[List[str]] = None, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from Parquet file(s). Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the Parquet file(s). split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. columns (`List[str]`, *optional*): If not `None`, only these columns will be read from the file. A column name may be a prefix of a nested field, e.g. 'a' will select 'a.b', 'a.c', and 'a.d.e'. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. <Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`ParquetConfig`]. Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_parquet('path/to/dataset.parquet') ``` """ # Dynamic import to avoid circular dependency from .io.parquet import ParquetDatasetReader return ParquetDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, columns=columns, num_proc=num_proc, **kwargs, ).read() def from_text( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from text file(s). Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the text file(s). split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. <Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`TextConfig`]. 
Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_text('path/to/dataset.txt') ``` """ # Dynamic import to avoid circular dependency from .io.text import TextDatasetReader return TextDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, num_proc=num_proc, **kwargs, ).read() def from_spark( df: "pyspark.sql.DataFrame", split: Optional[NamedSplit] = None, features: Optional[Features] = None, keep_in_memory: bool = False, cache_dir: str = None, working_dir: str = None, load_from_cache_file: bool = True, **kwargs, ): """Create a Dataset from Spark DataFrame. Dataset downloading is distributed over Spark workers. Args: df (`pyspark.sql.DataFrame`): The DataFrame containing the desired data. split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. When using a multi-node Spark cluster, the cache_dir must be accessible to both workers and the driver. keep_in_memory (`bool`): Whether to copy the data in-memory. working_dir (`str`, *optional*) Intermediate directory for each Spark worker to write data to before moving it to `cache_dir`. Setting a non-NFS intermediate directory may improve performance. load_from_cache_file (`bool`): Whether to load the dataset from the cache if possible. Returns: [`Dataset`] Example: ```py >>> df = spark.createDataFrame( >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]], >>> columns=["id", "name"], >>> ) >>> ds = Dataset.from_spark(df) ``` """ # Dynamic import to avoid circular dependency from .io.spark import SparkDatasetReader if sys.platform == "win32": raise EnvironmentError("Dataset.from_spark is not currently supported on Windows") return SparkDatasetReader( df, split=split, features=features, streaming=False, cache_dir=cache_dir, keep_in_memory=keep_in_memory, working_dir=working_dir, load_from_cache_file=load_from_cache_file, **kwargs, ).read() def from_sql( sql: Union[str, "sqlalchemy.sql.Selectable"], con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ): """Create Dataset from SQL query or database table. Args: sql (`str` or `sqlalchemy.sql.Selectable`): SQL query to be executed or a table name. con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Connection`): A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) used to instantiate a database connection or a SQLite3/SQLAlchemy connection object. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. **kwargs (additional keyword arguments): Keyword arguments to be passed to [`SqlConfig`]. 
Returns: [`Dataset`] Example: ```py >>> # Fetch a database table >>> ds = Dataset.from_sql("test_data", "postgres:///db_name") >>> # Execute a SQL query on the table >>> ds = Dataset.from_sql("SELECT sentence FROM test_data", "postgres:///db_name") >>> # Use a Selectable object to specify the query >>> from sqlalchemy import select, text >>> stmt = select([text("sentence")]).select_from(text("test_data")) >>> ds = Dataset.from_sql(stmt, "postgres:///db_name") ``` <Tip> The returned dataset can only be cached if `con` is specified as URI string. </Tip> """ from .io.sql import SqlDatasetReader return SqlDatasetReader( sql, con, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs, ).read() def __setstate__(self, state): self.__dict__.update(state) maybe_register_dataset_for_temp_dir_deletion(self) return self def __del__(self): if hasattr(self, "_data"): del self._data if hasattr(self, "_indices"): del self._indices def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables self.__del__() def save_to_disk( self, dataset_path: PathLike, fs="deprecated", max_shard_size: Optional[Union[str, int]] = None, num_shards: Optional[int] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, ): """ Saves a dataset to a dataset directory, or in a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`. For [`Image`] and [`Audio`] data: All the Image() and Audio() data are stored in the arrow files. If you want to store paths or urls, please use the Value("string") type. Args: dataset_path (`str`): Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`) of the dataset directory where the dataset will be saved to. fs (`fsspec.spec.AbstractFileSystem`, *optional*): Instance of the remote filesystem where the dataset will be saved to. <Deprecated version="2.8.0"> `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0. Please use `storage_options` instead, e.g. `storage_options=fs.storage_options` </Deprecated> max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit (like `"50MB"`). num_shards (`int`, *optional*): Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`. <Added version="2.8.0"/> num_proc (`int`, *optional*): Number of processes when downloading and generating the dataset locally. Multiprocessing is disabled by default. <Added version="2.8.0"/> storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.8.0"/> Example: ```py >>> ds.save_to_disk("path/to/dataset/directory") >>> ds.save_to_disk("path/to/dataset/directory", max_shard_size="1GB") >>> ds.save_to_disk("path/to/dataset/directory", num_shards=1024) ``` """ if max_shard_size is not None and num_shards is not None: raise ValueError( "Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both." 
) if fs != "deprecated": warnings.warn( "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n" "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", FutureWarning, ) storage_options = fs.storage_options if self.list_indexes(): raise ValueError("please remove all the indexes using `dataset.drop_index` before saving a dataset") if num_shards is None: dataset_nbytes = self._estimate_nbytes() max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE) num_shards = int(dataset_nbytes / max_shard_size) + 1 num_shards = max(num_shards, num_proc or 1) num_proc = num_proc if num_proc is not None else 1 num_shards = num_shards if num_shards is not None else num_proc fs: fsspec.AbstractFileSystem fs, _ = url_to_fs(dataset_path, **(storage_options or {})) if not is_remote_filesystem(fs): parent_cache_files_paths = { Path(cache_filename["filename"]).resolve().parent for cache_filename in self.cache_files } # Check that the dataset doesn't overwrite iself. It can cause a permission error on Windows and a segfault on linux. if Path(dataset_path).expanduser().resolve() in parent_cache_files_paths: raise PermissionError( f"Tried to overwrite {Path(dataset_path).expanduser().resolve()} but a dataset can't overwrite itself." ) fs.makedirs(dataset_path, exist_ok=True) # Get json serializable state state = { key: self.__dict__[key] for key in [ "_fingerprint", "_format_columns", "_format_kwargs", "_format_type", "_output_all_columns", ] } state["_split"] = str(self.split) if self.split is not None else self.split state["_data_files"] = [ {"filename": f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"} for shard_idx in range(num_shards) ] for k in state["_format_kwargs"].keys(): try: json.dumps(state["_format_kwargs"][k]) except TypeError as e: raise TypeError( str(e) + f"\nThe format kwargs must be JSON serializable, but key '{k}' isn't." 
) from None # Get json serializable dataset info dataset_info = asdict(self._info) shards_done = 0 pbar = hf_tqdm( unit=" examples", total=len(self), desc=f"Saving the dataset ({shards_done}/{num_shards} shards)", ) kwargs_per_job = ( { "job_id": shard_idx, "shard": self.shard(num_shards=num_shards, index=shard_idx, contiguous=True), "fpath": posixpath.join(dataset_path, f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"), "storage_options": storage_options, } for shard_idx in range(num_shards) ) shard_lengths = [None] * num_shards shard_sizes = [None] * num_shards if num_proc > 1: with Pool(num_proc) as pool: with pbar: for job_id, done, content in iflatmap_unordered( pool, Dataset._save_to_disk_single, kwargs_iterable=kwargs_per_job ): if done: shards_done += 1 pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)") logger.debug(f"Finished writing shard number {job_id} of {num_shards}.") shard_lengths[job_id], shard_sizes[job_id] = content else: pbar.update(content) else: with pbar: for kwargs in kwargs_per_job: for job_id, done, content in Dataset._save_to_disk_single(**kwargs): if done: shards_done += 1 pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)") logger.debug(f"Finished writing shard number {job_id} of {num_shards}.") shard_lengths[job_id], shard_sizes[job_id] = content else: pbar.update(content) with fs.open( posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME), "w", encoding="utf-8" ) as state_file: json.dump(state, state_file, indent=2, sort_keys=True) with fs.open( posixpath.join(dataset_path, config.DATASET_INFO_FILENAME), "w", encoding="utf-8" ) as dataset_info_file: # Sort only the first level of keys, or we might shuffle fields of nested features if we use sort_keys=True sorted_keys_dataset_info = {key: dataset_info[key] for key in sorted(dataset_info)} json.dump(sorted_keys_dataset_info, dataset_info_file, indent=2) def _save_to_disk_single(job_id: int, shard: "Dataset", fpath: str, storage_options: Optional[dict]): batch_size = config.DEFAULT_MAX_BATCH_SIZE num_examples_progress_update = 0 writer = ArrowWriter( features=shard.features, path=fpath, storage_options=storage_options, embed_local_files=True, ) try: _time = time.time() for pa_table in shard.with_format("arrow").iter(batch_size): writer.write_table(pa_table) num_examples_progress_update += len(pa_table) if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: _time = time.time() yield job_id, False, num_examples_progress_update num_examples_progress_update = 0 finally: yield job_id, False, num_examples_progress_update num_examples, num_bytes = writer.finalize() writer.close() yield job_id, True, (num_examples, num_bytes) def _build_local_temp_path(uri_or_path: str) -> Path: """ Builds and returns a Path concatenating a local temporary dir with the dir path (or absolute/relative path extracted from the uri) passed. Args: uri_or_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`) to concatenate. 
Returns: :class:`Path`: the concatenated path (temp dir + path) """ src_dataset_path = Path(uri_or_path) tmp_dir = get_temporary_cache_files_directory() return Path(tmp_dir, src_dataset_path.relative_to(src_dataset_path.anchor)) def load_from_disk( dataset_path: str, fs="deprecated", keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None, ) -> "Dataset": """ Loads a dataset that was previously saved using [`save_to_disk`] from a dataset directory, or from a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`. Args: dataset_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3//my-bucket/dataset/train"`) of the dataset directory where the dataset will be loaded from. fs (`fsspec.spec.AbstractFileSystem`, *optional*): Instance of the remote filesystem where the dataset will be saved to. <Deprecated version="2.8.0"> `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0. Please use `storage_options` instead, e.g. `storage_options=fs.storage_options` </Deprecated> keep_in_memory (`bool`, defaults to `None`): Whether to copy the dataset in-memory. If `None`, the dataset will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the [improve performance](../cache#improve-performance) section. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.8.0"/> Returns: [`Dataset`] or [`DatasetDict`]: - If `dataset_path` is a path of a dataset directory, the dataset requested. - If `dataset_path` is a path of a dataset dict directory, a `datasets.DatasetDict` with each split. Example: ```py >>> ds = load_from_disk("path/to/dataset/directory") ``` """ if fs != "deprecated": warnings.warn( "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n" "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", FutureWarning, ) storage_options = fs.storage_options fs: fsspec.AbstractFileSystem fs, dataset_path = url_to_fs(dataset_path, **(storage_options or {})) dest_dataset_path = dataset_path dataset_dict_json_path = posixpath.join(dest_dataset_path, config.DATASETDICT_JSON_FILENAME) dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME) dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME) dataset_dict_is_file = fs.isfile(dataset_dict_json_path) dataset_info_is_file = fs.isfile(dataset_info_path) dataset_state_is_file = fs.isfile(dataset_state_json_path) if not dataset_info_is_file and not dataset_state_is_file: if dataset_dict_is_file: raise FileNotFoundError( f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead." ) raise FileNotFoundError( f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object but provided path is not a `Dataset`." ) if not dataset_info_is_file: if dataset_dict_is_file: raise FileNotFoundError( f"No such file: '{dataset_info_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead." ) raise FileNotFoundError( f"No such file: '{dataset_info_path}'. 
Expected to load a `Dataset` object but provided path is not a `Dataset`." ) if not dataset_state_is_file: if dataset_dict_is_file: raise FileNotFoundError( f"No such file: '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead." ) raise FileNotFoundError( f"No such file: '{dataset_state_json_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`." ) # copies file from filesystem if it is remote filesystem to local filesystem and modifies dataset_path to temp directory containing local copies if is_remote_filesystem(fs): src_dataset_path = dest_dataset_path dest_dataset_path = Dataset._build_local_temp_path(src_dataset_path) fs.download(src_dataset_path, dest_dataset_path.as_posix(), recursive=True) dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME) dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME) with open(dataset_state_json_path, encoding="utf-8") as state_file: state = json.load(state_file) with open(dataset_info_path, encoding="utf-8") as dataset_info_file: dataset_info = DatasetInfo.from_dict(json.load(dataset_info_file)) dataset_size = estimate_dataset_size( Path(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"] ) keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(dataset_size) table_cls = InMemoryTable if keep_in_memory else MemoryMappedTable arrow_table = concat_tables( thread_map( table_cls.from_file, [posixpath.join(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"]], tqdm_class=hf_tqdm, desc="Loading dataset from disk", # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached disable=len(state["_data_files"]) <= 16 or None, ) ) split = state["_split"] split = Split(split) if split is not None else split dataset = Dataset( arrow_table=arrow_table, info=dataset_info, split=split, fingerprint=state["_fingerprint"], ) format = { "type": state["_format_type"], "format_kwargs": state["_format_kwargs"], "columns": state["_format_columns"], "output_all_columns": state["_output_all_columns"], } dataset = dataset.with_format(**format) return dataset def data(self) -> Table: """The Apache Arrow table backing the dataset. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.data MemoryMappedTable text: string label: int64 ---- text: [["compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .","the soundtrack alone is worth the price of admission .","rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .","beneath the film's obvious determination to shock at any cost lies considerable skill and determination , backed by sheer nerve .","bielinsky is a filmmaker of impressive talent .","so beautifully acted and directed , it's clear that washington most certainly has a new career ahead of him if he so chooses .","a visual spectacle full of stunning images and effects .","a gentle and engrossing character study .","it's enough to watch huppert scheming , with her small , intelligent eyes as steady as any noir villain , and to enjoy the perfectly pitched web of tension that chabrol spins .","an engrossing portrait of uncompromising artists trying to create something original against the backdrop of a corporate music industry that only seems to care about the bottom line .",...,"ultimately , jane learns her place as a girl , softens up and loses some of the intensity that made her an interesting character to begin with .","ah-nuld's action hero days might be over .","it's clear why deuces wild , which was shot two years ago , has been gathering dust on mgm's shelf .","feels like nothing quite so much as a middle-aged moviemaker's attempt to surround himself with beautiful , half-naked women .","when the precise nature of matthew's predicament finally comes into sharp focus , the revelation fails to justify the build-up .","this picture is murder by numbers , and as easy to be bored by as your abc's , despite a few whopping shootouts .","hilarious musical comedy though stymied by accents thick as mud .","if you are into splatter movies , then you will probably have a reasonably good time with the salton sea .","a dull , simple-minded and stereotypical tale of drugs , death and mind-numbing indifference on the inner-city streets .","the feature-length stretch . . . strains the show's concept ."]] label: [[1,1,1,1,1,1,1,1,1,1,...,0,0,0,0,0,0,0,0,0,0]] ``` """ return self._data def cache_files(self) -> List[dict]: """The cache files containing the Apache Arrow table backing the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.cache_files [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}] ``` """ cache_files = list_table_cache_files(self._data) if self._indices is not None: cache_files += list_table_cache_files(self._indices) return [{"filename": cache_filename} for cache_filename in cache_files] def num_columns(self) -> int: """Number of columns in the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.num_columns 2 ``` """ return self._data.num_columns def num_rows(self) -> int: """Number of rows in the dataset (same as [`Dataset.__len__`]). 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.num_rows 1066 ``` """ if self._indices is not None: return self._indices.num_rows return self._data.num_rows def column_names(self) -> List[str]: """Names of the columns in the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.column_names ['text', 'label'] ``` """ return self._data.column_names def shape(self) -> Tuple[int, int]: """Shape of the dataset (number of columns, number of rows). Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.shape (1066, 2) ``` """ if self._indices is not None: return (self._indices.num_rows, self._data.num_columns) return self._data.shape def unique(self, column: str) -> List: """Return a list of the unique elements in a column. This is implemented in the low-level backend and as such, very fast. Args: column (`str`): Column name (list all the column names with [`~datasets.Dataset.column_names`]). Returns: `list`: List of unique elements in the given column. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.unique('label') [1, 0] ``` """ if column not in self._data.column_names: raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).") if self._indices is not None and self._indices.num_rows != self._data.num_rows: dataset = self.flatten_indices() else: dataset = self return dataset._data.column(column).unique().to_pylist() def class_encode_column(self, column: str, include_nulls: bool = False) -> "Dataset": """Casts the given column as [`~datasets.features.ClassLabel`] and updates the table. Args: column (`str`): The name of the column to cast (list all the column names with [`~datasets.Dataset.column_names`]) include_nulls (`bool`, defaults to `False`): Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label. <Added version="1.14.2"/> Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("boolq", split="validation") >>> ds.features {'answer': Value(dtype='bool', id=None), 'passage': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None)} >>> ds = ds.class_encode_column('answer') >>> ds.features {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None), 'passage': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None)} ``` """ # Sanity checks if column not in self._data.column_names: raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).") src_feat = self._info.features[column] if not isinstance(src_feat, Value): raise ValueError( f"Class encoding is only supported for {Value.__name__} column, and column {column} is {type(src_feat).__name__}." 
) if src_feat.dtype != "string" or (include_nulls and None in self.unique(column)): def stringify_column(batch): batch[column] = [ str(sample) if include_nulls or sample is not None else None for sample in batch[column] ] return batch dset = self.map( stringify_column, batched=True, desc="Stringifying the column", ) else: dset = self # Create the new feature class_names = sorted(str(sample) for sample in dset.unique(column) if include_nulls or sample is not None) dst_feat = ClassLabel(names=class_names) def cast_to_class_labels(batch): batch[column] = [ dst_feat.str2int(str(sample)) if include_nulls or sample is not None else None for sample in batch[column] ] return batch new_features = dset.features.copy() new_features[column] = dst_feat dset = dset.map( cast_to_class_labels, batched=True, features=new_features, desc="Casting to class labels", ) return dset def flatten(self, new_fingerprint: Optional[str] = None, max_depth=16) -> "Dataset": """Flatten the table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset with flattened columns. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad", split="train") >>> ds.features {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} >>> ds.flatten() Dataset({ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], num_rows: 87599 }) ``` """ dataset = copy.deepcopy(self) for depth in range(1, max_depth): if any(isinstance(field.type, pa.StructType) for field in dataset._data.schema): dataset._data = dataset._data.flatten() else: break dataset.info.features = self._info.features.flatten(max_depth=max_depth) dataset.info.features = Features({col: dataset.info.features[col] for col in dataset.data.column_names}) dataset._data = update_metadata_with_features(dataset._data, dataset.features) logger.info(f'Flattened dataset from depth {depth} to depth {1 if depth + 1 < max_depth else "unknown"}.') dataset._fingerprint = new_fingerprint return dataset def cast( self, features: Features, batch_size: Optional[int] = 1000, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, num_proc: Optional[int] = None, ) -> "Dataset": """ Cast the dataset to a new set of features. Args: features ([`Features`]): New features to cast the dataset to. The name of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. `str` <-> `ClassLabel` you should use [`~datasets.Dataset.map`] to update the Dataset. batch_size (`int`, defaults to `1000`): Number of examples per batch provided to cast. If `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to cast. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. 
load_from_cache_file (`bool`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`str`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running [`~datasets.Dataset.map`]. num_proc (`int`, *optional*, defaults to `None`): Number of processes for multiprocessing. By default it doesn't use multiprocessing. Returns: [`Dataset`]: A copy of the dataset with casted features. Example: ```py >>> from datasets import load_dataset, ClassLabel, Value >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> new_features = ds.features.copy() >>> new_features['label'] = ClassLabel(names=['bad', 'good']) >>> new_features['text'] = Value('large_string') >>> ds = ds.cast(new_features) >>> ds.features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='large_string', id=None)} ``` """ if sorted(features) != sorted(self._data.column_names): raise ValueError( f"The columns in features ({list(features)}) must be identical " f"as the columns in the dataset: {self._data.column_names}" ) schema = features.arrow_schema format = self.format dataset = self.with_format("arrow") # capture the PyArrow version here to make the lambda serializable on Windows dataset = dataset.map( partial(table_cast, schema=schema), batched=True, batch_size=batch_size, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, num_proc=num_proc, features=features, desc="Casting the dataset", ) dataset = dataset.with_format(**format) return dataset def cast_column(self, column: str, feature: FeatureType, new_fingerprint: Optional[str] = None) -> "Dataset": """Cast column to feature for decoding. Args: column (`str`): Column name. feature (`FeatureType`): Target feature. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. 
Returns: [`Dataset`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good'])) >>> ds.features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='string', id=None)} ``` """ if hasattr(feature, "decode_example"): dataset = copy.deepcopy(self) dataset._info.features[column] = feature dataset._fingerprint = new_fingerprint dataset._data = dataset._data.cast(dataset.features.arrow_schema) dataset._data = update_metadata_with_features(dataset._data, dataset.features) return dataset else: features = self.features features[column] = feature return self.cast(features) def remove_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset": """ Remove one or several column(s) in the dataset and the features associated to them. You can also remove a column using [`~datasets.Dataset.map`] with `remove_columns` but the present method is in-place (doesn't copy the data to a new dataset) and is thus faster. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to remove. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset object without the columns to remove. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.remove_columns('label') Dataset({ features: ['text'], num_rows: 1066 }) >>> ds.remove_columns(column_names=ds.column_names) # Removing all the columns returns an empty dataset with the `num_rows` property set to 0 Dataset({ features: [], num_rows: 0 }) ``` """ dataset = copy.deepcopy(self) if isinstance(column_names, str): column_names = [column_names] missing_columns = set(column_names) - set(self._data.column_names) if missing_columns: raise ValueError( f"Column name {list(missing_columns)} not in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" ) for column_name in column_names: del dataset._info.features[column_name] dataset._data = dataset._data.drop(column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset def rename_column( self, original_column_name: str, new_column_name: str, new_fingerprint: Optional[str] = None ) -> "Dataset": """ Rename a column in the dataset, and move the features associated to the original column under the new column name. Args: original_column_name (`str`): Name of the column to rename. new_column_name (`str`): New name for the column. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset with a renamed column. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.rename_column('label', 'label_new') Dataset({ features: ['text', 'label_new'], num_rows: 1066 }) ``` """ dataset = copy.deepcopy(self) if original_column_name not in dataset._data.column_names: raise ValueError( f"Original column name {original_column_name} not in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" ) if new_column_name in dataset._data.column_names: raise ValueError( f"New column name {new_column_name} already in the dataset. " f"Please choose a column name which is not already in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" ) if not new_column_name: raise ValueError("New column name is empty.") def rename(columns): return [new_column_name if col == original_column_name else col for col in columns] new_column_names = rename(self._data.column_names) if self._format_columns is not None: dataset._format_columns = rename(self._format_columns) dataset._info.features = Features( { new_column_name if col == original_column_name else col: feature for col, feature in self._info.features.items() } ) dataset._data = dataset._data.rename_columns(new_column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset def rename_columns(self, column_mapping: Dict[str, str], new_fingerprint: Optional[str] = None) -> "Dataset": """ Rename several columns in the dataset, and move the features associated to the original columns under the new column names. Args: column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset with renamed columns Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'}) Dataset({ features: ['text_new', 'label_new'], num_rows: 1066 }) ``` """ dataset = copy.deepcopy(self) extra_columns = set(column_mapping.keys()) - set(dataset.column_names) if extra_columns: raise ValueError( f"Original column names {extra_columns} not in the dataset. 
" f"Current columns in the dataset: {dataset._data.column_names}" ) number_of_duplicates_in_new_columns = len(column_mapping.values()) - len(set(column_mapping.values())) if number_of_duplicates_in_new_columns != 0: raise ValueError( "New column names must all be different, but this column mapping " f"has {number_of_duplicates_in_new_columns} duplicates" ) empty_new_columns = [new_col for new_col in column_mapping.values() if not new_col] if empty_new_columns: raise ValueError(f"New column names {empty_new_columns} are empty.") def rename(columns): return [column_mapping[col] if col in column_mapping else col for col in columns] new_column_names = rename(self._data.column_names) if self._format_columns is not None: dataset._format_columns = rename(self._format_columns) dataset._info.features = Features( { column_mapping[col] if col in column_mapping else col: feature for col, feature in (self._info.features or {}).items() } ) dataset._data = dataset._data.rename_columns(new_column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset def select_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset": """Select one or several column(s) in the dataset and the features associated to them. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to keep. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset object which only consists of selected columns. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.select_columns(['text']) Dataset({ features: ['text'], num_rows: 1066 }) ``` """ if isinstance(column_names, str): column_names = [column_names] missing_columns = set(column_names) - set(self._data.column_names) if missing_columns: raise ValueError( f"Column name {list(missing_columns)} not in the " "dataset. Current columns in the dataset: " f"{self._data.column_names}." ) dataset = copy.deepcopy(self) dataset._data = dataset._data.select(column_names) dataset._info.features = Features({col: self._info.features[col] for col in dataset._data.column_names}) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset def __len__(self): """Number of rows in the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.__len__ <bound method Dataset.__len__ of Dataset({ features: ['text', 'label'], num_rows: 1066 })> ``` """ return self.num_rows def __iter__(self): """Iterate through the examples. If a formatting is set with :meth:`Dataset.set_format` rows will be returned with the selected format. 
""" if self._indices is None: # Fast iteration # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch) format_kwargs = self._format_kwargs if self._format_kwargs is not None else {} formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs) batch_size = config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER for pa_subtable in table_iter(self.data, batch_size=batch_size): for i in range(pa_subtable.num_rows): pa_subtable_ex = pa_subtable.slice(i, 1) formatted_output = format_table( pa_subtable_ex, 0, formatter=formatter, format_columns=self._format_columns, output_all_columns=self._output_all_columns, ) yield formatted_output else: for i in range(self.num_rows): yield self._getitem( i, ) def iter(self, batch_size: int, drop_last_batch: bool = False): """Iterate through the batches of size `batch_size`. If a formatting is set with [`~datasets.Dataset.set_format`] rows will be returned with the selected format. Args: batch_size (:obj:`int`): size of each batch to yield. drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be dropped """ if self._indices is None: # Fast iteration # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch) format_kwargs = self._format_kwargs if self._format_kwargs is not None else {} formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs) for pa_subtable in table_iter(self.data, batch_size=batch_size, drop_last_batch=drop_last_batch): formatted_batch = format_table( pa_subtable, range(pa_subtable.num_rows), formatter=formatter, format_columns=self._format_columns, output_all_columns=self._output_all_columns, ) yield formatted_batch else: num_rows = self.num_rows if not drop_last_batch else self.num_rows // batch_size * batch_size for i in range(0, num_rows, batch_size): yield self._getitem( slice(i, i + batch_size), ) def __repr__(self): return f"Dataset({{\n features: {list(self._info.features.keys())},\n num_rows: {self.num_rows}\n}})" def format(self): return { "type": self._format_type, "format_kwargs": self._format_kwargs, "columns": self.column_names if self._format_columns is None else self._format_columns, "output_all_columns": self._output_all_columns, } def formatted_as( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """To be used in a `with` statement. Set `__getitem__` return format (type and columns). Args: type (`str`, *optional*): Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__`` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. `None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. 
""" old_format_type = self._format_type old_format_kwargs = self._format_kwargs old_format_columns = self._format_columns old_output_all_columns = self._output_all_columns try: self.set_format(type, columns, output_all_columns, **format_kwargs) yield finally: self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs) def set_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly. The format `type` (for example "numpy") is used to format batches when using `__getitem__`. It's also possible to use custom transforms for formatting using [`~datasets.Dataset.set_transform`]. Args: type (`str`, *optional*): Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. `None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. It is possible to call [`~datasets.Dataset.map`] after calling `set_format`. Since `map` may add new columns, then the list of formatted columns gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted as: ``` new formatted columns = (all columns - previously unformatted columns) ``` Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) >>> ds.set_format(type='numpy', columns=['text', 'label']) >>> ds.format {'type': 'numpy', 'format_kwargs': {}, 'columns': ['text', 'label'], 'output_all_columns': False} ``` """ format_kwargs.update(format_kwargs.pop("format_kwargs", {})) # allow to use self.set_format(**self.format) # Check that the format_type and format_kwargs are valid and make it possible to have a Formatter type = get_format_type_from_alias(type) get_formatter(type, features=self._info.features, **format_kwargs) # Check filter column if isinstance(columns, str): columns = [columns] if isinstance(columns, tuple): columns = list(columns) if columns is not None: missing_columns = set(columns) - set(self._data.column_names) if missing_columns: raise ValueError( f"Columns {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) if columns is not None: columns = columns.copy() # Ensures modifications made to the list after this call don't cause bugs self._format_type = type self._format_kwargs = format_kwargs self._format_columns = columns self._output_all_columns = output_all_columns logger.debug( "Set __getitem__(key) output type to %s for %s columns " " (when key is int or slice) and %s output other (un-formatted) columns.", "python objects" if type is None else type, "no" if columns is None else str(columns), "do" if output_all_columns else "don't", ) def reset_format(self): """Reset `__getitem__` return format to python objects and all columns. 
Same as `self.set_format()` Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) >>> ds.set_format(type='numpy', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) >>> ds.format {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], 'format_kwargs': {}, 'output_all_columns': False, 'type': 'numpy'} >>> ds.reset_format() >>> ds.format {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], 'format_kwargs': {}, 'output_all_columns': False, 'type': None} ``` """ self.set_format() def set_transform( self, transform: Optional[Callable], columns: Optional[List] = None, output_all_columns: bool = False, ): """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called. As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`]. Args: transform (`Callable`, *optional*): User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`]. A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch. This function is applied right before returning the objects in `__getitem__`. columns (`List[str]`, *optional*): Columns to format in the output. If specified, then the input batch of the transform only contains those columns. output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). If set to True, then the other un-formatted columns are kept with the output of the transform. Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') >>> def encode(batch): ... return tokenizer(batch['text'], padding=True, truncation=True, return_tensors='pt') >>> ds.set_transform(encode) >>> ds[0] {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), 'input_ids': tensor([ 101, 29353, 2135, 15102, 1996, 9428, 20868, 2890, 8663, 6895, 20470, 2571, 3663, 2090, 4603, 3017, 3008, 1998, 2037, 24211, 5637, 1998, 11690, 2336, 1012, 102]), 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])} ``` """ self.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform) def with_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly. The format `type` (for example "numpy") is used to format batches when using `__getitem__`. It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`]. Contrary to [`~datasets.Dataset.set_format`], `with_format` returns a new [`Dataset`] object. Args: type (`str`, *optional*): Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. 
`None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) >>> ds.format {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], 'format_kwargs': {}, 'output_all_columns': False, 'type': None} >>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) >>> ds.format {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], 'format_kwargs': {}, 'output_all_columns': False, 'type': 'tensorflow'} ``` """ dataset = copy.deepcopy(self) dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs) return dataset def with_transform( self, transform: Optional[Callable], columns: Optional[List] = None, output_all_columns: bool = False, ): """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called. As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`]. Contrary to [`~datasets.Dataset.set_transform`], `with_transform` returns a new [`Dataset`] object. Args: transform (`Callable`, `optional`): User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`]. A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch. This function is applied right before returning the objects in `__getitem__`. columns (`List[str]`, `optional`): Columns to format in the output. If specified, then the input batch of the transform only contains those columns. output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). If set to `True`, then the other un-formatted columns are kept with the output of the transform. Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> def encode(example): ... 
return tokenizer(example["text"], padding=True, truncation=True, return_tensors='pt') >>> ds = ds.with_transform(encode) >>> ds[0] {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), 'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617, 1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105, 1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102]), 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])} ``` """ dataset = copy.deepcopy(self) dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns) return dataset def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "Dataset": """ Prepare a dataset for the given task by casting the dataset's [`Features`] to standardized column names and types as detailed in [`datasets.tasks`](./task_templates). Casts [`datasets.DatasetInfo.features`] according to a task-specific schema. Intended for single-use only, so all task templates are removed from [`datasets.DatasetInfo.task_templates`] after casting. Args: task (`Union[str, TaskTemplate]`): The task to prepare the dataset for during training and evaluation. If `str`, supported tasks include: - `"text-classification"` - `"question-answering"` If [`TaskTemplate`], must be one of the task templates in [`datasets.tasks`](./task_templates). id (`int`, defaults to `0`): The id required to unambiguously identify the task template when multiple task templates of the same type are supported. """ # TODO(lewtun): Add support for casting nested features like answers.text and answers.answer_start in SQuAD if isinstance(task, str): tasks = [template.task for template in (self.info.task_templates or [])] compatible_templates = [template for template in (self.info.task_templates or []) if template.task == task] if not compatible_templates: raise ValueError( f"Task {task} is not compatible with this dataset! Available tasks: {list(unique_values(tasks))}" ) if not 0 <= id < len(compatible_templates): templates_list_str = "\n".join( f"- `{idx}` for task {template}" for idx, template in enumerate(compatible_templates) ) raise ValueError( f"Id {id} for task {task} is not in a valid range. Supported ids:\n{templates_list_str}" ) template = compatible_templates[id] elif isinstance(task, TaskTemplate): template = task else: raise ValueError( f"Expected a `str` or `datasets.TaskTemplate` object but got task {task} with type {type(task)}." 
) template = template.align_with_features(self.info.features) column_mapping = template.column_mapping columns_to_drop = [column for column in self.column_names if column not in column_mapping] dataset = self.remove_columns(columns_to_drop) dataset = dataset.rename_columns(column_mapping) # We found a template so now flush `DatasetInfo` to skip the template update in `DatasetInfo.__post_init__` dataset.info.task_templates = None dataset = dataset.cast(features=template.features) return dataset def _getitem(self, key: Union[int, slice, str, ListLike[int]], **kwargs) -> Union[Dict, List]: """ Can be used to index columns (by string names) or rows (by integer, slice, or list-like of integer indices) """ if isinstance(key, bool): raise TypeError("dataset index must be int, str, slice or collection of int, not bool") format_type = kwargs["format_type"] if "format_type" in kwargs else self._format_type format_columns = kwargs["format_columns"] if "format_columns" in kwargs else self._format_columns output_all_columns = ( kwargs["output_all_columns"] if "output_all_columns" in kwargs else self._output_all_columns ) format_kwargs = kwargs["format_kwargs"] if "format_kwargs" in kwargs else self._format_kwargs format_kwargs = format_kwargs if format_kwargs is not None else {} formatter = get_formatter(format_type, features=self._info.features, **format_kwargs) pa_subtable = query_table(self._data, key, indices=self._indices) formatted_output = format_table( pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns ) return formatted_output def __getitem__(self, key: Union[int, slice, Iterable[int]]) -> Dict: # noqa: F811 ... def __getitem__(self, key: str) -> List: # noqa: F811 ... def __getitem__(self, key): # noqa: F811 """Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools).""" return self._getitem(key) def __getitems__(self, keys: List) -> List: """Can be used to get a batch using a list of integers indices.""" batch = self.__getitem__(keys) n_examples = len(batch[next(iter(batch))]) return [{col: array[i] for col, array in batch.items()} for i in range(n_examples)] def cleanup_cache_files(self) -> int: """Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one. Be careful when running this command that no other process is currently using other cache files. Returns: `int`: Number of removed files. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.cleanup_cache_files() 10 ``` """ current_cache_files = [os.path.abspath(cache_file["filename"]) for cache_file in self.cache_files] if not current_cache_files: return 0 cache_directory = os.path.dirname(current_cache_files[0]) logger.info(f"Listing files in {cache_directory}") files: List[str] = os.listdir(cache_directory) files_to_remove = [] for f_name in files: full_name = os.path.abspath(os.path.join(cache_directory, f_name)) if f_name.startswith("cache-") and f_name.endswith(".arrow"): if full_name in current_cache_files: logger.info(f"Keeping currently used cache file at {full_name}") continue files_to_remove.append(full_name) for file_path in files_to_remove: logger.info(f"Removing {file_path}") os.remove(file_path) return len(files_to_remove) def _get_cache_file_path(self, fingerprint): if is_caching_enabled() and self.cache_files: cache_file_name = "cache-" + fingerprint + ".arrow" cache_directory = os.path.dirname(self.cache_files[0]["filename"]) else: cache_file_name = "cache-" + generate_random_fingerprint() + ".arrow" cache_directory = get_temporary_cache_files_directory() cache_file_path = os.path.join(cache_directory, cache_file_name) return cache_file_path def map( self, function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[Union[str, List[str]]] = None, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, fn_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, suffix_template: str = "_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str] = None, desc: Optional[str] = None, ) -> "Dataset": """ Apply a function to all the examples in the table (individually or in batches) and update the table. If your function returns a column that already exists, then it overwrites it. You can specify whether the function should be batched or not with the `batched` parameter: - If batched is `False`, then the function takes 1 example in and should return 1 example. An example is a dictionary, e.g. `{"text": "Hello there !"}`. - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples. A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`. - If batched is `True` and `batch_size` is `n > 1`, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples. Note that the last batch may have less than `n` examples. A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`. 
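        For instance (an illustrative sketch; the lambdas below stand in for any user-provided function):

        ```py
        >>> # batched=False: one example dict in, one example dict out
        >>> ds = ds.map(lambda example: {"text": example["text"].upper()})
        >>> # batched=True: a batch dict of lists in, a batch dict of lists out (sizes may differ)
        >>> ds = ds.map(lambda batch: {"text": [text.upper() for text in batch["text"]]}, batched=True)
        ```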
Args: function (`Callable`): Function with one of the following signatures: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. If no function is provided, default to identity function: `lambda x: x`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (`bool`, defaults to `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`. If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`. drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`str`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file instead of the automatically generated one. disable_nullable (`bool`, defaults to `False`): Disallow null values in the table. fn_kwargs (`Dict`, *optional*, defaults to `None`): Keyword arguments to be passed to `function`. num_proc (`int`, *optional*, defaults to `None`): Max number of processes when generating cache. 
Already cached shards are loaded sequentially. suffix_template (`str`): If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each. Defaults to `"_{rank:05d}_of_{num_proc:05d}"`. For example, if `cache_file_name` is "processed.arrow", then for `rank=1` and `num_proc=4`, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix. new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. desc (`str`, *optional*, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while mapping examples. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> def add_prefix(example): ... example["text"] = "Review: " + example["text"] ... return example >>> ds = ds.map(add_prefix) >>> ds[0:3]["text"] ['Review: compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .', 'Review: the soundtrack alone is worth the price of admission .', 'Review: rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .'] # process a batch of examples >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True) # set number of processors >>> ds = ds.map(add_prefix, num_proc=4) ``` """ if keep_in_memory and cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `cache_file_name` but not both.") if num_proc is not None and num_proc <= 0: raise ValueError("num_proc must be an integer > 0.") # If the array is empty we do nothing (but we make sure to handle an empty indices mapping and remove the requested columns anyway) if len(self) == 0: if self._indices is not None: # empty indices mapping self = Dataset( self.data.slice(0, 0), info=self.info.copy(), split=self.split, fingerprint=new_fingerprint, ) if remove_columns: return self.remove_columns(remove_columns) else: return self if function is None: function = lambda x: x # noqa: E731 if isinstance(input_columns, str): input_columns = [input_columns] if input_columns is not None: missing_columns = set(input_columns) - set(self._data.column_names) if missing_columns: raise ValueError( f"Input column {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) if isinstance(remove_columns, str): remove_columns = [remove_columns] if remove_columns is not None: missing_columns = set(remove_columns) - set(self._data.column_names) if missing_columns: raise ValueError( f"Column to remove {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() if fn_kwargs is None: fn_kwargs = {} if num_proc is not None and num_proc > len(self): num_proc = len(self) logger.warning( f"num_proc must be <= {len(self)}. Reducing num_proc to {num_proc} for dataset of size {len(self)}." 
) dataset_kwargs = { "shard": self, "function": function, "with_indices": with_indices, "with_rank": with_rank, "input_columns": input_columns, "batched": batched, "batch_size": batch_size, "drop_last_batch": drop_last_batch, "remove_columns": remove_columns, "keep_in_memory": keep_in_memory, "writer_batch_size": writer_batch_size, "features": features, "disable_nullable": disable_nullable, "fn_kwargs": fn_kwargs, } if new_fingerprint is None: # we create a unique hash from the function, # current dataset file and the mapping args transform = format_transform_for_fingerprint(Dataset._map_single) kwargs_for_fingerprint = format_kwargs_for_fingerprint(Dataset._map_single, (), dataset_kwargs) kwargs_for_fingerprint["fingerprint_name"] = "new_fingerprint" new_fingerprint = update_fingerprint(self._fingerprint, transform, kwargs_for_fingerprint) else: validate_fingerprint(new_fingerprint) dataset_kwargs["new_fingerprint"] = new_fingerprint if self.cache_files: if cache_file_name is None: cache_file_name = self._get_cache_file_path(new_fingerprint) dataset_kwargs["cache_file_name"] = cache_file_name def load_processed_shard_from_cache(shard_kwargs): """Load a processed shard from cache if it exists, otherwise throw an error.""" shard = shard_kwargs["shard"] # Check if we've already cached this computation (indexed by a hash) if shard_kwargs["cache_file_name"] is not None: if os.path.exists(shard_kwargs["cache_file_name"]) and load_from_cache_file: info = shard.info.copy() info.features = features info.task_templates = None return Dataset.from_file(shard_kwargs["cache_file_name"], info=info, split=shard.split) raise NonExistentDatasetError num_shards = num_proc if num_proc is not None else 1 if batched and drop_last_batch: pbar_total = len(self) // num_shards // batch_size * num_shards * batch_size else: pbar_total = len(self) shards_done = 0 if num_proc is None or num_proc == 1: transformed_dataset = None try: transformed_dataset = load_processed_shard_from_cache(dataset_kwargs) logger.info(f"Loading cached processed dataset at {dataset_kwargs['cache_file_name']}") except NonExistentDatasetError: pass if transformed_dataset is None: with hf_tqdm( unit=" examples", total=pbar_total, desc=desc or "Map", ) as pbar: for rank, done, content in Dataset._map_single(**dataset_kwargs): if done: shards_done += 1 logger.debug(f"Finished processing shard number {rank} of {num_shards}.") transformed_dataset = content else: pbar.update(content) assert transformed_dataset is not None, "Failed to retrieve the result from map" # update fingerprint if the dataset changed if transformed_dataset._fingerprint != self._fingerprint: transformed_dataset._fingerprint = new_fingerprint return transformed_dataset else: def format_cache_file_name( cache_file_name: Optional[str], rank: Union[int, Literal["*"]], # noqa: F722 ) -> Optional[str]: if not cache_file_name: return cache_file_name sep = cache_file_name.rindex(".") base_name, extension = cache_file_name[:sep], cache_file_name[sep:] if isinstance(rank, int): cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension logger.info(f"Process #{rank} will write at {cache_file_name}") else: cache_file_name = ( base_name + suffix_template.replace("{rank:05d}", "{rank}").format(rank=rank, num_proc=num_proc) + extension ) return cache_file_name def format_new_fingerprint(new_fingerprint: str, rank: int) -> str: new_fingerprint = new_fingerprint + suffix_template.format(rank=rank, num_proc=num_proc) validate_fingerprint(new_fingerprint) 
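                # appending the rank suffix gives every worker a distinct (and still valid) fingerprint for its shard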
return new_fingerprint prev_env = deepcopy(os.environ) # check if parallelism if off # from https://github.com/huggingface/tokenizers/blob/bb668bc439dc34389b71dbb8ce0c597f15707b53/tokenizers/src/utils/parallelism.rs#L22 if prev_env.get("TOKENIZERS_PARALLELISM", "false").lower() not in ( "", "off", "false", "f", "no", "n", "0", ): logger.warning("Setting TOKENIZERS_PARALLELISM=false for forked processes.") os.environ["TOKENIZERS_PARALLELISM"] = "false" shards = [ self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory) for rank in range(num_proc) ] kwargs_per_job = [ { **dataset_kwargs, "shard": shards[rank], "cache_file_name": format_cache_file_name(cache_file_name, rank), "rank": rank, "offset": sum(len(s) for s in shards[:rank]), "new_fingerprint": format_new_fingerprint(new_fingerprint, rank), } for rank in range(num_shards) ] transformed_shards = [None] * num_shards for rank in range(num_shards): try: transformed_shards[rank] = load_processed_shard_from_cache(kwargs_per_job[rank]) kwargs_per_job[rank] = None except NonExistentDatasetError: pass kwargs_per_job = [kwargs for kwargs in kwargs_per_job if kwargs is not None] # We try to create a pool with as many workers as dataset not yet cached. if kwargs_per_job: if len(kwargs_per_job) < num_shards: logger.info( f"Reprocessing {len(kwargs_per_job)}/{num_shards} shards because some of them were missing from the cache." ) with Pool(len(kwargs_per_job)) as pool: os.environ = prev_env logger.info(f"Spawning {num_proc} processes") with hf_tqdm( unit=" examples", total=pbar_total, desc=(desc or "Map") + f" (num_proc={num_proc})", ) as pbar: for rank, done, content in iflatmap_unordered( pool, Dataset._map_single, kwargs_iterable=kwargs_per_job ): if done: shards_done += 1 logger.debug(f"Finished processing shard number {rank} of {num_shards}.") transformed_shards[rank] = content else: pbar.update(content) # Avoids PermissionError on Windows (the error: https://github.com/huggingface/datasets/actions/runs/4026734820/jobs/6921621805) for kwargs in kwargs_per_job: del kwargs["shard"] else: logger.info(f"Loading cached processed dataset at {format_cache_file_name(cache_file_name, '*')}") assert ( None not in transformed_shards ), f"Failed to retrieve results from map: result list {transformed_shards} still contains None - at least one worker failed to return its results" logger.info(f"Concatenating {num_proc} shards") result = _concatenate_map_style_datasets(transformed_shards) # update fingerprint if the dataset changed if any( transformed_shard._fingerprint != shard._fingerprint for transformed_shard, shard in zip(transformed_shards, shards) ): result._fingerprint = new_fingerprint else: result._fingerprint = self._fingerprint return result def _map_single( shard: "Dataset", function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[List[str]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[List[str]] = None, keep_in_memory: bool = False, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, fn_kwargs: Optional[dict] = None, new_fingerprint: Optional[str] = None, rank: Optional[int] = None, offset: int = 0, ) -> Iterable[Tuple[int, bool, Union[int, "Dataset"]]]: """Apply a function to all the elements in the table (individually or in batches) and update the table (if function does update 
examples). Args: shard (`datasets.Dataset`): Dataset to map the transform on. function (`Callable`): with one of the following signature: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. If no function is provided, default to identity function: lambda x: x with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`Optional[List[str]]`, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function` batch_size (`int`, optional, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True` `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function` drop_last_batch (`bool`, default: `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. cache_file_name (`str`, optional, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file instead of the automatically generated one. disable_nullable (`bool`, defaults to `False`): Disallow null values in the table. fn_kwargs (`Dict`, optional, defaults to `None`): Keyword arguments to be passed to `function` new_fingerprint (`str`, optional, defaults to `None`): the new fingerprint of the dataset after transform. 
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments rank: (`int`, optional, defaults to `None`): If specified, this is the process rank when doing multiprocessing offset: (`int`, defaults to 0): If specified, this is an offset applied to the indices passed to `function` if `with_indices=True`. """ if fn_kwargs is None: fn_kwargs = {} # If we do batch computation but no batch size is provided, default to the full dataset if batched and (batch_size is None or batch_size <= 0): batch_size = shard.num_rows # We set this variable to True after processing the first example/batch in # `apply_function_on_filtered_inputs` if the map function returns a dict. # If set to False, no new arrow table will be created update_data = None format_kwargs = shard._format_kwargs.copy() # Lazy formatting is only available for the default format (None/python) if not input_columns and shard._format_type is None: format_kwargs["lazy"] = True input_formatter = get_formatter( shard._format_type, features=shard.features, **format_kwargs, ) class NumExamplesMismatchError(Exception): pass def validate_function_output(processed_inputs, indices): """Validate output of the map function.""" if processed_inputs is not None and not isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame)): raise TypeError( f"Provided `function` which is applied to all elements of table returns a variable of type {type(processed_inputs)}. Make sure provided `function` returns a variable of type `dict` (or a pyarrow table) to update the dataset or `None` if you are only interested in side effects." ) elif isinstance(indices, list) and isinstance(processed_inputs, Mapping): allowed_batch_return_types = (list, np.ndarray, pd.Series) if config.POLARS_AVAILABLE and "polars" in sys.modules: import polars as pl allowed_batch_return_types += (pl.Series, pl.DataFrame) if config.TF_AVAILABLE and "tensorflow" in sys.modules: import tensorflow as tf allowed_batch_return_types += (tf.Tensor,) if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch allowed_batch_return_types += (torch.Tensor,) if config.JAX_AVAILABLE and "jax" in sys.modules: import jax.numpy as jnp allowed_batch_return_types += (jnp.ndarray,) all_dict_values_are_lists = all( isinstance(value, allowed_batch_return_types) for value in processed_inputs.values() ) if all_dict_values_are_lists is False: raise TypeError( f"Provided `function` which is applied to all elements of table returns a `dict` of types {[type(x) for x in processed_inputs.values()]}. When using `batched=True`, make sure provided `function` returns a `dict` of types like `{allowed_batch_return_types}`." 
) def apply_function_on_filtered_inputs(pa_inputs, indices, check_same_num_examples=False, offset=0): """Utility to apply the function on a selection of columns.""" nonlocal update_data inputs = format_table( pa_inputs, 0 if not batched else range(pa_inputs.num_rows), format_columns=input_columns, formatter=input_formatter, ) fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns] if offset == 0: effective_indices = indices else: effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset additional_args = () if with_indices: additional_args += (effective_indices,) if with_rank: additional_args += (rank,) processed_inputs = function(*fn_args, *additional_args, **fn_kwargs) if isinstance(processed_inputs, LazyDict): processed_inputs = { k: v for k, v in processed_inputs.data.items() if k not in processed_inputs.keys_to_format } returned_lazy_dict = True else: returned_lazy_dict = False if update_data is None: # Check if the function returns updated examples update_data = isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame)) validate_function_output(processed_inputs, indices) if not update_data: return None # Nothing to update, let's move on if shard._format_type or input_columns: # TODO(QL, MS): ideally the behavior should be the same even if the dataset is formatted (may require major release) inputs_to_merge = dict(zip(pa_inputs.column_names, pa_inputs.itercolumns())) elif isinstance(inputs, LazyDict): inputs_to_merge = { k: (v if k not in inputs.keys_to_format else pa_inputs[k]) for k, v in inputs.data.items() } else: inputs_to_merge = inputs if remove_columns is not None: for column in remove_columns: # `function` can modify input in-place causing column to be already removed. if column in inputs_to_merge: inputs_to_merge.pop(column) if returned_lazy_dict and column in processed_inputs: processed_inputs.pop(column) if check_same_num_examples: input_num_examples = len(pa_inputs) processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))]) if input_num_examples != processed_inputs_num_examples: raise NumExamplesMismatchError() if isinstance(inputs, Mapping) and isinstance(processed_inputs, Mapping): # The .map() transform *updates* the dataset: # the output dictionary contains both the the input data and the output data. # The output dictionary may contain Arrow values from `inputs_to_merge` so that we can re-write them efficiently. 
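                # Dict-merge semantics: keys returned by `function` take precedence, which is how `map`
                # overwrites an existing column when the function returns one with the same name.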
return {**inputs_to_merge, **processed_inputs} else: return processed_inputs def init_buffer_and_writer(): # Prepare output buffer and batched writer in memory or on file if we update the table writer_features = features if writer_features is None: writer_features = shard.features update_features = True else: update_features = False if keep_in_memory or cache_file_name is None: buf_writer = pa.BufferOutputStream() tmp_file = None writer = ArrowWriter( features=writer_features, stream=buf_writer, writer_batch_size=writer_batch_size, update_features=update_features, fingerprint=new_fingerprint, disable_nullable=disable_nullable, ) else: buf_writer = None logger.info(f"Caching processed dataset at {cache_file_name}") tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(cache_file_name), delete=False) writer = ArrowWriter( features=writer_features, path=tmp_file.name, writer_batch_size=writer_batch_size, update_features=update_features, fingerprint=new_fingerprint, disable_nullable=disable_nullable, ) return buf_writer, writer, tmp_file num_examples_progress_update = 0 # If `update_data` is True after processing the first example/batch, initalize these resources with `init_buffer_and_writer` buf_writer, writer, tmp_file = None, None, None # Check if Polars is available and import it if so if config.POLARS_AVAILABLE and "polars" in sys.modules: import polars as pl # Optionally initialize the writer as a context manager with contextlib.ExitStack() as stack: try: arrow_formatted_shard = shard.with_format("arrow") # Loop over single examples or batches and write to buffer/file if examples are to be updated if not batched: shard_iterable = enumerate(arrow_formatted_shard) else: num_rows = len(shard) if not drop_last_batch else len(shard) // batch_size * batch_size shard_iterable = zip( range(0, num_rows, batch_size), arrow_formatted_shard.iter(batch_size, drop_last_batch=drop_last_batch), ) if not batched: _time = time.time() for i, example in shard_iterable: example = apply_function_on_filtered_inputs(example, i, offset=offset) if update_data: if i == 0: buf_writer, writer, tmp_file = init_buffer_and_writer() stack.enter_context(writer) if isinstance(example, pa.Table): writer.write_row(example) elif isinstance(example, pd.DataFrame): writer.write_row(pa.Table.from_pandas(example)) elif ( config.POLARS_AVAILABLE and "polars" in sys.modules and isinstance(example, pl.DataFrame) ): writer.write_row(example.to_arrow()) else: writer.write(example) num_examples_progress_update += 1 if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: _time = time.time() yield rank, False, num_examples_progress_update num_examples_progress_update = 0 else: _time = time.time() for i, batch in shard_iterable: num_examples_in_batch = len(batch) indices = list( range(*(slice(i, i + batch_size).indices(shard.num_rows))) ) # Something simpler? try: batch = apply_function_on_filtered_inputs( batch, indices, check_same_num_examples=len(shard.list_indexes()) > 0, offset=offset, ) except NumExamplesMismatchError: raise DatasetTransformationNotAllowedError( "Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index() to remove your index and then re-add it." 
                            ) from None
                        if update_data:
                            if i == 0:
                                buf_writer, writer, tmp_file = init_buffer_and_writer()
                                stack.enter_context(writer)
                            if isinstance(batch, pa.Table):
                                writer.write_table(batch)
                            elif isinstance(batch, pd.DataFrame):
                                writer.write_table(pa.Table.from_pandas(batch))
                            elif (
                                config.POLARS_AVAILABLE
                                and "polars" in sys.modules
                                and isinstance(batch, pl.DataFrame)
                            ):
                                writer.write_table(batch.to_arrow())
                            else:
                                writer.write_batch(batch)
                        num_examples_progress_update += num_examples_in_batch
                        if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
                            _time = time.time()
                            yield rank, False, num_examples_progress_update
                            num_examples_progress_update = 0
                if update_data and writer is not None:
                    writer.finalize()  # close_stream=bool(buf_writer is None))  # We only close if we are writing in a file
            except (Exception, KeyboardInterrupt):
                yield rank, False, num_examples_progress_update
                if update_data:
                    if writer is not None:
                        writer.finalize()
                    if tmp_file is not None:
                        tmp_file.close()
                        if os.path.exists(tmp_file.name):
                            os.remove(tmp_file.name)
                raise

        yield rank, False, num_examples_progress_update
        if update_data and tmp_file is not None:
            tmp_file.close()
            shutil.move(tmp_file.name, cache_file_name)
            umask = os.umask(0o666)
            os.umask(umask)
            os.chmod(cache_file_name, 0o666 & ~umask)

        if update_data:
            # Create new Dataset from buffer or file
            info = shard.info.copy()
            info.features = writer._features
            info.task_templates = None
            if buf_writer is None:
                yield rank, True, Dataset.from_file(cache_file_name, info=info, split=shard.split)
            else:
                yield rank, True, Dataset.from_buffer(buf_writer.getvalue(), info=info, split=shard.split)
        else:
            yield rank, True, shard

    @transmit_format
    @fingerprint_transform(
        inplace=False, ignore_kwargs=["load_from_cache_file", "cache_file_name", "desc"], version="2.0.1"
    )
    def filter(
        self,
        function: Optional[Callable] = None,
        with_indices: bool = False,
        with_rank: bool = False,
        input_columns: Optional[Union[str, List[str]]] = None,
        batched: bool = False,
        batch_size: Optional[int] = 1000,
        keep_in_memory: bool = False,
        load_from_cache_file: Optional[bool] = None,
        cache_file_name: Optional[str] = None,
        writer_batch_size: Optional[int] = 1000,
        fn_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        suffix_template: str = "_{rank:05d}_of_{num_proc:05d}",
        new_fingerprint: Optional[str] = None,
        desc: Optional[str] = None,
    ) -> "Dataset":
        """Apply a filter function to all the elements in the table in batches
        and update the table so that the dataset only includes examples according to the filter function.

        Args:
            function (`Callable`): Callable with one of the following signatures:

                - `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
                - `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
                - `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
                - `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)

                If no function is provided, defaults to an always `True` function: `lambda x: True`.
            with_indices (`bool`, defaults to `False`):
                Provide example indices to `function`.
                Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
            with_rank (`bool`, defaults to `False`):
                Provide process rank to `function`.
Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`str` or `List[str]`, *optional*): The columns to be passed into `function` as positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched = True`. If `batched = False`, one example per batch is passed to `function`. If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`str`, *optional*): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. fn_kwargs (`dict`, *optional*): Keyword arguments to be passed to `function`. num_proc (`int`, *optional*): Number of processes for multiprocessing. By default it doesn't use multiprocessing. suffix_template (`str`): If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each. For example, if `cache_file_name` is `"processed.arrow"`, then for `rank = 1` and `num_proc = 4`, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix (default `_{rank:05d}_of_{num_proc:05d}`). new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. desc (`str`, *optional*, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while filtering examples. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.filter(lambda x: x["label"] == 1) Dataset({ features: ['text', 'label'], num_rows: 533 }) ``` """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.filter` on a dataset with attached indexes is not allowed. 
You can first run `.drop_index() to remove your index and then re-add it.`" ) if function is None: function = lambda x: True # noqa: E731 if len(self) == 0: return self indices = self.map( function=partial( get_indices_from_mask_function, function, batched, with_indices, with_rank, input_columns, self._indices, ), with_indices=True, with_rank=True, features=Features({"indices": Value("uint64")}), batched=True, batch_size=batch_size, remove_columns=self.column_names, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, fn_kwargs=fn_kwargs, num_proc=num_proc, suffix_template=suffix_template, new_fingerprint=new_fingerprint, input_columns=input_columns, desc=desc or "Filter", ) new_dataset = copy.deepcopy(self) new_dataset._indices = indices.data new_dataset._fingerprint = new_fingerprint return new_dataset def flatten_indices( self, keep_in_memory: bool = False, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, num_proc: Optional[int] = None, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create and cache a new Dataset by flattening the indices mapping. Args: keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. cache_file_name (`str`, *optional*, default `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. features (`Optional[datasets.Features]`, defaults to `None`): Use a specific [`Features`] to store the cache file instead of the automatically generated one. disable_nullable (`bool`, defaults to `False`): Allow null values in the table. num_proc (`int`, optional, default `None`): Max number of processes when generating cache. Already cached shards are loaded sequentially new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments """ return self.map( batched=True, # for speed keep_in_memory=keep_in_memory, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, features=features, disable_nullable=disable_nullable, new_fingerprint=new_fingerprint, desc="Flattening the indices", num_proc=num_proc, ) def _new_dataset_with_indices( self, indices_cache_file_name: Optional[str] = None, indices_buffer: Optional[pa.Buffer] = None, fingerprint: Optional[str] = None, ) -> "Dataset": """Return a new Dataset obtained by adding indices (provided in indices_cache_file_name or in a buffer) to the current Dataset. 
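As a hedged illustration of the indices-mapping mechanism that `filter` creates and `flatten_indices` removes (the column name `label` is hypothetical, and `_indices` is only inspected here for demonstration):

```py
>>> from datasets import Dataset
>>> ds = Dataset.from_dict({"label": [0, 1, 1, 0, 1]})
>>> kept = ds.filter(lambda x: x["label"] == 1)  # only an indices mapping is created, the data is untouched
>>> kept._indices is not None
True
>>> flat = kept.flatten_indices()  # rewrites the selected rows as a new contiguous table
>>> flat._indices is None
True
```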
""" if indices_cache_file_name is None and indices_buffer is None: raise ValueError("At least one of indices_cache_file_name or indices_buffer must be provided.") if fingerprint is None: raise ValueError("please specify a fingerprint for the dataset with indices") if indices_cache_file_name is not None: indices_table = MemoryMappedTable.from_file(indices_cache_file_name) else: indices_table = InMemoryTable.from_buffer(indices_buffer) # Return new Dataset object # don't forget to copy the objects return Dataset( self._data, info=self.info.copy(), split=self.split, indices_table=indices_table, fingerprint=fingerprint, ) def select( self, indices: Iterable, keep_in_memory: bool = False, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new dataset with rows selected following the list/array of indices. Args: indices (`range`, `list`, `iterable`, `ndarray` or `Series`): Range, list or 1D-array of integer indices for indexing. If the indices correspond to a contiguous range, the Arrow table is simply sliced. However passing a list of indices that are not contiguous creates indices mapping, which is much less efficient, but still faster than recreating an Arrow table made of the requested rows. keep_in_memory (`bool`, defaults to `False`): Keep the indices mapping in memory instead of writing it to a cache file. indices_cache_file_name (`str`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the indices mapping instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.select(range(4)) Dataset({ features: ['text', 'label'], num_rows: 4 }) ``` """ if keep_in_memory and indices_cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.") if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." 
) # If the array is empty we do nothing if len(self) == 0: return self # If indices is a PyArrow array, we convert to NumPy if isinstance(indices, (pa.Array, pa.ChunkedArray)): indices = indices.to_numpy().astype(np.int64) # Convert generator objects to lists if isinstance(indices, Iterator): indices = list(indices) # If the indices are contiguous, simply slice the arrow table if isinstance(indices, range): if _is_range_contiguous(indices) and indices.start >= 0: start, length = indices.start, indices.stop - indices.start return self._select_contiguous(start, length, new_fingerprint=new_fingerprint) else: try: start = next(iter(indices)) except StopIteration: # if `indices` is an empty iterable, we return an empty dataset return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint) if start >= 0: counter_from_start = itertools.count(start=start) if all(i == j for i, j in zip(indices, counter_from_start)): length = next(counter_from_start) - start return self._select_contiguous(start, length, new_fingerprint=new_fingerprint) # If not contiguous, we need to create a new indices mapping return self._select_with_indices_mapping( indices, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=new_fingerprint, ) def _select_contiguous( self, start: int, length: int, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new dataset with rows from a contiguous slice of data. The slice is defined by that start index and its length. Args: start (`int`): start index. length (`int`): length of the slice to select. new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds._select_contiguous(0, 4) Dataset({ features: ['text', 'label'], num_rows: 4 }) ``` """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self _check_valid_indices_value(start, len(self)) _check_valid_indices_value(start + length - 1, len(self)) if self._indices is None or length == 0: return Dataset( self.data.slice(start, length), info=self.info.copy(), split=self.split, fingerprint=new_fingerprint, ) else: return Dataset( self.data, info=self.info.copy(), split=self.split, indices_table=self._indices.slice(start, length), fingerprint=new_fingerprint, ) def _select_with_indices_mapping( self, indices: Iterable, keep_in_memory: bool = False, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new dataset with rows selected following the list/array of indices. The new dataset is made by creating a new indices mapping on top of the main arrow table. Args: indices (sequence, iterable, range, ndarray or Series): List or 1D-array of integer indices for indexing. keep_in_memory (`bool`, default `False`): Keep the indices mapping in memory instead of writing it to a cache file. indices_cache_file_name (`str`, optional, default `None`): Provide the name of a path for the cache file. 
It is used to store the indices mapping instead of the automatically generated cache file name. writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds._select_with_indices_mapping(range(4)) Dataset({ features: ['text', 'label'], num_rows: 4 }) ``` """ if keep_in_memory and indices_cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.") if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self # Prepare the writer for our indices arrow table if keep_in_memory or indices_cache_file_name is None: buf_writer = pa.BufferOutputStream() tmp_file = None writer = ArrowWriter( stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices" ) else: buf_writer = None logger.info(f"Caching indices mapping at {indices_cache_file_name}") tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(indices_cache_file_name), delete=False) writer = ArrowWriter( path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices" ) indices = indices if isinstance(indices, list) else list(indices) size = len(self) if indices: _check_valid_indices_value(int(max(indices)), size=size) _check_valid_indices_value(int(min(indices)), size=size) else: return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint) indices_array = pa.array(indices, type=pa.uint64()) # Check if we need to convert indices if self._indices is not None: indices_array = self._indices.column(0).take(indices_array) indices_table = pa.Table.from_arrays([indices_array], names=["indices"]) with writer: try: writer.write_table(indices_table) writer.finalize() # close_stream=bool(buf_writer is None)) We only close if we are writing in a file except (Exception, KeyboardInterrupt): if tmp_file is not None: tmp_file.close() if os.path.exists(tmp_file.name): os.remove(tmp_file.name) raise if tmp_file is not None: tmp_file.close() shutil.move(tmp_file.name, indices_cache_file_name) umask = os.umask(0o666) os.umask(umask) os.chmod(indices_cache_file_name, 0o666 & ~umask) # Return new Dataset object if buf_writer is None: return self._new_dataset_with_indices( indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint ) else: return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint) def sort( self, column_names: Union[str, Sequence_[str]], reverse: Union[bool, Sequence_[bool]] = False, kind="deprecated", null_placement: str = "at_end", keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: 
Optional[str] = None, ) -> "Dataset": """Create a new dataset sorted according to a single or multiple columns. Args: column_names (`Union[str, Sequence[str]]`): Column name(s) to sort by. reverse (`Union[bool, Sequence[bool]]`, defaults to `False`): If `True`, sort by descending order rather than ascending. If a single bool is provided, the value is applied to the sorting of all column names. Otherwise a list of bools with the same length and order as column_names must be provided. kind (`str`, *optional*): Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`, The default is `quicksort`. Note that both `stable` and `mergesort` use `timsort` under the covers and, in general, the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility. <Deprecated version="2.8.0"> `kind` was deprecated in version 2.10.0 and will be removed in 3.0.0. </Deprecated> null_placement (`str`, defaults to `at_end`): Put `None` values at the beginning if `at_start` or `first` or at the end if `at_end` or `last` <Added version="1.14.2"/> keep_in_memory (`bool`, defaults to `False`): Keep the sorted indices in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the sorted indices can be identified, use it instead of recomputing. indices_cache_file_name (`str`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the sorted indices instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. Higher value gives smaller cache files, lower value consume less temporary memory. new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset('rotten_tomatoes', split='validation') >>> ds['label'][:10] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] >>> sorted_ds = ds.sort('label') >>> sorted_ds['label'][:10] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] >>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False]) >>> another_sorted_ds['label'][:10] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ``` """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self # Deprecation warning if kind != "deprecated": warnings.warn( "'kind' was deprecated in version 2.10.0 and will be removed in 3.0.0.", category=FutureWarning, ) # Check proper format of and for duplicates in column_names if isinstance(column_names, str): column_names = [column_names] # Check proper format and length of reverse if not isinstance(reverse, bool): if len(reverse) != len(column_names): raise ValueError( "Parameter 'reverse' should be either a boolean or a list of booleans with the same length as 'column_names'." ) else: reverse = [reverse] * len(column_names) # Check whether column name(s) exist in dataset for column in column_names: if not isinstance(column, str) or column not in self._data.column_names: raise ValueError( f"Column '{column}' not found in the dataset. 
Please provide a column selected in: {self._data.column_names}" ) # Change null_placement to conform to pyarrow's sort_indices() while ensuring backwards compatability if null_placement not in ["at_start", "at_end"]: if null_placement == "first": null_placement = "at_start" elif null_placement == "last": null_placement = "at_end" else: raise ValueError( f"null_placement '{null_placement}' is an invalid parameter value. Must be either 'last', 'at_end', 'first' or 'at_start'." ) load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() # Check if we've already cached this computation (indexed by a hash) if self.cache_files: if indices_cache_file_name is None: # we create a unique hash from the function, current dataset file and the mapping args indices_cache_file_name = self._get_cache_file_path(new_fingerprint) if os.path.exists(indices_cache_file_name) and load_from_cache_file: logger.info(f"Loading cached sorted indices for dataset at {indices_cache_file_name}") return self._new_dataset_with_indices( fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name ) sort_table = query_table( table=self._data, key=slice(0, len(self)), indices=self._indices, ) sort_keys = [ (col, "ascending" if not col_reverse else "descending") for col, col_reverse in zip(column_names, reverse) ] indices = pc.sort_indices(sort_table, sort_keys=sort_keys, null_placement=null_placement) return self.select( indices=indices, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=new_fingerprint, ) inplace=False, randomized_function=True, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"] ) def shuffle( self, seed: Optional[int] = None, generator: Optional[np.random.Generator] = None, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new Dataset where the rows are shuffled. Currently shuffling uses numpy random generators. You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64). Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping. However as soon as your [`Dataset`] has an indices mapping, the speed can become 10x slower. This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore. To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping. This may take a lot of time depending of the size of your dataset though: ```python my_dataset[0] # fast my_dataset = my_dataset.shuffle(seed=42) my_dataset[0] # up to 10x slower my_dataset = my_dataset.flatten_indices() # rewrite the shuffled dataset on disk as contiguous chunks of data my_dataset[0] # fast again ``` In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`]. 
It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal: ```python my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=128) for example in enumerate(my_iterable_dataset): # fast pass shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100) for example in enumerate(shuffled_iterable_dataset): # as fast as before pass ``` Args: seed (`int`, *optional*): A seed to initialize the default BitGenerator if `generator=None`. If `None`, then fresh, unpredictable entropy will be pulled from the OS. If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state. generator (`numpy.random.Generator`, *optional*): Numpy random Generator to use to compute the permutation of the dataset rows. If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). keep_in_memory (`bool`, default `False`): Keep the shuffled indices in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the shuffled indices can be identified, use it instead of recomputing. indices_cache_file_name (`str`, *optional*): Provide the name of a path for the cache file. It is used to store the shuffled indices instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds['label'][:10] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # set a seed >>> shuffled_ds = ds.shuffle(seed=42) >>> shuffled_ds['label'][:10] [1, 0, 1, 1, 0, 0, 0, 0, 0, 0] ``` """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self if keep_in_memory and indices_cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.") if seed is not None and generator is not None: raise ValueError("Both `seed` and `generator` were provided. 
Please specify just one of them.") if generator is not None and not isinstance(generator, np.random.Generator): raise ValueError("The provided generator must be an instance of numpy.random.Generator") load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() if generator is None: if seed is None: _, seed, pos, *_ = np.random.get_state() seed = seed[pos] if pos < 624 else seed[0] _ = np.random.random() # do 1 step of rng generator = np.random.default_rng(seed) # Check if we've already cached this computation (indexed by a hash) if self.cache_files: if indices_cache_file_name is None: # we create a unique hash from the function, current dataset file and the mapping args indices_cache_file_name = self._get_cache_file_path(new_fingerprint) if os.path.exists(indices_cache_file_name) and load_from_cache_file: logger.info(f"Loading cached shuffled indices for dataset at {indices_cache_file_name}") return self._new_dataset_with_indices( fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name ) permutation = generator.permutation(len(self)) return self.select( indices=permutation, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name if not keep_in_memory else None, writer_batch_size=writer_batch_size, new_fingerprint=new_fingerprint, ) inplace=False, randomized_function=True, fingerprint_names=["train_new_fingerprint", "test_new_fingerprint"], ignore_kwargs=["load_from_cache_file", "train_indices_cache_file_name", "test_indices_cache_file_name"], ) def train_test_split( self, test_size: Union[float, int, None] = None, train_size: Union[float, int, None] = None, shuffle: bool = True, stratify_by_column: Optional[str] = None, seed: Optional[int] = None, generator: Optional[np.random.Generator] = None, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, train_indices_cache_file_name: Optional[str] = None, test_indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, train_new_fingerprint: Optional[str] = None, test_new_fingerprint: Optional[str] = None, ) -> "DatasetDict": """Return a dictionary ([`datasets.DatasetDict`]) with two random train and test subsets (`train` and `test` `Dataset` splits). Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`. This method is similar to scikit-learn `train_test_split`. Args: test_size (`numpy.random.Generator`, *optional*): Size of the test split If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the test split. If `int`, represents the absolute number of test samples. If `None`, the value is set to the complement of the train size. If `train_size` is also `None`, it will be set to `0.25`. train_size (`numpy.random.Generator`, *optional*): Size of the train split If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the train split. If `int`, represents the absolute number of train samples. If `None`, the value is automatically set to the complement of the test size. shuffle (`bool`, *optional*, defaults to `True`): Whether or not to shuffle the data before splitting. stratify_by_column (`str`, *optional*, defaults to `None`): The column name of labels to be used to perform stratified split of data. seed (`int`, *optional*): A seed to initialize the default BitGenerator if `generator=None`. If `None`, then fresh, unpredictable entropy will be pulled from the OS. 
If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state. generator (`numpy.random.Generator`, *optional*): Numpy random Generator to use to compute the permutation of the dataset rows. If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). keep_in_memory (`bool`, defaults to `False`): Keep the splits indices in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the splits indices can be identified, use it instead of recomputing. train_cache_file_name (`str`, *optional*): Provide the name of a path for the cache file. It is used to store the train split indices instead of the automatically generated cache file name. test_cache_file_name (`str`, *optional*): Provide the name of a path for the cache file. It is used to store the test split indices instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. train_new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the train set after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments test_new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the test set after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds = ds.train_test_split(test_size=0.2, shuffle=True) DatasetDict({ train: Dataset({ features: ['text', 'label'], num_rows: 852 }) test: Dataset({ features: ['text', 'label'], num_rows: 214 }) }) # set a seed >>> ds = ds.train_test_split(test_size=0.2, seed=42) # stratified split >>> ds = load_dataset("imdb",split="train") Dataset({ features: ['text', 'label'], num_rows: 25000 }) >>> ds = ds.train_test_split(test_size=0.2, stratify_by_column="label") DatasetDict({ train: Dataset({ features: ['text', 'label'], num_rows: 20000 }) test: Dataset({ features: ['text', 'label'], num_rows: 5000 }) }) ``` """ from .dataset_dict import DatasetDict # import here because of circular dependency if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.train_test_split` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return DatasetDict({"train": self, "test": self}) if test_size is None and train_size is None: test_size = 0.25 # Safety checks similar to scikit-learn's ones. 
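# A hedged worked example of the size resolution performed below (not part of the original code):
# with n_samples=1000 and test_size=0.2, n_test = ceil(0.2 * 1000) = 200, and since train_size
# is None, n_train = n_samples - n_test = 800.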
# (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750) n_samples = len(self) if ( isinstance(test_size, int) and (test_size >= n_samples or test_size <= 0) or isinstance(test_size, float) and (test_size <= 0 or test_size >= 1) ): raise ValueError( f"test_size={test_size} should be either positive and smaller " f"than the number of samples {n_samples} or a float in the (0, 1) range" ) if ( isinstance(train_size, int) and (train_size >= n_samples or train_size <= 0) or isinstance(train_size, float) and (train_size <= 0 or train_size >= 1) ): raise ValueError( f"train_size={train_size} should be either positive and smaller " f"than the number of samples {n_samples} or a float in the (0, 1) range" ) if train_size is not None and not isinstance(train_size, (int, float)): raise ValueError(f"Invalid value for train_size: {train_size} of type {type(train_size)}") if test_size is not None and not isinstance(test_size, (int, float)): raise ValueError(f"Invalid value for test_size: {test_size} of type {type(test_size)}") if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1: raise ValueError( f"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)" " range. Reduce test_size and/or train_size." ) if isinstance(test_size, float): n_test = ceil(test_size * n_samples) elif isinstance(test_size, int): n_test = float(test_size) if isinstance(train_size, float): n_train = floor(train_size * n_samples) elif isinstance(train_size, int): n_train = float(train_size) if train_size is None: n_train = n_samples - n_test elif test_size is None: n_test = n_samples - n_train if n_train + n_test > n_samples: raise ValueError( f"The sum of train_size and test_size = {n_train + n_test}, " "should be smaller than the number of " f"samples {n_samples}. Reduce test_size and/or " "train_size." ) n_train, n_test = int(n_train), int(n_test) if n_train == 0: raise ValueError( f"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the " "resulting train set will be empty. Adjust any of the " "aforementioned parameters." 
) load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() if generator is None and shuffle is True: if seed is None: _, seed, pos, *_ = np.random.get_state() seed = seed[pos] if pos < 624 else seed[0] _ = np.random.random() # do 1 step of rng generator = np.random.default_rng(seed) # Check if we've already cached this computation (indexed by a hash) if self.cache_files: if train_indices_cache_file_name is None or test_indices_cache_file_name is None: # we create a unique hash from the function, current dataset file and the mapping args if train_indices_cache_file_name is None: train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint) if test_indices_cache_file_name is None: test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint) if ( os.path.exists(train_indices_cache_file_name) and os.path.exists(test_indices_cache_file_name) and load_from_cache_file ): logger.info( f"Loading cached split indices for dataset at {train_indices_cache_file_name} and {test_indices_cache_file_name}" ) return DatasetDict( { "train": self._new_dataset_with_indices( fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name ), "test": self._new_dataset_with_indices( fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name ), } ) if not shuffle: if stratify_by_column is not None: raise ValueError("Stratified train/test split is not implemented for `shuffle=False`") train_indices = np.arange(n_train) test_indices = np.arange(n_train, n_train + n_test) else: # stratified partition if stratify_by_column is not None: if stratify_by_column not in self._info.features.keys(): raise ValueError(f"Key {stratify_by_column} not found in {self._info.features.keys()}") if not isinstance(self._info.features[stratify_by_column], ClassLabel): raise ValueError( f"Stratifying by column is only supported for {ClassLabel.__name__} column, and column {stratify_by_column} is {type(self._info.features[stratify_by_column]).__name__}." ) try: train_indices, test_indices = next( stratified_shuffle_split_generate_indices( self.with_format("numpy")[stratify_by_column], n_train, n_test, rng=generator ) ) except Exception as error: if str(error) == "Minimum class count error": raise ValueError( f"The least populated class in {stratify_by_column} column has only 1" " member, which is too few. The minimum" " number of groups for any class cannot" " be less than 2." ) else: raise error # random partition else: permutation = generator.permutation(len(self)) test_indices = permutation[:n_test] train_indices = permutation[n_test : (n_test + n_train)] train_split = self.select( indices=train_indices, keep_in_memory=keep_in_memory, indices_cache_file_name=train_indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=train_new_fingerprint, ) test_split = self.select( indices=test_indices, keep_in_memory=keep_in_memory, indices_cache_file_name=test_indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=test_new_fingerprint, ) return DatasetDict({"train": train_split, "test": test_split}) def shard( self, num_shards: int, index: int, contiguous: bool = False, keep_in_memory: bool = False, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, ) -> "Dataset": """Return the `index`-nth shard from dataset split into `num_shards` pieces. This shards deterministically. 
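As a hedged illustration, a 10-row dataset sharded with `num_shards=3` gives `shard(3, 0)` the rows at indices 0, 3, 6 and 9 in the default (non-contiguous) mode.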
`dset.shard(n, i)` will contain all elements of dset whose index mod `n = i`. `dset.shard(n, i, contiguous=True)` will instead split dset into contiguous chunks, so it can be easily concatenated back together after processing. If `n % i == l`, then the first `l` shards will have length `(n // i) + 1`, and the remaining shards will have length `(n // i)`. `datasets.concatenate([dset.shard(n, i, contiguous=True) for i in range(n)])` will return a dataset with the same order as the original. Be sure to shard before using any randomizing operator (such as `shuffle`). It is best if the shard operator is used early in the dataset pipeline. Args: num_shards (`int`): How many shards to split the dataset into. index (`int`): Which shard to select and return. contiguous: (`bool`, defaults to `False`): Whether to select contiguous blocks of indices for shards. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. indices_cache_file_name (`str`, *optional*): Provide the name of a path for the cache file. It is used to store the indices of each shard instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds Dataset({ features: ['text', 'label'], num_rows: 1066 }) >>> ds.shard(num_shards=2, index=0) Dataset({ features: ['text', 'label'], num_rows: 533 }) ``` """ if not 0 <= index < num_shards: raise ValueError("index should be in [0, num_shards-1]") if contiguous: div = len(self) // num_shards mod = len(self) % num_shards start = div * index + min(index, mod) end = start + div + (1 if index < mod else 0) indices = range(start, end) else: indices = np.arange(index, len(self), num_shards) return self.select( indices=indices, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, ) def export( self, filename: str, format: str = "tfrecord", ): """Writes the Arrow dataset to a TFRecord file. The dataset must already be in tensorflow format. The records will be written with keys from `dataset._format_columns`. Args: filename (`str`): The filename, including the `.tfrecord` extension, to write to. format (`str`, optional, default `"tfrecord"`): The type of output file. Currently this is a no-op, as TFRecords are the only option. This enables a more flexible function signature later. 
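Example (a hedged sketch: TensorFlow must be installed, the dataset must be set to NumPy format first, and the column names are assumed):

```py
>>> ds = ds.with_format("numpy", columns=["input_ids", "label"])
>>> ds.export("my_dataset.tfrecord")
```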
""" try: import tensorflow as tf # noqa: F401 except ImportError: logger.error("Tensorflow needs to be installed to be able to return Tensorflow tensors.") # From https://www.tensorflow.org/tutorials/load_data/tfrecord def _bytes_feature(values): """Returns a bytes_list from a list of string / byte.""" return tf.train.Feature(bytes_list=tf.train.BytesList(value=values)) def _float_feature(values): """Returns a float_list from a list of float / double.""" return tf.train.Feature(float_list=tf.train.FloatList(value=values)) def _int64_feature(values): """Returns an int64_list from a list of bool / enum / int / uint.""" return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def _feature(values: Union[float, int, str, np.ndarray, list]) -> "tf.train.Feature": """Typechecks `values` and returns the corresponding tf.train.Feature.""" if isinstance(values, list): if values and isinstance(values[0], str): return _bytes_feature([v.encode() for v in values]) else: raise ValueError(f"values={values} is empty or contains items that cannot be serialized") elif isinstance(values, np.ndarray): if values.dtype == np.dtype(float): return _float_feature(values) elif values.dtype == np.int64: return _int64_feature(values) elif values.dtype == np.dtype(str) or ( values.dtype == np.dtype(object) and len(values) > 0 and isinstance(values[0], str) ): return _bytes_feature([v.encode() for v in values]) else: raise ValueError( f"values={values} is empty or is an np.ndarray with items of dtype {values[0].dtype}, which cannot be serialized" ) elif hasattr(values, "dtype"): if np.issubdtype(values.dtype, np.floating): return _float_feature([values.item()]) elif np.issubdtype(values.dtype, np.integer): return _int64_feature([values.item()]) elif np.issubdtype(values.dtype, str): return _bytes_feature([values.item().encode()]) else: raise ValueError(f"values={values} has dtype {values.dtype}, which cannot be serialized") else: raise ValueError(f"values={values} are not numpy objects or strings, and so cannot be serialized") def serialize_example(ex): feature = {key: _feature(value) for key, value in ex.items()} example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def tf_serialize_example(ex): tf_string = tf.py_function(serialize_example, (ex,), tf.string) return tf.reshape(tf_string, ()) def generator(): for ex in self: yield serialize_example(ex) if self._format_type != "numpy": raise ValueError("Dataset format must be numpy before exporting") if not filename.endswith(".tfrecord"): raise ValueError("filename {filename} must end with .tfrecord") tf_dataset = tf.data.Dataset.from_generator(generator, output_types=tf.string, output_shapes=()) writer = tf.data.experimental.TFRecordWriter(filename) logger.info(f"Writing TFRecord to {filename}") writer.write(tf_dataset) logger.info(f"Finished writing TFRecord to {filename}") self = None # delete the dataset reference used by tf_dataset def to_csv( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, **to_csv_kwargs, ) -> int: """Exports the dataset to csv Args: path_or_buf (`PathLike` or `FileOrBuffer`): Either a path to a file (e.g. `file.csv`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.csv`), or a BinaryIO, where the dataset will be saved to in the specified format. batch_size (`int`, *optional*): Size of the batch to load in memory and write at once. 
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. num_proc (`int`, *optional*): Number of processes for multiprocessing. By default it doesn't use multiprocessing. `batch_size` in this case defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default value if you have sufficient compute power. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.19.0"/> **to_csv_kwargs (additional keyword arguments): Parameters to pass to pandas's [`pandas.DataFrame.to_csv`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html). <Changed version="2.10.0"> Now, `index` defaults to `False` if not specified. If you would like to write the index, pass `index=True` and also set a name for the index column by passing `index_label`. </Changed> Returns: `int`: The number of characters or bytes written. Example: ```py >>> ds.to_csv("path/to/dataset/directory") ``` """ # Dynamic import to avoid circular dependency from .io.csv import CsvDatasetWriter return CsvDatasetWriter( self, path_or_buf, batch_size=batch_size, num_proc=num_proc, storage_options=storage_options, **to_csv_kwargs, ).write() def to_dict(self, batch_size: Optional[int] = None, batched="deprecated") -> Union[dict, Iterator[dict]]: """Returns the dataset as a Python dict. Can also return a generator for large datasets. Args: batched (`bool`): Set to `True` to return a generator that yields the dataset as batches of `batch_size` rows. Defaults to `False` (returns the whole datasets once). <Deprecated version="2.11.0"> Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead. </Deprecated> batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`. Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. Returns: `dict` or `Iterator[dict]` Example: ```py >>> ds.to_dict() ``` """ if batched != "deprecated": warnings.warn( "'batched' was deprecated in version 2.11.0 and will be removed in version 3.0.0. Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead.", FutureWarning, ) else: batched = False if not batched: return query_table( table=self._data, key=slice(0, len(self)), indices=self._indices, ).to_pydict() else: batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE return ( query_table( table=self._data, key=slice(offset, offset + batch_size), indices=self._indices, ).to_pydict() for offset in range(0, len(self), batch_size) ) def to_list(self) -> list: """Returns the dataset as a Python list. Returns: `list` Example: ```py >>> ds.to_list() ``` """ return query_table( table=self._data, key=slice(0, len(self)), indices=self._indices, ).to_pylist() def to_json( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, **to_json_kwargs, ) -> int: """Export the dataset to JSON Lines or JSON. Args: path_or_buf (`PathLike` or `FileOrBuffer`): Either a path to a file (e.g. `file.json`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.json`), or a BinaryIO, where the dataset will be saved to in the specified format. batch_size (`int`, *optional*): Size of the batch to load in memory and write at once. Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. num_proc (`int`, *optional*): Number of processes for multiprocessing. By default it doesn't use multiprocessing. 
`batch_size` in this case defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default value if you have sufficient compute power. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.19.0"/> **to_json_kwargs (additional keyword arguments): Parameters to pass to pandas's [`pandas.DataFrame.to_json`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html). <Changed version="2.11.0"> Now, `index` defaults to `False` if `orient` is `"split"` or `"table"`. If you would like to write the index, pass `index=True`. </Changed> Returns: `int`: The number of characters or bytes written. Example: ```py >>> ds.to_json("path/to/dataset/directory") ``` """ # Dynamic import to avoid circular dependency from .io.json import JsonDatasetWriter return JsonDatasetWriter( self, path_or_buf, batch_size=batch_size, num_proc=num_proc, storage_options=storage_options, **to_json_kwargs, ).write() def to_pandas( self, batch_size: Optional[int] = None, batched: bool = False ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: """Returns the dataset as a `pandas.DataFrame`. Can also return a generator for large datasets. Args: batched (`bool`): Set to `True` to return a generator that yields the dataset as batches of `batch_size` rows. Defaults to `False` (returns the whole datasets once). batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`. Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. Returns: `pandas.DataFrame` or `Iterator[pandas.DataFrame]` Example: ```py >>> ds.to_pandas() ``` """ if not batched: return query_table( table=self._data, key=slice(0, len(self)), indices=self._indices, ).to_pandas(types_mapper=pandas_types_mapper) else: batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE return ( query_table( table=self._data, key=slice(offset, offset + batch_size), indices=self._indices, ).to_pandas(types_mapper=pandas_types_mapper) for offset in range(0, len(self), batch_size) ) def to_polars( self, batch_size: Optional[int] = None, batched: bool = False, schema_overrides: Optional[dict] = None, rechunk: bool = True, ) -> Union["pl.DataFrame", Iterator["pl.DataFrame"]]: """Returns the dataset as a `polars.DataFrame`. Can also return a generator for large datasets. Args: batched (`bool`): Set to `True` to return a generator that yields the dataset as batches of `batch_size` rows. Defaults to `False` (returns the whole datasets once). batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`. Defaults to `genomicsml.datasets.config.DEFAULT_MAX_BATCH_SIZE`. schema_overrides (`dict`, *optional*): Support type specification or override of one or more columns; note that any dtypes inferred from the schema param will be overridden. rechunk (`bool`): Make sure that all data is in contiguous memory. Defaults to `True`. 
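For large datasets, a hedged sketch of consuming the conversion batch by batch (assumes `polars` is installed):

```py
>>> for df in ds.to_polars(batched=True, batch_size=1024):
...     print(df.shape)
```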
Returns: `polars.DataFrame` or `Iterator[polars.DataFrame]` Example: ```py >>> ds.to_polars() ``` """ if config.POLARS_AVAILABLE: import polars as pl if not batched: return pl.from_arrow( query_table( table=self._data, key=slice(0, len(self)), indices=self._indices if self._indices is not None else None, ), schema_overrides=schema_overrides, rechunk=rechunk, ) else: batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE return ( pl.from_arrow( query_table( table=self._data, key=slice(offset, offset + batch_size), indices=self._indices if self._indices is not None else None, ), schema_overrides=schema_overrides, rechunk=rechunk, ) for offset in range(0, len(self), batch_size) ) else: raise ValueError("Polars needs to be installed to be able to return Polars dataframes.") def to_parquet( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, storage_options: Optional[dict] = None, **parquet_writer_kwargs, ) -> int: """Exports the dataset to parquet Args: path_or_buf (`PathLike` or `FileOrBuffer`): Either a path to a file (e.g. `file.parquet`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.parquet`), or a BinaryIO, where the dataset will be saved to in the specified format. batch_size (`int`, *optional*): Size of the batch to load in memory and write at once. Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.19.0"/> **parquet_writer_kwargs (additional keyword arguments): Parameters to pass to PyArrow's `pyarrow.parquet.ParquetWriter`. Returns: `int`: The number of characters or bytes written. Example: ```py >>> ds.to_parquet("path/to/dataset/directory") ``` """ # Dynamic import to avoid circular dependency from .io.parquet import ParquetDatasetWriter return ParquetDatasetWriter( self, path_or_buf, batch_size=batch_size, storage_options=storage_options, **parquet_writer_kwargs ).write() def to_sql( self, name: str, con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], batch_size: Optional[int] = None, **sql_writer_kwargs, ) -> int: """Exports the dataset to a SQL database. Args: name (`str`): Name of SQL table. con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Connection`): A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) or a SQLite3/SQLAlchemy connection object used to write to a database. batch_size (`int`, *optional*): Size of the batch to load in memory and write at once. Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. **sql_writer_kwargs (additional keyword arguments): Parameters to pass to pandas's [`pandas.DataFrame.to_sql`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_sql.html). <Changed version="2.11.0"> Now, `index` defaults to `False` if not specified. If you would like to write the index, pass `index=True` and also set a name for the index column by passing `index_label`. </Changed> Returns: `int`: The number of records written. Example: ```py >>> # con provided as a connection URI string >>> ds.to_sql("data", "sqlite:///my_own_db.sql") >>> # con provided as a sqlite3 connection object >>> import sqlite3 >>> con = sqlite3.connect("my_own_db.sql") >>> with con: ... 
ds.to_sql("data", con) ``` """ # Dynamic import to avoid circular dependency from .io.sql import SqlDatasetWriter return SqlDatasetWriter(self, name, con, batch_size=batch_size, **sql_writer_kwargs).write() def _estimate_nbytes(self) -> int: dataset_nbytes = self.data.nbytes # Find decodable columns, because if there are any, we need to # adjust the dataset size computation (needed for sharding) to account for possible external files decodable_columns = [ k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True) ] if decodable_columns: # Approximate the space needed to store the bytes from the external files by analyzing the first 1000 examples extra_nbytes = 0 def extra_nbytes_visitor(array, feature): nonlocal extra_nbytes if isinstance(feature, (Audio, Image)): for x in array.to_pylist(): if x is not None and x["bytes"] is None and x["path"] is not None: size = xgetsize(x["path"]) extra_nbytes += size extra_nbytes -= array.field("path").nbytes table = self.with_format("arrow")[:1000] table_visitor(table, extra_nbytes_visitor) extra_nbytes = extra_nbytes * len(self.data) / len(table) dataset_nbytes = dataset_nbytes + extra_nbytes if self._indices is not None: dataset_nbytes = dataset_nbytes * len(self._indices) / len(self.data) return dataset_nbytes def _generate_tables_from_shards(shards: List["Dataset"], batch_size: int): for shard_idx, shard in enumerate(shards): for pa_table in shard.with_format("arrow").iter(batch_size): yield shard_idx, pa_table def _generate_tables_from_cache_file(filename: str): for batch_idx, batch in enumerate(_memory_mapped_record_batch_reader_from_file(filename)): yield batch_idx, pa.Table.from_batches([batch]) def to_iterable_dataset(self, num_shards: Optional[int] = 1) -> "IterableDataset": """Get an [`datasets.IterableDataset`] from a map-style [`datasets.Dataset`]. This is equivalent to loading a dataset in streaming mode with [`datasets.load_dataset`], but much faster since the data is streamed from local files. Contrary to map-style datasets, iterable datasets are lazy and can only be iterated over (e.g. using a for loop). Since they are read sequentially in training loops, iterable datasets are much faster than map-style datasets. All the transformations applied to iterable datasets like filtering or processing are done on-the-fly when you start iterating over the dataset. Still, it is possible to shuffle an iterable dataset using [`datasets.IterableDataset.shuffle`]. This is a fast approximate shuffling that works best if you have multiple shards and if you specify a buffer size that is big enough. To get the best speed performance, make sure your dataset doesn't have an indices mapping. If this is the case, the data are not read contiguously, which can be slow sometimes. You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed before switching to an iterable dataset. Args: num_shards (`int`, default to `1`): Number of shards to define when instantiating the iterable dataset. This is especially useful for big datasets to be able to shuffle properly, and also to enable fast parallel loading using a PyTorch DataLoader or in distributed setups for example. Shards are defined using [`datasets.Dataset.shard`]: it simply slices the data without writing anything on disk. Returns: [`datasets.IterableDataset`] Example: Basic usage: ```python >>> ids = ds.to_iterable_dataset() >>> for example in ids: ... 
pass ``` With lazy filtering and processing: ```python >>> ids = ds.to_iterable_dataset() >>> ids = ids.filter(filter_fn).map(process_fn) # will filter and process on-the-fly when you start iterating over the iterable dataset >>> for example in ids: ... pass ``` With sharding to enable efficient shuffling: ```python >>> ids = ds.to_iterable_dataset(num_shards=64) # the dataset is split into 64 shards to be iterated over >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer for fast approximate shuffling when you start iterating >>> for example in ids: ... pass ``` With a PyTorch DataLoader: ```python >>> import torch >>> ids = ds.to_iterable_dataset(num_shards=64) >>> ids = ids.filter(filter_fn).map(process_fn) >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards to each worker to load, filter and process when you start iterating >>> for example in ids: ... pass ``` With a PyTorch DataLoader and shuffling: ```python >>> import torch >>> ids = ds.to_iterable_dataset(num_shards=64) >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from the shuffled list of shards to each worker when you start iterating >>> for example in ids: ... pass ``` In a distributed setup like PyTorch DDP with a PyTorch DataLoader and shuffling ```python >>> from datasets.distributed import split_dataset_by_node >>> ids = ds.to_iterable_dataset(num_shards=512) >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating >>> ids = split_dataset_by_node(ds, world_size=8, rank=0) # will keep only 512 / 8 = 64 shards from the shuffled lists of shards when you start iterating >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from this node's list of shards to each worker when you start iterating >>> for example in ids: ... pass ``` With shuffling and multiple epochs: ```python >>> ids = ds.to_iterable_dataset(num_shards=64) >>> ids = ids.shuffle(buffer_size=10_000, seed=42) # will shuffle the shards order and use a shuffle buffer when you start iterating >>> for epoch in range(n_epochs): ... ids.set_epoch(epoch) # will use effective_seed = seed + epoch to shuffle the shards and for the shuffle buffer when you start iterating ... for example in ids: ... pass ``` Feel free to also use [`IterableDataset.set_epoch`] when using a PyTorch DataLoader or in distributed setups. """ from .iterable_dataset import ArrowExamplesIterable, IterableDataset if self._format_type is not None: raise NotImplementedError( "Converting a formatted dataset to a formatted iterable dataset is not implemented yet. Please run `my_dataset = my_dataset.with_format(None)` before calling to_iterable_dataset" ) if num_shards > len(self): raise ValueError( f"Unable to shard a dataset of size {len(self)} into {num_shards} shards (the number of shards exceeds the number of samples)." ) if self._indices is not None: logger.info( "Converting an Arrow dataset to iterable but it has an indices mapping that can make it slower. " "You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed." 
) shards = ( [copy.deepcopy(self)] if num_shards == 1 else [ self.shard(num_shards=num_shards, index=shard_idx, contiguous=True) for shard_idx in range(num_shards) ] ) ex_iterable = ArrowExamplesIterable( Dataset._generate_tables_from_shards, kwargs={"shards": shards, "batch_size": config.DEFAULT_MAX_BATCH_SIZE}, ) return IterableDataset(ex_iterable, info=DatasetInfo(features=self.features)) def _push_parquet_shards_to_hub( self, repo_id: str, data_dir: str = "data", split: Optional[str] = None, token: Optional[str] = None, revision: Optional[str] = None, create_pr: Optional[bool] = False, max_shard_size: Optional[Union[int, str]] = None, num_shards: Optional[int] = None, embed_external_files: bool = True, ) -> Tuple[str, str, int, int, List[str], int]: """Pushes the dataset shards as Parquet files to the hub. Returns: additions (`List[CommitOperation]`): list of the `CommitOperationAdd` of the uploaded shards uploaded_size (`int`): number of uploaded bytes to the repository dataset_nbytes (`int`): approximate size in bytes of the uploaded dataset afer uncompression """ # Find decodable columns, because if there are any, we need to: # embed the bytes from the files in the shards decodable_columns = ( [k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)] if embed_external_files else [] ) dataset_nbytes = self._estimate_nbytes() if num_shards is None: max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE) num_shards = int(dataset_nbytes / max_shard_size) + 1 num_shards = max(num_shards, 1) shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards)) if decodable_columns: def shards_with_embedded_external_files(shards): for shard in shards: format = shard.format shard = shard.with_format("arrow") shard = shard.map( embed_table_storage, batched=True, batch_size=1000, keep_in_memory=True, ) shard = shard.with_format(**format) yield shard shards = shards_with_embedded_external_files(shards) api = HfApi(endpoint=config.HF_ENDPOINT, token=token) uploaded_size = 0 additions = [] for index, shard in hf_tqdm( enumerate(shards), desc="Uploading the dataset shards", total=num_shards, ): shard_path_in_repo = f"{data_dir}/{split}-{index:05d}-of-{num_shards:05d}.parquet" buffer = BytesIO() shard.to_parquet(buffer) uploaded_size += buffer.tell() shard_addition = CommitOperationAdd(path_in_repo=shard_path_in_repo, path_or_fileobj=buffer) preupload_lfs_files( api, repo_id=repo_id, additions=[shard_addition], token=token, repo_type="dataset", revision=revision, create_pr=create_pr, ) additions.append(shard_addition) return additions, uploaded_size, dataset_nbytes def push_to_hub( self, repo_id: str, config_name: str = "default", set_default: Optional[bool] = None, split: Optional[str] = None, data_dir: Optional[str] = None, commit_message: Optional[str] = None, commit_description: Optional[str] = None, private: Optional[bool] = False, token: Optional[str] = None, revision: Optional[str] = None, branch="deprecated", create_pr: Optional[bool] = False, max_shard_size: Optional[Union[int, str]] = None, num_shards: Optional[int] = None, embed_external_files: bool = True, ) -> CommitInfo: """Pushes the dataset to the hub as a Parquet dataset. The dataset is pushed using HTTP requests and does not need to have neither git or git-lfs installed. The resulting Parquet files are self-contained by default. 
If your dataset contains [`Image`] or [`Audio`] data, the Parquet files will store the bytes of your images or audio files. You can disable this by setting `embed_external_files` to `False`. Args: repo_id (`str`): The ID of the repository to push to in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace of the logged-in user. config_name (`str`, defaults to "default"): The configuration name (or subset) of a dataset. Defaults to "default". set_default (`bool`, *optional*): Whether to set this configuration as the default one. Otherwise, the default configuration is the one named "default". split (`str`, *optional*): The name of the split that will be given to that dataset. Defaults to `self.split`. data_dir (`str`, *optional*): Directory name that will contain the uploaded data files. Defaults to the `config_name` if different from "default", else "data". <Added version="2.17.0"/> commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"Upload dataset"`. commit_description (`str`, *optional*): Description of the commit that will be created. Additionally, description of the PR if a PR is created (`create_pr` is True). <Added version="2.16.0"/> private (`bool`, *optional*, defaults to `False`): Whether the dataset repository should be set to private or not. Only affects repository creation: a repository that already exists will not be affected by that parameter. token (`str`, *optional*): An optional authentication token for the Hugging Face Hub. If no token is passed, will default to the token saved locally when logging in with `huggingface-cli login`. Will raise an error if no token is passed and the user is not logged-in. revision (`str`, *optional*): Branch to push the uploaded files to. Defaults to the `"main"` branch. <Added version="2.15.0"/> branch (`str`, *optional*): The git branch on which to push the dataset. This defaults to the default branch as specified in your repository, which defaults to `"main"`. <Deprecated version="2.15.0"> `branch` was deprecated in favor of `revision` in version 2.15.0 and will be removed in 3.0.0. </Deprecated> create_pr (`bool`, *optional*, defaults to `False`): Whether to create a PR with the uploaded files or directly commit. <Added version="2.15.0"/> max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). num_shards (`int`, *optional*): Number of shards to write. By default, the number of shards depends on `max_shard_size`. <Added version="2.8.0"/> embed_external_files (`bool`, defaults to `True`): Whether to embed file bytes in the shards. In particular, this will do the following before the push for the fields of type: - [`Audio`] and [`Image`]: remove local path information and embed file content in the Parquet files. Return: huggingface_hub.CommitInfo Example: ```python >>> dataset.push_to_hub("<organization>/<dataset_id>") >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True) >>> dataset.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB") >>> dataset.push_to_hub("<organization>/<dataset_id>", num_shards=1024) ``` If your dataset has multiple splits (e.g. 
train/validation/test): ```python >>> train_dataset.push_to_hub("<organization>/<dataset_id>", split="train") >>> val_dataset.push_to_hub("<organization>/<dataset_id>", split="validation") >>> # later >>> dataset = load_dataset("<organization>/<dataset_id>") >>> train_dataset = dataset["train"] >>> val_dataset = dataset["validation"] ``` If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages): ```python >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en") >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr") >>> # later >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en") >>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr") ``` """ if config_name == "data": raise ValueError("`config_name` cannot be 'data'. Please, choose another name for configuration.") if max_shard_size is not None and num_shards is not None: raise ValueError( "Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both." ) if split is None: split = str(self.split) if self.split is not None else "train" if not re.match(_split_re, split): raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.") if branch != "deprecated": warnings.warn( "'branch' was deprecated in favor of 'revision' in version 2.15.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'revision={branch}' instead.", FutureWarning, ) revision = branch api = HfApi(endpoint=config.HF_ENDPOINT, token=token) repo_url = api.create_repo( repo_id, token=token, repo_type="dataset", private=private, exist_ok=True, ) repo_id = repo_url.repo_id if revision is not None: api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True) if not data_dir: data_dir = config_name if config_name != "default" else "data" # for backward compatibility additions, uploaded_size, dataset_nbytes = self._push_parquet_shards_to_hub( repo_id=repo_id, data_dir=data_dir, split=split, token=token, revision=revision, max_shard_size=max_shard_size, num_shards=num_shards, create_pr=create_pr, embed_external_files=embed_external_files, ) # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern) # and delete old split shards (if they exist) repo_with_dataset_card, repo_with_dataset_infos = False, False deletions, deleted_size = [], 0 repo_splits = [] # use a list to keep the order of the splits repo_files_to_add = [addition.path_in_repo for addition in additions] for repo_file in list_files_info(api, repo_id=repo_id, revision=revision, repo_type="dataset", token=token): if repo_file.rfilename == config.REPOCARD_FILENAME: repo_with_dataset_card = True elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME: repo_with_dataset_infos = True elif ( repo_file.rfilename.startswith(f"{data_dir}/{split}-") and repo_file.rfilename not in repo_files_to_add ): deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename)) deleted_size += repo_file.size elif fnmatch.fnmatch( repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*") ): repo_split = string_to_dict( repo_file.rfilename, glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED), )["split"] if repo_split not in repo_splits: repo_splits.append(repo_split) organization, dataset_name = repo_id.split("/") if "/" in repo_id else (None, repo_id) info_to_dump = 
self.info.copy() info_to_dump.download_checksums = None info_to_dump.download_size = uploaded_size info_to_dump.dataset_size = dataset_nbytes info_to_dump.size_in_bytes = uploaded_size + dataset_nbytes info_to_dump.config_name = config_name info_to_dump.splits = SplitDict( {split: SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name)} ) # get the info from the README to update them if repo_with_dataset_card: dataset_card_path = api.hf_hub_download( repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision ) dataset_card = DatasetCard.load(Path(dataset_card_path)) dataset_card_data = dataset_card.data metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card_data) if dataset_infos and config_name in dataset_infos: repo_info = dataset_infos[config_name] else: repo_info = None # get the deprecated dataset_infos.json to update them elif repo_with_dataset_infos: dataset_card = None dataset_card_data = DatasetCardData() metadata_configs = MetadataConfigs() dataset_infos_path = api.hf_hub_download( repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision ) with open(dataset_infos_path, encoding="utf-8") as f: dataset_infos: dict = json.load(f) dataset_info = dataset_infos.get(config_name, None) if dataset_infos else None repo_info = DatasetInfo.from_dict(dataset_info) if dataset_info else None else: dataset_card = None dataset_card_data = DatasetCardData() metadata_configs = MetadataConfigs() repo_info = None # update the total info to dump from existing info if repo_info is not None: logger.info("Updating downloaded metadata with the new split.") if repo_info.splits and list(repo_info.splits) != [split]: if self._info.features != repo_info.features: raise ValueError( f"Features of the new split don't match the features of the existing splits on the hub: {self._info.features} != {repo_info.features}" ) if split in repo_info.splits: repo_info.download_size -= deleted_size repo_info.dataset_size -= repo_info.splits.get(split, SplitInfo()).num_bytes or 0 repo_info.download_checksums = None repo_info.download_size = (repo_info.download_size or 0) + uploaded_size repo_info.dataset_size = (repo_info.dataset_size or 0) + dataset_nbytes repo_info.size_in_bytes = repo_info.download_size + repo_info.dataset_size repo_info.splits.pop(split, None) repo_info.splits[split] = SplitInfo( split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name ) info_to_dump = repo_info # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed if not metadata_configs and repo_splits: default_metadata_configs_to_dump = { "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits] } MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data) # update the metadata configs if config_name in metadata_configs: metadata_config = metadata_configs[config_name] if "data_files" in metadata_config: data_files_to_dump = sanitize_patterns(metadata_config["data_files"]) else: data_files_to_dump = {} # add the new split data_files_to_dump[split] = [f"{data_dir}/{split}-*"] metadata_config_to_dump = { "data_files": [ { "split": _split, "path": _pattern[0] if len(_pattern) == 1 else _pattern, } for _split, _pattern in data_files_to_dump.items() ] } else: metadata_config_to_dump = {"data_files": [{"split": split, "path": 
f"{data_dir}/{split}-*"}]} if set_default and config_name != "default": if metadata_configs: default_config_name = metadata_configs.get_default_config_name() if default_config_name == "default": raise ValueError( "There exists a configuration named 'default'. To set a different configuration as default, " "rename the 'default' one first." ) else: _ = metadata_configs[default_config_name].pop("default") metadata_config_to_dump["default"] = True # push to the deprecated dataset_infos.json if repo_with_dataset_infos: dataset_infos_path = api.hf_hub_download( repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision ) with open(dataset_infos_path, encoding="utf-8") as f: dataset_infos: dict = json.load(f) dataset_infos[config_name] = asdict(info_to_dump) buffer = BytesIO() buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8")) additions.append( CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer) ) # push to README DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data) MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data) dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card additions.append( CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode()) ) commit_message = commit_message if commit_message is not None else "Upload dataset" if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT: commit_info = api.create_commit( repo_id, operations=additions + deletions, commit_message=commit_message, commit_description=commit_description, token=token, repo_type="dataset", revision=revision, create_pr=create_pr, ) else: logger.info( f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits." ) num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT) for i in range(0, num_commits): operations = additions[ i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT ] + (deletions if i == 0 else []) commit_info = api.create_commit( repo_id, operations=operations, commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})", commit_description=commit_description, token=token, repo_type="dataset", revision=revision, create_pr=create_pr, ) logger.info( f"Commit #{i+1} completed" + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "") + "." ) return commit_info def add_column(self, name: str, column: Union[list, np.array], new_fingerprint: str): """Add column to Dataset. <Added version="1.7"/> Args: name (`str`): Column name. column (`list` or `np.array`): Column data to be added. 
Returns: [`Dataset`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> more_text = ds["text"] >>> ds.add_column(name="text_2", column=more_text) Dataset({ features: ['text', 'label', 'text_2'], num_rows: 1066 }) ``` """ column_table = InMemoryTable.from_pydict({name: column}) _check_column_names(self._data.column_names + column_table.column_names) dataset = self.flatten_indices() if self._indices is not None else self # Concatenate tables horizontally table = concat_tables([dataset._data, column_table], axis=1) # Update features info = dataset.info.copy() info.features.update(Features.from_arrow_schema(column_table.schema)) table = update_metadata_with_features(table, info.features) return Dataset(table, info=info, split=self.split, indices_table=None, fingerprint=new_fingerprint) def add_faiss_index( self, column: str, index_name: Optional[str] = None, device: Optional[int] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, # noqa: F821 batch_size: int = 1000, train_size: Optional[int] = None, faiss_verbose: bool = False, dtype=np.float32, ): """Add a dense index using Faiss for fast retrieval. By default the index is done over the vectors of the specified column. You can specify `device` if you want to run it on GPU (`device` must be the GPU index). You can find more information about Faiss here: - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory) Args: column (`str`): The column of the vectors to add to the index. index_name (`str`, *optional*): The `index_name`/identifier of the index. This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`]. By default it corresponds to `column`. device (`Union[int, List[int]]`, *optional*): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs. If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU. string_factory (`str`, *optional*): This is passed to the index factory of Faiss to create the index. Default index class is `IndexFlat`. metric_type (`int`, *optional*): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`. custom_index (`faiss.Index`, *optional*): Custom Faiss index that you already have instantiated and configured for your needs. batch_size (`int`): Size of the batch to use while adding vectors to the `FaissIndex`. Default value is `1000`. <Added version="2.4.0"/> train_size (`int`, *optional*): If the index needs a training step, specifies how many vectors will be used to train the index. faiss_verbose (`bool`, defaults to `False`): Enable the verbosity of the Faiss index. dtype (`data-type`): The dtype of the numpy arrays that are indexed. Default is `np.float32`. 
        Example:

        ```python
        >>> ds = datasets.load_dataset('crime_and_punish', split='train')
        >>> ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line'])})
        >>> ds_with_embeddings.add_faiss_index(column='embeddings')
        >>> # query
        >>> scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', embed('my new query'), k=10)
        >>> # save index
        >>> ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss')

        >>> ds = datasets.load_dataset('crime_and_punish', split='train')
        >>> # load index
        >>> ds.load_faiss_index('embeddings', 'my_index.faiss')
        >>> # query
        >>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10)
        ```
        """
        with self.formatted_as(type="numpy", columns=[column], dtype=dtype):
            super().add_faiss_index(
                column=column,
                index_name=index_name,
                device=device,
                string_factory=string_factory,
                metric_type=metric_type,
                custom_index=custom_index,
                batch_size=batch_size,
                train_size=train_size,
                faiss_verbose=faiss_verbose,
            )
        return self

    def add_faiss_index_from_external_arrays(
        self,
        external_arrays: np.array,
        index_name: str,
        device: Optional[int] = None,
        string_factory: Optional[str] = None,
        metric_type: Optional[int] = None,
        custom_index: Optional["faiss.Index"] = None,  # noqa: F821
        batch_size: int = 1000,
        train_size: Optional[int] = None,
        faiss_verbose: bool = False,
        dtype=np.float32,
    ):
        """Add a dense index using Faiss for fast retrieval.
        The index is created using the vectors of `external_arrays`.
        You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
        You can find more information about Faiss here:
        - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)

        Args:
            external_arrays (`np.array`):
                If you want to use arrays from outside the lib for the index, you can set `external_arrays`.
                It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.
            index_name (`str`):
                The `index_name`/identifier of the index.
                This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
            device (Optional `Union[int, List[int]]`, *optional*):
                If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
                If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
            string_factory (`str`, *optional*):
                This is passed to the index factory of Faiss to create the index.
                Default index class is `IndexFlat`.
            metric_type (`int`, *optional*):
                Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
            custom_index (`faiss.Index`, *optional*):
                Custom Faiss index that you already have instantiated and configured for your needs.
            batch_size (`int`, *optional*):
                Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
                <Added version="2.4.0"/>
            train_size (`int`, *optional*):
                If the index needs a training step, specifies how many vectors will be used to train the index.
            faiss_verbose (`bool`, defaults to False):
                Enable the verbosity of the Faiss index.
            dtype (`numpy.dtype`):
                The dtype of the numpy arrays that are indexed. Default is np.float32.
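        Example (a minimal sketch; the `external_embeddings` array and its 128-dim size below are illustrative placeholders for your own pre-computed vectors):

        ```python
        >>> import numpy as np
        >>> ds = datasets.load_dataset('crime_and_punish', split='train')
        >>> # hypothetical pre-computed vectors, one per example in the dataset
        >>> external_embeddings = np.random.random((len(ds), 128)).astype(np.float32)
        >>> ds.add_faiss_index_from_external_arrays(external_arrays=external_embeddings, index_name='embeddings')
        >>> # query with any vector of the same dimension
        >>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', external_embeddings[0], k=10)
        ```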
""" super().add_faiss_index_from_external_arrays( external_arrays=external_arrays.astype(dtype), index_name=index_name, device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose, ) def add_elasticsearch_index( self, column: str, index_name: Optional[str] = None, host: Optional[str] = None, port: Optional[int] = None, es_client: Optional["elasticsearch.Elasticsearch"] = None, # noqa: F821 es_index_name: Optional[str] = None, es_index_config: Optional[dict] = None, ): """Add a text index using ElasticSearch for fast retrieval. This is done in-place. Args: column (`str`): The column of the documents to add to the index. index_name (`str`, *optional*): The `index_name`/identifier of the index. This is the index name that is used to call [`~Dataset.get_nearest_examples`] or [`Dataset.search`]. By default it corresponds to `column`. host (`str`, *optional*, defaults to `localhost`): Host of where ElasticSearch is running. port (`str`, *optional*, defaults to `9200`): Port of where ElasticSearch is running. es_client (`elasticsearch.Elasticsearch`, *optional*): The elasticsearch client used to create the index if host and port are `None`. es_index_name (`str`, *optional*): The elasticsearch index name used to create the index. es_index_config (`dict`, *optional*): The configuration of the elasticsearch index. Default config is: ``` { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, }, "mappings": { "properties": { "text": { "type": "text", "analyzer": "standard", "similarity": "BM25" }, } }, } ``` Example: ```python >>> es_client = elasticsearch.Elasticsearch() >>> ds = datasets.load_dataset('crime_and_punish', split='train') >>> ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name="my_es_index") >>> scores, retrieved_examples = ds.get_nearest_examples('line', 'my new query', k=10) ``` """ with self.formatted_as(type=None, columns=[column]): super().add_elasticsearch_index( column=column, index_name=index_name, host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config, ) return self def add_item(self, item: dict, new_fingerprint: str): """Add item to Dataset. <Added version="1.7"/> Args: item (`dict`): Item data to be added. 
Returns: [`Dataset`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> new_review = {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'} >>> ds = ds.add_item(new_review) >>> ds[-1] {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'} ``` """ item_table = InMemoryTable.from_pydict({k: [v] for k, v in item.items()}) # We don't call _check_if_features_can_be_aligned here so this cast is "unsafe" dset_features, item_features = _align_features( [self._info.features, Features.from_arrow_schema(item_table.schema)] ) # Cast to align the schemas of the tables and concatenate the tables table = concat_tables( [ self._data.cast(dset_features.arrow_schema) if self._info.features != dset_features else self._data, item_table.cast(item_features.arrow_schema), ] ) if self._indices is None: indices_table = None else: item_indices_array = pa.array([len(self._data)], type=pa.uint64()) item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=["indices"]) indices_table = concat_tables([self._indices, item_indices_table]) info = self.info.copy() info.features.update(item_features) table = update_metadata_with_features(table, info.features) return Dataset( table, info=info, split=self.split, indices_table=indices_table, fingerprint=new_fingerprint, ) def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "Dataset": """Align the dataset's label ID and label name mapping to match an input `label2id` mapping. This is useful when you want to ensure that a model's predicted labels are aligned with the dataset. The alignment in done using the lowercase label names. Args: label2id (`dict`): The label name to ID mapping to align the dataset with. label_column (`str`): The column name of labels to align on. Example: ```python >>> # dataset with mapping {'entailment': 0, 'neutral': 1, 'contradiction': 2} >>> ds = load_dataset("glue", "mnli", split="train") >>> # mapping to align with >>> label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2} >>> ds_aligned = ds.align_labels_with_mapping(label2id, "label") ``` """ # Sanity checks if label_column not in self._data.column_names: raise ValueError(f"Column ({label_column}) not in table columns ({self._data.column_names}).") label_feature = self._info.features[label_column] if not ( isinstance(label_feature, ClassLabel) or (isinstance(label_feature, Sequence) and isinstance(label_feature.feature, ClassLabel)) ): raise ValueError( f"Aligning labels with a mapping is only supported for {ClassLabel.__name__} column or {Sequence.__name__} column with the inner type {ClassLabel.__name__}, and column {label_feature} is of type {type(label_feature).__name__}." 
        )
        # Sort input mapping by ID value to ensure the label names are aligned
        label2id = dict(sorted(label2id.items(), key=lambda item: item[1]))
        label_names = list(label2id.keys())
        # Some label mappings use uppercase label names so we lowercase them during alignment
        label2id = {k.lower(): v for k, v in label2id.items()}
        int2str_function = (
            label_feature.int2str if isinstance(label_feature, ClassLabel) else label_feature.feature.int2str
        )

        if isinstance(label_feature, ClassLabel):

            def process_label_ids(batch):
                dset_label_names = [
                    int2str_function(label_id).lower() if label_id is not None else None
                    for label_id in batch[label_column]
                ]
                batch[label_column] = [
                    label2id[label_name] if label_name is not None else None for label_name in dset_label_names
                ]
                return batch

        else:

            def process_label_ids(batch):
                dset_label_names = [
                    [int2str_function(label_id).lower() if label_id is not None else None for label_id in seq]
                    for seq in batch[label_column]
                ]
                batch[label_column] = [
                    [label2id[label_name] if label_name is not None else None for label_name in seq]
                    for seq in dset_label_names
                ]
                return batch

        features = self.features
        features[label_column] = (
            ClassLabel(num_classes=len(label_names), names=label_names)
            if isinstance(label_feature, ClassLabel)
            else Sequence(ClassLabel(num_classes=len(label_names), names=label_names))
        )
        return self.map(process_label_ids, features=features, batched=True, desc="Aligning the labels")


def _interleave_map_style_datasets(
    datasets: List["Dataset"],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
    **kwargs,
) -> "Dataset":
    """
    Interleave several map-style datasets (sources) into a single map-style dataset.
    The new dataset is constructed by alternating between the sources to get the examples.
    If `probabilities = None` (default) the new dataset is constructed by cycling between each source to get the examples.
    If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.

    Args:
        datasets (`List[Dataset]`): list of datasets to interleave
        probabilities (`List[float]`, optional, default None): If specified, the new dataset is constructed by sampling
            examples from one source at a time according to these probabilities.
        seed (`int`, optional, default None): The random seed used to choose a source for each example.
        info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.
        split (:class:`NamedSplit`, optional): Name of the dataset split.
        stopping_strategy (`str`, defaults to `first_exhausted`):
            Two strategies are proposed right now.
            By default, `first_exhausted` is an undersampling strategy, i.e the dataset construction is stopped as soon as one dataset has run out of samples.
            If the strategy is `all_exhausted`, we use an oversampling strategy, i.e the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
            Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
            - with no probabilities, the resulting dataset will have max_length_datasets*nb_dataset samples.
            - with given probabilities, the resulting dataset will have more samples if some datasets have a really low probability of being visited.
        **kwargs (additional keyword arguments): Keyword arguments to be passed to :meth:`datasets.Dataset.select` when selecting the indices used to interleave the datasets.

    Output:
        :class:`datasets.Dataset`
    """
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(
            f"{stopping_strategy} stopping strategy in `interleave_datasets` is not implemented yet with a list of {type(datasets[0])}"
        )

    # To interleave the datasets, we concatenate them and then we re-order the indices
    concatenated_datasets = _concatenate_map_style_datasets(datasets, info=info, split=split)

    # Let's now build the indices to pass to .select()
    lengths = [len(dset) for dset in datasets]
    offsets = np.cumsum([0] + lengths[:-1])

    # if stopping_strategy is "first_exhausted", it is an undersampling situation whereas it is an oversampling situation if it is "all_exhausted"
    oversampling = stopping_strategy == "all_exhausted"

    if probabilities is None and not oversampling:
        # Undersampling situation with cycling between each source
        # Example:: If lengths of the datasets are [3, 4, 5]
        # Then the resulting indices should be [0, 3, 7, 1, 4, 8, 2, 5, 9]
        # Note that we only have 3 examples per dataset since the first dataset ran out of examples

        # Reasoning behind the following operation: keeping the min_length first indices of each dataset
        # while offsetting in order to correspond to the right indices of the concatenated dataset
        # and flattening to effectively interleave the datasets
        indices = (offsets.reshape(1, -1) + np.arange(min(lengths)).reshape(-1, 1)).flatten().tolist()
    elif probabilities is None:
        # Oversampling situation with cycling between each source
        # Then the resulting indices should be [0, 3, 7, 1, 4, 8, 2, 5, 9, 0, 6, 10, 1, 3, 11]
        # Note that we have 5 examples per dataset with a rolling window since the longest dataset has 5 samples

        # Reasoning behind the following operation: for each dataset indices (i.e column) repeat the indices to have max_length indices per dataset
        # For example, if the max_length is 5 and the i-th dataset has 3 samples, the i-th column will be [0,1,2,0,1]
        indices = np.mod(np.arange(max(lengths)).reshape(-1, 1), np.array(lengths).reshape(1, -1))

        # We have to keep the indices to their respective dataset offsets and to flatten to effectively interleave the datasets
        indices = (indices + offsets).flatten().tolist()

    else:
        # boolean array indicating whether the dataset at index i has been fully exhausted
        is_exhausted = np.full(len(lengths), False)

        # if undersampling ("first_exhausted"), we stop as soon as one dataset is exhausted
        # if oversampling ("all_exhausted"), we stop as soon as every dataset is exhausted, i.e as soon as every sample of every dataset has been visited at least once
        bool_strategy_func = np.all if oversampling else np.any

        def iter_random_indices():
            """Get an infinite iterator that randomly samples the index of the source to pick examples from."""
            rng = np.random.default_rng(seed)
            while True:
                yield from (int(i) for i in rng.choice(len(datasets), size=1000, p=probabilities))

        current_index = [0] * len(datasets)
        indices = []
        for source_idx in iter_random_indices():
            # If no oversampling, we stop as soon as a dataset has run out of examples (np.any)
            # Otherwise, we stop as soon as every dataset has run out of examples (np.all)
            if bool_strategy_func(is_exhausted):
                # the stopping condition was reached, let's stop
                break

            # let's add the example at the current index of the `source_idx`-th dataset
            indices.append(current_index[source_idx] + offsets[source_idx])
current_index[source_idx] += 1 # we've ran out of examples for the current dataset, let's update our boolean array and bring the current_index back to 0 if current_index[source_idx] >= lengths[source_idx]: is_exhausted[source_idx] = True current_index[source_idx] = 0 return concatenated_datasets.select(indices, **kwargs) class DatasetDict(dict): """A dictionary (dict of str: datasets.Dataset) with dataset transforms methods (map, filter, etc.)""" def _check_values_type(self): for dataset in self.values(): if not isinstance(dataset, Dataset): raise TypeError(f"Values in `DatasetDict` should be of type `Dataset` but got type '{type(dataset)}'") def _check_values_features(self): items = list(self.items()) for item_a, item_b in zip(items[:-1], items[1:]): if item_a[1].features != item_b[1].features: raise ValueError( f"All datasets in `DatasetDict` should have the same features but features for '{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}" ) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables for dataset in self.values(): if hasattr(dataset, "_data"): del dataset._data if hasattr(dataset, "_indices"): del dataset._indices def __getitem__(self, k) -> Dataset: if isinstance(k, (str, NamedSplit)) or len(self) == 0: return super().__getitem__(k) else: available_suggested_splits = [ split for split in (Split.TRAIN, Split.TEST, Split.VALIDATION) if split in self ] suggested_split = available_suggested_splits[0] if available_suggested_splits else list(self)[0] raise KeyError( f"Invalid key: {k}. Please first select a split. For example: " f"`my_dataset_dictionary['{suggested_split}'][{k}]`. " f"Available splits: {sorted(self)}" ) def data(self) -> Dict[str, Table]: """The Apache Arrow tables backing each split. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.data ``` """ self._check_values_type() return {k: dataset.data for k, dataset in self.items()} def cache_files(self) -> Dict[str, Dict]: """The cache files containing the Apache Arrow table backing each split. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.cache_files {'test': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-test.arrow'}], 'train': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-train.arrow'}], 'validation': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]} ``` """ self._check_values_type() return {k: dataset.cache_files for k, dataset in self.items()} def num_columns(self) -> Dict[str, int]: """Number of columns in each split of the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.num_columns {'test': 2, 'train': 2, 'validation': 2} ``` """ self._check_values_type() return {k: dataset.num_columns for k, dataset in self.items()} def num_rows(self) -> Dict[str, int]: """Number of rows in each split of the dataset (same as :func:`datasets.Dataset.__len__`). 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.num_rows {'test': 1066, 'train': 8530, 'validation': 1066} ``` """ self._check_values_type() return {k: dataset.num_rows for k, dataset in self.items()} def column_names(self) -> Dict[str, List[str]]: """Names of the columns in each split of the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.column_names {'test': ['text', 'label'], 'train': ['text', 'label'], 'validation': ['text', 'label']} ``` """ self._check_values_type() return {k: dataset.column_names for k, dataset in self.items()} def shape(self) -> Dict[str, Tuple[int]]: """Shape of each split of the dataset (number of columns, number of rows). Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.shape {'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)} ``` """ self._check_values_type() return {k: dataset.shape for k, dataset in self.items()} def flatten(self, max_depth=16) -> "DatasetDict": """Flatten the Apache Arrow Table of each split (nested features are flatten). Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad") >>> ds["train"].features {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} >>> ds.flatten() DatasetDict({ train: Dataset({ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], num_rows: 87599 }) validation: Dataset({ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], num_rows: 10570 }) }) ``` """ self._check_values_type() return DatasetDict({k: dataset.flatten(max_depth=max_depth) for k, dataset in self.items()}) def unique(self, column: str) -> Dict[str, List]: """Return a list of the unique elements in a column for each split. This is implemented in the low-level backend and as such, very fast. Args: column (`str`): column name (list all the column names with [`~datasets.Dataset.column_names`]) Returns: Dict[`str`, `list`]: Dictionary of unique elements in the given column. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.unique("label") {'test': [1, 0], 'train': [1, 0], 'validation': [1, 0]} ``` """ self._check_values_type() return {k: dataset.unique(column) for k, dataset in self.items()} def cleanup_cache_files(self) -> Dict[str, int]: """Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one. Be careful when running this command that no other process is currently using other cache files. 
Return: `Dict` with the number of removed files for each split Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.cleanup_cache_files() {'test': 0, 'train': 0, 'validation': 0} ``` """ self._check_values_type() return {k: dataset.cleanup_cache_files() for k, dataset in self.items()} def __repr__(self): repr = "\n".join([f"{k}: {v}" for k, v in self.items()]) repr = re.sub(r"^", " " * 4, repr, 0, re.M) return f"DatasetDict({{\n{repr}\n}})" def cast(self, features: Features) -> "DatasetDict": """ Cast the dataset to a new set of features. The transformation is applied to all the datasets of the dataset dictionary. You can also remove a column using [`Dataset.map`] with `feature` but `cast` is in-place (doesn't copy the data to a new dataset) and is thus faster. Args: features ([`Features`]): New features to cast the dataset to. The name and order of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds["train"].features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> new_features = ds["train"].features.copy() >>> new_features['label'] = ClassLabel(names=['bad', 'good']) >>> new_features['text'] = Value('large_string') >>> ds = ds.cast(new_features) >>> ds["train"].features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='large_string', id=None)} ``` """ self._check_values_type() return DatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()}) def cast_column(self, column: str, feature) -> "DatasetDict": """Cast column to feature for decoding. Args: column (`str`): Column name. feature ([`Feature`]): Target feature. Returns: [`DatasetDict`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds["train"].features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good'])) >>> ds["train"].features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='string', id=None)} ``` """ self._check_values_type() return DatasetDict({k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()}) def remove_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict": """ Remove one or several column(s) from each split in the dataset and the features associated to the column(s). The transformation is applied to all the splits of the dataset dictionary. You can also remove a column using [`Dataset.map`] with `remove_columns` but the present method is in-place (doesn't copy the data to a new dataset) and is thus faster. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to remove. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.remove_columns("label") DatasetDict({ train: Dataset({ features: ['text'], num_rows: 8530 }) validation: Dataset({ features: ['text'], num_rows: 1066 }) test: Dataset({ features: ['text'], num_rows: 1066 }) }) ``` """ self._check_values_type() return DatasetDict({k: dataset.remove_columns(column_names=column_names) for k, dataset in self.items()}) def rename_column(self, original_column_name: str, new_column_name: str) -> "DatasetDict": """ Rename a column in the dataset and move the features associated to the original column under the new column name. The transformation is applied to all the datasets of the dataset dictionary. You can also rename a column using [`~Dataset.map`] with `remove_columns` but the present method: - takes care of moving the original features under the new column name. - doesn't copy the data to a new dataset and is thus much faster. Args: original_column_name (`str`): Name of the column to rename. new_column_name (`str`): New name for the column. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.rename_column("label", "label_new") DatasetDict({ train: Dataset({ features: ['text', 'label_new'], num_rows: 8530 }) validation: Dataset({ features: ['text', 'label_new'], num_rows: 1066 }) test: Dataset({ features: ['text', 'label_new'], num_rows: 1066 }) }) ``` """ self._check_values_type() return DatasetDict( { k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name) for k, dataset in self.items() } ) def rename_columns(self, column_mapping: Dict[str, str]) -> "DatasetDict": """ Rename several columns in the dataset, and move the features associated to the original columns under the new column names. The transformation is applied to all the datasets of the dataset dictionary. Args: column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names. Returns: [`DatasetDict`]: A copy of the dataset with renamed columns. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'}) DatasetDict({ train: Dataset({ features: ['text_new', 'label_new'], num_rows: 8530 }) validation: Dataset({ features: ['text_new', 'label_new'], num_rows: 1066 }) test: Dataset({ features: ['text_new', 'label_new'], num_rows: 1066 }) }) ``` """ self._check_values_type() return DatasetDict({k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()}) def select_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict": """Select one or several column(s) from each split in the dataset and the features associated to the column(s). The transformation is applied to all the splits of the dataset dictionary. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to keep. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.select_columns("text") DatasetDict({ train: Dataset({ features: ['text'], num_rows: 8530 }) validation: Dataset({ features: ['text'], num_rows: 1066 }) test: Dataset({ features: ['text'], num_rows: 1066 }) }) ``` """ self._check_values_type() return DatasetDict({k: dataset.select_columns(column_names=column_names) for k, dataset in self.items()}) def class_encode_column(self, column: str, include_nulls: bool = False) -> "DatasetDict": """Casts the given column as [`~datasets.features.ClassLabel`] and updates the tables. Args: column (`str`): The name of the column to cast. include_nulls (`bool`, defaults to `False`): Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label. <Added version="1.14.2"/> Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("boolq") >>> ds["train"].features {'answer': Value(dtype='bool', id=None), 'passage': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None)} >>> ds = ds.class_encode_column("answer") >>> ds["train"].features {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None), 'passage': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None)} ``` """ self._check_values_type() return DatasetDict( {k: dataset.class_encode_column(column=column, include_nulls=include_nulls) for k, dataset in self.items()} ) def formatted_as( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """To be used in a `with` statement. Set `__getitem__` return format (type and columns). The transformation is applied to all the datasets of the dataset dictionary. Args: type (`str`, *optional*): Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. `None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to False): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. """ self._check_values_type() old_format_type = {k: dataset._format_type for k, dataset in self.items()} old_format_kwargs = {k: dataset._format_kwargs for k, dataset in self.items()} old_format_columns = {k: dataset._format_columns for k, dataset in self.items()} old_output_all_columns = {k: dataset._output_all_columns for k, dataset in self.items()} try: self.set_format(type, columns, output_all_columns, **format_kwargs) yield finally: for k, dataset in self.items(): dataset.set_format( old_format_type[k], old_format_columns[k], old_output_all_columns[k], **old_format_kwargs[k] ) def set_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """Set `__getitem__` return format (type and columns). The format is set for every dataset in the dataset dictionary. Args: type (`str`, *optional*): Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. `None` means `__getitem__` returns all columns (default). 
            output_all_columns (`bool`, defaults to False):
                Keep un-formatted columns as well in the output (as python objects).
            **format_kwargs (additional keyword arguments):
                Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.

        It is possible to call `map` after calling `set_format`. Since `map` may add new columns, then the list of formatted columns
        gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted:

            `new formatted columns = (all columns - previously unformatted columns)`

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> from transformers import AutoTokenizer
        >>> ds = load_dataset("rotten_tomatoes")
        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
        >>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
        >>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
        >>> ds["train"].format
        {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
         'format_kwargs': {},
         'output_all_columns': False,
         'type': 'numpy'}
        ```
        """
        self._check_values_type()
        for dataset in self.values():
            dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)

    def reset_format(self):
        """Reset `__getitem__` return format to python objects and all columns.
        The transformation is applied to all the datasets of the dataset dictionary.

        Same as `self.set_format()`

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> from transformers import AutoTokenizer
        >>> ds = load_dataset("rotten_tomatoes")
        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
        >>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
        >>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
        >>> ds["train"].format
        {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
         'format_kwargs': {},
         'output_all_columns': False,
         'type': 'numpy'}
        >>> ds.reset_format()
        >>> ds["train"].format
        {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
         'format_kwargs': {},
         'output_all_columns': False,
         'type': None}
        ```
        """
        self._check_values_type()
        for dataset in self.values():
            dataset.set_format()

    def set_transform(
        self,
        transform: Optional[Callable],
        columns: Optional[List] = None,
        output_all_columns: bool = False,
    ):
        """Set ``__getitem__`` return format using this transform. The transform is applied on-the-fly on batches when ``__getitem__`` is called.
        The transform is set for every dataset in the dataset dictionary

        As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format`

        Args:
            transform (`Callable`, optional): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format`
                A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
                This function is applied right before returning the objects in ``__getitem__``.
            columns (`List[str]`, optional): columns to format in the output
                If specified, then the input batch of the transform only contains those columns.
            output_all_columns (`bool`, default to False): keep un-formatted columns as well in the output (as python objects)
                If set to True, then the other un-formatted columns are kept with the output of the transform.
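        Example (a minimal sketch mirroring the `with_transform` example below; the tokenizer choice is illustrative):

        ```py
        >>> from datasets import load_dataset
        >>> from transformers import AutoTokenizer
        >>> ds = load_dataset("rotten_tomatoes")
        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
        >>> def encode(batch):
        ...     return tokenizer(batch["text"], truncation=True, padding=True, return_tensors="pt")
        >>> ds.set_transform(encode)
        >>> ds["train"][0]  # the transform is applied on-the-fly when __getitem__ is called
        ```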
""" self._check_values_type() for dataset in self.values(): dataset.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform) def with_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ) -> "DatasetDict": """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly. The format `type` (for example "numpy") is used to format batches when using `__getitem__`. The format is set for every dataset in the dataset dictionary. It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`]. Contrary to [`~datasets.DatasetDict.set_format`], `with_format` returns a new [`DatasetDict`] object with new [`Dataset`] objects. Args: type (`str`, *optional*): Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. `None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) >>> ds["train"].format {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], 'format_kwargs': {}, 'output_all_columns': False, 'type': None} >>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) >>> ds["train"].format {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], 'format_kwargs': {}, 'output_all_columns': False, 'type': 'tensorflow'} ``` """ dataset = copy.deepcopy(self) dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs) return dataset def with_transform( self, transform: Optional[Callable], columns: Optional[List] = None, output_all_columns: bool = False, ) -> "DatasetDict": """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called. The transform is set for every dataset in the dataset dictionary As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`]. Contrary to [`~datasets.DatasetDict.set_transform`], `with_transform` returns a new [`DatasetDict`] object with new [`Dataset`] objects. Args: transform (`Callable`, *optional*): User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`]. A formatting function is a callable that takes a batch (as a dict) as input and returns a batch. This function is applied right before returning the objects in `__getitem__`. columns (`List[str]`, *optional*): Columns to format in the output. If specified, then the input batch of the transform only contains those columns. output_all_columns (`bool`, defaults to False): Keep un-formatted columns as well in the output (as python objects). If set to `True`, then the other un-formatted columns are kept with the output of the transform. 
Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> def encode(example): ... return tokenizer(example['text'], truncation=True, padding=True, return_tensors="pt") >>> ds = ds.with_transform(encode) >>> ds["train"][0] {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), 'input_ids': tensor([ 101, 1103, 2067, 1110, 17348, 1106, 1129, 1103, 6880, 1432, 112, 188, 1207, 107, 14255, 1389, 107, 1105, 1115, 1119, 112, 188, 1280, 1106, 1294, 170, 24194, 1256, 3407, 1190, 170, 11791, 5253, 188, 1732, 7200, 10947, 12606, 2895, 117, 179, 7766, 118, 172, 15554, 1181, 3498, 6961, 3263, 1137, 188, 1566, 7912, 14516, 6997, 119, 102]), 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])} ``` """ dataset = copy.deepcopy(self) dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns) return dataset def map( self, function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[Union[str, List[str]]] = None, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, cache_file_names: Optional[Dict[str, Optional[str]]] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, fn_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, desc: Optional[str] = None, ) -> "DatasetDict": """Apply a function to all the elements in the table (individually or in batches) and update the table (if function does updated examples). The transformation is applied to all the datasets of the dataset dictionary. Args: function (`callable`): with one of the following signature: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` - `function(example: Dict[str, Any], indices: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True` - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True` For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`. with_rank (`bool`, defaults to `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. 
batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`, `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to `function`. drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. You have to provide one `cache_file_name` per dataset in the dataset dictionary. writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. features (`[datasets.Features]`, *optional*, defaults to `None`): Use a specific [`Features`] to store the cache file instead of the automatically generated one. disable_nullable (`bool`, defaults to `False`): Disallow null values in the table. fn_kwargs (`Dict`, *optional*, defaults to `None`): Keyword arguments to be passed to `function` num_proc (`int`, *optional*, defaults to `None`): Number of processes for multiprocessing. By default it doesn't use multiprocessing. desc (`str`, *optional*, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while mapping examples. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> def add_prefix(example): ... example["text"] = "Review: " + example["text"] ... return example >>> ds = ds.map(add_prefix) >>> ds["train"][0:3]["text"] ['Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . 
tolkien\'s middle-earth .', 'Review: effective but too-tepid biopic'] # process a batch of examples >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True) # set number of processors >>> ds = ds.map(add_prefix, num_proc=4) ``` """ self._check_values_type() if cache_file_names is None: cache_file_names = {k: None for k in self} return DatasetDict( { k: dataset.map( function=function, with_indices=with_indices, with_rank=with_rank, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, remove_columns=remove_columns, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_names[k], writer_batch_size=writer_batch_size, features=features, disable_nullable=disable_nullable, fn_kwargs=fn_kwargs, num_proc=num_proc, desc=desc, ) for k, dataset in self.items() } ) def filter( self, function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, cache_file_names: Optional[Dict[str, Optional[str]]] = None, writer_batch_size: Optional[int] = 1000, fn_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, desc: Optional[str] = None, ) -> "DatasetDict": """Apply a filter function to all the elements in the table in batches and update the table so that the dataset only includes examples according to the filter function. The transformation is applied to all the datasets of the dataset dictionary. Args: function (`Callable`): Callable with one of the following signatures: - `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) If no function is provided, defaults to an always `True` function: `lambda x: True`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (`bool`, defaults to `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True` `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to `function`. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if chaching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. 
cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. You have to provide one `cache_file_name` per dataset in the dataset dictionary. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. fn_kwargs (`Dict`, *optional*, defaults to `None`): Keyword arguments to be passed to `function` num_proc (`int`, *optional*, defaults to `None`): Number of processes for multiprocessing. By default it doesn't use multiprocessing. desc (`str`, *optional*, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while filtering examples. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.filter(lambda x: x["label"] == 1) DatasetDict({ train: Dataset({ features: ['text', 'label'], num_rows: 4265 }) validation: Dataset({ features: ['text', 'label'], num_rows: 533 }) test: Dataset({ features: ['text', 'label'], num_rows: 533 }) }) ``` """ self._check_values_type() if cache_file_names is None: cache_file_names = {k: None for k in self} return DatasetDict( { k: dataset.filter( function=function, with_indices=with_indices, with_rank=with_rank, input_columns=input_columns, batched=batched, batch_size=batch_size, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_names[k], writer_batch_size=writer_batch_size, fn_kwargs=fn_kwargs, num_proc=num_proc, desc=desc, ) for k, dataset in self.items() } ) def flatten_indices( self, keep_in_memory: bool = False, cache_file_names: Optional[Dict[str, Optional[str]]] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, num_proc: Optional[int] = None, new_fingerprint: Optional[str] = None, ) -> "DatasetDict": """Create and cache a new Dataset by flattening the indices mapping. Args: keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. cache_file_names (`Dict[str, str]`, *optional*, default `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. You have to provide one `cache_file_name` per dataset in the dataset dictionary. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. features (`Optional[datasets.Features]`, defaults to `None`): Use a specific [`Features`] to store the cache file instead of the automatically generated one. disable_nullable (`bool`, defaults to `False`): Allow null values in the table. num_proc (`int`, optional, default `None`): Max number of processes when generating cache. Already cached shards are loaded sequentially new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. 
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments """ self._check_values_type() if cache_file_names is None: cache_file_names = {k: None for k in self} return DatasetDict( { k: dataset.flatten_indices( keep_in_memory=keep_in_memory, cache_file_name=cache_file_names[k], writer_batch_size=writer_batch_size, features=features, disable_nullable=disable_nullable, num_proc=num_proc, new_fingerprint=new_fingerprint, ) for k, dataset in self.items() } ) def sort( self, column_names: Union[str, Sequence[str]], reverse: Union[bool, Sequence[bool]] = False, kind="deprecated", null_placement: str = "at_end", keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None, writer_batch_size: Optional[int] = 1000, ) -> "DatasetDict": """Create a new dataset sorted according to a single or multiple columns. Args: column_names (`Union[str, Sequence[str]]`): Column name(s) to sort by. reverse (`Union[bool, Sequence[bool]]`, defaults to `False`): If `True`, sort by descending order rather than ascending. If a single bool is provided, the value is applied to the sorting of all column names. Otherwise a list of bools with the same length and order as column_names must be provided. kind (`str`, *optional*): Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`, The default is `quicksort`. Note that both `stable` and `mergesort` use timsort under the covers and, in general, the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility. <Deprecated version="2.8.0"> `kind` was deprecated in version 2.10.0 and will be removed in 3.0.0. </Deprecated> null_placement (`str`, defaults to `at_end`): Put `None` values at the beginning if `at_start` or `first` or at the end if `at_end` or `last` keep_in_memory (`bool`, defaults to `False`): Keep the sorted indices in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the sorted indices can be identified, use it instead of recomputing. indices_cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the indices mapping instead of the automatically generated cache file name. You have to provide one `cache_file_name` per dataset in the dataset dictionary. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. Higher value gives smaller cache files, lower value consume less temporary memory. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset('rotten_tomatoes') >>> ds['train']['label'][:10] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] >>> sorted_ds = ds.sort('label') >>> sorted_ds['train']['label'][:10] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] >>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False]) >>> another_sorted_ds['train']['label'][:10] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ``` """ self._check_values_type() if indices_cache_file_names is None: indices_cache_file_names = {k: None for k in self} return DatasetDict( { k: dataset.sort( column_names=column_names, reverse=reverse, kind=kind, null_placement=null_placement, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, indices_cache_file_name=indices_cache_file_names[k], writer_batch_size=writer_batch_size, ) for k, dataset in self.items() } ) def shuffle( self, seeds: Optional[Union[int, Dict[str, Optional[int]]]] = None, seed: Optional[int] = None, generators: Optional[Dict[str, np.random.Generator]] = None, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None, writer_batch_size: Optional[int] = 1000, ) -> "DatasetDict": """Create a new Dataset where the rows are shuffled. The transformation is applied to all the datasets of the dataset dictionary. Currently shuffling uses numpy random generators. You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64). Args: seeds (`Dict[str, int]` or `int`, *optional*): A seed to initialize the default BitGenerator if `generator=None`. If `None`, then fresh, unpredictable entropy will be pulled from the OS. If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state. You can provide one `seed` per dataset in the dataset dictionary. seed (`int`, *optional*): A seed to initialize the default BitGenerator if `generator=None`. Alias for seeds (a `ValueError` is raised if both are provided). generators (`Dict[str, *optional*, np.random.Generator]`): Numpy random Generator to use to compute the permutation of the dataset rows. If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). You have to provide one `generator` per dataset in the dataset dictionary. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. indices_cache_file_names (`Dict[str, str]`, *optional*): Provide the name of a path for the cache file. It is used to store the indices mappings instead of the automatically generated cache file name. You have to provide one `cache_file_name` per dataset in the dataset dictionary. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds["train"]["label"][:10] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # set a seed >>> shuffled_ds = ds.shuffle(seed=42) >>> shuffled_ds["train"]["label"][:10] [0, 1, 0, 1, 0, 0, 0, 0, 0, 0] ``` """ self._check_values_type() if seed is not None and seeds is not None: raise ValueError("Please specify seed or seeds, but not both") seeds = seed if seed is not None else seeds if seeds is None: seeds = {k: None for k in self} elif not isinstance(seeds, dict): seeds = {k: seeds for k in self} if generators is None: generators = {k: None for k in self} if indices_cache_file_names is None: indices_cache_file_names = {k: None for k in self} return DatasetDict( { k: dataset.shuffle( seed=seeds[k], generator=generators[k], keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, indices_cache_file_name=indices_cache_file_names[k], writer_batch_size=writer_batch_size, ) for k, dataset in self.items() } ) def save_to_disk( self, dataset_dict_path: PathLike, fs="deprecated", max_shard_size: Optional[Union[str, int]] = None, num_shards: Optional[Dict[str, int]] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, ): """ Saves a dataset dict to a filesystem using `fsspec.spec.AbstractFileSystem`. For [`Image`] and [`Audio`] data: All the Image() and Audio() data are stored in the arrow files. If you want to store paths or urls, please use the Value("string") type. Args: dataset_dict_path (`str`): Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`) of the dataset dict directory where the dataset dict will be saved to. fs (`fsspec.spec.AbstractFileSystem`, *optional*): Instance of the remote filesystem where the dataset will be saved to. <Deprecated version="2.8.0"> `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0. Please use `storage_options` instead, e.g. `storage_options=fs.storage_options` </Deprecated> max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit (like `"50MB"`). num_shards (`Dict[str, int]`, *optional*): Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`. You need to provide the number of shards for each dataset in the dataset dictionary. Use a dictionary to define a different num_shards for each split. <Added version="2.8.0"/> num_proc (`int`, *optional*, default `None`): Number of processes when downloading and generating the dataset locally. Multiprocessing is disabled by default. <Added version="2.8.0"/> storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. 
<Added version="2.8.0"/> Example: ```python >>> dataset_dict.save_to_disk("path/to/dataset/directory") >>> dataset_dict.save_to_disk("path/to/dataset/directory", max_shard_size="1GB") >>> dataset_dict.save_to_disk("path/to/dataset/directory", num_shards={"train": 1024, "test": 8}) ``` """ if fs != "deprecated": warnings.warn( "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n" "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", FutureWarning, ) storage_options = fs.storage_options fs: fsspec.AbstractFileSystem fs, _ = url_to_fs(dataset_dict_path, **(storage_options or {})) if num_shards is None: num_shards = {k: None for k in self} elif not isinstance(num_shards, dict): raise ValueError( "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}" ) fs.makedirs(dataset_dict_path, exist_ok=True) with fs.open(posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME), "w", encoding="utf-8") as f: json.dump({"splits": list(self)}, f) for k, dataset in self.items(): dataset.save_to_disk( posixpath.join(dataset_dict_path, k), num_shards=num_shards.get(k), max_shard_size=max_shard_size, num_proc=num_proc, storage_options=storage_options, ) def load_from_disk( dataset_dict_path: PathLike, fs="deprecated", keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None, ) -> "DatasetDict": """ Load a dataset that was previously saved using [`save_to_disk`] from a filesystem using `fsspec.spec.AbstractFileSystem`. Args: dataset_dict_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3//my-bucket/dataset/train"`) of the dataset dict directory where the dataset dict will be loaded from. fs (`fsspec.spec.AbstractFileSystem`, *optional*): Instance of the remote filesystem where the dataset will be saved to. <Deprecated version="2.8.0"> `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0. Please use `storage_options` instead, e.g. `storage_options=fs.storage_options` </Deprecated> keep_in_memory (`bool`, defaults to `None`): Whether to copy the dataset in-memory. If `None`, the dataset will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the [improve performance](../cache#improve-performance) section. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.8.0"/> Returns: [`DatasetDict`] Example: ```py >>> ds = load_from_disk('path/to/dataset/directory') ``` """ if fs != "deprecated": warnings.warn( "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n" "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", FutureWarning, ) storage_options = fs.storage_options fs: fsspec.AbstractFileSystem fs, dataset_dict_path = url_to_fs(dataset_dict_path, **(storage_options or {})) dataset_dict_json_path = posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME) dataset_state_json_path = posixpath.join(dataset_dict_path, config.DATASET_STATE_JSON_FILENAME) dataset_info_path = posixpath.join(dataset_dict_path, config.DATASET_INFO_FILENAME) if not fs.isfile(dataset_dict_json_path): if fs.isfile(dataset_info_path) and fs.isfile(dataset_state_json_path): raise FileNotFoundError( f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but got a `Dataset`. 
Please use either `datasets.load_from_disk` or `Dataset.load_from_disk` instead." ) raise FileNotFoundError( f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but provided path is not a `DatasetDict`." ) with fs.open(dataset_dict_json_path, "r", encoding="utf-8") as f: splits = json.load(f)["splits"] dataset_dict = DatasetDict() for k in splits: dataset_dict_split_path = posixpath.join(fs.unstrip_protocol(dataset_dict_path), k) dataset_dict[k] = Dataset.load_from_disk( dataset_dict_split_path, keep_in_memory=keep_in_memory, storage_options=storage_options ) return dataset_dict def from_csv( path_or_paths: Dict[str, PathLike], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ) -> "DatasetDict": """Create [`DatasetDict`] from CSV file(s). Args: path_or_paths (`dict` of path-like): Path(s) of the CSV file(s). features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. **kwargs (additional keyword arguments): Keyword arguments to be passed to [`pandas.read_csv`]. Returns: [`DatasetDict`] Example: ```py >>> from datasets import DatasetDict >>> ds = DatasetDict.from_csv({'train': 'path/to/dataset.csv'}) ``` """ # Dynamic import to avoid circular dependency from .io.csv import CsvDatasetReader return CsvDatasetReader( path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs ).read() def from_json( path_or_paths: Dict[str, PathLike], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ) -> "DatasetDict": """Create [`DatasetDict`] from JSON Lines file(s). Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the JSON Lines file(s). features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. **kwargs (additional keyword arguments): Keyword arguments to be passed to [`JsonConfig`]. Returns: [`DatasetDict`] Example: ```py >>> from datasets import DatasetDict >>> ds = DatasetDict.from_json({'train': 'path/to/dataset.json'}) ``` """ # Dynamic import to avoid circular dependency from .io.json import JsonDatasetReader return JsonDatasetReader( path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs ).read() def from_parquet( path_or_paths: Dict[str, PathLike], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, columns: Optional[List[str]] = None, **kwargs, ) -> "DatasetDict": """Create [`DatasetDict`] from Parquet file(s). Args: path_or_paths (`dict` of path-like): Path(s) of the Parquet file(s). features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. columns (`List[str]`, *optional*): If not `None`, only these columns will be read from the file. A column name may be a prefix of a nested field, e.g. 'a' will select 'a.b', 'a.c', and 'a.d.e'. **kwargs (additional keyword arguments): Keyword arguments to be passed to [`ParquetConfig`].
Returns: [`DatasetDict`] Example: ```py >>> from datasets import DatasetDict >>> ds = DatasetDict.from_parquet({'train': 'path/to/dataset/parquet'}) ``` """ # Dynamic import to avoid circular dependency from .io.parquet import ParquetDatasetReader return ParquetDatasetReader( path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, columns=columns, **kwargs, ).read() def from_text( path_or_paths: Dict[str, PathLike], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ) -> "DatasetDict": """Create [`DatasetDict`] from text file(s). Args: path_or_paths (`dict` of path-like): Path(s) of the text file(s). features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. **kwargs (additional keyword arguments): Keyword arguments to be passed to [`TextConfig`]. Returns: [`DatasetDict`] Example: ```py >>> from datasets import DatasetDict >>> ds = DatasetDict.from_text({'train': 'path/to/dataset.txt'}) ``` """ # Dynamic import to avoid circular dependency from .io.text import TextDatasetReader return TextDatasetReader( path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs ).read() def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "DatasetDict": self._check_values_type() return DatasetDict({k: dataset.prepare_for_task(task=task, id=id) for k, dataset in self.items()}) def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "DatasetDict": self._check_values_type() return DatasetDict( { k: dataset.align_labels_with_mapping(label2id=label2id, label_column=label_column) for k, dataset in self.items() } ) def push_to_hub( self, repo_id, config_name: str = "default", set_default: Optional[bool] = None, data_dir: Optional[str] = None, commit_message: Optional[str] = None, commit_description: Optional[str] = None, private: Optional[bool] = False, token: Optional[str] = None, revision: Optional[str] = None, branch="deprecated", create_pr: Optional[bool] = False, max_shard_size: Optional[Union[int, str]] = None, num_shards: Optional[Dict[str, int]] = None, embed_external_files: bool = True, ) -> CommitInfo: """Pushes the [`DatasetDict`] to the hub as a Parquet dataset. The [`DatasetDict`] is pushed using HTTP requests and does not need to have neither git or git-lfs installed. Each dataset split will be pushed independently. The pushed dataset will keep the original split names. The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`] data, the Parquet files will store the bytes of your images or audio files. You can disable this by setting `embed_external_files` to False. Args: repo_id (`str`): The ID of the repository to push to in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace of the logged-in user. config_name (`str`): Configuration name of a dataset. Defaults to "default". set_default (`bool`, *optional*): Whether to set this configuration as the default one. Otherwise, the default configuration is the one named "default". data_dir (`str`, *optional*): Directory name that will contain the uploaded data files. Defaults to the `config_name` if different from "default", else "data". 
<Added version="2.17.0"/> commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"Upload dataset"`. commit_description (`str`, *optional*): Description of the commit that will be created. Additionally, description of the PR if a PR is created (`create_pr` is True). <Added version="2.16.0"/> private (`bool`, *optional*): Whether the dataset repository should be set to private or not. Only affects repository creation: a repository that already exists will not be affected by that parameter. token (`str`, *optional*): An optional authentication token for the Hugging Face Hub. If no token is passed, will default to the token saved locally when logging in with `huggingface-cli login`. Will raise an error if no token is passed and the user is not logged-in. revision (`str`, *optional*): Branch to push the uploaded files to. Defaults to the `"main"` branch. <Added version="2.15.0"/> branch (`str`, *optional*): The git branch on which to push the dataset. This defaults to the default branch as specified in your repository, which defaults to `"main"`. <Deprecated version="2.15.0"> `branch` was deprecated in favor of `revision` in version 2.15.0 and will be removed in 3.0.0. </Deprecated> create_pr (`bool`, *optional*, defaults to `False`): Whether to create a PR with the uploaded files or directly commit. <Added version="2.15.0"/> max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit (like `"500MB"` or `"1GB"`). num_shards (`Dict[str, int]`, *optional*): Number of shards to write. By default, the number of shards depends on `max_shard_size`. Use a dictionary to define a different num_shards for each split. <Added version="2.8.0"/> embed_external_files (`bool`, defaults to `True`): Whether to embed file bytes in the shards. In particular, this will do the following before the push for the fields of type: - [`Audio`] and [`Image`] removes local path information and embed file content in the Parquet files. Return: huggingface_hub.CommitInfo Example: ```python >>> dataset_dict.push_to_hub("<organization>/<dataset_id>") >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True) >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB") >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8}) ``` If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages): ```python >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en") >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr") >>> # later >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en") >>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr") ``` """ if num_shards is None: num_shards = {k: None for k in self} elif not isinstance(num_shards, dict): raise ValueError( "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. 
{{'train': 128, 'test': 4}}" ) if branch != "deprecated": warnings.warn( "'branch' was deprecated in favor of 'revision' in version 2.15.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'revision={branch}' instead.", FutureWarning, ) revision = branch self._check_values_type() self._check_values_features() total_uploaded_size = 0 total_dataset_nbytes = 0 info_to_dump: DatasetInfo = next(iter(self.values())).info.copy() info_to_dump.config_name = config_name info_to_dump.splits = SplitDict() for split in self.keys(): if not re.match(_split_re, split): raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.") api = HfApi(endpoint=config.HF_ENDPOINT, token=token) repo_url = api.create_repo( repo_id, token=token, repo_type="dataset", private=private, exist_ok=True, ) repo_id = repo_url.repo_id if revision is not None: api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True) if not data_dir: data_dir = config_name if config_name != "default" else "data" # for backward compatibility additions = [] for split in self.keys(): logger.info(f"Pushing split {split} to the Hub.") # The split=key needs to be removed before merging split_additions, uploaded_size, dataset_nbytes = self[split]._push_parquet_shards_to_hub( repo_id, data_dir=data_dir, split=split, token=token, revision=revision, create_pr=create_pr, max_shard_size=max_shard_size, num_shards=num_shards.get(split), embed_external_files=embed_external_files, ) additions += split_additions total_uploaded_size += uploaded_size total_dataset_nbytes += dataset_nbytes info_to_dump.splits[split] = SplitInfo(str(split), num_bytes=dataset_nbytes, num_examples=len(self[split])) info_to_dump.download_checksums = None info_to_dump.download_size = total_uploaded_size info_to_dump.dataset_size = total_dataset_nbytes info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern) # and delete old split shards (if they exist) repo_with_dataset_card, repo_with_dataset_infos = False, False repo_splits = [] # use a list to keep the order of the splits deletions = [] repo_files_to_add = [addition.path_in_repo for addition in additions] for repo_file in list_files_info(api, repo_id=repo_id, revision=revision, repo_type="dataset", token=token): if repo_file.rfilename == config.REPOCARD_FILENAME: repo_with_dataset_card = True elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME: repo_with_dataset_infos = True elif ( repo_file.rfilename.startswith(tuple(f"{data_dir}/{split}-" for split in self.keys())) and repo_file.rfilename not in repo_files_to_add ): deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename)) elif fnmatch.fnmatch( repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*") ): repo_split = string_to_dict( repo_file.rfilename, glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED), )["split"] if repo_split not in repo_splits: repo_splits.append(split) # get the info from the README to update them if repo_with_dataset_card: dataset_card_path = api.hf_hub_download( repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision ) dataset_card = DatasetCard.load(Path(dataset_card_path)) dataset_card_data = dataset_card.data metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) # get the deprecated 
dataset_infos.json to update them elif repo_with_dataset_infos: dataset_card = None dataset_card_data = DatasetCardData() metadata_configs = MetadataConfigs() else: dataset_card = None dataset_card_data = DatasetCardData() metadata_configs = MetadataConfigs() # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed if not metadata_configs and repo_splits: default_metadata_configs_to_dump = { "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits] } MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data) metadata_config_to_dump = { "data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()], } if set_default and config_name != "default": if metadata_configs: default_config_name = metadata_configs.get_default_config_name() if default_config_name == "default": raise ValueError( "There exists a configuration named 'default'. To set a different configuration as default, " "rename the 'default' one first." ) else: _ = metadata_configs[default_config_name].pop("default") metadata_config_to_dump["default"] = True # push to the deprecated dataset_infos.json if repo_with_dataset_infos: dataset_infos_path = api.hf_hub_download( repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision ) with open(dataset_infos_path, encoding="utf-8") as f: dataset_infos: dict = json.load(f) dataset_infos[config_name] = asdict(info_to_dump) buffer = BytesIO() buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8")) additions.append( CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer) ) # push to README DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data) MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data) dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card additions.append( CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode()) ) commit_message = commit_message if commit_message is not None else "Upload dataset" if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT: commit_info = api.create_commit( repo_id, operations=additions + deletions, commit_message=commit_message, commit_description=commit_description, token=token, repo_type="dataset", revision=revision, create_pr=create_pr, ) else: logger.info( f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits." ) num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT) for i in range(0, num_commits): operations = additions[ i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT ] + (deletions if i == 0 else []) commit_info = api.create_commit( repo_id, operations=operations, commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})", commit_description=commit_description, token=token, repo_type="dataset", revision=revision, create_pr=create_pr, ) logger.info( f"Commit #{i+1} completed" + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "") + "." 
) return commit_info class IterableDatasetDict(dict): def __repr__(self): repr = "\n".join([f"{k}: {v}" for k, v in self.items()]) repr = re.sub(r"^", " " * 4, repr, 0, re.M) return f"IterableDatasetDict({{\n{repr}\n}})" def with_format( self, type: Optional[str] = None, ) -> "IterableDatasetDict": """ Return a dataset with the specified format. This method only supports the "torch" format for now. The format is set for all the datasets of the dataset dictionary. Args: type (`str`, *optional*, defaults to `None`): If set to "torch", the returned dataset will be a subclass of `torch.utils.data.IterableDataset` to be used in a `DataLoader`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") >>> def encode(examples): ... return tokenizer(examples["text"], truncation=True, padding="max_length") >>> ds = ds.map(encode, batched=True, remove_columns=["text"]) >>> ds = ds.with_format("torch") ``` """ return IterableDatasetDict({k: dataset.with_format(type=type) for k, dataset in self.items()}) def map( self, function: Optional[Callable] = None, with_indices: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: int = 1000, drop_last_batch: bool = False, remove_columns: Optional[Union[str, List[str]]] = None, fn_kwargs: Optional[dict] = None, ) -> "IterableDatasetDict": """ Apply a function to all the examples in the iterable dataset (individually or in batches) and update them. If your function returns a column that already exists, then it overwrites it. The function is applied on-the-fly on the examples when iterating over the dataset. The transformation is applied to all the datasets of the dataset dictionary. You can specify whether the function should be batched or not with the `batched` parameter: - If batched is `False`, then the function takes 1 example in and should return 1 example. An example is a dictionary, e.g. `{"text": "Hello there !"}`. - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples. A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`. - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples. Note that the last batch may have less than `n` examples. A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`. Args: function (`Callable`, *optional*, defaults to `None`): Function applied on-the-fly on the examples when you iterate on the dataset. It must have one of the following signatures: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True` - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True` For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. If no function is provided, defaults to the identity function: `lambda x: x`.
with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`. drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the `batch_size` should be dropped instead of being processed by the function. remove_columns (`[List[str]]`, *optional*, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. fn_kwargs (`Dict`, *optional*, defaults to `None`): Keyword arguments to be passed to `function` Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> def add_prefix(example): ... example["text"] = "Review: " + example["text"] ... return example >>> ds = ds.map(add_prefix) >>> next(iter(ds["train"])) {'label': 1, 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ return IterableDatasetDict( { k: dataset.map( function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, remove_columns=remove_columns, fn_kwargs=fn_kwargs, ) for k, dataset in self.items() } ) def filter( self, function: Optional[Callable] = None, with_indices=False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, fn_kwargs: Optional[dict] = None, ) -> "IterableDatasetDict": """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function. The filtering is done on-the-fly when iterating over the dataset. The filtering is applied to all the datasets of the dataset dictionary. Args: function (`Callable`): Callable with one of the following signatures: - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False` - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False` - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True` - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True` If no function is provided, defaults to an always True function: `lambda x: True`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`. input_columns (`str` or `List[str]`, *optional*): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. 
batched (`bool`, defaults to `False`): Provide batch of examples to `function` batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`. fn_kwargs (`Dict`, *optional*, defaults to `None`): Keyword arguments to be passed to `function` Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> ds = ds.filter(lambda x: x["label"] == 0) >>> list(ds["train"].take(3)) [{'label': 0, 'text': 'Review: simplistic , silly and tedious .'}, {'label': 0, 'text': "Review: it's so laddish and juvenile , only teenage boys could possibly find it funny ."}, {'label': 0, 'text': 'Review: exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}] ``` """ return IterableDatasetDict( { k: dataset.filter( function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs, ) for k, dataset in self.items() } ) def shuffle( self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000 ) -> "IterableDatasetDict": """ Randomly shuffles the elements of this dataset. The shuffling is applied to all the datasets of the dataset dictionary. This dataset fills a buffer with buffer_size elements, then randomly samples elements from this buffer, replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or equal to the full size of the dataset is required. For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will initially select a random element from only the first 1000 elements in the buffer. Once an element is selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element, maintaining the 1000 element buffer. If the dataset is made of several shards, it also does `shuffle` the order of the shards. However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`] then the order of the shards is kept unchanged. Args: seed (`int`, *optional*, defaults to `None`): Random seed that will be used to shuffle the dataset. It is used to sample from the shuffle buffer and also to shuffle the data shards. generator (`numpy.random.Generator`, *optional*): Numpy random Generator to use to compute the permutation of the dataset rows. If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). buffer_size (`int`, defaults to `1000`): Size of the buffer. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> list(ds["train"].take(3)) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . 
tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}] >>> ds = ds.shuffle(seed=42) >>> list(ds["train"].take(3)) [{'label': 1, 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."}, {'label': 1, 'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'}, {'label': 1, 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}] ``` """ return IterableDatasetDict( { k: dataset.shuffle(seed=seed, generator=generator, buffer_size=buffer_size) for k, dataset in self.items() } ) def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDatasetDict": """ Rename a column in the dataset, and move the features associated to the original column under the new column name. The renaming is applied to all the datasets of the dataset dictionary. Args: original_column_name (`str`): Name of the column to rename. new_column_name (`str`): New name for the column. Returns: [`IterableDatasetDict`]: A copy of the dataset with a renamed column. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> ds = ds.rename_column("text", "movie_review") >>> next(iter(ds["train"])) {'label': 1, 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ return IterableDatasetDict( { k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name) for k, dataset in self.items() } ) def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDatasetDict": """ Rename several columns in the dataset, and move the features associated to the original columns under the new column names. The renaming is applied to all the datasets of the dataset dictionary. Args: column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names. Returns: [`IterableDatasetDict`]: A copy of the dataset with renamed columns Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"}) >>> next(iter(ds["train"])) {'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'rating': 1} ``` """ return IterableDatasetDict( {k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()} ) def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict": """ Remove one or several column(s) in the dataset and the features associated to them. The removal is done on-the-fly on the examples when iterating over the dataset. The removal is applied to all the datasets of the dataset dictionary. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to remove. Returns: [`IterableDatasetDict`]: A copy of the dataset object without the columns to remove. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> ds = ds.remove_columns("label") >>> next(iter(ds["train"])) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ return IterableDatasetDict({k: dataset.remove_columns(column_names) for k, dataset in self.items()}) def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict": """Select one or several column(s) in the dataset and the features associated to them. The selection is done on-the-fly on the examples when iterating over the dataset. The selection is applied to all the datasets of the dataset dictionary. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to keep. Returns: [`IterableDatasetDict`]: A copy of the dataset object with only selected columns. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> ds = ds.select_columns("text") >>> next(iter(ds["train"])) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ return IterableDatasetDict({k: dataset.select_columns(column_names) for k, dataset in self.items()}) def cast_column(self, column: str, feature: FeatureType) -> "IterableDatasetDict": """Cast column to feature for decoding. The type casting is applied to all the datasets of the dataset dictionary. Args: column (`str`): Column name. feature ([`Feature`]): Target feature. Returns: [`IterableDatasetDict`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> ds["train"].features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good'])) >>> ds["train"].features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='string', id=None)} ``` """ return IterableDatasetDict( {k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()} ) def cast( self, features: Features, ) -> "IterableDatasetDict": """ Cast the dataset to a new set of features. The type casting is applied to all the datasets of the dataset dictionary. Args: features (`Features`): New features to cast the dataset to. The name of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. `string` <-> `ClassLabel`, you should use [`map`] to update the Dataset. Returns: [`IterableDatasetDict`]: A copy of the dataset with casted features.
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> ds["train"].features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> new_features = ds["train"].features.copy() >>> new_features['label'] = ClassLabel(names=['bad', 'good']) >>> new_features['text'] = Value('large_string') >>> ds = ds.cast(new_features) >>> ds["train"].features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='large_string', id=None)} ``` """ return IterableDatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()}) class DatasetInfo: """Information about a dataset. `DatasetInfo` documents datasets, including its name, version, and features. See the constructor arguments and properties for a full list. Not all fields are known on construction and may be updated later. Attributes: description (`str`): A description of the dataset. citation (`str`): A BibTeX citation of the dataset. homepage (`str`): A URL to the official homepage for the dataset. license (`str`): The dataset's license. It can be the name of the license or a paragraph containing the terms of the license. features ([`Features`], *optional*): The features used to specify the dataset's column types. post_processed (`PostProcessedInfo`, *optional*): Information regarding the resources of a possible post-processing of a dataset. For example, it can contain the information of an index. supervised_keys (`SupervisedKeysData`, *optional*): Specifies the input feature and the label for supervised learning if applicable for the dataset (legacy from TFDS). builder_name (`str`, *optional*): The name of the `GeneratorBasedBuilder` subclass used to create the dataset. Usually matched to the corresponding script name. It is also the snake_case version of the dataset builder class name. config_name (`str`, *optional*): The name of the configuration derived from [`BuilderConfig`]. version (`str` or [`Version`], *optional*): The version of the dataset. splits (`dict`, *optional*): The mapping between split name and metadata. download_checksums (`dict`, *optional*): The mapping between the URL to download the dataset's checksums and corresponding metadata. download_size (`int`, *optional*): The size of the files to download to generate the dataset, in bytes. post_processing_size (`int`, *optional*): Size of the dataset in bytes after post-processing, if any. dataset_size (`int`, *optional*): The combined size in bytes of the Arrow tables for all splits. size_in_bytes (`int`, *optional*): The combined size in bytes of all files associated with the dataset (downloaded files + Arrow files). task_templates (`List[TaskTemplate]`, *optional*): The task templates to prepare the dataset for during training and evaluation. Each template casts the dataset's [`Features`] to standardized column names and types as detailed in `datasets.tasks`. **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`]. 
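Example (a minimal constructed sketch showing how the attributes above map to the constructor; the description and feature schema are purely illustrative, not taken from any real dataset):

```py
>>> from datasets import DatasetInfo, Features, Value
>>> info = DatasetInfo(
...     description="A toy corpus of short movie reviews.",
...     features=Features({"text": Value("string"), "label": Value("int64")}),
... )
>>> info.description
'A toy corpus of short movie reviews.'
```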
""" # Set in the dataset scripts description: str = dataclasses.field(default_factory=str) citation: str = dataclasses.field(default_factory=str) homepage: str = dataclasses.field(default_factory=str) license: str = dataclasses.field(default_factory=str) features: Optional[Features] = None post_processed: Optional[PostProcessedInfo] = None supervised_keys: Optional[SupervisedKeysData] = None task_templates: Optional[List[TaskTemplate]] = None # Set later by the builder builder_name: Optional[str] = None dataset_name: Optional[str] = None # for packaged builders, to be different from builder_name config_name: Optional[str] = None version: Optional[Union[str, Version]] = None # Set later by `download_and_prepare` splits: Optional[dict] = None download_checksums: Optional[dict] = None download_size: Optional[int] = None post_processing_size: Optional[int] = None dataset_size: Optional[int] = None size_in_bytes: Optional[int] = None _INCLUDED_INFO_IN_YAML: ClassVar[List[str]] = [ "config_name", "download_size", "dataset_size", "features", "splits", ] def __post_init__(self): # Convert back to the correct classes when we reload from dict if self.features is not None and not isinstance(self.features, Features): self.features = Features.from_dict(self.features) if self.post_processed is not None and not isinstance(self.post_processed, PostProcessedInfo): self.post_processed = PostProcessedInfo.from_dict(self.post_processed) if self.version is not None and not isinstance(self.version, Version): if isinstance(self.version, str): self.version = Version(self.version) else: self.version = Version.from_dict(self.version) if self.splits is not None and not isinstance(self.splits, SplitDict): self.splits = SplitDict.from_split_dict(self.splits) if self.supervised_keys is not None and not isinstance(self.supervised_keys, SupervisedKeysData): if isinstance(self.supervised_keys, (tuple, list)): self.supervised_keys = SupervisedKeysData(*self.supervised_keys) else: self.supervised_keys = SupervisedKeysData(**self.supervised_keys) # Parse and make a list of templates if self.task_templates is not None: if isinstance(self.task_templates, (list, tuple)): templates = [ template if isinstance(template, TaskTemplate) else task_template_from_dict(template) for template in self.task_templates ] self.task_templates = [template for template in templates if template is not None] elif isinstance(self.task_templates, TaskTemplate): self.task_templates = [self.task_templates] else: template = task_template_from_dict(self.task_templates) self.task_templates = [template] if template is not None else [] # Align task templates with features if self.task_templates is not None: self.task_templates = list(self.task_templates) if self.features is not None: self.task_templates = [ template.align_with_features(self.features) for template in (self.task_templates) ] def write_to_directory( self, dataset_info_dir, pretty_print=False, fs="deprecated", storage_options: Optional[dict] = None ): """Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`. Args: dataset_info_dir (`str`): Destination directory. pretty_print (`bool`, defaults to `False`): If `True`, the JSON will be pretty-printed with the indent level of 4. fs (`fsspec.spec.AbstractFileSystem`, *optional*): Instance of the remote filesystem used to download the files from. <Deprecated version="2.9.0"> `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0. Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`. 
</Deprecated> storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.9.0"/> Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.info.write_to_directory("/path/to/directory/") ``` """ if fs != "deprecated": warnings.warn( "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n" "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", FutureWarning, ) storage_options = fs.storage_options fs: fsspec.AbstractFileSystem fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {})) with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f: self._dump_info(f, pretty_print=pretty_print) if self.license: with fs.open(posixpath.join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f: self._dump_license(f) def _dump_info(self, file, pretty_print=False): """Dump info in `file` file-like object open in bytes mode (to support remote files)""" file.write(json.dumps(asdict(self), indent=4 if pretty_print else None).encode("utf-8")) def _dump_license(self, file): """Dump license in `file` file-like object open in bytes mode (to support remote files)""" file.write(self.license.encode("utf-8")) def from_merge(cls, dataset_infos: List["DatasetInfo"]): dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None] if len(dataset_infos) > 0 and all(dataset_infos[0] == dset_info for dset_info in dataset_infos): # if all dataset_infos are equal we don't need to merge. Just return the first. return dataset_infos[0] description = "\n\n".join(unique_values(info.description for info in dataset_infos)).strip() citation = "\n\n".join(unique_values(info.citation for info in dataset_infos)).strip() homepage = "\n\n".join(unique_values(info.homepage for info in dataset_infos)).strip() license = "\n\n".join(unique_values(info.license for info in dataset_infos)).strip() features = None supervised_keys = None task_templates = None # Find common task templates across all dataset infos all_task_templates = [info.task_templates for info in dataset_infos if info.task_templates is not None] if len(all_task_templates) > 1: task_templates = list(set(all_task_templates[0]).intersection(*all_task_templates[1:])) elif len(all_task_templates): task_templates = list(set(all_task_templates[0])) # If no common task templates found, replace empty list with None task_templates = task_templates if task_templates else None return cls( description=description, citation=citation, homepage=homepage, license=license, features=features, supervised_keys=supervised_keys, task_templates=task_templates, ) def from_directory( cls, dataset_info_dir: str, fs="deprecated", storage_options: Optional[dict] = None ) -> "DatasetInfo": """Create [`DatasetInfo`] from the JSON file in `dataset_info_dir`. This function updates all the dynamically generated fields (num_examples, hash, time of creation,...) of the [`DatasetInfo`]. This will overwrite all previous metadata. Args: dataset_info_dir (`str`): The directory containing the metadata file. This should be the root directory of a specific dataset version. fs (`fsspec.spec.AbstractFileSystem`, *optional*): Instance of the remote filesystem used to download the files from. <Deprecated version="2.9.0"> `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0. Please use `storage_options` instead, e.g. 
`storage_options=fs.storage_options`. </Deprecated> storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.9.0"/> Example: ```py >>> from datasets import DatasetInfo >>> ds_info = DatasetInfo.from_directory("/path/to/directory/") ``` """ if fs != "deprecated": warnings.warn( "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n" "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", FutureWarning, ) storage_options = fs.storage_options fs: fsspec.AbstractFileSystem fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {})) logger.info(f"Loading Dataset info from {dataset_info_dir}") if not dataset_info_dir: raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.") with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "r", encoding="utf-8") as f: dataset_info_dict = json.load(f) return cls.from_dict(dataset_info_dict) def from_dict(cls, dataset_info_dict: dict) -> "DatasetInfo": field_names = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in dataset_info_dict.items() if k in field_names}) def update(self, other_dataset_info: "DatasetInfo", ignore_none=True): self_dict = self.__dict__ self_dict.update( **{ k: copy.deepcopy(v) for k, v in other_dataset_info.__dict__.items() if (v is not None or not ignore_none) } ) def copy(self) -> "DatasetInfo": return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()}) def _to_yaml_dict(self) -> dict: yaml_dict = {} dataset_info_dict = asdict(self) for key in dataset_info_dict: if key in self._INCLUDED_INFO_IN_YAML: value = getattr(self, key) if hasattr(value, "_to_yaml_list"): # Features, SplitDict yaml_dict[key] = value._to_yaml_list() elif hasattr(value, "_to_yaml_string"): # Version yaml_dict[key] = value._to_yaml_string() else: yaml_dict[key] = value return yaml_dict def _from_yaml_dict(cls, yaml_data: dict) -> "DatasetInfo": yaml_data = copy.deepcopy(yaml_data) if yaml_data.get("features") is not None: yaml_data["features"] = Features._from_yaml_list(yaml_data["features"]) if yaml_data.get("splits") is not None: yaml_data["splits"] = SplitDict._from_yaml_list(yaml_data["splits"]) field_names = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in yaml_data.items() if k in field_names}) class IterableDataset(DatasetInfoMixin): """A Dataset backed by an iterable.""" def __init__( self, ex_iterable: _BaseExamplesIterable, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, formatting: Optional[FormattingConfig] = None, shuffling: Optional[ShufflingConfig] = None, distributed: Optional[DistributedConfig] = None, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None, format_type="deprecated", ): if distributed and distributed.world_size > 1 and shuffling and shuffling._original_seed is None: raise RuntimeError( "The dataset doesn't have a fixed random seed across nodes to shuffle and split the list of dataset shards by node. " "Please pass e.g. `seed=42` in `.shuffle()` to make all the nodes use the same seed. " ) if format_type != "deprecated": warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. " help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead." 
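# Deprecated path: warn the user below, then convert the legacy 'format_type' value into a FormattingConfig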
warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2) formatting = FormattingConfig(format_type=format_type) info = info.copy() if info is not None else DatasetInfo() DatasetInfoMixin.__init__(self, info=info, split=split) self._ex_iterable = ex_iterable self._formatting = formatting self._shuffling = shuffling self._distributed = distributed self._epoch = 0 self._token_per_repo_id: Dict[str, Union[str, bool, None]] = token_per_repo_id or {} _maybe_add_torch_iterable_dataset_parent_class(self.__class__) def __repr__(self): return f"IterableDataset({{\n features: {list(self._info.features.keys()) if self._info.features is not None else 'Unknown'},\n n_shards: {self.n_shards}\n}})" def __getstate__(self): return self.__dict__ def __setstate__(self, d): self.__dict__ = d # Re-add torch iterable dataset as a parent class, since dynamically added parent classes are not kept when pickling _maybe_add_torch_iterable_dataset_parent_class(self.__class__) def _head(self, n=5): return _examples_to_batch(list(self.take(n))) def _effective_generator(self): if self._shuffling and self._epoch == 0: return self._shuffling.generator elif self._shuffling: # Create effective seed using self._epoch (we subtract in order to avoir overflow in long_scalars) effective_seed = deepcopy(self._shuffling.generator).integers(0, 1 << 63) - self._epoch effective_seed = (1 << 63) + effective_seed if effective_seed < 0 else effective_seed return np.random.default_rng(effective_seed) else: raise ValueError("This dataset is not shuffled") def n_shards(self) -> int: if self._distributed and self._ex_iterable.n_shards % self._distributed.world_size == 0: return self._ex_iterable.n_shards // self._distributed.world_size return self._ex_iterable.n_shards def _iter_pytorch(self): ex_iterable = self._prepare_ex_iterable_for_iteration() # Fix for fsspec when using multiprocess to avoid hanging in the ML training loop. (only required for fsspec >= 0.9.0) # See https://github.com/fsspec/gcsfs/issues/379 fsspec.asyn.reset_lock() # check if there aren't too many workers import torch.utils.data worker_info = torch.utils.data.get_worker_info() if self._is_main_process() and ex_iterable.n_shards < worker_info.num_workers: logger.warning( f"Too many dataloader workers: {worker_info.num_workers} (max is dataset.n_shards={ex_iterable.n_shards}). " f"Stopping {worker_info.num_workers - ex_iterable.n_shards} dataloader workers." ) logger.info( f"To parallelize data loading, we give each process some shards (or data sources) to process. " f"Therefore it's unnecessary to have a number of workers greater than dataset.n_shards={ex_iterable.n_shards}. " f"To enable more parallelism, please split the dataset in more files than {ex_iterable.n_shards}." ) # split workload _log_prefix = f"node#{self._distributed.rank} " if self._distributed else "" shards_indices = ex_iterable.split_shard_indices_by_worker(worker_info.id, worker_info.num_workers) if shards_indices: logger.debug( f"{_log_prefix}dataloader worker#{worker_info.id}, ': Starting to iterate over {len(shards_indices)}/{ex_iterable.n_shards} shards." 
) ex_iterable = ex_iterable.shard_data_sources(worker_id=worker_info.id, num_workers=worker_info.num_workers) if self._formatting: formatter = get_formatter(self._formatting.format_type, features=self.features) format_dict = ( formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects ) else: format_dict = None if self._formatting and (ex_iterable.iter_arrow or self._formatting == "arrow"): if ex_iterable.iter_arrow: iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1) else: iterator = _convert_to_arrow(ex_iterable, batch_size=1) for key, pa_table in iterator: yield formatter.format_row(pa_table) return else: for key, example in ex_iterable: if self.features: # `IterableDataset` automatically fills missing columns with None. # This is done with `_apply_feature_types_on_example`. example = _apply_feature_types_on_example( example, self.features, token_per_repo_id=self._token_per_repo_id ) yield format_dict(example) if format_dict else example logger.debug( f"{_log_prefix}dataloader worker#{worker_info.id}, ': Finished iterating over {len(shards_indices)}/{ex_iterable.n_shards} shards." ) else: logger.debug( f"{_log_prefix}dataloader worker#{worker_info.id}, ': Stopping... Number of dataset shards < num_workers ({ex_iterable.n_shards}<{worker_info.num_workers})." ) def _is_main_process(self): if self._distributed and self._distributed.rank > 0: return False if "torch" in sys.modules: import torch.utils.data worker_info = torch.utils.data.get_worker_info() if worker_info is not None and worker_info.id > 0: return False return True def _prepare_ex_iterable_for_iteration(self) -> _BaseExamplesIterable: if self._shuffling: ex_iterable = self._ex_iterable.shuffle_data_sources(self._effective_generator()) else: ex_iterable = self._ex_iterable if self._distributed: rank = self._distributed.rank world_size = self._distributed.world_size if ex_iterable.n_shards % world_size == 0: if self._is_main_process(): n_shards_per_node = ex_iterable.n_shards // world_size plural = "s" if n_shards_per_node > 1 else "" logger.info( f"Assigning {n_shards_per_node} shard{plural} (or data source{plural}) of the dataset to each node." ) ex_iterable = ex_iterable.shard_data_sources(rank, world_size) else: if self._is_main_process(): logger.info( f"Assigning 1 out of {world_size} examples of the dataset to each node. The others are skipped during the iteration." ) logger.info( f"It is more optimized to distribute the dataset shards (or data sources) across nodes. " f"You can do that by using a dataset with number of shards that is a factor of world_size={world_size}. 
" f"The current dataset has {ex_iterable.n_shards} which is not a factor of {world_size}" ) ex_iterable = StepExamplesIterable(ex_iterable, step=world_size, offset=rank) return ex_iterable def __iter__(self): if "torch" in sys.modules: import torch.utils.data worker_info = torch.utils.data.get_worker_info() if isinstance(self, torch.utils.data.IterableDataset) and worker_info is not None: # We're a torch.utils.data.IterableDataset in a PyTorch worker process yield from self._iter_pytorch() return ex_iterable = self._prepare_ex_iterable_for_iteration() if self._formatting: formatter = get_formatter(self._formatting.format_type, features=self.features) format_dict = ( formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects ) else: format_dict = None if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"): if ex_iterable.iter_arrow: iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1) else: iterator = _convert_to_arrow(ex_iterable, batch_size=1) for key, pa_table in iterator: yield formatter.format_row(pa_table) return for key, example in ex_iterable: if self.features: # `IterableDataset` automatically fills missing columns with None. # This is done with `_apply_feature_types_on_example`. example = _apply_feature_types_on_example( example, self.features, token_per_repo_id=self._token_per_repo_id ) yield format_dict(example) if format_dict else example def iter(self, batch_size: int, drop_last_batch: bool = False): """Iterate through the batches of size `batch_size`. Args: batch_size (:obj:`int`): size of each batch to yield. drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be dropped """ if self._formatting: formatter = get_formatter(self._formatting.format_type, features=self.features) format_dict = ( formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects ) else: format_dict = None ex_iterable = self._prepare_ex_iterable_for_iteration() if self._formatting and (ex_iterable.iter_arrow or self._formatting == "arrow"): if ex_iterable.iter_arrow: iterator = _batch_arrow_tables( ex_iterable.iter_arrow(), batch_size=batch_size, drop_last_batch=drop_last_batch ) else: iterator = _convert_to_arrow(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch) for key, pa_table in iterator: yield formatter.format_batch(pa_table) return iterator = iter(ex_iterable) for key, example in iterator: # If batched, first build the batch examples = [example] + [example for key, example in islice(iterator, batch_size - 1)] if drop_last_batch and len(examples) < batch_size: # ignore last batch return batch = _examples_to_batch(examples) if self.features: # `IterableDataset` automatically fills missing columns with None. # This is done with `_apply_feature_types_on_batch`. batch = _apply_feature_types_on_batch(batch, self.features, token_per_repo_id=self._token_per_repo_id) yield format_dict(batch) if format_dict else batch def from_generator( generator: Callable, features: Optional[Features] = None, gen_kwargs: Optional[dict] = None, ) -> "IterableDataset": """Create an Iterable Dataset from a generator. Args: generator (`Callable`): A generator function that `yields` examples. features (`Features`, *optional*): Dataset features. gen_kwargs(`dict`, *optional*): Keyword arguments to be passed to the `generator` callable. You can define a sharded iterable dataset by passing the list of shards in `gen_kwargs`. 
This can be used to improve shuffling and when iterating over the dataset with multiple workers. Returns: `IterableDataset` Example: ```py >>> def gen(): ... yield {"text": "Good", "label": 0} ... yield {"text": "Bad", "label": 1} ... >>> ds = IterableDataset.from_generator(gen) ``` ```py >>> def gen(shards): ... for shard in shards: ... with open(shard) as f: ... for line in f: ... yield {"line": line} ... >>> shards = [f"data{i}.txt" for i in range(32)] >>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards}) >>> ds = ds.shuffle(seed=42, buffer_size=10_000) # shuffles the shards order + uses a shuffle buffer >>> from torch.utils.data import DataLoader >>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4) # give each worker a subset of 32/4=8 shards ``` """ from .io.generator import GeneratorDatasetInputStream return GeneratorDatasetInputStream( generator=generator, features=features, gen_kwargs=gen_kwargs, streaming=True, ).read() def from_spark( df: "pyspark.sql.DataFrame", split: Optional[NamedSplit] = None, features: Optional[Features] = None, **kwargs, ) -> "IterableDataset": """Create an IterableDataset from Spark DataFrame. The dataset is streamed to the driver in batches. Args: df (`pyspark.sql.DataFrame`): The DataFrame containing the desired data. split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. Returns: [`IterableDataset`] Example: ```py >>> df = spark.createDataFrame( >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]], >>> columns=["id", "name"], >>> ) >>> ds = IterableDataset.from_spark(df) ``` """ from .io.spark import SparkDatasetReader if sys.platform == "win32": raise EnvironmentError("IterableDataset.from_spark is not currently supported on Windows") return SparkDatasetReader( df, split=split, features=features, streaming=True, **kwargs, ).read() def from_file(filename: str) -> "IterableDataset": """Instantiate a IterableDataset from Arrow table at filename. Args: filename (`str`): File name of the dataset. Returns: [`IterableDataset`] """ pa_table_schema = read_schema_from_file(filename) inferred_features = Features.from_arrow_schema(pa_table_schema) ex_iterable = ArrowExamplesIterable(Dataset._generate_tables_from_cache_file, kwargs={"filename": filename}) return IterableDataset(ex_iterable=ex_iterable, info=DatasetInfo(features=inferred_features)) def with_format( self, type: Optional[str] = None, ) -> "IterableDataset": """ Return a dataset with the specified format. Supported formats: "arrow", or None for regular python objects. The other formats are currently not implemented. 
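For example, a minimal sketch (assuming an existing streaming `IterableDataset` named `ds`; the variable names are illustrative):

```py
>>> ds_arrow = ds.with_format("arrow")    # iterating now yields one-row pyarrow.Table objects
>>> ds_python = ds_arrow.with_format(None)  # back to plain python dict examples
```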
Args: type (`str`, optional, default None): if set to "torch", the returned dataset will be a subclass of torch.utils.data.IterableDataset to be used in a DataLoader """ type = get_format_type_from_alias(type) # TODO(QL): add format_kwargs # TODO(QL): add format_columns and return_all_columns # TODO(QL): add pandas format return IterableDataset( ex_iterable=self._ex_iterable, info=self._info.copy(), split=self._split, formatting=FormattingConfig(format_type=type), shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def map( self, function: Optional[Callable] = None, with_indices: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[Union[str, List[str]]] = None, features: Optional[Features] = None, fn_kwargs: Optional[dict] = None, ) -> "IterableDataset": """ Apply a function to all the examples in the iterable dataset (individually or in batches) and update them. If your function returns a column that already exists, then it overwrites it. The function is applied on-the-fly on the examples when iterating over the dataset. You can specify whether the function should be batched or not with the `batched` parameter: - If batched is `False`, then the function takes 1 example in and should return 1 example. An example is a dictionary, e.g. `{"text": "Hello there !"}`. - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples. A batch is a dictionary, e.g. a batch of 1 example is {"text": ["Hello there !"]}. - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples. Note that the last batch may have less than `n` examples. A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`. Args: function (`Callable`, *optional*, defaults to `None`): Function applied on-the-fly on the examples when you iterate on the dataset. It must have one of the following signatures: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True` - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True` For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. If no function is provided, default to identity function: `lambda x: x`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`. 
`batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to `function`. drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`[List[str]]`, *optional*, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. features (`[Features]`, *optional*, defaults to `None`): Feature types of the resulting dataset. fn_kwargs (`Dict`, *optional*, default `None`): Keyword arguments to be passed to `function`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> def add_prefix(example): ... example["text"] = "Review: " + example["text"] ... return example >>> ds = ds.map(add_prefix) >>> list(ds.take(3)) [{'label': 1, 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'Review: effective but too-tepid biopic'}] ``` """ if isinstance(input_columns, str): input_columns = [input_columns] if isinstance(remove_columns, str): remove_columns = [remove_columns] if function is None: function = identity_func if fn_kwargs is None: fn_kwargs = {} ex_iterable = MappedExamplesIterable( TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id) if self._info.features is not None else self._ex_iterable, function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, remove_columns=remove_columns, fn_kwargs=fn_kwargs, formatting=self._formatting, ) info = self.info.copy() info.features = features return IterableDataset( ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def filter( self, function: Optional[Callable] = None, with_indices=False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, fn_kwargs: Optional[dict] = None, ) -> "IterableDataset": """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function. The filtering is done on-the-fly when iterating over the dataset. Args: function (`Callable`): Callable with one of the following signatures: - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False` - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False` - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True` - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True` If no function is provided, defaults to an always True function: `lambda x: True`. 
with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`. input_columns (`str` or `List[str]`, *optional*): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, default `1000`): Number of examples per batch provided to `function` if `batched=True`. fn_kwargs (`Dict`, *optional*, default `None`): Keyword arguments to be passed to `function`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> ds = ds.filter(lambda x: x["label"] == 0) >>> list(ds.take(3)) [{'label': 0, 'movie_review': 'simplistic , silly and tedious .'}, {'label': 0, 'movie_review': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."}, {'label': 0, 'movie_review': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}] ``` """ if isinstance(input_columns, str): input_columns = [input_columns] # TODO(QL): keep the features (right now if we keep it it would call decode_example again on an already decoded example) info = copy.deepcopy(self._info) info.features = None # We need the examples to be decoded for certain feature types like Image or Audio, so we use TypedExamplesIterable here ex_iterable = FilteredExamplesIterable( TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id) if self._info.features is not None else self._ex_iterable, function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs, formatting=self._formatting, ) return IterableDataset( ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def shuffle( self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000 ) -> "IterableDataset": """ Randomly shuffles the elements of this dataset. This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer, replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or equal to the full size of the dataset is required. For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will initially select a random element from only the first 1000 elements in the buffer. Once an element is selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element, maintaining the 1000 element buffer. If the dataset is made of several shards, it also does shuffle the order of the shards. However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`] then the order of the shards is kept unchanged. Args: seed (`int`, *optional*, defaults to `None`): Random seed that will be used to shuffle the dataset. It is used to sample from the shuffle buffer and also to shuffle the data shards. generator (`numpy.random.Generator`, *optional*): Numpy random Generator to use to compute the permutation of the dataset rows. 
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). buffer_size (`int`, defaults to `1000`): Size of the buffer. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> list(ds.take(3)) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}] >>> shuffled_ds = ds.shuffle(seed=42) >>> list(shuffled_ds.take(3)) [{'label': 1, 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."}, {'label': 1, 'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'}, {'label': 1, 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}] ``` """ if generator is None: generator = np.random.default_rng(seed) else: generator = deepcopy(generator) shuffling = ShufflingConfig(generator=generator, _original_seed=seed) return IterableDataset( ex_iterable=BufferShuffledExamplesIterable( self._ex_iterable, buffer_size=buffer_size, generator=generator ).shuffle_data_sources(generator), info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=shuffling, distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def set_epoch(self, epoch: int): self._epoch = epoch def skip(self, n) -> "IterableDataset": """ Create a new [`IterableDataset`] that skips the first `n` elements. Args: n (`int`): Number of elements to skip. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> list(ds.take(3)) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}] >>> ds = ds.skip(1) >>> list(ds.take(3)) [{'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . 
tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}, {'label': 1, 'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}] ``` """ ex_iterable = SkipExamplesIterable(self._ex_iterable, n) return IterableDataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def take(self, n) -> "IterableDataset": """ Create a new [`IterableDataset`] with only the first `n` elements. Args: n (`int`): Number of elements to take. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> small_ds = ds.take(2) >>> list(small_ds) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}] ``` """ ex_iterable = TakeExamplesIterable(self._ex_iterable, n) return IterableDataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def column_names(self) -> Optional[List[str]]: """Names of the columns in the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation", streaming=True) >>> ds.column_names ['text', 'label'] ``` """ return list(self._info.features.keys()) if self._info.features is not None else None def add_column(self, name: str, column: Union[list, np.array]) -> "IterableDataset": """Add column to Dataset. Args: name (str): Column name. column (list or np.array): Column data to be added. Returns: `IterableDataset` """ return self.map(partial(add_column_fn, name=name, column=column), with_indices=True) def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDataset": """ Rename a column in the dataset, and move the features associated to the original column under the new column name. Args: original_column_name (`str`): Name of the column to rename. new_column_name (`str`): New name for the column. Returns: `IterableDataset`: A copy of the dataset with a renamed column. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} >>> ds = ds.rename_column("text", "movie_review") >>> next(iter(ds)) {'label': 1, 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ return self.rename_columns({original_column_name: new_column_name}) def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDataset": """ Rename several columns in the dataset, and move the features associated to the original columns under the new column names. Args: column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names Returns: `IterableDataset`: A copy of the dataset with renamed columns """ original_features = self._info.features.copy() if self._info.features else None ds_iterable = self.map( partial(_rename_columns_fn, column_mapping=column_mapping), remove_columns=list(column_mapping) ) if original_features is not None: ds_iterable._info.features = Features( { column_mapping[col] if col in column_mapping.keys() else col: feature for col, feature in original_features.items() } ) # check that it's still valid, especially with regard to task templates try: ds_iterable._info.copy() except ValueError: ds_iterable._info.task_templates = None return ds_iterable def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset": """ Remove one or several column(s) in the dataset and the features associated to them. The removal is done on-the-fly on the examples when iterating over the dataset. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to remove. Returns: `IterableDataset`: A copy of the dataset object without the columns to remove. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1} >>> ds = ds.remove_columns("label") >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ original_features = self._info.features.copy() if self._info.features else None ds_iterable = self.map(remove_columns=column_names) if original_features is not None: ds_iterable._info.features = original_features.copy() for col, _ in original_features.items(): if col in column_names: del ds_iterable._info.features[col] # check that it's still valid, especially with regard to task templates try: ds_iterable._info.copy() except ValueError: ds_iterable._info.task_templates = None return ds_iterable def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset": """Select one or several column(s) in the dataset and the features associated to them. The selection is done on-the-fly on the examples when iterating over the dataset. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to select. 
Returns: `IterableDataset`: A copy of the dataset object with selected columns. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1} >>> ds = ds.select_columns("text") >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ if isinstance(column_names, str): column_names = [column_names] if self._info: info = copy.deepcopy(self._info) if self._info.features is not None: missing_columns = set(column_names) - set(self._info.features.keys()) if missing_columns: raise ValueError( f"Column name {list(missing_columns)} not in the " "dataset. Columns in the dataset: " f"{list(self._info.features.keys())}." ) info.features = Features({c: info.features[c] for c in column_names}) # check that it's still valid, especially with regard to task templates try: info.copy() except ValueError: info.task_templates = None ex_iterable = SelectColumnsIterable(self._ex_iterable, column_names) return IterableDataset( ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=self._shuffling, distributed=self._distributed, token_per_repo_id=self._token_per_repo_id, ) def cast_column(self, column: str, feature: FeatureType) -> "IterableDataset": """Cast column to feature for decoding. Args: column (`str`): Column name. feature (`Feature`): Target feature. Returns: `IterableDataset` Example: ```py >>> from datasets import load_dataset, Audio >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train", streaming=True) >>> ds.features {'audio': Audio(sampling_rate=8000, mono=True, decode=True, id=None), 'english_transcription': Value(dtype='string', id=None), 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None), 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None), 'path': Value(dtype='string', id=None), 'transcription': Value(dtype='string', id=None)} >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000)) >>> ds.features {'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None), 'english_transcription': Value(dtype='string', id=None), 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None), 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None), 'path': Value(dtype='string', id=None), 'transcription': Value(dtype='string', id=None)} ``` """ info = self._info.copy() info.features[column] = feature # check that it's still valid, especially with regard to task templates try: info.copy() except ValueError: info.task_templates = None return 
IterableDataset( ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def cast( self, features: Features, ) -> "IterableDataset": """ Cast the dataset to a new set of features. Args: features ([`Features`]): New features to cast the dataset to. The name of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset. Returns: `IterableDataset`: A copy of the dataset with casted features. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> ds.features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> new_features = ds.features.copy() >>> new_features["label"] = ClassLabel(names=["bad", "good"]) >>> new_features["text"] = Value("large_string") >>> ds = ds.cast(new_features) >>> ds.features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='large_string', id=None)} ``` """ info = self._info.copy() info.features = features # check that it's still valid, especially with regard to task templates try: info.copy() except ValueError: info.task_templates = None return IterableDataset( ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def _step(self, step: int, offset: int) -> "IterableDataset": ex_iterable = StepExamplesIterable(self._ex_iterable, step=step, offset=offset) return IterableDataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def _resolve_features(self): if self.features is not None: return self elif isinstance(self._ex_iterable, TypedExamplesIterable): features = self._ex_iterable.features else: features = _infer_features_from_batch(self.with_format(None)._head()) info = self.info.copy() info.features = features return IterableDataset( ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def _interleave_iterable_datasets( datasets: List[IterableDataset], probabilities: Optional[List[float]] = None, seed: Optional[int] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ) -> IterableDataset: """ Interleave several iterable datasets (sources) into a single iterable dataset. The new iterable dataset alternates between the sources to yield examples. If `probabilities = None` (default) the iterable dataset will cycles through the sources in order for each next example in the iteration. If `probabilities` is not `None, the iterable dataset will sample a random source according to the provided probabilities for each next examples in the iteration. 
<Added version="2.4.0"/> Args: datasets (`List[IterableDataset]`): list of datasets to interleave probabilities (`List[float]`, optional, default None): If specified, the new iterable dataset samples examples from one source at a time according to these probabilities. seed (`int`, optional, default None): The random seed used to choose a source for each example. stopping_strategy (`str`, defaults to `first_exhausted`): Two strategies are proposed right now. By default, `first_exhausted` is an undersampling strategy, i.e the dataset construction is stopped as soon as one dataset has ran out of samples. If the strategy is `all_exhausted`, we use an oversampling strategy, i.e the dataset construction is stopped as soon as every samples of every dataset has been added at least once. Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous: - with no probabilities, the resulting dataset will have max_length_datasets*nb_dataset samples. - with given probabilities, the resulting dataset will have more samples if some datasets have really low probability of visiting. Output: `datasets.IterableDataset` """ datasets = [d._resolve_features() for d in datasets] # Perform checks _check_if_features_can_be_aligned([dset.features for dset in datasets]) # TODO: improve this to account for a mix of ClassLabel and Value for example # right now it would keep the type of the first dataset in the list features = Features( {k: v for features in _align_features([dset.features for dset in datasets]) for k, v in features.items()} ) ex_iterables = [d._ex_iterable for d in datasets] # Use cycling or random cycling of sources if probabilities is None: ex_iterable = CyclingMultiSourcesExamplesIterable(ex_iterables, stopping_strategy=stopping_strategy) else: generator = np.random.default_rng(seed) ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable( ex_iterables, generator=generator, probabilities=probabilities, stopping_strategy=stopping_strategy ) # Set new info - we update the features # setting the features also ensures to fill missing columns with None if info is None: info = DatasetInfo.from_merge([d.info for d in datasets]) else: info = info.copy() info.features = features # Get all the auth tokens per repository - in case the datasets come from different private repositories token_per_repo_id = { repo_id: token for dataset in datasets for repo_id, token in dataset._token_per_repo_id.items() } # Return new daset return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id) class NamedSplit(SplitBase): """Descriptor corresponding to a named split (train, test, ...). Example: Each descriptor can be composed with other using addition or slice: ```py split = datasets.Split.TRAIN.subsplit(datasets.percent[0:25]) + datasets.Split.TEST ``` The resulting split will correspond to 25% of the train split merged with 100% of the test split. A split cannot be added twice, so the following will fail: ```py split = ( datasets.Split.TRAIN.subsplit(datasets.percent[:25]) + datasets.Split.TRAIN.subsplit(datasets.percent[75:]) ) # Error split = datasets.Split.TEST + datasets.Split.ALL # Error ``` The slices can be applied only one time. 
So the following are valid: ```py split = ( datasets.Split.TRAIN.subsplit(datasets.percent[:25]) + datasets.Split.TEST.subsplit(datasets.percent[:50]) ) split = (datasets.Split.TRAIN + datasets.Split.TEST).subsplit(datasets.percent[:50]) ``` But this is not valid: ```py train = datasets.Split.TRAIN test = datasets.Split.TEST split = train.subsplit(datasets.percent[:25]).subsplit(datasets.percent[:25]) split = (train.subsplit(datasets.percent[:25]) + test).subsplit(datasets.percent[:50]) ``` """ def __init__(self, name): self._name = name split_names_from_instruction = [split_instruction.split("[")[0] for split_instruction in name.split("+")] for split_name in split_names_from_instruction: if not re.match(_split_re, split_name): raise ValueError(f"Split name should match '{_split_re}' but got '{split_name}'.") def __str__(self): return self._name def __repr__(self): return f"NamedSplit({self._name!r})" def __eq__(self, other): """Equality: datasets.Split.TRAIN == 'train'.""" if isinstance(other, NamedSplit): return self._name == other._name # pylint: disable=protected-access elif isinstance(other, SplitBase): return False elif isinstance(other, str): # Other should be string return self._name == other else: raise ValueError(f"Equality not supported between split {self} and {other}") def __lt__(self, other): return self._name < other._name # pylint: disable=protected-access def __hash__(self): return hash(self._name) def get_read_instruction(self, split_dict): return SplitReadInstruction(split_dict[self._name]) The provided code snippet includes necessary dependencies for implementing the `interleave_datasets` function. Write a Python function `def interleave_datasets( datasets: List[DatasetType], probabilities: Optional[List[float]] = None, seed: Optional[int] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ) -> DatasetType` to solve the following problem: Interleave several datasets (sources) into a single dataset. The new dataset is constructed by alternating between the sources to get the examples. You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects. - If `probabilities` is `None` (default) the new dataset is constructed by cycling between each source to get the examples. - If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities. The resulting dataset ends when one of the source datasets runs out of examples except when `oversampling` is `True`, in which case, the resulting dataset ends when all datasets have ran out of examples at least one time. Note for iterable datasets: In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process. Therefore the "first_exhausted" strategy on an sharded iterable dataset can generate less samples in total (up to 1 missing sample per subdataset per worker). Args: datasets (`List[Dataset]` or `List[IterableDataset]`): List of datasets to interleave. probabilities (`List[float]`, *optional*, defaults to `None`): If specified, the new dataset is constructed by sampling examples from one source at a time according to these probabilities. seed (`int`, *optional*, defaults to `None`): The random seed used to choose a source for each example. info ([`DatasetInfo`], *optional*): Dataset information, like description, citation, etc. 
<Added version="2.4.0"/> split ([`NamedSplit`], *optional*): Name of the dataset split. <Added version="2.4.0"/> stopping_strategy (`str`, defaults to `first_exhausted`): Two strategies are proposed right now, `first_exhausted` and `all_exhausted`. By default, `first_exhausted` is an undersampling strategy, i.e the dataset construction is stopped as soon as one dataset has ran out of samples. If the strategy is `all_exhausted`, we use an oversampling strategy, i.e the dataset construction is stopped as soon as every samples of every dataset has been added at least once. Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous: - with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples. - with given probabilities, the resulting dataset will have more samples if some datasets have really low probability of visiting. Returns: [`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets` parameter. `Dataset` if the input is a list of `Dataset`, `IterableDataset` if the input is a list of `IterableDataset`. Example: For regular datasets (map-style): ```python >>> from datasets import Dataset, interleave_datasets >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22]}) >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") >>> dataset["a"] [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) >>> dataset["a"] [10, 0, 11, 1, 2] >>> dataset = interleave_datasets([d1, d2, d3]) >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]}) >>> dataset = interleave_datasets([d1, d2, d3]) >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) >>> dataset["a"] [10, 0, 11, 1, 2] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") >>> dataset["a"] [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24] For datasets in streaming mode (iterable): >>> from datasets import load_dataset, interleave_datasets >>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True) >>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True) >>> dataset = interleave_datasets([d1, d2]) >>> iterator = iter(dataset) >>> next(iterator) {'text': 'Mtendere Village was inspired by the vision...} >>> next(iterator) {'text': "Média de débat d'idées, de culture...} ``` Here is the function: def interleave_datasets( datasets: List[DatasetType], probabilities: Optional[List[float]] = None, seed: Optional[int] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ) -> DatasetType: """ Interleave several datasets (sources) into a single dataset. 
The new dataset is constructed by alternating between the sources to get the examples.

    You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects.

        - If `probabilities` is `None` (default) the new dataset is constructed by cycling between each source to get the examples.
        - If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.

    The resulting dataset ends when one of the source datasets runs out of examples, except when the stopping strategy is `all_exhausted`, in which case the resulting dataset ends when all datasets have run out of examples at least once.

    Note for iterable datasets:

    In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process.
    Therefore the "first_exhausted" strategy on a sharded iterable dataset can generate fewer samples in total (up to 1 missing sample per subdataset per worker).

    Args:
        datasets (`List[Dataset]` or `List[IterableDataset]`):
            List of datasets to interleave.
        probabilities (`List[float]`, *optional*, defaults to `None`):
            If specified, the new dataset is constructed by sampling
            examples from one source at a time according to these probabilities.
        seed (`int`, *optional*, defaults to `None`):
            The random seed used to choose a source for each example.
        info ([`DatasetInfo`], *optional*):
            Dataset information, like description, citation, etc.
            <Added version="2.4.0"/>
        split ([`NamedSplit`], *optional*):
            Name of the dataset split.
            <Added version="2.4.0"/>
        stopping_strategy (`str`, defaults to `first_exhausted`):
            Two strategies are proposed right now, `first_exhausted` and `all_exhausted`.
            By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
            If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
            Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
            - with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples.
            - with given probabilities, the resulting dataset will have more samples if some datasets have a really low probability of being visited.
    Returns:
        [`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets`
        parameter. `Dataset` if the input is a list of `Dataset`,
        `IterableDataset` if the input is a list of `IterableDataset`.
Example: For regular datasets (map-style): ```python >>> from datasets import Dataset, interleave_datasets >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22]}) >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") >>> dataset["a"] [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) >>> dataset["a"] [10, 0, 11, 1, 2] >>> dataset = interleave_datasets([d1, d2, d3]) >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]}) >>> dataset = interleave_datasets([d1, d2, d3]) >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) >>> dataset["a"] [10, 0, 11, 1, 2] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") >>> dataset["a"] [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24] For datasets in streaming mode (iterable): >>> from datasets import load_dataset, interleave_datasets >>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True) >>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True) >>> dataset = interleave_datasets([d1, d2]) >>> iterator = iter(dataset) >>> next(iterator) {'text': 'Mtendere Village was inspired by the vision...} >>> next(iterator) {'text': "Média de débat d'idées, de culture...} ``` """ from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("Unable to interleave an empty list of datasets.") for i, dataset in enumerate(datasets): if not isinstance(dataset, (Dataset, IterableDataset)): if isinstance(dataset, (DatasetDict, IterableDatasetDict)): if not dataset: raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " "is an empty dataset dictionary." ) raise ValueError( f"Dataset at position {i} has at least one split: {list(dataset)}\n" f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']" ) raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}." ) if i == 0: dataset_type, other_type = ( (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset) ) elif not isinstance(dataset, dataset_type): raise ValueError( f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f"{stopping_strategy} is not supported. 
Please enter a valid stopping_strategy.") if dataset_type is Dataset: return _interleave_map_style_datasets( datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy ) else: return _interleave_iterable_datasets( datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
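A minimal usage sketch for the function above, assuming only that the `datasets` library is installed; the toy datasets and expected outputs follow the same pattern as the docstring examples:

```python
from datasets import Dataset, interleave_datasets

# Two small map-style datasets of different lengths.
short = Dataset.from_dict({"a": [0, 1, 2]})
longer = Dataset.from_dict({"a": [10, 11, 12, 13, 14]})

# Default "first_exhausted": cycle over the sources and stop once the
# shortest one runs out -> 6 examples here.
under = interleave_datasets([short, longer])
print(under["a"])  # expected: [0, 10, 1, 11, 2, 12]

# "all_exhausted": keep cycling until every source has been fully seen at
# least once -> max_length * nb_datasets = 10 examples here.
over = interleave_datasets([short, longer], stopping_strategy="all_exhausted")
print(over["a"])  # expected: [0, 10, 1, 11, 2, 12, 0, 13, 1, 14] (the shorter set wraps around)
```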
Interleave several datasets (sources) into a single dataset. The new dataset is constructed by alternating between the sources to get the examples. You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects. - If `probabilities` is `None` (default) the new dataset is constructed by cycling between each source to get the examples. - If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities. The resulting dataset ends when one of the source datasets runs out of examples except when `oversampling` is `True`, in which case, the resulting dataset ends when all datasets have ran out of examples at least one time. Note for iterable datasets: In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process. Therefore the "first_exhausted" strategy on an sharded iterable dataset can generate less samples in total (up to 1 missing sample per subdataset per worker). Args: datasets (`List[Dataset]` or `List[IterableDataset]`): List of datasets to interleave. probabilities (`List[float]`, *optional*, defaults to `None`): If specified, the new dataset is constructed by sampling examples from one source at a time according to these probabilities. seed (`int`, *optional*, defaults to `None`): The random seed used to choose a source for each example. info ([`DatasetInfo`], *optional*): Dataset information, like description, citation, etc. <Added version="2.4.0"/> split ([`NamedSplit`], *optional*): Name of the dataset split. <Added version="2.4.0"/> stopping_strategy (`str`, defaults to `first_exhausted`): Two strategies are proposed right now, `first_exhausted` and `all_exhausted`. By default, `first_exhausted` is an undersampling strategy, i.e the dataset construction is stopped as soon as one dataset has ran out of samples. If the strategy is `all_exhausted`, we use an oversampling strategy, i.e the dataset construction is stopped as soon as every samples of every dataset has been added at least once. Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous: - with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples. - with given probabilities, the resulting dataset will have more samples if some datasets have really low probability of visiting. Returns: [`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets` parameter. `Dataset` if the input is a list of `Dataset`, `IterableDataset` if the input is a list of `IterableDataset`. 
Example: For regular datasets (map-style): ```python >>> from datasets import Dataset, interleave_datasets >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22]}) >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") >>> dataset["a"] [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) >>> dataset["a"] [10, 0, 11, 1, 2] >>> dataset = interleave_datasets([d1, d2, d3]) >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]}) >>> dataset = interleave_datasets([d1, d2, d3]) >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) >>> dataset["a"] [10, 0, 11, 1, 2] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") >>> dataset["a"] [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24] For datasets in streaming mode (iterable): >>> from datasets import load_dataset, interleave_datasets >>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True) >>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True) >>> dataset = interleave_datasets([d1, d2]) >>> iterator = iter(dataset) >>> next(iterator) {'text': 'Mtendere Village was inspired by the vision...} >>> next(iterator) {'text': "Média de débat d'idées, de culture...} ```
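The same call also interleaves streaming sources. A small sketch, assuming `IterableDataset.from_generator` is available in the installed version of `datasets`; the generator functions and text values are made up for illustration:

```python
from itertools import islice

from datasets import IterableDataset, interleave_datasets

def gen_en():
    for i in range(1000):
        yield {"text": f"en sentence {i}"}

def gen_fr():
    for i in range(1000):
        yield {"text": f"fr phrase {i}"}

en = IterableDataset.from_generator(gen_en)
fr = IterableDataset.from_generator(gen_fr)

# Sample roughly 70% English / 30% French examples, reproducibly via the seed.
mixed = interleave_datasets([en, fr], probabilities=[0.7, 0.3], seed=42)

for example in islice(mixed, 5):
    print(example["text"])  # a seed-dependent mix of "en ..." and "fr ..." rows
```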
17,991
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal DatasetType = TypeVar("DatasetType", Dataset, IterableDataset) class Dataset(DatasetInfoMixin, IndexableMixin, TensorflowDatasetMixin): """A Dataset backed by an Arrow table.""" def __init__( self, arrow_table: Table, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_table: Optional[Table] = None, fingerprint: Optional[str] = None, ): info = info.copy() if info is not None else DatasetInfo() DatasetInfoMixin.__init__(self, info=info, split=split) IndexableMixin.__init__(self) self._data: Table = _check_table(arrow_table) self._indices: Optional[Table] = _check_table(indices_table) if indices_table is not None else None maybe_register_dataset_for_temp_dir_deletion(self) self._format_type: Optional[str] = None self._format_kwargs: dict = {} self._format_columns: Optional[list] = None self._output_all_columns: bool = False self._fingerprint: str = fingerprint # Read metadata if self._data.schema.metadata is not None and b"huggingface" in self._data.schema.metadata: metadata = json.loads(self._data.schema.metadata[b"huggingface"].decode()) if ( "fingerprint" in metadata and self._fingerprint is None ): # try to load fingerprint from the arrow file metadata self._fingerprint = metadata["fingerprint"] # Infer features if None inferred_features = Features.from_arrow_schema(arrow_table.schema) if self.info.features is None: self.info.features = inferred_features else: # make sure the nested columns are in the right order try: self.info.features = self.info.features.reorder_fields_as(inferred_features) except ValueError as e: raise ValueError( f"{e}\nThe 'source' features come from dataset_info.json, and the 'target' ones are those of the dataset arrow file." ) # Infer fingerprint if None if self._fingerprint is None: self._fingerprint = generate_fingerprint(self) # Sanity checks if self._info.features is None: raise ValueError("Features can't be None in a Dataset object") if self._fingerprint is None: raise ValueError("Fingerprint can't be None in a Dataset object") if self.info.features.type != inferred_features.type: raise ValueError( f"External features info don't match the dataset:\nGot\n{self.info.features}\nwith type\n{self.info.features.type}\n\nbut expected something like\n{inferred_features}\nwith type\n{inferred_features.type}" ) if self._indices is not None: if not pa.types.is_unsigned_integer(self._indices.column(0).type): raise ValueError( f"indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0).type}" ) _check_column_names(self._data.column_names) self._data = update_metadata_with_features(self._data, self._info.features) def features(self) -> Features: features = super().features if features is None: # this is already checked in __init__ raise ValueError("Features can't be None in a Dataset object") return features def from_file( cls, filename: str, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_filename: Optional[str] = None, in_memory: bool = False, ) -> "Dataset": """Instantiate a Dataset backed by an Arrow table at filename. 
Args: filename (`str`): File name of the dataset. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. indices_filename (`str`, *optional*): File names of the indices. in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. Returns: [`Dataset`] """ table = ArrowReader.read_table(filename, in_memory=in_memory) if indices_filename is not None: indices_pa_table = ArrowReader.read_table(indices_filename, in_memory=in_memory) else: indices_pa_table = None return cls( arrow_table=table, info=info, split=split, indices_table=indices_pa_table, ) def from_buffer( cls, buffer: pa.Buffer, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_buffer: Optional[pa.Buffer] = None, ) -> "Dataset": """Instantiate a Dataset backed by an Arrow buffer. Args: buffer (`pyarrow.Buffer`): Arrow buffer. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. indices_buffer (`pyarrow.Buffer`, *optional*): Indices Arrow buffer. Returns: [`Dataset`] """ table = InMemoryTable.from_buffer(buffer) if indices_buffer is not None: indices_table = InMemoryTable.from_buffer(buffer) else: indices_table = None return cls(table, info=info, split=split, indices_table=indices_table) def from_pandas( cls, df: pd.DataFrame, features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, preserve_index: Optional[bool] = None, ) -> "Dataset": """ Convert `pandas.DataFrame` to a `pyarrow.Table` to create a [`Dataset`]. The column types in the resulting Arrow Table are inferred from the dtypes of the `pandas.Series` in the DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the case of `object`, we need to guess the datatype by looking at the Python objects in this Series. Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only contains `None/nan` objects, the type is set to `null`. This behavior can be avoided by constructing explicit features and passing it to this function. Args: df (`pandas.DataFrame`): Dataframe that contains the dataset. features ([`Features`], *optional*): Dataset features. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. preserve_index (`bool`, *optional*): Whether to store the index as an additional column in the resulting Dataset. The default of `None` will store the index as a column, except for `RangeIndex` which is stored as metadata only. Use `preserve_index=True` to force it to be stored as a column. 
Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_pandas(df) ``` """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None if info is None: info = DatasetInfo() info.features = features table = InMemoryTable.from_pandas( df=df, preserve_index=preserve_index, ) if features is not None: # more expensive cast than InMemoryTable.from_pandas(..., schema=features.arrow_schema) # needed to support the str to Audio conversion for instance table = table.cast(features.arrow_schema) return cls(table, info=info, split=split) def from_polars( cls, df: "pl.DataFrame", features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, ) -> "Dataset": """ Collect the underlying arrow arrays in an Arrow Table. This operation is mostly zero copy. Data types that do copy: * CategoricalType Args: df (`polars.DataFrame`): DataFrame to convert to Arrow Table features (`Features`, optional): Dataset features. info (`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (`NamedSplit`, optional): Name of the dataset split. Examples: ```py >>> ds = Dataset.from_polars(df) ``` """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None if info is None: info = DatasetInfo() info.features = features table = InMemoryTable(df.to_arrow()) if features is not None: # more expensive cast than InMemoryTable.from_polars(..., schema=features.arrow_schema) # needed to support the str to Audio conversion for instance table = table.cast(features.arrow_schema) return cls(table, info=info, split=split) def from_dict( cls, mapping: dict, features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, ) -> "Dataset": """ Convert `dict` to a `pyarrow.Table` to create a [`Dataset`]. Args: mapping (`Mapping`): Mapping of strings to Arrays or Python lists. features ([`Features`], *optional*): Dataset features. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. 
Returns: [`Dataset`] """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None arrow_typed_mapping = {} for col, data in mapping.items(): if isinstance(data, (pa.Array, pa.ChunkedArray)): data = cast_array_to_feature(data, features[col]) if features is not None else data else: data = OptimizedTypedSequence( features.encode_column(data, col) if features is not None else data, type=features[col] if features is not None else None, col=col, ) arrow_typed_mapping[col] = data mapping = arrow_typed_mapping pa_table = InMemoryTable.from_pydict(mapping=mapping) if info is None: info = DatasetInfo() info.features = features if info.features is None: info.features = Features( { col: generate_from_arrow_type(data.type) if isinstance(data, (pa.Array, pa.ChunkedArray)) else data.get_inferred_type() for col, data in mapping.items() } ) return cls(pa_table, info=info, split=split) def from_list( cls, mapping: List[dict], features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, ) -> "Dataset": """ Convert a list of dicts to a `pyarrow.Table` to create a [`Dataset`]`. Note that the keys of the first entry will be used to determine the dataset columns, regardless of what is passed to features. Args: mapping (`List[dict]`): A list of mappings of strings to row values. features (`Features`, optional): Dataset features. info (`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (`NamedSplit`, optional): Name of the dataset split. Returns: [`Dataset`] """ # for simplicity and consistency wrt OptimizedTypedSequence we do not use InMemoryTable.from_pylist here mapping = {k: [r.get(k) for r in mapping] for k in mapping[0]} if mapping else {} return cls.from_dict(mapping, features, info, split) def from_csv( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from CSV file(s). Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the CSV file(s). split ([`NamedSplit`], *optional*): Split name to be assigned to the dataset. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. <Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`pandas.read_csv`]. 
Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_csv('path/to/dataset.csv') ``` """ # Dynamic import to avoid circular dependency from .io.csv import CsvDatasetReader return CsvDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, num_proc=num_proc, **kwargs, ).read() def from_generator( generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs, ): """Create a Dataset from a generator. Args: generator (:`Callable`): A generator function that `yields` examples. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. gen_kwargs(`dict`, *optional*): Keyword arguments to be passed to the `generator` callable. You can define a sharded dataset by passing the list of shards in `gen_kwargs` and setting `num_proc` greater than 1. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. If `num_proc` is greater than one, then all list values in `gen_kwargs` must be the same length. These values will be split between calls to the generator. The number of shards will be the minimum of the shortest list in `gen_kwargs` and `num_proc`. <Added version="2.7.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to :[`GeneratorConfig`]. Returns: [`Dataset`] Example: ```py >>> def gen(): ... yield {"text": "Good", "label": 0} ... yield {"text": "Bad", "label": 1} ... >>> ds = Dataset.from_generator(gen) ``` ```py >>> def gen(shards): ... for shard in shards: ... with open(shard) as f: ... for line in f: ... yield {"line": line} ... >>> shards = [f"data{i}.txt" for i in range(32)] >>> ds = Dataset.from_generator(gen, gen_kwargs={"shards": shards}) ``` """ from .io.generator import GeneratorDatasetInputStream return GeneratorDatasetInputStream( generator=generator, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, gen_kwargs=gen_kwargs, num_proc=num_proc, **kwargs, ).read() def from_json( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from JSON or JSON Lines file(s). Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the JSON or JSON Lines file(s). split ([`NamedSplit`], *optional*): Split name to be assigned to the dataset. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. field (`str`, *optional*): Field name of the JSON file where the dataset is contained in. num_proc (`int`, *optional* defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. <Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`JsonConfig`]. 
Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_json('path/to/dataset.json') ``` """ # Dynamic import to avoid circular dependency from .io.json import JsonDatasetReader return JsonDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, field=field, num_proc=num_proc, **kwargs, ).read() def from_parquet( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, columns: Optional[List[str]] = None, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from Parquet file(s). Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the Parquet file(s). split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. columns (`List[str]`, *optional*): If not `None`, only these columns will be read from the file. A column name may be a prefix of a nested field, e.g. 'a' will select 'a.b', 'a.c', and 'a.d.e'. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. <Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`ParquetConfig`]. Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_parquet('path/to/dataset.parquet') ``` """ # Dynamic import to avoid circular dependency from .io.parquet import ParquetDatasetReader return ParquetDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, columns=columns, num_proc=num_proc, **kwargs, ).read() def from_text( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from text file(s). Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the text file(s). split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. <Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`TextConfig`]. 
Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_text('path/to/dataset.txt') ``` """ # Dynamic import to avoid circular dependency from .io.text import TextDatasetReader return TextDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, num_proc=num_proc, **kwargs, ).read() def from_spark( df: "pyspark.sql.DataFrame", split: Optional[NamedSplit] = None, features: Optional[Features] = None, keep_in_memory: bool = False, cache_dir: str = None, working_dir: str = None, load_from_cache_file: bool = True, **kwargs, ): """Create a Dataset from Spark DataFrame. Dataset downloading is distributed over Spark workers. Args: df (`pyspark.sql.DataFrame`): The DataFrame containing the desired data. split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. When using a multi-node Spark cluster, the cache_dir must be accessible to both workers and the driver. keep_in_memory (`bool`): Whether to copy the data in-memory. working_dir (`str`, *optional*) Intermediate directory for each Spark worker to write data to before moving it to `cache_dir`. Setting a non-NFS intermediate directory may improve performance. load_from_cache_file (`bool`): Whether to load the dataset from the cache if possible. Returns: [`Dataset`] Example: ```py >>> df = spark.createDataFrame( >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]], >>> columns=["id", "name"], >>> ) >>> ds = Dataset.from_spark(df) ``` """ # Dynamic import to avoid circular dependency from .io.spark import SparkDatasetReader if sys.platform == "win32": raise EnvironmentError("Dataset.from_spark is not currently supported on Windows") return SparkDatasetReader( df, split=split, features=features, streaming=False, cache_dir=cache_dir, keep_in_memory=keep_in_memory, working_dir=working_dir, load_from_cache_file=load_from_cache_file, **kwargs, ).read() def from_sql( sql: Union[str, "sqlalchemy.sql.Selectable"], con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ): """Create Dataset from SQL query or database table. Args: sql (`str` or `sqlalchemy.sql.Selectable`): SQL query to be executed or a table name. con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Connection`): A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) used to instantiate a database connection or a SQLite3/SQLAlchemy connection object. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. **kwargs (additional keyword arguments): Keyword arguments to be passed to [`SqlConfig`]. 
Returns: [`Dataset`] Example: ```py >>> # Fetch a database table >>> ds = Dataset.from_sql("test_data", "postgres:///db_name") >>> # Execute a SQL query on the table >>> ds = Dataset.from_sql("SELECT sentence FROM test_data", "postgres:///db_name") >>> # Use a Selectable object to specify the query >>> from sqlalchemy import select, text >>> stmt = select([text("sentence")]).select_from(text("test_data")) >>> ds = Dataset.from_sql(stmt, "postgres:///db_name") ``` <Tip> The returned dataset can only be cached if `con` is specified as URI string. </Tip> """ from .io.sql import SqlDatasetReader return SqlDatasetReader( sql, con, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs, ).read() def __setstate__(self, state): self.__dict__.update(state) maybe_register_dataset_for_temp_dir_deletion(self) return self def __del__(self): if hasattr(self, "_data"): del self._data if hasattr(self, "_indices"): del self._indices def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables self.__del__() def save_to_disk( self, dataset_path: PathLike, fs="deprecated", max_shard_size: Optional[Union[str, int]] = None, num_shards: Optional[int] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, ): """ Saves a dataset to a dataset directory, or in a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`. For [`Image`] and [`Audio`] data: All the Image() and Audio() data are stored in the arrow files. If you want to store paths or urls, please use the Value("string") type. Args: dataset_path (`str`): Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`) of the dataset directory where the dataset will be saved to. fs (`fsspec.spec.AbstractFileSystem`, *optional*): Instance of the remote filesystem where the dataset will be saved to. <Deprecated version="2.8.0"> `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0. Please use `storage_options` instead, e.g. `storage_options=fs.storage_options` </Deprecated> max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit (like `"50MB"`). num_shards (`int`, *optional*): Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`. <Added version="2.8.0"/> num_proc (`int`, *optional*): Number of processes when downloading and generating the dataset locally. Multiprocessing is disabled by default. <Added version="2.8.0"/> storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.8.0"/> Example: ```py >>> ds.save_to_disk("path/to/dataset/directory") >>> ds.save_to_disk("path/to/dataset/directory", max_shard_size="1GB") >>> ds.save_to_disk("path/to/dataset/directory", num_shards=1024) ``` """ if max_shard_size is not None and num_shards is not None: raise ValueError( "Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both." 
) if fs != "deprecated": warnings.warn( "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n" "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", FutureWarning, ) storage_options = fs.storage_options if self.list_indexes(): raise ValueError("please remove all the indexes using `dataset.drop_index` before saving a dataset") if num_shards is None: dataset_nbytes = self._estimate_nbytes() max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE) num_shards = int(dataset_nbytes / max_shard_size) + 1 num_shards = max(num_shards, num_proc or 1) num_proc = num_proc if num_proc is not None else 1 num_shards = num_shards if num_shards is not None else num_proc fs: fsspec.AbstractFileSystem fs, _ = url_to_fs(dataset_path, **(storage_options or {})) if not is_remote_filesystem(fs): parent_cache_files_paths = { Path(cache_filename["filename"]).resolve().parent for cache_filename in self.cache_files } # Check that the dataset doesn't overwrite iself. It can cause a permission error on Windows and a segfault on linux. if Path(dataset_path).expanduser().resolve() in parent_cache_files_paths: raise PermissionError( f"Tried to overwrite {Path(dataset_path).expanduser().resolve()} but a dataset can't overwrite itself." ) fs.makedirs(dataset_path, exist_ok=True) # Get json serializable state state = { key: self.__dict__[key] for key in [ "_fingerprint", "_format_columns", "_format_kwargs", "_format_type", "_output_all_columns", ] } state["_split"] = str(self.split) if self.split is not None else self.split state["_data_files"] = [ {"filename": f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"} for shard_idx in range(num_shards) ] for k in state["_format_kwargs"].keys(): try: json.dumps(state["_format_kwargs"][k]) except TypeError as e: raise TypeError( str(e) + f"\nThe format kwargs must be JSON serializable, but key '{k}' isn't." 
) from None # Get json serializable dataset info dataset_info = asdict(self._info) shards_done = 0 pbar = hf_tqdm( unit=" examples", total=len(self), desc=f"Saving the dataset ({shards_done}/{num_shards} shards)", ) kwargs_per_job = ( { "job_id": shard_idx, "shard": self.shard(num_shards=num_shards, index=shard_idx, contiguous=True), "fpath": posixpath.join(dataset_path, f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"), "storage_options": storage_options, } for shard_idx in range(num_shards) ) shard_lengths = [None] * num_shards shard_sizes = [None] * num_shards if num_proc > 1: with Pool(num_proc) as pool: with pbar: for job_id, done, content in iflatmap_unordered( pool, Dataset._save_to_disk_single, kwargs_iterable=kwargs_per_job ): if done: shards_done += 1 pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)") logger.debug(f"Finished writing shard number {job_id} of {num_shards}.") shard_lengths[job_id], shard_sizes[job_id] = content else: pbar.update(content) else: with pbar: for kwargs in kwargs_per_job: for job_id, done, content in Dataset._save_to_disk_single(**kwargs): if done: shards_done += 1 pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)") logger.debug(f"Finished writing shard number {job_id} of {num_shards}.") shard_lengths[job_id], shard_sizes[job_id] = content else: pbar.update(content) with fs.open( posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME), "w", encoding="utf-8" ) as state_file: json.dump(state, state_file, indent=2, sort_keys=True) with fs.open( posixpath.join(dataset_path, config.DATASET_INFO_FILENAME), "w", encoding="utf-8" ) as dataset_info_file: # Sort only the first level of keys, or we might shuffle fields of nested features if we use sort_keys=True sorted_keys_dataset_info = {key: dataset_info[key] for key in sorted(dataset_info)} json.dump(sorted_keys_dataset_info, dataset_info_file, indent=2) def _save_to_disk_single(job_id: int, shard: "Dataset", fpath: str, storage_options: Optional[dict]): batch_size = config.DEFAULT_MAX_BATCH_SIZE num_examples_progress_update = 0 writer = ArrowWriter( features=shard.features, path=fpath, storage_options=storage_options, embed_local_files=True, ) try: _time = time.time() for pa_table in shard.with_format("arrow").iter(batch_size): writer.write_table(pa_table) num_examples_progress_update += len(pa_table) if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: _time = time.time() yield job_id, False, num_examples_progress_update num_examples_progress_update = 0 finally: yield job_id, False, num_examples_progress_update num_examples, num_bytes = writer.finalize() writer.close() yield job_id, True, (num_examples, num_bytes) def _build_local_temp_path(uri_or_path: str) -> Path: """ Builds and returns a Path concatenating a local temporary dir with the dir path (or absolute/relative path extracted from the uri) passed. Args: uri_or_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`) to concatenate. 
Returns: :class:`Path`: the concatenated path (temp dir + path) """ src_dataset_path = Path(uri_or_path) tmp_dir = get_temporary_cache_files_directory() return Path(tmp_dir, src_dataset_path.relative_to(src_dataset_path.anchor)) def load_from_disk( dataset_path: str, fs="deprecated", keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None, ) -> "Dataset": """ Loads a dataset that was previously saved using [`save_to_disk`] from a dataset directory, or from a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`. Args: dataset_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3//my-bucket/dataset/train"`) of the dataset directory where the dataset will be loaded from. fs (`fsspec.spec.AbstractFileSystem`, *optional*): Instance of the remote filesystem where the dataset will be saved to. <Deprecated version="2.8.0"> `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0. Please use `storage_options` instead, e.g. `storage_options=fs.storage_options` </Deprecated> keep_in_memory (`bool`, defaults to `None`): Whether to copy the dataset in-memory. If `None`, the dataset will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the [improve performance](../cache#improve-performance) section. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.8.0"/> Returns: [`Dataset`] or [`DatasetDict`]: - If `dataset_path` is a path of a dataset directory, the dataset requested. - If `dataset_path` is a path of a dataset dict directory, a `datasets.DatasetDict` with each split. Example: ```py >>> ds = load_from_disk("path/to/dataset/directory") ``` """ if fs != "deprecated": warnings.warn( "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n" "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", FutureWarning, ) storage_options = fs.storage_options fs: fsspec.AbstractFileSystem fs, dataset_path = url_to_fs(dataset_path, **(storage_options or {})) dest_dataset_path = dataset_path dataset_dict_json_path = posixpath.join(dest_dataset_path, config.DATASETDICT_JSON_FILENAME) dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME) dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME) dataset_dict_is_file = fs.isfile(dataset_dict_json_path) dataset_info_is_file = fs.isfile(dataset_info_path) dataset_state_is_file = fs.isfile(dataset_state_json_path) if not dataset_info_is_file and not dataset_state_is_file: if dataset_dict_is_file: raise FileNotFoundError( f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead." ) raise FileNotFoundError( f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object but provided path is not a `Dataset`." ) if not dataset_info_is_file: if dataset_dict_is_file: raise FileNotFoundError( f"No such file: '{dataset_info_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead." ) raise FileNotFoundError( f"No such file: '{dataset_info_path}'. 
Expected to load a `Dataset` object but provided path is not a `Dataset`." ) if not dataset_state_is_file: if dataset_dict_is_file: raise FileNotFoundError( f"No such file: '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead." ) raise FileNotFoundError( f"No such file: '{dataset_state_json_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`." ) # copies file from filesystem if it is remote filesystem to local filesystem and modifies dataset_path to temp directory containing local copies if is_remote_filesystem(fs): src_dataset_path = dest_dataset_path dest_dataset_path = Dataset._build_local_temp_path(src_dataset_path) fs.download(src_dataset_path, dest_dataset_path.as_posix(), recursive=True) dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME) dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME) with open(dataset_state_json_path, encoding="utf-8") as state_file: state = json.load(state_file) with open(dataset_info_path, encoding="utf-8") as dataset_info_file: dataset_info = DatasetInfo.from_dict(json.load(dataset_info_file)) dataset_size = estimate_dataset_size( Path(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"] ) keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(dataset_size) table_cls = InMemoryTable if keep_in_memory else MemoryMappedTable arrow_table = concat_tables( thread_map( table_cls.from_file, [posixpath.join(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"]], tqdm_class=hf_tqdm, desc="Loading dataset from disk", # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached disable=len(state["_data_files"]) <= 16 or None, ) ) split = state["_split"] split = Split(split) if split is not None else split dataset = Dataset( arrow_table=arrow_table, info=dataset_info, split=split, fingerprint=state["_fingerprint"], ) format = { "type": state["_format_type"], "format_kwargs": state["_format_kwargs"], "columns": state["_format_columns"], "output_all_columns": state["_output_all_columns"], } dataset = dataset.with_format(**format) return dataset def data(self) -> Table: """The Apache Arrow table backing the dataset. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.data MemoryMappedTable text: string label: int64 ---- text: [["compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .","the soundtrack alone is worth the price of admission .","rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .","beneath the film's obvious determination to shock at any cost lies considerable skill and determination , backed by sheer nerve .","bielinsky is a filmmaker of impressive talent .","so beautifully acted and directed , it's clear that washington most certainly has a new career ahead of him if he so chooses .","a visual spectacle full of stunning images and effects .","a gentle and engrossing character study .","it's enough to watch huppert scheming , with her small , intelligent eyes as steady as any noir villain , and to enjoy the perfectly pitched web of tension that chabrol spins .","an engrossing portrait of uncompromising artists trying to create something original against the backdrop of a corporate music industry that only seems to care about the bottom line .",...,"ultimately , jane learns her place as a girl , softens up and loses some of the intensity that made her an interesting character to begin with .","ah-nuld's action hero days might be over .","it's clear why deuces wild , which was shot two years ago , has been gathering dust on mgm's shelf .","feels like nothing quite so much as a middle-aged moviemaker's attempt to surround himself with beautiful , half-naked women .","when the precise nature of matthew's predicament finally comes into sharp focus , the revelation fails to justify the build-up .","this picture is murder by numbers , and as easy to be bored by as your abc's , despite a few whopping shootouts .","hilarious musical comedy though stymied by accents thick as mud .","if you are into splatter movies , then you will probably have a reasonably good time with the salton sea .","a dull , simple-minded and stereotypical tale of drugs , death and mind-numbing indifference on the inner-city streets .","the feature-length stretch . . . strains the show's concept ."]] label: [[1,1,1,1,1,1,1,1,1,1,...,0,0,0,0,0,0,0,0,0,0]] ``` """ return self._data def cache_files(self) -> List[dict]: """The cache files containing the Apache Arrow table backing the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.cache_files [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}] ``` """ cache_files = list_table_cache_files(self._data) if self._indices is not None: cache_files += list_table_cache_files(self._indices) return [{"filename": cache_filename} for cache_filename in cache_files] def num_columns(self) -> int: """Number of columns in the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.num_columns 2 ``` """ return self._data.num_columns def num_rows(self) -> int: """Number of rows in the dataset (same as [`Dataset.__len__`]). 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.num_rows 1066 ``` """ if self._indices is not None: return self._indices.num_rows return self._data.num_rows def column_names(self) -> List[str]: """Names of the columns in the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.column_names ['text', 'label'] ``` """ return self._data.column_names def shape(self) -> Tuple[int, int]: """Shape of the dataset (number of columns, number of rows). Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.shape (1066, 2) ``` """ if self._indices is not None: return (self._indices.num_rows, self._data.num_columns) return self._data.shape def unique(self, column: str) -> List: """Return a list of the unique elements in a column. This is implemented in the low-level backend and as such, very fast. Args: column (`str`): Column name (list all the column names with [`~datasets.Dataset.column_names`]). Returns: `list`: List of unique elements in the given column. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.unique('label') [1, 0] ``` """ if column not in self._data.column_names: raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).") if self._indices is not None and self._indices.num_rows != self._data.num_rows: dataset = self.flatten_indices() else: dataset = self return dataset._data.column(column).unique().to_pylist() def class_encode_column(self, column: str, include_nulls: bool = False) -> "Dataset": """Casts the given column as [`~datasets.features.ClassLabel`] and updates the table. Args: column (`str`): The name of the column to cast (list all the column names with [`~datasets.Dataset.column_names`]) include_nulls (`bool`, defaults to `False`): Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label. <Added version="1.14.2"/> Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("boolq", split="validation") >>> ds.features {'answer': Value(dtype='bool', id=None), 'passage': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None)} >>> ds = ds.class_encode_column('answer') >>> ds.features {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None), 'passage': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None)} ``` """ # Sanity checks if column not in self._data.column_names: raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).") src_feat = self._info.features[column] if not isinstance(src_feat, Value): raise ValueError( f"Class encoding is only supported for {Value.__name__} column, and column {column} is {type(src_feat).__name__}." 
) if src_feat.dtype != "string" or (include_nulls and None in self.unique(column)): def stringify_column(batch): batch[column] = [ str(sample) if include_nulls or sample is not None else None for sample in batch[column] ] return batch dset = self.map( stringify_column, batched=True, desc="Stringifying the column", ) else: dset = self # Create the new feature class_names = sorted(str(sample) for sample in dset.unique(column) if include_nulls or sample is not None) dst_feat = ClassLabel(names=class_names) def cast_to_class_labels(batch): batch[column] = [ dst_feat.str2int(str(sample)) if include_nulls or sample is not None else None for sample in batch[column] ] return batch new_features = dset.features.copy() new_features[column] = dst_feat dset = dset.map( cast_to_class_labels, batched=True, features=new_features, desc="Casting to class labels", ) return dset def flatten(self, new_fingerprint: Optional[str] = None, max_depth=16) -> "Dataset": """Flatten the table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset with flattened columns. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad", split="train") >>> ds.features {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} >>> ds.flatten() Dataset({ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], num_rows: 87599 }) ``` """ dataset = copy.deepcopy(self) for depth in range(1, max_depth): if any(isinstance(field.type, pa.StructType) for field in dataset._data.schema): dataset._data = dataset._data.flatten() else: break dataset.info.features = self._info.features.flatten(max_depth=max_depth) dataset.info.features = Features({col: dataset.info.features[col] for col in dataset.data.column_names}) dataset._data = update_metadata_with_features(dataset._data, dataset.features) logger.info(f'Flattened dataset from depth {depth} to depth {1 if depth + 1 < max_depth else "unknown"}.') dataset._fingerprint = new_fingerprint return dataset def cast( self, features: Features, batch_size: Optional[int] = 1000, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, num_proc: Optional[int] = None, ) -> "Dataset": """ Cast the dataset to a new set of features. Args: features ([`Features`]): New features to cast the dataset to. The name of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. `str` <-> `ClassLabel` you should use [`~datasets.Dataset.map`] to update the Dataset. batch_size (`int`, defaults to `1000`): Number of examples per batch provided to cast. If `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to cast. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. 
load_from_cache_file (`bool`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`str`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running [`~datasets.Dataset.map`]. num_proc (`int`, *optional*, defaults to `None`): Number of processes for multiprocessing. By default it doesn't use multiprocessing. Returns: [`Dataset`]: A copy of the dataset with casted features. Example: ```py >>> from datasets import load_dataset, ClassLabel, Value >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> new_features = ds.features.copy() >>> new_features['label'] = ClassLabel(names=['bad', 'good']) >>> new_features['text'] = Value('large_string') >>> ds = ds.cast(new_features) >>> ds.features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='large_string', id=None)} ``` """ if sorted(features) != sorted(self._data.column_names): raise ValueError( f"The columns in features ({list(features)}) must be identical " f"as the columns in the dataset: {self._data.column_names}" ) schema = features.arrow_schema format = self.format dataset = self.with_format("arrow") # capture the PyArrow version here to make the lambda serializable on Windows dataset = dataset.map( partial(table_cast, schema=schema), batched=True, batch_size=batch_size, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, num_proc=num_proc, features=features, desc="Casting the dataset", ) dataset = dataset.with_format(**format) return dataset def cast_column(self, column: str, feature: FeatureType, new_fingerprint: Optional[str] = None) -> "Dataset": """Cast column to feature for decoding. Args: column (`str`): Column name. feature (`FeatureType`): Target feature. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. 
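        For feature types that define decoding (for example audio or image features), the cast is applied directly on the
        underlying Arrow data without going through [`~datasets.Dataset.map`]. A minimal sketch, assuming a dataset that
        has an `"audio"` column (the column name and sampling rate below are illustrative, not taken from the examples
        in this file):

        ```py
        >>> from datasets import Audio
        >>> ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
        ```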
Returns: [`Dataset`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good'])) >>> ds.features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='string', id=None)} ``` """ if hasattr(feature, "decode_example"): dataset = copy.deepcopy(self) dataset._info.features[column] = feature dataset._fingerprint = new_fingerprint dataset._data = dataset._data.cast(dataset.features.arrow_schema) dataset._data = update_metadata_with_features(dataset._data, dataset.features) return dataset else: features = self.features features[column] = feature return self.cast(features) def remove_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset": """ Remove one or several column(s) in the dataset and the features associated to them. You can also remove a column using [`~datasets.Dataset.map`] with `remove_columns` but the present method is in-place (doesn't copy the data to a new dataset) and is thus faster. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to remove. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset object without the columns to remove. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.remove_columns('label') Dataset({ features: ['text'], num_rows: 1066 }) >>> ds.remove_columns(column_names=ds.column_names) # Removing all the columns returns an empty dataset with the `num_rows` property set to 0 Dataset({ features: [], num_rows: 0 }) ``` """ dataset = copy.deepcopy(self) if isinstance(column_names, str): column_names = [column_names] missing_columns = set(column_names) - set(self._data.column_names) if missing_columns: raise ValueError( f"Column name {list(missing_columns)} not in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" ) for column_name in column_names: del dataset._info.features[column_name] dataset._data = dataset._data.drop(column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset def rename_column( self, original_column_name: str, new_column_name: str, new_fingerprint: Optional[str] = None ) -> "Dataset": """ Rename a column in the dataset, and move the features associated to the original column under the new column name. Args: original_column_name (`str`): Name of the column to rename. new_column_name (`str`): New name for the column. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset with a renamed column. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.rename_column('label', 'label_new') Dataset({ features: ['text', 'label_new'], num_rows: 1066 }) ``` """ dataset = copy.deepcopy(self) if original_column_name not in dataset._data.column_names: raise ValueError( f"Original column name {original_column_name} not in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" ) if new_column_name in dataset._data.column_names: raise ValueError( f"New column name {new_column_name} already in the dataset. " f"Please choose a column name which is not already in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" ) if not new_column_name: raise ValueError("New column name is empty.") def rename(columns): return [new_column_name if col == original_column_name else col for col in columns] new_column_names = rename(self._data.column_names) if self._format_columns is not None: dataset._format_columns = rename(self._format_columns) dataset._info.features = Features( { new_column_name if col == original_column_name else col: feature for col, feature in self._info.features.items() } ) dataset._data = dataset._data.rename_columns(new_column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset def rename_columns(self, column_mapping: Dict[str, str], new_fingerprint: Optional[str] = None) -> "Dataset": """ Rename several columns in the dataset, and move the features associated to the original columns under the new column names. Args: column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset with renamed columns Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'}) Dataset({ features: ['text_new', 'label_new'], num_rows: 1066 }) ``` """ dataset = copy.deepcopy(self) extra_columns = set(column_mapping.keys()) - set(dataset.column_names) if extra_columns: raise ValueError( f"Original column names {extra_columns} not in the dataset. 
" f"Current columns in the dataset: {dataset._data.column_names}" ) number_of_duplicates_in_new_columns = len(column_mapping.values()) - len(set(column_mapping.values())) if number_of_duplicates_in_new_columns != 0: raise ValueError( "New column names must all be different, but this column mapping " f"has {number_of_duplicates_in_new_columns} duplicates" ) empty_new_columns = [new_col for new_col in column_mapping.values() if not new_col] if empty_new_columns: raise ValueError(f"New column names {empty_new_columns} are empty.") def rename(columns): return [column_mapping[col] if col in column_mapping else col for col in columns] new_column_names = rename(self._data.column_names) if self._format_columns is not None: dataset._format_columns = rename(self._format_columns) dataset._info.features = Features( { column_mapping[col] if col in column_mapping else col: feature for col, feature in (self._info.features or {}).items() } ) dataset._data = dataset._data.rename_columns(new_column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset def select_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset": """Select one or several column(s) in the dataset and the features associated to them. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to keep. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset object which only consists of selected columns. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.select_columns(['text']) Dataset({ features: ['text'], num_rows: 1066 }) ``` """ if isinstance(column_names, str): column_names = [column_names] missing_columns = set(column_names) - set(self._data.column_names) if missing_columns: raise ValueError( f"Column name {list(missing_columns)} not in the " "dataset. Current columns in the dataset: " f"{self._data.column_names}." ) dataset = copy.deepcopy(self) dataset._data = dataset._data.select(column_names) dataset._info.features = Features({col: self._info.features[col] for col in dataset._data.column_names}) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset def __len__(self): """Number of rows in the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.__len__ <bound method Dataset.__len__ of Dataset({ features: ['text', 'label'], num_rows: 1066 })> ``` """ return self.num_rows def __iter__(self): """Iterate through the examples. If a formatting is set with :meth:`Dataset.set_format` rows will be returned with the selected format. 
""" if self._indices is None: # Fast iteration # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch) format_kwargs = self._format_kwargs if self._format_kwargs is not None else {} formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs) batch_size = config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER for pa_subtable in table_iter(self.data, batch_size=batch_size): for i in range(pa_subtable.num_rows): pa_subtable_ex = pa_subtable.slice(i, 1) formatted_output = format_table( pa_subtable_ex, 0, formatter=formatter, format_columns=self._format_columns, output_all_columns=self._output_all_columns, ) yield formatted_output else: for i in range(self.num_rows): yield self._getitem( i, ) def iter(self, batch_size: int, drop_last_batch: bool = False): """Iterate through the batches of size `batch_size`. If a formatting is set with [`~datasets.Dataset.set_format`] rows will be returned with the selected format. Args: batch_size (:obj:`int`): size of each batch to yield. drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be dropped """ if self._indices is None: # Fast iteration # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch) format_kwargs = self._format_kwargs if self._format_kwargs is not None else {} formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs) for pa_subtable in table_iter(self.data, batch_size=batch_size, drop_last_batch=drop_last_batch): formatted_batch = format_table( pa_subtable, range(pa_subtable.num_rows), formatter=formatter, format_columns=self._format_columns, output_all_columns=self._output_all_columns, ) yield formatted_batch else: num_rows = self.num_rows if not drop_last_batch else self.num_rows // batch_size * batch_size for i in range(0, num_rows, batch_size): yield self._getitem( slice(i, i + batch_size), ) def __repr__(self): return f"Dataset({{\n features: {list(self._info.features.keys())},\n num_rows: {self.num_rows}\n}})" def format(self): return { "type": self._format_type, "format_kwargs": self._format_kwargs, "columns": self.column_names if self._format_columns is None else self._format_columns, "output_all_columns": self._output_all_columns, } def formatted_as( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """To be used in a `with` statement. Set `__getitem__` return format (type and columns). Args: type (`str`, *optional*): Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__`` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. `None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. 
""" old_format_type = self._format_type old_format_kwargs = self._format_kwargs old_format_columns = self._format_columns old_output_all_columns = self._output_all_columns try: self.set_format(type, columns, output_all_columns, **format_kwargs) yield finally: self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs) def set_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly. The format `type` (for example "numpy") is used to format batches when using `__getitem__`. It's also possible to use custom transforms for formatting using [`~datasets.Dataset.set_transform`]. Args: type (`str`, *optional*): Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. `None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. It is possible to call [`~datasets.Dataset.map`] after calling `set_format`. Since `map` may add new columns, then the list of formatted columns gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted as: ``` new formatted columns = (all columns - previously unformatted columns) ``` Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) >>> ds.set_format(type='numpy', columns=['text', 'label']) >>> ds.format {'type': 'numpy', 'format_kwargs': {}, 'columns': ['text', 'label'], 'output_all_columns': False} ``` """ format_kwargs.update(format_kwargs.pop("format_kwargs", {})) # allow to use self.set_format(**self.format) # Check that the format_type and format_kwargs are valid and make it possible to have a Formatter type = get_format_type_from_alias(type) get_formatter(type, features=self._info.features, **format_kwargs) # Check filter column if isinstance(columns, str): columns = [columns] if isinstance(columns, tuple): columns = list(columns) if columns is not None: missing_columns = set(columns) - set(self._data.column_names) if missing_columns: raise ValueError( f"Columns {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) if columns is not None: columns = columns.copy() # Ensures modifications made to the list after this call don't cause bugs self._format_type = type self._format_kwargs = format_kwargs self._format_columns = columns self._output_all_columns = output_all_columns logger.debug( "Set __getitem__(key) output type to %s for %s columns " " (when key is int or slice) and %s output other (un-formatted) columns.", "python objects" if type is None else type, "no" if columns is None else str(columns), "do" if output_all_columns else "don't", ) def reset_format(self): """Reset `__getitem__` return format to python objects and all columns. 
Same as `self.set_format()` Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) >>> ds.set_format(type='numpy', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) >>> ds.format {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], 'format_kwargs': {}, 'output_all_columns': False, 'type': 'numpy'} >>> ds.reset_format() >>> ds.format {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], 'format_kwargs': {}, 'output_all_columns': False, 'type': None} ``` """ self.set_format() def set_transform( self, transform: Optional[Callable], columns: Optional[List] = None, output_all_columns: bool = False, ): """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called. As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`]. Args: transform (`Callable`, *optional*): User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`]. A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch. This function is applied right before returning the objects in `__getitem__`. columns (`List[str]`, *optional*): Columns to format in the output. If specified, then the input batch of the transform only contains those columns. output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). If set to True, then the other un-formatted columns are kept with the output of the transform. Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') >>> def encode(batch): ... return tokenizer(batch['text'], padding=True, truncation=True, return_tensors='pt') >>> ds.set_transform(encode) >>> ds[0] {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), 'input_ids': tensor([ 101, 29353, 2135, 15102, 1996, 9428, 20868, 2890, 8663, 6895, 20470, 2571, 3663, 2090, 4603, 3017, 3008, 1998, 2037, 24211, 5637, 1998, 11690, 2336, 1012, 102]), 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])} ``` """ self.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform) def with_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly. The format `type` (for example "numpy") is used to format batches when using `__getitem__`. It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`]. Contrary to [`~datasets.Dataset.set_format`], `with_format` returns a new [`Dataset`] object. Args: type (`str`, *optional*): Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. 
`None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) >>> ds.format {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], 'format_kwargs': {}, 'output_all_columns': False, 'type': None} >>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) >>> ds.format {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], 'format_kwargs': {}, 'output_all_columns': False, 'type': 'tensorflow'} ``` """ dataset = copy.deepcopy(self) dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs) return dataset def with_transform( self, transform: Optional[Callable], columns: Optional[List] = None, output_all_columns: bool = False, ): """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called. As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`]. Contrary to [`~datasets.Dataset.set_transform`], `with_transform` returns a new [`Dataset`] object. Args: transform (`Callable`, `optional`): User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`]. A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch. This function is applied right before returning the objects in `__getitem__`. columns (`List[str]`, `optional`): Columns to format in the output. If specified, then the input batch of the transform only contains those columns. output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). If set to `True`, then the other un-formatted columns are kept with the output of the transform. Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> def encode(example): ... 
return tokenizer(example["text"], padding=True, truncation=True, return_tensors='pt') >>> ds = ds.with_transform(encode) >>> ds[0] {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), 'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617, 1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105, 1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102]), 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])} ``` """ dataset = copy.deepcopy(self) dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns) return dataset def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "Dataset": """ Prepare a dataset for the given task by casting the dataset's [`Features`] to standardized column names and types as detailed in [`datasets.tasks`](./task_templates). Casts [`datasets.DatasetInfo.features`] according to a task-specific schema. Intended for single-use only, so all task templates are removed from [`datasets.DatasetInfo.task_templates`] after casting. Args: task (`Union[str, TaskTemplate]`): The task to prepare the dataset for during training and evaluation. If `str`, supported tasks include: - `"text-classification"` - `"question-answering"` If [`TaskTemplate`], must be one of the task templates in [`datasets.tasks`](./task_templates). id (`int`, defaults to `0`): The id required to unambiguously identify the task template when multiple task templates of the same type are supported. """ # TODO(lewtun): Add support for casting nested features like answers.text and answers.answer_start in SQuAD if isinstance(task, str): tasks = [template.task for template in (self.info.task_templates or [])] compatible_templates = [template for template in (self.info.task_templates or []) if template.task == task] if not compatible_templates: raise ValueError( f"Task {task} is not compatible with this dataset! Available tasks: {list(unique_values(tasks))}" ) if not 0 <= id < len(compatible_templates): templates_list_str = "\n".join( f"- `{idx}` for task {template}" for idx, template in enumerate(compatible_templates) ) raise ValueError( f"Id {id} for task {task} is not in a valid range. Supported ids:\n{templates_list_str}" ) template = compatible_templates[id] elif isinstance(task, TaskTemplate): template = task else: raise ValueError( f"Expected a `str` or `datasets.TaskTemplate` object but got task {task} with type {type(task)}." 
) template = template.align_with_features(self.info.features) column_mapping = template.column_mapping columns_to_drop = [column for column in self.column_names if column not in column_mapping] dataset = self.remove_columns(columns_to_drop) dataset = dataset.rename_columns(column_mapping) # We found a template so now flush `DatasetInfo` to skip the template update in `DatasetInfo.__post_init__` dataset.info.task_templates = None dataset = dataset.cast(features=template.features) return dataset def _getitem(self, key: Union[int, slice, str, ListLike[int]], **kwargs) -> Union[Dict, List]: """ Can be used to index columns (by string names) or rows (by integer, slice, or list-like of integer indices) """ if isinstance(key, bool): raise TypeError("dataset index must be int, str, slice or collection of int, not bool") format_type = kwargs["format_type"] if "format_type" in kwargs else self._format_type format_columns = kwargs["format_columns"] if "format_columns" in kwargs else self._format_columns output_all_columns = ( kwargs["output_all_columns"] if "output_all_columns" in kwargs else self._output_all_columns ) format_kwargs = kwargs["format_kwargs"] if "format_kwargs" in kwargs else self._format_kwargs format_kwargs = format_kwargs if format_kwargs is not None else {} formatter = get_formatter(format_type, features=self._info.features, **format_kwargs) pa_subtable = query_table(self._data, key, indices=self._indices) formatted_output = format_table( pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns ) return formatted_output def __getitem__(self, key: Union[int, slice, Iterable[int]]) -> Dict: # noqa: F811 ... def __getitem__(self, key: str) -> List: # noqa: F811 ... def __getitem__(self, key): # noqa: F811 """Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools).""" return self._getitem(key) def __getitems__(self, keys: List) -> List: """Can be used to get a batch using a list of integers indices.""" batch = self.__getitem__(keys) n_examples = len(batch[next(iter(batch))]) return [{col: array[i] for col, array in batch.items()} for i in range(n_examples)] def cleanup_cache_files(self) -> int: """Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one. Be careful when running this command that no other process is currently using other cache files. Returns: `int`: Number of removed files. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.cleanup_cache_files() 10 ``` """ current_cache_files = [os.path.abspath(cache_file["filename"]) for cache_file in self.cache_files] if not current_cache_files: return 0 cache_directory = os.path.dirname(current_cache_files[0]) logger.info(f"Listing files in {cache_directory}") files: List[str] = os.listdir(cache_directory) files_to_remove = [] for f_name in files: full_name = os.path.abspath(os.path.join(cache_directory, f_name)) if f_name.startswith("cache-") and f_name.endswith(".arrow"): if full_name in current_cache_files: logger.info(f"Keeping currently used cache file at {full_name}") continue files_to_remove.append(full_name) for file_path in files_to_remove: logger.info(f"Removing {file_path}") os.remove(file_path) return len(files_to_remove) def _get_cache_file_path(self, fingerprint): if is_caching_enabled() and self.cache_files: cache_file_name = "cache-" + fingerprint + ".arrow" cache_directory = os.path.dirname(self.cache_files[0]["filename"]) else: cache_file_name = "cache-" + generate_random_fingerprint() + ".arrow" cache_directory = get_temporary_cache_files_directory() cache_file_path = os.path.join(cache_directory, cache_file_name) return cache_file_path def map( self, function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[Union[str, List[str]]] = None, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, fn_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, suffix_template: str = "_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str] = None, desc: Optional[str] = None, ) -> "Dataset": """ Apply a function to all the examples in the table (individually or in batches) and update the table. If your function returns a column that already exists, then it overwrites it. You can specify whether the function should be batched or not with the `batched` parameter: - If batched is `False`, then the function takes 1 example in and should return 1 example. An example is a dictionary, e.g. `{"text": "Hello there !"}`. - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples. A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`. - If batched is `True` and `batch_size` is `n > 1`, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples. Note that the last batch may have less than `n` examples. A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`. 
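        For instance, a batched function can return more examples than it receives. A minimal sketch (the `duplicate`
        function is illustrative; columns whose length no longer matches the output must be removed, hence
        `remove_columns`):

        ```py
        >>> def duplicate(batch):
        ...     return {"text": batch["text"] * 2}
        >>> ds = ds.map(duplicate, batched=True, remove_columns=ds.column_names)
        ```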
Args: function (`Callable`): Function with one of the following signatures: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. If no function is provided, default to identity function: `lambda x: x`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (`bool`, defaults to `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`. If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`. drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`str`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file instead of the automatically generated one. disable_nullable (`bool`, defaults to `False`): Disallow null values in the table. fn_kwargs (`Dict`, *optional*, defaults to `None`): Keyword arguments to be passed to `function`. num_proc (`int`, *optional*, defaults to `None`): Max number of processes when generating cache. 
Already cached shards are loaded sequentially. suffix_template (`str`): If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each. Defaults to `"_{rank:05d}_of_{num_proc:05d}"`. For example, if `cache_file_name` is "processed.arrow", then for `rank=1` and `num_proc=4`, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix. new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. desc (`str`, *optional*, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while mapping examples. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> def add_prefix(example): ... example["text"] = "Review: " + example["text"] ... return example >>> ds = ds.map(add_prefix) >>> ds[0:3]["text"] ['Review: compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .', 'Review: the soundtrack alone is worth the price of admission .', 'Review: rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .'] # process a batch of examples >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True) # set number of processors >>> ds = ds.map(add_prefix, num_proc=4) ``` """ if keep_in_memory and cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `cache_file_name` but not both.") if num_proc is not None and num_proc <= 0: raise ValueError("num_proc must be an integer > 0.") # If the array is empty we do nothing (but we make sure to handle an empty indices mapping and remove the requested columns anyway) if len(self) == 0: if self._indices is not None: # empty indices mapping self = Dataset( self.data.slice(0, 0), info=self.info.copy(), split=self.split, fingerprint=new_fingerprint, ) if remove_columns: return self.remove_columns(remove_columns) else: return self if function is None: function = lambda x: x # noqa: E731 if isinstance(input_columns, str): input_columns = [input_columns] if input_columns is not None: missing_columns = set(input_columns) - set(self._data.column_names) if missing_columns: raise ValueError( f"Input column {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) if isinstance(remove_columns, str): remove_columns = [remove_columns] if remove_columns is not None: missing_columns = set(remove_columns) - set(self._data.column_names) if missing_columns: raise ValueError( f"Column to remove {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() if fn_kwargs is None: fn_kwargs = {} if num_proc is not None and num_proc > len(self): num_proc = len(self) logger.warning( f"num_proc must be <= {len(self)}. Reducing num_proc to {num_proc} for dataset of size {len(self)}." 
) dataset_kwargs = { "shard": self, "function": function, "with_indices": with_indices, "with_rank": with_rank, "input_columns": input_columns, "batched": batched, "batch_size": batch_size, "drop_last_batch": drop_last_batch, "remove_columns": remove_columns, "keep_in_memory": keep_in_memory, "writer_batch_size": writer_batch_size, "features": features, "disable_nullable": disable_nullable, "fn_kwargs": fn_kwargs, } if new_fingerprint is None: # we create a unique hash from the function, # current dataset file and the mapping args transform = format_transform_for_fingerprint(Dataset._map_single) kwargs_for_fingerprint = format_kwargs_for_fingerprint(Dataset._map_single, (), dataset_kwargs) kwargs_for_fingerprint["fingerprint_name"] = "new_fingerprint" new_fingerprint = update_fingerprint(self._fingerprint, transform, kwargs_for_fingerprint) else: validate_fingerprint(new_fingerprint) dataset_kwargs["new_fingerprint"] = new_fingerprint if self.cache_files: if cache_file_name is None: cache_file_name = self._get_cache_file_path(new_fingerprint) dataset_kwargs["cache_file_name"] = cache_file_name def load_processed_shard_from_cache(shard_kwargs): """Load a processed shard from cache if it exists, otherwise throw an error.""" shard = shard_kwargs["shard"] # Check if we've already cached this computation (indexed by a hash) if shard_kwargs["cache_file_name"] is not None: if os.path.exists(shard_kwargs["cache_file_name"]) and load_from_cache_file: info = shard.info.copy() info.features = features info.task_templates = None return Dataset.from_file(shard_kwargs["cache_file_name"], info=info, split=shard.split) raise NonExistentDatasetError num_shards = num_proc if num_proc is not None else 1 if batched and drop_last_batch: pbar_total = len(self) // num_shards // batch_size * num_shards * batch_size else: pbar_total = len(self) shards_done = 0 if num_proc is None or num_proc == 1: transformed_dataset = None try: transformed_dataset = load_processed_shard_from_cache(dataset_kwargs) logger.info(f"Loading cached processed dataset at {dataset_kwargs['cache_file_name']}") except NonExistentDatasetError: pass if transformed_dataset is None: with hf_tqdm( unit=" examples", total=pbar_total, desc=desc or "Map", ) as pbar: for rank, done, content in Dataset._map_single(**dataset_kwargs): if done: shards_done += 1 logger.debug(f"Finished processing shard number {rank} of {num_shards}.") transformed_dataset = content else: pbar.update(content) assert transformed_dataset is not None, "Failed to retrieve the result from map" # update fingerprint if the dataset changed if transformed_dataset._fingerprint != self._fingerprint: transformed_dataset._fingerprint = new_fingerprint return transformed_dataset else: def format_cache_file_name( cache_file_name: Optional[str], rank: Union[int, Literal["*"]], # noqa: F722 ) -> Optional[str]: if not cache_file_name: return cache_file_name sep = cache_file_name.rindex(".") base_name, extension = cache_file_name[:sep], cache_file_name[sep:] if isinstance(rank, int): cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension logger.info(f"Process #{rank} will write at {cache_file_name}") else: cache_file_name = ( base_name + suffix_template.replace("{rank:05d}", "{rank}").format(rank=rank, num_proc=num_proc) + extension ) return cache_file_name def format_new_fingerprint(new_fingerprint: str, rank: int) -> str: new_fingerprint = new_fingerprint + suffix_template.format(rank=rank, num_proc=num_proc) validate_fingerprint(new_fingerprint) 
return new_fingerprint prev_env = deepcopy(os.environ) # check if parallelism if off # from https://github.com/huggingface/tokenizers/blob/bb668bc439dc34389b71dbb8ce0c597f15707b53/tokenizers/src/utils/parallelism.rs#L22 if prev_env.get("TOKENIZERS_PARALLELISM", "false").lower() not in ( "", "off", "false", "f", "no", "n", "0", ): logger.warning("Setting TOKENIZERS_PARALLELISM=false for forked processes.") os.environ["TOKENIZERS_PARALLELISM"] = "false" shards = [ self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory) for rank in range(num_proc) ] kwargs_per_job = [ { **dataset_kwargs, "shard": shards[rank], "cache_file_name": format_cache_file_name(cache_file_name, rank), "rank": rank, "offset": sum(len(s) for s in shards[:rank]), "new_fingerprint": format_new_fingerprint(new_fingerprint, rank), } for rank in range(num_shards) ] transformed_shards = [None] * num_shards for rank in range(num_shards): try: transformed_shards[rank] = load_processed_shard_from_cache(kwargs_per_job[rank]) kwargs_per_job[rank] = None except NonExistentDatasetError: pass kwargs_per_job = [kwargs for kwargs in kwargs_per_job if kwargs is not None] # We try to create a pool with as many workers as dataset not yet cached. if kwargs_per_job: if len(kwargs_per_job) < num_shards: logger.info( f"Reprocessing {len(kwargs_per_job)}/{num_shards} shards because some of them were missing from the cache." ) with Pool(len(kwargs_per_job)) as pool: os.environ = prev_env logger.info(f"Spawning {num_proc} processes") with hf_tqdm( unit=" examples", total=pbar_total, desc=(desc or "Map") + f" (num_proc={num_proc})", ) as pbar: for rank, done, content in iflatmap_unordered( pool, Dataset._map_single, kwargs_iterable=kwargs_per_job ): if done: shards_done += 1 logger.debug(f"Finished processing shard number {rank} of {num_shards}.") transformed_shards[rank] = content else: pbar.update(content) # Avoids PermissionError on Windows (the error: https://github.com/huggingface/datasets/actions/runs/4026734820/jobs/6921621805) for kwargs in kwargs_per_job: del kwargs["shard"] else: logger.info(f"Loading cached processed dataset at {format_cache_file_name(cache_file_name, '*')}") assert ( None not in transformed_shards ), f"Failed to retrieve results from map: result list {transformed_shards} still contains None - at least one worker failed to return its results" logger.info(f"Concatenating {num_proc} shards") result = _concatenate_map_style_datasets(transformed_shards) # update fingerprint if the dataset changed if any( transformed_shard._fingerprint != shard._fingerprint for transformed_shard, shard in zip(transformed_shards, shards) ): result._fingerprint = new_fingerprint else: result._fingerprint = self._fingerprint return result def _map_single( shard: "Dataset", function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[List[str]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[List[str]] = None, keep_in_memory: bool = False, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, fn_kwargs: Optional[dict] = None, new_fingerprint: Optional[str] = None, rank: Optional[int] = None, offset: int = 0, ) -> Iterable[Tuple[int, bool, Union[int, "Dataset"]]]: """Apply a function to all the elements in the table (individually or in batches) and update the table (if function does update 
examples). Args: shard (`datasets.Dataset`): Dataset to map the transform on. function (`Callable`): with one of the following signature: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. If no function is provided, default to identity function: lambda x: x with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`Optional[List[str]]`, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function` batch_size (`int`, optional, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True` `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function` drop_last_batch (`bool`, default: `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. cache_file_name (`str`, optional, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file instead of the automatically generated one. disable_nullable (`bool`, defaults to `False`): Disallow null values in the table. fn_kwargs (`Dict`, optional, defaults to `None`): Keyword arguments to be passed to `function` new_fingerprint (`str`, optional, defaults to `None`): the new fingerprint of the dataset after transform. 
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments rank: (`int`, optional, defaults to `None`): If specified, this is the process rank when doing multiprocessing offset: (`int`, defaults to 0): If specified, this is an offset applied to the indices passed to `function` if `with_indices=True`. """ if fn_kwargs is None: fn_kwargs = {} # If we do batch computation but no batch size is provided, default to the full dataset if batched and (batch_size is None or batch_size <= 0): batch_size = shard.num_rows # We set this variable to True after processing the first example/batch in # `apply_function_on_filtered_inputs` if the map function returns a dict. # If set to False, no new arrow table will be created update_data = None format_kwargs = shard._format_kwargs.copy() # Lazy formatting is only available for the default format (None/python) if not input_columns and shard._format_type is None: format_kwargs["lazy"] = True input_formatter = get_formatter( shard._format_type, features=shard.features, **format_kwargs, ) class NumExamplesMismatchError(Exception): pass def validate_function_output(processed_inputs, indices): """Validate output of the map function.""" if processed_inputs is not None and not isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame)): raise TypeError( f"Provided `function` which is applied to all elements of table returns a variable of type {type(processed_inputs)}. Make sure provided `function` returns a variable of type `dict` (or a pyarrow table) to update the dataset or `None` if you are only interested in side effects." ) elif isinstance(indices, list) and isinstance(processed_inputs, Mapping): allowed_batch_return_types = (list, np.ndarray, pd.Series) if config.POLARS_AVAILABLE and "polars" in sys.modules: import polars as pl allowed_batch_return_types += (pl.Series, pl.DataFrame) if config.TF_AVAILABLE and "tensorflow" in sys.modules: import tensorflow as tf allowed_batch_return_types += (tf.Tensor,) if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch allowed_batch_return_types += (torch.Tensor,) if config.JAX_AVAILABLE and "jax" in sys.modules: import jax.numpy as jnp allowed_batch_return_types += (jnp.ndarray,) all_dict_values_are_lists = all( isinstance(value, allowed_batch_return_types) for value in processed_inputs.values() ) if all_dict_values_are_lists is False: raise TypeError( f"Provided `function` which is applied to all elements of table returns a `dict` of types {[type(x) for x in processed_inputs.values()]}. When using `batched=True`, make sure provided `function` returns a `dict` of types like `{allowed_batch_return_types}`." 
) def apply_function_on_filtered_inputs(pa_inputs, indices, check_same_num_examples=False, offset=0): """Utility to apply the function on a selection of columns.""" nonlocal update_data inputs = format_table( pa_inputs, 0 if not batched else range(pa_inputs.num_rows), format_columns=input_columns, formatter=input_formatter, ) fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns] if offset == 0: effective_indices = indices else: effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset additional_args = () if with_indices: additional_args += (effective_indices,) if with_rank: additional_args += (rank,) processed_inputs = function(*fn_args, *additional_args, **fn_kwargs) if isinstance(processed_inputs, LazyDict): processed_inputs = { k: v for k, v in processed_inputs.data.items() if k not in processed_inputs.keys_to_format } returned_lazy_dict = True else: returned_lazy_dict = False if update_data is None: # Check if the function returns updated examples update_data = isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame)) validate_function_output(processed_inputs, indices) if not update_data: return None # Nothing to update, let's move on if shard._format_type or input_columns: # TODO(QL, MS): ideally the behavior should be the same even if the dataset is formatted (may require major release) inputs_to_merge = dict(zip(pa_inputs.column_names, pa_inputs.itercolumns())) elif isinstance(inputs, LazyDict): inputs_to_merge = { k: (v if k not in inputs.keys_to_format else pa_inputs[k]) for k, v in inputs.data.items() } else: inputs_to_merge = inputs if remove_columns is not None: for column in remove_columns: # `function` can modify input in-place causing column to be already removed. if column in inputs_to_merge: inputs_to_merge.pop(column) if returned_lazy_dict and column in processed_inputs: processed_inputs.pop(column) if check_same_num_examples: input_num_examples = len(pa_inputs) processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))]) if input_num_examples != processed_inputs_num_examples: raise NumExamplesMismatchError() if isinstance(inputs, Mapping) and isinstance(processed_inputs, Mapping): # The .map() transform *updates* the dataset: # the output dictionary contains both the the input data and the output data. # The output dictionary may contain Arrow values from `inputs_to_merge` so that we can re-write them efficiently. 
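                # Note: dict unpacking below means that columns returned by `function` take precedence over the
                # original input columns with the same name, which is how `map` overwrites an existing column.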
return {**inputs_to_merge, **processed_inputs} else: return processed_inputs def init_buffer_and_writer(): # Prepare output buffer and batched writer in memory or on file if we update the table writer_features = features if writer_features is None: writer_features = shard.features update_features = True else: update_features = False if keep_in_memory or cache_file_name is None: buf_writer = pa.BufferOutputStream() tmp_file = None writer = ArrowWriter( features=writer_features, stream=buf_writer, writer_batch_size=writer_batch_size, update_features=update_features, fingerprint=new_fingerprint, disable_nullable=disable_nullable, ) else: buf_writer = None logger.info(f"Caching processed dataset at {cache_file_name}") tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(cache_file_name), delete=False) writer = ArrowWriter( features=writer_features, path=tmp_file.name, writer_batch_size=writer_batch_size, update_features=update_features, fingerprint=new_fingerprint, disable_nullable=disable_nullable, ) return buf_writer, writer, tmp_file num_examples_progress_update = 0 # If `update_data` is True after processing the first example/batch, initalize these resources with `init_buffer_and_writer` buf_writer, writer, tmp_file = None, None, None # Check if Polars is available and import it if so if config.POLARS_AVAILABLE and "polars" in sys.modules: import polars as pl # Optionally initialize the writer as a context manager with contextlib.ExitStack() as stack: try: arrow_formatted_shard = shard.with_format("arrow") # Loop over single examples or batches and write to buffer/file if examples are to be updated if not batched: shard_iterable = enumerate(arrow_formatted_shard) else: num_rows = len(shard) if not drop_last_batch else len(shard) // batch_size * batch_size shard_iterable = zip( range(0, num_rows, batch_size), arrow_formatted_shard.iter(batch_size, drop_last_batch=drop_last_batch), ) if not batched: _time = time.time() for i, example in shard_iterable: example = apply_function_on_filtered_inputs(example, i, offset=offset) if update_data: if i == 0: buf_writer, writer, tmp_file = init_buffer_and_writer() stack.enter_context(writer) if isinstance(example, pa.Table): writer.write_row(example) elif isinstance(example, pd.DataFrame): writer.write_row(pa.Table.from_pandas(example)) elif ( config.POLARS_AVAILABLE and "polars" in sys.modules and isinstance(example, pl.DataFrame) ): writer.write_row(example.to_arrow()) else: writer.write(example) num_examples_progress_update += 1 if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: _time = time.time() yield rank, False, num_examples_progress_update num_examples_progress_update = 0 else: _time = time.time() for i, batch in shard_iterable: num_examples_in_batch = len(batch) indices = list( range(*(slice(i, i + batch_size).indices(shard.num_rows))) ) # Something simpler? try: batch = apply_function_on_filtered_inputs( batch, indices, check_same_num_examples=len(shard.list_indexes()) > 0, offset=offset, ) except NumExamplesMismatchError: raise DatasetTransformationNotAllowedError( "Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index() to remove your index and then re-add it." 
) from None if update_data: if i == 0: buf_writer, writer, tmp_file = init_buffer_and_writer() stack.enter_context(writer) if isinstance(batch, pa.Table): writer.write_table(batch) elif isinstance(batch, pd.DataFrame): writer.write_table(pa.Table.from_pandas(batch)) elif ( config.POLARS_AVAILABLE and "polars" in sys.modules and isinstance(batch, pl.DataFrame) ): writer.write_table(batch.to_arrow()) else: writer.write_batch(batch) num_examples_progress_update += num_examples_in_batch if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: _time = time.time() yield rank, False, num_examples_progress_update num_examples_progress_update = 0 if update_data and writer is not None: writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file except (Exception, KeyboardInterrupt): yield rank, False, num_examples_progress_update if update_data: if writer is not None: writer.finalize() if tmp_file is not None: tmp_file.close() if os.path.exists(tmp_file.name): os.remove(tmp_file.name) raise yield rank, False, num_examples_progress_update if update_data and tmp_file is not None: tmp_file.close() shutil.move(tmp_file.name, cache_file_name) umask = os.umask(0o666) os.umask(umask) os.chmod(cache_file_name, 0o666 & ~umask) if update_data: # Create new Dataset from buffer or file info = shard.info.copy() info.features = writer._features info.task_templates = None if buf_writer is None: yield rank, True, Dataset.from_file(cache_file_name, info=info, split=shard.split) else: yield rank, True, Dataset.from_buffer(buf_writer.getvalue(), info=info, split=shard.split) else: yield rank, True, shard inplace=False, ignore_kwargs=["load_from_cache_file", "cache_file_name", "desc"], version="2.0.1" ) def filter( self, function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, fn_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, suffix_template: str = "_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str] = None, desc: Optional[str] = None, ) -> "Dataset": """Apply a filter function to all the elements in the table in batches and update the table so that the dataset only includes examples according to the filter function. Args: function (`Callable`): Callable with one of the following signatures: - `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) If no function is provided, defaults to an always `True` function: `lambda x: True`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (`bool`, defaults to `False`): Provide process rank to `function`. 
Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`str` or `List[str]`, *optional*): The columns to be passed into `function` as positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched = True`. If `batched = False`, one example per batch is passed to `function`. If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`str`, *optional*): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. fn_kwargs (`dict`, *optional*): Keyword arguments to be passed to `function`. num_proc (`int`, *optional*): Number of processes for multiprocessing. By default it doesn't use multiprocessing. suffix_template (`str`): If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each. For example, if `cache_file_name` is `"processed.arrow"`, then for `rank = 1` and `num_proc = 4`, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix (default `_{rank:05d}_of_{num_proc:05d}`). new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. desc (`str`, *optional*, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while filtering examples. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.filter(lambda x: x["label"] == 1) Dataset({ features: ['text', 'label'], num_rows: 533 }) ``` """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.filter` on a dataset with attached indexes is not allowed. 
You can first run `.drop_index() to remove your index and then re-add it.`" ) if function is None: function = lambda x: True # noqa: E731 if len(self) == 0: return self indices = self.map( function=partial( get_indices_from_mask_function, function, batched, with_indices, with_rank, input_columns, self._indices, ), with_indices=True, with_rank=True, features=Features({"indices": Value("uint64")}), batched=True, batch_size=batch_size, remove_columns=self.column_names, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, fn_kwargs=fn_kwargs, num_proc=num_proc, suffix_template=suffix_template, new_fingerprint=new_fingerprint, input_columns=input_columns, desc=desc or "Filter", ) new_dataset = copy.deepcopy(self) new_dataset._indices = indices.data new_dataset._fingerprint = new_fingerprint return new_dataset def flatten_indices( self, keep_in_memory: bool = False, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, num_proc: Optional[int] = None, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create and cache a new Dataset by flattening the indices mapping. Args: keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. cache_file_name (`str`, *optional*, default `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. features (`Optional[datasets.Features]`, defaults to `None`): Use a specific [`Features`] to store the cache file instead of the automatically generated one. disable_nullable (`bool`, defaults to `False`): Allow null values in the table. num_proc (`int`, optional, default `None`): Max number of processes when generating cache. Already cached shards are loaded sequentially new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments """ return self.map( batched=True, # for speed keep_in_memory=keep_in_memory, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, features=features, disable_nullable=disable_nullable, new_fingerprint=new_fingerprint, desc="Flattening the indices", num_proc=num_proc, ) def _new_dataset_with_indices( self, indices_cache_file_name: Optional[str] = None, indices_buffer: Optional[pa.Buffer] = None, fingerprint: Optional[str] = None, ) -> "Dataset": """Return a new Dataset obtained by adding indices (provided in indices_cache_file_name or in a buffer) to the current Dataset. 
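For illustration (a note added here, inferred from how `select`, `sort` and `shuffle` use this helper): those methods first write the selected row ids to an Arrow "indices" table, then call this method with either `indices_cache_file_name` (a memory-mapped, on-disk mapping) or `indices_buffer` (an in-memory mapping), so the underlying data table is reused unchanged.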
""" if indices_cache_file_name is None and indices_buffer is None: raise ValueError("At least one of indices_cache_file_name or indices_buffer must be provided.") if fingerprint is None: raise ValueError("please specify a fingerprint for the dataset with indices") if indices_cache_file_name is not None: indices_table = MemoryMappedTable.from_file(indices_cache_file_name) else: indices_table = InMemoryTable.from_buffer(indices_buffer) # Return new Dataset object # don't forget to copy the objects return Dataset( self._data, info=self.info.copy(), split=self.split, indices_table=indices_table, fingerprint=fingerprint, ) def select( self, indices: Iterable, keep_in_memory: bool = False, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new dataset with rows selected following the list/array of indices. Args: indices (`range`, `list`, `iterable`, `ndarray` or `Series`): Range, list or 1D-array of integer indices for indexing. If the indices correspond to a contiguous range, the Arrow table is simply sliced. However passing a list of indices that are not contiguous creates indices mapping, which is much less efficient, but still faster than recreating an Arrow table made of the requested rows. keep_in_memory (`bool`, defaults to `False`): Keep the indices mapping in memory instead of writing it to a cache file. indices_cache_file_name (`str`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the indices mapping instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.select(range(4)) Dataset({ features: ['text', 'label'], num_rows: 4 }) ``` """ if keep_in_memory and indices_cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.") if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." 
) # If the array is empty we do nothing if len(self) == 0: return self # If indices is a PyArrow array, we convert to NumPy if isinstance(indices, (pa.Array, pa.ChunkedArray)): indices = indices.to_numpy().astype(np.int64) # Convert generator objects to lists if isinstance(indices, Iterator): indices = list(indices) # If the indices are contiguous, simply slice the arrow table if isinstance(indices, range): if _is_range_contiguous(indices) and indices.start >= 0: start, length = indices.start, indices.stop - indices.start return self._select_contiguous(start, length, new_fingerprint=new_fingerprint) else: try: start = next(iter(indices)) except StopIteration: # if `indices` is an empty iterable, we return an empty dataset return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint) if start >= 0: counter_from_start = itertools.count(start=start) if all(i == j for i, j in zip(indices, counter_from_start)): length = next(counter_from_start) - start return self._select_contiguous(start, length, new_fingerprint=new_fingerprint) # If not contiguous, we need to create a new indices mapping return self._select_with_indices_mapping( indices, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=new_fingerprint, ) def _select_contiguous( self, start: int, length: int, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new dataset with rows from a contiguous slice of data. The slice is defined by that start index and its length. Args: start (`int`): start index. length (`int`): length of the slice to select. new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds._select_contiguous(0, 4) Dataset({ features: ['text', 'label'], num_rows: 4 }) ``` """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self _check_valid_indices_value(start, len(self)) _check_valid_indices_value(start + length - 1, len(self)) if self._indices is None or length == 0: return Dataset( self.data.slice(start, length), info=self.info.copy(), split=self.split, fingerprint=new_fingerprint, ) else: return Dataset( self.data, info=self.info.copy(), split=self.split, indices_table=self._indices.slice(start, length), fingerprint=new_fingerprint, ) def _select_with_indices_mapping( self, indices: Iterable, keep_in_memory: bool = False, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new dataset with rows selected following the list/array of indices. The new dataset is made by creating a new indices mapping on top of the main arrow table. Args: indices (sequence, iterable, range, ndarray or Series): List or 1D-array of integer indices for indexing. keep_in_memory (`bool`, default `False`): Keep the indices mapping in memory instead of writing it to a cache file. indices_cache_file_name (`str`, optional, default `None`): Provide the name of a path for the cache file. 
It is used to store the indices mapping instead of the automatically generated cache file name. writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds._select_with_indices_mapping(range(4)) Dataset({ features: ['text', 'label'], num_rows: 4 }) ``` """ if keep_in_memory and indices_cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.") if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self # Prepare the writer for our indices arrow table if keep_in_memory or indices_cache_file_name is None: buf_writer = pa.BufferOutputStream() tmp_file = None writer = ArrowWriter( stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices" ) else: buf_writer = None logger.info(f"Caching indices mapping at {indices_cache_file_name}") tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(indices_cache_file_name), delete=False) writer = ArrowWriter( path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices" ) indices = indices if isinstance(indices, list) else list(indices) size = len(self) if indices: _check_valid_indices_value(int(max(indices)), size=size) _check_valid_indices_value(int(min(indices)), size=size) else: return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint) indices_array = pa.array(indices, type=pa.uint64()) # Check if we need to convert indices if self._indices is not None: indices_array = self._indices.column(0).take(indices_array) indices_table = pa.Table.from_arrays([indices_array], names=["indices"]) with writer: try: writer.write_table(indices_table) writer.finalize() # close_stream=bool(buf_writer is None)) We only close if we are writing in a file except (Exception, KeyboardInterrupt): if tmp_file is not None: tmp_file.close() if os.path.exists(tmp_file.name): os.remove(tmp_file.name) raise if tmp_file is not None: tmp_file.close() shutil.move(tmp_file.name, indices_cache_file_name) umask = os.umask(0o666) os.umask(umask) os.chmod(indices_cache_file_name, 0o666 & ~umask) # Return new Dataset object if buf_writer is None: return self._new_dataset_with_indices( indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint ) else: return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint) def sort( self, column_names: Union[str, Sequence_[str]], reverse: Union[bool, Sequence_[bool]] = False, kind="deprecated", null_placement: str = "at_end", keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: 
Optional[str] = None, ) -> "Dataset": """Create a new dataset sorted according to a single or multiple columns. Args: column_names (`Union[str, Sequence[str]]`): Column name(s) to sort by. reverse (`Union[bool, Sequence[bool]]`, defaults to `False`): If `True`, sort by descending order rather than ascending. If a single bool is provided, the value is applied to the sorting of all column names. Otherwise a list of bools with the same length and order as column_names must be provided. kind (`str`, *optional*): Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`, The default is `quicksort`. Note that both `stable` and `mergesort` use `timsort` under the covers and, in general, the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility. <Deprecated version="2.8.0"> `kind` was deprecated in version 2.10.0 and will be removed in 3.0.0. </Deprecated> null_placement (`str`, defaults to `at_end`): Put `None` values at the beginning if `at_start` or `first` or at the end if `at_end` or `last` <Added version="1.14.2"/> keep_in_memory (`bool`, defaults to `False`): Keep the sorted indices in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the sorted indices can be identified, use it instead of recomputing. indices_cache_file_name (`str`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the sorted indices instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. Higher value gives smaller cache files, lower value consume less temporary memory. new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset('rotten_tomatoes', split='validation') >>> ds['label'][:10] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] >>> sorted_ds = ds.sort('label') >>> sorted_ds['label'][:10] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] >>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False]) >>> another_sorted_ds['label'][:10] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ``` """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self # Deprecation warning if kind != "deprecated": warnings.warn( "'kind' was deprecated in version 2.10.0 and will be removed in 3.0.0.", category=FutureWarning, ) # Check proper format of and for duplicates in column_names if isinstance(column_names, str): column_names = [column_names] # Check proper format and length of reverse if not isinstance(reverse, bool): if len(reverse) != len(column_names): raise ValueError( "Parameter 'reverse' should be either a boolean or a list of booleans with the same length as 'column_names'." ) else: reverse = [reverse] * len(column_names) # Check whether column name(s) exist in dataset for column in column_names: if not isinstance(column, str) or column not in self._data.column_names: raise ValueError( f"Column '{column}' not found in the dataset. 
Please provide a column selected in: {self._data.column_names}" ) # Change null_placement to conform to pyarrow's sort_indices() while ensuring backwards compatability if null_placement not in ["at_start", "at_end"]: if null_placement == "first": null_placement = "at_start" elif null_placement == "last": null_placement = "at_end" else: raise ValueError( f"null_placement '{null_placement}' is an invalid parameter value. Must be either 'last', 'at_end', 'first' or 'at_start'." ) load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() # Check if we've already cached this computation (indexed by a hash) if self.cache_files: if indices_cache_file_name is None: # we create a unique hash from the function, current dataset file and the mapping args indices_cache_file_name = self._get_cache_file_path(new_fingerprint) if os.path.exists(indices_cache_file_name) and load_from_cache_file: logger.info(f"Loading cached sorted indices for dataset at {indices_cache_file_name}") return self._new_dataset_with_indices( fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name ) sort_table = query_table( table=self._data, key=slice(0, len(self)), indices=self._indices, ) sort_keys = [ (col, "ascending" if not col_reverse else "descending") for col, col_reverse in zip(column_names, reverse) ] indices = pc.sort_indices(sort_table, sort_keys=sort_keys, null_placement=null_placement) return self.select( indices=indices, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=new_fingerprint, ) inplace=False, randomized_function=True, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"] ) def shuffle( self, seed: Optional[int] = None, generator: Optional[np.random.Generator] = None, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new Dataset where the rows are shuffled. Currently shuffling uses numpy random generators. You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64). Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping. However as soon as your [`Dataset`] has an indices mapping, the speed can become 10x slower. This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore. To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping. This may take a lot of time depending of the size of your dataset though: ```python my_dataset[0] # fast my_dataset = my_dataset.shuffle(seed=42) my_dataset[0] # up to 10x slower my_dataset = my_dataset.flatten_indices() # rewrite the shuffled dataset on disk as contiguous chunks of data my_dataset[0] # fast again ``` In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`]. 
It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal: ```python my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=128) for example in enumerate(my_iterable_dataset): # fast pass shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100) for example in enumerate(shuffled_iterable_dataset): # as fast as before pass ``` Args: seed (`int`, *optional*): A seed to initialize the default BitGenerator if `generator=None`. If `None`, then fresh, unpredictable entropy will be pulled from the OS. If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state. generator (`numpy.random.Generator`, *optional*): Numpy random Generator to use to compute the permutation of the dataset rows. If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). keep_in_memory (`bool`, default `False`): Keep the shuffled indices in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the shuffled indices can be identified, use it instead of recomputing. indices_cache_file_name (`str`, *optional*): Provide the name of a path for the cache file. It is used to store the shuffled indices instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds['label'][:10] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # set a seed >>> shuffled_ds = ds.shuffle(seed=42) >>> shuffled_ds['label'][:10] [1, 0, 1, 1, 0, 0, 0, 0, 0, 0] ``` """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self if keep_in_memory and indices_cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.") if seed is not None and generator is not None: raise ValueError("Both `seed` and `generator` were provided. 
Please specify just one of them.") if generator is not None and not isinstance(generator, np.random.Generator): raise ValueError("The provided generator must be an instance of numpy.random.Generator") load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() if generator is None: if seed is None: _, seed, pos, *_ = np.random.get_state() seed = seed[pos] if pos < 624 else seed[0] _ = np.random.random() # do 1 step of rng generator = np.random.default_rng(seed) # Check if we've already cached this computation (indexed by a hash) if self.cache_files: if indices_cache_file_name is None: # we create a unique hash from the function, current dataset file and the mapping args indices_cache_file_name = self._get_cache_file_path(new_fingerprint) if os.path.exists(indices_cache_file_name) and load_from_cache_file: logger.info(f"Loading cached shuffled indices for dataset at {indices_cache_file_name}") return self._new_dataset_with_indices( fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name ) permutation = generator.permutation(len(self)) return self.select( indices=permutation, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name if not keep_in_memory else None, writer_batch_size=writer_batch_size, new_fingerprint=new_fingerprint, ) inplace=False, randomized_function=True, fingerprint_names=["train_new_fingerprint", "test_new_fingerprint"], ignore_kwargs=["load_from_cache_file", "train_indices_cache_file_name", "test_indices_cache_file_name"], ) def train_test_split( self, test_size: Union[float, int, None] = None, train_size: Union[float, int, None] = None, shuffle: bool = True, stratify_by_column: Optional[str] = None, seed: Optional[int] = None, generator: Optional[np.random.Generator] = None, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, train_indices_cache_file_name: Optional[str] = None, test_indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, train_new_fingerprint: Optional[str] = None, test_new_fingerprint: Optional[str] = None, ) -> "DatasetDict": """Return a dictionary ([`datasets.DatasetDict`]) with two random train and test subsets (`train` and `test` `Dataset` splits). Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`. This method is similar to scikit-learn `train_test_split`. Args: test_size (`numpy.random.Generator`, *optional*): Size of the test split If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the test split. If `int`, represents the absolute number of test samples. If `None`, the value is set to the complement of the train size. If `train_size` is also `None`, it will be set to `0.25`. train_size (`numpy.random.Generator`, *optional*): Size of the train split If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the train split. If `int`, represents the absolute number of train samples. If `None`, the value is automatically set to the complement of the test size. shuffle (`bool`, *optional*, defaults to `True`): Whether or not to shuffle the data before splitting. stratify_by_column (`str`, *optional*, defaults to `None`): The column name of labels to be used to perform stratified split of data. seed (`int`, *optional*): A seed to initialize the default BitGenerator if `generator=None`. If `None`, then fresh, unpredictable entropy will be pulled from the OS. 
If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state. generator (`numpy.random.Generator`, *optional*): Numpy random Generator to use to compute the permutation of the dataset rows. If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). keep_in_memory (`bool`, defaults to `False`): Keep the splits indices in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the splits indices can be identified, use it instead of recomputing. train_cache_file_name (`str`, *optional*): Provide the name of a path for the cache file. It is used to store the train split indices instead of the automatically generated cache file name. test_cache_file_name (`str`, *optional*): Provide the name of a path for the cache file. It is used to store the test split indices instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. train_new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the train set after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments test_new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the test set after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds = ds.train_test_split(test_size=0.2, shuffle=True) DatasetDict({ train: Dataset({ features: ['text', 'label'], num_rows: 852 }) test: Dataset({ features: ['text', 'label'], num_rows: 214 }) }) # set a seed >>> ds = ds.train_test_split(test_size=0.2, seed=42) # stratified split >>> ds = load_dataset("imdb",split="train") Dataset({ features: ['text', 'label'], num_rows: 25000 }) >>> ds = ds.train_test_split(test_size=0.2, stratify_by_column="label") DatasetDict({ train: Dataset({ features: ['text', 'label'], num_rows: 20000 }) test: Dataset({ features: ['text', 'label'], num_rows: 5000 }) }) ``` """ from .dataset_dict import DatasetDict # import here because of circular dependency if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.train_test_split` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return DatasetDict({"train": self, "test": self}) if test_size is None and train_size is None: test_size = 0.25 # Safety checks similar to scikit-learn's ones. 
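# Worked example (added comment, not part of the original): with n_samples=1000,
# test_size=0.2 and train_size=None, the checks and arithmetic below yield
# n_test = ceil(0.2 * 1000) = 200 and n_train = 1000 - 200 = 800.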
# (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750) n_samples = len(self) if ( isinstance(test_size, int) and (test_size >= n_samples or test_size <= 0) or isinstance(test_size, float) and (test_size <= 0 or test_size >= 1) ): raise ValueError( f"test_size={test_size} should be either positive and smaller " f"than the number of samples {n_samples} or a float in the (0, 1) range" ) if ( isinstance(train_size, int) and (train_size >= n_samples or train_size <= 0) or isinstance(train_size, float) and (train_size <= 0 or train_size >= 1) ): raise ValueError( f"train_size={train_size} should be either positive and smaller " f"than the number of samples {n_samples} or a float in the (0, 1) range" ) if train_size is not None and not isinstance(train_size, (int, float)): raise ValueError(f"Invalid value for train_size: {train_size} of type {type(train_size)}") if test_size is not None and not isinstance(test_size, (int, float)): raise ValueError(f"Invalid value for test_size: {test_size} of type {type(test_size)}") if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1: raise ValueError( f"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)" " range. Reduce test_size and/or train_size." ) if isinstance(test_size, float): n_test = ceil(test_size * n_samples) elif isinstance(test_size, int): n_test = float(test_size) if isinstance(train_size, float): n_train = floor(train_size * n_samples) elif isinstance(train_size, int): n_train = float(train_size) if train_size is None: n_train = n_samples - n_test elif test_size is None: n_test = n_samples - n_train if n_train + n_test > n_samples: raise ValueError( f"The sum of train_size and test_size = {n_train + n_test}, " "should be smaller than the number of " f"samples {n_samples}. Reduce test_size and/or " "train_size." ) n_train, n_test = int(n_train), int(n_test) if n_train == 0: raise ValueError( f"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the " "resulting train set will be empty. Adjust any of the " "aforementioned parameters." 
) load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() if generator is None and shuffle is True: if seed is None: _, seed, pos, *_ = np.random.get_state() seed = seed[pos] if pos < 624 else seed[0] _ = np.random.random() # do 1 step of rng generator = np.random.default_rng(seed) # Check if we've already cached this computation (indexed by a hash) if self.cache_files: if train_indices_cache_file_name is None or test_indices_cache_file_name is None: # we create a unique hash from the function, current dataset file and the mapping args if train_indices_cache_file_name is None: train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint) if test_indices_cache_file_name is None: test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint) if ( os.path.exists(train_indices_cache_file_name) and os.path.exists(test_indices_cache_file_name) and load_from_cache_file ): logger.info( f"Loading cached split indices for dataset at {train_indices_cache_file_name} and {test_indices_cache_file_name}" ) return DatasetDict( { "train": self._new_dataset_with_indices( fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name ), "test": self._new_dataset_with_indices( fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name ), } ) if not shuffle: if stratify_by_column is not None: raise ValueError("Stratified train/test split is not implemented for `shuffle=False`") train_indices = np.arange(n_train) test_indices = np.arange(n_train, n_train + n_test) else: # stratified partition if stratify_by_column is not None: if stratify_by_column not in self._info.features.keys(): raise ValueError(f"Key {stratify_by_column} not found in {self._info.features.keys()}") if not isinstance(self._info.features[stratify_by_column], ClassLabel): raise ValueError( f"Stratifying by column is only supported for {ClassLabel.__name__} column, and column {stratify_by_column} is {type(self._info.features[stratify_by_column]).__name__}." ) try: train_indices, test_indices = next( stratified_shuffle_split_generate_indices( self.with_format("numpy")[stratify_by_column], n_train, n_test, rng=generator ) ) except Exception as error: if str(error) == "Minimum class count error": raise ValueError( f"The least populated class in {stratify_by_column} column has only 1" " member, which is too few. The minimum" " number of groups for any class cannot" " be less than 2." ) else: raise error # random partition else: permutation = generator.permutation(len(self)) test_indices = permutation[:n_test] train_indices = permutation[n_test : (n_test + n_train)] train_split = self.select( indices=train_indices, keep_in_memory=keep_in_memory, indices_cache_file_name=train_indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=train_new_fingerprint, ) test_split = self.select( indices=test_indices, keep_in_memory=keep_in_memory, indices_cache_file_name=test_indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=test_new_fingerprint, ) return DatasetDict({"train": train_split, "test": test_split}) def shard( self, num_shards: int, index: int, contiguous: bool = False, keep_in_memory: bool = False, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, ) -> "Dataset": """Return the `index`-nth shard from dataset split into `num_shards` pieces. This shards deterministically. 
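For instance (an illustrative sketch of the default, non-contiguous behavior), `dset.shard(3, 0)` on a 10-row dataset keeps rows 0, 3, 6 and 9, while `dset.shard(3, 1)` keeps rows 1, 4 and 7.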
`dset.shard(n, i)` will contain all elements of dset whose index mod `n = i`. `dset.shard(n, i, contiguous=True)` will instead split dset into contiguous chunks, so it can be easily concatenated back together after processing. If `n % i == l`, then the first `l` shards will have length `(n // i) + 1`, and the remaining shards will have length `(n // i)`. `datasets.concatenate([dset.shard(n, i, contiguous=True) for i in range(n)])` will return a dataset with the same order as the original. Be sure to shard before using any randomizing operator (such as `shuffle`). It is best if the shard operator is used early in the dataset pipeline. Args: num_shards (`int`): How many shards to split the dataset into. index (`int`): Which shard to select and return. contiguous: (`bool`, defaults to `False`): Whether to select contiguous blocks of indices for shards. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. indices_cache_file_name (`str`, *optional*): Provide the name of a path for the cache file. It is used to store the indices of each shard instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds Dataset({ features: ['text', 'label'], num_rows: 1066 }) >>> ds.shard(num_shards=2, index=0) Dataset({ features: ['text', 'label'], num_rows: 533 }) ``` """ if not 0 <= index < num_shards: raise ValueError("index should be in [0, num_shards-1]") if contiguous: div = len(self) // num_shards mod = len(self) % num_shards start = div * index + min(index, mod) end = start + div + (1 if index < mod else 0) indices = range(start, end) else: indices = np.arange(index, len(self), num_shards) return self.select( indices=indices, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, ) def export( self, filename: str, format: str = "tfrecord", ): """Writes the Arrow dataset to a TFRecord file. The dataset must already be in tensorflow format. The records will be written with keys from `dataset._format_columns`. Args: filename (`str`): The filename, including the `.tfrecord` extension, to write to. format (`str`, optional, default `"tfrecord"`): The type of output file. Currently this is a no-op, as TFRecords are the only option. This enables a more flexible function signature later. 
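Example (an illustrative sketch added here, not from the original docstring; it assumes the dataset has first been put in NumPy format, which the implementation checks for):

```py
>>> ds = ds.with_format("numpy")
>>> ds.export("my_dataset.tfrecord")
```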
""" try: import tensorflow as tf # noqa: F401 except ImportError: logger.error("Tensorflow needs to be installed to be able to return Tensorflow tensors.") # From https://www.tensorflow.org/tutorials/load_data/tfrecord def _bytes_feature(values): """Returns a bytes_list from a list of string / byte.""" return tf.train.Feature(bytes_list=tf.train.BytesList(value=values)) def _float_feature(values): """Returns a float_list from a list of float / double.""" return tf.train.Feature(float_list=tf.train.FloatList(value=values)) def _int64_feature(values): """Returns an int64_list from a list of bool / enum / int / uint.""" return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def _feature(values: Union[float, int, str, np.ndarray, list]) -> "tf.train.Feature": """Typechecks `values` and returns the corresponding tf.train.Feature.""" if isinstance(values, list): if values and isinstance(values[0], str): return _bytes_feature([v.encode() for v in values]) else: raise ValueError(f"values={values} is empty or contains items that cannot be serialized") elif isinstance(values, np.ndarray): if values.dtype == np.dtype(float): return _float_feature(values) elif values.dtype == np.int64: return _int64_feature(values) elif values.dtype == np.dtype(str) or ( values.dtype == np.dtype(object) and len(values) > 0 and isinstance(values[0], str) ): return _bytes_feature([v.encode() for v in values]) else: raise ValueError( f"values={values} is empty or is an np.ndarray with items of dtype {values[0].dtype}, which cannot be serialized" ) elif hasattr(values, "dtype"): if np.issubdtype(values.dtype, np.floating): return _float_feature([values.item()]) elif np.issubdtype(values.dtype, np.integer): return _int64_feature([values.item()]) elif np.issubdtype(values.dtype, str): return _bytes_feature([values.item().encode()]) else: raise ValueError(f"values={values} has dtype {values.dtype}, which cannot be serialized") else: raise ValueError(f"values={values} are not numpy objects or strings, and so cannot be serialized") def serialize_example(ex): feature = {key: _feature(value) for key, value in ex.items()} example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def tf_serialize_example(ex): tf_string = tf.py_function(serialize_example, (ex,), tf.string) return tf.reshape(tf_string, ()) def generator(): for ex in self: yield serialize_example(ex) if self._format_type != "numpy": raise ValueError("Dataset format must be numpy before exporting") if not filename.endswith(".tfrecord"): raise ValueError("filename {filename} must end with .tfrecord") tf_dataset = tf.data.Dataset.from_generator(generator, output_types=tf.string, output_shapes=()) writer = tf.data.experimental.TFRecordWriter(filename) logger.info(f"Writing TFRecord to {filename}") writer.write(tf_dataset) logger.info(f"Finished writing TFRecord to {filename}") self = None # delete the dataset reference used by tf_dataset def to_csv( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, **to_csv_kwargs, ) -> int: """Exports the dataset to csv Args: path_or_buf (`PathLike` or `FileOrBuffer`): Either a path to a file (e.g. `file.csv`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.csv`), or a BinaryIO, where the dataset will be saved to in the specified format. batch_size (`int`, *optional*): Size of the batch to load in memory and write at once. 
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. num_proc (`int`, *optional*): Number of processes for multiprocessing. By default it doesn't use multiprocessing. `batch_size` in this case defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default value if you have sufficient compute power. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.19.0"/> **to_csv_kwargs (additional keyword arguments): Parameters to pass to pandas's [`pandas.DataFrame.to_csv`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html). <Changed version="2.10.0"> Now, `index` defaults to `False` if not specified. If you would like to write the index, pass `index=True` and also set a name for the index column by passing `index_label`. </Changed> Returns: `int`: The number of characters or bytes written. Example: ```py >>> ds.to_csv("path/to/dataset/directory") ``` """ # Dynamic import to avoid circular dependency from .io.csv import CsvDatasetWriter return CsvDatasetWriter( self, path_or_buf, batch_size=batch_size, num_proc=num_proc, storage_options=storage_options, **to_csv_kwargs, ).write() def to_dict(self, batch_size: Optional[int] = None, batched="deprecated") -> Union[dict, Iterator[dict]]: """Returns the dataset as a Python dict. Can also return a generator for large datasets. Args: batched (`bool`): Set to `True` to return a generator that yields the dataset as batches of `batch_size` rows. Defaults to `False` (returns the whole datasets once). <Deprecated version="2.11.0"> Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead. </Deprecated> batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`. Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. Returns: `dict` or `Iterator[dict]` Example: ```py >>> ds.to_dict() ``` """ if batched != "deprecated": warnings.warn( "'batched' was deprecated in version 2.11.0 and will be removed in version 3.0.0. Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead.", FutureWarning, ) else: batched = False if not batched: return query_table( table=self._data, key=slice(0, len(self)), indices=self._indices, ).to_pydict() else: batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE return ( query_table( table=self._data, key=slice(offset, offset + batch_size), indices=self._indices, ).to_pydict() for offset in range(0, len(self), batch_size) ) def to_list(self) -> list: """Returns the dataset as a Python list. Returns: `list` Example: ```py >>> ds.to_list() ``` """ return query_table( table=self._data, key=slice(0, len(self)), indices=self._indices, ).to_pylist() def to_json( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, **to_json_kwargs, ) -> int: """Export the dataset to JSON Lines or JSON. Args: path_or_buf (`PathLike` or `FileOrBuffer`): Either a path to a file (e.g. `file.json`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.json`), or a BinaryIO, where the dataset will be saved to in the specified format. batch_size (`int`, *optional*): Size of the batch to load in memory and write at once. Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. num_proc (`int`, *optional*): Number of processes for multiprocessing. By default it doesn't use multiprocessing. 
`batch_size` in this case defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default value if you have sufficient compute power. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.19.0"/> **to_json_kwargs (additional keyword arguments): Parameters to pass to pandas's [`pandas.DataFrame.to_json`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html). <Changed version="2.11.0"> Now, `index` defaults to `False` if `orient` is `"split"` or `"table"`. If you would like to write the index, pass `index=True`. </Changed> Returns: `int`: The number of characters or bytes written. Example: ```py >>> ds.to_json("path/to/dataset/directory") ``` """ # Dynamic import to avoid circular dependency from .io.json import JsonDatasetWriter return JsonDatasetWriter( self, path_or_buf, batch_size=batch_size, num_proc=num_proc, storage_options=storage_options, **to_json_kwargs, ).write() def to_pandas( self, batch_size: Optional[int] = None, batched: bool = False ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: """Returns the dataset as a `pandas.DataFrame`. Can also return a generator for large datasets. Args: batched (`bool`): Set to `True` to return a generator that yields the dataset as batches of `batch_size` rows. Defaults to `False` (returns the whole datasets once). batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`. Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. Returns: `pandas.DataFrame` or `Iterator[pandas.DataFrame]` Example: ```py >>> ds.to_pandas() ``` """ if not batched: return query_table( table=self._data, key=slice(0, len(self)), indices=self._indices, ).to_pandas(types_mapper=pandas_types_mapper) else: batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE return ( query_table( table=self._data, key=slice(offset, offset + batch_size), indices=self._indices, ).to_pandas(types_mapper=pandas_types_mapper) for offset in range(0, len(self), batch_size) ) def to_polars( self, batch_size: Optional[int] = None, batched: bool = False, schema_overrides: Optional[dict] = None, rechunk: bool = True, ) -> Union["pl.DataFrame", Iterator["pl.DataFrame"]]: """Returns the dataset as a `polars.DataFrame`. Can also return a generator for large datasets. Args: batched (`bool`): Set to `True` to return a generator that yields the dataset as batches of `batch_size` rows. Defaults to `False` (returns the whole datasets once). batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`. Defaults to `genomicsml.datasets.config.DEFAULT_MAX_BATCH_SIZE`. schema_overrides (`dict`, *optional*): Support type specification or override of one or more columns; note that any dtypes inferred from the schema param will be overridden. rechunk (`bool`): Make sure that all data is in contiguous memory. Defaults to `True`. 
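For large datasets you can also stream the conversion (an illustrative note added here, not from the original docstring): `for df in ds.to_polars(batched=True, batch_size=10_000): ...` yields one `polars.DataFrame` per batch of rows.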
Returns: `polars.DataFrame` or `Iterator[polars.DataFrame]` Example: ```py >>> ds.to_polars() ``` """ if config.POLARS_AVAILABLE: import polars as pl if not batched: return pl.from_arrow( query_table( table=self._data, key=slice(0, len(self)), indices=self._indices if self._indices is not None else None, ), schema_overrides=schema_overrides, rechunk=rechunk, ) else: batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE return ( pl.from_arrow( query_table( table=self._data, key=slice(offset, offset + batch_size), indices=self._indices if self._indices is not None else None, ), schema_overrides=schema_overrides, rechunk=rechunk, ) for offset in range(0, len(self), batch_size) ) else: raise ValueError("Polars needs to be installed to be able to return Polars dataframes.") def to_parquet( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, storage_options: Optional[dict] = None, **parquet_writer_kwargs, ) -> int: """Exports the dataset to parquet Args: path_or_buf (`PathLike` or `FileOrBuffer`): Either a path to a file (e.g. `file.parquet`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.parquet`), or a BinaryIO, where the dataset will be saved to in the specified format. batch_size (`int`, *optional*): Size of the batch to load in memory and write at once. Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.19.0"/> **parquet_writer_kwargs (additional keyword arguments): Parameters to pass to PyArrow's `pyarrow.parquet.ParquetWriter`. Returns: `int`: The number of characters or bytes written. Example: ```py >>> ds.to_parquet("path/to/dataset/directory") ``` """ # Dynamic import to avoid circular dependency from .io.parquet import ParquetDatasetWriter return ParquetDatasetWriter( self, path_or_buf, batch_size=batch_size, storage_options=storage_options, **parquet_writer_kwargs ).write() def to_sql( self, name: str, con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], batch_size: Optional[int] = None, **sql_writer_kwargs, ) -> int: """Exports the dataset to a SQL database. Args: name (`str`): Name of SQL table. con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Connection`): A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) or a SQLite3/SQLAlchemy connection object used to write to a database. batch_size (`int`, *optional*): Size of the batch to load in memory and write at once. Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`. **sql_writer_kwargs (additional keyword arguments): Parameters to pass to pandas's [`pandas.DataFrame.to_sql`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_sql.html). <Changed version="2.11.0"> Now, `index` defaults to `False` if not specified. If you would like to write the index, pass `index=True` and also set a name for the index column by passing `index_label`. </Changed> Returns: `int`: The number of records written. Example: ```py >>> # con provided as a connection URI string >>> ds.to_sql("data", "sqlite:///my_own_db.sql") >>> # con provided as a sqlite3 connection object >>> import sqlite3 >>> con = sqlite3.connect("my_own_db.sql") >>> with con: ... 
ds.to_sql("data", con) ``` """ # Dynamic import to avoid circular dependency from .io.sql import SqlDatasetWriter return SqlDatasetWriter(self, name, con, batch_size=batch_size, **sql_writer_kwargs).write() def _estimate_nbytes(self) -> int: dataset_nbytes = self.data.nbytes # Find decodable columns, because if there are any, we need to # adjust the dataset size computation (needed for sharding) to account for possible external files decodable_columns = [ k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True) ] if decodable_columns: # Approximate the space needed to store the bytes from the external files by analyzing the first 1000 examples extra_nbytes = 0 def extra_nbytes_visitor(array, feature): nonlocal extra_nbytes if isinstance(feature, (Audio, Image)): for x in array.to_pylist(): if x is not None and x["bytes"] is None and x["path"] is not None: size = xgetsize(x["path"]) extra_nbytes += size extra_nbytes -= array.field("path").nbytes table = self.with_format("arrow")[:1000] table_visitor(table, extra_nbytes_visitor) extra_nbytes = extra_nbytes * len(self.data) / len(table) dataset_nbytes = dataset_nbytes + extra_nbytes if self._indices is not None: dataset_nbytes = dataset_nbytes * len(self._indices) / len(self.data) return dataset_nbytes def _generate_tables_from_shards(shards: List["Dataset"], batch_size: int): for shard_idx, shard in enumerate(shards): for pa_table in shard.with_format("arrow").iter(batch_size): yield shard_idx, pa_table def _generate_tables_from_cache_file(filename: str): for batch_idx, batch in enumerate(_memory_mapped_record_batch_reader_from_file(filename)): yield batch_idx, pa.Table.from_batches([batch]) def to_iterable_dataset(self, num_shards: Optional[int] = 1) -> "IterableDataset": """Get an [`datasets.IterableDataset`] from a map-style [`datasets.Dataset`]. This is equivalent to loading a dataset in streaming mode with [`datasets.load_dataset`], but much faster since the data is streamed from local files. Contrary to map-style datasets, iterable datasets are lazy and can only be iterated over (e.g. using a for loop). Since they are read sequentially in training loops, iterable datasets are much faster than map-style datasets. All the transformations applied to iterable datasets like filtering or processing are done on-the-fly when you start iterating over the dataset. Still, it is possible to shuffle an iterable dataset using [`datasets.IterableDataset.shuffle`]. This is a fast approximate shuffling that works best if you have multiple shards and if you specify a buffer size that is big enough. To get the best speed performance, make sure your dataset doesn't have an indices mapping. If this is the case, the data are not read contiguously, which can be slow sometimes. You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed before switching to an iterable dataset. Args: num_shards (`int`, default to `1`): Number of shards to define when instantiating the iterable dataset. This is especially useful for big datasets to be able to shuffle properly, and also to enable fast parallel loading using a PyTorch DataLoader or in distributed setups for example. Shards are defined using [`datasets.Dataset.shard`]: it simply slices the data without writing anything on disk. Returns: [`datasets.IterableDataset`] Example: Basic usage: ```python >>> ids = ds.to_iterable_dataset() >>> for example in ids: ... 
pass ``` With lazy filtering and processing: ```python >>> ids = ds.to_iterable_dataset() >>> ids = ids.filter(filter_fn).map(process_fn) # will filter and process on-the-fly when you start iterating over the iterable dataset >>> for example in ids: ... pass ``` With sharding to enable efficient shuffling: ```python >>> ids = ds.to_iterable_dataset(num_shards=64) # the dataset is split into 64 shards to be iterated over >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer for fast approximate shuffling when you start iterating >>> for example in ids: ... pass ``` With a PyTorch DataLoader: ```python >>> import torch >>> ids = ds.to_iterable_dataset(num_shards=64) >>> ids = ids.filter(filter_fn).map(process_fn) >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards to each worker to load, filter and process when you start iterating >>> for example in ids: ... pass ``` With a PyTorch DataLoader and shuffling: ```python >>> import torch >>> ids = ds.to_iterable_dataset(num_shards=64) >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from the shuffled list of shards to each worker when you start iterating >>> for example in ids: ... pass ``` In a distributed setup like PyTorch DDP with a PyTorch DataLoader and shuffling ```python >>> from datasets.distributed import split_dataset_by_node >>> ids = ds.to_iterable_dataset(num_shards=512) >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating >>> ids = split_dataset_by_node(ids, world_size=8, rank=0) # will keep only 512 / 8 = 64 shards from the shuffled lists of shards when you start iterating >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from this node's list of shards to each worker when you start iterating >>> for example in ids: ... pass ``` With shuffling and multiple epochs: ```python >>> ids = ds.to_iterable_dataset(num_shards=64) >>> ids = ids.shuffle(buffer_size=10_000, seed=42) # will shuffle the shards order and use a shuffle buffer when you start iterating >>> for epoch in range(n_epochs): ... ids.set_epoch(epoch) # will use effective_seed = seed + epoch to shuffle the shards and for the shuffle buffer when you start iterating ... for example in ids: ... pass ``` Feel free to also use [`IterableDataset.set_epoch`] when using a PyTorch DataLoader or in distributed setups. """ from .iterable_dataset import ArrowExamplesIterable, IterableDataset if self._format_type is not None: raise NotImplementedError( "Converting a formatted dataset to a formatted iterable dataset is not implemented yet. Please run `my_dataset = my_dataset.with_format(None)` before calling to_iterable_dataset" ) if num_shards > len(self): raise ValueError( f"Unable to shard a dataset of size {len(self)} into {num_shards} shards (the number of shards exceeds the number of samples)." ) if self._indices is not None: logger.info( "Converting an Arrow dataset to iterable but it has an indices mapping that can make it slower. " "You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed."
) shards = ( [copy.deepcopy(self)] if num_shards == 1 else [ self.shard(num_shards=num_shards, index=shard_idx, contiguous=True) for shard_idx in range(num_shards) ] ) ex_iterable = ArrowExamplesIterable( Dataset._generate_tables_from_shards, kwargs={"shards": shards, "batch_size": config.DEFAULT_MAX_BATCH_SIZE}, ) return IterableDataset(ex_iterable, info=DatasetInfo(features=self.features)) def _push_parquet_shards_to_hub( self, repo_id: str, data_dir: str = "data", split: Optional[str] = None, token: Optional[str] = None, revision: Optional[str] = None, create_pr: Optional[bool] = False, max_shard_size: Optional[Union[int, str]] = None, num_shards: Optional[int] = None, embed_external_files: bool = True, ) -> Tuple[str, str, int, int, List[str], int]: """Pushes the dataset shards as Parquet files to the hub. Returns: additions (`List[CommitOperation]`): list of the `CommitOperationAdd` of the uploaded shards uploaded_size (`int`): number of uploaded bytes to the repository dataset_nbytes (`int`): approximate size in bytes of the uploaded dataset afer uncompression """ # Find decodable columns, because if there are any, we need to: # embed the bytes from the files in the shards decodable_columns = ( [k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)] if embed_external_files else [] ) dataset_nbytes = self._estimate_nbytes() if num_shards is None: max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE) num_shards = int(dataset_nbytes / max_shard_size) + 1 num_shards = max(num_shards, 1) shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards)) if decodable_columns: def shards_with_embedded_external_files(shards): for shard in shards: format = shard.format shard = shard.with_format("arrow") shard = shard.map( embed_table_storage, batched=True, batch_size=1000, keep_in_memory=True, ) shard = shard.with_format(**format) yield shard shards = shards_with_embedded_external_files(shards) api = HfApi(endpoint=config.HF_ENDPOINT, token=token) uploaded_size = 0 additions = [] for index, shard in hf_tqdm( enumerate(shards), desc="Uploading the dataset shards", total=num_shards, ): shard_path_in_repo = f"{data_dir}/{split}-{index:05d}-of-{num_shards:05d}.parquet" buffer = BytesIO() shard.to_parquet(buffer) uploaded_size += buffer.tell() shard_addition = CommitOperationAdd(path_in_repo=shard_path_in_repo, path_or_fileobj=buffer) preupload_lfs_files( api, repo_id=repo_id, additions=[shard_addition], token=token, repo_type="dataset", revision=revision, create_pr=create_pr, ) additions.append(shard_addition) return additions, uploaded_size, dataset_nbytes def push_to_hub( self, repo_id: str, config_name: str = "default", set_default: Optional[bool] = None, split: Optional[str] = None, data_dir: Optional[str] = None, commit_message: Optional[str] = None, commit_description: Optional[str] = None, private: Optional[bool] = False, token: Optional[str] = None, revision: Optional[str] = None, branch="deprecated", create_pr: Optional[bool] = False, max_shard_size: Optional[Union[int, str]] = None, num_shards: Optional[int] = None, embed_external_files: bool = True, ) -> CommitInfo: """Pushes the dataset to the hub as a Parquet dataset. The dataset is pushed using HTTP requests and does not need to have neither git or git-lfs installed. The resulting Parquet files are self-contained by default. 
If your dataset contains [`Image`] or [`Audio`] data, the Parquet files will store the bytes of your images or audio files. You can disable this by setting `embed_external_files` to `False`. Args: repo_id (`str`): The ID of the repository to push to in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace of the logged-in user. config_name (`str`, defaults to "default"): The configuration name (or subset) of a dataset. Defaults to "default". set_default (`bool`, *optional*): Whether to set this configuration as the default one. Otherwise, the default configuration is the one named "default". split (`str`, *optional*): The name of the split that will be given to that dataset. Defaults to `self.split`. data_dir (`str`, *optional*): Directory name that will contain the uploaded data files. Defaults to the `config_name` if different from "default", else "data". <Added version="2.17.0"/> commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"Upload dataset"`. commit_description (`str`, *optional*): Description of the commit that will be created. Additionally, description of the PR if a PR is created (`create_pr` is True). <Added version="2.16.0"/> private (`bool`, *optional*, defaults to `False`): Whether the dataset repository should be set to private or not. Only affects repository creation: a repository that already exists will not be affected by that parameter. token (`str`, *optional*): An optional authentication token for the Hugging Face Hub. If no token is passed, will default to the token saved locally when logging in with `huggingface-cli login`. Will raise an error if no token is passed and the user is not logged-in. revision (`str`, *optional*): Branch to push the uploaded files to. Defaults to the `"main"` branch. <Added version="2.15.0"/> branch (`str`, *optional*): The git branch on which to push the dataset. This defaults to the default branch as specified in your repository, which defaults to `"main"`. <Deprecated version="2.15.0"> `branch` was deprecated in favor of `revision` in version 2.15.0 and will be removed in 3.0.0. </Deprecated> create_pr (`bool`, *optional*, defaults to `False`): Whether to create a PR with the uploaded files or directly commit. <Added version="2.15.0"/> max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). num_shards (`int`, *optional*): Number of shards to write. By default, the number of shards depends on `max_shard_size`. <Added version="2.8.0"/> embed_external_files (`bool`, defaults to `True`): Whether to embed file bytes in the shards. In particular, this will do the following before the push for the fields of type: - [`Audio`] and [`Image`]: remove local path information and embed file content in the Parquet files. Return: huggingface_hub.CommitInfo Example: ```python >>> dataset.push_to_hub("<organization>/<dataset_id>") >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True) >>> dataset.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB") >>> dataset.push_to_hub("<organization>/<dataset_id>", num_shards=1024) ``` If your dataset has multiple splits (e.g. 
train/validation/test): ```python >>> train_dataset.push_to_hub("<organization>/<dataset_id>", split="train") >>> val_dataset.push_to_hub("<organization>/<dataset_id>", split="validation") >>> # later >>> dataset = load_dataset("<organization>/<dataset_id>") >>> train_dataset = dataset["train"] >>> val_dataset = dataset["validation"] ``` If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages): ```python >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en") >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr") >>> # later >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en") >>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr") ``` """ if config_name == "data": raise ValueError("`config_name` cannot be 'data'. Please, choose another name for configuration.") if max_shard_size is not None and num_shards is not None: raise ValueError( "Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both." ) if split is None: split = str(self.split) if self.split is not None else "train" if not re.match(_split_re, split): raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.") if branch != "deprecated": warnings.warn( "'branch' was deprecated in favor of 'revision' in version 2.15.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'revision={branch}' instead.", FutureWarning, ) revision = branch api = HfApi(endpoint=config.HF_ENDPOINT, token=token) repo_url = api.create_repo( repo_id, token=token, repo_type="dataset", private=private, exist_ok=True, ) repo_id = repo_url.repo_id if revision is not None: api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True) if not data_dir: data_dir = config_name if config_name != "default" else "data" # for backward compatibility additions, uploaded_size, dataset_nbytes = self._push_parquet_shards_to_hub( repo_id=repo_id, data_dir=data_dir, split=split, token=token, revision=revision, max_shard_size=max_shard_size, num_shards=num_shards, create_pr=create_pr, embed_external_files=embed_external_files, ) # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern) # and delete old split shards (if they exist) repo_with_dataset_card, repo_with_dataset_infos = False, False deletions, deleted_size = [], 0 repo_splits = [] # use a list to keep the order of the splits repo_files_to_add = [addition.path_in_repo for addition in additions] for repo_file in list_files_info(api, repo_id=repo_id, revision=revision, repo_type="dataset", token=token): if repo_file.rfilename == config.REPOCARD_FILENAME: repo_with_dataset_card = True elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME: repo_with_dataset_infos = True elif ( repo_file.rfilename.startswith(f"{data_dir}/{split}-") and repo_file.rfilename not in repo_files_to_add ): deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename)) deleted_size += repo_file.size elif fnmatch.fnmatch( repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*") ): repo_split = string_to_dict( repo_file.rfilename, glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED), )["split"] if repo_split not in repo_splits: repo_splits.append(repo_split) organization, dataset_name = repo_id.split("/") if "/" in repo_id else (None, repo_id) info_to_dump = 
self.info.copy() info_to_dump.download_checksums = None info_to_dump.download_size = uploaded_size info_to_dump.dataset_size = dataset_nbytes info_to_dump.size_in_bytes = uploaded_size + dataset_nbytes info_to_dump.config_name = config_name info_to_dump.splits = SplitDict( {split: SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name)} ) # get the info from the README to update them if repo_with_dataset_card: dataset_card_path = api.hf_hub_download( repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision ) dataset_card = DatasetCard.load(Path(dataset_card_path)) dataset_card_data = dataset_card.data metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card_data) if dataset_infos and config_name in dataset_infos: repo_info = dataset_infos[config_name] else: repo_info = None # get the deprecated dataset_infos.json to update them elif repo_with_dataset_infos: dataset_card = None dataset_card_data = DatasetCardData() metadata_configs = MetadataConfigs() dataset_infos_path = api.hf_hub_download( repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision ) with open(dataset_infos_path, encoding="utf-8") as f: dataset_infos: dict = json.load(f) dataset_info = dataset_infos.get(config_name, None) if dataset_infos else None repo_info = DatasetInfo.from_dict(dataset_info) if dataset_info else None else: dataset_card = None dataset_card_data = DatasetCardData() metadata_configs = MetadataConfigs() repo_info = None # update the total info to dump from existing info if repo_info is not None: logger.info("Updating downloaded metadata with the new split.") if repo_info.splits and list(repo_info.splits) != [split]: if self._info.features != repo_info.features: raise ValueError( f"Features of the new split don't match the features of the existing splits on the hub: {self._info.features} != {repo_info.features}" ) if split in repo_info.splits: repo_info.download_size -= deleted_size repo_info.dataset_size -= repo_info.splits.get(split, SplitInfo()).num_bytes or 0 repo_info.download_checksums = None repo_info.download_size = (repo_info.download_size or 0) + uploaded_size repo_info.dataset_size = (repo_info.dataset_size or 0) + dataset_nbytes repo_info.size_in_bytes = repo_info.download_size + repo_info.dataset_size repo_info.splits.pop(split, None) repo_info.splits[split] = SplitInfo( split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name ) info_to_dump = repo_info # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed if not metadata_configs and repo_splits: default_metadata_configs_to_dump = { "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits] } MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data) # update the metadata configs if config_name in metadata_configs: metadata_config = metadata_configs[config_name] if "data_files" in metadata_config: data_files_to_dump = sanitize_patterns(metadata_config["data_files"]) else: data_files_to_dump = {} # add the new split data_files_to_dump[split] = [f"{data_dir}/{split}-*"] metadata_config_to_dump = { "data_files": [ { "split": _split, "path": _pattern[0] if len(_pattern) == 1 else _pattern, } for _split, _pattern in data_files_to_dump.items() ] } else: metadata_config_to_dump = {"data_files": [{"split": split, "path": 
f"{data_dir}/{split}-*"}]} if set_default and config_name != "default": if metadata_configs: default_config_name = metadata_configs.get_default_config_name() if default_config_name == "default": raise ValueError( "There exists a configuration named 'default'. To set a different configuration as default, " "rename the 'default' one first." ) else: _ = metadata_configs[default_config_name].pop("default") metadata_config_to_dump["default"] = True # push to the deprecated dataset_infos.json if repo_with_dataset_infos: dataset_infos_path = api.hf_hub_download( repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision ) with open(dataset_infos_path, encoding="utf-8") as f: dataset_infos: dict = json.load(f) dataset_infos[config_name] = asdict(info_to_dump) buffer = BytesIO() buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8")) additions.append( CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer) ) # push to README DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data) MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data) dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card additions.append( CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode()) ) commit_message = commit_message if commit_message is not None else "Upload dataset" if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT: commit_info = api.create_commit( repo_id, operations=additions + deletions, commit_message=commit_message, commit_description=commit_description, token=token, repo_type="dataset", revision=revision, create_pr=create_pr, ) else: logger.info( f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits." ) num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT) for i in range(0, num_commits): operations = additions[ i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT ] + (deletions if i == 0 else []) commit_info = api.create_commit( repo_id, operations=operations, commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})", commit_description=commit_description, token=token, repo_type="dataset", revision=revision, create_pr=create_pr, ) logger.info( f"Commit #{i+1} completed" + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "") + "." ) return commit_info def add_column(self, name: str, column: Union[list, np.array], new_fingerprint: str): """Add column to Dataset. <Added version="1.7"/> Args: name (`str`): Column name. column (`list` or `np.array`): Column data to be added. 
Returns: [`Dataset`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> more_text = ds["text"] >>> ds.add_column(name="text_2", column=more_text) Dataset({ features: ['text', 'label', 'text_2'], num_rows: 1066 }) ``` """ column_table = InMemoryTable.from_pydict({name: column}) _check_column_names(self._data.column_names + column_table.column_names) dataset = self.flatten_indices() if self._indices is not None else self # Concatenate tables horizontally table = concat_tables([dataset._data, column_table], axis=1) # Update features info = dataset.info.copy() info.features.update(Features.from_arrow_schema(column_table.schema)) table = update_metadata_with_features(table, info.features) return Dataset(table, info=info, split=self.split, indices_table=None, fingerprint=new_fingerprint) def add_faiss_index( self, column: str, index_name: Optional[str] = None, device: Optional[int] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, # noqa: F821 batch_size: int = 1000, train_size: Optional[int] = None, faiss_verbose: bool = False, dtype=np.float32, ): """Add a dense index using Faiss for fast retrieval. By default the index is done over the vectors of the specified column. You can specify `device` if you want to run it on GPU (`device` must be the GPU index). You can find more information about Faiss here: - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory) Args: column (`str`): The column of the vectors to add to the index. index_name (`str`, *optional*): The `index_name`/identifier of the index. This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`]. By default it corresponds to `column`. device (`Union[int, List[int]]`, *optional*): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs. If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU. string_factory (`str`, *optional*): This is passed to the index factory of Faiss to create the index. Default index class is `IndexFlat`. metric_type (`int`, *optional*): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`. custom_index (`faiss.Index`, *optional*): Custom Faiss index that you already have instantiated and configured for your needs. batch_size (`int`): Size of the batch to use while adding vectors to the `FaissIndex`. Default value is `1000`. <Added version="2.4.0"/> train_size (`int`, *optional*): If the index needs a training step, specifies how many vectors will be used to train the index. faiss_verbose (`bool`, defaults to `False`): Enable the verbosity of the Faiss index. dtype (`data-type`): The dtype of the numpy arrays that are indexed. Default is `np.float32`. 
Example: ```python >>> ds = datasets.load_dataset('crime_and_punish', split='train') >>> ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line'])}) >>> ds_with_embeddings.add_faiss_index(column='embeddings') >>> # query >>> scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', embed('my new query'), k=10) >>> # save index >>> ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss') >>> ds = datasets.load_dataset('crime_and_punish', split='train') >>> # load index >>> ds.load_faiss_index('embeddings', 'my_index.faiss') >>> # query >>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10) ``` """ with self.formatted_as(type="numpy", columns=[column], dtype=dtype): super().add_faiss_index( column=column, index_name=index_name, device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose, ) return self def add_faiss_index_from_external_arrays( self, external_arrays: np.array, index_name: str, device: Optional[int] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, # noqa: F821 batch_size: int = 1000, train_size: Optional[int] = None, faiss_verbose: bool = False, dtype=np.float32, ): """Add a dense index using Faiss for fast retrieval. The index is created using the vectors of `external_arrays`. You can specify `device` if you want to run it on GPU (`device` must be the GPU index). You can find more information about Faiss here: - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory) Args: external_arrays (`np.array`): If you want to use arrays from outside the lib for the index, you can set `external_arrays`. It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`. index_name (`str`): The `index_name`/identifier of the index. This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`]. device (Optional `Union[int, List[int]]`, *optional*): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs. If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU. string_factory (`str`, *optional*): This is passed to the index factory of Faiss to create the index. Default index class is `IndexFlat`. metric_type (`int`, *optional*): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`. custom_index (`faiss.Index`, *optional*): Custom Faiss index that you already have instantiated and configured for your needs. batch_size (`int`, *optional*): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000. <Added version="2.4.0"/> train_size (`int`, *optional*): If the index needs a training step, specifies how many vectors will be used to train the index. faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index. dtype (`numpy.dtype`): The dtype of the numpy arrays that are indexed. Default is np.float32.
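Example (a minimal sketch; `embeddings` is an assumed NumPy array of vectors that you supply yourself, it is not computed by this method):

```python
>>> import numpy as np
>>> embeddings = np.random.random((100, 768)).astype(np.float32)
>>> ds.add_faiss_index_from_external_arrays(external_arrays=embeddings, index_name='embeddings_index')
>>> scores, retrieved_examples = ds.get_nearest_examples('embeddings_index', embeddings[0], k=10)
```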
""" super().add_faiss_index_from_external_arrays( external_arrays=external_arrays.astype(dtype), index_name=index_name, device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose, ) def add_elasticsearch_index( self, column: str, index_name: Optional[str] = None, host: Optional[str] = None, port: Optional[int] = None, es_client: Optional["elasticsearch.Elasticsearch"] = None, # noqa: F821 es_index_name: Optional[str] = None, es_index_config: Optional[dict] = None, ): """Add a text index using ElasticSearch for fast retrieval. This is done in-place. Args: column (`str`): The column of the documents to add to the index. index_name (`str`, *optional*): The `index_name`/identifier of the index. This is the index name that is used to call [`~Dataset.get_nearest_examples`] or [`Dataset.search`]. By default it corresponds to `column`. host (`str`, *optional*, defaults to `localhost`): Host of where ElasticSearch is running. port (`str`, *optional*, defaults to `9200`): Port of where ElasticSearch is running. es_client (`elasticsearch.Elasticsearch`, *optional*): The elasticsearch client used to create the index if host and port are `None`. es_index_name (`str`, *optional*): The elasticsearch index name used to create the index. es_index_config (`dict`, *optional*): The configuration of the elasticsearch index. Default config is: ``` { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, }, "mappings": { "properties": { "text": { "type": "text", "analyzer": "standard", "similarity": "BM25" }, } }, } ``` Example: ```python >>> es_client = elasticsearch.Elasticsearch() >>> ds = datasets.load_dataset('crime_and_punish', split='train') >>> ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name="my_es_index") >>> scores, retrieved_examples = ds.get_nearest_examples('line', 'my new query', k=10) ``` """ with self.formatted_as(type=None, columns=[column]): super().add_elasticsearch_index( column=column, index_name=index_name, host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config, ) return self def add_item(self, item: dict, new_fingerprint: str): """Add item to Dataset. <Added version="1.7"/> Args: item (`dict`): Item data to be added. 
Returns: [`Dataset`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> new_review = {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'} >>> ds = ds.add_item(new_review) >>> ds[-1] {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'} ``` """ item_table = InMemoryTable.from_pydict({k: [v] for k, v in item.items()}) # We don't call _check_if_features_can_be_aligned here so this cast is "unsafe" dset_features, item_features = _align_features( [self._info.features, Features.from_arrow_schema(item_table.schema)] ) # Cast to align the schemas of the tables and concatenate the tables table = concat_tables( [ self._data.cast(dset_features.arrow_schema) if self._info.features != dset_features else self._data, item_table.cast(item_features.arrow_schema), ] ) if self._indices is None: indices_table = None else: item_indices_array = pa.array([len(self._data)], type=pa.uint64()) item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=["indices"]) indices_table = concat_tables([self._indices, item_indices_table]) info = self.info.copy() info.features.update(item_features) table = update_metadata_with_features(table, info.features) return Dataset( table, info=info, split=self.split, indices_table=indices_table, fingerprint=new_fingerprint, ) def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "Dataset": """Align the dataset's label ID and label name mapping to match an input `label2id` mapping. This is useful when you want to ensure that a model's predicted labels are aligned with the dataset. The alignment in done using the lowercase label names. Args: label2id (`dict`): The label name to ID mapping to align the dataset with. label_column (`str`): The column name of labels to align on. Example: ```python >>> # dataset with mapping {'entailment': 0, 'neutral': 1, 'contradiction': 2} >>> ds = load_dataset("glue", "mnli", split="train") >>> # mapping to align with >>> label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2} >>> ds_aligned = ds.align_labels_with_mapping(label2id, "label") ``` """ # Sanity checks if label_column not in self._data.column_names: raise ValueError(f"Column ({label_column}) not in table columns ({self._data.column_names}).") label_feature = self._info.features[label_column] if not ( isinstance(label_feature, ClassLabel) or (isinstance(label_feature, Sequence) and isinstance(label_feature.feature, ClassLabel)) ): raise ValueError( f"Aligning labels with a mapping is only supported for {ClassLabel.__name__} column or {Sequence.__name__} column with the inner type {ClassLabel.__name__}, and column {label_feature} is of type {type(label_feature).__name__}." 
) # Sort input mapping by ID value to ensure the label names are aligned label2id = dict(sorted(label2id.items(), key=lambda item: item[1])) label_names = list(label2id.keys()) # Some label mappings use uppercase label names so we lowercase them during alignment label2id = {k.lower(): v for k, v in label2id.items()} int2str_function = ( label_feature.int2str if isinstance(label_feature, ClassLabel) else label_feature.feature.int2str ) if isinstance(label_feature, ClassLabel): def process_label_ids(batch): dset_label_names = [ int2str_function(label_id).lower() if label_id is not None else None for label_id in batch[label_column] ] batch[label_column] = [ label2id[label_name] if label_name is not None else None for label_name in dset_label_names ] return batch else: def process_label_ids(batch): dset_label_names = [ [int2str_function(label_id).lower() if label_id is not None else None for label_id in seq] for seq in batch[label_column] ] batch[label_column] = [ [label2id[label_name] if label_name is not None else None for label_name in seq] for seq in dset_label_names ] return batch features = self.features features[label_column] = ( ClassLabel(num_classes=len(label_names), names=label_names) if isinstance(label_feature, ClassLabel) else Sequence(ClassLabel(num_classes=len(label_names), names=label_names)) ) return self.map(process_label_ids, features=features, batched=True, desc="Aligning the labels") def _concatenate_map_style_datasets( dsets: List[Dataset], info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, axis: int = 0, ): """ Converts a list of :class:`Dataset` with the same schema into a single :class:`Dataset`. When you concatenate on axis 0, missing data are filled with None values. Args: dsets (`List[datasets.Dataset]`): List of Datasets to concatenate. info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (:class:`NamedSplit`, optional): Name of the dataset split. axis (``{0, 1}``, default ``0``, meaning over rows): Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns (horizontally). *New in version 1.6.0* Example: ```py >>> ds3 = _concatenate_map_style_datasets([ds1, ds2]) ``` """ # Ignore datasets with no rows if any(dset.num_rows > 0 for dset in dsets): dsets = [dset for dset in dsets if dset.num_rows > 0] else: # Return first dataset if all datasets are empty return dsets[0] # Perform checks (and a potentional cast if axis=0) if axis == 0: _check_if_features_can_be_aligned([dset.features for dset in dsets]) else: if not all(dset.num_rows == dsets[0].num_rows for dset in dsets): raise ValueError("Number of rows must match for all datasets") _check_column_names([col_name for dset in dsets for col_name in dset._data.column_names]) # Find common format or reset format format = dsets[0].format if any(dset.format != format for dset in dsets): format = {} logger.info("Some of the datasets have disparate format. Resetting the format of the concatenated dataset.") def apply_offset_to_indices_table(table, offset): if offset == 0: return table else: array = table["indices"] new_array = pc.add(array, pa.scalar(offset, type=pa.uint64())) return InMemoryTable.from_arrays([new_array], names=["indices"]) # Concatenate indices if they exist if any(dset._indices is not None for dset in dsets): if axis == 0: # Datasets with no indices tables are replaced with a dataset with an indices table in memory. # Applying an offset to an indices table also brings the table in memory. 
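# Illustration: if the first dataset's data table has 3 rows and the second dataset's indices mapping is [4, 0],
# the offset of 3 (the length of the first data table) turns that mapping into [7, 3], so the indices keep
# pointing at the right rows of the concatenated data table.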
indices_tables = [] for i in range(len(dsets)): if dsets[i]._indices is None: dsets[i] = dsets[i]._select_with_indices_mapping(range(len(dsets[i]))) indices_tables.append(dsets[i]._indices) # An offset needs to be applied to the indices before concatenating offset = 0 for i in range(len(dsets)): indices_tables[i] = apply_offset_to_indices_table(indices_tables[i], offset) offset += len(dsets[i]._data) # Concatenate indices indices_tables = [t for t in indices_tables if len(t) > 0] if indices_tables: indices_table = concat_tables(indices_tables) else: indices_table = InMemoryTable.from_batches([], schema=pa.schema({"indices": pa.int64()})) else: if len(dsets) == 1: indices_table = dsets[0]._indices else: for i in range(len(dsets)): dsets[i] = dsets[i].flatten_indices() indices_table = None else: indices_table = None table = concat_tables([dset._data for dset in dsets], axis=axis) if axis == 0: features_list = _align_features([dset.features for dset in dsets]) else: features_list = [dset.features for dset in dsets] table = update_metadata_with_features(table, {k: v for features in features_list for k, v in features.items()}) # Concatenate infos if info is None: info = DatasetInfo.from_merge([dset.info for dset in dsets]) fingerprint = update_fingerprint( "".join(dset._fingerprint for dset in dsets), _concatenate_map_style_datasets, {"info": info, "split": split} ) # Make final concatenated dataset concatenated_dataset = Dataset( table, info=info, split=split, indices_table=indices_table, fingerprint=fingerprint, ) concatenated_dataset.set_format(**format) return concatenated_dataset class DatasetDict(dict): """A dictionary (dict of str: datasets.Dataset) with dataset transforms methods (map, filter, etc.)""" def _check_values_type(self): for dataset in self.values(): if not isinstance(dataset, Dataset): raise TypeError(f"Values in `DatasetDict` should be of type `Dataset` but got type '{type(dataset)}'") def _check_values_features(self): items = list(self.items()) for item_a, item_b in zip(items[:-1], items[1:]): if item_a[1].features != item_b[1].features: raise ValueError( f"All datasets in `DatasetDict` should have the same features but features for '{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}" ) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables for dataset in self.values(): if hasattr(dataset, "_data"): del dataset._data if hasattr(dataset, "_indices"): del dataset._indices def __getitem__(self, k) -> Dataset: if isinstance(k, (str, NamedSplit)) or len(self) == 0: return super().__getitem__(k) else: available_suggested_splits = [ split for split in (Split.TRAIN, Split.TEST, Split.VALIDATION) if split in self ] suggested_split = available_suggested_splits[0] if available_suggested_splits else list(self)[0] raise KeyError( f"Invalid key: {k}. Please first select a split. For example: " f"`my_dataset_dictionary['{suggested_split}'][{k}]`. " f"Available splits: {sorted(self)}" ) def data(self) -> Dict[str, Table]: """The Apache Arrow tables backing each split. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.data ``` """ self._check_values_type() return {k: dataset.data for k, dataset in self.items()} def cache_files(self) -> Dict[str, Dict]: """The cache files containing the Apache Arrow table backing each split. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.cache_files {'test': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-test.arrow'}], 'train': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-train.arrow'}], 'validation': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]} ``` """ self._check_values_type() return {k: dataset.cache_files for k, dataset in self.items()} def num_columns(self) -> Dict[str, int]: """Number of columns in each split of the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.num_columns {'test': 2, 'train': 2, 'validation': 2} ``` """ self._check_values_type() return {k: dataset.num_columns for k, dataset in self.items()} def num_rows(self) -> Dict[str, int]: """Number of rows in each split of the dataset (same as :func:`datasets.Dataset.__len__`). Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.num_rows {'test': 1066, 'train': 8530, 'validation': 1066} ``` """ self._check_values_type() return {k: dataset.num_rows for k, dataset in self.items()} def column_names(self) -> Dict[str, List[str]]: """Names of the columns in each split of the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.column_names {'test': ['text', 'label'], 'train': ['text', 'label'], 'validation': ['text', 'label']} ``` """ self._check_values_type() return {k: dataset.column_names for k, dataset in self.items()} def shape(self) -> Dict[str, Tuple[int]]: """Shape of each split of the dataset (number of columns, number of rows). Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.shape {'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)} ``` """ self._check_values_type() return {k: dataset.shape for k, dataset in self.items()} def flatten(self, max_depth=16) -> "DatasetDict": """Flatten the Apache Arrow Table of each split (nested features are flatten). Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad") >>> ds["train"].features {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} >>> ds.flatten() DatasetDict({ train: Dataset({ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], num_rows: 87599 }) validation: Dataset({ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], num_rows: 10570 }) }) ``` """ self._check_values_type() return DatasetDict({k: dataset.flatten(max_depth=max_depth) for k, dataset in self.items()}) def unique(self, column: str) -> Dict[str, List]: """Return a list of the unique elements in a column for each split. 
This is implemented in the low-level backend and as such, very fast. Args: column (`str`): column name (list all the column names with [`~datasets.Dataset.column_names`]) Returns: Dict[`str`, `list`]: Dictionary of unique elements in the given column. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.unique("label") {'test': [1, 0], 'train': [1, 0], 'validation': [1, 0]} ``` """ self._check_values_type() return {k: dataset.unique(column) for k, dataset in self.items()} def cleanup_cache_files(self) -> Dict[str, int]: """Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one. Be careful when running this command that no other process is currently using other cache files. Return: `Dict` with the number of removed files for each split Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.cleanup_cache_files() {'test': 0, 'train': 0, 'validation': 0} ``` """ self._check_values_type() return {k: dataset.cleanup_cache_files() for k, dataset in self.items()} def __repr__(self): repr = "\n".join([f"{k}: {v}" for k, v in self.items()]) repr = re.sub(r"^", " " * 4, repr, 0, re.M) return f"DatasetDict({{\n{repr}\n}})" def cast(self, features: Features) -> "DatasetDict": """ Cast the dataset to a new set of features. The transformation is applied to all the datasets of the dataset dictionary. You can also remove a column using [`Dataset.map`] with `feature` but `cast` is in-place (doesn't copy the data to a new dataset) and is thus faster. Args: features ([`Features`]): New features to cast the dataset to. The name and order of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds["train"].features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> new_features = ds["train"].features.copy() >>> new_features['label'] = ClassLabel(names=['bad', 'good']) >>> new_features['text'] = Value('large_string') >>> ds = ds.cast(new_features) >>> ds["train"].features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='large_string', id=None)} ``` """ self._check_values_type() return DatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()}) def cast_column(self, column: str, feature) -> "DatasetDict": """Cast column to feature for decoding. Args: column (`str`): Column name. feature ([`Feature`]): Target feature. 
Returns: [`DatasetDict`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds["train"].features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good'])) >>> ds["train"].features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='string', id=None)} ``` """ self._check_values_type() return DatasetDict({k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()}) def remove_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict": """ Remove one or several column(s) from each split in the dataset and the features associated to the column(s). The transformation is applied to all the splits of the dataset dictionary. You can also remove a column using [`Dataset.map`] with `remove_columns` but the present method is in-place (doesn't copy the data to a new dataset) and is thus faster. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to remove. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.remove_columns("label") DatasetDict({ train: Dataset({ features: ['text'], num_rows: 8530 }) validation: Dataset({ features: ['text'], num_rows: 1066 }) test: Dataset({ features: ['text'], num_rows: 1066 }) }) ``` """ self._check_values_type() return DatasetDict({k: dataset.remove_columns(column_names=column_names) for k, dataset in self.items()}) def rename_column(self, original_column_name: str, new_column_name: str) -> "DatasetDict": """ Rename a column in the dataset and move the features associated to the original column under the new column name. The transformation is applied to all the datasets of the dataset dictionary. You can also rename a column using [`~Dataset.map`] with `remove_columns` but the present method: - takes care of moving the original features under the new column name. - doesn't copy the data to a new dataset and is thus much faster. Args: original_column_name (`str`): Name of the column to rename. new_column_name (`str`): New name for the column. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.rename_column("label", "label_new") DatasetDict({ train: Dataset({ features: ['text', 'label_new'], num_rows: 8530 }) validation: Dataset({ features: ['text', 'label_new'], num_rows: 1066 }) test: Dataset({ features: ['text', 'label_new'], num_rows: 1066 }) }) ``` """ self._check_values_type() return DatasetDict( { k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name) for k, dataset in self.items() } ) def rename_columns(self, column_mapping: Dict[str, str]) -> "DatasetDict": """ Rename several columns in the dataset, and move the features associated to the original columns under the new column names. The transformation is applied to all the datasets of the dataset dictionary. Args: column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names. Returns: [`DatasetDict`]: A copy of the dataset with renamed columns. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'}) DatasetDict({ train: Dataset({ features: ['text_new', 'label_new'], num_rows: 8530 }) validation: Dataset({ features: ['text_new', 'label_new'], num_rows: 1066 }) test: Dataset({ features: ['text_new', 'label_new'], num_rows: 1066 }) }) ``` """ self._check_values_type() return DatasetDict({k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()}) def select_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict": """Select one or several column(s) from each split in the dataset and the features associated to the column(s). The transformation is applied to all the splits of the dataset dictionary. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to keep. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.select_columns("text") DatasetDict({ train: Dataset({ features: ['text'], num_rows: 8530 }) validation: Dataset({ features: ['text'], num_rows: 1066 }) test: Dataset({ features: ['text'], num_rows: 1066 }) }) ``` """ self._check_values_type() return DatasetDict({k: dataset.select_columns(column_names=column_names) for k, dataset in self.items()}) def class_encode_column(self, column: str, include_nulls: bool = False) -> "DatasetDict": """Casts the given column as [`~datasets.features.ClassLabel`] and updates the tables. Args: column (`str`): The name of the column to cast. include_nulls (`bool`, defaults to `False`): Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label. <Added version="1.14.2"/> Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("boolq") >>> ds["train"].features {'answer': Value(dtype='bool', id=None), 'passage': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None)} >>> ds = ds.class_encode_column("answer") >>> ds["train"].features {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None), 'passage': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None)} ``` """ self._check_values_type() return DatasetDict( {k: dataset.class_encode_column(column=column, include_nulls=include_nulls) for k, dataset in self.items()} ) def formatted_as( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """To be used in a `with` statement. Set `__getitem__` return format (type and columns). The transformation is applied to all the datasets of the dataset dictionary. Args: type (`str`, *optional*): Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. `None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to False): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. 
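Example (a minimal sketch, assuming `ds` is a `DatasetDict` whose splits contain an `input_ids` column, e.g. produced by a tokenizer):

```py
>>> with ds.formatted_as(type="numpy", columns=["input_ids"]):
...     train_ids = ds["train"][0]["input_ids"]  # formatted as a numpy array inside the block
>>> # the previous format is restored once the block exits
```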
""" self._check_values_type() old_format_type = {k: dataset._format_type for k, dataset in self.items()} old_format_kwargs = {k: dataset._format_kwargs for k, dataset in self.items()} old_format_columns = {k: dataset._format_columns for k, dataset in self.items()} old_output_all_columns = {k: dataset._output_all_columns for k, dataset in self.items()} try: self.set_format(type, columns, output_all_columns, **format_kwargs) yield finally: for k, dataset in self.items(): dataset.set_format( old_format_type[k], old_format_columns[k], old_output_all_columns[k], **old_format_kwargs[k] ) def set_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """Set `__getitem__` return format (type and columns). The format is set for every dataset in the dataset dictionary. Args: type (`str`, *optional*): Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. `None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to False): Keep un-formatted columns as well in the output (as python objects), **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. It is possible to call `map` after calling `set_format`. Since `map` may add new columns, then the list of formatted columns gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted: `new formatted columns = (all columns - previously unformatted columns)` Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True) >>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) >>> ds["train"].format {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], 'format_kwargs': {}, 'output_all_columns': False, 'type': 'numpy'} ``` """ self._check_values_type() for dataset in self.values(): dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs) def reset_format(self): """Reset `__getitem__` return format to python objects and all columns. The transformation is applied to all the datasets of the dataset dictionary. 
Same as `self.set_format()` Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True) >>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) >>> ds["train"].format {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], 'format_kwargs': {}, 'output_all_columns': False, 'type': 'numpy'} >>> ds.reset_format() >>> ds["train"].format {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], 'format_kwargs': {}, 'output_all_columns': False, 'type': None} ``` """ self._check_values_type() for dataset in self.values(): dataset.set_format() def set_transform( self, transform: Optional[Callable], columns: Optional[List] = None, output_all_columns: bool = False, ): """Set ``__getitem__`` return format using this transform. The transform is applied on-the-fly on batches when ``__getitem__`` is called. The transform is set for every dataset in the dataset dictionary As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format` Args: transform (`Callable`, optional): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format` A formatting function is a callable that takes a batch (as a dict) as input and returns a batch. This function is applied right before returning the objects in ``__getitem__``. columns (`List[str]`, optional): columns to format in the output If specified, then the input batch of the transform only contains those columns. output_all_columns (`bool`, default to False): keep un-formatted columns as well in the output (as python objects) If set to True, then the other un-formatted columns are kept with the output of the transform. """ self._check_values_type() for dataset in self.values(): dataset.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform) def with_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ) -> "DatasetDict": """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly. The format `type` (for example "numpy") is used to format batches when using `__getitem__`. The format is set for every dataset in the dataset dictionary. It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`]. Contrary to [`~datasets.DatasetDict.set_format`], `with_format` returns a new [`DatasetDict`] object with new [`Dataset`] objects. Args: type (`str`, *optional*): Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. `None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. 
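As an additional, hedged sketch (it assumes `torch` is installed and the `rotten_tomatoes` dataset is reachable): because `with_format` returns a new object, the original `DatasetDict` keeps its default python format:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> ds_pt = ds.with_format("torch", columns=["label"])
        >>> ds_pt["train"].format["type"]
        'torch'
        >>> ds["train"].format["type"] is None  # the original dataset dict is left untouched
        True
        ```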
Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) >>> ds["train"].format {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], 'format_kwargs': {}, 'output_all_columns': False, 'type': None} >>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) >>> ds["train"].format {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], 'format_kwargs': {}, 'output_all_columns': False, 'type': 'tensorflow'} ``` """ dataset = copy.deepcopy(self) dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs) return dataset def with_transform( self, transform: Optional[Callable], columns: Optional[List] = None, output_all_columns: bool = False, ) -> "DatasetDict": """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called. The transform is set for every dataset in the dataset dictionary As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`]. Contrary to [`~datasets.DatasetDict.set_transform`], `with_transform` returns a new [`DatasetDict`] object with new [`Dataset`] objects. Args: transform (`Callable`, *optional*): User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`]. A formatting function is a callable that takes a batch (as a dict) as input and returns a batch. This function is applied right before returning the objects in `__getitem__`. columns (`List[str]`, *optional*): Columns to format in the output. If specified, then the input batch of the transform only contains those columns. output_all_columns (`bool`, defaults to False): Keep un-formatted columns as well in the output (as python objects). If set to `True`, then the other un-formatted columns are kept with the output of the transform. Example: ```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> def encode(example): ... 
return tokenizer(example['text'], truncation=True, padding=True, return_tensors="pt") >>> ds = ds.with_transform(encode) >>> ds["train"][0] {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), 'input_ids': tensor([ 101, 1103, 2067, 1110, 17348, 1106, 1129, 1103, 6880, 1432, 112, 188, 1207, 107, 14255, 1389, 107, 1105, 1115, 1119, 112, 188, 1280, 1106, 1294, 170, 24194, 1256, 3407, 1190, 170, 11791, 5253, 188, 1732, 7200, 10947, 12606, 2895, 117, 179, 7766, 118, 172, 15554, 1181, 3498, 6961, 3263, 1137, 188, 1566, 7912, 14516, 6997, 119, 102]), 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])} ``` """ dataset = copy.deepcopy(self) dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns) return dataset def map( self, function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[Union[str, List[str]]] = None, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, cache_file_names: Optional[Dict[str, Optional[str]]] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, fn_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, desc: Optional[str] = None, ) -> "DatasetDict": """Apply a function to all the elements in the table (individually or in batches) and update the table (if function does updated examples). The transformation is applied to all the datasets of the dataset dictionary. Args: function (`callable`): with one of the following signature: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` - `function(example: Dict[str, Any], indices: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True` - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True` For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`. with_rank (`bool`, defaults to `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`, `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to `function`. 
drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. You have to provide one `cache_file_name` per dataset in the dataset dictionary. writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. features (`[datasets.Features]`, *optional*, defaults to `None`): Use a specific [`Features`] to store the cache file instead of the automatically generated one. disable_nullable (`bool`, defaults to `False`): Disallow null values in the table. fn_kwargs (`Dict`, *optional*, defaults to `None`): Keyword arguments to be passed to `function` num_proc (`int`, *optional*, defaults to `None`): Number of processes for multiprocessing. By default it doesn't use multiprocessing. desc (`str`, *optional*, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while mapping examples. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> def add_prefix(example): ... example["text"] = "Review: " + example["text"] ... return example >>> ds = ds.map(add_prefix) >>> ds["train"][0:3]["text"] ['Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . 
tolkien\'s middle-earth .', 'Review: effective but too-tepid biopic'] # process a batch of examples >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True) # set number of processors >>> ds = ds.map(add_prefix, num_proc=4) ``` """ self._check_values_type() if cache_file_names is None: cache_file_names = {k: None for k in self} return DatasetDict( { k: dataset.map( function=function, with_indices=with_indices, with_rank=with_rank, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, remove_columns=remove_columns, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_names[k], writer_batch_size=writer_batch_size, features=features, disable_nullable=disable_nullable, fn_kwargs=fn_kwargs, num_proc=num_proc, desc=desc, ) for k, dataset in self.items() } ) def filter( self, function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, cache_file_names: Optional[Dict[str, Optional[str]]] = None, writer_batch_size: Optional[int] = 1000, fn_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, desc: Optional[str] = None, ) -> "DatasetDict": """Apply a filter function to all the elements in the table in batches and update the table so that the dataset only includes examples according to the filter function. The transformation is applied to all the datasets of the dataset dictionary. Args: function (`Callable`): Callable with one of the following signatures: - `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) If no function is provided, defaults to an always `True` function: `lambda x: True`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (`bool`, defaults to `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True` `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to `function`. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if chaching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. 
cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. You have to provide one `cache_file_name` per dataset in the dataset dictionary. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. fn_kwargs (`Dict`, *optional*, defaults to `None`): Keyword arguments to be passed to `function` num_proc (`int`, *optional*, defaults to `None`): Number of processes for multiprocessing. By default it doesn't use multiprocessing. desc (`str`, *optional*, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while filtering examples. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.filter(lambda x: x["label"] == 1) DatasetDict({ train: Dataset({ features: ['text', 'label'], num_rows: 4265 }) validation: Dataset({ features: ['text', 'label'], num_rows: 533 }) test: Dataset({ features: ['text', 'label'], num_rows: 533 }) }) ``` """ self._check_values_type() if cache_file_names is None: cache_file_names = {k: None for k in self} return DatasetDict( { k: dataset.filter( function=function, with_indices=with_indices, with_rank=with_rank, input_columns=input_columns, batched=batched, batch_size=batch_size, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_names[k], writer_batch_size=writer_batch_size, fn_kwargs=fn_kwargs, num_proc=num_proc, desc=desc, ) for k, dataset in self.items() } ) def flatten_indices( self, keep_in_memory: bool = False, cache_file_names: Optional[Dict[str, Optional[str]]] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, num_proc: Optional[int] = None, new_fingerprint: Optional[str] = None, ) -> "DatasetDict": """Create and cache a new Dataset by flattening the indices mapping. Args: keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. cache_file_names (`Dict[str, str]`, *optional*, default `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. You have to provide one `cache_file_name` per dataset in the dataset dictionary. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. features (`Optional[datasets.Features]`, defaults to `None`): Use a specific [`Features`] to store the cache file instead of the automatically generated one. disable_nullable (`bool`, defaults to `False`): Allow null values in the table. num_proc (`int`, optional, default `None`): Max number of processes when generating cache. Already cached shards are loaded sequentially new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. 
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments """ self._check_values_type() if cache_file_names is None: cache_file_names = {k: None for k in self} return DatasetDict( { k: dataset.flatten_indices( keep_in_memory=keep_in_memory, cache_file_name=cache_file_names[k], writer_batch_size=writer_batch_size, features=features, disable_nullable=disable_nullable, num_proc=num_proc, new_fingerprint=new_fingerprint, ) for k, dataset in self.items() } ) def sort( self, column_names: Union[str, Sequence[str]], reverse: Union[bool, Sequence[bool]] = False, kind="deprecated", null_placement: str = "at_end", keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None, writer_batch_size: Optional[int] = 1000, ) -> "DatasetDict": """Create a new dataset sorted according to a single or multiple columns. Args: column_names (`Union[str, Sequence[str]]`): Column name(s) to sort by. reverse (`Union[bool, Sequence[bool]]`, defaults to `False`): If `True`, sort by descending order rather than ascending. If a single bool is provided, the value is applied to the sorting of all column names. Otherwise a list of bools with the same length and order as column_names must be provided. kind (`str`, *optional*): Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`, The default is `quicksort`. Note that both `stable` and `mergesort` use timsort under the covers and, in general, the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility. <Deprecated version="2.8.0"> `kind` was deprecated in version 2.10.0 and will be removed in 3.0.0. </Deprecated> null_placement (`str`, defaults to `at_end`): Put `None` values at the beginning if `at_start` or `first` or at the end if `at_end` or `last` keep_in_memory (`bool`, defaults to `False`): Keep the sorted indices in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the sorted indices can be identified, use it instead of recomputing. indices_cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the indices mapping instead of the automatically generated cache file name. You have to provide one `cache_file_name` per dataset in the dataset dictionary. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. Higher value gives smaller cache files, lower value consume less temporary memory. 
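A small, hedged illustration of `null_placement` on a toy in-memory dictionary (the split name and the values are assumptions made for the sketch):

        ```py
        >>> from datasets import Dataset, DatasetDict
        >>> dd = DatasetDict({"train": Dataset.from_dict({"score": [3, None, 1]})})
        >>> dd.sort("score", null_placement="at_start")["train"]["score"]
        [None, 1, 3]
        >>> dd.sort("score", null_placement="at_end")["train"]["score"]
        [1, 3, None]
        ```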
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset('rotten_tomatoes') >>> ds['train']['label'][:10] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] >>> sorted_ds = ds.sort('label') >>> sorted_ds['train']['label'][:10] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] >>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False]) >>> another_sorted_ds['train']['label'][:10] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ``` """ self._check_values_type() if indices_cache_file_names is None: indices_cache_file_names = {k: None for k in self} return DatasetDict( { k: dataset.sort( column_names=column_names, reverse=reverse, kind=kind, null_placement=null_placement, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, indices_cache_file_name=indices_cache_file_names[k], writer_batch_size=writer_batch_size, ) for k, dataset in self.items() } ) def shuffle( self, seeds: Optional[Union[int, Dict[str, Optional[int]]]] = None, seed: Optional[int] = None, generators: Optional[Dict[str, np.random.Generator]] = None, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None, writer_batch_size: Optional[int] = 1000, ) -> "DatasetDict": """Create a new Dataset where the rows are shuffled. The transformation is applied to all the datasets of the dataset dictionary. Currently shuffling uses numpy random generators. You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64). Args: seeds (`Dict[str, int]` or `int`, *optional*): A seed to initialize the default BitGenerator if `generator=None`. If `None`, then fresh, unpredictable entropy will be pulled from the OS. If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state. You can provide one `seed` per dataset in the dataset dictionary. seed (`int`, *optional*): A seed to initialize the default BitGenerator if `generator=None`. Alias for seeds (a `ValueError` is raised if both are provided). generators (`Dict[str, *optional*, np.random.Generator]`): Numpy random Generator to use to compute the permutation of the dataset rows. If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). You have to provide one `generator` per dataset in the dataset dictionary. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. indices_cache_file_names (`Dict[str, str]`, *optional*): Provide the name of a path for the cache file. It is used to store the indices mappings instead of the automatically generated cache file name. You have to provide one `cache_file_name` per dataset in the dataset dictionary. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. 
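A hedged sketch of per-split seeds (it assumes the `rotten_tomatoes` dataset is reachable; the seed values themselves are arbitrary):

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> shuffled = ds.shuffle(seeds={"train": 42, "validation": 0, "test": 7})
        >>> len(shuffled["train"]) == len(ds["train"])  # shuffling only permutes rows, it never drops any
        True
        ```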
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds["train"]["label"][:10] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # set a seed >>> shuffled_ds = ds.shuffle(seed=42) >>> shuffled_ds["train"]["label"][:10] [0, 1, 0, 1, 0, 0, 0, 0, 0, 0] ``` """ self._check_values_type() if seed is not None and seeds is not None: raise ValueError("Please specify seed or seeds, but not both") seeds = seed if seed is not None else seeds if seeds is None: seeds = {k: None for k in self} elif not isinstance(seeds, dict): seeds = {k: seeds for k in self} if generators is None: generators = {k: None for k in self} if indices_cache_file_names is None: indices_cache_file_names = {k: None for k in self} return DatasetDict( { k: dataset.shuffle( seed=seeds[k], generator=generators[k], keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, indices_cache_file_name=indices_cache_file_names[k], writer_batch_size=writer_batch_size, ) for k, dataset in self.items() } ) def save_to_disk( self, dataset_dict_path: PathLike, fs="deprecated", max_shard_size: Optional[Union[str, int]] = None, num_shards: Optional[Dict[str, int]] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, ): """ Saves a dataset dict to a filesystem using `fsspec.spec.AbstractFileSystem`. For [`Image`] and [`Audio`] data: All the Image() and Audio() data are stored in the arrow files. If you want to store paths or urls, please use the Value("string") type. Args: dataset_dict_path (`str`): Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`) of the dataset dict directory where the dataset dict will be saved to. fs (`fsspec.spec.AbstractFileSystem`, *optional*): Instance of the remote filesystem where the dataset will be saved to. <Deprecated version="2.8.0"> `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0. Please use `storage_options` instead, e.g. `storage_options=fs.storage_options` </Deprecated> max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit (like `"50MB"`). num_shards (`Dict[str, int]`, *optional*): Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`. You need to provide the number of shards for each dataset in the dataset dictionary. Use a dictionary to define a different num_shards for each split. <Added version="2.8.0"/> num_proc (`int`, *optional*, default `None`): Number of processes when downloading and generating the dataset locally. Multiprocessing is disabled by default. <Added version="2.8.0"/> storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. 
<Added version="2.8.0"/> Example: ```python >>> dataset_dict.save_to_disk("path/to/dataset/directory") >>> dataset_dict.save_to_disk("path/to/dataset/directory", max_shard_size="1GB") >>> dataset_dict.save_to_disk("path/to/dataset/directory", num_shards={"train": 1024, "test": 8}) ``` """ if fs != "deprecated": warnings.warn( "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n" "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", FutureWarning, ) storage_options = fs.storage_options fs: fsspec.AbstractFileSystem fs, _ = url_to_fs(dataset_dict_path, **(storage_options or {})) if num_shards is None: num_shards = {k: None for k in self} elif not isinstance(num_shards, dict): raise ValueError( "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}" ) fs.makedirs(dataset_dict_path, exist_ok=True) with fs.open(posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME), "w", encoding="utf-8") as f: json.dump({"splits": list(self)}, f) for k, dataset in self.items(): dataset.save_to_disk( posixpath.join(dataset_dict_path, k), num_shards=num_shards.get(k), max_shard_size=max_shard_size, num_proc=num_proc, storage_options=storage_options, ) def load_from_disk( dataset_dict_path: PathLike, fs="deprecated", keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None, ) -> "DatasetDict": """ Load a dataset that was previously saved using [`save_to_disk`] from a filesystem using `fsspec.spec.AbstractFileSystem`. Args: dataset_dict_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3//my-bucket/dataset/train"`) of the dataset dict directory where the dataset dict will be loaded from. fs (`fsspec.spec.AbstractFileSystem`, *optional*): Instance of the remote filesystem where the dataset will be saved to. <Deprecated version="2.8.0"> `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0. Please use `storage_options` instead, e.g. `storage_options=fs.storage_options` </Deprecated> keep_in_memory (`bool`, defaults to `None`): Whether to copy the dataset in-memory. If `None`, the dataset will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the [improve performance](../cache#improve-performance) section. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.8.0"/> Returns: [`DatasetDict`] Example: ```py >>> ds = load_from_disk('path/to/dataset/directory') ``` """ if fs != "deprecated": warnings.warn( "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n" "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", FutureWarning, ) storage_options = fs.storage_options fs: fsspec.AbstractFileSystem fs, dataset_dict_path = url_to_fs(dataset_dict_path, **(storage_options or {})) dataset_dict_json_path = posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME) dataset_state_json_path = posixpath.join(dataset_dict_path, config.DATASET_STATE_JSON_FILENAME) dataset_info_path = posixpath.join(dataset_dict_path, config.DATASET_INFO_FILENAME) if not fs.isfile(dataset_dict_json_path): if fs.isfile(dataset_info_path) and fs.isfile(dataset_state_json_path): raise FileNotFoundError( f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but got a `Dataset`. 
Please use either `datasets.load_from_disk` or `Dataset.load_from_disk` instead." ) raise FileNotFoundError( f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but provided path is not a `DatasetDict`." ) with fs.open(dataset_dict_json_path, "r", encoding="utf-8") as f: splits = json.load(f)["splits"] dataset_dict = DatasetDict() for k in splits: dataset_dict_split_path = posixpath.join(fs.unstrip_protocol(dataset_dict_path), k) dataset_dict[k] = Dataset.load_from_disk( dataset_dict_split_path, keep_in_memory=keep_in_memory, storage_options=storage_options ) return dataset_dict def from_csv( path_or_paths: Dict[str, PathLike], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ) -> "DatasetDict": """Create [`DatasetDict`] from CSV file(s). Args: path_or_paths (`dict` of path-like): Path(s) of the CSV file(s). features ([`Features`], *optional*): Dataset features. cache_dir (str, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. **kwargs (additional keyword arguments): Keyword arguments to be passed to [`pandas.read_csv`]. Returns: [`DatasetDict`] Example: ```py >>> from datasets import DatasetDict >>> ds = DatasetDict.from_csv({'train': 'path/to/dataset.csv'}) ``` """ # Dynamic import to avoid circular dependency from .io.csv import CsvDatasetReader return CsvDatasetReader( path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs ).read() def from_json( path_or_paths: Dict[str, PathLike], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ) -> "DatasetDict": """Create [`DatasetDict`] from JSON Lines file(s). Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the JSON Lines file(s). features ([`Features`], *optional*): Dataset features. cache_dir (str, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. **kwargs (additional keyword arguments): Keyword arguments to be passed to [`JsonConfig`]. Returns: [`DatasetDict`] Example: ```py >>> from datasets import DatasetDict >>> ds = DatasetDict.from_json({'train': 'path/to/dataset.json'}) ``` """ # Dynamic import to avoid circular dependency from .io.json import JsonDatasetReader return JsonDatasetReader( path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs ).read() def from_parquet( path_or_paths: Dict[str, PathLike], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, columns: Optional[List[str]] = None, **kwargs, ) -> "DatasetDict": """Create [`DatasetDict`] from Parquet file(s). Args: path_or_paths (`dict` of path-like): Path(s) of the CSV file(s). features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. columns (`List[str]`, *optional*): If not `None`, only these columns will be read from the file. A column name may be a prefix of a nested field, e.g. 'a' will select 'a.b', 'a.c', and 'a.d.e'. **kwargs (additional keyword arguments): Keyword arguments to be passed to [`ParquetConfig`]. 
Returns: [`DatasetDict`] Example: ```py >>> from datasets import DatasetDict >>> ds = DatasetDict.from_parquet({'train': 'path/to/dataset/parquet'}) ``` """ # Dynamic import to avoid circular dependency from .io.parquet import ParquetDatasetReader return ParquetDatasetReader( path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, columns=columns, **kwargs, ).read() def from_text( path_or_paths: Dict[str, PathLike], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ) -> "DatasetDict": """Create [`DatasetDict`] from text file(s). Args: path_or_paths (`dict` of path-like): Path(s) of the text file(s). features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. **kwargs (additional keyword arguments): Keyword arguments to be passed to [`TextConfig`]. Returns: [`DatasetDict`] Example: ```py >>> from datasets import DatasetDict >>> ds = DatasetDict.from_text({'train': 'path/to/dataset.txt'}) ``` """ # Dynamic import to avoid circular dependency from .io.text import TextDatasetReader return TextDatasetReader( path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs ).read() def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "DatasetDict": self._check_values_type() return DatasetDict({k: dataset.prepare_for_task(task=task, id=id) for k, dataset in self.items()}) def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "DatasetDict": self._check_values_type() return DatasetDict( { k: dataset.align_labels_with_mapping(label2id=label2id, label_column=label_column) for k, dataset in self.items() } ) def push_to_hub( self, repo_id, config_name: str = "default", set_default: Optional[bool] = None, data_dir: Optional[str] = None, commit_message: Optional[str] = None, commit_description: Optional[str] = None, private: Optional[bool] = False, token: Optional[str] = None, revision: Optional[str] = None, branch="deprecated", create_pr: Optional[bool] = False, max_shard_size: Optional[Union[int, str]] = None, num_shards: Optional[Dict[str, int]] = None, embed_external_files: bool = True, ) -> CommitInfo: """Pushes the [`DatasetDict`] to the hub as a Parquet dataset. The [`DatasetDict`] is pushed using HTTP requests and does not need to have neither git or git-lfs installed. Each dataset split will be pushed independently. The pushed dataset will keep the original split names. The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`] data, the Parquet files will store the bytes of your images or audio files. You can disable this by setting `embed_external_files` to False. Args: repo_id (`str`): The ID of the repository to push to in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace of the logged-in user. config_name (`str`): Configuration name of a dataset. Defaults to "default". set_default (`bool`, *optional*): Whether to set this configuration as the default one. Otherwise, the default configuration is the one named "default". data_dir (`str`, *optional*): Directory name that will contain the uploaded data files. Defaults to the `config_name` if different from "default", else "data". 
<Added version="2.17.0"/> commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"Upload dataset"`. commit_description (`str`, *optional*): Description of the commit that will be created. Additionally, description of the PR if a PR is created (`create_pr` is True). <Added version="2.16.0"/> private (`bool`, *optional*): Whether the dataset repository should be set to private or not. Only affects repository creation: a repository that already exists will not be affected by that parameter. token (`str`, *optional*): An optional authentication token for the Hugging Face Hub. If no token is passed, will default to the token saved locally when logging in with `huggingface-cli login`. Will raise an error if no token is passed and the user is not logged-in. revision (`str`, *optional*): Branch to push the uploaded files to. Defaults to the `"main"` branch. <Added version="2.15.0"/> branch (`str`, *optional*): The git branch on which to push the dataset. This defaults to the default branch as specified in your repository, which defaults to `"main"`. <Deprecated version="2.15.0"> `branch` was deprecated in favor of `revision` in version 2.15.0 and will be removed in 3.0.0. </Deprecated> create_pr (`bool`, *optional*, defaults to `False`): Whether to create a PR with the uploaded files or directly commit. <Added version="2.15.0"/> max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit (like `"500MB"` or `"1GB"`). num_shards (`Dict[str, int]`, *optional*): Number of shards to write. By default, the number of shards depends on `max_shard_size`. Use a dictionary to define a different num_shards for each split. <Added version="2.8.0"/> embed_external_files (`bool`, defaults to `True`): Whether to embed file bytes in the shards. In particular, this will do the following before the push for the fields of type: - [`Audio`] and [`Image`] removes local path information and embed file content in the Parquet files. Return: huggingface_hub.CommitInfo Example: ```python >>> dataset_dict.push_to_hub("<organization>/<dataset_id>") >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True) >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB") >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8}) ``` If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages): ```python >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en") >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr") >>> # later >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en") >>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr") ``` """ if num_shards is None: num_shards = {k: None for k in self} elif not isinstance(num_shards, dict): raise ValueError( "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. 
{{'train': 128, 'test': 4}}" ) if branch != "deprecated": warnings.warn( "'branch' was deprecated in favor of 'revision' in version 2.15.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'revision={branch}' instead.", FutureWarning, ) revision = branch self._check_values_type() self._check_values_features() total_uploaded_size = 0 total_dataset_nbytes = 0 info_to_dump: DatasetInfo = next(iter(self.values())).info.copy() info_to_dump.config_name = config_name info_to_dump.splits = SplitDict() for split in self.keys(): if not re.match(_split_re, split): raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.") api = HfApi(endpoint=config.HF_ENDPOINT, token=token) repo_url = api.create_repo( repo_id, token=token, repo_type="dataset", private=private, exist_ok=True, ) repo_id = repo_url.repo_id if revision is not None: api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True) if not data_dir: data_dir = config_name if config_name != "default" else "data" # for backward compatibility additions = [] for split in self.keys(): logger.info(f"Pushing split {split} to the Hub.") # The split=key needs to be removed before merging split_additions, uploaded_size, dataset_nbytes = self[split]._push_parquet_shards_to_hub( repo_id, data_dir=data_dir, split=split, token=token, revision=revision, create_pr=create_pr, max_shard_size=max_shard_size, num_shards=num_shards.get(split), embed_external_files=embed_external_files, ) additions += split_additions total_uploaded_size += uploaded_size total_dataset_nbytes += dataset_nbytes info_to_dump.splits[split] = SplitInfo(str(split), num_bytes=dataset_nbytes, num_examples=len(self[split])) info_to_dump.download_checksums = None info_to_dump.download_size = total_uploaded_size info_to_dump.dataset_size = total_dataset_nbytes info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern) # and delete old split shards (if they exist) repo_with_dataset_card, repo_with_dataset_infos = False, False repo_splits = [] # use a list to keep the order of the splits deletions = [] repo_files_to_add = [addition.path_in_repo for addition in additions] for repo_file in list_files_info(api, repo_id=repo_id, revision=revision, repo_type="dataset", token=token): if repo_file.rfilename == config.REPOCARD_FILENAME: repo_with_dataset_card = True elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME: repo_with_dataset_infos = True elif ( repo_file.rfilename.startswith(tuple(f"{data_dir}/{split}-" for split in self.keys())) and repo_file.rfilename not in repo_files_to_add ): deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename)) elif fnmatch.fnmatch( repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*") ): repo_split = string_to_dict( repo_file.rfilename, glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED), )["split"] if repo_split not in repo_splits: repo_splits.append(split) # get the info from the README to update them if repo_with_dataset_card: dataset_card_path = api.hf_hub_download( repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision ) dataset_card = DatasetCard.load(Path(dataset_card_path)) dataset_card_data = dataset_card.data metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) # get the deprecated 
dataset_infos.json to update them elif repo_with_dataset_infos: dataset_card = None dataset_card_data = DatasetCardData() metadata_configs = MetadataConfigs() else: dataset_card = None dataset_card_data = DatasetCardData() metadata_configs = MetadataConfigs() # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed if not metadata_configs and repo_splits: default_metadata_configs_to_dump = { "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits] } MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data) metadata_config_to_dump = { "data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()], } if set_default and config_name != "default": if metadata_configs: default_config_name = metadata_configs.get_default_config_name() if default_config_name == "default": raise ValueError( "There exists a configuration named 'default'. To set a different configuration as default, " "rename the 'default' one first." ) else: _ = metadata_configs[default_config_name].pop("default") metadata_config_to_dump["default"] = True # push to the deprecated dataset_infos.json if repo_with_dataset_infos: dataset_infos_path = api.hf_hub_download( repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision ) with open(dataset_infos_path, encoding="utf-8") as f: dataset_infos: dict = json.load(f) dataset_infos[config_name] = asdict(info_to_dump) buffer = BytesIO() buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8")) additions.append( CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer) ) # push to README DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data) MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data) dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card additions.append( CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode()) ) commit_message = commit_message if commit_message is not None else "Upload dataset" if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT: commit_info = api.create_commit( repo_id, operations=additions + deletions, commit_message=commit_message, commit_description=commit_description, token=token, repo_type="dataset", revision=revision, create_pr=create_pr, ) else: logger.info( f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits." ) num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT) for i in range(0, num_commits): operations = additions[ i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT ] + (deletions if i == 0 else []) commit_info = api.create_commit( repo_id, operations=operations, commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})", commit_description=commit_description, token=token, repo_type="dataset", revision=revision, create_pr=create_pr, ) logger.info( f"Commit #{i+1} completed" + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "") + "." 
                )
        return commit_info


class IterableDatasetDict(dict):
    def __repr__(self):
        repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
        repr = re.sub(r"^", " " * 4, repr, 0, re.M)
        return f"IterableDatasetDict({{\n{repr}\n}})"

    def with_format(
        self,
        type: Optional[str] = None,
    ) -> "IterableDatasetDict":
        """
        Return a dataset with the specified format.
        This method only supports the "torch" format for now.
        The format is set for all the datasets of the dataset dictionary.

        Args:
            type (`str`, *optional*, defaults to `None`):
                If set to "torch", the returned dataset
                will be a subclass of `torch.utils.data.IterableDataset` to be used in a `DataLoader`.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
        >>> from transformers import AutoTokenizer
        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        >>> def encode(example):
        ...     return tokenizer(example["text"], truncation=True, padding="max_length")
        >>> ds = ds.map(encode, batched=True, remove_columns=["text"])
        >>> ds = ds.with_format("torch")
        ```
        """
        return IterableDatasetDict({k: dataset.with_format(type=type) for k, dataset in self.items()})

    def map(
        self,
        function: Optional[Callable] = None,
        with_indices: bool = False,
        input_columns: Optional[Union[str, List[str]]] = None,
        batched: bool = False,
        batch_size: int = 1000,
        drop_last_batch: bool = False,
        remove_columns: Optional[Union[str, List[str]]] = None,
        fn_kwargs: Optional[dict] = None,
    ) -> "IterableDatasetDict":
        """
        Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
        If your function returns a column that already exists, then it overwrites it.
        The function is applied on-the-fly on the examples when iterating over the dataset.
        The transformation is applied to all the datasets of the dataset dictionary.

        You can specify whether the function should be batched or not with the `batched` parameter:

        - If batched is `False`, then the function takes 1 example in and should return 1 example.
          An example is a dictionary, e.g. `{"text": "Hello there !"}`.
        - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
          A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
        - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
          Note that the last batch may have less than `n` examples.
          A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.

        Args:
            function (`Callable`, *optional*, defaults to `None`):
                Function applied on-the-fly on the examples when you iterate on the dataset.
                It must have one of the following signatures:

                - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
                - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
                - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
                - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`

                For advanced usage, the function can also return a `pyarrow.Table`.
                Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
                If no function is provided, it defaults to the identity function: `lambda x: x`.
with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`. drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the `batch_size` should be dropped instead of being processed by the function. remove_columns (`[List[str]]`, *optional*, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. fn_kwargs (`Dict`, *optional*, defaults to `None`): Keyword arguments to be passed to `function` Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> def add_prefix(example): ... example["text"] = "Review: " + example["text"] ... return example >>> ds = ds.map(add_prefix) >>> next(iter(ds["train"])) {'label': 1, 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ return IterableDatasetDict( { k: dataset.map( function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, remove_columns=remove_columns, fn_kwargs=fn_kwargs, ) for k, dataset in self.items() } ) def filter( self, function: Optional[Callable] = None, with_indices=False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, fn_kwargs: Optional[dict] = None, ) -> "IterableDatasetDict": """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function. The filtering is done on-the-fly when iterating over the dataset. The filtering is applied to all the datasets of the dataset dictionary. Args: function (`Callable`): Callable with one of the following signatures: - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False` - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False` - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True` - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True` If no function is provided, defaults to an always True function: `lambda x: True`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`. input_columns (`str` or `List[str]`, *optional*): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. 
batched (`bool`, defaults to `False`): Provide batch of examples to `function` batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`. fn_kwargs (`Dict`, *optional*, defaults to `None`): Keyword arguments to be passed to `function` Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> ds = ds.filter(lambda x: x["label"] == 0) >>> list(ds["train"].take(3)) [{'label': 0, 'text': 'Review: simplistic , silly and tedious .'}, {'label': 0, 'text': "Review: it's so laddish and juvenile , only teenage boys could possibly find it funny ."}, {'label': 0, 'text': 'Review: exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}] ``` """ return IterableDatasetDict( { k: dataset.filter( function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs, ) for k, dataset in self.items() } ) def shuffle( self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000 ) -> "IterableDatasetDict": """ Randomly shuffles the elements of this dataset. The shuffling is applied to all the datasets of the dataset dictionary. This dataset fills a buffer with buffer_size elements, then randomly samples elements from this buffer, replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or equal to the full size of the dataset is required. For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will initially select a random element from only the first 1000 elements in the buffer. Once an element is selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element, maintaining the 1000 element buffer. If the dataset is made of several shards, it also does `shuffle` the order of the shards. However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`] then the order of the shards is kept unchanged. Args: seed (`int`, *optional*, defaults to `None`): Random seed that will be used to shuffle the dataset. It is used to sample from the shuffle buffer and also to shuffle the data shards. generator (`numpy.random.Generator`, *optional*): Numpy random Generator to use to compute the permutation of the dataset rows. If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). buffer_size (`int`, defaults to `1000`): Size of the buffer. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> list(ds["train"].take(3)) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . 
tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}] >>> ds = ds.shuffle(seed=42) >>> list(ds["train"].take(3)) [{'label': 1, 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."}, {'label': 1, 'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'}, {'label': 1, 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}] ``` """ return IterableDatasetDict( { k: dataset.shuffle(seed=seed, generator=generator, buffer_size=buffer_size) for k, dataset in self.items() } ) def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDatasetDict": """ Rename a column in the dataset, and move the features associated to the original column under the new column name. The renaming is applied to all the datasets of the dataset dictionary. Args: original_column_name (`str`): Name of the column to rename. new_column_name (`str`): New name for the column. Returns: [`IterableDatasetDict`]: A copy of the dataset with a renamed column. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> ds = ds.rename_column("text", "movie_review") >>> next(iter(ds["train"])) {'label': 1, 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ return IterableDatasetDict( { k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name) for k, dataset in self.items() } ) def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDatasetDict": """ Rename several columns in the dataset, and move the features associated to the original columns under the new column names. The renaming is applied to all the datasets of the dataset dictionary. Args: column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names. Returns: [`IterableDatasetDict`]: A copy of the dataset with renamed columns Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"}) >>> next(iter(ds["train"])) {'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'rating': 1} ``` """ return IterableDatasetDict( {k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()} ) def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict": """ Remove one or several column(s) in the dataset and the features associated to them. The removal is done on-the-fly on the examples when iterating over the dataset. The removal is applied to all the datasets of the dataset dictionary. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to remove. Returns: [`IterableDatasetDict`]: A copy of the dataset object without the columns to remove. 
        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
        >>> ds = ds.remove_columns("label")
        >>> next(iter(ds["train"]))
        {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
        ```
        """
        return IterableDatasetDict({k: dataset.remove_columns(column_names) for k, dataset in self.items()})

    def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
        """Select one or several column(s) in the dataset and the features associated to them.
        The selection is done on-the-fly on the examples when iterating over the dataset.
        The selection is applied to all the datasets of the dataset dictionary.

        Args:
            column_names (`Union[str, List[str]]`):
                Name of the column(s) to keep.

        Returns:
            [`IterableDatasetDict`]: A copy of the dataset object with only selected columns.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
        >>> ds = ds.select_columns("text")
        >>> next(iter(ds["train"]))
        {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
        ```
        """
        return IterableDatasetDict({k: dataset.select_columns(column_names) for k, dataset in self.items()})

    def cast_column(self, column: str, feature: FeatureType) -> "IterableDatasetDict":
        """Cast column to feature for decoding.
        The type casting is applied to all the datasets of the dataset dictionary.

        Args:
            column (`str`):
                Column name.
            feature ([`Feature`]):
                Target feature.

        Returns:
            [`IterableDatasetDict`]

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
        >>> ds["train"].features
        {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)}
        >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
        >>> ds["train"].features
        {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='string', id=None)}
        ```
        """
        return IterableDatasetDict(
            {k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()}
        )

    def cast(
        self,
        features: Features,
    ) -> "IterableDatasetDict":
        """
        Cast the dataset to a new set of features.
        The type casting is applied to all the datasets of the dataset dictionary.

        Args:
            features (`Features`):
                New features to cast the dataset to. The name of the fields in the features must match the current
                column names. The type of the data must also be convertible from one type to the other.
                For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`map`] to update the Dataset.

        Returns:
            [`IterableDatasetDict`]: A copy of the dataset with casted features.
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", streaming=True) >>> ds["train"].features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> new_features = ds["train"].features.copy() >>> new_features['label'] = ClassLabel(names=['bad', 'good']) >>> new_features['text'] = Value('large_string') >>> ds = ds.cast(new_features) >>> ds["train"].features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='large_string', id=None)} ``` """ return IterableDatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()}) class DatasetInfo: """Information about a dataset. `DatasetInfo` documents datasets, including its name, version, and features. See the constructor arguments and properties for a full list. Not all fields are known on construction and may be updated later. Attributes: description (`str`): A description of the dataset. citation (`str`): A BibTeX citation of the dataset. homepage (`str`): A URL to the official homepage for the dataset. license (`str`): The dataset's license. It can be the name of the license or a paragraph containing the terms of the license. features ([`Features`], *optional*): The features used to specify the dataset's column types. post_processed (`PostProcessedInfo`, *optional*): Information regarding the resources of a possible post-processing of a dataset. For example, it can contain the information of an index. supervised_keys (`SupervisedKeysData`, *optional*): Specifies the input feature and the label for supervised learning if applicable for the dataset (legacy from TFDS). builder_name (`str`, *optional*): The name of the `GeneratorBasedBuilder` subclass used to create the dataset. Usually matched to the corresponding script name. It is also the snake_case version of the dataset builder class name. config_name (`str`, *optional*): The name of the configuration derived from [`BuilderConfig`]. version (`str` or [`Version`], *optional*): The version of the dataset. splits (`dict`, *optional*): The mapping between split name and metadata. download_checksums (`dict`, *optional*): The mapping between the URL to download the dataset's checksums and corresponding metadata. download_size (`int`, *optional*): The size of the files to download to generate the dataset, in bytes. post_processing_size (`int`, *optional*): Size of the dataset in bytes after post-processing, if any. dataset_size (`int`, *optional*): The combined size in bytes of the Arrow tables for all splits. size_in_bytes (`int`, *optional*): The combined size in bytes of all files associated with the dataset (downloaded files + Arrow files). task_templates (`List[TaskTemplate]`, *optional*): The task templates to prepare the dataset for during training and evaluation. Each template casts the dataset's [`Features`] to standardized column names and types as detailed in `datasets.tasks`. **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`]. 
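    An illustrative sketch (with made-up field values, not taken from the original docs) of building a `DatasetInfo` by hand and inspecting its features:

    ```py
    >>> from datasets import DatasetInfo, Features, Value
    >>> info = DatasetInfo(
    ...     description="A toy text-classification dataset.",  # hypothetical description, for illustration only
    ...     features=Features({"text": Value("string"), "label": Value("int64")}),
    ... )
    >>> info.features
    {'text': Value(dtype='string', id=None), 'label': Value(dtype='int64', id=None)}
    ```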
""" # Set in the dataset scripts description: str = dataclasses.field(default_factory=str) citation: str = dataclasses.field(default_factory=str) homepage: str = dataclasses.field(default_factory=str) license: str = dataclasses.field(default_factory=str) features: Optional[Features] = None post_processed: Optional[PostProcessedInfo] = None supervised_keys: Optional[SupervisedKeysData] = None task_templates: Optional[List[TaskTemplate]] = None # Set later by the builder builder_name: Optional[str] = None dataset_name: Optional[str] = None # for packaged builders, to be different from builder_name config_name: Optional[str] = None version: Optional[Union[str, Version]] = None # Set later by `download_and_prepare` splits: Optional[dict] = None download_checksums: Optional[dict] = None download_size: Optional[int] = None post_processing_size: Optional[int] = None dataset_size: Optional[int] = None size_in_bytes: Optional[int] = None _INCLUDED_INFO_IN_YAML: ClassVar[List[str]] = [ "config_name", "download_size", "dataset_size", "features", "splits", ] def __post_init__(self): # Convert back to the correct classes when we reload from dict if self.features is not None and not isinstance(self.features, Features): self.features = Features.from_dict(self.features) if self.post_processed is not None and not isinstance(self.post_processed, PostProcessedInfo): self.post_processed = PostProcessedInfo.from_dict(self.post_processed) if self.version is not None and not isinstance(self.version, Version): if isinstance(self.version, str): self.version = Version(self.version) else: self.version = Version.from_dict(self.version) if self.splits is not None and not isinstance(self.splits, SplitDict): self.splits = SplitDict.from_split_dict(self.splits) if self.supervised_keys is not None and not isinstance(self.supervised_keys, SupervisedKeysData): if isinstance(self.supervised_keys, (tuple, list)): self.supervised_keys = SupervisedKeysData(*self.supervised_keys) else: self.supervised_keys = SupervisedKeysData(**self.supervised_keys) # Parse and make a list of templates if self.task_templates is not None: if isinstance(self.task_templates, (list, tuple)): templates = [ template if isinstance(template, TaskTemplate) else task_template_from_dict(template) for template in self.task_templates ] self.task_templates = [template for template in templates if template is not None] elif isinstance(self.task_templates, TaskTemplate): self.task_templates = [self.task_templates] else: template = task_template_from_dict(self.task_templates) self.task_templates = [template] if template is not None else [] # Align task templates with features if self.task_templates is not None: self.task_templates = list(self.task_templates) if self.features is not None: self.task_templates = [ template.align_with_features(self.features) for template in (self.task_templates) ] def write_to_directory( self, dataset_info_dir, pretty_print=False, fs="deprecated", storage_options: Optional[dict] = None ): """Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`. Args: dataset_info_dir (`str`): Destination directory. pretty_print (`bool`, defaults to `False`): If `True`, the JSON will be pretty-printed with the indent level of 4. fs (`fsspec.spec.AbstractFileSystem`, *optional*): Instance of the remote filesystem used to download the files from. <Deprecated version="2.9.0"> `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0. Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`. 
</Deprecated> storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.9.0"/> Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.info.write_to_directory("/path/to/directory/") ``` """ if fs != "deprecated": warnings.warn( "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n" "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", FutureWarning, ) storage_options = fs.storage_options fs: fsspec.AbstractFileSystem fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {})) with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f: self._dump_info(f, pretty_print=pretty_print) if self.license: with fs.open(posixpath.join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f: self._dump_license(f) def _dump_info(self, file, pretty_print=False): """Dump info in `file` file-like object open in bytes mode (to support remote files)""" file.write(json.dumps(asdict(self), indent=4 if pretty_print else None).encode("utf-8")) def _dump_license(self, file): """Dump license in `file` file-like object open in bytes mode (to support remote files)""" file.write(self.license.encode("utf-8")) def from_merge(cls, dataset_infos: List["DatasetInfo"]): dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None] if len(dataset_infos) > 0 and all(dataset_infos[0] == dset_info for dset_info in dataset_infos): # if all dataset_infos are equal we don't need to merge. Just return the first. return dataset_infos[0] description = "\n\n".join(unique_values(info.description for info in dataset_infos)).strip() citation = "\n\n".join(unique_values(info.citation for info in dataset_infos)).strip() homepage = "\n\n".join(unique_values(info.homepage for info in dataset_infos)).strip() license = "\n\n".join(unique_values(info.license for info in dataset_infos)).strip() features = None supervised_keys = None task_templates = None # Find common task templates across all dataset infos all_task_templates = [info.task_templates for info in dataset_infos if info.task_templates is not None] if len(all_task_templates) > 1: task_templates = list(set(all_task_templates[0]).intersection(*all_task_templates[1:])) elif len(all_task_templates): task_templates = list(set(all_task_templates[0])) # If no common task templates found, replace empty list with None task_templates = task_templates if task_templates else None return cls( description=description, citation=citation, homepage=homepage, license=license, features=features, supervised_keys=supervised_keys, task_templates=task_templates, ) def from_directory( cls, dataset_info_dir: str, fs="deprecated", storage_options: Optional[dict] = None ) -> "DatasetInfo": """Create [`DatasetInfo`] from the JSON file in `dataset_info_dir`. This function updates all the dynamically generated fields (num_examples, hash, time of creation,...) of the [`DatasetInfo`]. This will overwrite all previous metadata. Args: dataset_info_dir (`str`): The directory containing the metadata file. This should be the root directory of a specific dataset version. fs (`fsspec.spec.AbstractFileSystem`, *optional*): Instance of the remote filesystem used to download the files from. <Deprecated version="2.9.0"> `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0. Please use `storage_options` instead, e.g. 
`storage_options=fs.storage_options`. </Deprecated> storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.9.0"/> Example: ```py >>> from datasets import DatasetInfo >>> ds_info = DatasetInfo.from_directory("/path/to/directory/") ``` """ if fs != "deprecated": warnings.warn( "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n" "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", FutureWarning, ) storage_options = fs.storage_options fs: fsspec.AbstractFileSystem fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {})) logger.info(f"Loading Dataset info from {dataset_info_dir}") if not dataset_info_dir: raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.") with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "r", encoding="utf-8") as f: dataset_info_dict = json.load(f) return cls.from_dict(dataset_info_dict) def from_dict(cls, dataset_info_dict: dict) -> "DatasetInfo": field_names = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in dataset_info_dict.items() if k in field_names}) def update(self, other_dataset_info: "DatasetInfo", ignore_none=True): self_dict = self.__dict__ self_dict.update( **{ k: copy.deepcopy(v) for k, v in other_dataset_info.__dict__.items() if (v is not None or not ignore_none) } ) def copy(self) -> "DatasetInfo": return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()}) def _to_yaml_dict(self) -> dict: yaml_dict = {} dataset_info_dict = asdict(self) for key in dataset_info_dict: if key in self._INCLUDED_INFO_IN_YAML: value = getattr(self, key) if hasattr(value, "_to_yaml_list"): # Features, SplitDict yaml_dict[key] = value._to_yaml_list() elif hasattr(value, "_to_yaml_string"): # Version yaml_dict[key] = value._to_yaml_string() else: yaml_dict[key] = value return yaml_dict def _from_yaml_dict(cls, yaml_data: dict) -> "DatasetInfo": yaml_data = copy.deepcopy(yaml_data) if yaml_data.get("features") is not None: yaml_data["features"] = Features._from_yaml_list(yaml_data["features"]) if yaml_data.get("splits") is not None: yaml_data["splits"] = SplitDict._from_yaml_list(yaml_data["splits"]) field_names = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in yaml_data.items() if k in field_names}) class IterableDataset(DatasetInfoMixin): """A Dataset backed by an iterable.""" def __init__( self, ex_iterable: _BaseExamplesIterable, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, formatting: Optional[FormattingConfig] = None, shuffling: Optional[ShufflingConfig] = None, distributed: Optional[DistributedConfig] = None, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None, format_type="deprecated", ): if distributed and distributed.world_size > 1 and shuffling and shuffling._original_seed is None: raise RuntimeError( "The dataset doesn't have a fixed random seed across nodes to shuffle and split the list of dataset shards by node. " "Please pass e.g. `seed=42` in `.shuffle()` to make all the nodes use the same seed. " ) if format_type != "deprecated": warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. " help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead." 
warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2) formatting = FormattingConfig(format_type=format_type) info = info.copy() if info is not None else DatasetInfo() DatasetInfoMixin.__init__(self, info=info, split=split) self._ex_iterable = ex_iterable self._formatting = formatting self._shuffling = shuffling self._distributed = distributed self._epoch = 0 self._token_per_repo_id: Dict[str, Union[str, bool, None]] = token_per_repo_id or {} _maybe_add_torch_iterable_dataset_parent_class(self.__class__) def __repr__(self): return f"IterableDataset({{\n features: {list(self._info.features.keys()) if self._info.features is not None else 'Unknown'},\n n_shards: {self.n_shards}\n}})" def __getstate__(self): return self.__dict__ def __setstate__(self, d): self.__dict__ = d # Re-add torch iterable dataset as a parent class, since dynamically added parent classes are not kept when pickling _maybe_add_torch_iterable_dataset_parent_class(self.__class__) def _head(self, n=5): return _examples_to_batch(list(self.take(n))) def _effective_generator(self): if self._shuffling and self._epoch == 0: return self._shuffling.generator elif self._shuffling: # Create effective seed using self._epoch (we subtract in order to avoir overflow in long_scalars) effective_seed = deepcopy(self._shuffling.generator).integers(0, 1 << 63) - self._epoch effective_seed = (1 << 63) + effective_seed if effective_seed < 0 else effective_seed return np.random.default_rng(effective_seed) else: raise ValueError("This dataset is not shuffled") def n_shards(self) -> int: if self._distributed and self._ex_iterable.n_shards % self._distributed.world_size == 0: return self._ex_iterable.n_shards // self._distributed.world_size return self._ex_iterable.n_shards def _iter_pytorch(self): ex_iterable = self._prepare_ex_iterable_for_iteration() # Fix for fsspec when using multiprocess to avoid hanging in the ML training loop. (only required for fsspec >= 0.9.0) # See https://github.com/fsspec/gcsfs/issues/379 fsspec.asyn.reset_lock() # check if there aren't too many workers import torch.utils.data worker_info = torch.utils.data.get_worker_info() if self._is_main_process() and ex_iterable.n_shards < worker_info.num_workers: logger.warning( f"Too many dataloader workers: {worker_info.num_workers} (max is dataset.n_shards={ex_iterable.n_shards}). " f"Stopping {worker_info.num_workers - ex_iterable.n_shards} dataloader workers." ) logger.info( f"To parallelize data loading, we give each process some shards (or data sources) to process. " f"Therefore it's unnecessary to have a number of workers greater than dataset.n_shards={ex_iterable.n_shards}. " f"To enable more parallelism, please split the dataset in more files than {ex_iterable.n_shards}." ) # split workload _log_prefix = f"node#{self._distributed.rank} " if self._distributed else "" shards_indices = ex_iterable.split_shard_indices_by_worker(worker_info.id, worker_info.num_workers) if shards_indices: logger.debug( f"{_log_prefix}dataloader worker#{worker_info.id}, ': Starting to iterate over {len(shards_indices)}/{ex_iterable.n_shards} shards." 
) ex_iterable = ex_iterable.shard_data_sources(worker_id=worker_info.id, num_workers=worker_info.num_workers) if self._formatting: formatter = get_formatter(self._formatting.format_type, features=self.features) format_dict = ( formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects ) else: format_dict = None if self._formatting and (ex_iterable.iter_arrow or self._formatting == "arrow"): if ex_iterable.iter_arrow: iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1) else: iterator = _convert_to_arrow(ex_iterable, batch_size=1) for key, pa_table in iterator: yield formatter.format_row(pa_table) return else: for key, example in ex_iterable: if self.features: # `IterableDataset` automatically fills missing columns with None. # This is done with `_apply_feature_types_on_example`. example = _apply_feature_types_on_example( example, self.features, token_per_repo_id=self._token_per_repo_id ) yield format_dict(example) if format_dict else example logger.debug( f"{_log_prefix}dataloader worker#{worker_info.id}, ': Finished iterating over {len(shards_indices)}/{ex_iterable.n_shards} shards." ) else: logger.debug( f"{_log_prefix}dataloader worker#{worker_info.id}, ': Stopping... Number of dataset shards < num_workers ({ex_iterable.n_shards}<{worker_info.num_workers})." ) def _is_main_process(self): if self._distributed and self._distributed.rank > 0: return False if "torch" in sys.modules: import torch.utils.data worker_info = torch.utils.data.get_worker_info() if worker_info is not None and worker_info.id > 0: return False return True def _prepare_ex_iterable_for_iteration(self) -> _BaseExamplesIterable: if self._shuffling: ex_iterable = self._ex_iterable.shuffle_data_sources(self._effective_generator()) else: ex_iterable = self._ex_iterable if self._distributed: rank = self._distributed.rank world_size = self._distributed.world_size if ex_iterable.n_shards % world_size == 0: if self._is_main_process(): n_shards_per_node = ex_iterable.n_shards // world_size plural = "s" if n_shards_per_node > 1 else "" logger.info( f"Assigning {n_shards_per_node} shard{plural} (or data source{plural}) of the dataset to each node." ) ex_iterable = ex_iterable.shard_data_sources(rank, world_size) else: if self._is_main_process(): logger.info( f"Assigning 1 out of {world_size} examples of the dataset to each node. The others are skipped during the iteration." ) logger.info( f"It is more optimized to distribute the dataset shards (or data sources) across nodes. " f"You can do that by using a dataset with number of shards that is a factor of world_size={world_size}. 
" f"The current dataset has {ex_iterable.n_shards} which is not a factor of {world_size}" ) ex_iterable = StepExamplesIterable(ex_iterable, step=world_size, offset=rank) return ex_iterable def __iter__(self): if "torch" in sys.modules: import torch.utils.data worker_info = torch.utils.data.get_worker_info() if isinstance(self, torch.utils.data.IterableDataset) and worker_info is not None: # We're a torch.utils.data.IterableDataset in a PyTorch worker process yield from self._iter_pytorch() return ex_iterable = self._prepare_ex_iterable_for_iteration() if self._formatting: formatter = get_formatter(self._formatting.format_type, features=self.features) format_dict = ( formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects ) else: format_dict = None if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"): if ex_iterable.iter_arrow: iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1) else: iterator = _convert_to_arrow(ex_iterable, batch_size=1) for key, pa_table in iterator: yield formatter.format_row(pa_table) return for key, example in ex_iterable: if self.features: # `IterableDataset` automatically fills missing columns with None. # This is done with `_apply_feature_types_on_example`. example = _apply_feature_types_on_example( example, self.features, token_per_repo_id=self._token_per_repo_id ) yield format_dict(example) if format_dict else example def iter(self, batch_size: int, drop_last_batch: bool = False): """Iterate through the batches of size `batch_size`. Args: batch_size (:obj:`int`): size of each batch to yield. drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be dropped """ if self._formatting: formatter = get_formatter(self._formatting.format_type, features=self.features) format_dict = ( formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects ) else: format_dict = None ex_iterable = self._prepare_ex_iterable_for_iteration() if self._formatting and (ex_iterable.iter_arrow or self._formatting == "arrow"): if ex_iterable.iter_arrow: iterator = _batch_arrow_tables( ex_iterable.iter_arrow(), batch_size=batch_size, drop_last_batch=drop_last_batch ) else: iterator = _convert_to_arrow(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch) for key, pa_table in iterator: yield formatter.format_batch(pa_table) return iterator = iter(ex_iterable) for key, example in iterator: # If batched, first build the batch examples = [example] + [example for key, example in islice(iterator, batch_size - 1)] if drop_last_batch and len(examples) < batch_size: # ignore last batch return batch = _examples_to_batch(examples) if self.features: # `IterableDataset` automatically fills missing columns with None. # This is done with `_apply_feature_types_on_batch`. batch = _apply_feature_types_on_batch(batch, self.features, token_per_repo_id=self._token_per_repo_id) yield format_dict(batch) if format_dict else batch def from_generator( generator: Callable, features: Optional[Features] = None, gen_kwargs: Optional[dict] = None, ) -> "IterableDataset": """Create an Iterable Dataset from a generator. Args: generator (`Callable`): A generator function that `yields` examples. features (`Features`, *optional*): Dataset features. gen_kwargs(`dict`, *optional*): Keyword arguments to be passed to the `generator` callable. You can define a sharded iterable dataset by passing the list of shards in `gen_kwargs`. 
This can be used to improve shuffling and when iterating over the dataset with multiple workers. Returns: `IterableDataset` Example: ```py >>> def gen(): ... yield {"text": "Good", "label": 0} ... yield {"text": "Bad", "label": 1} ... >>> ds = IterableDataset.from_generator(gen) ``` ```py >>> def gen(shards): ... for shard in shards: ... with open(shard) as f: ... for line in f: ... yield {"line": line} ... >>> shards = [f"data{i}.txt" for i in range(32)] >>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards}) >>> ds = ds.shuffle(seed=42, buffer_size=10_000) # shuffles the shards order + uses a shuffle buffer >>> from torch.utils.data import DataLoader >>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4) # give each worker a subset of 32/4=8 shards ``` """ from .io.generator import GeneratorDatasetInputStream return GeneratorDatasetInputStream( generator=generator, features=features, gen_kwargs=gen_kwargs, streaming=True, ).read() def from_spark( df: "pyspark.sql.DataFrame", split: Optional[NamedSplit] = None, features: Optional[Features] = None, **kwargs, ) -> "IterableDataset": """Create an IterableDataset from Spark DataFrame. The dataset is streamed to the driver in batches. Args: df (`pyspark.sql.DataFrame`): The DataFrame containing the desired data. split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. Returns: [`IterableDataset`] Example: ```py >>> df = spark.createDataFrame( >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]], >>> columns=["id", "name"], >>> ) >>> ds = IterableDataset.from_spark(df) ``` """ from .io.spark import SparkDatasetReader if sys.platform == "win32": raise EnvironmentError("IterableDataset.from_spark is not currently supported on Windows") return SparkDatasetReader( df, split=split, features=features, streaming=True, **kwargs, ).read() def from_file(filename: str) -> "IterableDataset": """Instantiate a IterableDataset from Arrow table at filename. Args: filename (`str`): File name of the dataset. Returns: [`IterableDataset`] """ pa_table_schema = read_schema_from_file(filename) inferred_features = Features.from_arrow_schema(pa_table_schema) ex_iterable = ArrowExamplesIterable(Dataset._generate_tables_from_cache_file, kwargs={"filename": filename}) return IterableDataset(ex_iterable=ex_iterable, info=DatasetInfo(features=inferred_features)) def with_format( self, type: Optional[str] = None, ) -> "IterableDataset": """ Return a dataset with the specified format. Supported formats: "arrow", or None for regular python objects. The other formats are currently not implemented. 
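        The summary above mentions only `"arrow"` or `None`, but the `type` argument description below and the
        `from_generator` example earlier in this file show `with_format("torch")` feeding a PyTorch `DataLoader`.
        A minimal sketch along those lines (assuming `torch` is installed):

        ```py
        >>> from datasets import load_dataset
        >>> from torch.utils.data import DataLoader
        >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
        >>> dataloader = DataLoader(ds.with_format("torch"), batch_size=4)  # label values come back as torch tensors
        ```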
Args: type (`str`, optional, default None): if set to "torch", the returned dataset will be a subclass of torch.utils.data.IterableDataset to be used in a DataLoader """ type = get_format_type_from_alias(type) # TODO(QL): add format_kwargs # TODO(QL): add format_columns and return_all_columns # TODO(QL): add pandas format return IterableDataset( ex_iterable=self._ex_iterable, info=self._info.copy(), split=self._split, formatting=FormattingConfig(format_type=type), shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def map( self, function: Optional[Callable] = None, with_indices: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[Union[str, List[str]]] = None, features: Optional[Features] = None, fn_kwargs: Optional[dict] = None, ) -> "IterableDataset": """ Apply a function to all the examples in the iterable dataset (individually or in batches) and update them. If your function returns a column that already exists, then it overwrites it. The function is applied on-the-fly on the examples when iterating over the dataset. You can specify whether the function should be batched or not with the `batched` parameter: - If batched is `False`, then the function takes 1 example in and should return 1 example. An example is a dictionary, e.g. `{"text": "Hello there !"}`. - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples. A batch is a dictionary, e.g. a batch of 1 example is {"text": ["Hello there !"]}. - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples. Note that the last batch may have less than `n` examples. A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`. Args: function (`Callable`, *optional*, defaults to `None`): Function applied on-the-fly on the examples when you iterate on the dataset. It must have one of the following signatures: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True` - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True` For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. If no function is provided, default to identity function: `lambda x: x`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`. 
`batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to `function`. drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`[List[str]]`, *optional*, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. features (`[Features]`, *optional*, defaults to `None`): Feature types of the resulting dataset. fn_kwargs (`Dict`, *optional*, default `None`): Keyword arguments to be passed to `function`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> def add_prefix(example): ... example["text"] = "Review: " + example["text"] ... return example >>> ds = ds.map(add_prefix) >>> list(ds.take(3)) [{'label': 1, 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'Review: effective but too-tepid biopic'}] ``` """ if isinstance(input_columns, str): input_columns = [input_columns] if isinstance(remove_columns, str): remove_columns = [remove_columns] if function is None: function = identity_func if fn_kwargs is None: fn_kwargs = {} ex_iterable = MappedExamplesIterable( TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id) if self._info.features is not None else self._ex_iterable, function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, remove_columns=remove_columns, fn_kwargs=fn_kwargs, formatting=self._formatting, ) info = self.info.copy() info.features = features return IterableDataset( ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def filter( self, function: Optional[Callable] = None, with_indices=False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, fn_kwargs: Optional[dict] = None, ) -> "IterableDataset": """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function. The filtering is done on-the-fly when iterating over the dataset. Args: function (`Callable`): Callable with one of the following signatures: - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False` - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False` - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True` - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True` If no function is provided, defaults to an always True function: `lambda x: True`. 
            with_indices (`bool`, defaults to `False`):
                Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
            input_columns (`str` or `List[str]`, *optional*):
                The columns to be passed into `function` as positional arguments.
                If `None`, a dict mapping to all formatted columns is passed as one argument.
            batched (`bool`, defaults to `False`):
                Provide batch of examples to `function`.
            batch_size (`int`, *optional*, defaults to `1000`):
                Number of examples per batch provided to `function` if `batched=True`.
            fn_kwargs (`Dict`, *optional*, defaults to `None`):
                Keyword arguments to be passed to `function`.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
        >>> ds = ds.filter(lambda x: x["label"] == 0)
        >>> list(ds.take(3))
        [{'label': 0, 'text': 'simplistic , silly and tedious .'},
         {'label': 0, 'text': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
         {'label': 0, 'text': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
        ```
        """
        if isinstance(input_columns, str):
            input_columns = [input_columns]

        # TODO(QL): keep the features (right now if we keep it it would call decode_example again on an already decoded example)
        info = copy.deepcopy(self._info)
        info.features = None

        # We need the examples to be decoded for certain feature types like Image or Audio, so we use TypedExamplesIterable here
        ex_iterable = FilteredExamplesIterable(
            TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
            if self._info.features is not None
            else self._ex_iterable,
            function=function,
            with_indices=with_indices,
            input_columns=input_columns,
            batched=batched,
            batch_size=batch_size,
            fn_kwargs=fn_kwargs,
            formatting=self._formatting,
        )
        return IterableDataset(
            ex_iterable=ex_iterable,
            info=info,
            split=self._split,
            formatting=self._formatting,
            shuffling=copy.deepcopy(self._shuffling),
            distributed=copy.deepcopy(self._distributed),
            token_per_repo_id=self._token_per_repo_id,
        )

    def shuffle(
        self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
    ) -> "IterableDataset":
        """
        Randomly shuffles the elements of this dataset.

        This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer,
        replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
        equal to the full size of the dataset is required.

        For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
        initially select a random element from only the first 1000 elements in the buffer. Once an element is
        selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
        maintaining the 1000 element buffer.

        If the dataset is made of several shards, it also does shuffle the order of the shards.
        However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or
        [`~datasets.IterableDataset.take`] then the order of the shards is kept unchanged.

        Args:
            seed (`int`, *optional*, defaults to `None`):
                Random seed that will be used to shuffle the dataset.
                It is used to sample from the shuffle buffer and also to shuffle the data shards.
            generator (`numpy.random.Generator`, *optional*):
                Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). buffer_size (`int`, defaults to `1000`): Size of the buffer. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> list(ds.take(3)) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}] >>> shuffled_ds = ds.shuffle(seed=42) >>> list(shuffled_ds.take(3)) [{'label': 1, 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."}, {'label': 1, 'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'}, {'label': 1, 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}] ``` """ if generator is None: generator = np.random.default_rng(seed) else: generator = deepcopy(generator) shuffling = ShufflingConfig(generator=generator, _original_seed=seed) return IterableDataset( ex_iterable=BufferShuffledExamplesIterable( self._ex_iterable, buffer_size=buffer_size, generator=generator ).shuffle_data_sources(generator), info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=shuffling, distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def set_epoch(self, epoch: int): self._epoch = epoch def skip(self, n) -> "IterableDataset": """ Create a new [`IterableDataset`] that skips the first `n` elements. Args: n (`int`): Number of elements to skip. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> list(ds.take(3)) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}] >>> ds = ds.skip(1) >>> list(ds.take(3)) [{'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . 
tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}, {'label': 1, 'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}] ``` """ ex_iterable = SkipExamplesIterable(self._ex_iterable, n) return IterableDataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def take(self, n) -> "IterableDataset": """ Create a new [`IterableDataset`] with only the first `n` elements. Args: n (`int`): Number of elements to take. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> small_ds = ds.take(2) >>> list(small_ds) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}] ``` """ ex_iterable = TakeExamplesIterable(self._ex_iterable, n) return IterableDataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def column_names(self) -> Optional[List[str]]: """Names of the columns in the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation", streaming=True) >>> ds.column_names ['text', 'label'] ``` """ return list(self._info.features.keys()) if self._info.features is not None else None def add_column(self, name: str, column: Union[list, np.array]) -> "IterableDataset": """Add column to Dataset. Args: name (str): Column name. column (list or np.array): Column data to be added. Returns: `IterableDataset` """ return self.map(partial(add_column_fn, name=name, column=column), with_indices=True) def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDataset": """ Rename a column in the dataset, and move the features associated to the original column under the new column name. Args: original_column_name (`str`): Name of the column to rename. new_column_name (`str`): New name for the column. Returns: `IterableDataset`: A copy of the dataset with a renamed column. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} >>> ds = ds.rename_column("text", "movie_review") >>> next(iter(ds)) {'label': 1, 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ return self.rename_columns({original_column_name: new_column_name}) def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDataset": """ Rename several columns in the dataset, and move the features associated to the original columns under the new column names. Args: column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names Returns: `IterableDataset`: A copy of the dataset with renamed columns """ original_features = self._info.features.copy() if self._info.features else None ds_iterable = self.map( partial(_rename_columns_fn, column_mapping=column_mapping), remove_columns=list(column_mapping) ) if original_features is not None: ds_iterable._info.features = Features( { column_mapping[col] if col in column_mapping.keys() else col: feature for col, feature in original_features.items() } ) # check that it's still valid, especially with regard to task templates try: ds_iterable._info.copy() except ValueError: ds_iterable._info.task_templates = None return ds_iterable def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset": """ Remove one or several column(s) in the dataset and the features associated to them. The removal is done on-the-fly on the examples when iterating over the dataset. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to remove. Returns: `IterableDataset`: A copy of the dataset object without the columns to remove. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1} >>> ds = ds.remove_columns("label") >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ original_features = self._info.features.copy() if self._info.features else None ds_iterable = self.map(remove_columns=column_names) if original_features is not None: ds_iterable._info.features = original_features.copy() for col, _ in original_features.items(): if col in column_names: del ds_iterable._info.features[col] # check that it's still valid, especially with regard to task templates try: ds_iterable._info.copy() except ValueError: ds_iterable._info.task_templates = None return ds_iterable def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset": """Select one or several column(s) in the dataset and the features associated to them. The selection is done on-the-fly on the examples when iterating over the dataset. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to select. 
Returns: `IterableDataset`: A copy of the dataset object with selected columns. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1} >>> ds = ds.select_columns("text") >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ if isinstance(column_names, str): column_names = [column_names] if self._info: info = copy.deepcopy(self._info) if self._info.features is not None: missing_columns = set(column_names) - set(self._info.features.keys()) if missing_columns: raise ValueError( f"Column name {list(missing_columns)} not in the " "dataset. Columns in the dataset: " f"{list(self._info.features.keys())}." ) info.features = Features({c: info.features[c] for c in column_names}) # check that it's still valid, especially with regard to task templates try: info.copy() except ValueError: info.task_templates = None ex_iterable = SelectColumnsIterable(self._ex_iterable, column_names) return IterableDataset( ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=self._shuffling, distributed=self._distributed, token_per_repo_id=self._token_per_repo_id, ) def cast_column(self, column: str, feature: FeatureType) -> "IterableDataset": """Cast column to feature for decoding. Args: column (`str`): Column name. feature (`Feature`): Target feature. Returns: `IterableDataset` Example: ```py >>> from datasets import load_dataset, Audio >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train", streaming=True) >>> ds.features {'audio': Audio(sampling_rate=8000, mono=True, decode=True, id=None), 'english_transcription': Value(dtype='string', id=None), 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None), 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None), 'path': Value(dtype='string', id=None), 'transcription': Value(dtype='string', id=None)} >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000)) >>> ds.features {'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None), 'english_transcription': Value(dtype='string', id=None), 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None), 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None), 'path': Value(dtype='string', id=None), 'transcription': Value(dtype='string', id=None)} ``` """ info = self._info.copy() info.features[column] = feature # check that it's still valid, especially with regard to task templates try: info.copy() except ValueError: info.task_templates = None return 
IterableDataset( ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def cast( self, features: Features, ) -> "IterableDataset": """ Cast the dataset to a new set of features. Args: features ([`Features`]): New features to cast the dataset to. The name of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset. Returns: `IterableDataset`: A copy of the dataset with casted features. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> ds.features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> new_features = ds.features.copy() >>> new_features["label"] = ClassLabel(names=["bad", "good"]) >>> new_features["text"] = Value("large_string") >>> ds = ds.cast(new_features) >>> ds.features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='large_string', id=None)} ``` """ info = self._info.copy() info.features = features # check that it's still valid, especially with regard to task templates try: info.copy() except ValueError: info.task_templates = None return IterableDataset( ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def _step(self, step: int, offset: int) -> "IterableDataset": ex_iterable = StepExamplesIterable(self._ex_iterable, step=step, offset=offset) return IterableDataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def _resolve_features(self): if self.features is not None: return self elif isinstance(self._ex_iterable, TypedExamplesIterable): features = self._ex_iterable.features else: features = _infer_features_from_batch(self.with_format(None)._head()) info = self.info.copy() info.features = features return IterableDataset( ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def _concatenate_iterable_datasets( dsets: List[IterableDataset], info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, axis: int = 0, ) -> IterableDataset: """ Converts a list of `IterableDataset` with the same schema into a single `IterableDataset`. Missing data are filled with None values. <Added version="2.4.0"/> Args: dsets (`List[datasets.IterableDataset]`): List of Datasets to concatenate. info (`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (`NamedSplit`, optional): Name of the dataset split. axis (``{0, 1}``, default ``0``, meaning over rows): Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns (horizontally). 
*New in version 1.6.0* Example: ```py >>> ds3 = _concatenate_iterable_datasets([ds1, ds2]) ``` """ dsets = [d._resolve_features() for d in dsets] # Perform checks (and a potential cast if axis=0) if axis == 0: _check_if_features_can_be_aligned([dset.features for dset in dsets]) else: _check_column_names([col_name for dset in dsets for col_name in dset.features]) # TODO: improve this to account for a mix of ClassLabel and Value for example # right now it would keep the type of the first dataset in the list features = Features( {k: v for features in _align_features([dset.features for dset in dsets]) for k, v in features.items()} ) ex_iterables = [d._ex_iterable for d in dsets] if axis == 0: ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables) else: ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable(ex_iterables) # Set new info - we update the features # setting the features also ensures to fill missing columns with None if info is None: info = DatasetInfo.from_merge([d.info for d in dsets]) else: info = info.copy() info.features = features # Get all the auth tokens per repository - in case the datasets come from different private repositories token_per_repo_id = {repo_id: token for dataset in dsets for repo_id, token in dataset._token_per_repo_id.items()} # Return new dataset return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id) class NamedSplit(SplitBase): """Descriptor corresponding to a named split (train, test, ...). Example: Each descriptor can be composed with other using addition or slice: ```py split = datasets.Split.TRAIN.subsplit(datasets.percent[0:25]) + datasets.Split.TEST ``` The resulting split will correspond to 25% of the train split merged with 100% of the test split. A split cannot be added twice, so the following will fail: ```py split = ( datasets.Split.TRAIN.subsplit(datasets.percent[:25]) + datasets.Split.TRAIN.subsplit(datasets.percent[75:]) ) # Error split = datasets.Split.TEST + datasets.Split.ALL # Error ``` The slices can be applied only one time.
So the following are valid: ```py split = ( datasets.Split.TRAIN.subsplit(datasets.percent[:25]) + datasets.Split.TEST.subsplit(datasets.percent[:50]) ) split = (datasets.Split.TRAIN + datasets.Split.TEST).subsplit(datasets.percent[:50]) ``` But this is not valid: ```py train = datasets.Split.TRAIN test = datasets.Split.TEST split = train.subsplit(datasets.percent[:25]).subsplit(datasets.percent[:25]) split = (train.subsplit(datasets.percent[:25]) + test).subsplit(datasets.percent[:50]) ``` """ def __init__(self, name): self._name = name split_names_from_instruction = [split_instruction.split("[")[0] for split_instruction in name.split("+")] for split_name in split_names_from_instruction: if not re.match(_split_re, split_name): raise ValueError(f"Split name should match '{_split_re}' but got '{split_name}'.") def __str__(self): return self._name def __repr__(self): return f"NamedSplit({self._name!r})" def __eq__(self, other): """Equality: datasets.Split.TRAIN == 'train'.""" if isinstance(other, NamedSplit): return self._name == other._name # pylint: disable=protected-access elif isinstance(other, SplitBase): return False elif isinstance(other, str): # Other should be string return self._name == other else: raise ValueError(f"Equality not supported between split {self} and {other}") def __lt__(self, other): return self._name < other._name # pylint: disable=protected-access def __hash__(self): return hash(self._name) def get_read_instruction(self, split_dict): return SplitReadInstruction(split_dict[self._name]) The provided code snippet includes necessary dependencies for implementing the `concatenate_datasets` function. Write a Python function `def concatenate_datasets( dsets: List[DatasetType], info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, axis: int = 0, ) -> DatasetType` to solve the following problem: Converts a list of [`Dataset`] with the same schema into a single [`Dataset`]. Args: dsets (`List[datasets.Dataset]`): List of Datasets to concatenate. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. axis (`{0, 1}`, defaults to `0`): Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns (horizontally). <Added version="1.6.0"/> Example: ```py >>> ds3 = concatenate_datasets([ds1, ds2]) ``` Here is the function: def concatenate_datasets( dsets: List[DatasetType], info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, axis: int = 0, ) -> DatasetType: """ Converts a list of [`Dataset`] with the same schema into a single [`Dataset`]. Args: dsets (`List[datasets.Dataset]`): List of Datasets to concatenate. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. axis (`{0, 1}`, defaults to `0`): Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns (horizontally). <Added version="1.6.0"/> Example: ```py >>> ds3 = concatenate_datasets([ds1, ds2]) ``` """ if not dsets: raise ValueError("Unable to concatenate an empty list of datasets.") for i, dataset in enumerate(dsets): if not isinstance(dataset, (Dataset, IterableDataset)): if isinstance(dataset, (DatasetDict, IterableDatasetDict)): if not dataset: raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " "is an empty dataset dictionary." 
) raise ValueError( f"Dataset at position {i} has at least one split: {list(dataset)}\n" f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']" ) raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}." ) if i == 0: dataset_type, other_type = ( (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset) ) elif not isinstance(dataset, dataset_type): raise ValueError( f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if dataset_type is Dataset: return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis) else: return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
Converts a list of [`Dataset`] with the same schema into a single [`Dataset`]. Args: dsets (`List[datasets.Dataset]`): List of Datasets to concatenate. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. axis (`{0, 1}`, defaults to `0`): Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns (horizontally). <Added version="1.6.0"/> Example: ```py >>> ds3 = concatenate_datasets([ds1, ds2]) ```
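The docstring above only shows the bare call; as a quick illustration of the public entry point, here is a minimal, hedged usage sketch. It relies only on the standard `datasets` API (`Dataset.from_dict`, `concatenate_datasets`); the column names `text`, `label` and `score` are invented for the example.

```py
from datasets import Dataset, concatenate_datasets

ds1 = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds2 = Dataset.from_dict({"text": ["c"], "label": [1]})

# axis=0 (default): stack rows; the schemas must be alignable
rows = concatenate_datasets([ds1, ds2])
print(rows.num_rows)  # 3

# axis=1: add columns side by side; the datasets must have the same number of rows
# and no overlapping column names
ds3 = Dataset.from_dict({"score": [0.1, 0.9]})
cols = concatenate_datasets([ds1, ds3], axis=1)
print(cols.column_names)  # ['text', 'label', 'score']
```

With streaming (`IterableDataset`) inputs the same call goes through the `_concatenate_iterable_datasets` branch shown above and returns an `IterableDataset` instead.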
17,992
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.download_config import DownloadConfig from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict class Image: """Image [`Feature`] to read image data from an image file. Input: The Image feature accepts as input: - A `str`: Absolute path to the image file (i.e. random access is allowed). - A `dict` with the keys: - `path`: String with relative path of the image file to the archive file. - `bytes`: Bytes of the image file. This is useful for archived files with sequential access. - An `np.ndarray`: NumPy array representing an image. - A `PIL.Image.Image`: PIL image object. Args: mode (`str`, *optional*): The mode to convert the image to. If `None`, the native mode of the image is used. decode (`bool`, defaults to `True`): Whether to decode the image data. If `False`, returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`. Examples: ```py >>> from datasets import load_dataset, Image >>> ds = load_dataset("beans", split="train") >>> ds.features["image"] Image(decode=True, id=None) >>> ds[0]["image"] <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x15E52E7F0> >>> ds = ds.cast_column('image', Image(decode=False)) {'bytes': None, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'} ``` """ mode: Optional[str] = None decode: bool = True id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "PIL.Image.Image" pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()}) _type: str = field(default="Image", init=False, repr=False) def __call__(self): return self.pa_type def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict: """Encode example into a format for Arrow. Args: value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`): Data passed as input to Image feature. Returns: `dict` with "path" and "bytes" fields """ if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") if isinstance(value, list): value = np.array(value) if isinstance(value, str): return {"path": value, "bytes": None} elif isinstance(value, bytes): return {"path": None, "bytes": value} elif isinstance(value, np.ndarray): # convert the image array to PNG/TIFF bytes return encode_np_array(value) elif isinstance(value, PIL.Image.Image): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(value) elif value.get("path") is not None and os.path.isfile(value["path"]): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("path")} elif value.get("bytes") is not None or value.get("path") is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("bytes"), "path": value.get("path")} else: raise ValueError( f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." 
) def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image": """Decode example image file into image data. Args: value (`str` or `dict`): A string with the absolute image file path, a dictionary with keys: - `path`: String with absolute or relative image file path. - `bytes`: The bytes of the image file. token_per_repo_id (`dict`, *optional*): To access and decode image files from private repositories on the Hub, you can pass a dictionary repo_id (`str`) -> token (`bool` or `str`). Returns: `PIL.Image.Image` """ if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.") if config.PIL_AVAILABLE: import PIL.Image import PIL.ImageOps else: raise ImportError("To support decoding images, please install 'Pillow'.") if token_per_repo_id is None: token_per_repo_id = {} path, bytes_ = value["path"], value["bytes"] if bytes_ is None: if path is None: raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.") else: if is_local_path(path): image = PIL.Image.open(path) else: source_url = path.split("::")[-1] pattern = ( config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL ) try: repo_id = string_to_dict(source_url, pattern)["repo_id"] token = token_per_repo_id.get(repo_id) except ValueError: token = None download_config = DownloadConfig(token=token) with xopen(path, "rb", download_config=download_config) as f: bytes_ = BytesIO(f.read()) image = PIL.Image.open(bytes_) else: image = PIL.Image.open(BytesIO(bytes_)) image.load() # to avoid "Too many open files" errors if image.getexif().get(PIL.Image.ExifTags.Base.Orientation) is not None: image = PIL.ImageOps.exif_transpose(image) if self.mode and self.mode != image.mode: image = image.convert(self.mode) return image def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary.""" from .features import Value return ( self if self.decode else { "bytes": Value("binary"), "path": Value("string"), } ) def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray: """Cast an Arrow array to the Image arrow storage type. The Arrow types that can be converted to the Image pyarrow storage type are: - `pa.string()` - it must contain the "path" data - `pa.binary()` - it must contain the image bytes - `pa.struct({"bytes": pa.binary()})` - `pa.struct({"path": pa.string()})` - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter - `pa.list(*)` - it must contain the image array data Args: storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`): PyArrow array to cast. Returns: `pa.StructArray`: Array in the Image arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. 
""" if pa.types.is_string(storage.type): bytes_array = pa.array([None] * len(storage), type=pa.binary()) storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_binary(storage.type): path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_struct(storage.type): if storage.type.get_field_index("bytes") >= 0: bytes_array = storage.field("bytes") else: bytes_array = pa.array([None] * len(storage), type=pa.binary()) if storage.type.get_field_index("path") >= 0: path_array = storage.field("path") else: path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_list(storage.type): bytes_array = pa.array( [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), ) path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays( [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() ) return array_cast(storage, self.pa_type) def embed_storage(self, storage: pa.StructArray) -> pa.StructArray: """Embed image files into the Arrow array. Args: storage (`pa.StructArray`): PyArrow array to embed. Returns: `pa.StructArray`: Array in the Image arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. """ def path_to_bytes(path): with xopen(path, "rb") as f: bytes_ = f.read() return bytes_ bytes_array = pa.array( [ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) path_array = pa.array( [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), ) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()) return array_cast(storage, self.pa_type) def encode_pil_image(image: "PIL.Image.Image") -> dict: if hasattr(image, "filename") and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(image)} def encode_np_array(array: np.ndarray) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") dtype = array.dtype dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER dtype_kind = dtype.kind dtype_itemsize = dtype.itemsize dest_dtype = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: if dtype_kind not in ["u", "i"]: raise TypeError( f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." 
) dest_dtype = np.dtype("|u1") if dtype != dest_dtype: warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'") # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: dest_dtype = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize) if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES: dest_dtype = np.dtype(dtype_str) warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'") break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"Cannot downcast dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" ) image = PIL.Image.fromarray(array.astype(dest_dtype)) return {"path": None, "bytes": image_to_bytes(image)} def no_op_if_value_is_null(func): """If the value is None, return None, else call `func`.""" def wrapper(value): return func(value) if value is not None else None return wrapper def first_non_null_value(iterable): """Return the index and the value of the first non-null value in the iterable. If all values are None, return -1 as index.""" for i, value in enumerate(iterable): if value is not None: return i, value return -1, None The provided code snippet includes necessary dependencies for implementing the `objects_to_list_of_image_dicts` function. Write a Python function `def objects_to_list_of_image_dicts( objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]], ) -> List[dict]` to solve the following problem: Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`. Here is the function: def objects_to_list_of_image_dicts( objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]], ) -> List[dict]: """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`.""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") if objs: _, obj = first_non_null_value(objs) if isinstance(obj, str): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(obj, np.ndarray): obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array) return [obj_to_image_dict_func(obj) for obj in objs] elif isinstance(obj, PIL.Image.Image): obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image) return [obj_to_image_dict_func(obj) for obj in objs] else: return objs else: return objs
Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`.
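As a rough illustration of the dispatch described above, here is a minimal sketch. It assumes Pillow is installed and that the helper is importable as `datasets.features.image.objects_to_list_of_image_dicts`; the import path and the file name `img0.png` are assumptions for the example (the string branch never opens the file).

```py
import numpy as np
from datasets.features.image import objects_to_list_of_image_dicts

# String inputs are wrapped as {"path": ..., "bytes": None}; None entries stay None.
print(objects_to_list_of_image_dicts(["img0.png", None]))
# [{'path': 'img0.png', 'bytes': None}, None]

# NumPy inputs are serialized to image bytes via encode_np_array instead of keeping a path.
arr = np.zeros((4, 4, 3), dtype=np.uint8)  # uint8 is a valid image dtype, so no downcast warning
encoded = objects_to_list_of_image_dicts([arr, None])
print(encoded[0]["path"], type(encoded[0]["bytes"]))  # None <class 'bytes'>
```

Note that the branch is chosen from the first non-null element, so each input list is expected to be homogeneous in type.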
17,993
import copy import json import re import sys from collections.abc import Iterable, Mapping from collections.abc import Sequence as SequenceABC from dataclasses import InitVar, dataclass, field, fields from functools import reduce, wraps from operator import mul from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union from typing import Sequence as Sequence_ import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pyarrow.types import pyarrow_hotfix from pandas.api.extensions import ExtensionArray as PandasExtensionArray from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype from .. import config from ..naming import camelcase_to_snakecase, snakecase_to_camelcase from ..table import array_cast from ..utils import experimental, logging from ..utils.py_utils import asdict, first_non_null_value, zip_dict from .audio import Audio from .image import Image, encode_pil_image from .translation import Translation, TranslationVariableLanguages pa.register_extension_type(Array2DExtensionType((1, 2), "int64")) pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64")) pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64")) pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64")) The provided code snippet includes necessary dependencies for implementing the `string_to_arrow` function. Write a Python function `def string_to_arrow(datasets_dtype: str) -> pa.DataType` to solve the following problem: string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType. In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))` This is necessary because the datasets.Value() primitive type is constructed using a string dtype Value(dtype=str) But Features.type (via `get_nested_type()` expects to resolve Features into a pyarrow Schema, which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the purpose of this function. Here is the function: def string_to_arrow(datasets_dtype: str) -> pa.DataType: """ string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType. In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))` This is necessary because the datasets.Value() primitive type is constructed using a string dtype Value(dtype=str) But Features.type (via `get_nested_type()` expects to resolve Features into a pyarrow Schema, which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the purpose of this function. """ def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None): msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type." if examples: examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0] msg += f"\nValid examples include: {examples}." if urls: urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0] msg += f"\nFor more insformation, see: {urls}." 
return msg if datasets_dtype in pa.__dict__: return pa.__dict__[datasets_dtype]() if (datasets_dtype + "_") in pa.__dict__: return pa.__dict__[datasets_dtype + "_"]() timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype) if timestamp_matches: timestamp_internals = timestamp_matches.group(1) internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals) if timestamp_internals in ["s", "ms", "us", "ns"]: return pa.timestamp(timestamp_internals) elif internals_matches: return pa.timestamp(internals_matches.group(1), internals_matches.group(2)) else: raise ValueError( _dtype_error_msg( datasets_dtype, "timestamp", examples=["timestamp[us]", "timestamp[us, tz=America/New_York"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"], ) ) duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype) if duration_matches: duration_internals = duration_matches.group(1) if duration_internals in ["s", "ms", "us", "ns"]: return pa.duration(duration_internals) else: raise ValueError( _dtype_error_msg( datasets_dtype, "duration", examples=["duration[s]", "duration[us]"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"], ) ) time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype) if time_matches: time_internals_bits = time_matches.group(1) if time_internals_bits == "32": time_internals_unit = time_matches.group(2) if time_internals_unit in ["s", "ms"]: return pa.time32(time_internals_unit) else: raise ValueError( f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)." ) elif time_internals_bits == "64": time_internals_unit = time_matches.group(2) if time_internals_unit in ["us", "ns"]: return pa.time64(time_internals_unit) else: raise ValueError( f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)." 
) else: raise ValueError( _dtype_error_msg( datasets_dtype, "time", examples=["time32[s]", "time64[us]"], urls=[ "https://arrow.apache.org/docs/python/generated/pyarrow.time32.html", "https://arrow.apache.org/docs/python/generated/pyarrow.time64.html", ], ) ) decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype) if decimal_matches: decimal_internals_bits = decimal_matches.group(1) if decimal_internals_bits == "128": decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2)) if decimal_internals_precision_and_scale: precision = decimal_internals_precision_and_scale.group(1) scale = decimal_internals_precision_and_scale.group(2) return pa.decimal128(int(precision), int(scale)) else: raise ValueError( _dtype_error_msg( datasets_dtype, "decimal128", examples=["decimal128(10, 2)", "decimal128(4, -2)"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"], ) ) elif decimal_internals_bits == "256": decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2)) if decimal_internals_precision_and_scale: precision = decimal_internals_precision_and_scale.group(1) scale = decimal_internals_precision_and_scale.group(2) return pa.decimal256(int(precision), int(scale)) else: raise ValueError( _dtype_error_msg( datasets_dtype, "decimal256", examples=["decimal256(30, 2)", "decimal256(38, -4)"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"], ) ) else: raise ValueError( _dtype_error_msg( datasets_dtype, "decimal", examples=["decimal128(12, 3)", "decimal256(40, 6)"], urls=[ "https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html", "https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html", ], ) ) raise ValueError( f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. " f"Please make sure to use a correct data type, see: " f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions" )
string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType. In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`. This is necessary because the datasets.Value() primitive type is constructed using a string dtype, i.e. `Value(dtype=str)`. But Features.type (via `get_nested_type()`) expects to resolve Features into a pyarrow Schema, which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the purpose of this function.
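To make the mapping concrete, here is a small, hedged round-trip sketch based on the parsing branches shown above; it assumes `string_to_arrow` is importable from `datasets.features.features` (the module the snippet appears to come from).

```py
import pyarrow as pa
from datasets.features.features import string_to_arrow

# Plain factory names resolve directly against the pyarrow module
assert string_to_arrow("int64") == pa.int64()
assert string_to_arrow("bool") == pa.bool_()  # falls back to the "bool_" factory name

# Parametrized types are parsed with the regexes shown above
assert string_to_arrow("timestamp[us, tz=UTC]") == pa.timestamp("us", tz="UTC")
assert string_to_arrow("duration[s]") == pa.duration("s")
assert string_to_arrow("decimal128(10, 2)") == pa.decimal128(10, 2)

# Malformed strings raise ValueError with a hint about valid spellings
try:
    string_to_arrow("timestamp[2 hours]")
except ValueError as err:
    print(err)
```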
17,994
import copy import json import re import sys from collections.abc import Iterable, Mapping from collections.abc import Sequence as SequenceABC from dataclasses import InitVar, dataclass, field, fields from functools import reduce, wraps from operator import mul from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union from typing import Sequence as Sequence_ import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pyarrow.types import pyarrow_hotfix from pandas.api.extensions import ExtensionArray as PandasExtensionArray from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype from .. import config from ..naming import camelcase_to_snakecase, snakecase_to_camelcase from ..table import array_cast from ..utils import experimental, logging from ..utils.py_utils import asdict, first_non_null_value, zip_dict from .audio import Audio from .image import Image, encode_pil_image from .translation import Translation, TranslationVariableLanguages pa.register_extension_type(Array2DExtensionType((1, 2), "int64")) pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64")) pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64")) pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64")) The provided code snippet includes necessary dependencies for implementing the `_is_zero_copy_only` function. Write a Python function `def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool` to solve the following problem: When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not. This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array. # zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration) # primitive types are types for which the physical representation in arrow and in numpy # https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821 # see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy # and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22 Here is the function: def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool: """ When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not. This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array. # zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration) # primitive types are types for which the physical representation in arrow and in numpy # https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821 # see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy # and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22 """ def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType: if pa.types.is_list(pa_type): return _unnest_pa_type(pa_type.value_type) return pa_type if unnest: pa_type = _unnest_pa_type(pa_type) return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type))
When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not. This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array. # zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration) # primitive types are types for which the physical representation in arrow and in numpy # https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821 # see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy # and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22
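The behaviour is easiest to see on a few concrete pyarrow types. The sketch below assumes the helper is importable from `datasets.features.features`; since it is a private function, the import path is an assumption.

```py
import pyarrow as pa
from datasets.features.features import _is_zero_copy_only

print(_is_zero_copy_only(pa.int64()))          # True  - fixed-width primitive, same layout as numpy
print(_is_zero_copy_only(pa.bool_()))          # False - booleans are bit-packed in Arrow
print(_is_zero_copy_only(pa.timestamp("us")))  # False - temporal types are excluded
print(_is_zero_copy_only(pa.string()))         # False - not a primitive type

# With unnest=True the check applies to the innermost value type of (nested) lists
print(_is_zero_copy_only(pa.list_(pa.int32()), unnest=True))   # True
print(_is_zero_copy_only(pa.list_(pa.int32()), unnest=False))  # False
```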
17,995
import copy import json import re import sys from collections.abc import Iterable, Mapping from collections.abc import Sequence as SequenceABC from dataclasses import InitVar, dataclass, field, fields from functools import reduce, wraps from operator import mul from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union from typing import Sequence as Sequence_ import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pyarrow.types import pyarrow_hotfix from pandas.api.extensions import ExtensionArray as PandasExtensionArray from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype from .. import config from ..naming import camelcase_to_snakecase, snakecase_to_camelcase from ..table import array_cast from ..utils import experimental, logging from ..utils.py_utils import asdict, first_non_null_value, zip_dict from .audio import Audio from .image import Image, encode_pil_image from .translation import Translation, TranslationVariableLanguages class _ArrayXDExtensionType(pa.ExtensionType): ndims: Optional[int] = None def __init__(self, shape: tuple, dtype: str): if self.ndims is None or self.ndims <= 1: raise ValueError("You must instantiate an array type with a value for dim that is > 1") if len(shape) != self.ndims: raise ValueError(f"shape={shape} and ndims={self.ndims} don't match") for dim in range(1, self.ndims): if shape[dim] is None: raise ValueError(f"Support only dynamic size on first dimension. Got: {shape}") self.shape = tuple(shape) self.value_type = dtype self.storage_dtype = self._generate_dtype(self.value_type) pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}") def __arrow_ext_serialize__(self): return json.dumps((self.shape, self.value_type)).encode() def __arrow_ext_deserialize__(cls, storage_type, serialized): args = json.loads(serialized) return cls(*args) # This was added to pa.ExtensionType in pyarrow >= 13.0.0 def __reduce__(self): return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__()) def __hash__(self): return hash((self.__class__, self.shape, self.value_type)) def __arrow_ext_class__(self): return ArrayExtensionArray def _generate_dtype(self, dtype): dtype = string_to_arrow(dtype) for d in reversed(self.shape): dtype = pa.list_(dtype) # Don't specify the size of the list, since fixed length list arrays have issues # being validated after slicing in pyarrow 0.17.1 return dtype def to_pandas_dtype(self): return PandasArrayExtensionDtype(self.value_type) class PandasArrayExtensionDtype(PandasExtensionDtype): _metadata = "value_type" def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]): self._value_type = value_type def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]): if isinstance(array, pa.ChunkedArray): array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks])) zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True) numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only) return PandasArrayExtensionArray(numpy_arr) def construct_array_type(cls): return PandasArrayExtensionArray def type(self) -> type: return np.ndarray def kind(self) -> str: return "O" def name(self) -> str: return f"array[{self.value_type}]" def value_type(self) -> np.dtype: return self._value_type def pandas_types_mapper(dtype): if isinstance(dtype, _ArrayXDExtensionType): return PandasArrayExtensionDtype(dtype.value_type)
null
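The extension types above are what back the user-facing `ArrayXD` features. A minimal, hedged sketch of how they are reached through the public API follows; it assumes the standard `datasets` entry points (`Array2D`, `Features`, `Dataset`), and the exact reprs may vary by version.

```py
import numpy as np
from datasets import Array2D, Dataset, Features

# Declaring an Array2D feature creates an Array2DExtensionType column under the hood
features = Features({"matrix": Array2D(shape=(2, 3), dtype="float32")})
ds = Dataset.from_dict({"matrix": [np.zeros((2, 3), dtype=np.float32)]}, features=features)

print(ds.features["matrix"])  # Array2D(shape=(2, 3), dtype='float32', id=None)
print(ds[0]["matrix"])        # nested Python lists by default

# PandasArrayExtensionDtype comes into play when converting to pandas:
# the column keeps numpy arrays instead of being exploded into lists
df = ds.to_pandas()
print(df["matrix"].dtype, df["matrix"][0].shape)
```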
17,996
import copy import json import re import sys from collections.abc import Iterable, Mapping from collections.abc import Sequence as SequenceABC from dataclasses import InitVar, dataclass, field, fields from functools import reduce, wraps from operator import mul from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union from typing import Sequence as Sequence_ import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pyarrow.types import pyarrow_hotfix from pandas.api.extensions import ExtensionArray as PandasExtensionArray from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype from .. import config from ..naming import camelcase_to_snakecase, snakecase_to_camelcase from ..table import array_cast from ..utils import experimental, logging from ..utils.py_utils import asdict, first_non_null_value, zip_dict from .audio import Audio from .image import Image, encode_pil_image from .translation import Translation, TranslationVariableLanguages class Value: """ The `Value` dtypes are as follows: - `null` - `bool` - `int8` - `int16` - `int32` - `int64` - `uint8` - `uint16` - `uint32` - `uint64` - `float16` - `float32` (alias float) - `float64` (alias double) - `time32[(s|ms)]` - `time64[(us|ns)]` - `timestamp[(s|ms|us|ns)]` - `timestamp[(s|ms|us|ns), tz=(tzstring)]` - `date32` - `date64` - `duration[(s|ms|us|ns)]` - `decimal128(precision, scale)` - `decimal256(precision, scale)` - `binary` - `large_binary` - `string` - `large_string` Example: ```py >>> from datasets import Features >>> features = Features({'stars': Value(dtype='int32')}) >>> features {'stars': Value(dtype='int32', id=None)} ``` """ dtype: str id: Optional[str] = None # Automatically constructed pa_type: ClassVar[Any] = None _type: str = field(default="Value", init=False, repr=False) def __post_init__(self): if self.dtype == "double": # fix inferred type self.dtype = "float64" if self.dtype == "float": # fix inferred type self.dtype = "float32" self.pa_type = string_to_arrow(self.dtype) def __call__(self): return self.pa_type def encode_example(self, value): if pa.types.is_boolean(self.pa_type): return bool(value) elif pa.types.is_integer(self.pa_type): return int(value) elif pa.types.is_floating(self.pa_type): return float(value) elif pa.types.is_string(self.pa_type): return str(value) else: return value class _ArrayXD: def __post_init__(self): self.shape = tuple(self.shape) def __call__(self): pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype) return pa_type def encode_example(self, value): return value class ClassLabel: """Feature type for integer class labels. There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments: * `num_classes`: Create 0 to (num_classes-1) labels. * `names`: List of label strings. * `names_file`: File containing the list of labels. Under the hood the labels are stored as integers. You can use negative integers to represent unknown/missing labels. Args: num_classes (`int`, *optional*): Number of classes. All labels must be < `num_classes`. names (`list` of `str`, *optional*): String names for the integer classes. The order in which the names are provided is kept. names_file (`str`, *optional*): Path to a file with names for the integer classes, one per line. 
Example: ```py >>> from datasets import Features >>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])}) >>> features {'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'], id=None)} ``` """ num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict names: List[str] = None names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "int64" pa_type: ClassVar[Any] = pa.int64() _str2int: ClassVar[Dict[str, int]] = None _int2str: ClassVar[Dict[int, int]] = None _type: str = field(default="ClassLabel", init=False, repr=False) def __post_init__(self, num_classes, names_file): self.num_classes = num_classes self.names_file = names_file if self.names_file is not None and self.names is not None: raise ValueError("Please provide either names or names_file but not both.") # Set self.names if self.names is None: if self.names_file is not None: self.names = self._load_names_from_file(self.names_file) elif self.num_classes is not None: self.names = [str(i) for i in range(self.num_classes)] else: raise ValueError("Please provide either num_classes, names or names_file.") elif not isinstance(self.names, SequenceABC): raise TypeError(f"Please provide names as a list, is {type(self.names)}") # Set self.num_classes if self.num_classes is None: self.num_classes = len(self.names) elif self.num_classes != len(self.names): raise ValueError( "ClassLabel number of names do not match the defined num_classes. " f"Got {len(self.names)} names VS {self.num_classes} num_classes" ) # Prepare mappings self._int2str = [str(name) for name in self.names] self._str2int = {name: i for i, name in enumerate(self._int2str)} if len(self._int2str) != len(self._str2int): raise ValueError("Some label names are duplicated. Each label name should be unique.") def __call__(self): return self.pa_type def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]: """Conversion class name `string` => `integer`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> ds.features["label"].str2int('neg') 0 ``` """ if not isinstance(values, str) and not isinstance(values, Iterable): raise ValueError( f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)" ) return_list = True if isinstance(values, str): values = [values] return_list = False output = [self._strval2int(value) for value in values] return output if return_list else output[0] def _strval2int(self, value: str) -> int: failed_parse = False value = str(value) # first attempt - raw string value int_value = self._str2int.get(value) if int_value is None: # second attempt - strip whitespace int_value = self._str2int.get(value.strip()) if int_value is None: # third attempt - convert str to int try: int_value = int(value) except ValueError: failed_parse = True else: if int_value < -1 or int_value >= self.num_classes: failed_parse = True if failed_parse: raise ValueError(f"Invalid string class label {value}") return int_value def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]: """Conversion `integer` => class name `string`. Regarding unknown/missing labels: passing negative integers raises `ValueError`. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> ds.features["label"].int2str(0) 'neg' ``` """ if not isinstance(values, int) and not isinstance(values, Iterable): raise ValueError( f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)" ) return_list = True if isinstance(values, int): values = [values] return_list = False for v in values: if not 0 <= v < self.num_classes: raise ValueError(f"Invalid integer class label {v:d}") output = [self._int2str[int(v)] for v in values] return output if return_list else output[0] def encode_example(self, example_data): if self.num_classes is None: raise ValueError( "Trying to use ClassLabel feature with undefined number of class. " "Please set ClassLabel.names or num_classes." ) # If a string is given, convert to associated integer if isinstance(example_data, str): example_data = self.str2int(example_data) # Allowing -1 to mean no label. if not -1 <= example_data < self.num_classes: raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}") return example_data def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array: """Cast an Arrow array to the `ClassLabel` arrow storage type. The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are: - `pa.string()` - `pa.int()` Args: storage (`Union[pa.StringArray, pa.IntegerArray]`): PyArrow array to cast. Returns: `pa.Int64Array`: Array in the `ClassLabel` arrow storage type. """ if isinstance(storage, pa.IntegerArray) and len(storage) > 0: min_max = pc.min_max(storage).as_py() if min_max["max"] is not None and min_max["max"] >= self.num_classes: raise ValueError( f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}" ) elif isinstance(storage, pa.StringArray): storage = pa.array( [self._strval2int(label) if label is not None else None for label in storage.to_pylist()] ) return array_cast(storage, self.pa_type) def _load_names_from_file(names_filepath): with open(names_filepath, encoding="utf-8") as f: return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names class Sequence: """Construct a list of feature from a single type or a dict of types. Mostly here for compatiblity with tfds. Args: feature: A list of features of a single type or a dictionary of types. length (`int`): Length of the sequence. Example: ```py >>> from datasets import Features, Sequence, Value, ClassLabel >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})}) >>> features {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)} ``` """ feature: Any length: int = -1 id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "list" pa_type: ClassVar[Any] = None _type: str = field(default="Sequence", init=False, repr=False) def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool: """ Check if the object is not None. If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence. 
""" if obj is None: return False elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, Sequence))): if len(obj) > 0: if schema is None: pass elif isinstance(schema, (list, tuple)): schema = schema[0] else: schema = schema.feature return _check_non_null_non_empty_recursive(obj[0], schema) else: return False else: return True class Audio: """Audio [`Feature`] to extract audio data from an audio file. Input: The Audio feature accepts as input: - A `str`: Absolute path to the audio file (i.e. random access is allowed). - A `dict` with the keys: - `path`: String with relative path of the audio file to the archive file. - `bytes`: Bytes content of the audio file. This is useful for archived files with sequential access. - A `dict` with the keys: - `path`: String with relative path of the audio file to the archive file. - `array`: Array containing the audio sample - `sampling_rate`: Integer corresponding to the sampling rate of the audio sample. This is useful for archived files with sequential access. Args: sampling_rate (`int`, *optional*): Target sampling rate. If `None`, the native sampling rate is used. mono (`bool`, defaults to `True`): Whether to convert the audio signal to mono by averaging samples across channels. decode (`bool`, defaults to `True`): Whether to decode the audio data. If `False`, returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`. Example: ```py >>> from datasets import load_dataset, Audio >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train") >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000)) >>> ds[0]["audio"] {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 16000} ``` """ sampling_rate: Optional[int] = None mono: bool = True decode: bool = True id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "dict" pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()}) _type: str = field(default="Audio", init=False, repr=False) def __call__(self): return self.pa_type def encode_example(self, value: Union[str, bytes, dict]) -> dict: """Encode example into a format for Arrow. Args: value (`str` or `dict`): Data passed as input to Audio feature. Returns: `dict` """ try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. 
except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'.") from err if isinstance(value, str): return {"bytes": None, "path": value} elif isinstance(value, bytes): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes buffer = BytesIO() sf.write(buffer, value["array"], value["sampling_rate"], format="wav") return {"bytes": buffer.getvalue(), "path": None} elif value.get("path") is not None and os.path.isfile(value["path"]): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm"): # "PCM" only has raw audio bytes if value.get("sampling_rate") is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object") if value.get("bytes"): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767 else: bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767 buffer = BytesIO(bytes()) sf.write(buffer, bytes_value, value["sampling_rate"], format="wav") return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path")} elif value.get("bytes") is not None or value.get("path") is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes"), "path": value.get("path")} else: raise ValueError( f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." ) def decode_example( self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict: """Decode example audio file into audio data. Args: value (`dict`): A dictionary with keys: - `path`: String with relative audio file path. - `bytes`: Bytes of the audio file. token_per_repo_id (`dict`, *optional*): To access and decode audio files from private repositories on the Hub, you can pass a dictionary repo_id (`str`) -> token (`bool` or `str`) Returns: `dict` """ if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.") path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.") try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err audio_format = xsplitext(path)[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. 
' ) if file is None: token_per_repo_id = token_per_repo_id or {} source_url = path.split("::")[-1] pattern = ( config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL ) try: repo_id = string_to_dict(source_url, pattern)["repo_id"] token = token_per_repo_id[repo_id] except (ValueError, KeyError): token = None download_config = DownloadConfig(token=token) with xopen(path, "rb", download_config=download_config) as f: array, sampling_rate = sf.read(f) else: array, sampling_rate = sf.read(file) array = array.T if self.mono: array = librosa.to_mono(array) if self.sampling_rate and self.sampling_rate != sampling_rate: array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate) sampling_rate = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary.""" from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature.") return { "bytes": Value("binary"), "path": Value("string"), } def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray: """Cast an Arrow array to the Audio arrow storage type. The Arrow types that can be converted to the Audio pyarrow storage type are: - `pa.string()` - it must contain the "path" data - `pa.binary()` - it must contain the audio bytes - `pa.struct({"bytes": pa.binary()})` - `pa.struct({"path": pa.string()})` - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter Args: storage (`Union[pa.StringArray, pa.StructArray]`): PyArrow array to cast. Returns: `pa.StructArray`: Array in the Audio arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})` """ if pa.types.is_string(storage.type): bytes_array = pa.array([None] * len(storage), type=pa.binary()) storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_binary(storage.type): path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"): storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()]) elif pa.types.is_struct(storage.type): if storage.type.get_field_index("bytes") >= 0: bytes_array = storage.field("bytes") else: bytes_array = pa.array([None] * len(storage), type=pa.binary()) if storage.type.get_field_index("path") >= 0: path_array = storage.field("path") else: path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()) return array_cast(storage, self.pa_type) def embed_storage(self, storage: pa.StructArray) -> pa.StructArray: """Embed audio files into the Arrow array. Args: storage (`pa.StructArray`): PyArrow array to embed. Returns: `pa.StructArray`: Array in the Audio arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. 
""" def path_to_bytes(path): with xopen(path, "rb") as f: bytes_ = f.read() return bytes_ bytes_array = pa.array( [ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) path_array = pa.array( [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), ) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()) return array_cast(storage, self.pa_type) class Image: """Image [`Feature`] to read image data from an image file. Input: The Image feature accepts as input: - A `str`: Absolute path to the image file (i.e. random access is allowed). - A `dict` with the keys: - `path`: String with relative path of the image file to the archive file. - `bytes`: Bytes of the image file. This is useful for archived files with sequential access. - An `np.ndarray`: NumPy array representing an image. - A `PIL.Image.Image`: PIL image object. Args: mode (`str`, *optional*): The mode to convert the image to. If `None`, the native mode of the image is used. decode (`bool`, defaults to `True`): Whether to decode the image data. If `False`, returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`. Examples: ```py >>> from datasets import load_dataset, Image >>> ds = load_dataset("beans", split="train") >>> ds.features["image"] Image(decode=True, id=None) >>> ds[0]["image"] <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x15E52E7F0> >>> ds = ds.cast_column('image', Image(decode=False)) {'bytes': None, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'} ``` """ mode: Optional[str] = None decode: bool = True id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "PIL.Image.Image" pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()}) _type: str = field(default="Image", init=False, repr=False) def __call__(self): return self.pa_type def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict: """Encode example into a format for Arrow. Args: value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`): Data passed as input to Image feature. Returns: `dict` with "path" and "bytes" fields """ if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") if isinstance(value, list): value = np.array(value) if isinstance(value, str): return {"path": value, "bytes": None} elif isinstance(value, bytes): return {"path": None, "bytes": value} elif isinstance(value, np.ndarray): # convert the image array to PNG/TIFF bytes return encode_np_array(value) elif isinstance(value, PIL.Image.Image): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(value) elif value.get("path") is not None and os.path.isfile(value["path"]): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("path")} elif value.get("bytes") is not None or value.get("path") is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("bytes"), "path": value.get("path")} else: raise ValueError( f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." 
) def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image": """Decode example image file into image data. Args: value (`str` or `dict`): A string with the absolute image file path, a dictionary with keys: - `path`: String with absolute or relative image file path. - `bytes`: The bytes of the image file. token_per_repo_id (`dict`, *optional*): To access and decode image files from private repositories on the Hub, you can pass a dictionary repo_id (`str`) -> token (`bool` or `str`). Returns: `PIL.Image.Image` """ if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.") if config.PIL_AVAILABLE: import PIL.Image import PIL.ImageOps else: raise ImportError("To support decoding images, please install 'Pillow'.") if token_per_repo_id is None: token_per_repo_id = {} path, bytes_ = value["path"], value["bytes"] if bytes_ is None: if path is None: raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.") else: if is_local_path(path): image = PIL.Image.open(path) else: source_url = path.split("::")[-1] pattern = ( config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL ) try: repo_id = string_to_dict(source_url, pattern)["repo_id"] token = token_per_repo_id.get(repo_id) except ValueError: token = None download_config = DownloadConfig(token=token) with xopen(path, "rb", download_config=download_config) as f: bytes_ = BytesIO(f.read()) image = PIL.Image.open(bytes_) else: image = PIL.Image.open(BytesIO(bytes_)) image.load() # to avoid "Too many open files" errors if image.getexif().get(PIL.Image.ExifTags.Base.Orientation) is not None: image = PIL.ImageOps.exif_transpose(image) if self.mode and self.mode != image.mode: image = image.convert(self.mode) return image def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary.""" from .features import Value return ( self if self.decode else { "bytes": Value("binary"), "path": Value("string"), } ) def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray: """Cast an Arrow array to the Image arrow storage type. The Arrow types that can be converted to the Image pyarrow storage type are: - `pa.string()` - it must contain the "path" data - `pa.binary()` - it must contain the image bytes - `pa.struct({"bytes": pa.binary()})` - `pa.struct({"path": pa.string()})` - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter - `pa.list(*)` - it must contain the image array data Args: storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`): PyArrow array to cast. Returns: `pa.StructArray`: Array in the Image arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. 
""" if pa.types.is_string(storage.type): bytes_array = pa.array([None] * len(storage), type=pa.binary()) storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_binary(storage.type): path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_struct(storage.type): if storage.type.get_field_index("bytes") >= 0: bytes_array = storage.field("bytes") else: bytes_array = pa.array([None] * len(storage), type=pa.binary()) if storage.type.get_field_index("path") >= 0: path_array = storage.field("path") else: path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_list(storage.type): bytes_array = pa.array( [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), ) path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays( [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() ) return array_cast(storage, self.pa_type) def embed_storage(self, storage: pa.StructArray) -> pa.StructArray: """Embed image files into the Arrow array. Args: storage (`pa.StructArray`): PyArrow array to embed. Returns: `pa.StructArray`: Array in the Image arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. """ def path_to_bytes(path): with xopen(path, "rb") as f: bytes_ = f.read() return bytes_ bytes_array = pa.array( [ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) path_array = pa.array( [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), ) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()) return array_cast(storage, self.pa_type) class TranslationVariableLanguages: """`FeatureConnector` for translations with variable languages per example. Here for compatiblity with tfds. Args: languages (`dict`): A dictionary for each example mapping string language codes to one or more string translations. The languages present may vary from example to example. Returns: - `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`): Language codes sorted in ascending order or plain text translations, sorted to align with language codes. Example: ```python >>> # At construction time: >>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de']) >>> # During data generation: >>> yield { ... 'en': 'the cat', ... 'fr': ['le chat', 'la chatte,'] ... 'de': 'die katze' ... } >>> # Tensor returned : >>> { ... 'language': ['en', 'de', 'fr', 'fr'], ... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'], ... 
} ``` """ languages: Optional[List] = None num_languages: Optional[int] = None id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "dict" pa_type: ClassVar[Any] = None _type: str = field(default="TranslationVariableLanguages", init=False, repr=False) def __post_init__(self): self.languages = sorted(set(self.languages)) if self.languages else None self.num_languages = len(self.languages) if self.languages else None def __call__(self): return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())}) def encode_example(self, translation_dict): lang_set = set(self.languages) if set(translation_dict) == {"language", "translation"}: return translation_dict elif self.languages and set(translation_dict) - lang_set: raise ValueError( f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. translation_tuples = [] for lang, text in translation_dict.items(): if isinstance(text, str): translation_tuples.append((lang, text)) else: translation_tuples.extend([(lang, el) for el in text]) # Ensure translations are in ascending order by language code. languages, translations = zip(*sorted(translation_tuples)) return {"language": languages, "translation": translations} def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """Flatten the TranslationVariableLanguages feature into a dictionary.""" from .features import Sequence, Value return { "language": Sequence(Value("string")), "translation": Sequence(Value("string")), } The provided code snippet includes necessary dependencies for implementing the `encode_nested_example` function. Write a Python function `def encode_nested_example(schema, obj, level=0)` to solve the following problem: Encode a nested example. This is used since some features (in particular ClassLabel) have some logic during encoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded. If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same. Here is the function: def encode_nested_example(schema, obj, level=0): """Encode a nested example. This is used since some features (in particular ClassLabel) have some logic during encoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded. If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same. 
""" # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): if level == 0 and obj is None: raise ValueError("Got None but expected a dictionary instead") return ( {k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema} if obj is not None else None ) elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt: return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj] return list(obj) elif isinstance(schema, Sequence): if obj is None: return None # We allow to reverse list of dict => dict of list for compatiblity with tfds if isinstance(schema.feature, dict): # dict of list to fill list_dict = {} if isinstance(obj, (list, tuple)): # obj is a list of dict for k in schema.feature: list_dict[k] = [encode_nested_example(schema.feature[k], o.get(k), level=level + 1) for o in obj] return list_dict else: # obj is a single dict for k in schema.feature: list_dict[k] = ( [encode_nested_example(schema.feature[k], o, level=level + 1) for o in obj[k]] if k in obj else None ) return list_dict # schema.feature is not a dict if isinstance(obj, str): # don't interpret a string as a list raise ValueError(f"Got a string but expected a list instead: '{obj}'") else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, schema.feature): break # be careful when comparing tensors here if ( not isinstance(first_elmt, list) or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt ): return [encode_nested_example(schema.feature, o, level=level + 1) for o in obj] return list(obj) # Object with special encoding: # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)): return schema.encode_example(obj) if obj is not None else None # Other object should be directly convertible to a native Arrow type (like Translation and Translation) return obj
Encode a nested example. This is used since some features (in particular ClassLabel) have some logic during encoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded. If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same.
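A minimal usage sketch (not part of the original row; the schema and example below are hypothetical, and `Value`/`ClassLabel` are assumed importable from `datasets`) of how `encode_nested_example` behaves when the schema mixes plain values with a `ClassLabel`:

```py
from datasets import ClassLabel, Value

# hypothetical schema: a string column plus a list of class labels
schema = {"text": Value("string"), "labels": [ClassLabel(names=["neg", "pos"])]}
example = {"text": "great movie", "labels": ["pos", "pos"]}

encoded = encode_nested_example(schema, example)
# ClassLabel encodes label strings to ints, so the first list element changes
# and therefore the whole list is encoded:
# {"text": "great movie", "labels": [1, 1]}
```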
17,997
import copy import json import re import sys from collections.abc import Iterable, Mapping from collections.abc import Sequence as SequenceABC from dataclasses import InitVar, dataclass, field, fields from functools import reduce, wraps from operator import mul from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union from typing import Sequence as Sequence_ import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pyarrow.types import pyarrow_hotfix from pandas.api.extensions import ExtensionArray as PandasExtensionArray from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype from .. import config from ..naming import camelcase_to_snakecase, snakecase_to_camelcase from ..table import array_cast from ..utils import experimental, logging from ..utils.py_utils import asdict, first_non_null_value, zip_dict from .audio import Audio from .image import Image, encode_pil_image from .translation import Translation, TranslationVariableLanguages class Sequence: """Construct a list of feature from a single type or a dict of types. Mostly here for compatiblity with tfds. Args: feature: A list of features of a single type or a dictionary of types. length (`int`): Length of the sequence. Example: ```py >>> from datasets import Features, Sequence, Value, ClassLabel >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})}) >>> features {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)} ``` """ feature: Any length: int = -1 id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "list" pa_type: ClassVar[Any] = None _type: str = field(default="Sequence", init=False, repr=False) def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool: """ Check if the object is not None. If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence. """ if obj is None: return False elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, Sequence))): if len(obj) > 0: if schema is None: pass elif isinstance(schema, (list, tuple)): schema = schema[0] else: schema = schema.feature return _check_non_null_non_empty_recursive(obj[0], schema) else: return False else: return True def zip_dict(*dicts): """Iterate over items of dictionaries grouped by their keys.""" for key in unique_values(itertools.chain(*dicts)): # set merge all keys # Will raise KeyError if the dict don't have the same keys yield key, tuple(d[key] for d in dicts) class Audio: """Audio [`Feature`] to extract audio data from an audio file. Input: The Audio feature accepts as input: - A `str`: Absolute path to the audio file (i.e. random access is allowed). - A `dict` with the keys: - `path`: String with relative path of the audio file to the archive file. - `bytes`: Bytes content of the audio file. This is useful for archived files with sequential access. - A `dict` with the keys: - `path`: String with relative path of the audio file to the archive file. - `array`: Array containing the audio sample - `sampling_rate`: Integer corresponding to the sampling rate of the audio sample. 
This is useful for archived files with sequential access. Args: sampling_rate (`int`, *optional*): Target sampling rate. If `None`, the native sampling rate is used. mono (`bool`, defaults to `True`): Whether to convert the audio signal to mono by averaging samples across channels. decode (`bool`, defaults to `True`): Whether to decode the audio data. If `False`, returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`. Example: ```py >>> from datasets import load_dataset, Audio >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train") >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000)) >>> ds[0]["audio"] {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 16000} ``` """ sampling_rate: Optional[int] = None mono: bool = True decode: bool = True id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "dict" pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()}) _type: str = field(default="Audio", init=False, repr=False) def __call__(self): return self.pa_type def encode_example(self, value: Union[str, bytes, dict]) -> dict: """Encode example into a format for Arrow. Args: value (`str` or `dict`): Data passed as input to Audio feature. Returns: `dict` """ try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'.") from err if isinstance(value, str): return {"bytes": None, "path": value} elif isinstance(value, bytes): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes buffer = BytesIO() sf.write(buffer, value["array"], value["sampling_rate"], format="wav") return {"bytes": buffer.getvalue(), "path": None} elif value.get("path") is not None and os.path.isfile(value["path"]): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm"): # "PCM" only has raw audio bytes if value.get("sampling_rate") is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object") if value.get("bytes"): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767 else: bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767 buffer = BytesIO(bytes()) sf.write(buffer, bytes_value, value["sampling_rate"], format="wav") return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path")} elif value.get("bytes") is not None or value.get("path") is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes"), "path": value.get("path")} else: raise ValueError( f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." ) def decode_example( self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict: """Decode example audio file into audio data. 
Args: value (`dict`): A dictionary with keys: - `path`: String with relative audio file path. - `bytes`: Bytes of the audio file. token_per_repo_id (`dict`, *optional*): To access and decode audio files from private repositories on the Hub, you can pass a dictionary repo_id (`str`) -> token (`bool` or `str`) Returns: `dict` """ if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.") path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.") try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err audio_format = xsplitext(path)[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' ) if file is None: token_per_repo_id = token_per_repo_id or {} source_url = path.split("::")[-1] pattern = ( config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL ) try: repo_id = string_to_dict(source_url, pattern)["repo_id"] token = token_per_repo_id[repo_id] except (ValueError, KeyError): token = None download_config = DownloadConfig(token=token) with xopen(path, "rb", download_config=download_config) as f: array, sampling_rate = sf.read(f) else: array, sampling_rate = sf.read(file) array = array.T if self.mono: array = librosa.to_mono(array) if self.sampling_rate and self.sampling_rate != sampling_rate: array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate) sampling_rate = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary.""" from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature.") return { "bytes": Value("binary"), "path": Value("string"), } def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray: """Cast an Arrow array to the Audio arrow storage type. The Arrow types that can be converted to the Audio pyarrow storage type are: - `pa.string()` - it must contain the "path" data - `pa.binary()` - it must contain the audio bytes - `pa.struct({"bytes": pa.binary()})` - `pa.struct({"path": pa.string()})` - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter Args: storage (`Union[pa.StringArray, pa.StructArray]`): PyArrow array to cast. 
Returns: `pa.StructArray`: Array in the Audio arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})` """ if pa.types.is_string(storage.type): bytes_array = pa.array([None] * len(storage), type=pa.binary()) storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_binary(storage.type): path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"): storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()]) elif pa.types.is_struct(storage.type): if storage.type.get_field_index("bytes") >= 0: bytes_array = storage.field("bytes") else: bytes_array = pa.array([None] * len(storage), type=pa.binary()) if storage.type.get_field_index("path") >= 0: path_array = storage.field("path") else: path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()) return array_cast(storage, self.pa_type) def embed_storage(self, storage: pa.StructArray) -> pa.StructArray: """Embed audio files into the Arrow array. Args: storage (`pa.StructArray`): PyArrow array to embed. Returns: `pa.StructArray`: Array in the Audio arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. """ def path_to_bytes(path): with xopen(path, "rb") as f: bytes_ = f.read() return bytes_ bytes_array = pa.array( [ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) path_array = pa.array( [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), ) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()) return array_cast(storage, self.pa_type) class Image: """Image [`Feature`] to read image data from an image file. Input: The Image feature accepts as input: - A `str`: Absolute path to the image file (i.e. random access is allowed). - A `dict` with the keys: - `path`: String with relative path of the image file to the archive file. - `bytes`: Bytes of the image file. This is useful for archived files with sequential access. - An `np.ndarray`: NumPy array representing an image. - A `PIL.Image.Image`: PIL image object. Args: mode (`str`, *optional*): The mode to convert the image to. If `None`, the native mode of the image is used. decode (`bool`, defaults to `True`): Whether to decode the image data. If `False`, returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`. 
Examples: ```py >>> from datasets import load_dataset, Image >>> ds = load_dataset("beans", split="train") >>> ds.features["image"] Image(decode=True, id=None) >>> ds[0]["image"] <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x15E52E7F0> >>> ds = ds.cast_column('image', Image(decode=False)) {'bytes': None, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'} ``` """ mode: Optional[str] = None decode: bool = True id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "PIL.Image.Image" pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()}) _type: str = field(default="Image", init=False, repr=False) def __call__(self): return self.pa_type def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict: """Encode example into a format for Arrow. Args: value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`): Data passed as input to Image feature. Returns: `dict` with "path" and "bytes" fields """ if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") if isinstance(value, list): value = np.array(value) if isinstance(value, str): return {"path": value, "bytes": None} elif isinstance(value, bytes): return {"path": None, "bytes": value} elif isinstance(value, np.ndarray): # convert the image array to PNG/TIFF bytes return encode_np_array(value) elif isinstance(value, PIL.Image.Image): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(value) elif value.get("path") is not None and os.path.isfile(value["path"]): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("path")} elif value.get("bytes") is not None or value.get("path") is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("bytes"), "path": value.get("path")} else: raise ValueError( f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." ) def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image": """Decode example image file into image data. Args: value (`str` or `dict`): A string with the absolute image file path, a dictionary with keys: - `path`: String with absolute or relative image file path. - `bytes`: The bytes of the image file. token_per_repo_id (`dict`, *optional*): To access and decode image files from private repositories on the Hub, you can pass a dictionary repo_id (`str`) -> token (`bool` or `str`). Returns: `PIL.Image.Image` """ if not self.decode: raise RuntimeError("Decoding is disabled for this feature. 
Please use Image(decode=True) instead.") if config.PIL_AVAILABLE: import PIL.Image import PIL.ImageOps else: raise ImportError("To support decoding images, please install 'Pillow'.") if token_per_repo_id is None: token_per_repo_id = {} path, bytes_ = value["path"], value["bytes"] if bytes_ is None: if path is None: raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.") else: if is_local_path(path): image = PIL.Image.open(path) else: source_url = path.split("::")[-1] pattern = ( config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL ) try: repo_id = string_to_dict(source_url, pattern)["repo_id"] token = token_per_repo_id.get(repo_id) except ValueError: token = None download_config = DownloadConfig(token=token) with xopen(path, "rb", download_config=download_config) as f: bytes_ = BytesIO(f.read()) image = PIL.Image.open(bytes_) else: image = PIL.Image.open(BytesIO(bytes_)) image.load() # to avoid "Too many open files" errors if image.getexif().get(PIL.Image.ExifTags.Base.Orientation) is not None: image = PIL.ImageOps.exif_transpose(image) if self.mode and self.mode != image.mode: image = image.convert(self.mode) return image def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary.""" from .features import Value return ( self if self.decode else { "bytes": Value("binary"), "path": Value("string"), } ) def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray: """Cast an Arrow array to the Image arrow storage type. The Arrow types that can be converted to the Image pyarrow storage type are: - `pa.string()` - it must contain the "path" data - `pa.binary()` - it must contain the image bytes - `pa.struct({"bytes": pa.binary()})` - `pa.struct({"path": pa.string()})` - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter - `pa.list(*)` - it must contain the image array data Args: storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`): PyArrow array to cast. Returns: `pa.StructArray`: Array in the Image arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. 
""" if pa.types.is_string(storage.type): bytes_array = pa.array([None] * len(storage), type=pa.binary()) storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_binary(storage.type): path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_struct(storage.type): if storage.type.get_field_index("bytes") >= 0: bytes_array = storage.field("bytes") else: bytes_array = pa.array([None] * len(storage), type=pa.binary()) if storage.type.get_field_index("path") >= 0: path_array = storage.field("path") else: path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_list(storage.type): bytes_array = pa.array( [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), ) path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays( [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() ) return array_cast(storage, self.pa_type) def embed_storage(self, storage: pa.StructArray) -> pa.StructArray: """Embed image files into the Arrow array. Args: storage (`pa.StructArray`): PyArrow array to embed. Returns: `pa.StructArray`: Array in the Image arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. """ def path_to_bytes(path): with xopen(path, "rb") as f: bytes_ = f.read() return bytes_ bytes_array = pa.array( [ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) path_array = pa.array( [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), ) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()) return array_cast(storage, self.pa_type) The provided code snippet includes necessary dependencies for implementing the `decode_nested_example` function. Write a Python function `def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None)` to solve the following problem: Decode a nested example. This is used since some features (in particular Audio and Image) have some logic during decoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded. If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same. Here is the function: def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode a nested example. This is used since some features (in particular Audio and Image) have some logic during decoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded. If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same. 
""" # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): return ( {k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)} if obj is not None else None ) elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if decode_nested_example(sub_schema, first_elmt) != first_elmt: return [decode_nested_example(sub_schema, o) for o in obj] return list(obj) elif isinstance(schema, Sequence): # We allow to reverse list of dict => dict of list for compatiblity with tfds if isinstance(schema.feature, dict): return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature} else: return decode_nested_example([schema.feature], obj) # Object with special decoding: elif isinstance(schema, (Audio, Image)): # we pass the token to read and decode files from private repositories in streaming mode if obj is not None and schema.decode: return schema.decode_example(obj, token_per_repo_id=token_per_repo_id) return obj
Decode a nested example. This is used since some features (in particular Audio and Image) have some logic during decoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded. If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same.
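A brief sketch (hypothetical, not part of the original row) of `decode_nested_example` on a schema containing an `Image` feature; it assumes `Image`/`Value` are importable from `datasets` and that `cat.png` is an existing local image file:

```py
from datasets import Image, Value

schema = {"caption": Value("string"), "image": Image()}
raw = {"caption": "a cat", "image": {"path": "cat.png", "bytes": None}}

decoded = decode_nested_example(schema, raw)
# decoded["image"] is a PIL.Image.Image opened from the local path;
# decoded["caption"] has no special decoding and passes through unchanged.
```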
17,998
import copy import json import re import sys from collections.abc import Iterable, Mapping from collections.abc import Sequence as SequenceABC from dataclasses import InitVar, dataclass, field, fields from functools import reduce, wraps from operator import mul from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union from typing import Sequence as Sequence_ import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pyarrow.types import pyarrow_hotfix from pandas.api.extensions import ExtensionArray as PandasExtensionArray from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype from .. import config from ..naming import camelcase_to_snakecase, snakecase_to_camelcase from ..table import array_cast from ..utils import experimental, logging from ..utils.py_utils import asdict, first_non_null_value, zip_dict from .audio import Audio from .image import Image, encode_pil_image from .translation import Translation, TranslationVariableLanguages logger = logging.get_logger(__name__) _FEATURE_TYPES: Dict[str, FeatureType] = { Value.__name__: Value, ClassLabel.__name__: ClassLabel, Translation.__name__: Translation, TranslationVariableLanguages.__name__: TranslationVariableLanguages, Sequence.__name__: Sequence, Array2D.__name__: Array2D, Array3D.__name__: Array3D, Array4D.__name__: Array4D, Array5D.__name__: Array5D, Audio.__name__: Audio, Image.__name__: Image, } The provided code snippet includes necessary dependencies for implementing the `register_feature` function. Write a Python function `def register_feature( feature_cls: type, feature_type: str, )` to solve the following problem: Register a Feature object using a name and class. This function must be used on a Feature class. Here is the function: def register_feature( feature_cls: type, feature_type: str, ): """ Register a Feature object using a name and class. This function must be used on a Feature class. """ if feature_type in _FEATURE_TYPES: logger.warning( f"Overwriting feature type '{feature_type}' ({_FEATURE_TYPES[feature_type].__name__} -> {feature_cls.__name__})" ) _FEATURE_TYPES[feature_type] = feature_cls
Register a Feature object using a name and class. This function must be used on a Feature class.
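An illustrative sketch of the registration call; the `MyBoundingBox` class and its fields are invented here purely to have something to register, only `register_feature` itself comes from the function above:

```py
from dataclasses import dataclass, field
from typing import Any, ClassVar, Optional

import pyarrow as pa


@dataclass
class MyBoundingBox:
    # hypothetical custom feature stored as 4 float32 coordinates
    id: Optional[str] = None
    dtype: ClassVar[str] = "list"
    pa_type: ClassVar[Any] = pa.list_(pa.float32(), 4)
    _type: str = field(default="MyBoundingBox", init=False, repr=False)

    def __call__(self):
        return self.pa_type


# after this, deserialization by the "_type" name "MyBoundingBox" can find the class
register_feature(MyBoundingBox, "MyBoundingBox")
```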
17,999
import copy import json import re import sys from collections.abc import Iterable, Mapping from collections.abc import Sequence as SequenceABC from dataclasses import InitVar, dataclass, field, fields from functools import reduce, wraps from operator import mul from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union from typing import Sequence as Sequence_ import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pyarrow.types import pyarrow_hotfix from pandas.api.extensions import ExtensionArray as PandasExtensionArray from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype from .. import config from ..naming import camelcase_to_snakecase, snakecase_to_camelcase from ..table import array_cast from ..utils import experimental, logging from ..utils.py_utils import asdict, first_non_null_value, zip_dict from .audio import Audio from .image import Image, encode_pil_image from .translation import Translation, TranslationVariableLanguages class Sequence: """Construct a list of feature from a single type or a dict of types. Mostly here for compatiblity with tfds. Args: feature: A list of features of a single type or a dictionary of types. length (`int`): Length of the sequence. Example: ```py >>> from datasets import Features, Sequence, Value, ClassLabel >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})}) >>> features {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)} ``` """ feature: Any length: int = -1 id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "list" pa_type: ClassVar[Any] = None _type: str = field(default="Sequence", init=False, repr=False) _FEATURE_TYPES: Dict[str, FeatureType] = { Value.__name__: Value, ClassLabel.__name__: ClassLabel, Translation.__name__: Translation, TranslationVariableLanguages.__name__: TranslationVariableLanguages, Sequence.__name__: Sequence, Array2D.__name__: Array2D, Array3D.__name__: Array3D, Array4D.__name__: Array4D, Array5D.__name__: Array5D, Audio.__name__: Audio, Image.__name__: Image, } The provided code snippet includes necessary dependencies for implementing the `generate_from_dict` function. Write a Python function `def generate_from_dict(obj: Any)` to solve the following problem: Regenerate the nested feature object from a deserialized dict. We use the '_type' fields to get the dataclass name to load. generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to :meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that :class:`Value` automatically performs. Here is the function: def generate_from_dict(obj: Any): """Regenerate the nested feature object from a deserialized dict. We use the '_type' fields to get the dataclass name to load. 
generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to :meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that :class:`Value` automatically performs. """ # Nested structures: we allow dict, list/tuples, sequences if isinstance(obj, list): return [generate_from_dict(value) for value in obj] # Otherwise we have a dict or a dataclass if "_type" not in obj or isinstance(obj["_type"], dict): return {key: generate_from_dict(value) for key, value in obj.items()} obj = dict(obj) _type = obj.pop("_type") class_type = _FEATURE_TYPES.get(_type, None) or globals().get(_type, None) if class_type is None: raise ValueError(f"Feature type '{_type}' not found. Available feature types: {list(_FEATURE_TYPES.keys())}") if class_type == Sequence: return Sequence(feature=generate_from_dict(obj["feature"]), length=obj.get("length", -1)) field_names = {f.name for f in fields(class_type)} return class_type(**{k: v for k, v in obj.items() if k in field_names})
Regenerate the nested feature object from a deserialized dict. We use the '_type' fields to get the dataclass name to load. generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to :meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that :class:`Value` automatically performs.
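A small sketch (the serialized dict below is a made-up example) of how `generate_from_dict` rebuilds feature objects from their JSON-style form, using the `"_type"` field to pick the class:

```py
serialized = {
    "tokens": {"feature": {"dtype": "string", "_type": "Value"}, "_type": "Sequence"},
    "label": {"names": ["neg", "pos"], "_type": "ClassLabel"},
}

features = generate_from_dict(serialized)
# {"tokens": Sequence(feature=Value(dtype="string"), length=-1),
#  "label": ClassLabel(names=["neg", "pos"])}
```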
18,000
import copy import json import re import sys from collections.abc import Iterable, Mapping from collections.abc import Sequence as SequenceABC from dataclasses import InitVar, dataclass, field, fields from functools import reduce, wraps from operator import mul from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union from typing import Sequence as Sequence_ import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pyarrow.types import pyarrow_hotfix from pandas.api.extensions import ExtensionArray as PandasExtensionArray from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype from .. import config from ..naming import camelcase_to_snakecase, snakecase_to_camelcase from ..table import array_cast from ..utils import experimental, logging from ..utils.py_utils import asdict, first_non_null_value, zip_dict from .audio import Audio from .image import Image, encode_pil_image from .translation import Translation, TranslationVariableLanguages def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str: """ _arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype. In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))` """ if pyarrow.types.is_null(arrow_type): return "null" elif pyarrow.types.is_boolean(arrow_type): return "bool" elif pyarrow.types.is_int8(arrow_type): return "int8" elif pyarrow.types.is_int16(arrow_type): return "int16" elif pyarrow.types.is_int32(arrow_type): return "int32" elif pyarrow.types.is_int64(arrow_type): return "int64" elif pyarrow.types.is_uint8(arrow_type): return "uint8" elif pyarrow.types.is_uint16(arrow_type): return "uint16" elif pyarrow.types.is_uint32(arrow_type): return "uint32" elif pyarrow.types.is_uint64(arrow_type): return "uint64" elif pyarrow.types.is_float16(arrow_type): return "float16" # pyarrow dtype is "halffloat" elif pyarrow.types.is_float32(arrow_type): return "float32" # pyarrow dtype is "float" elif pyarrow.types.is_float64(arrow_type): return "float64" # pyarrow dtype is "double" elif pyarrow.types.is_time32(arrow_type): return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]" elif pyarrow.types.is_time64(arrow_type): return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]" elif pyarrow.types.is_timestamp(arrow_type): if arrow_type.tz is None: return f"timestamp[{arrow_type.unit}]" elif arrow_type.tz: return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]" else: raise ValueError(f"Unexpected timestamp object {arrow_type}.") elif pyarrow.types.is_date32(arrow_type): return "date32" # pyarrow dtype is "date32[day]" elif pyarrow.types.is_date64(arrow_type): return "date64" # pyarrow dtype is "date64[ms]" elif pyarrow.types.is_duration(arrow_type): return f"duration[{arrow_type.unit}]" elif pyarrow.types.is_decimal128(arrow_type): return f"decimal128({arrow_type.precision}, {arrow_type.scale})" elif pyarrow.types.is_decimal256(arrow_type): return f"decimal256({arrow_type.precision}, {arrow_type.scale})" elif pyarrow.types.is_binary(arrow_type): return "binary" elif pyarrow.types.is_large_binary(arrow_type): return "large_binary" elif pyarrow.types.is_string(arrow_type): return "string" elif pyarrow.types.is_large_string(arrow_type): return "large_string" else: raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.") class Value: """ The `Value` dtypes are as follows: - `null` - `bool` - `int8` - `int16` - `int32` - `int64` - `uint8` - `uint16` - `uint32` - `uint64` - `float16` - `float32` 
(alias float) - `float64` (alias double) - `time32[(s|ms)]` - `time64[(us|ns)]` - `timestamp[(s|ms|us|ns)]` - `timestamp[(s|ms|us|ns), tz=(tzstring)]` - `date32` - `date64` - `duration[(s|ms|us|ns)]` - `decimal128(precision, scale)` - `decimal256(precision, scale)` - `binary` - `large_binary` - `string` - `large_string` Example: ```py >>> from datasets import Features >>> features = Features({'stars': Value(dtype='int32')}) >>> features {'stars': Value(dtype='int32', id=None)} ``` """ dtype: str id: Optional[str] = None # Automatically constructed pa_type: ClassVar[Any] = None _type: str = field(default="Value", init=False, repr=False) def __post_init__(self): if self.dtype == "double": # fix inferred type self.dtype = "float64" if self.dtype == "float": # fix inferred type self.dtype = "float32" self.pa_type = string_to_arrow(self.dtype) def __call__(self): return self.pa_type def encode_example(self, value): if pa.types.is_boolean(self.pa_type): return bool(value) elif pa.types.is_integer(self.pa_type): return int(value) elif pa.types.is_floating(self.pa_type): return float(value) elif pa.types.is_string(self.pa_type): return str(value) else: return value class Array2D(_ArrayXD): """Create a two-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array2D", init=False, repr=False) class Array3D(_ArrayXD): """Create a three-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array3D", init=False, repr=False) class Array4D(_ArrayXD): """Create a four-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array4D", init=False, repr=False) class Array5D(_ArrayXD): """Create a five-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array5D", init=False, repr=False) class _ArrayXDExtensionType(pa.ExtensionType): ndims: Optional[int] = None def __init__(self, shape: tuple, dtype: str): if self.ndims is None or self.ndims <= 1: raise ValueError("You must instantiate an array type with a value for dim that is > 1") if len(shape) != self.ndims: raise ValueError(f"shape={shape} and ndims={self.ndims} don't match") for dim in range(1, self.ndims): if shape[dim] is None: raise ValueError(f"Support only dynamic size on first dimension. 
Got: {shape}") self.shape = tuple(shape) self.value_type = dtype self.storage_dtype = self._generate_dtype(self.value_type) pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}") def __arrow_ext_serialize__(self): return json.dumps((self.shape, self.value_type)).encode() def __arrow_ext_deserialize__(cls, storage_type, serialized): args = json.loads(serialized) return cls(*args) # This was added to pa.ExtensionType in pyarrow >= 13.0.0 def __reduce__(self): return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__()) def __hash__(self): return hash((self.__class__, self.shape, self.value_type)) def __arrow_ext_class__(self): return ArrayExtensionArray def _generate_dtype(self, dtype): dtype = string_to_arrow(dtype) for d in reversed(self.shape): dtype = pa.list_(dtype) # Don't specify the size of the list, since fixed length list arrays have issues # being validated after slicing in pyarrow 0.17.1 return dtype def to_pandas_dtype(self): return PandasArrayExtensionDtype(self.value_type) pa.register_extension_type(Array2DExtensionType((1, 2), "int64")) pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64")) pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64")) pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64")) class Sequence: """Construct a list of feature from a single type or a dict of types. Mostly here for compatiblity with tfds. Args: feature: A list of features of a single type or a dictionary of types. length (`int`): Length of the sequence. Example: ```py >>> from datasets import Features, Sequence, Value, ClassLabel >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})}) >>> features {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)} ``` """ feature: Any length: int = -1 id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "list" pa_type: ClassVar[Any] = None _type: str = field(default="Sequence", init=False, repr=False) FeatureType = Union[ dict, list, tuple, Value, ClassLabel, Translation, TranslationVariableLanguages, Sequence, Array2D, Array3D, Array4D, Array5D, Audio, Image, ] The provided code snippet includes necessary dependencies for implementing the `generate_from_arrow_type` function. Write a Python function `def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType` to solve the following problem: generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for a single field. This is the high-level arrow->datasets type conversion and is inverted by get_nested_type(). This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema Here is the function: def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType: """ generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for a single field. This is the high-level arrow->datasets type conversion and is inverted by get_nested_type(). 
This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema """ if isinstance(pa_type, pa.StructType): return {field.name: generate_from_arrow_type(field.type) for field in pa_type} elif isinstance(pa_type, pa.FixedSizeListType): return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size) elif isinstance(pa_type, pa.ListType): feature = generate_from_arrow_type(pa_type.value_type) if isinstance(feature, (dict, tuple, list)): return [feature] return Sequence(feature=feature) elif isinstance(pa_type, _ArrayXDExtensionType): array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims] return array_feature(shape=pa_type.shape, dtype=pa_type.value_type) elif isinstance(pa_type, pa.DictionaryType): raise NotImplementedError # TODO(thom) this will need access to the dictionary as well (for labels). I.e. to the py_table elif isinstance(pa_type, pa.DataType): return Value(dtype=_arrow_to_datasets_dtype(pa_type)) else: raise ValueError(f"Cannot convert {pa_type} to a Feature type.")
generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for a single field. This is the high-level arrow->datasets type conversion and is inverted by get_nested_type(). This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema
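A short sketch (the pyarrow type below is an arbitrary example) of the arrow-to-datasets direction described above:

```py
import pyarrow as pa

pa_type = pa.struct({"name": pa.string(), "scores": pa.list_(pa.float32())})

feature = generate_from_arrow_type(pa_type)
# {"name": Value(dtype="string"),
#  "scores": Sequence(feature=Value(dtype="float32"), length=-1)}
```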
18,001
import copy import json import re import sys from collections.abc import Iterable, Mapping from collections.abc import Sequence as SequenceABC from dataclasses import InitVar, dataclass, field, fields from functools import reduce, wraps from operator import mul from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union from typing import Sequence as Sequence_ import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pyarrow.types import pyarrow_hotfix from pandas.api.extensions import ExtensionArray as PandasExtensionArray from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype from .. import config from ..naming import camelcase_to_snakecase, snakecase_to_camelcase from ..table import array_cast from ..utils import experimental, logging from ..utils.py_utils import asdict, first_non_null_value, zip_dict from .audio import Audio from .image import Image, encode_pil_image from .translation import Translation, TranslationVariableLanguages pa.register_extension_type(Array2DExtensionType((1, 2), "int64")) pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64")) pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64")) pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64")) def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray: """Build a PyArrow ListArray from a multidimensional NumPy array""" arr = np.array(arr) values = pa.array(arr.flatten(), type=type) for i in range(arr.ndim - 1): n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1) step_offsets = arr.shape[arr.ndim - i - 1] offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32()) values = pa.ListArray.from_arrays(offsets, values) return values def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray: null_mask = np.array([arr is None for arr in l_arr]) null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask)) l_arr = [arr for arr in l_arr if arr is not None] offsets = np.cumsum( [0] + [len(arr) for arr in l_arr], dtype=object ) # convert to dtype object to allow None insertion offsets = np.insert(offsets, null_indices, None) offsets = pa.array(offsets, type=pa.int32()) values = pa.concat_arrays(l_arr) return pa.ListArray.from_arrays(offsets, values) The provided code snippet includes necessary dependencies for implementing the `list_of_np_array_to_pyarrow_listarray` function. Write a Python function `def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray` to solve the following problem: Build a PyArrow ListArray from a possibly nested list of NumPy arrays Here is the function: def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray: """Build a PyArrow ListArray from a possibly nested list of NumPy arrays""" if len(l_arr) > 0: return list_of_pa_arrays_to_pyarrow_listarray( [numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr] ) else: return pa.array([], type=type)
Build a PyArrow ListArray from a possibly nested list of NumPy arrays
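For orientation, here is a self-contained pyarrow sketch of the offsets/values construction that numpy_to_pyarrow_listarray and the list helpers above rely on (the data is illustrative):

import numpy as np
import pyarrow as pa

# a 2x3 array becomes flat values plus row offsets; one ListArray level per extra dimension
arr = np.arange(6, dtype="int64").reshape(2, 3)
values = pa.array(arr.ravel())                        # [0, 1, 2, 3, 4, 5]
offsets = pa.array([0, 3, 6], type=pa.int32())        # boundaries of each row
list_arr = pa.ListArray.from_arrays(offsets, values)  # [[0, 1, 2], [3, 4, 5]]
print(list_arr.type)  # list<item: int64>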
18,002
import copy import json import re import sys from collections.abc import Iterable, Mapping from collections.abc import Sequence as SequenceABC from dataclasses import InitVar, dataclass, field, fields from functools import reduce, wraps from operator import mul from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union from typing import Sequence as Sequence_ import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pyarrow.types import pyarrow_hotfix from pandas.api.extensions import ExtensionArray as PandasExtensionArray from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype from .. import config from ..naming import camelcase_to_snakecase, snakecase_to_camelcase from ..table import array_cast from ..utils import experimental, logging from ..utils.py_utils import asdict, first_non_null_value, zip_dict from .audio import Audio from .image import Image, encode_pil_image from .translation import Translation, TranslationVariableLanguages class _ArrayXDExtensionType(pa.ExtensionType): ndims: Optional[int] = None def __init__(self, shape: tuple, dtype: str): if self.ndims is None or self.ndims <= 1: raise ValueError("You must instantiate an array type with a value for dim that is > 1") if len(shape) != self.ndims: raise ValueError(f"shape={shape} and ndims={self.ndims} don't match") for dim in range(1, self.ndims): if shape[dim] is None: raise ValueError(f"Support only dynamic size on first dimension. Got: {shape}") self.shape = tuple(shape) self.value_type = dtype self.storage_dtype = self._generate_dtype(self.value_type) pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}") def __arrow_ext_serialize__(self): return json.dumps((self.shape, self.value_type)).encode() def __arrow_ext_deserialize__(cls, storage_type, serialized): args = json.loads(serialized) return cls(*args) # This was added to pa.ExtensionType in pyarrow >= 13.0.0 def __reduce__(self): return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__()) def __hash__(self): return hash((self.__class__, self.shape, self.value_type)) def __arrow_ext_class__(self): return ArrayExtensionArray def _generate_dtype(self, dtype): dtype = string_to_arrow(dtype) for d in reversed(self.shape): dtype = pa.list_(dtype) # Don't specify the size of the list, since fixed length list arrays have issues # being validated after slicing in pyarrow 0.17.1 return dtype def to_pandas_dtype(self): return PandasArrayExtensionDtype(self.value_type) pa.register_extension_type(Array2DExtensionType((1, 2), "int64")) pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64")) pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64")) pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64")) def contains_any_np_array(data: Any): """Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray. Args: data (Any): Data. Returns: bool """ if isinstance(data, np.ndarray): return True elif isinstance(data, list): return contains_any_np_array(first_non_null_value(data)[1]) else: return False def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray: """Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray. Args: data (Union[np.ndarray, List]): Data. 
type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type. Returns: pa.ListArray """ if isinstance(data, np.ndarray): return numpy_to_pyarrow_listarray(data, type=type) elif isinstance(data, list): return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data]) The provided code snippet includes necessary dependencies for implementing the `to_pyarrow_listarray` function. Write a Python function `def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array` to solve the following problem: Convert to PyArrow ListArray. Args: data (Any): Sequence, iterable, np.ndarray or pd.Series. pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType. Returns: pyarrow.Array Here is the function: def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array: """Convert to PyArrow ListArray. Args: data (Any): Sequence, iterable, np.ndarray or pd.Series. pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType. Returns: pyarrow.Array """ if contains_any_np_array(data): return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type) else: return pa.array(data, pa_type.storage_dtype)
Convert to PyArrow ListArray. Args: data (Any): Sequence, iterable, np.ndarray or pd.Series. pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType. Returns: pyarrow.Array
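One plausible way to exercise this conversion path is to declare an ArrayXD feature and let the writer encode it; a short sketch assuming the public datasets API (column name and values are illustrative):

from datasets import Array2D, Dataset, Features

# an Array2D feature routes encoding through the ArrayXD extension-type machinery above
features = Features({"matrix": Array2D(shape=(2, 3), dtype="int64")})
ds = Dataset.from_dict({"matrix": [[[0, 1, 2], [3, 4, 5]]]}, features=features)
print(ds.features["matrix"])  # Array2D(shape=(2, 3), dtype='int64')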
18,003
import copy import json import re import sys from collections.abc import Iterable, Mapping from collections.abc import Sequence as SequenceABC from dataclasses import InitVar, dataclass, field, fields from functools import reduce, wraps from operator import mul from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union from typing import Sequence as Sequence_ import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pyarrow.types import pyarrow_hotfix from pandas.api.extensions import ExtensionArray as PandasExtensionArray from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype from .. import config from ..naming import camelcase_to_snakecase, snakecase_to_camelcase from ..table import array_cast from ..utils import experimental, logging from ..utils.py_utils import asdict, first_non_null_value, zip_dict from .audio import Audio from .image import Image, encode_pil_image from .translation import Translation, TranslationVariableLanguages def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool: """Check if a (possibly nested) feature requires decoding. Args: feature (FeatureType): the feature type to be checked ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value of the `decode` attribute of the decodable feature types. Returns: :obj:`bool` """ if isinstance(feature, dict): return any(require_decoding(f) for f in feature.values()) elif isinstance(feature, (list, tuple)): return require_decoding(feature[0]) elif isinstance(feature, Sequence): return require_decoding(feature.feature) else: return hasattr(feature, "decode_example") and (feature.decode if not ignore_decode_attribute else True) The provided code snippet includes necessary dependencies for implementing the `keep_features_dicts_synced` function. Write a Python function `def keep_features_dicts_synced(func)` to solve the following problem: Wrapper to keep the secondary dictionary, which tracks whether keys are decodable, of the :class:`datasets.Features` object in sync with the main dictionary. Here is the function: def keep_features_dicts_synced(func): """ Wrapper to keep the secondary dictionary, which tracks whether keys are decodable, of the :class:`datasets.Features` object in sync with the main dictionary. """ @wraps(func) def wrapper(*args, **kwargs): if args: self: "Features" = args[0] args = args[1:] else: self: "Features" = kwargs.pop("self") out = func(self, *args, **kwargs) assert hasattr(self, "_column_requires_decoding") self._column_requires_decoding = {col: require_decoding(feature) for col, feature in self.items()} return out wrapper._decorator_name_ = "_keep_dicts_synced" return wrapper
Wrapper to keep the secondary dictionary, which tracks whether keys are decodable, of the :class:`datasets.Features` object in sync with the main dictionary.
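The same pattern in miniature, independent of the datasets internals: a decorator that recomputes a derived dictionary after every mutating method (all names here are made up for illustration):

from functools import wraps

def keep_synced(func):
    # recompute derived state after any mutating call, mirroring the wrapper above
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        out = func(self, *args, **kwargs)
        self._lengths = {key: len(str(value)) for key, value in self.items()}
        return out
    return wrapper

class TrackedDict(dict):
    __setitem__ = keep_synced(dict.__setitem__)
    __delitem__ = keep_synced(dict.__delitem__)

d = TrackedDict()
d["name"] = "datasets"
print(d._lengths)  # {'name': 8}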
18,004
import glob import os import shutil import time import warnings from pathlib import Path from typing import List, Optional, Tuple, Union import pyarrow as pa import datasets import datasets.config import datasets.data_files from datasets.naming import filenames_for_dataset_split logger = datasets.utils.logging.get_logger(__name__) def _get_modification_time(cached_directory_path): return (Path(cached_directory_path)).stat().st_mtime def _find_hash_in_cache( dataset_name: str, config_name: Optional[str], cache_dir: Optional[str] ) -> Tuple[str, str, str]: cache_dir = os.path.expanduser(str(cache_dir or datasets.config.HF_DATASETS_CACHE)) cached_datasets_directory_path_root = os.path.join(cache_dir, dataset_name.replace("/", "___")) cached_directory_paths = [ cached_directory_path for cached_directory_path in glob.glob( os.path.join(cached_datasets_directory_path_root, config_name or "*", "*", "*") ) if os.path.isdir(cached_directory_path) ] if not cached_directory_paths: if config_name is not None: cached_directory_paths = [ cached_directory_path for cached_directory_path in glob.glob( os.path.join(cached_datasets_directory_path_root, "*", "*", "*") ) if os.path.isdir(cached_directory_path) ] available_configs = sorted( {Path(cached_directory_path).parts[-3] for cached_directory_path in cached_directory_paths} ) raise ValueError( f"Couldn't find cache for {dataset_name}" + (f" for config '{config_name}'" if config_name else "") + (f"\nAvailable configs in the cache: {available_configs}" if available_configs else "") ) # get most recent cached_directory_path = Path(sorted(cached_directory_paths, key=_get_modification_time)[-1]) version, hash = cached_directory_path.parts[-2:] other_configs = [ Path(cached_directory_path).parts[-3] for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", version, hash)) if os.path.isdir(cached_directory_path) ] if not config_name and len(other_configs) > 1: raise ValueError( f"There are multiple '{dataset_name}' configurations in the cache: {', '.join(other_configs)}" f"\nPlease specify which configuration to reload from the cache, e.g." f"\n\tload_dataset('{dataset_name}', '{other_configs[0]}')" ) config_name = cached_directory_path.parts[-3] warning_msg = ( f"Found the latest cached dataset configuration '{config_name}' at {cached_directory_path} " f"(last modified on {time.ctime(_get_modification_time(cached_directory_path))})." ) logger.warning(warning_msg) return config_name, version, hash
null
18,005
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int def _reorder_dataframe_by_partition(df: "pyspark.sql.DataFrame", new_partition_order: List[int]): df_combined = df.select("*").where(f"part_id = {new_partition_order[0]}") for partition_id in new_partition_order[1:]: partition_df = df.select("*").where(f"part_id = {partition_id}") df_combined = df_combined.union(partition_df) return df_combined def _generate_iterable_examples( df: "pyspark.sql.DataFrame", partition_order: List[int], ): import pyspark def generate_fn(): df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id")) partition_df = _reorder_dataframe_by_partition(df_with_partition_id, partition_order) row_id = 0 # pipeline next partition in parallel to hide latency rows = partition_df.toLocalIterator(prefetchPartitions=True) curr_partition = -1 for row in rows: row_as_dict = row.asDict() part_id = row_as_dict["part_id"] row_as_dict.pop("part_id") if curr_partition != part_id: curr_partition = part_id row_id = 0 yield f"{part_id}_{row_id}", row_as_dict row_id += 1 return generate_fn
null
18,006
import collections import itertools import os from dataclasses import dataclass from typing import List, Optional, Tuple, Type import pandas as pd import pyarrow as pa import pyarrow.json as paj import datasets from datasets.features.features import FeatureType from datasets.tasks.base import TaskTemplate def count_path_segments(path): return path.replace("\\", "/").count("/")
null
18,007
import io import json from itertools import islice from typing import Any, Callable, Dict, List import numpy as np import pyarrow as pa import datasets def text_loads(data: bytes): return data.decode("utf-8")
null
18,008
import io import json from itertools import islice from typing import Any, Callable, Dict, List import numpy as np import pyarrow as pa import datasets def tenbin_loads(data: bytes): from . import _tenbin return _tenbin.decode_buffer(data)
null
18,009
import io import json from itertools import islice from typing import Any, Callable, Dict, List import numpy as np import pyarrow as pa import datasets def msgpack_loads(data: bytes): import msgpack return msgpack.unpackb(data)
null
18,010
import io import json from itertools import islice from typing import Any, Callable, Dict, List import numpy as np import pyarrow as pa import datasets def npy_loads(data: bytes): import numpy.lib.format stream = io.BytesIO(data) return numpy.lib.format.read_array(stream, allow_pickle=False)
null
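A self-contained round trip for the .npy payload format that npy_loads above reads (the data is purely illustrative):

import io
import numpy as np
import numpy.lib.format

# write an array in .npy format to memory, then decode it the same way npy_loads does
buf = io.BytesIO()
numpy.lib.format.write_array(buf, np.arange(4, dtype="int64"), allow_pickle=False)
restored = numpy.lib.format.read_array(io.BytesIO(buf.getvalue()), allow_pickle=False)
print(restored)  # [0 1 2 3]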
18,011
import io import json from itertools import islice from typing import Any, Callable, Dict, List import numpy as np import pyarrow as pa import datasets def npz_loads(data: bytes): return np.load(io.BytesIO(data), allow_pickle=False)
null
18,012
import io import json from itertools import islice from typing import Any, Callable, Dict, List import numpy as np import pyarrow as pa import datasets def cbor_loads(data: bytes): import cbor return cbor.loads(data)
null
18,013
import struct import sys import numpy as np long_to_short = """ float16 f2 float32 f4 float64 f8 int8 i1 int16 i2 int32 i4 int64 i8 uint8 u1 uint16 u2 uint32 u4 uint64 u8 """.strip() long_to_short = [x.split() for x in long_to_short.split("\n")] long_to_short = {x[0]: x[1] for x in long_to_short} The provided code snippet includes necessary dependencies for implementing the `check_acceptable_input_type` function. Write a Python function `def check_acceptable_input_type(data, allow64)` to solve the following problem: Check that the data has an acceptable type for tensor encoding. :param data: array :param allow64: allow 64 bit types Here is the function: def check_acceptable_input_type(data, allow64): """Check that the data has an acceptable type for tensor encoding. :param data: array :param allow64: allow 64 bit types """ for a in data: if a.dtype.name not in long_to_short: raise ValueError("unsupported datatype") if not allow64 and a.dtype.name in ["float64", "int64", "uint64"]: raise ValueError("64 bit datatypes not allowed unless explicitly enabled")
Check that the data has an acceptable type for tensor encoding. :param data: array :param allow64: allow 64 bit types
18,014
import struct import sys import numpy as np def encode_list(l, infos=None): # noqa: E741 """Given a list of arrays, encode them into a list of byte arrays.""" if infos is None: infos = [""] else: if len(l) != len(infos): raise ValueError(f"length of list {l} must match length of infos {infos}") result = [] for i, a in enumerate(l): header = encode_header(a, infos[i % len(infos)]) result += [header, bytedata(a)] return result def encode_chunks(l): # noqa: E741 """Encode a list of chunks into a single byte array, with lengths and magics.""" size = sum(16 + roundup(b.nbytes) for b in l) result = bytearray(size) offset = 0 for b in l: result[offset : offset + 8] = magic_bytes offset += 8 result[offset : offset + 8] = struct.pack("@q", b.nbytes) offset += 8 result[offset : offset + bytelen(b)] = b offset += roundup(bytelen(b)) return result The provided code snippet includes necessary dependencies for implementing the `encode_buffer` function. Write a Python function `def encode_buffer(l, infos=None)` to solve the following problem: Encode a list of arrays into a single byte array. Here is the function: def encode_buffer(l, infos=None): # noqa: E741 """Encode a list of arrays into a single byte array.""" if not isinstance(l, list): raise ValueError("requires list") return encode_chunks(encode_list(l, infos=infos))
Encode a list of arrays into a single byte array.
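For intuition, a self-contained sketch of the length-prefixed framing idea used by encode_chunks above; the magic marker and 64-byte padding are assumptions chosen for illustration, not the library's exact wire format:

import struct
import numpy as np

def frame(payload: bytes, magic: bytes = b"ILLUSTR8") -> bytes:
    # 8-byte marker, 8-byte native-endian length, payload padded to a 64-byte boundary
    padded_len = -(-len(payload) // 64) * 64
    return magic + struct.pack("@q", len(payload)) + payload.ljust(padded_len, b"\0")

chunk = frame(np.arange(3, dtype="int64").tobytes())
print(len(chunk))  # 16-byte header + 64-byte padded payload = 80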
18,015
import struct import sys import numpy as np def write(stream, l, infos=None): # noqa: E741 """Write a list of arrays to a stream, with magics, length, and padding.""" for chunk in encode_list(l, infos=infos): write_chunk(stream, chunk) The provided code snippet includes necessary dependencies for implementing the `save` function. Write a Python function `def save(fname, *args, infos=None, nocheck=False)` to solve the following problem: Save a list of arrays to a file, with magics, length, and padding. Here is the function: def save(fname, *args, infos=None, nocheck=False): """Save a list of arrays to a file, with magics, length, and padding.""" if not nocheck and not fname.endswith(".ten"): raise ValueError("file name should end in .ten") with open(fname, "wb") as stream: write(stream, args, infos=infos)
Save a list of arrays to a file, with magics, length, and padding.
18,016
import filecmp import glob import importlib import inspect import json import os import posixpath import shutil import signal import time import warnings from collections import Counter from contextlib import nullcontext from dataclasses import dataclass, field from pathlib import Path from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union import fsspec import requests import yaml from fsspec.core import url_to_fs from huggingface_hub import DatasetCard, DatasetCardData, HfApi, HfFileSystem from . import config from .arrow_dataset import Dataset from .builder import BuilderConfig, DatasetBuilder from .data_files import ( DEFAULT_PATTERNS_ALL, DataFilesDict, DataFilesList, DataFilesPatternsDict, DataFilesPatternsList, EmptyDatasetError, get_data_patterns, get_metadata_patterns, sanitize_patterns, ) from .dataset_dict import DatasetDict, IterableDatasetDict from .download.download_config import DownloadConfig from .download.download_manager import DownloadMode from .download.streaming_download_manager import StreamingDownloadManager, xbasename, xglob, xjoin from .exceptions import DataFilesNotFoundError, DatasetNotFoundError from .features import Features from .fingerprint import Hasher from .info import DatasetInfo, DatasetInfosDict from .iterable_dataset import IterableDataset from .metric import Metric from .naming import camelcase_to_snakecase, snakecase_to_camelcase from .packaged_modules import ( _EXTENSION_TO_MODULE, _MODULE_SUPPORTS_METADATA, _MODULE_TO_EXTENSIONS, _PACKAGED_DATASETS_MODULES, _hash_python_lines, ) from .splits import Split from .utils import _datasets_server from .utils.deprecation_utils import deprecated from .utils.file_utils import ( OfflineModeIsEnabled, _raise_if_offline_mode_is_enabled, cached_path, head_hf_s3, hf_github_url, init_hf_modules, is_relative_path, relative_to_absolute_path, url_or_path_join, ) from .utils.hub import hf_hub_url from .utils.info_utils import VerificationMode, is_small_dataset from .utils.logging import get_logger from .utils.metadata import MetadataConfigs from .utils.py_utils import get_imports, lock_importable_file from .utils.version import Version def _raise_timeout_error(signum, frame): raise ValueError( "Loading this dataset requires you to execute custom code contained in the dataset repository on your local " "machine. Please set the option `trust_remote_code=True` to permit loading of this dataset." ) The provided code snippet includes necessary dependencies for implementing the `resolve_trust_remote_code` function. 
Write a Python function `def resolve_trust_remote_code(trust_remote_code: Optional[bool], repo_id: str) -> bool` to solve the following problem: Copied and adapted from Transformers https://github.com/huggingface/transformers/blob/2098d343cc4b4b9d2aea84b3cf1eb5a1e610deff/src/transformers/dynamic_module_utils.py#L589 Here is the function: def resolve_trust_remote_code(trust_remote_code: Optional[bool], repo_id: str) -> bool: """ Copied and adapted from Transformers https://github.com/huggingface/transformers/blob/2098d343cc4b4b9d2aea84b3cf1eb5a1e610deff/src/transformers/dynamic_module_utils.py#L589 """ trust_remote_code = trust_remote_code if trust_remote_code is not None else config.HF_DATASETS_TRUST_REMOTE_CODE if trust_remote_code is None: if config.TIME_OUT_REMOTE_CODE > 0: try: signal.signal(signal.SIGALRM, _raise_timeout_error) signal.alarm(config.TIME_OUT_REMOTE_CODE) while trust_remote_code is None: answer = input( f"The repository for {repo_id} contains custom code which must be executed to correctly " f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{repo_id}.\n" f"You can avoid this prompt in future by passing the argument `trust_remote_code=True`.\n\n" f"Do you wish to run the custom code? [y/N] " ) if answer.lower() in ["yes", "y", "1"]: trust_remote_code = True elif answer.lower() in ["no", "n", "0", ""]: trust_remote_code = False signal.alarm(0) except Exception: # OS which does not support signal.SIGALRM raise ValueError( f"The repository for {repo_id} contains custom code which must be executed to correctly " f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{repo_id}.\n" f"Please pass the argument `trust_remote_code=True` to allow custom code to be run." ) else: # For the CI which might put the timeout at 0 _raise_timeout_error(None, None) return trust_remote_code
Copied and adapted from Transformers https://github.com/huggingface/transformers/blob/2098d343cc4b4b9d2aea84b3cf1eb5a1e610deff/src/transformers/dynamic_module_utils.py#L589
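From the caller's side, this resolution is what the trust_remote_code argument (or the HF_DATASETS_TRUST_REMOTE_CODE environment variable) feeds into; a usage sketch with a hypothetical repository id:

from datasets import load_dataset

# opt in explicitly so no interactive prompt (or timeout error) is triggered
ds = load_dataset("username/script_based_dataset", trust_remote_code=True)  # hypothetical repo id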