---
dataset_info:
  features:
    - name: inputs
      dtype: string
    - name: targets
      dtype: string
    - name: _template_idx
      dtype: int64
    - name: _task_source
      dtype: string
    - name: _task_name
      dtype: string
    - name: _template_type
      dtype: string
    - name: messages
      list:
        - name: content
          dtype: string
        - name: role
          dtype: string
  splits:
    - name: train
      num_bytes: 65571665.16636661
      num_examples: 25000
    - name: val
      num_bytes: 2622866.606654664
      num_examples: 1000
    - name: test
      num_bytes: 13114333.03327332
      num_examples: 5000
  download_size: 44610750
  dataset_size: 81308864.80629459
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
      - split: val
        path: data/val-*
      - split: test
        path: data/test-*
---
from datasets import load_dataset, DatasetDict
def length_filter(example):
    """Predicate for `datasets.Dataset.filter`: keep short-enough examples.

    Args:
        example: Mapping with string fields "inputs" and "targets".

    Returns:
        True when "inputs" is at most 20,000 characters AND "targets" is
        at most 2,500 characters; False otherwise.
    """
    input_ok = len(example["inputs"]) <= 20_000
    target_ok = len(example["targets"]) <= 2_500
    return input_ok and target_ok
# Build a deterministic 25k/1k/5k train/val/test split of FLAN v2 and publish it.
# Steps: load the converted train split, drop over-length examples, shuffle with
# a fixed seed for reproducibility, carve out non-overlapping contiguous ranges,
# and push the resulting DatasetDict to the Hub.
dataset = load_dataset("ai2-adapt-dev/flan_v2_converted", split='train')
dataset = dataset.filter(length_filter)
dataset = dataset.shuffle(seed=42)

# Half-open [start, stop) index ranges into the shuffled dataset; the split
# sizes here (25000/1000/5000) match the dataset-card metadata above.
split_ranges = {
    "train": (0, 25_000),
    "val": (25_000, 26_000),
    "test": (26_000, 31_000),
}
dataset = DatasetDict(
    {name: dataset.select(range(start, stop)) for name, (start, stop) in split_ranges.items()}
)
dataset.push_to_hub("kh4dien/flan_v2")