---
dataset_info:
- config_name: corpus
  features:
  - name: docid
    dtype: string
  - name: title
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 9772890386
    num_examples: 3185312
  download_size: 4854439740
  dataset_size: 9772890386
- config_name: reddit_tomt
  features:
  - name: query_id
    dtype: string
  - name: query
    dtype: string
  - name: positive_passages
    list:
    - name: docid
      dtype: string
    - name: text
      dtype: string
    - name: title
      dtype: string
  - name: hard_negative_passages
    sequence: 'null'
  - name: negative_passages
    list:
    - name: docid
      dtype: string
    - name: text
      dtype: string
    - name: title
      dtype: string
  splits:
  - name: train
    num_bytes: 8080604019
    num_examples: 9455
  - name: val
    num_bytes: 995934250
    num_examples: 1186
  - name: test
    num_bytes: 12058548
    num_examples: 1180
  download_size: 4197412538
  dataset_size: 9088596817
- config_name: trec_tot
  features:
  - name: query_id
    dtype: string
  - name: query
    dtype: string
  - name: positive_passages
    list:
    - name: docid
      dtype: string
    - name: text
      dtype: string
    - name: title
      dtype: string
  - name: hard_negative_passages
    sequence: 'null'
  - name: negative_passages
    list:
    - name: docid
      dtype: string
    - name: text
      dtype: string
    - name: title
      dtype: string
  splits:
  - name: train
    num_bytes: 266489376
    num_examples: 150
  - name: dev1
    num_bytes: 261848666
    num_examples: 150
  - name: dev2
    num_bytes: 1539542
    num_examples: 150
  download_size: 231138975
  dataset_size: 529877584
configs:
- config_name: corpus
  data_files:
  - split: train
    path: corpus/train-*
- config_name: reddit_tomt
  data_files:
  - split: train
    path: reddit_tomt/train-*
  - split: val
    path: reddit_tomt/val-*
  - split: test
    path: reddit_tomt/test-*
- config_name: trec_tot
  data_files:
  - split: train
    path: trec_tot/train-*
  - split: dev1
    path: trec_tot/dev1-*
  - split: dev2
    path: trec_tot/dev2-*