Datasets:
Duplicate from rag-datasets/rag-mini-bioasq
Co-authored-by: Till Wenke <tillwenke@users.noreply.huggingface.co>
- .gitattributes +57 -0
- .gitignore +2 -0
- README.md +29 -0
- data/passages.parquet/part.0.parquet +3 -0
- data/test.parquet/part.0.parquet +3 -0
- generate.py +93 -0
- raw_data/training11b.json +3 -0
- requirements.txt +50 -0
.gitattributes
ADDED
@@ -0,0 +1,57 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.lz4 filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+# Audio files - uncompressed
+*.pcm filter=lfs diff=lfs merge=lfs -text
+*.sam filter=lfs diff=lfs merge=lfs -text
+*.raw filter=lfs diff=lfs merge=lfs -text
+# Audio files - compressed
+*.aac filter=lfs diff=lfs merge=lfs -text
+*.flac filter=lfs diff=lfs merge=lfs -text
+*.mp3 filter=lfs diff=lfs merge=lfs -text
+*.ogg filter=lfs diff=lfs merge=lfs -text
+*.wav filter=lfs diff=lfs merge=lfs -text
+# Image files - uncompressed
+*.bmp filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.tiff filter=lfs diff=lfs merge=lfs -text
+# Image files - compressed
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
+*.webp filter=lfs diff=lfs merge=lfs -text
+# custom
+raw_data/** filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,2 @@
+/env
+credentials.json
README.md
ADDED
@@ -0,0 +1,29 @@
+---
+license: cc-by-2.5
+task_categories:
+- question-answering
+- sentence-similarity
+language:
+- en
+tags:
+- rag
+- dpr
+- information-retrieval
+- question-answering
+- biomedical
+configs:
+- config_name: text-corpus
+  data_files:
+  - split: passages
+    path: "data/passages.parquet/*"
+- config_name: question-answer-passages
+  data_files:
+  - split: test
+    path: "data/test.parquet/*"
+---
+
+See [here](https://huggingface.co/datasets/enelpol/rag-mini-bioasq) for an updated version without NaNs in the text-corpus config.
+
+You can share what you are using the dataset for in [this Hugging Face discussion](https://discuss.huggingface.co/t/what-are-you-using-the-mini-bioasq-dataset-for/89042?u=tillwenke).
+
+This dataset derives from http://participants-area.bioasq.org/Tasks/11b/trainingDataset/; we generated our own subset from it using `generate.py`.
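For orientation, here is a minimal sketch of how the two configs declared in this README can be loaded with the `datasets` library. The repo id below is the upstream rag-datasets/rag-mini-bioasq dataset that this repository was duplicated from (substitute the id of this copy if needed); the column names follow what `generate.py` writes.

```python
from datasets import load_dataset

# Passage corpus: config "text-corpus", split "passages" (see the configs block above).
passages = load_dataset("rag-datasets/rag-mini-bioasq", "text-corpus", split="passages")

# Questions with answers and the ids of their relevant passages: config "question-answer-passages", split "test".
qa = load_dataset("rag-datasets/rag-mini-bioasq", "question-answer-passages", split="test")

print(passages[0]["passage"][:200])                       # abstract text, as written by generate.py
print(qa[0]["question"], qa[0]["relevant_passage_ids"])   # question plus PubMed ids of its relevant passages
```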
data/passages.parquet/part.0.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93afd23506e6ab65451a43a411fd6481b6378a00fef7983b44cc8c3ddbb37c84
+size 24470604
data/test.parquet/part.0.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12679e03615d16b423b5554f8b2a6eb334f4cad89e62d202da5cc43cb9aeafb0
+size 1290026
generate.py
ADDED
@@ -0,0 +1,93 @@
+import json
+
+import pandas as pd
+from Bio import Entrez
+from retry import retry
+from tqdm import tqdm
+import dask.dataframe as dd
+
+# provide your NIH credentials
+# read from .json file
+with open("credentials.json") as f:
+    credentials = json.load(f)
+Entrez.email = credentials["email"]
+Entrez.api_key = credentials["api_key"]
+
+
+# change output file names here if necessary
+RAW_EVALUATION_DATASET = "./raw_data/training11b.json"
+PATH_TO_PASSAGE_DATASET = "./data/passages.parquet"
+PATH_TO_EVALUATION_DATASET = "./data/test.parquet"
+
+# only use questions that have at most MAX_PASSAGES passages to control the size of the dataset
+# set to None to use all questions
+MAX_PASSAGES = None
+
+
+@retry()
+def get_abstract(passage_id):
+    with Entrez.efetch(
+        db="pubmed", id=passage_id, rettype="abstract", retmode="text"
+    ) as response:
+        # get only the abstract - no metadata
+        r = response.read()
+        r = r.split("\n\n")
+        abstract = max(r, key=len)
+    return abstract
+
+
+if __name__ == "__main__":
+    # load the training data containing the questions, answers and the ids of relevant passages,
+    # but lacking the actual passages
+    with open(RAW_EVALUATION_DATASET) as f:
+        eval_data = json.load(f)["questions"]
+
+    eval_df = pd.DataFrame(eval_data, columns=["body", "documents", "ideal_answer"])
+    eval_df = eval_df.rename(
+        columns={
+            "body": "question",
+            "documents": "relevant_passage_ids",
+            "ideal_answer": "answer",
+        }
+    )
+    eval_df.answer = eval_df.answer.apply(lambda x: x[0])
+    # get abstract id from url
+    eval_df.relevant_passage_ids = eval_df.relevant_passage_ids.apply(
+        lambda x: [int(url.split("/")[-1]) for url in x]
+    )
+    if MAX_PASSAGES:
+        eval_df["passage_count"] = eval_df.relevant_passage_ids.apply(lambda x: len(x))
+        eval_df = eval_df[eval_df.passage_count <= MAX_PASSAGES].drop(columns=["passage_count"])
+
+    # remove duplicate passage ids
+    eval_df.relevant_passage_ids = eval_df.relevant_passage_ids.apply(lambda x: set(x))
+    eval_df.relevant_passage_ids = eval_df.relevant_passage_ids.apply(lambda x: list(x))
+
+    # get all passage ids that are relevant
+    passage_ids = set().union(*eval_df.relevant_passage_ids)
+    passage_ids = list(passage_ids)
+    passages = pd.DataFrame(index=passage_ids)
+
+    for i, passage_id in enumerate(tqdm(passages.index)):
+        passages.loc[passage_id, "passage"] = get_abstract(passage_id)
+
+        # intermediate save
+        if i % 1000 == 0:
+            passages.index.name = "id"
+            dd.from_pandas(passages, npartitions=1).to_parquet(PATH_TO_PASSAGE_DATASET)
+
+
+    # filter out the passages whose pmids (pubmed ids) were not available
+    unavailable_passages = passages[passages["passage"] == "1. "]
+    passages = passages[passages["passage"] != "1. "]
+    passages.index.name = "id"
+    dd.from_pandas(passages, npartitions=1).to_parquet(PATH_TO_PASSAGE_DATASET)
+
+    # remove passages from evaluation dataset whose abstract could not be retrieved from pubmed website
+    unavailable_ids = unavailable_passages.index.tolist()
+    eval_df["relevant_passage_ids"] = eval_df["relevant_passage_ids"].apply(
+        lambda x: [i for i in x if i not in unavailable_ids]
+    )
+    eval_df.index.name = "id"
+    eval_df = eval_df[["question", "answer", "relevant_passage_ids"]]
+    dd.from_pandas(eval_df, npartitions=1).to_parquet(PATH_TO_EVALUATION_DATASET)
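After running `generate.py`, its two parquet outputs can be sanity-checked locally. A minimal sketch, assuming the `data/` directories exist and using the column names the script writes; the check itself is not part of the original pipeline.

```python
import pandas as pd

# Dask writes each dataset as a directory of part files; pandas can read the directory directly.
passages = pd.read_parquet("./data/passages.parquet").reset_index()
test = pd.read_parquet("./data/test.parquet").reset_index()

# Every relevant_passage_id referenced by a question should resolve to a retrieved abstract.
referenced = {pid for ids in test["relevant_passage_ids"] for pid in ids}
missing = referenced - set(passages["id"])
print(f"{len(passages)} passages, {len(test)} questions, {len(missing)} unresolved passage ids")
```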
raw_data/training11b.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6df656862ca860efc355c7805d07ddca700d64ecc3785c519a49afccaaeeac98
+size 37639648
requirements.txt
ADDED
@@ -0,0 +1,50 @@
+asttokens==2.4.1
+backcall==0.2.0
+biopython==1.81
+click==8.1.7
+cloudpickle==3.0.0
+comm==0.1.4
+dask==2023.10.1
+debugpy==1.8.0
+decorator==5.1.1
+exceptiongroup==1.1.3
+executing==2.0.0
+fsspec==2023.10.0
+importlib-metadata==6.8.0
+ipykernel==6.26.0
+ipython==8.16.1
+jedi==0.19.1
+jupyter_client==8.5.0
+jupyter_core==5.4.0
+locket==1.0.0
+matplotlib-inline==0.1.6
+nest-asyncio==1.5.8
+numpy==1.26.1
+packaging==23.2
+pandas==2.1.2
+parso==0.8.3
+partd==1.4.1
+pexpect==4.8.0
+pickleshare==0.7.5
+platformdirs==3.11.0
+prompt-toolkit==3.0.39
+psutil==5.9.6
+ptyprocess==0.7.0
+pure-eval==0.2.2
+py==1.11.0
+pyarrow==13.0.0
+Pygments==2.16.1
+python-dateutil==2.8.2
+pytz==2023.3.post1
+PyYAML==6.0.1
+pyzmq==25.1.1
+retry==0.9.2
+six==1.16.0
+stack-data==0.6.3
+toolz==0.12.0
+tornado==6.3.3
+tqdm==4.66.1
+traitlets==5.12.0
+tzdata==2023.3
+wcwidth==0.2.8
+zipp==3.17.0