Datasets:
David Wadden committed
Commit · fd02f3e
1 Parent(s): 9c66dd8

Fix the entailment script.

Browse files · scifact_entailment.py (+58, -47)
scifact_entailment.py
CHANGED

What the commit changes:

- Imports (hunk @@ -2,9 +2,8 @@): the `import json` line that sat above `import datasets` is removed and re-added directly below it.
- Module level (hunk @@ -20,6 +19,12 @@): a `_URL` constant pointing at the SciFact release tarball and a small `flatten` helper are added after the dataset description.
- `_info` (hunk @@ -43,17 +48,15 @@): the feature dictionary is rewritten. The old schema, which included a `"cited_doc_ids": datasets.features.Sequence(datasets.Value("int32"))` feature (the claim's "cited documents"), is replaced by the flat claim/abstract/verdict/evidence schema shown below.
- `_split_generators` and `_generate_examples` (hunk @@ -73,74 +76,82 @@): a `_read_tar_file` helper is added, the data are now downloaded from `_URL`, and the TEST split generator is dropped. The old `_generate_examples(self, split)` loaded claims with `datasets.load_dataset("bigbio/scifact", "scifact_claims_source", split=split)`, keyed evidence on `x["doc_id"]` from `claim["evidences"]`, and joined each cited abstract into a single string whose sentences were prefixed with bracketed ids ("[0] ... [1] ..."). The new `_generate_examples(self, claims, corpus, split)` receives the parsed claims and corpus from `_split_generators`, keeps the abstract as a list of stripped sentences, and records evidence as a flat list of sentence indices.

The changed regions of the file, as they read after this commit (line numbers refer to the new version):
Lines 2-9 (imports):

using evidence from the cited abstracts. Formatted as a paragraph-level entailment task."""


import datasets
import json


_CITATION = """\

Lines 19-30 (`_URL` and `flatten` helper added after `_DESCRIPTION`):

SciFact, a dataset of 1.4K expert-written scientific claims paired with evidence-containing abstracts, and annotated with labels and rationales.
"""

_URL = "https://scifact.s3-us-west-2.amazonaws.com/release/latest/data.tar.gz"


def flatten(xss):
    return [x for xs in xss for x in xs]


class ScifactEntailmentConfig(datasets.BuilderConfig):
    """BuilderConfig for Scifact"""
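For reference, `flatten` is only used below to merge the per-evidence-entry sentence lists into one flat list of indices. A quick illustration (the indices here are made up):

# Illustrative only: concatenates nested lists, e.g. evidence sentence ids.
assert flatten([[0, 1], [4]]) == [0, 1, 4]
assert flatten([]) == []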
Lines 48-62 (`_info`: the new feature schema):

    def _info(self):
        # TODO(scifact): Specifies the datasets.DatasetInfo object

        features = {
            "claim_id": datasets.Value("int32"),
            "claim": datasets.Value("string"),
            "abstract_id": datasets.Value("int32"),
            "title": datasets.Value("string"),
            "abstract": datasets.features.Sequence(datasets.Value("string")),
            "verdict": datasets.Value("string"),
            "evidence": datasets.features.Sequence(datasets.Value("int32")),
        }

        return datasets.DatasetInfo(
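To make the schema concrete, here is the shape of a single yielded instance. This is an illustration only: the values are placeholders, not rows from the dataset; the SUPPORT/CONTRADICT label strings and the reading of `evidence` as indices into the abstract's sentence list come from the SciFact release format, not from this diff.

# Placeholder example of one instance, matching the `features` dict above.
example_instance = {
    "claim_id": 1,                  # claim["id"]
    "claim": "<claim text>",
    "abstract_id": 4983,            # doc_id of one cited abstract
    "title": "<title of the cited abstract>",
    "abstract": ["<sentence 0>", "<sentence 1>", "<sentence 2>"],
    "verdict": "SUPPORT",           # or "CONTRADICT"; "NEI" when the abstract has no evidence
    "evidence": [0, 2],             # sentence indices into "abstract"
}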
Lines 76-122 (end of `_info`, the new `_read_tar_file` helper, and `_split_generators`):

            citation=_CITATION,
        )

    @staticmethod
    def _read_tar_file(f):
        res = []
        for row in f:
            this_row = json.loads(row.decode("utf-8"))
            res.append(this_row)

        return res

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # TODO(scifact): Downloads the data and defines the splits
        # dl_manager is a datasets.download.DownloadManager that can be used to
        # download and extract URLs
        archive = dl_manager.download(_URL)
        for path, f in dl_manager.iter_archive(archive):
            if path == "data/corpus.jsonl":
                corpus = self._read_tar_file(f)
                corpus = {x["doc_id"]: x for x in corpus}
            elif path == "data/claims_train.jsonl":
                claims_train = self._read_tar_file(f)
            elif path == "data/claims_dev.jsonl":
                claims_validation = self._read_tar_file(f)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "claims": claims_train,
                    "corpus": corpus,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "claims": claims_validation,
                    "corpus": corpus,
                    "split": "validation",
                },
            ),
        ]
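For orientation, the shapes of the JSONL records that `_read_tar_file` returns, as implied by the field accesses in this script; the concrete values are invented for illustration.

# Implied by the field accesses in this script; values are invented.
corpus_row = {
    "doc_id": 4983,
    "title": "<abstract title>",
    "abstract": ["<sentence 0>", "<sentence 1>"],
}
claim_row = {
    "id": 13,
    "claim": "<claim text>",
    "cited_doc_ids": [4983],
    "evidence": {
        "4983": [  # keyed by doc id as a string, hence the int(k) cast in _generate_examples
            {"sentences": [0], "label": "SUPPORT"},
        ]
    },
}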
Lines 124-157 (`_generate_examples`):

    def _generate_examples(self, claims, corpus, split):
        """Yields examples."""
        # Loop over claims and put evidence together with claim.
        id_ = -1  # Will increment to 0 on first iteration.
        for claim in claims:
            evidence = {int(k): v for k, v in claim["evidence"].items()}
            for cited_doc_id in claim["cited_doc_ids"]:
                cited_doc = corpus[cited_doc_id]
                abstract_sents = [sent.strip() for sent in cited_doc["abstract"]]

                if cited_doc_id in evidence:
                    this_evidence = evidence[cited_doc_id]
                    verdict = this_evidence[0][
                        "label"
                    ]  # Can take first evidence since all labels are same.
                    evidence_sents = flatten(
                        [entry["sentences"] for entry in this_evidence]
                    )
                else:
                    verdict = "NEI"
                    evidence_sents = []

                instance = {
                    "claim_id": claim["id"],
                    "claim": claim["claim"],
                    "abstract_id": cited_doc_id,
                    "title": cited_doc["title"],
                    "abstract": abstract_sents,
                    "verdict": verdict,
                    "evidence": evidence_sents,
                }

                id_ += 1
                yield id_, instance
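A minimal loading sketch, assuming a local copy of this script and a `datasets` release that still supports script-based loaders (recent versions may require `trust_remote_code=True` or no longer accept scripts); the path and split name are illustrative, and a config name would also be needed if the script defines more than one config.

import datasets

# Hypothetical local path to this loader script.
ds = datasets.load_dataset("./scifact_entailment.py", split="validation")

for row in ds.select(range(3)):
    print(row["claim_id"], row["verdict"], row["evidence"])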