Using the new conversion script

Files changed:
- README.md: +0 -6
- causal-candidate-extraction/test.parquet: +2 -2
- causal-candidate-extraction/train.parquet: +2 -2
- conversion_script.py: +12 -131
README.md CHANGED

@@ -18,8 +18,6 @@ configs:
   data_files:
   - split: train
     path: causality-detection/train.parquet
-  #- split: dev
-  #  path: causality-detection/dev.parquet
   - split: test
     path: causality-detection/test.parquet
   features:
@@ -37,8 +35,6 @@ configs:
   data_files:
   - split: train
     path: causal-candidate-extraction/train.parquet
-  # - split: dev
-  #   path: causal-candidate-extraction/dev.parquet
   - split: test
     path: causal-candidate-extraction/test.parquet
   features:
@@ -53,8 +49,6 @@ configs:
   data_files:
   - split: train
     path: causality-identification/train.parquet
-  # - split: dev
-  #   path: causal-candidate-extraction/dev.parquet
   - split: test
     path: causality-identification/test.parquet
   features:
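With the commented-out dev split entries removed, each of the three configs now registers only a train and a test split. A minimal loading sketch with the datasets library; the repository id below is a placeholder, since this page does not name it:

from datasets import load_dataset

# "user/dataset" is a hypothetical repo id; substitute the actual dataset repository.
ds = load_dataset("user/dataset", "causality-detection", split="test")
print(ds[0])  # e.g. {"label": 1, "text": "..."}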
causal-candidate-extraction/test.parquet CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6218e7f9801e4d8973d331f066f204bd2981b8507ec7607f47d1534e4836c94e
+size 5826
causal-candidate-extraction/train.parquet CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c01105c84f67465140e6e0b9ac94cf551acec78d4d499a5652ecd030ba0c7121
+size 55701
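Both parquet files are tracked with Git LFS, so the diff only records the new pointer (oid and size), not the table contents; the previous oid and size values were truncated in this view. After fetching the LFS objects, the regenerated split can be sanity-checked with pandas — a quick sketch, assuming the column layout produced by the conversion script below:

import pandas as pd

# The causal-candidate-extraction splits carry "tokens" and "entity" columns
# (see the removed conversion functions in conversion_script.py).
df = pd.read_parquet("causal-candidate-extraction/test.parquet", engine="pyarrow")
print(df.columns.tolist(), len(df))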
conversion_script.py CHANGED

@@ -5,141 +5,22 @@ Run this script as ./conversion_script.py to convert the UniCausal BECauSE files
 """
 
 # 1) Install dependencies:
-# pip install
+# pip install git+https://github.com/TheMrSheldon/causality-toolkit.git
 # 2) Download these files:
 # - https://raw.githubusercontent.com/tanfiona/UniCausal/refs/heads/main/data/grouped/splits/because_test.csv
 # - https://raw.githubusercontent.com/tanfiona/UniCausal/refs/heads/main/data/grouped/splits/because_train.csv
 
-import ast
-import pandas as pd
-from collections import defaultdict
-import re
-from typing import Literal, Union
-
-Split = Literal["train", "dev", "test"]
-RelationCauseEffect = 1
-
-def __extract_entities_and_relations(row):
-    """
-    Converts ["<ARG0>Bla</ARG0> bla <ARG1>Bla</ARG1>", "Bla <ARG0>bla</ARG0> <ARG1>Bla</ARG1>"] to
-    ["Bla", " ", "bla", " ", "Bla"], [[1], [], [2], [], [3]], {relationship: 1, "first": 0, "second": 1}
-    """
-    text, withpairs = row["text"], row["causal_text_w_pairs"]
-    # Step 1: Remove all <SIG> tags
-    withpairs = [re.sub(r"<(/?)SIG(\d+)>", "", t) for t in withpairs]
-    # Step 2: Check that all texts are the same if we remove the ARG tags
-    without_tags = [re.sub(r"<(/?)ARG(\d+)>", "", t) for t in withpairs]
-    assert all(x == text for x in without_tags)  # All texts without tags must be equal to text
-    # Step 3: Iteratively update the tags
-    splits: list[str] = [text]
-    tags: list[list[int]] = [[]]
-    relations: list[tuple[int, int, int]] = []
-
-    def split_at_charidx(idx: int) -> int:
-        for i, s in enumerate(splits):
-            if idx < len(s):
-                if idx == 0:  # We are already at the beginning of a str; no need to split
-                    return i
-                else:  # Need to split
-                    # Concurrently editing and iterating the list; OK here since we return right after
-                    splits.insert(i+1, s[idx:])
-                    splits[i] = s[:idx]
-                    tags.insert(i+1, tags[i].copy())
-                    return i+1
-            idx -= len(s)
-        return len(splits)
-
-    def minify(tags: list[list[int]]) -> tuple[list[list[int]], dict[int, int]]:
-        """
-        Joins entities that denote the same spans.
-        Maps, e.g., [[0], [], [1, 2], [], [3], []] to [[0], [], [1], [], [2], []]
-        """
-        tag2pos = defaultdict(list)  # Maps tags to the positions they occur in
-        for idx, lst in enumerate(tags):
-            for x in lst:
-                tag2pos[x].append(idx)
-        pos2tag = defaultdict(list)
-        for tag, pos in tag2pos.items():
-            pos2tag[tuple(pos)].append(tag)
-        newtags = [[] for _ in range(len(tags))]
-        tagmap: dict[int, int] = dict()
-        for i, (pos, tags) in enumerate(pos2tag.items()):
-            for t in tags:
-                tagmap[t] = i
-            for p in pos:
-                newtags[p].append(i)
-        return newtags, tagmap
-
-    nexttag: int = 0
-    for t in withpairs:
-        curtags: set[int] = set()
-        offset: int = 0
-        tagmap: dict[int, int] = dict()
-        for match in re.finditer(r"(.*?)<(/?)ARG(\d+)>", t):
-            # Put the text span from offset to offset+len(match[1]) into a new entity and set the entity label
-            # appropriately; should do nothing if the entity is already separated.
-            startidx = split_at_charidx(offset)  # Will never split but we can use it to get the index
-            stopidx = split_at_charidx(offset + len(match[1]))
-            for i in range(startidx, stopidx):
-                tags[i].extend(curtags)
-            if match[2] == "":
-                if int(match[3]) not in tagmap:
-                    tagmap[int(match[3])] = nexttag
-                    nexttag += 1
-                curtags.add(tagmap[int(match[3])])
-            else:
-                curtags.remove(tagmap[int(match[3])])
-            offset += len(match[1])
-        # Each entry in withpairs contains exactly one cause (ARG0) and effect (ARG1)
-        relations.append((RelationCauseEffect, tagmap[0], tagmap[1]))
-    tags, tagmap = minify(tags)
-    for i in range(len(relations)):
-        relations[i] = (relations[i][0], tagmap[relations[i][1]], tagmap[relations[i][2]])
-    return (splits, tags, relations)
-
-def convert_for_causality_detection(split: Split) -> None:
-    df = pd.read_csv(f"because_{split}.csv", converters={"causal_text_w_pairs": lambda x: ast.literal_eval(x) if x else []})
-    df["label"] = df["causal_text_w_pairs"].apply(lambda x: 0 if len(x) == 0 else 1)
-    df = df.set_index("index")
-    df = df[["label", "text"]]
-    df.to_parquet(f"./causality-detection/{split}.parquet", engine="pyarrow")
-
-def convert_for_causal_candidate_extraction(split: Split) -> None:
-    def map_list_to_tokens(row):
-        splits, tags, _ = __extract_entities_and_relations(row)
-        return pd.Series((splits, tags))
-    df = pd.read_csv(f"because_{split}.csv", converters={"causal_text_w_pairs": lambda x: ast.literal_eval(x) if x else []})
-    df[["tokens", "entity"]] = df[["text", "causal_text_w_pairs"]].apply(map_list_to_tokens, axis=1)
-    df = df[["index", "tokens", "entity"]].set_index("index")
-    df.to_parquet(f"./causal-candidate-extraction/{split}.parquet", engine="pyarrow")
-
-def convert_for_causality_identification(split: Split) -> None:
-    def map_to_labels(row):
-        splits, tags, relations = __extract_entities_and_relations(row)
-        text: str = ""
-        cur_ents: set[int] = set()
-        for s, t in zip(splits, tags):
-            for newent in (set(t) - cur_ents):
-                text += f"<e{newent+1}>"
-            for oldent in (cur_ents - set(t)):
-                text += f"</e{oldent+1}>"
-            cur_ents = set(t)
-            text += s
-        reldict: list[dict[str, Union[int, str]]] = []
-        for rtype, rfirst, rsecond in relations:
-            reldict.append({"relationship": rtype, "first": f"e{rfirst+1}", "second": f"e{rsecond+1}"})
-        return pd.Series((text, reldict))
-    df = pd.read_csv(f"because_{split}.csv", converters={"causal_text_w_pairs": lambda x: ast.literal_eval(x) if x else []})
-    df[["text", "relations"]] = df[["text", "causal_text_w_pairs"]].apply(map_to_labels, axis=1)
-    df = df[["index", "text", "relations"]].set_index("index")
-    return df.to_parquet(f"./causality-identification/{split}.parquet", engine="pyarrow")
-
-convert_for_causality_detection("test")
-# convert_for_causality_detection("dev")
-convert_for_causality_detection("train")
-convert_for_causal_candidate_extraction("test")
-# convert_for_causal_candidate_extraction("dev")
-convert_for_causal_candidate_extraction("train")
-convert_for_causality_identification("test")
-# convert_for_causality_identification("dev")
-convert_for_causality_identification("train")
+from pathlib import Path
+
+from ctk.data.constants import Task
+from ctk.data.conversion import UniCausal2HF
+
+converter = UniCausal2HF({"train": Path.cwd() / "because_train.csv",
+                          "test": Path.cwd() / "because_test.csv"}, Path.cwd())
+
+converter.convert(Task.CausalityDetection, "train")
+converter.convert(Task.CausalityDetection, "test")
+converter.convert(Task.CausalCandidateExtraction, "train")
+converter.convert(Task.CausalCandidateExtraction, "test")
+converter.convert(Task.CausalityIdentification, "train")
+converter.convert(Task.CausalityIdentification, "test")
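For context on what the removed helper did: each entry of causal_text_w_pairs is the sentence with inline <ARGn>/<SIGn> tags, and __extract_entities_and_relations first strips the <SIGn> signal tags, then verifies that stripping the <ARGn> tags recovers the plain sentence before cutting it into tagged spans. A self-contained sketch of that stripping step, using a made-up example sentence:

import re

tagged = "<ARG0>Ice on the road</ARG0> <SIG0>caused</SIG0> <ARG1>the crash</ARG1>"  # hypothetical example
no_sig = re.sub(r"<(/?)SIG(\d+)>", "", tagged)  # drop signal tags, keep ARG spans
plain = re.sub(r"<(/?)ARG(\d+)>", "", no_sig)   # drop ARG tags -> the plain sentence
assert plain == "Ice on the road caused the crash"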