added causal-candidate-extraction and the conversion script
Browse files
README.md
CHANGED
|
@@ -37,6 +37,14 @@ configs:
|
|
| 37 |
path: causal-candidate-extraction/train.parquet
|
| 38 |
- split: test
|
| 39 |
path: causal-candidate-extraction/test.parquet
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
- config_name: causality identification
|
| 41 |
data_files:
|
| 42 |
- split: train
|
|
|
|
| 37 |
path: causal-candidate-extraction/train.parquet
|
| 38 |
- split: test
|
| 39 |
path: causal-candidate-extraction/test.parquet
|
| 40 |
+
features:
|
| 41 |
+
- name: index
|
| 42 |
+
dtype: string
|
| 43 |
+
- name: tokens
|
| 44 |
+
sequence: dtype
|
| 45 |
+
- name: entity
|
| 46 |
+
sequence:
|
| 47 |
+
sequence: int32
|
| 48 |
- config_name: causality identification
|
| 49 |
data_files:
|
| 50 |
- split: train
|
causal-candidate-extraction/test.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ad5e0271e92b904d5411453ae3f5643540eabf721accdc7ec4e78c91b4757da2
|
| 3 |
+
size 49728
|
causal-candidate-extraction/train.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:36140219a24ab363966ed55377daea38fc4ce1d9e18bb8bd7787f698ec7facc5
|
| 3 |
+
size 252771
|
causality-detection/test.parquet
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c46d1c31537cb79ffe0eda84c1530842dd28757dc98eeaf173e146d05b4a9151
|
| 3 |
+
size 60652
|
causality-detection/train.parquet
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f80e8874bac0355199ac4d72155ae5d39acc531dc7bd8d455f937d8240daf5a7
|
| 3 |
+
size 331135
|
causality-identification/train.parquet
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:647fdcf674408b10e150f428041f756f79bc6914149b1a1e91e1a1386b046971
|
| 3 |
+
size 377299
|
conversion_script.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
Run this script as ./conversion_script.py to convert the SCITE files to HF-compatible parquet files.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import re
|
| 8 |
+
from typing import Literal
|
| 9 |
+
|
| 10 |
+
import pandas as pd
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
Split = Literal["train", "test"]
|
| 14 |
+
|
| 15 |
+
def convert_for_causality_detection(split: Split) -> None:
    """Build the binary causality-detection config for one split.

    Reads ``{split}-corpus.xml``, maps the label to 0 for "Non-Causal" and 1
    for anything else, strips the inline ``<eN>``/``</eN>`` entity markers from
    the sentence text, and writes ``./causality-detection/{split}.parquet``.
    """
    frame = pd.read_xml(f"{split}-corpus.xml")
    # "Non-Causal" -> 0; every causal label variant -> 1.
    frame["label"] = frame["label"].map(lambda lbl: 0 if lbl == "Non-Causal" else 1)
    # Remove the entity tags (<e1> ... </e1> etc.) to leave plain text.
    tag_pattern = re.compile(r'</?e\d+>')
    frame["text"] = frame["sentence"].map(lambda sentence: tag_pattern.sub("", sentence))
    # Namespace the row ids so they stay unique across datasets/splits.
    frame["index"] = frame["id"].map(lambda row_id: f"scite_{split}_{row_id}")
    frame = frame.set_index("index")[["label", "text"]]
    frame.to_parquet(f"./causality-detection/{split}.parquet", engine="pyarrow")
| 23 |
+
|
| 24 |
+
def convert_for_causal_candidate_extraction(split: Split) -> None:
    """Build the causal-candidate-extraction config for one split.

    Reads ``{split}-corpus.xml``, splits each sentence at the inline
    ``<eN>``/``</eN>`` markers into text segments, records which entity ids are
    open over each segment, and writes
    ``./causal-candidate-extraction/{split}.parquet``.
    """

    def map_to_tokens(text: str) -> pd.Series:
        """Return a Series of (segments, per-segment open entity ids)."""
        segments: list[str] = []
        # One list of currently-open entity ids per segment; annotated as
        # list[list[int]] because list(...) snapshots are appended (the
        # original annotation said list[set[int]], which was wrong).
        entity_ids: list[list[int]] = []
        open_ids: set[int] = set()
        last_end = 0
        for match in re.finditer(r"(.*?)<(/?)e(\d+)>", text):
            segments.append(match[1])
            # sorted() makes the snapshot deterministic (set order is not).
            entity_ids.append(sorted(open_ids))
            if match[2] == "":  # opening tag <eN>
                open_ids.add(int(match[3]))
            else:  # closing tag </eN>
                open_ids.remove(int(match[3]))
            last_end = match.end()
        # Bug fix: the original loop dropped any text after the final tag
        # (and whole sentences containing no tags at all). Keep that tail as
        # a final segment. NOTE(review): assumes trailing text belongs in the
        # token sequence — confirm against the dataset README schema.
        if last_end < len(text):
            segments.append(text[last_end:])
            entity_ids.append(sorted(open_ids))
        return pd.Series((segments, entity_ids))

    df = pd.read_xml(f"{split}-corpus.xml")
    df[["tokens", "entity"]] = df["sentence"].apply(map_to_tokens)
    # Namespace the row ids so they stay unique across datasets/splits.
    df["index"] = df["id"].apply(lambda x: f"scite_{split}_{x}")
    df = df[["index", "tokens", "entity"]].set_index("index")
    # (Removed leftover debug print(df).)
    df.to_parquet(f"./causal-candidate-extraction/{split}.parquet", engine="pyarrow")
| 43 |
+
|
| 44 |
+
def convert_for_causality_identification(split: Split) -> None:
    """Build the causality-identification config for one split.

    Reads ``{split}-corpus.xml``, converts each "Cause-Effect(...)" label into
    a list of ``{"relationship": 1, "first": ..., "second": ...}`` records
    ("Non-Causal" rows become an empty list), and writes
    ``./causality-identification/{split}.parquet``.
    """

    def map_label(label: str) -> list:
        # Non-causal sentences carry no relation pairs.
        if label == "Non-Causal":
            return []
        # Strip the "Cause-Effect(" prefix and the trailing ")", then walk
        # the "),("-separated entity pairs.
        inner = label[len("Cause-Effect("):-1]
        relations = []
        for pair in inner.split("),("):
            first, second = pair.strip('()').split(',')
            relations.append({"relationship": 1, "first": first, "second": second})
        return relations

    df = pd.read_xml(f"{split}-corpus.xml", dtype_backend="pyarrow")
    df["relations"] = df["label"].apply(map_label)
    df["text"] = df["sentence"]
    # Namespace the row ids so they stay unique across datasets/splits.
    df["index"] = df["id"].apply(lambda x: f"scite_{split}_{x}")
    df = df.set_index("index")[["text", "relations"]]
    df.to_parquet(f"./causality-identification/{split}.parquet", engine="pyarrow")
| 61 |
+
|
| 62 |
+
# Guard the conversions so importing this module (e.g. to reuse a converter)
# no longer triggers six file reads/writes as a side effect; running it as a
# script (./conversion_script.py) behaves exactly as before, in the same order.
if __name__ == "__main__":
    for convert in (
        convert_for_causality_detection,
        convert_for_causal_candidate_extraction,
        convert_for_causality_identification,
    ):
        convert("test")
        convert("train")
|