LisaWang0306 committed on
Commit
d8d738d
·
1 Parent(s): 7b933a3

Create rel-text.py

Browse files
Files changed (1) hide show
  1. rel-text.py +96 -0
rel-text.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import datasets
3
+ import pandas as pd
4
+
5
class relTextConfig(datasets.BuilderConfig):
    """BuilderConfig for the rel-text dataset.

    Carries the feature schema and the download URL for one configuration
    ("pairs", "source", or "target").
    """

    def __init__(self, features, data_url, **kwargs):
        # Forward everything else (name, version, ...) to the base config.
        super().__init__(**kwargs)
        self.features = features  # mapping of column name -> datasets.Value
        self.data_url = data_url  # file or directory URL on the Hub
10
+
11
+
12
class relText(datasets.GeneratorBasedBuilder):
    """Loading script for the matchbench/rel-text entity-matching dataset.

    Three configurations:
      - "pairs":  candidate pairs (ltable_id, rtable_id, label) stored as
                  train/valid/test CSV files under ``data_url``.
      - "source": free-text records, one per line of ``left.txt``.
      - "target": structured publication records from ``right.csv``.
    """

    BUILDER_CONFIGS = [
        relTextConfig(
            name="pairs",
            features={
                "ltable_id": datasets.Value("string"),
                "rtable_id": datasets.Value("string"),
                "label": datasets.Value("string"),
            },
            # Directory prefix; per-split file names are appended in
            # _split_generators.
            data_url="https://huggingface.co/datasets/matchbench/rel-text/resolve/main/",
        ),
        relTextConfig(
            name="source",
            features={
                "content": datasets.Value("string"),
            },
            data_url="https://huggingface.co/datasets/matchbench/rel-text/resolve/main/left.txt",
        ),
        relTextConfig(
            name="target",
            features={
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "authors": datasets.Value("string"),
                "venue": datasets.Value("string"),
                "year": datasets.Value("string"),
            },
            data_url="https://huggingface.co/datasets/matchbench/rel-text/resolve/main/right.csv",
        ),
    ]

    def _info(self):
        """Expose the feature schema of the active configuration."""
        return datasets.DatasetInfo(
            features=datasets.Features(self.config.features)
        )

    def _split_generators(self, dl_manager):
        """Download the file(s) for the active config and declare splits.

        "pairs" produces three splits (train/valid/test); "source" and
        "target" each produce a single split named after the config.
        """
        if self.config.name == "pairs":
            return [
                datasets.SplitGenerator(
                    name=split,
                    gen_kwargs={
                        "path_file": dl_manager.download_and_extract(
                            os.path.join(self.config.data_url, f"{split}.csv")),
                        "split": split,
                    },
                )
                for split in ["train", "valid", "test"]
            ]

        if self.config.name == "source":
            return [datasets.SplitGenerator(name="source", gen_kwargs={
                "path_file": dl_manager.download_and_extract(self.config.data_url),
                "split": "source",
            })]

        if self.config.name == "target":
            return [datasets.SplitGenerator(name="target", gen_kwargs={
                "path_file": dl_manager.download_and_extract(self.config.data_url),
                "split": "target",
            })]

    def _generate_examples(self, path_file, split):
        """Yield (key, example) pairs for one split.

        Bug fix: the original read the txt file into a variable named
        ``file`` but then iterated a nonexistent name ``data``, raising
        NameError for every "source" run. The lines are now read and
        iterated under one name (and ``file``, which shadows a legacy
        builtin, is no longer used).
        """
        if split == "source":
            # Plain-text input: one record per line.
            with open(path_file, "r") as f:
                lines = f.readlines()
            for i, line in enumerate(lines):
                yield i, {
                    "content": line,
                }
        else:
            df = pd.read_csv(path_file)
            for i, row in df.iterrows():
                if split == "target":
                    yield i, {
                        "id": row["id"],
                        "title": row["title"],
                        "authors": row["authors"],
                        "venue": row["venue"],
                        "year": row["year"],
                    }
                else:
                    # Any remaining split name is a pairs split
                    # (train / valid / test).
                    yield i, {
                        "ltable_id": row["ltable_id"],
                        "rtable_id": row["rtable_id"],
                        "label": row["label"],
                    }