cloverhxy committed on
Commit
873d156
·
1 Parent(s): 67853ba

Create DBLP-Scholar.py

Browse files
Files changed (1) hide show
  1. DBLP-Scholar.py +86 -0
DBLP-Scholar.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import datasets
3
+ import pandas as pd
4
+
5
+ class DBLPScholarConfig(datasets.BuilderConfig):
6
+ def __init__(self, features, data_url, **kwargs):
7
+ super(DBLPScholarConfig, self).__init__(**kwargs)
8
+ self.features = features
9
+ self.data_url = data_url
10
+
11
+ class DBLPScholar(datasets.GeneratorBasedBuilder):
12
+ BUILDER_CONFIGS = [
13
+ DBLPScholarConfig(
14
+ name="pairs",
15
+ features={
16
+ "ltable_id":datasets.Value("string"),
17
+ "rtable_id":datasets.Value("string"),
18
+ "label":datasets.Value("string"),
19
+ },
20
+ data_url="https://huggingface.co/datasets/matchbench/DBLP-Scholar/resolve/main/",
21
+ ),
22
+ DBLPScholarConfig(
23
+ name="source",
24
+ features={
25
+ "id":datasets.Value("string"),
26
+ "title":datasets.Value("string"),
27
+ "authors":datasets.Value("string"),
28
+ "venue":datasets.Value("string"),
29
+ "year":datasets.Value("string"),
30
+ },
31
+ data_url="https://huggingface.co/datasets/matchbench/DBLP-Scholar/resolve/main/tableA.csv",
32
+ ),
33
+ DBLPScholarConfig(
34
+ name="target",
35
+ features={
36
+ "id":datasets.Value("string"),
37
+ "title":datasets.Value("string"),
38
+ "authors":datasets.Value("string"),
39
+ "venue":datasets.Value("string"),
40
+ "year":datasets.Value("string"),
41
+ },
42
+ data_url="https://huggingface.co/datasets/matchbench/DBLP-Scholar/resolve/main/tableB.csv",
43
+ ),
44
+ ]
45
+
46
+ def _info(self):
47
+ return datasets.DatasetInfo(
48
+ features=datasets.Features(self.config.features)
49
+ )
50
+
51
+ def _split_generators(self, dl_manager):
52
+ if self.config.name == "pairs":
53
+ return [
54
+ datasets.SplitGenerator(
55
+ name=split,
56
+ gen_kwargs={
57
+ "path_file": dl_manager.download_and_extract(os.path.join(self.config.data_url, f"{split}.csv")),
58
+ "split":split,
59
+ }
60
+ )
61
+ for split in ["train", "valid", "test"]
62
+ ]
63
+ if self.config.name == "source":
64
+ return [ datasets.SplitGenerator(name="source",gen_kwargs={"path_file":dl_manager.download_and_extract(self.config.data_url), "split":"source",})]
65
+ if self.config.name == "target":
66
+ return [ datasets.SplitGenerator(name="target",gen_kwargs={"path_file":dl_manager.download_and_extract(self.config.data_url), "split":"target",})]
67
+
68
+
69
+
70
+ def _generate_examples(self, path_file, split):
71
+ file = pd.read_csv(path_file)
72
+ for i, row in file.iterrows():
73
+ if split not in ['source', 'target']:
74
+ yield i, {
75
+ "ltable_id": row["ltable_id"],
76
+ "rtable_id": row["rtable_id"],
77
+ "label": row["label"],
78
+ }
79
+ else:
80
+ yield i, {
81
+ "id": row["id"],
82
+ "title": row["title"],
83
+ "authors": row["authors"],
84
+ "venue": row["venue"],
85
+ "year": row["year"],
86
+ }