zhsy committed on
Commit
ed41f4a
·
1 Parent(s): 7ea98fc

Upload 4 files

Browse files
Files changed (4) hide show
  1. Wiki-Data-SM.py +103 -0
  2. matches.txt +20 -0
  3. wiki_data-sm-1.csv +0 -0
  4. wiki_data-sm-2.csv +0 -0
Wiki-Data-SM.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
+ import datasets
4
+ import pandas as pd
5
+
6
class WikiDataSMConfig(datasets.BuilderConfig):
    """BuilderConfig for the Wiki-Data-SM schema-matching dataset.

    Args:
        features: mapping of column name -> ``datasets.Value`` describing the
            feature schema exposed by this configuration.
        data_url: URL of the single remote file backing this configuration.
        **kwargs: forwarded unchanged to ``datasets.BuilderConfig``.
    """

    def __init__(self, features, data_url, **kwargs):
        # Zero-argument super() — equivalent to the legacy two-argument form.
        super().__init__(**kwargs)
        self.features = features
        self.data_url = data_url
12
+
13
class WikiDataSM(datasets.GeneratorBasedBuilder):
    """Dataset builder for Wiki-Data-SM (schema matching over two tables).

    Three configurations are exposed:
      * ``pairs``  — ground-truth matching column pairs from ``matches.txt``,
        yielded as (column1, column2, label) examples in a single TEST split.
      * ``source`` — the first table CSV, serialized as one JSON string.
      * ``target`` — the second table CSV, serialized as one JSON string.
    """

    BUILDER_CONFIGS = [
        WikiDataSMConfig(
            name="pairs",
            features={
                "column1": datasets.Value("string"),
                "column2": datasets.Value("string"),
                "label": datasets.Value("bool"),
            },
            data_url="https://huggingface.co/datasets/matchbench/Wiki-Data-SM/resolve/main/matches.txt",
        ),
        WikiDataSMConfig(
            name="source",
            features={"json": datasets.Value("string")},
            data_url="https://huggingface.co/datasets/matchbench/Wiki-Data-SM/resolve/main/wiki_data-sm-1.csv",
        ),
        WikiDataSMConfig(
            name="target",
            features={"json": datasets.Value("string")},
            data_url="https://huggingface.co/datasets/matchbench/Wiki-Data-SM/resolve/main/wiki_data-sm-2.csv",
        ),
    ]

    def _info(self):
        """Return dataset metadata carrying the per-config feature schema."""
        return datasets.DatasetInfo(
            features=datasets.Features(self.config.features)
        )

    def _split_generators(self, dl_manager):
        """Download this config's single file and map it to its one split.

        ``pairs`` is exposed as the canonical TEST split; ``source`` and
        ``target`` each form a split named after the configuration.
        """
        path = dl_manager.download(self.config.data_url)
        if self.config.name == "pairs":
            split_name = datasets.Split.TEST
        else:
            # Both "source" and "target" use the config name as split name.
            split_name = self.config.name
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"file_path": path},
            )
        ]

    def _generate_examples(self, file_path):
        """Yield ``(key, example)`` pairs for the active configuration.

        For ``pairs`` the file is a CSV of ``left,right`` column names; only
        positive (matching) pairs are listed, so every label is ``True``.
        For ``source``/``target`` the whole CSV is emitted as one JSON blob.
        """
        if self.config.name == "pairs":
            # Group right-hand columns by their left-hand partner; the set
            # keeps duplicate lines from producing duplicate pairs.
            matches = {}
            with open(file_path, "r") as f:
                for line in f:
                    parts = line.strip().split(",")
                    matches.setdefault(parts[0], set()).add(parts[1])
            idx = -1  # avoid shadowing the builtin `id`
            for left, rights in matches.items():
                for right in rights:
                    idx += 1
                    yield idx, {"column1": left, "column2": right, "label": True}
        elif self.config.name in ("source", "target"):
            # pandas opens the file itself; the original wrapped this call in
            # a redundant `open()` whose handle was never used.
            table_json = pd.read_csv(file_path).to_json()
            yield 0, {"json": table_json}
matches.txt ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 0_musician,1_musicianID
2
+ 0_musicianLabel,1_musicianName
3
+ 0_genderLabel,1_genderType
4
+ 0_birthDate,1_birthDate
5
+ 0_cityLabel,1_city
6
+ 0_familyNameLabel,1_familyName
7
+ 0_givenName,1_forename
8
+ 0_fatherLabel,1_fatherName
9
+ 0_motherLabel,1_motherName
10
+ 0_partner,1_spouse
11
+ 0_numberOfChildren,1_NChildren
12
+ 0_genreLabel,1_kind
13
+ 0_websiteLabel,1_webpage
14
+ 0_residenceLabel,1_residence
15
+ 0_ethnicityLabel,1_ethnicity
16
+ 0_religionLabel,1_religionLabel
17
+ 0_activityStart,1_kickoff
18
+ 0_twitterNameLabel,1_twitterUsername
19
+ 0_geniusNameLabel,1_geniusNameLabel
20
+ 0_recordLabelLabel,1_recordCompany
wiki_data-sm-1.csv ADDED
The diff for this file is too large to render. See raw diff
 
wiki_data-sm-2.csv ADDED
The diff for this file is too large to render. See raw diff