iamgroot42 committed on
Commit
0f47dab
·
verified ·
1 Parent(s): f092391

Update mimir.py

Browse files
Files changed (1) hide show
  1. mimir.py +68 -33
mimir.py CHANGED
@@ -25,11 +25,11 @@ We also cache neighbors (generated for the NE attack).
25
  """
26
 
27
  _CITATION = """\
28
- @article{duan2024do,
29
- title={Do Membership Inference Attacks Work on Large Language Models?},
30
- author={Duan*, Michael and \textbf{A. Suri*} and Mireshghallah, Niloofar and Min, Sewon and Shi, Weijia and Zettlemoyer, Luke and Tsvetkov, Yulia and Choi, Yejin and Evans, David and Hajishirzi, Hannaneh},
31
- journal={arXiv preprint arXiv:???},
32
- year={2024}
33
  }
34
  """
35
 
@@ -112,9 +112,12 @@ class MimirDataset(GeneratorBasedBuilder):
112
  # This is the description that will appear on the datasets page.
113
  description=_DESCRIPTION,
114
  # This defines the different columns of the dataset and their types
115
- features=datasets.Features(
116
- {"text": datasets.Sequence(datasets.Value("string"))}
117
- ),
 
 
 
118
  # If there's a common (input, target) tuple from the features,
119
  # specify them here. They'll be used if as_supervised=True in
120
  # builder.as_dataset.
@@ -122,7 +125,7 @@ class MimirDataset(GeneratorBasedBuilder):
122
  # Homepage of the dataset for documentation
123
  homepage=_HOMEPAGE,
124
  # Citation for the dataset
125
- # citation=_CITATION,
126
  )
127
 
128
  def _split_generators(self, dl_manager: DownloadManager):
@@ -136,47 +139,79 @@ class MimirDataset(GeneratorBasedBuilder):
136
  )
137
 
138
  if len(self.config.subsets) > 0:
139
- subset_splits = [f"{self.config.name}_{subset}" for subset in self.config.subsets]
 
140
  else:
141
- subset_splits = [self.config.name]
 
142
 
143
  file_paths = {}
144
- for subset_split in subset_splits:
 
 
 
 
145
  # Add standard member and non-member paths
146
- file_paths[f"{subset_split}_member"] = os.path.join(parent_dir, "train", subset_split + ".jsonl")
147
- file_paths[f"{subset_split}_nonmember"] = os.path.join(parent_dir, "test", subset_split + ".jsonl")
 
 
148
 
149
  # Load associated neighbors
150
- file_paths[f"{subset_split}_member_neighbors"] = os.path.join(
151
  parent_dir,
152
  "train_neighbors",
153
- subset_split + f"{NEIGHBOR_SUFFIX}.jsonl",
154
  )
155
- file_paths[f"{subset_split}_nonmember_neighbors"] = os.path.join(
156
  parent_dir,
157
  "test_neighbors",
158
- subset_split + f"{NEIGHBOR_SUFFIX}.jsonl",
159
  )
 
 
 
 
 
 
 
 
 
 
 
160
 
161
  # Now that we know which files to load, download them
162
- download_paths = []
163
- k2i = {}
164
- for i, (k, v) in enumerate(file_paths.items()):
165
- download_paths.append(_DOWNLOAD_URL + v)
166
- k2i[k] = i
167
- data_dir = dl_manager.download_and_extract(download_paths)
 
 
 
168
 
169
  splits = []
170
- for k in file_paths.keys():
171
- splits.append(SplitGenerator(name=k, gen_kwargs={"file_path": data_dir[k2i[k]]}))
172
  return splits
173
 
174
- def _generate_examples(self, file_path):
175
  """Yields examples."""
 
 
 
 
 
 
 
 
 
 
176
  # Open the specified .jsonl file and read each line
177
- with open(file_path, "r") as f:
178
- for id, line in enumerate(f):
179
- data = json.loads(line)
180
- if type(data) != list:
181
- data = [data]
182
- yield id, {"text": data}
 
25
  """
26
 
27
  _CITATION = """\
28
+ @article{duan2024membership,
29
+ title={Do Membership Inference Attacks Work on Large Language Models?},
30
+ author={Michael Duan and Anshuman Suri and Niloofar Mireshghallah and Sewon Min and Weijia Shi and Luke Zettlemoyer and Yulia Tsvetkov and Yejin Choi and David Evans and Hannaneh Hajishirzi},
31
+ year={2024},
32
+ journal={arXiv:2402.07841},
33
  }
34
  """
35
 
 
112
  # This is the description that will appear on the datasets page.
113
  description=_DESCRIPTION,
114
  # This defines the different columns of the dataset and their types
115
+ features=datasets.Features({
116
+ "member": datasets.Value("string"),
117
+ "nonmember": datasets.Value("string"),
118
+ "member_neighbors": datasets.Sequence(datasets.Value("string")),
119
+ "nonmember_neighbors": datasets.Sequence(datasets.Value("string"))
120
+ }),
121
  # If there's a common (input, target) tuple from the features,
122
  # specify them here. They'll be used if as_supervised=True in
123
  # builder.as_dataset.
 
125
  # Homepage of the dataset for documentation
126
  homepage=_HOMEPAGE,
127
  # Citation for the dataset
128
+ citation=_CITATION,
129
  )
130
 
131
  def _split_generators(self, dl_manager: DownloadManager):
 
139
  )
140
 
141
  if len(self.config.subsets) > 0:
142
+ suffixes = [f"{subset}" for subset in self.config.subsets]
143
+ # subset_splits = [f"{self.config.name}_{subset}" for subset in self.config.subsets]
144
  else:
145
+ suffixes = ["none"]
146
+ # subset_splits = [self.config.name]
147
 
148
  file_paths = {}
149
+ for subset_split_suffix in suffixes:
150
+ internal_fp = {}
151
+
152
+ subset_split_suffix_use = f"_{subset_split_suffix}" if subset_split_suffix != "none" else ""
153
+
154
  # Add standard member and non-member paths
155
+ internal_fp['member'] = os.path.join(parent_dir, "train", f"{self.config.name}{subset_split_suffix_use}.jsonl")
156
+ internal_fp['nonmember'] = os.path.join(parent_dir, "test", f"{self.config.name}{subset_split_suffix_use}.jsonl")
157
+ # file_paths[f"{subset_split}_member"] = os.path.join(parent_dir, "train", subset_split + ".jsonl")
158
+ # file_paths[f"{subset_split}_nonmember"] = os.path.join(parent_dir, "test", subset_split + ".jsonl")
159
 
160
  # Load associated neighbors
161
+ internal_fp['member_neighbors'] = os.path.join(
162
  parent_dir,
163
  "train_neighbors",
164
+ f"{self.config.name}{subset_split_suffix_use}{NEIGHBOR_SUFFIX}.jsonl",
165
  )
166
+ internal_fp['nonmember_neighbors'] = os.path.join(
167
  parent_dir,
168
  "test_neighbors",
169
+ f"{self.config.name}{subset_split_suffix_use}{NEIGHBOR_SUFFIX}.jsonl",
170
  )
171
+ # file_paths[f"{subset_split}_member_neighbors"] = os.path.join(
172
+ # parent_dir,
173
+ # "train_neighbors",
174
+ # subset_split + f"{NEIGHBOR_SUFFIX}.jsonl",
175
+ # )
176
+ # file_paths[f"{subset_split}_nonmember_neighbors"] = os.path.join(
177
+ # parent_dir,
178
+ # "test_neighbors",
179
+ # subset_split + f"{NEIGHBOR_SUFFIX}.jsonl",
180
+ # )
181
+ file_paths[subset_split_suffix] = internal_fp
182
 
183
  # Now that we know which files to load, download them
184
+ data_dir = {}
185
+ for k, v_dict in file_paths.items():
186
+ download_paths = []
187
+ for v in v_dict.values():
188
+ download_paths.append(_DOWNLOAD_URL + v)
189
+ # [f"{k}{k_inside}"] = _DOWNLOAD_URL + v
190
+ paths = dl_manager.download_and_extract(download_paths)
191
+ internal_dict = {k:v for k, v in zip(v_dict.keys(), paths)}
192
+ data_dir[k] = internal_dict
193
 
194
  splits = []
195
+ for k in suffixes:
196
+ splits.append(SplitGenerator(name=k, gen_kwargs={"file_path_dict": data_dir[k]}))
197
  return splits
198
 
199
+ def _generate_examples(self, file_path_dict):
200
  """Yields examples."""
201
+ # yield 0, file_path_dict
202
+ # Open all four files in file_path_dict and yield examples (one from each file) simultaneously
203
+ with open(file_path_dict["member"], "r") as f_member, open(file_path_dict["nonmember"], "r") as f_nonmember, open(file_path_dict["member_neighbors"], "r") as f_member_neighbors, open(file_path_dict["nonmember_neighbors"], "r") as f_nonmember_neighbors:
204
+ for id, (member, nonmember, member_neighbors, nonmember_neighbors) in enumerate(zip(f_member, f_nonmember, f_member_neighbors, f_nonmember_neighbors)):
205
+ yield id, {
206
+ "member": json.loads(member),
207
+ "nonmember": json.loads(nonmember),
208
+ "member_neighbors": json.loads(member_neighbors),
209
+ "nonmember_neighbors": json.loads(nonmember_neighbors),
210
+ }
211
  # Open the specified .jsonl file and read each line
212
+ # with open(file_path, "r") as f:
213
+ # for id, line in enumerate(f):
214
+ # data = json.loads(line)
215
+ # if type(data) != list:
216
+ # data = [data]
217
+ # yield id, {"text": data}