change processing script
Browse files
README.md
CHANGED
|
@@ -27,20 +27,15 @@ To load the dataset:
|
|
| 27 |
```python
|
| 28 |
from datasets import load_dataset
|
| 29 |
|
| 30 |
-
dataset = load_dataset("
|
| 31 |
```
|
| 32 |
|
| 33 |
- Available Names: `arxiv`, `dm_mathematics`, `github`, `hackernews`, `pile_cc`, `pubmed_central`, `wikipedia_(en)`, `full_pile`, `c4`, `temporal_arxiv`, `temporal_wiki`
|
| 34 |
- Available Splits: `ngram_7_0.2`, `ngram_13_0.2`, `ngram_13_0.8` (for most sources), `none` (for other sources)
|
| 35 |
- Available Features: `member` (str), `nonmember` (str), `member_neighbors` (List[str]), `nonmember_neighbors` (List[str])
|
| 36 |
|
| 37 |
-
## 🛠️ Codebase
|
| 38 |
-
For evaluating MIA methods on our datasets, visit our [GitHub repository](http://github.com/iamgroot42/mimir).
|
| 39 |
|
| 40 |
-
|
| 41 |
-
## ⭐ Citing our Work
|
| 42 |
-
|
| 43 |
-
If you find our codebase and datasets beneficial, kindly cite [our work](https://arxiv.org/pdf/2402.07841.pdf):
|
| 44 |
|
| 45 |
```bibtex
|
| 46 |
@inproceedings{duan2024membership,
|
|
@@ -49,4 +44,6 @@ If you find our codebase and datasets beneficial, kindly cite [our work](https:/
|
|
| 49 |
year={2024},
|
| 50 |
booktitle={Conference on Language Modeling (COLM)},
|
| 51 |
}
|
| 52 |
-
```
|
|
|
|
|
|
|
|
|
| 27 |
```python
|
| 28 |
from datasets import load_dataset
|
| 29 |
|
| 30 |
+
dataset = load_dataset("Al-not-AI/mimir", "pile_cc", split="ngram_7_0.2")
|
| 31 |
```
|
| 32 |
|
| 33 |
- Available Names: `arxiv`, `dm_mathematics`, `github`, `hackernews`, `pile_cc`, `pubmed_central`, `wikipedia_(en)`, `full_pile`, `c4`, `temporal_arxiv`, `temporal_wiki`
|
| 34 |
- Available Splits: `ngram_7_0.2`, `ngram_13_0.2`, `ngram_13_0.8` (for most sources), `none` (for other sources)
|
| 35 |
- Available Features: `member` (str), `nonmember` (str), `member_neighbors` (List[str]), `nonmember_neighbors` (List[str])
|
| 36 |
|
|
|
|
|
|
|
| 37 |
|
| 38 |
+
This dataset is forked from a repository linked to this paper:
|
|
|
|
|
|
|
|
|
|
| 39 |
|
| 40 |
```bibtex
|
| 41 |
@inproceedings{duan2024membership,
|
|
|
|
| 44 |
year={2024},
|
| 45 |
booktitle={Conference on Language Modeling (COLM)},
|
| 46 |
}
|
| 47 |
+
```
|
| 48 |
+
|
| 49 |
+
The only change is in the processing script.
|
mimir.py
CHANGED
|
@@ -1,8 +1,3 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Data used for experiments with MIMIR. Processed train/test splits for models trained on the Pile (for now).
|
| 3 |
-
Processing data at HF end.
|
| 4 |
-
"""
|
| 5 |
-
|
| 6 |
from datasets import (
|
| 7 |
GeneratorBasedBuilder,
|
| 8 |
SplitGenerator,
|
|
@@ -21,7 +16,6 @@ _HOMEPAGE = "http://github.com/iamgroot42/mimir"
|
|
| 21 |
|
| 22 |
_DESCRIPTION = """\
|
| 23 |
Member and non-member splits for our MI experiments using MIMIR. Data is available for each source.
|
| 24 |
-
We also cache neighbors (generated for the NE attack).
|
| 25 |
"""
|
| 26 |
|
| 27 |
_CITATION = """\
|
|
@@ -39,7 +33,7 @@ _DOWNLOAD_URL = "https://huggingface.co/datasets/iamgroot42/mimir/resolve/main/"
|
|
| 39 |
class MimirConfig(BuilderConfig):
|
| 40 |
"""BuilderConfig for Mimir dataset."""
|
| 41 |
|
| 42 |
-
def __init__(self, *args, subsets: List[str]=[], **kwargs):
|
| 43 |
"""Constructs a MimirConfig.
|
| 44 |
|
| 45 |
Args:
|
|
@@ -50,10 +44,8 @@ class MimirConfig(BuilderConfig):
|
|
| 50 |
|
| 51 |
|
| 52 |
class MimirDataset(GeneratorBasedBuilder):
|
| 53 |
-
# Assuming 'VERSION' is defined
|
| 54 |
VERSION = datasets.Version("1.3.0")
|
| 55 |
|
| 56 |
-
# Define the builder configs
|
| 57 |
BUILDER_CONFIG_CLASS = MimirConfig
|
| 58 |
BUILDER_CONFIGS = [
|
| 59 |
MimirConfig(
|
|
@@ -91,52 +83,23 @@ class MimirDataset(GeneratorBasedBuilder):
|
|
| 91 |
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
|
| 92 |
description="This split contains data from the Pile's Wikipedia subset at various n-gram overlap thresholds"
|
| 93 |
),
|
| 94 |
-
MimirConfig(
|
| 95 |
-
name="full_pile", description="This split contains data from multiple sources in the Pile",
|
| 96 |
-
),
|
| 97 |
-
MimirConfig(
|
| 98 |
-
name="c4", description="This split contains data the C4 dataset",
|
| 99 |
-
),
|
| 100 |
-
MimirConfig(
|
| 101 |
-
name="temporal_arxiv",
|
| 102 |
-
subsets=["2020_08", "2021_01", "2021_06", "2022_01", "2022_06", "2023_01", "2023_06"],
|
| 103 |
-
description="This split contains benchmarks where non-members are selected from various months from 2020-08 and onwards",
|
| 104 |
-
),
|
| 105 |
-
MimirConfig(
|
| 106 |
-
name="temporal_wiki", description="This split contains benchmarks where non-members are selected from 2023-08 and onwards",
|
| 107 |
-
),
|
| 108 |
]
|
| 109 |
|
| 110 |
def _info(self):
|
| 111 |
return datasets.DatasetInfo(
|
| 112 |
-
# This is the description that will appear on the datasets page.
|
| 113 |
description=_DESCRIPTION,
|
| 114 |
-
# This defines the different columns of the dataset and their types
|
| 115 |
features=datasets.Features({
|
| 116 |
-
"
|
| 117 |
-
"
|
| 118 |
-
"member_neighbors": datasets.Sequence(datasets.Value("string")),
|
| 119 |
-
"nonmember_neighbors": datasets.Sequence(datasets.Value("string"))
|
| 120 |
}),
|
| 121 |
-
# If there's a common (input, target) tuple from the features,
|
| 122 |
-
# specify them here. They'll be used if as_supervised=True in
|
| 123 |
-
# builder.as_dataset.
|
| 124 |
supervised_keys=None,
|
| 125 |
-
# Homepage of the dataset for documentation
|
| 126 |
homepage=_HOMEPAGE,
|
| 127 |
-
# Citation for the dataset
|
| 128 |
citation=_CITATION,
|
| 129 |
)
|
| 130 |
|
| 131 |
def _split_generators(self, dl_manager: DownloadManager):
|
| 132 |
"""Returns SplitGenerators."""
|
| 133 |
-
|
| 134 |
-
NEIGHBOR_SUFFIX = "_neighbors_25_bert_in_place_swap"
|
| 135 |
-
parent_dir = (
|
| 136 |
-
"cache_100_200_10000_512"
|
| 137 |
-
if self.config.name == "full_pile"
|
| 138 |
-
else "cache_100_200_1000_512"
|
| 139 |
-
)
|
| 140 |
|
| 141 |
if len(self.config.subsets) > 0:
|
| 142 |
suffixes = [f"{subset}" for subset in self.config.subsets]
|
|
@@ -149,46 +112,35 @@ class MimirDataset(GeneratorBasedBuilder):
|
|
| 149 |
|
| 150 |
subset_split_suffix_use = f"_{subset_split_suffix}" if subset_split_suffix != "none" else ""
|
| 151 |
|
| 152 |
-
# Add standard member and non-member paths
|
| 153 |
internal_fp['member'] = os.path.join(parent_dir, "train", f"{self.config.name}{subset_split_suffix_use}.jsonl")
|
| 154 |
internal_fp['nonmember'] = os.path.join(parent_dir, "test", f"{self.config.name}{subset_split_suffix_use}.jsonl")
|
| 155 |
|
| 156 |
-
# Load associated neighbors
|
| 157 |
-
internal_fp['member_neighbors'] = os.path.join(
|
| 158 |
-
parent_dir,
|
| 159 |
-
"train_neighbors",
|
| 160 |
-
f"{self.config.name}{subset_split_suffix_use}{NEIGHBOR_SUFFIX}.jsonl",
|
| 161 |
-
)
|
| 162 |
-
internal_fp['nonmember_neighbors'] = os.path.join(
|
| 163 |
-
parent_dir,
|
| 164 |
-
"test_neighbors",
|
| 165 |
-
f"{self.config.name}{subset_split_suffix_use}{NEIGHBOR_SUFFIX}.jsonl",
|
| 166 |
-
)
|
| 167 |
file_paths[subset_split_suffix] = internal_fp
|
| 168 |
|
| 169 |
-
#
|
| 170 |
data_dir = {}
|
| 171 |
for k, v_dict in file_paths.items():
|
| 172 |
-
download_paths = []
|
| 173 |
-
for v in v_dict.values():
|
| 174 |
-
download_paths.append(_DOWNLOAD_URL + v)
|
| 175 |
paths = dl_manager.download_and_extract(download_paths)
|
| 176 |
-
internal_dict = {k:v for k, v in zip(v_dict.keys(), paths)}
|
| 177 |
data_dir[k] = internal_dict
|
| 178 |
|
| 179 |
-
splits = []
|
| 180 |
-
for k in suffixes:
|
| 181 |
-
splits.append(SplitGenerator(name=k, gen_kwargs={"file_path_dict": data_dir[k]}))
|
| 182 |
return splits
|
| 183 |
|
| 184 |
def _generate_examples(self, file_path_dict):
|
| 185 |
-
"""Yields examples."""
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
"
|
| 194 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
from datasets import (
|
| 2 |
GeneratorBasedBuilder,
|
| 3 |
SplitGenerator,
|
|
|
|
| 16 |
|
| 17 |
_DESCRIPTION = """\
|
| 18 |
Member and non-member splits for our MI experiments using MIMIR. Data is available for each source.
|
|
|
|
| 19 |
"""
|
| 20 |
|
| 21 |
_CITATION = """\
|
|
|
|
| 33 |
class MimirConfig(BuilderConfig):
|
| 34 |
"""BuilderConfig for Mimir dataset."""
|
| 35 |
|
| 36 |
+
def __init__(self, *args, subsets: List[str] = [], **kwargs):
|
| 37 |
"""Constructs a MimirConfig.
|
| 38 |
|
| 39 |
Args:
|
|
|
|
| 44 |
|
| 45 |
|
| 46 |
class MimirDataset(GeneratorBasedBuilder):
|
|
|
|
| 47 |
VERSION = datasets.Version("1.3.0")
|
| 48 |
|
|
|
|
| 49 |
BUILDER_CONFIG_CLASS = MimirConfig
|
| 50 |
BUILDER_CONFIGS = [
|
| 51 |
MimirConfig(
|
|
|
|
| 83 |
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
|
| 84 |
description="This split contains data from the Pile's Wikipedia subset at various n-gram overlap thresholds"
|
| 85 |
),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
]
|
| 87 |
|
| 88 |
def _info(self):
|
| 89 |
return datasets.DatasetInfo(
|
|
|
|
| 90 |
description=_DESCRIPTION,
|
|
|
|
| 91 |
features=datasets.Features({
|
| 92 |
+
"input": datasets.Value("string"),
|
| 93 |
+
"label": datasets.Value("int32"),
|
|
|
|
|
|
|
| 94 |
}),
|
|
|
|
|
|
|
|
|
|
| 95 |
supervised_keys=None,
|
|
|
|
| 96 |
homepage=_HOMEPAGE,
|
|
|
|
| 97 |
citation=_CITATION,
|
| 98 |
)
|
| 99 |
|
| 100 |
def _split_generators(self, dl_manager: DownloadManager):
|
| 101 |
"""Returns SplitGenerators."""
|
| 102 |
+
parent_dir = "cache_100_200_1000_512"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 103 |
|
| 104 |
if len(self.config.subsets) > 0:
|
| 105 |
suffixes = [f"{subset}" for subset in self.config.subsets]
|
|
|
|
| 112 |
|
| 113 |
subset_split_suffix_use = f"_{subset_split_suffix}" if subset_split_suffix != "none" else ""
|
| 114 |
|
|
|
|
| 115 |
internal_fp['member'] = os.path.join(parent_dir, "train", f"{self.config.name}{subset_split_suffix_use}.jsonl")
|
| 116 |
internal_fp['nonmember'] = os.path.join(parent_dir, "test", f"{self.config.name}{subset_split_suffix_use}.jsonl")
|
| 117 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 118 |
file_paths[subset_split_suffix] = internal_fp
|
| 119 |
|
| 120 |
+
# Download data
|
| 121 |
data_dir = {}
|
| 122 |
for k, v_dict in file_paths.items():
|
| 123 |
+
download_paths = [_DOWNLOAD_URL + v for v in v_dict.values()]
|
|
|
|
|
|
|
| 124 |
paths = dl_manager.download_and_extract(download_paths)
|
| 125 |
+
internal_dict = {k: v for k, v in zip(v_dict.keys(), paths)}
|
| 126 |
data_dir[k] = internal_dict
|
| 127 |
|
| 128 |
+
splits = [SplitGenerator(name=k, gen_kwargs={"file_path_dict": data_dir[k]}) for k in suffixes]
|
|
|
|
|
|
|
| 129 |
return splits
|
| 130 |
|
| 131 |
def _generate_examples(self, file_path_dict):
|
| 132 |
+
"""Yields individual examples for members and non-members."""
|
| 133 |
+
with open(file_path_dict["member"], "r") as f_member, open(file_path_dict["nonmember"], "r") as f_nonmember:
|
| 134 |
+
for id, (member, nonmember) in enumerate(zip(f_member, f_nonmember)):
|
| 135 |
+
member_text = json.loads(member)
|
| 136 |
+
nonmember_text = json.loads(nonmember)
|
| 137 |
+
|
| 138 |
+
# Yield separate examples for members and non-members
|
| 139 |
+
yield f"{id}_member", {
|
| 140 |
+
"input": member_text,
|
| 141 |
+
"label": 1, # Member example
|
| 142 |
+
}
|
| 143 |
+
yield f"{id}_nonmember", {
|
| 144 |
+
"input": nonmember_text,
|
| 145 |
+
"label": 0, # Non-member example
|
| 146 |
+
}
|