Datasets:

# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

import datasets
import pandas as pd
from datasets import load_dataset

from pii_leakage.arguments.ner_args import NERArgs
from pii_leakage.ner.pii_results import ListPII
from pii_leakage.ner.tagger import Tagger
from pii_leakage.ner.tagger_factory import TaggerFactory
from pii_leakage.utils.output import print_highlighted, print_dict_highlighted
from pii_leakage.utils.random import rnd_idx

from dataclasses import dataclass

@dataclass
class CustomEnronBuilder(datasets.BuilderConfig):
    name: str = None
    sample_duplication_rate: int = 1    # number of times a sample is repeated
    shuffle_facts_seed: int = 42
    pseudonymize: bool = False


class CustomEnron(datasets.GeneratorBasedBuilder):
    """ A wrapper around the Enron dataset that uses anonymization.  """

    VERSION = datasets.Version("1.0.0")
    _DESCRIPTION = "A custom wrapper for the Enron dataset."
    _TEXT = "text"
    
    BUILDER_CONFIGS = [
        CustomEnronBuilder(name="undefended", sample_duplication_rate=1, version=VERSION,pseudonymize=False,    
                          description="undefended, private data"),
        CustomEnronBuilder(name="scrubbed", sample_duplication_rate=1, version=VERSION,pseudonymize=True,    
                          description="PII replaced with anon token")
    ]
    DEFAULT_CONFIG_NAME = "undefended"

    def __init__(self, *args, **kwargs):
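        """Instantiate the Flair NER tagger (flair/ner-english-ontonotes-large). PII is only
        anonymized (replaced with <MASK>) when the "scrubbed" config is selected."""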
        self.df: pd.DataFrame = pd.DataFrame()
        ner_args = NERArgs(ner='flair',
                           ner_model="flair/ner-english-ontonotes-large",
                           anon_token="<MASK>",
                           anonymize=kwargs.get("config_name") == "scrubbed")
        self._tagger: Tagger = TaggerFactory.from_ner_args(ner_args)
        print_dict_highlighted(ner_args.__dict__)
        super().__init__(*args, **kwargs)

    def _info(self):
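        """Declare the dataset schema: a single "text" column and, when pseudonymization is
        enabled, one additional string column per NER entity class holding the serialized PII."""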
        fea_dict = {self._TEXT: datasets.Value("string"),}
        if self.config.pseudonymize:
            fea_dict.update({entity_class: datasets.Value("string") 
               for entity_class in self._tagger.get_entity_classes()})
        features = datasets.Features(fea_dict)
        return datasets.DatasetInfo(
            description=self._DESCRIPTION,
            features=features
        )

    def _split_generators(self, dl_manager):
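        """Load the raw Enron e-mails, optionally shuffle them, and split them into roughly
        45% train (target model), 10% test, and 45% validation (shadow models)."""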

        self.df = load_dataset("LLM-PBE/enron-email")
        print_highlighted(f"Loaded the Enron e-mails (pseudonymize={self.config.pseudonymize}).")
        self.data = [item for item in self.df["train"]["text"]]
        if self.config.shuffle_facts_seed > 0:
            self.data = [self.data[i] for i in rnd_idx(N=len(self.data), seed=self.config.shuffle_facts_seed)]

        return [
            datasets.SplitGenerator(  # use ~100k samples for the target model
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split": "train",
                    "start": 0.0,
                    "end": 0.45  # default: 0.45
                },
            ),
            datasets.SplitGenerator(  # use 10% of the training samples for test
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split": "test",
                    "start": 0.45,
                    "end": 0.55  # default: 0.55
                },
            ),
            datasets.SplitGenerator(  # Use ~110k samples for shadow models
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split": "validation",
                    "start": 0.55,
                    "end": 1.0  # default: 1.0
                },
            ),
        ]

    def _generate_examples(self, split: str, start: float, end: float):
        """ Given a start and stop location, tag all PII and generate the dataset.

        We use multi_gpu generation for improved speed.

        """
        start_pos, end_pos = int(len(self.data) * start), int(len(self.data) * end)
        print_highlighted(
            f"Length of data: {len(self.data)}. Scrubbing from {start_pos} to {end_pos} (Total={end_pos - start_pos}).")

        unique_identifier = start_pos
        for i, text in enumerate(self.data[start_pos:end_pos]):
            if self.config.pseudonymize:
                # Replace PII spans with the anonymization token and collect the tagged PII.
                pseudonymized_text, piis = self._tagger.pseudonymize(text)

                if i == 0:
                    print_highlighted(pseudonymized_text)

                # One serialized ListPII per entity class; classes without matches get an empty list.
                pii_annotations = {k: ListPII().dumps() for k in self._tagger.get_entity_classes()}
                pii_annotations.update({k: v.dumps() for k, v in piis.group_by_class().items()})
            else:
                pseudonymized_text = text
                pii_annotations = {}
            
            for _ in range(self.config.sample_duplication_rate):
                yield f"{unique_identifier}", {
                    self._TEXT: pseudonymized_text,
                    **pii_annotations
                }
                unique_identifier += 1
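
# Minimal usage sketch (an assumption, not part of the original pipeline): with a `datasets`
# version that still supports local loading scripts, this builder can be invoked directly.
# "custom_enron.py" is a hypothetical file name for this script; building the dataset downloads
# the LLM-PBE/enron-email data and the flair NER model.
if __name__ == "__main__":
    enron = load_dataset("custom_enron.py", name="undefended", split="test")
    print(enron[0]["text"])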