# NOTE: dataset-viewer column residue ("code" / "stringlengths" / "17" / "6.64M")
# was prepended to this file; it is not part of the source and is not valid Python.
@lru_cache()
def default_bpe():
    """Return the absolute path of the bundled gzipped BPE vocabulary file."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
    """Return a dict mapping every byte value (0-255) to a printable unicode char.

    The reversible BPE codes operate on unicode strings, so every raw byte needs
    a stand-in character that is neither whitespace nor a control character
    (the bpe code barfs on those). Bytes that are already printable map to
    themselves; the remainder are shifted to code points past 0xFF.
    """
    printable = list(range(ord('!'), ord('~') + 1))
    printable += list(range(ord('¡'), ord('¬') + 1))
    printable += list(range(ord('®'), ord('ÿ') + 1))
    codepoints = printable[:]
    offset = 0
    for byte in range(2 ** 8):
        if byte not in printable:
            printable.append(byte)
            codepoints.append(2 ** 8 + offset)
            offset += 1
    return dict(zip(printable, (chr(cp) for cp in codepoints)))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    Word is represented as a tuple of symbols (symbols being variable-length
    strings).

    Improvement: the previous loop indexed ``word[0]`` unconditionally and
    raised IndexError on an empty tuple; the zip idiom yields an empty set
    instead and is identical for all non-empty inputs.
    """
    return set(zip(word, word[1:]))
def basic_clean(text):
    """Fix mojibake with ftfy, undo double HTML-escaping, and strip the ends."""
    fixed = ftfy.fix_text(text)
    unescaped = html.unescape(html.unescape(fixed))
    return unescaped.strip()
def whitespace_clean(text):
    """Collapse every whitespace run to a single space and trim the ends."""
    collapsed = re.sub(r'\s+', ' ', text)
    return collapsed.strip()
class SimpleTokenizer(object):
    """CLIP-style byte-level BPE tokenizer backed by a gzipped merges file.

    Improvement: the bare ``except:`` in :meth:`bpe` (which also swallowed
    KeyboardInterrupt and genuine bugs) is narrowed to ``except ValueError``,
    the only exception ``tuple.index`` raises on a miss.
    """

    def __init__(self, bpe_path: str = default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
        # Skip the header row and trailing rows so exactly 49152-256-2 merge
        # rules remain (vocab = 256 byte symbols x2 + merges + 2 specials).
        merges = merges[1:49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v + '</w>' for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Special tokens bypass the BPE loop entirely.
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        # NOTE(review): \p{L}/\p{N} require the third-party `regex` module;
        # `re` here is presumably `import regex as re` — confirm at file top.
        self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)
        self.vocab = self.encoder

    def bpe(self, token):
        """Apply the learned merges to one chunk; return space-joined BPE symbols.

        The final symbol carries the '</w>' end-of-word marker. Results are
        memoized in self.cache.
        """
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
        pairs = get_pairs(word)
        if not pairs:
            return token + '</w>'

        while True:
            # Merge the lowest-ranked (earliest-learned) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:  # BUGFIX: was a bare `except:`
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)

        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Text -> list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens):
        """Token ids -> text; word-end markers become spaces."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
        return text

    def tokenize(self, text):
        """Text -> list of BPE token *strings* (not ids)."""
        tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return tokens

    def convert_tokens_to_ids(self, tokens):
        """Token strings -> ids via the encoder vocabulary."""
        return [self.encoder[bpe_token] for bpe_token in tokens]
def get_world_size():
    """Number of distributed processes; 1 outside distributed training."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def get_rank():
    """This process's distributed rank; 0 outside distributed training."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def is_main_process():
    """True only on the rank-0 (main) process."""
    rank = get_rank()
    return rank == 0
def synchronize():
    """Barrier across all ranks when distributed training is active.

    A no-op when torch.distributed is unavailable, uninitialized, or running
    a single process.
    """
    if not dist.is_available():
        return
    if not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
def all_gather(data):
    """Run all_gather on arbitrary picklable data (not necessarily tensors).

    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]

    # Serialize the payload into a CUDA byte tensor.
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to('cuda')

    # Exchange payload sizes so every rank can allocate receive buffers.
    local_size = torch.LongTensor([tensor.numel()]).to('cuda')
    size_list = [torch.LongTensor([0]).to('cuda') for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # all_gather requires equal shapes: pad every payload to max_size.
    tensor_list = [torch.ByteTensor(size=(max_size,)).to('cuda') for _ in size_list]
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).to('cuda')
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    # Strip the padding and unpickle each rank's payload.
    data_list = []
    for size, received in zip(size_list, tensor_list):
        raw = received.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(raw))
    return data_list
def reduce_dict(input_dict, average=True):
    """Reduce dict values across ranks so rank 0 holds the combined result.

    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Returns:
        dict with the same keys; values are only meaningful on rank 0.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        # Sort keys so every rank stacks values in the same order.
        names = sorted(input_dict.keys())
        values = torch.stack([input_dict[k] for k in names], dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            values /= world_size
        return {k: v for k, v in zip(names, values)}
def setup_logger(name, save_dir, dist_rank, filename='log.txt'):
    """Create a DEBUG logger writing to stdout and (optionally) a file.

    Non-zero ranks receive a silenced ERROR-level logger with no handlers so
    only the main process emits output.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.ERROR)
    if dist_rank > 0:
        return logger

    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('[%(asctime)s %(name)s %(lineno)s %(levelname)s]: %(message)s')

    console = logging.StreamHandler(stream=sys.stdout)
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)
    logger.addHandler(console)
    logger.propagate = False

    if save_dir:
        file_handler = logging.FileHandler(os.path.join(save_dir, filename))
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20):
        self.deque = deque(maxlen=window_size)  # last `window_size` values
        self.series = []                        # full history
        self.total = 0.0
        self.count = 0

    def update(self, value):
        """Record one new observation."""
        self.deque.append(value)
        self.series.append(value)
        self.total += value
        self.count += 1

    @property
    def median(self):
        """Median over the sliding window."""
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        """Mean over the sliding window."""
        return torch.tensor(list(self.deque)).mean().item()

    @property
    def global_avg(self):
        """Mean over every value ever recorded."""
        return self.total / self.count
class MetricLogger(object):
    """Collect named SmoothedValue meters and render them as one status line."""

    def __init__(self, delimiter='\t'):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Record one value per keyword; tensors are unwrapped via .item()."""
        for name, value in kwargs.items():
            if isinstance(value, torch.Tensor):
                value = value.item()
            assert isinstance(value, (float, int))
            self.meters[name].update(value)

    def __getattr__(self, attr):
        # Expose meters as attributes, e.g. `logger.loss.median`.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        parts = ['{}: {:.4f} ({:.4f})'.format(name, meter.median, meter.global_avg)
                 for name, meter in self.meters.items()]
        return self.delimiter.join(parts)
def permutation_test(tokens, key, n, k, vocab_size, n_runs=100):
    """Estimate a watermark p-value by comparing the keyed statistic to nulls
    computed with freshly drawn random keys.

    NOTE(review): a second, torch-based `permutation_test` defined later in
    this file shadows this definition when both live in one module.
    """
    rng = mersenne_rng(key)
    xi = np.array([rng.rand() for _ in range(n * vocab_size)],
                  dtype=np.float32).reshape(n, vocab_size)
    test_result = detect(tokens, n, k, xi)

    p_val = 0
    for _ in range(n_runs):
        xi_alternative = np.random.rand(n, vocab_size).astype(np.float32)
        null_result = detect(tokens, n, k, xi_alternative)
        p_val += (null_result <= test_result)
    # Add-one smoothing keeps the estimate strictly inside (0, 1).
    return (p_val + 1.0) / (n_runs + 1.0)
def detect(tokens, n, k, xi, gamma=0.0):
    """Minimum alignment cost between token k-grams and all rotations of xi.

    Note: the `n` parameter is immediately overwritten by len(xi), matching
    the original call convention.
    """
    m = len(tokens)
    n = len(xi)
    A = np.empty((m - (k - 1), n))
    for i in range(m - (k - 1)):
        for j in range(n):
            A[i][j] = levenshtein(tokens[i:i + k], xi[(j + np.arange(k)) % n], gamma)
    return np.min(A)
def main(args):
    """Read a document, tokenize it, and report the watermark p-value."""
    with open(args.document, 'r') as f:
        text = f.read()

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    tokens = tokenizer.encode(text, return_tensors='pt', truncation=True,
                              max_length=2048).numpy()[0]

    t0 = time.time()
    pval = permutation_test(tokens, args.key, args.n, len(tokens), len(tokenizer))
    print('p-value: ', pval)
    print(f'(elapsed time: {time.time() - t0}s)')
class mersenne_rng(object):
    """Minimal MT19937 (Mersenne Twister) PRNG matching the reference C code."""

    def __init__(self, seed=5489):
        # Standard MT19937 constants.
        self.f = 1812433253
        self.m = 397
        self.u = 11
        self.s = 7
        self.b = 2636928640
        self.t = 15
        self.c = 4022730752
        self.l = 18
        self.lower_mask = (1 << 31) - 1
        self.upper_mask = 1 << 31
        self.index = 624  # force a twist before the first output
        self.state = [0] * 624
        self.state[0] = seed
        for i in range(1, 624):
            prev = self.state[i - 1]
            self.state[i] = self.int_32(self.f * (prev ^ (prev >> 30)) + i)

    def twist(self):
        """Regenerate the whole 624-word state block."""
        for i in range(624):
            temp = self.int_32((self.state[i] & self.upper_mask) +
                               (self.state[(i + 1) % 624] & self.lower_mask))
            shifted = temp >> 1
            if temp % 2 != 0:
                shifted ^= 2567483615
            self.state[i] = self.state[(i + self.m) % 624] ^ shifted
        self.index = 0

    def int_32(self, number):
        """Truncate to an unsigned 32-bit value."""
        return int(number & 4294967295)

    def randint(self):
        """Next tempered 32-bit output."""
        if self.index >= 624:
            self.twist()
        y = self.state[self.index]
        y ^= y >> self.u
        y ^= (y << self.s) & self.b
        y ^= (y << self.t) & self.c
        y ^= y >> self.l
        self.index += 1
        return self.int_32(y)

    def rand(self):
        """Uniform float in [0, 1)."""
        return self.randint() * (1.0 / 4294967296.0)

    def randperm(self, n):
        """Shuffle of range(n) driven by this generator (original biased
        `% i` variant, preserved for reproducibility)."""
        p = list(range(n))
        for i in range(n - 1, 0, -1):
            j = self.randint() % i
            p[i], p[j] = p[j], p[i]
        return p
def substitution_attack(tokens, p, vocab_size, distribution=None):
    """Resample a random fraction *p* of *tokens* (modified in place).

    Args:
        tokens: 1-D LongTensor of token ids; mutated and returned.
        p: fraction of positions to replace.
        vocab_size: size of the replacement vocabulary.
        distribution: callable(tokens) -> per-position probs; uniform if None.
    """
    if distribution is None:
        distribution = lambda x: torch.ones(size=(len(tokens), vocab_size)) / vocab_size
    positions = torch.randperm(len(tokens))[:int(p * len(tokens))]
    replacement_probs = distribution(tokens)
    samples = torch.multinomial(replacement_probs, 1).flatten()
    tokens[positions] = samples[positions]
    return tokens
def deletion_attack(tokens, p):
    """Return *tokens* with a random fraction *p* of positions removed."""
    drop = torch.randperm(len(tokens))[:int(p * len(tokens))]
    keep_mask = torch.ones(len(tokens), dtype=torch.bool)
    keep_mask[drop] = False
    return tokens[keep_mask]
def insertion_attack(tokens, p, vocab_size, distribution=None):
    """Insert fresh random tokens at a random fraction *p* of positions.

    Note: after each insertion the original slot is also overwritten with the
    same sample, mirroring the reference implementation.
    """
    if distribution is None:
        distribution = lambda x: torch.ones(size=(len(tokens), vocab_size)) / vocab_size
    positions = torch.randperm(len(tokens))[:int(p * len(tokens))]
    probs = distribution(tokens)
    samples = torch.multinomial(probs, 1)
    # Insert back-to-front so earlier indices stay valid.
    for i in positions.sort(descending=True).values:
        tokens = torch.cat([tokens[:i], samples[i], tokens[i:]])
        tokens[i] = samples[i]
    return tokens
def permutation_test(tokens, vocab_size, n, k, seed, test_stat, n_runs=100, max_seed=100000):
    """p-value of `test_stat` on `tokens` against randomly permuted nulls.

    Each null run relabels the vocabulary with a fresh permutation and
    re-seeds the key generator at random; the returned value is the fraction
    of null statistics at or below the observed one.
    """
    generator = torch.Generator()
    generator.manual_seed(int(seed))
    observed = test_stat(tokens=tokens, n=n, k=k, generator=generator, vocab_size=vocab_size)

    p_val = 0
    for _ in range(n_runs):
        pi = torch.randperm(vocab_size)
        tokens = torch.argsort(pi)[tokens]
        seed = torch.randint(high=max_seed, size=(1,)).item()
        generator.manual_seed(int(seed))
        null = test_stat(tokens=tokens, n=n, k=k, generator=generator,
                         vocab_size=vocab_size, null=True)
        p_val += (null <= observed).float() / n_runs
    return p_val
def fast_permutation_test(tokens, vocab_size, n, k, seed, test_stat, null_results):
    """p-value from a precomputed, sorted tensor of null statistics."""
    generator = torch.Generator()
    generator.manual_seed(int(seed))
    observed = test_stat(tokens=tokens, n=n, k=k, generator=generator, vocab_size=vocab_size)
    # Rank of the observed statistic among the null results.
    return torch.searchsorted(null_results, observed, right=True) / len(null_results)
def phi(tokens, n, k, generator, key_func, vocab_size, dist, null=False, normalize=False):
    """Watermark test statistic: minimum distance from any token k-gram to the
    keyed sequence xi.

    Under the null, token ids are remapped onto a compact range so the key is
    drawn over the effective vocabulary actually present in `tokens`.
    """
    if null:
        tokens = torch.unique(tokens, return_inverse=True, sorted=False)[1]
        eff_vocab_size = torch.max(tokens) + 1
    else:
        eff_vocab_size = vocab_size

    xi, pi = key_func(generator, n, vocab_size, eff_vocab_size)
    tokens = torch.argsort(pi)[tokens]
    if normalize:
        tokens = tokens.float() / vocab_size

    A = adjacency(tokens, xi, dist, k)
    closest = torch.min(A, axis=1)[0]
    return torch.min(closest)
def adjacency(tokens, xi, dist, k):
    """Pairwise cost matrix between every token k-gram and every rotation of xi."""
    m, n = len(tokens), len(xi)
    A = torch.empty(size=(m - (k - 1), n))
    offsets = torch.arange(k)  # hoisted loop invariant
    for i in range(m - (k - 1)):
        for j in range(n):
            A[i][j] = dist(tokens[i:i + k], xi[(j + offsets) % n])
    return A
def gumbel_key_func(generator, n, vocab_size, eff_vocab_size=None):
    """Gumbel watermark key: identity permutation plus an (n, V) uniform grid."""
    if eff_vocab_size is None:
        eff_vocab_size = vocab_size
    pi = torch.arange(eff_vocab_size)
    xi = torch.rand((n, eff_vocab_size), generator=generator)
    return xi, pi
def gumbel_sampling(probs, pi, xi):
    """Gumbel-max sampling: argmax of xi^(1/p) over the permuted vocabulary."""
    shuffled_probs = torch.gather(probs, 1, pi)
    return torch.argmax(xi ** (1 / shuffled_probs), axis=1).unsqueeze(-1)
def gumbel_score(tokens, xi):
    """Negative sum of log(1/(1-xi)) over the xi entries the tokens selected."""
    xi_samp = torch.gather(xi, -1, tokens.unsqueeze(-1)).squeeze()
    return -torch.sum(torch.log(1 / (1 - xi_samp)))
def gumbel_edit_score(tokens, xi, gamma):
    """Levenshtein-relaxed variant of the Gumbel score (native backend)."""
    return gumbel_levenshtein(tokens.numpy(), xi.numpy(), gamma)
class Categories():
    """Work with script aliases from ISO 15924.

    https://en.wikipedia.org/wiki/ISO_15924#List_of_codes
    """

    fpath = os.path.join(DATA_LOCATION, 'categories.json')

    @classmethod
    def _get_ranges(cls, categories):
        """Yield (start_code, end_code) pairs for the requested categories.

        Raises ValueError for any unknown category alias.
        """
        with open(cls.fpath, encoding='utf-8') as f:
            data = json.load(f)
        for category in categories:
            if category not in data['aliases']:
                raise ValueError('Invalid category: {}'.format(category))
        for point in data['points']:
            if point[2] in categories:
                yield point[:2]

    @classmethod
    def get_alphabet(cls, categories):
        """Return the set of characters covered by the given categories."""
        alphabet = set()
        for start, end in cls._get_ranges(categories):
            alphabet.update(chr(code) for code in range(start, end + 1))
        return alphabet

    @classmethod
    def detect(cls, char):
        """Return the category alias for *char*, or None when unknown."""
        with open(cls.fpath, encoding='utf-8') as f:
            data = json.load(f)
        # Fast path: the first word of the unicodedata name often matches.
        try:
            category = unicodedata.name(char).split()[0]
        except (TypeError, ValueError):
            pass
        else:
            if category in data['aliases']:
                return category
        # Fallback: scan the code-point ranges.
        code = ord(char)
        for point in data['points']:
            if point[0] <= code <= point[1]:
                return point[2]

    @classmethod
    def get_all(cls):
        """Return all known category aliases."""
        with open(cls.fpath, encoding='utf-8') as f:
            data = json.load(f)
        return set(data['aliases'])
class Languages():
    """Lookups between language codes and their alphabets."""

    fpath = os.path.join(DATA_LOCATION, 'languages.json')

    @classmethod
    def get_alphabet(cls, languages):
        """Return the set of characters used by the given language codes.

        Raises ValueError for any unknown language code.
        """
        with open(cls.fpath, encoding='utf-8') as f:
            data = json.load(f)
        alphabet = set()
        for lang in languages:
            if lang not in data:
                raise ValueError('Invalid language code: {}'.format(lang))
            alphabet.update(data[lang])
        return alphabet

    @classmethod
    def detect(cls, char):
        """Return the set of languages whose alphabet contains *char*."""
        with open(cls.fpath, encoding='utf-8') as f:
            data = json.load(f)
        return {lang for lang, alphabet in data.items() if char in alphabet}

    @classmethod
    def get_all(cls):
        """Return all known language codes."""
        with open(cls.fpath, encoding='utf-8') as f:
            data = json.load(f)
        return set(data.keys())
class Homoglyphs():
    """Find homoglyph (visually confusable) variants of characters and strings."""

    def __init__(self, categories=None, languages=None, alphabet=None,
                 strategy=STRATEGY_IGNORE, ascii_strategy=STRATEGY_IGNORE,
                 ascii_range=ASCII_RANGE):
        if strategy not in (STRATEGY_LOAD, STRATEGY_IGNORE, STRATEGY_REMOVE):
            raise ValueError('Invalid strategy')
        self.strategy = strategy
        self.ascii_strategy = ascii_strategy
        self.ascii_range = ascii_range

        # Default to Latin + Common scripts when nothing is specified.
        if not categories and not languages and not alphabet:
            categories = ('LATIN', 'COMMON')
        self.categories = set(categories or [])
        self.languages = set(languages or [])

        self.alphabet = set(alphabet or [])
        if self.categories:
            self.alphabet.update(Categories.get_alphabet(self.categories))
        if self.languages:
            self.alphabet.update(Languages.get_alphabet(self.languages))
        self.table = self.get_table(self.alphabet)

    @staticmethod
    def get_table(alphabet):
        """char -> set of confusables, both sides restricted to *alphabet*."""
        table = defaultdict(set)
        with open(os.path.join(DATA_LOCATION, 'confusables_sept2022.json')) as f:
            data = json.load(f)
        for char in alphabet:
            if char in data:
                for homoglyph in data[char]:
                    if homoglyph in alphabet:
                        table[char].add(homoglyph)
        return table

    @staticmethod
    def get_restricted_table(source_alphabet, target_alphabet):
        """char -> confusables, mapping source chars into the target alphabet."""
        table = defaultdict(set)
        with open(os.path.join(DATA_LOCATION, 'confusables_sept2022.json')) as f:
            data = json.load(f)
        for char in source_alphabet:
            if char in data:
                for homoglyph in data[char]:
                    if homoglyph in target_alphabet:
                        table[char].add(homoglyph)
        return table

    @staticmethod
    def uniq_and_sort(data):
        """Deduplicate and sort longest-first, then lexicographically."""
        result = list(set(data))
        result.sort(key=lambda x: (-len(x), x))
        return result

    def _update_alphabet(self, char):
        """Grow the alphabet to cover *char* (STRATEGY_LOAD); False if unknown."""
        langs = Languages.detect(char)
        if langs:
            self.languages.update(langs)
            self.alphabet.update(Languages.get_alphabet(langs))
        else:
            category = Categories.detect(char)
            if category is None:
                return False
            self.categories.add(category)
            self.alphabet.update(Categories.get_alphabet([category]))
        # Rebuild the confusables table over the enlarged alphabet.
        self.table = self.get_table(self.alphabet)
        return True

    def _get_char_variants(self, char):
        """All homoglyph variants of *char* (including itself), sorted."""
        if char not in self.alphabet:
            if self.strategy == STRATEGY_LOAD:
                if not self._update_alphabet(char):
                    return []
            elif self.strategy == STRATEGY_IGNORE:
                return [char]
            elif self.strategy == STRATEGY_REMOVE:
                return []
        alt_chars = self.table.get(char, set())
        if alt_chars:
            # One-hop transitive closure: homoglyphs of homoglyphs.
            second_hop = [self.table.get(alt, set()) for alt in alt_chars]
            alt_chars.update(*second_hop)
        alt_chars.add(char)
        return self.uniq_and_sort(alt_chars)

    def _get_combinations(self, text, ascii=False):
        variations = []
        for char in text:
            alt_chars = self._get_char_variants(char)
            if ascii:
                alt_chars = [c for c in alt_chars if ord(c) in self.ascii_range]
                if not alt_chars and self.ascii_strategy == STRATEGY_IGNORE:
                    return
            if alt_chars:
                variations.append(alt_chars)
        if variations:
            for variant in product(*variations):
                yield ''.join(variant)

    def get_combinations(self, text):
        """All homoglyph spellings of *text*."""
        return list(self._get_combinations(text))

    def _to_ascii(self, text):
        for variant in self._get_combinations(text, ascii=True):
            if max(map(ord, variant)) in self.ascii_range:
                yield variant

    def to_ascii(self, text):
        """ASCII-only homoglyph spellings of *text*."""
        return self.uniq_and_sort(self._to_ascii(text))
def normalization_strategy_lookup(strategy_name: str) -> object:
    """Map a strategy name to a normalizer instance.

    Raises:
        ValueError: for unknown strategy names. (Previously an unknown name
        silently returned None, which only failed later when the "normalizer"
        was called.)
    """
    if strategy_name == 'unicode':
        return UnicodeSanitizer()
    if strategy_name == 'homoglyphs':
        return HomoglyphCanonizer()
    if strategy_name == 'truecase':
        return TrueCaser()
    raise ValueError(f'Unknown normalization strategy: {strategy_name}')
class HomoglyphCanonizer():
    """Attempts to detect homoglyph attacks and find a consistent canon.

    This function does so on a per-ISO-category level. Language-level would also be possible (see commented code).

    Improvement: `_categorize_text` was annotated `-> dict` but returns a
    tuple; the annotation is corrected.
    """

    def __init__(self):
        self.homoglyphs = None

    def __call__(self, homoglyphed_str: str) -> str:
        target_category, all_categories = self._categorize_text(homoglyphed_str)
        homoglyph_table = self._select_canon_category_and_load(target_category, all_categories)
        return self._sanitize_text(target_category, homoglyph_table, homoglyphed_str)

    def _categorize_text(self, text: str) -> tuple[str, tuple]:
        """Count ISO categories per char; return (dominant category, all seen)."""
        iso_categories = defaultdict(int)
        for char in text:
            iso_categories[Categories.detect(char)] += 1
        target_category = max(iso_categories, key=iso_categories.get)
        all_categories = tuple(iso_categories)
        return target_category, all_categories

    # NOTE(review): functools.cache on an instance method keys on `self` and
    # keeps the instance alive for the cache's lifetime (flake8-bugbear B019).
    @cache
    def _select_canon_category_and_load(self, target_category: str, all_categories: tuple[str]) -> dict:
        """Load the confusables table restricted to the canon category."""
        homoglyph_table = Homoglyphs(categories=(target_category, 'COMMON'))
        source_alphabet = Categories.get_alphabet(all_categories)
        restricted_table = homoglyph_table.get_restricted_table(source_alphabet, homoglyph_table.alphabet)
        return restricted_table

    def _sanitize_text(self, target_category: str, homoglyph_table: dict, homoglyphed_str: str) -> str:
        """Replace chars outside the target/COMMON categories with a canon homoglyph."""
        sanitized_text = ''
        for char in homoglyphed_str:
            # NOTE(review): `cat` is a string (or None) from Categories.detect,
            # so these are substring checks and `cat is None` would raise —
            # confirm intended semantics before changing.
            cat = Categories.detect(char)
            if (target_category in cat) or ('COMMON' in cat) or (len(cat) == 0):
                sanitized_text += char
            else:
                sanitized_text += list(homoglyph_table[char])[0]
        return sanitized_text
class UnicodeSanitizer():
    """Regex-based unicode sanitizer with different levels of granularity.

    * ruleset="whitespaces" - attempts to remove only whitespace unicode characters
    * ruleset="IDN.blacklist" - does its best to remove unusual unicode based on Network.IDN.blacklist characters
    * ruleset="ascii" - brute-forces all text into ascii

    This is unlikely to be a comprehensive list. See
    https://www.unicode.org/reports/tr36/ and
    https://www.unicode.org/faq/security.html for a fuller discussion.
    """

    def __init__(self, ruleset='whitespaces'):
        if ruleset == 'whitespaces':
            # Non-breaking / zero-width spaces, joiners, direction marks,
            # word joiner, invisible separator, ideographic space, variation
            # selectors, hangul fillers, and bidi embedding controls.
            # Note: these characters are not always superfluous whitespace!
            self.pattern = re.compile(
                r'[\u00A0\u1680\u180E\u2000-\u200B\u200C\u200D\u200E\u200F'
                r'\u2060\u2063\u202F\u205F\u3000\uFEFF\uFFA0\uFFF9\uFFFA\uFFFB'
                r'\uFE00\uFE01\uFE02\uFE03\uFE04\uFE05\uFE06\uFE07\uFE08\uFE09'
                r'\uFE0A\uFE0B\uFE0C\uFE0D\uFE0E\uFE0F\u3164\u202A\u202B\u202C'
                r'\u202D\u202E\u202F]')
        elif ruleset == 'IDN.blacklist':
            # Whitespace from the IDN blacklist, interlinear annotation chars,
            # and (optionally paired) UTF-16 surrogate halves.
            self.pattern = re.compile(
                r'[\u00A0\u1680\u180E\u2000-\u200B\u202F\u205F\u2060\u2063\uFEFF'
                r'\uFFF9-\uFFFB\uD800-\uDB7F\uDB80-\uDBFF][\uDC00-\uDFFF]?'
                r'|[\uDB40\uDC20-\uDB40\uDC7F][\uDC00-\uDFFF]')
        else:
            # Simple "no-unicode" restriction: ascii only (control chars are
            # stripped separately in __call__).
            self.pattern = re.compile(r'[^\x00-\x7F]+')

    def __call__(self, text: str) -> str:
        """NFC-normalize, blank out matches, squeeze spaces, drop Cc chars."""
        normalized = unicodedata.normalize('NFC', text)
        spaced = self.pattern.sub(' ', normalized)
        squeezed = re.sub(' +', ' ', spaced)
        return ''.join(c for c in squeezed if unicodedata.category(c) != 'Cc')
class TrueCaser():
    """True-casing, is a capitalization normalization that returns text to its original capitalization.

    This defends against attacks that wRIte TeXt lIkE spOngBoB.

    Here, a simple POS-tagger is used.
    """

    uppercase_pos = ['PROPN']  # spacy POS tags that warrant capitalization

    def __init__(self, backend='spacy'):
        if backend == 'spacy':
            import spacy
            self.nlp = spacy.load('en_core_web_sm')
            self.normalize_fn = self._spacy_truecasing
        else:
            from nltk import pos_tag, word_tokenize
            import nltk
            nltk.download('punkt')
            nltk.download('averaged_perceptron_tagger')
            nltk.download('universal_tagset')
            self.normalize_fn = self._nltk_truecasing

    def __call__(self, random_capitalized_string: str) -> str:
        return self.normalize_fn(random_capitalized_string)

    def _spacy_truecasing(self, random_capitalized_string: str):
        """Lowercase, then re-capitalize proper nouns and sentence starts."""
        doc = self.nlp(random_capitalized_string.lower())
        POS = self.uppercase_pos
        return ''.join([
            w.text_with_ws.capitalize() if (w.pos_ in POS or w.is_sent_start) else w.text_with_ws
            for w in doc])

    def _nltk_truecasing(self, random_capitalized_string: str):
        """NLTK fallback: capitalize tokens tagged as proper nouns."""
        from nltk import pos_tag, word_tokenize
        import nltk
        nltk.download('punkt')
        nltk.download('averaged_perceptron_tagger')
        nltk.download('universal_tagset')
        POS = ['NNP', 'NNPS']
        tagged = pos_tag(word_tokenize(random_capitalized_string.lower()))
        return ' '.join([w.capitalize() if p in POS else w for (w, p) in tagged])
class WatermarkBase():
    """Shared state and green-list derivation for watermarking.

    Args:
        vocab: list of all token ids in the model vocabulary.
        gamma: fraction of the vocabulary placed on the green list.
        delta: logit bias added to green tokens at generation time.
        seeding_scheme: how the RNG is seeded from context (only 'simple_1').
        hash_key: large prime mixed with the previous token id to seed the RNG.
        select_green_tokens: take the green list from the head (True) or the
            tail (False) of the seeded vocabulary permutation.

    Improvement: `_get_greenlist_ids` was annotated `-> list[int]` but returns
    a LongTensor; the annotation is corrected.
    """

    def __init__(self, vocab: list[int] = None, gamma: float = 0.5, delta: float = 2.0,
                 seeding_scheme: str = 'simple_1', hash_key: int = 15485863,
                 select_green_tokens: bool = True):
        self.vocab = vocab
        self.vocab_size = len(vocab)
        self.gamma = gamma
        self.delta = delta
        self.seeding_scheme = seeding_scheme
        self.rng = None  # created lazily by subclasses (device-dependent)
        self.hash_key = hash_key
        self.select_green_tokens = select_green_tokens

    def _seed_rng(self, input_ids: torch.LongTensor, seeding_scheme: str = None) -> None:
        """Seed self.rng deterministically from the context token(s)."""
        if seeding_scheme is None:
            seeding_scheme = self.seeding_scheme
        if seeding_scheme == 'simple_1':
            assert input_ids.shape[-1] >= 1, \
                f'seeding_scheme={seeding_scheme} requires at least a 1 token prefix sequence to seed rng'
            prev_token = input_ids[-1].item()
            self.rng.manual_seed(self.hash_key * prev_token)
        else:
            raise NotImplementedError(f'Unexpected seeding_scheme: {seeding_scheme}')
        return

    def _get_greenlist_ids(self, input_ids: torch.LongTensor) -> torch.LongTensor:
        """Return the green-list token ids for the given context."""
        self._seed_rng(input_ids)
        greenlist_size = int(self.vocab_size * self.gamma)
        vocab_permutation = torch.randperm(self.vocab_size, device=input_ids.device, generator=self.rng)
        if self.select_green_tokens:
            greenlist_ids = vocab_permutation[:greenlist_size]
        else:
            greenlist_ids = vocab_permutation[self.vocab_size - greenlist_size:]
        return greenlist_ids
class WatermarkLogitsProcessor(WatermarkBase, LogitsProcessor):
    """LogitsProcessor that adds a bias of `delta` to green-list token logits."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _calc_greenlist_mask(self, scores: torch.FloatTensor, greenlist_token_ids) -> torch.BoolTensor:
        """Boolean mask over `scores` marking each batch row's green tokens."""
        mask = torch.zeros_like(scores)
        for row, green_ids in enumerate(greenlist_token_ids):
            mask[row][green_ids] = 1
        return mask.bool()

    def _bias_greenlist_logits(self, scores: torch.Tensor, greenlist_mask: torch.Tensor, greenlist_bias: float) -> torch.Tensor:
        """Add the bias to every masked logit."""
        scores[greenlist_mask] = scores[greenlist_mask] + greenlist_bias
        return scores

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # Create the generator lazily so it lives on the right device.
        if self.rng is None:
            self.rng = torch.Generator(device=input_ids.device)
        batched_greenlist_ids = [self._get_greenlist_ids(input_ids[b]) for b in range(input_ids.shape[0])]
        green_mask = self._calc_greenlist_mask(scores=scores, greenlist_token_ids=batched_greenlist_ids)
        return self._bias_greenlist_logits(scores=scores, greenlist_mask=green_mask, greenlist_bias=self.delta)
class WatermarkDetector(WatermarkBase):
    """Detect the green-list watermark in text via a one-proportion z-test.

    Improvements:
    - `normalizers` previously used a mutable default argument (['unicode']);
      it now defaults to None and is materialized inside __init__.
    - Normalizers previously ran even when only `tokenized_text` was passed
      (text=None), crashing inside the normalizer; they now run only on raw
      text input.
    """

    def __init__(self, *args, device: torch.device = None, tokenizer: Tokenizer = None,
                 z_threshold: float = 4.0, normalizers: list[str] = None,
                 ignore_repeated_bigrams: bool = False, **kwargs):
        super().__init__(*args, **kwargs)
        assert device, 'Must pass device'
        assert tokenizer, 'Need an instance of the generating tokenizer to perform detection'
        self.tokenizer = tokenizer
        self.device = device
        self.z_threshold = z_threshold
        self.rng = torch.Generator(device=self.device)

        if self.seeding_scheme == 'simple_1':
            self.min_prefix_len = 1
        else:
            raise NotImplementedError(f'Unexpected seeding_scheme: {self.seeding_scheme}')

        # BUGFIX: avoid the shared mutable default argument.
        if normalizers is None:
            normalizers = ['unicode']
        self.normalizers = [normalization_strategy_lookup(name) for name in normalizers]

        self.ignore_repeated_bigrams = ignore_repeated_bigrams
        if self.ignore_repeated_bigrams:
            assert self.seeding_scheme == 'simple_1', 'No repeated bigram credit variant assumes the single token seeding scheme.'

    def _compute_z_score(self, observed_count, T):
        """One-proportion z-score for `observed_count` green hits in T trials."""
        expected_count = self.gamma
        numer = observed_count - expected_count * T
        denom = sqrt(T * expected_count * (1 - expected_count))
        return numer / denom

    def _compute_p_value(self, z):
        """One-sided p-value (normal survival function) for the z-score."""
        return scipy.stats.norm.sf(z)

    def _score_sequence(self, input_ids: Tensor, return_num_tokens_scored: bool = True,
                        return_num_green_tokens: bool = True, return_green_fraction: bool = True,
                        return_green_token_mask: bool = False, return_z_score: bool = True,
                        return_p_value: bool = True):
        """Score one token sequence; returns a dict of requested statistics."""
        if self.ignore_repeated_bigrams:
            # Score each *unique* bigram once, regardless of repetition.
            assert return_green_token_mask == False, "Can't return the green/red mask when ignoring repeats."
            bigram_table = {}
            token_bigram_generator = ngrams(input_ids.cpu().tolist(), 2)
            freq = collections.Counter(token_bigram_generator)
            num_tokens_scored = len(freq.keys())
            for idx, bigram in enumerate(freq.keys()):
                prefix = torch.tensor([bigram[0]], device=self.device)
                greenlist_ids = self._get_greenlist_ids(prefix)
                bigram_table[bigram] = True if bigram[1] in greenlist_ids else False
            green_token_count = sum(bigram_table.values())
        else:
            num_tokens_scored = len(input_ids) - self.min_prefix_len
            if num_tokens_scored < 1:
                raise ValueError(f'Must have at least {1} token to score after the first min_prefix_len={self.min_prefix_len} tokens required by the seeding scheme.')
            green_token_count, green_token_mask = 0, []
            # Each position is scored against the green list seeded by its prefix.
            for idx in range(self.min_prefix_len, len(input_ids)):
                curr_token = input_ids[idx]
                greenlist_ids = self._get_greenlist_ids(input_ids[:idx])
                if curr_token in greenlist_ids:
                    green_token_count += 1
                    green_token_mask.append(True)
                else:
                    green_token_mask.append(False)

        score_dict = dict()
        if return_num_tokens_scored:
            score_dict.update(dict(num_tokens_scored=num_tokens_scored))
        if return_num_green_tokens:
            score_dict.update(dict(num_green_tokens=green_token_count))
        if return_green_fraction:
            score_dict.update(dict(green_fraction=green_token_count / num_tokens_scored))
        if return_z_score:
            score_dict.update(dict(z_score=self._compute_z_score(green_token_count, num_tokens_scored)))
        if return_p_value:
            z_score = score_dict.get('z_score')
            if z_score is None:
                z_score = self._compute_z_score(green_token_count, num_tokens_scored)
            score_dict.update(dict(p_value=self._compute_p_value(z_score)))
        if return_green_token_mask:
            score_dict.update(dict(green_token_mask=green_token_mask))
        return score_dict

    def detect(self, text: str = None, tokenized_text: list[int] = None,
               return_prediction: bool = True, return_scores: bool = True,
               z_threshold: float = None, **kwargs) -> dict:
        """Score `text` (or pre-tokenized ids) and optionally return a verdict."""
        assert (text is not None) ^ (tokenized_text is not None), 'Must pass either the raw or tokenized string'
        if return_prediction:
            kwargs['return_p_value'] = True  # a prediction requires the p-value
        if text is not None:  # BUGFIX: don't run normalizers on None
            for normalizer in self.normalizers:
                text = normalizer(text)
            if len(self.normalizers) > 0:
                print(f'''Text after normalization: {text}
''')
        if tokenized_text is None:
            assert self.tokenizer is not None, ('Watermark detection on raw string ', 'requires an instance of the tokenizer ', 'that was used at generation time.')
            tokenized_text = self.tokenizer(text, return_tensors='pt', add_special_tokens=False)['input_ids'][0].to(self.device)
            if tokenized_text[0] == self.tokenizer.bos_token_id:
                tokenized_text = tokenized_text[1:]
        elif (self.tokenizer is not None) and (tokenized_text[0] == self.tokenizer.bos_token_id):
            tokenized_text = tokenized_text[1:]

        output_dict = {}
        score_dict = self._score_sequence(tokenized_text, **kwargs)
        if return_scores:
            output_dict.update(score_dict)
        if return_prediction:
            z_threshold = z_threshold if z_threshold else self.z_threshold
            assert z_threshold is not None, 'Need a threshold in order to decide outcome of detection test'
            output_dict['prediction'] = score_dict['z_score'] > z_threshold
            if output_dict['prediction']:
                output_dict['confidence'] = 1 - score_dict['p_value']
        return output_dict
def transform_key_func(generator, n, vocab_size, eff_vocab_size=None):
    """Draw the watermark key pair for the transform scheme.

    Returns ``(xi, pi)`` where ``pi`` is a random permutation of the
    vocabulary indices and ``xi`` is an ``(n, 1)`` tensor of uniform
    samples.  ``eff_vocab_size`` is accepted for interface compatibility
    but unused here.
    """
    # The permutation is drawn before xi, so the generator state is
    # consumed in a fixed order — keep this order for reproducibility.
    permutation = torch.randperm(vocab_size, generator=generator)
    uniform_samples = torch.rand((n, 1), generator=generator)
    return uniform_samples, permutation
def transform_sampling(probs, pi, xi):
    """Sample token ids by inverse-CDF sampling over the permuted vocabulary."""
    # Reorder each row of probabilities by the permutation, then build the
    # cumulative distribution along the vocab axis.
    permuted_probs = torch.gather(probs, 1, pi)
    cdf = permuted_probs.cumsum(dim=1)
    # Find where each uniform draw xi lands in the CDF, then map the
    # position back to an actual token id through the permutation.
    positions = torch.searchsorted(cdf, xi)
    return torch.gather(pi, 1, positions)
def transform_score(tokens, xi):
    """Score a token sequence against the key as the L1 distance to xi.

    The original wrapped the norm in ``torch.pow(..., 1)``, which is the
    identity; the score is simply the 1-norm of the residual.
    """
    residual = tokens - xi.squeeze()
    return torch.linalg.norm(residual, ord=1) ** 1
def transform_edit_score(tokens, xi, gamma=1):
    """Edit-distance variant of the transform watermark score.

    Delegates to ``transform_levenshtein`` (defined elsewhere in the
    project) on numpy views of the inputs; ``gamma`` is the edit cost.
    """
    token_arr = tokens.numpy()
    key_arr = xi.squeeze().numpy()
    return transform_levenshtein(token_arr, key_arr, gamma)
def MinMaxScaler(data):
    """Column-wise min-max normalisation.

    Args:
      - data: original data (2-D array-like; columns are features)

    Returns:
      - norm_data: data rescaled so each column spans roughly [0, 1]
    """
    col_min = np.min(data, 0)
    col_range = np.max(data, 0) - col_min
    # The small epsilon guards against division by zero on constant columns.
    return (data - col_min) / (col_range + 1e-07)
def sine_data_generation(no, seq_len, dim):
    """Generate `no` multivariate sine time-series samples.

    Args:
      - no: number of samples
      - seq_len: sequence length of each series
      - dim: number of feature dimensions

    Returns:
      - data: list of (seq_len, dim) arrays with values scaled into [0, 1]
    """
    data = []
    for _ in range(no):
        channels = []
        for _ in range(dim):
            # Frequency is drawn before phase — keep this order so the
            # global numpy RNG stream matches previous runs.
            freq = np.random.uniform(0, 0.1)
            phase = np.random.uniform(0, 0.1)
            channels.append([np.sin(freq * j + phase) for j in range(seq_len)])
        sample = np.transpose(np.asarray(channels))
        # Map sin output from [-1, 1] into [0, 1].
        data.append((sample + 1) * 0.5)
    return data
def real_data_loading(data_name, seq_len):
    """Load and preprocess the real-world 'stock' or 'energy' dataset.

    Args:
      - data_name: 'stock' or 'energy'
      - seq_len: sliding-window sequence length

    Returns:
      - data: list of (seq_len, dim) windows in a shuffled order
    """
    assert data_name in ['stock', 'energy']
    if data_name == 'stock':
        ori_data = np.loadtxt('data/stock_data.csv', delimiter=',', skiprows=1)
    elif data_name == 'energy':
        ori_data = np.loadtxt('data/energy_data.csv', delimiter=',', skiprows=1)
    # Flip to chronological order, then normalise features into [0, 1].
    ori_data = MinMaxScaler(ori_data[::-1])
    # Slice the series into overlapping windows of length seq_len.
    windows = [ori_data[i:i + seq_len] for i in range(len(ori_data) - seq_len)]
    # Shuffle the windows so mini-batches are approximately i.i.d.
    shuffle_idx = np.random.permutation(len(windows))
    return [windows[i] for i in shuffle_idx]
def main(args):
    """Run a full timeGAN experiment: data prep, training, evaluation.

    Args:
      - args: namespace providing data_name ('sine'|'stock'|'energy'),
        seq_len, module, hidden_dim, num_layer, iteration, batch_size and
        metric_iteration

    Returns:
      - ori_data: original data
      - generated_data: generated synthetic data
      - metric_results: discriminative and predictive scores
    """
    # --- data loading ---
    if args.data_name in ['stock', 'energy']:
        ori_data = real_data_loading(args.data_name, args.seq_len)
    elif args.data_name == 'sine':
        no, dim = 10000, 5
        ori_data = sine_data_generation(no, args.seq_len, dim)
    print(args.data_name + ' dataset is ready.')

    # --- network parameters ---
    parameters = {
        'module': args.module,
        'hidden_dim': args.hidden_dim,
        'num_layer': args.num_layer,
        'iterations': args.iteration,
        'batch_size': args.batch_size,
    }

    # --- synthetic data generation ---
    generated_data = timegan(ori_data, parameters)
    print('Finish Synthetic Data Generation')

    # --- evaluation: average each metric over metric_iteration runs ---
    metric_results = {}
    disc_scores = [discriminative_score_metrics(ori_data, generated_data)
                   for _ in range(args.metric_iteration)]
    metric_results['discriminative'] = np.mean(disc_scores)
    pred_scores = [predictive_score_metrics(ori_data, generated_data)
                  for _ in range(args.metric_iteration)]
    metric_results['predictive'] = np.mean(pred_scores)

    # --- qualitative visualisation ---
    visualization(ori_data, generated_data, 'pca')
    visualization(ori_data, generated_data, 'tsne')
    print(metric_results)
    return (ori_data, generated_data, metric_results)
def discriminative_score_metrics(ori_data, generated_data):
    """Use a post-hoc RNN classifier to separate original and synthetic data.

    A small GRU discriminator is trained to distinguish real from generated
    sequences; the closer its held-out accuracy is to chance (0.5), the
    better the synthetic data.

    Args:
      - ori_data: original data, shape (no, seq_len, dim)
      - generated_data: generated synthetic data, same shape

    Returns:
      - discriminative_score: np.abs(classification accuracy - 0.5)
    """
    # Start from a clean TF1 graph.
    tf.reset_default_graph()
    (no, seq_len, dim) = np.asarray(ori_data).shape
    # Per-sequence lengths and maximum lengths for both datasets.
    (ori_time, ori_max_seq_len) = extract_time(ori_data)
    # BUG FIX: this previously called extract_time(ori_data) a second time,
    # so the generated sequence lengths were taken from the original data.
    (generated_time, generated_max_seq_len) = extract_time(generated_data)
    max_seq_len = max([ori_max_seq_len, generated_max_seq_len])
    # Network parameters.
    hidden_dim = int((dim / 2))
    iterations = 2000
    batch_size = 128
    # Placeholders: real data X / synthetic data X_hat and their lengths.
    X = tf.placeholder(tf.float32, [None, max_seq_len, dim], name='myinput_x')
    X_hat = tf.placeholder(tf.float32, [None, max_seq_len, dim], name='myinput_x_hat')
    T = tf.placeholder(tf.int32, [None], name='myinput_t')
    T_hat = tf.placeholder(tf.int32, [None], name='myinput_t_hat')

    def discriminator(x, t):
        """Simple GRU discriminator.

        Args:
          - x: time-series data
          - t: per-sequence lengths

        Returns:
          - y_hat_logit: logits of the discriminator output
          - y_hat: discriminator output (sigmoid probability)
          - d_vars: discriminator variables
        """
        with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE) as vs:
            d_cell = tf.nn.rnn_cell.GRUCell(num_units=hidden_dim, activation=tf.nn.tanh, name='d_cell')
            (d_outputs, d_last_states) = tf.nn.dynamic_rnn(d_cell, x, dtype=tf.float32, sequence_length=t)
            y_hat_logit = tf.contrib.layers.fully_connected(d_last_states, 1, activation_fn=None)
            y_hat = tf.nn.sigmoid(y_hat_logit)
            d_vars = [v for v in tf.all_variables() if v.name.startswith(vs.name)]
        return (y_hat_logit, y_hat, d_vars)

    (y_logit_real, y_pred_real, d_vars) = discriminator(X, T)
    (y_logit_fake, y_pred_fake, _) = discriminator(X_hat, T_hat)
    # Standard discriminator loss: real -> 1, fake -> 0.
    d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_logit_real, labels=tf.ones_like(y_logit_real)))
    d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_logit_fake, labels=tf.zeros_like(y_logit_fake)))
    d_loss = (d_loss_real + d_loss_fake)
    d_solver = tf.train.AdamOptimizer().minimize(d_loss, var_list=d_vars)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # Train/test split over both real and synthetic data.
    (train_x, train_x_hat, test_x, test_x_hat, train_t, train_t_hat, test_t, test_t_hat) = train_test_divide(ori_data, generated_data, ori_time, generated_time)
    # Train the discriminator.
    for itt in range(iterations):
        (X_mb, T_mb) = batch_generator(train_x, train_t, batch_size)
        (X_hat_mb, T_hat_mb) = batch_generator(train_x_hat, train_t_hat, batch_size)
        (_, step_d_loss) = sess.run([d_solver, d_loss], feed_dict={X: X_mb, T: T_mb, X_hat: X_hat_mb, T_hat: T_hat_mb})
    # Evaluate on the held-out split.
    (y_pred_real_curr, y_pred_fake_curr) = sess.run([y_pred_real, y_pred_fake], feed_dict={X: test_x, T: test_t, X_hat: test_x_hat, T_hat: test_t_hat})
    y_pred_final = np.squeeze(np.concatenate((y_pred_real_curr, y_pred_fake_curr), axis=0))
    y_label_final = np.concatenate((np.ones([len(y_pred_real_curr)]), np.zeros([len(y_pred_fake_curr)])), axis=0)
    acc = accuracy_score(y_label_final, (y_pred_final > 0.5))
    # Chance level is 0.5; report the deviation from it.
    discriminative_score = np.abs((0.5 - acc))
    return discriminative_score
def is_image_file(filename):
    """Return True if `filename` ends with an extension in IMG_EXTENSIONS."""
    for ext in IMG_EXTENSIONS:
        if filename.endswith(ext):
            return True
    return False
def make_dataset(dir, max_dataset_size=float('inf')):
    """Collect image paths under `dir`, using/creating a '<dir>.txt' cache.

    The cache stores paths relative to the parent of `dir`; caches from an
    older version may already contain absolute paths and are used verbatim.
    """
    cache = dir.rstrip('/') + '.txt'
    if os.path.isfile(cache):
        print('Using filelist cached at %s' % cache)
        with open(cache) as f:
            images = [line.strip() for line in f]
        if images[0].startswith(dir):
            # Old cache format: entries are already absolute.
            print('Using image list from older version')
            image_list = list(images)
        else:
            print('Adding prefix to saved image list')
            prefix = os.path.dirname(dir.rstrip('/'))
            image_list = [os.path.join(prefix, image) for image in images]
        return image_list
    print('Walking directory ...')
    images = []
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    for root, _, fnames in sorted(os.walk(dir, followlinks=True)):
        for fname in fnames:
            if is_image_file(fname):
                images.append(os.path.join(root, fname))
    image_list = images[:min(max_dataset_size, len(images))]
    # Persist the (prefix-stripped) list so the walk is skipped next time.
    with open(cache, 'w') as f:
        prefix = os.path.dirname(dir.rstrip('/')) + '/'
        for i in image_list:
            f.write('%s\n' % util.remove_prefix(i, prefix))
    return image_list
def make_multiple_dataset(dir, max_dataset_size=float('inf')):
    """Gather image paths across all four FaceForensics++ manipulation methods.

    The incoming `dir` is expected to end in two components (e.g.
    '.../<method>/manipulated/train'); those last two components are kept and
    re-rooted under each method folder in `subdir`.  Per-method results are
    cached in a '<folder>.txt' file next to each folder.

    NOTE(review): `random.sample` both subsamples and shuffles, so the order
    of the returned list depends on the global `random` state.
    """
    subdir = ['Deepfakes', 'Face2Face', 'FaceSwap', 'NeuralTextures']
    total_image_list = []
    # Keep the trailing two path components, strip them from `dir`.
    (last_dir, dir) = (((dir.split('/')[(- 2)] + '/') + dir.split('/')[(- 1)]), '/'.join(dir.split('/')[:(- 2)]))
    print(dir)
    for sdir in subdir:
        # <root>/<method>/<last two components>/
        curr_dir = (((((dir + '/') + sdir) + '/') + last_dir) + '/')
        print(curr_dir)
        cache = (curr_dir.rstrip('/') + '.txt')
        if os.path.isfile(cache):
            print(('Using filelist cached at %s' % cache))
            with open(cache) as f:
                images = [line.strip() for line in f]
            if images[0].startswith(curr_dir):
                # Old cache format: entries are already absolute paths.
                print('Using image list from older version')
                image_list = []
                for image in images:
                    image_list.append(image)
            else:
                # Newer caches store paths relative to the parent folder.
                print('Adding prefix to saved image list')
                image_list = []
                prefix = os.path.dirname(curr_dir.rstrip('/'))
                for image in images:
                    image_list.append(os.path.join(prefix, image))
            # Subsample (and shuffle) down to at most max_dataset_size entries.
            image_list = random.sample(image_list, min(max_dataset_size, len(image_list)))
            total_image_list += image_list
        else:
            print('Walking directory ...')
            images = []
            assert os.path.isdir(curr_dir), ('%s is not a valid directory' % curr_dir)
            for (root, _, fnames) in sorted(os.walk(curr_dir, followlinks=True)):
                for fname in fnames:
                    if is_image_file(fname):
                        path = os.path.join(root, fname)
                        images.append(path)
            image_list = random.sample(images, min(max_dataset_size, len(images)))
            # Cache the (prefix-stripped) subsampled list for future runs.
            with open(cache, 'w') as f:
                prefix = (os.path.dirname(curr_dir.rstrip('/')) + '/')
                for i in image_list:
                    f.write(('%s\n' % util.remove_prefix(i, prefix)))
            total_image_list += image_list
    return total_image_list
def make_multiple_dataset_real(dir, max_dataset_size=float('inf')):
    """Gather REAL image paths across several source datasets.

    The last component of `dir` (e.g. a train/val split name) is re-rooted
    under each dataset folder in `subdir`.  Note the celebahq real folder is
    deliberately listed three times, presumably to balance against the three
    GAN fake sources — TODO confirm.  Per-folder results are cached in a
    '<folder>.txt' file; `random.sample` both subsamples and shuffles.
    """
    subdir = ['faces/celebahq/real-tfr-1024-resized128', 'faces/celebahq/real-tfr-1024-resized128', 'faces/celebahq/real-tfr-1024-resized128', 'faceforensics_aligned/Deepfakes/original', 'faceforensics_aligned/Face2Face/original', 'faceforensics_aligned/FaceSwap/original', 'faceforensics_aligned/NeuralTextures/original']
    total_image_list = []
    # Keep the trailing path component, strip it from `dir`.
    (last_dir, dir) = (dir.split('/')[(- 1)], '/'.join(dir.split('/')[:(- 1)]))
    print(dir)
    for sdir in subdir:
        # <root>/<dataset>/<last component>/
        curr_dir = (((((dir + '/') + sdir) + '/') + last_dir) + '/')
        print(curr_dir)
        cache = (curr_dir.rstrip('/') + '.txt')
        if os.path.isfile(cache):
            print(('Using filelist cached at %s' % cache))
            with open(cache) as f:
                images = [line.strip() for line in f]
            if images[0].startswith(curr_dir):
                # Old cache format: entries are already absolute paths.
                print('Using image list from older version')
                image_list = []
                for image in images:
                    image_list.append(image)
            else:
                # Newer caches store paths relative to the parent folder.
                print('Adding prefix to saved image list')
                image_list = []
                prefix = os.path.dirname(curr_dir.rstrip('/'))
                for image in images:
                    image_list.append(os.path.join(prefix, image))
            # Subsample (and shuffle) down to at most max_dataset_size entries.
            image_list = random.sample(image_list, min(max_dataset_size, len(image_list)))
            total_image_list += image_list
        else:
            print('Walking directory ...')
            images = []
            assert os.path.isdir(curr_dir), ('%s is not a valid directory' % curr_dir)
            for (root, _, fnames) in sorted(os.walk(curr_dir, followlinks=True)):
                for fname in fnames:
                    if is_image_file(fname):
                        path = os.path.join(root, fname)
                        images.append(path)
            image_list = random.sample(images, min(max_dataset_size, len(images)))
            # Cache the (prefix-stripped) subsampled list for future runs.
            with open(cache, 'w') as f:
                prefix = (os.path.dirname(curr_dir.rstrip('/')) + '/')
                for i in image_list:
                    f.write(('%s\n' % util.remove_prefix(i, prefix)))
            total_image_list += image_list
    return total_image_list
def make_multiple_dataset_fake(dir, max_dataset_size=float('inf')):
    """Gather FAKE image paths across several generator/manipulation datasets.

    Mirror of make_multiple_dataset_real: the last component of `dir` is
    re-rooted under each fake-source folder in `subdir`.  Per-folder results
    are cached in a '<folder>.txt' file; `random.sample` both subsamples and
    shuffles, so output order depends on the global `random` state.
    """
    subdir = ['faces/celebahq/pgan-pretrained-128-png', 'faces/celebahq/sgan-pretrained-128-png', 'faces/celebahq/glow-pretrained-128-png', 'faceforensics_aligned/Deepfakes/manipulated', 'faceforensics_aligned/Face2Face/manipulated', 'faceforensics_aligned/FaceSwap/manipulated', 'faceforensics_aligned/NeuralTextures/manipulated']
    total_image_list = []
    # Keep the trailing path component, strip it from `dir`.
    (last_dir, dir) = (dir.split('/')[(- 1)], '/'.join(dir.split('/')[:(- 1)]))
    print(dir)
    for sdir in subdir:
        # <root>/<dataset>/<last component>/
        curr_dir = (((((dir + '/') + sdir) + '/') + last_dir) + '/')
        print(curr_dir)
        cache = (curr_dir.rstrip('/') + '.txt')
        if os.path.isfile(cache):
            print(('Using filelist cached at %s' % cache))
            with open(cache) as f:
                images = [line.strip() for line in f]
            if images[0].startswith(curr_dir):
                # Old cache format: entries are already absolute paths.
                print('Using image list from older version')
                image_list = []
                for image in images:
                    image_list.append(image)
            else:
                # Newer caches store paths relative to the parent folder.
                print('Adding prefix to saved image list')
                image_list = []
                prefix = os.path.dirname(curr_dir.rstrip('/'))
                for image in images:
                    image_list.append(os.path.join(prefix, image))
            # Subsample (and shuffle) down to at most max_dataset_size entries.
            image_list = random.sample(image_list, min(max_dataset_size, len(image_list)))
            total_image_list += image_list
        else:
            print('Walking directory ...')
            images = []
            assert os.path.isdir(curr_dir), ('%s is not a valid directory' % curr_dir)
            for (root, _, fnames) in sorted(os.walk(curr_dir, followlinks=True)):
                for fname in fnames:
                    if is_image_file(fname):
                        path = os.path.join(root, fname)
                        images.append(path)
            image_list = random.sample(images, min(max_dataset_size, len(images)))
            # Cache the (prefix-stripped) subsampled list for future runs.
            with open(cache, 'w') as f:
                prefix = (os.path.dirname(curr_dir.rstrip('/')) + '/')
                for i in image_list:
                    f.write(('%s\n' % util.remove_prefix(i, prefix)))
            total_image_list += image_list
    return total_image_list
def make_CNNDetection_dataset(dir, max_dataset_size=float('inf'), mode='real'):
    """Collect (image_path, class_name) lists for a CNNDetection-style tree.

    Expects `dir` to contain one sub-folder per class, each holding a
    '0_real' or '1_fake' sub-folder selected by `mode`.  A per-class
    '<folder>.txt' cache is used/created, like make_dataset.

    Args:
        dir: dataset root directory
        max_dataset_size: per-class cap on the number of images kept.
            BUG FIX: this parameter was previously accepted but silently
            ignored; it is now honoured (default inf keeps everything, so
            existing callers are unaffected).
        mode: 'real' or 'fake'

    Returns:
        (total_image_list, total_class_list): parallel lists of image paths
        and their class names.

    Raises:
        ValueError: for an unknown `mode` (previously this crashed later
        with a NameError on `sdir`).
    """
    if mode == 'real':
        sdir = '0_real'
    elif mode == 'fake':
        sdir = '1_fake'
    else:
        raise ValueError("mode must be 'real' or 'fake', got %r" % (mode,))
    classes = os.listdir(dir)
    total_image_list = []
    total_class_list = []
    print(dir)
    for cls in classes:
        curr_dir = ((((dir + '/') + cls) + '/') + sdir)
        print(curr_dir)
        cache = (curr_dir.rstrip('/') + '.txt')
        if os.path.isfile(cache):
            print(('Using filelist cached at %s' % cache))
            with open(cache) as f:
                images = [line.strip() for line in f]
            if images[0].startswith(curr_dir):
                # Old cache format: entries are already absolute paths.
                print('Using image list from older version')
                image_list = list(images)
            else:
                # Newer caches store paths relative to the class folder.
                print('Adding prefix to saved image list')
                prefix = os.path.dirname(curr_dir.rstrip('/'))
                image_list = [os.path.join(prefix, image) for image in images]
        else:
            print('Walking directory ...')
            assert os.path.isdir(curr_dir), ('%s is not a valid directory' % curr_dir)
            image_list = []
            for (root, _, fnames) in sorted(os.walk(curr_dir, followlinks=True)):
                for fname in fnames:
                    if is_image_file(fname):
                        image_list.append(os.path.join(root, fname))
            # Cache the full (untruncated) listing for future runs.
            with open(cache, 'w') as f:
                prefix = (os.path.dirname(curr_dir.rstrip('/')) + '/')
                for i in image_list:
                    f.write(('%s\n' % util.remove_prefix(i, prefix)))
        # Honour max_dataset_size; the class list stays aligned by length.
        keep = min(max_dataset_size, len(image_list))
        image_list = image_list[:int(keep)]
        total_image_list += image_list
        total_class_list += ([cls] * len(image_list))
    return (total_image_list, total_class_list)
def default_loader(path):
    """Open the image at `path` with PIL and return it converted to RGB."""
    img = Image.open(path)
    return img.convert('RGB')
class PairedDataset(data.Dataset):
    """A dataset class for paired images, e.g. corresponding real and
    manipulated images.

    Each item pairs one real with one fake image (indices wrap around the
    shorter list).  With `with_mask`, per-image masks are additionally
    loaded from sibling folders whose path is the image folder path with
    'face' replaced by 'mask'.
    """

    def __init__(self, opt, im_path_real, im_path_fake, is_val=False, with_mask=False):
        """Initialize this dataset class.

        Parameters:
            opt -- experiment options
            im_path_real -- path to folder of real images
            im_path_fake -- path to folder of fake images
            is_val -- is this training or validation? used to determine
                transform
            with_mask -- also load per-image masks from the parallel
                'mask' folders
        """
        super().__init__()
        self.dir_real = im_path_real
        self.dir_fake = im_path_fake
        self.with_mask = with_mask
        if self.with_mask:
            # List the folder directly (no cache file) so image and mask
            # listings stay index-aligned after sorting.
            self.real_paths = sorted([os.path.join(self.dir_real, im) for im in os.listdir(self.dir_real)])
            self.fake_paths = sorted([os.path.join(self.dir_fake, im) for im in os.listdir(self.dir_fake)])
        else:
            self.real_paths = sorted(make_dataset(self.dir_real, opt.max_dataset_size))
            self.fake_paths = sorted(make_dataset(self.dir_fake, opt.max_dataset_size))
        self.real_size = len(self.real_paths)
        self.fake_size = len(self.fake_paths)
        self.transform = transforms.get_transform(opt, for_val=is_val)
        if self.with_mask:
            # Mask folders mirror the image folders ('face' -> 'mask');
            # sorting keeps them aligned with the image lists above.
            self.real_mask_paths = sorted([os.path.join(self.dir_real.replace('face', 'mask'), im) for im in os.listdir(self.dir_real.replace('face', 'mask'))])
            self.fake_mask_paths = sorted([os.path.join(self.dir_fake.replace('face', 'mask'), im) for im in os.listdir(self.dir_fake.replace('face', 'mask'))])
            self.orig_transform = transforms.get_mask_transform(opt, for_val=is_val)
            self.real_mask_size = len(self.real_mask_paths)
            self.fake_mask_size = len(self.fake_mask_paths)
            # Every image must have exactly one corresponding mask.
            assert (self.real_mask_size == self.real_size)
            assert (self.fake_mask_size == self.fake_size)
        self.opt = opt

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index - - a random integer for data indexing
        """
        # Wrap the index so the longer of the two lists drives __len__.
        real_path = self.real_paths[(index % self.real_size)]
        fake_path = self.fake_paths[(index % self.fake_size)]
        real_img = Image.open(real_path).convert('RGB')
        fake_img = Image.open(fake_path).convert('RGB')
        real = self.transform(real_img)
        fake = self.transform(fake_img)
        if self.with_mask:
            # Masks are single-channel ('L') and use the mask transform.
            real_mask_path = self.real_mask_paths[(index % self.real_mask_size)]
            fake_mask_path = self.fake_mask_paths[(index % self.fake_mask_size)]
            real_mask = Image.open(real_mask_path).convert('L')
            fake_mask = Image.open(fake_mask_path).convert('L')
            real_mask = self.orig_transform(real_mask)
            fake_mask = self.orig_transform(fake_mask)
        if self.with_mask:
            return {'manipulated': fake, 'original': real, 'path_manipulated': fake_path, 'path_original': real_path, 'mask_original': real_mask, 'mask_manipulated': fake_mask}
        else:
            return {'manipulated': fake, 'original': real, 'path_manipulated': fake_path, 'path_original': real_path}

    def __len__(self):
        # Length is the larger of the two lists; the shorter wraps around.
        return max(self.real_size, self.fake_size)
class UnpairedDataset(data.Dataset):
    """Dataset serving every image found inside a single folder."""

    def __init__(self, opt, im_path, is_val=False):
        """Initialize this dataset class.

        Parameters:
            opt -- experiment options
            im_path -- path to folder of images
            is_val -- validation flag; selects the eval-time transform
        """
        super().__init__()
        self.dir = im_path
        self.paths = sorted(make_dataset(self.dir, opt.max_dataset_size))
        self.size = len(self.paths)
        assert self.size > 0
        self.transform = transforms.get_transform(opt, for_val=is_val)
        self.opt = opt

    def __getitem__(self, index):
        """Return the transformed image at `index` together with its path."""
        img_path = self.paths[index]
        pil_img = Image.open(img_path).convert('RGB')
        return {'img': self.transform(pil_img), 'path': img_path}

    def __len__(self):
        return self.size
def get_available_masks():
    """Return the sorted names of mask classes in this module, plus 'none'."""
    this_module = sys.modules[__name__]
    # Every class except the abstract Mask base counts as an available mask.
    class_names = [name for name, obj in inspect.getmembers(this_module)
                   if inspect.isclass(obj) and name != 'Mask']
    masks = sorted(class_names)
    masks.append('none')
    return masks
def get_default_mask():
    """Return 'dfl_full' when available, otherwise the first available mask."""
    masks = get_available_masks()
    if 'dfl_full' in masks:
        return 'dfl_full'
    return masks[0]
class Mask():
    """Parent class for masks; the result is exposed as ``<instance>.mask``.

    channels: 1, 3 or 4:
        1 - single channel mask
        3 - mask tiled to 3 channels
        4 - the original face image with the mask as the alpha channel
    """

    def __init__(self, landmarks, face, channels=4):
        self.landmarks = landmarks
        self.face = face
        self.channels = channels
        raw_mask = self.build_mask()
        self.mask = self.merge_mask(raw_mask)

    def build_mask(self):
        """Subclasses must produce an (H, W, 1) float mask here."""
        raise NotImplementedError

    def merge_mask(self, mask):
        """Reshape `mask` into the channel layout requested at construction."""
        assert self.channels in (1, 3, 4), 'Channels should be 1, 3 or 4'
        assert mask.shape[2] == 1 and mask.ndim == 3, 'Input mask be 3 dimensions with 1 channel'
        if self.channels == 3:
            return np.tile(mask, 3)
        if self.channels == 4:
            # Stack the mask behind the face image as an alpha plane.
            return np.concatenate((self.face, mask), -1)
        return mask
class dfl_full(Mask):
    """DFL facial mask."""

    def build_mask(self):
        mask = np.zeros(self.face.shape[0:2] + (1,), dtype=np.float32)
        # Landmark groups whose convex hulls are painted into the mask.
        nose_ridge = (self.landmarks[27:31], self.landmarks[33:34])
        jaw = (self.landmarks[0:17], self.landmarks[48:68], self.landmarks[0:1],
               self.landmarks[8:9], self.landmarks[16:17])
        eyes = (self.landmarks[17:27], self.landmarks[0:1], self.landmarks[27:28],
                self.landmarks[16:17], self.landmarks[33:34])
        for group in (jaw, nose_ridge, eyes):
            hull = cv2.convexHull(np.concatenate(group))
            cv2.fillConvexPoly(mask, hull, 255.0)
        return mask
class components(Mask):
    """Component model mask: union of convex hulls over facial part groups."""

    def build_mask(self):
        mask = np.zeros(self.face.shape[0:2] + (1,), dtype=np.float32)
        lm = self.landmarks
        parts = [
            (lm[0:9], lm[17:18]),                            # right jaw
            (lm[8:17], lm[26:27]),                           # left jaw
            (lm[17:20], lm[8:9]),                            # right cheek
            (lm[24:27], lm[8:9]),                            # left cheek
            (lm[19:25], lm[8:9]),                            # nose ridge
            (lm[17:22], lm[27:28], lm[31:36], lm[8:9]),      # right eye
            (lm[22:27], lm[27:28], lm[31:36], lm[8:9]),      # left eye
            (lm[27:31], lm[31:36]),                          # nose
        ]
        for group in parts:
            cv2.fillConvexPoly(mask, cv2.convexHull(np.concatenate(group)), 255.0)
        return mask
class extended(Mask):
    """Extended mask.

    Based on the components mask; extends the eyebrow landmarks up the
    forehead before painting the part hulls.
    """

    def build_mask(self):
        mask = np.zeros(self.face.shape[0:2] + (1,), dtype=np.float32)
        landmarks = self.landmarks.copy()
        # Midpoints between eye corners and face border, used to estimate
        # how far to push the eyebrows upwards.
        ml_pnt = (landmarks[36] + landmarks[0]) // 2
        mr_pnt = (landmarks[16] + landmarks[45]) // 2
        ql_pnt = (landmarks[36] + ml_pnt) // 2
        qr_pnt = (landmarks[45] + mr_pnt) // 2
        bot_l = np.array((ql_pnt, landmarks[36], landmarks[37], landmarks[38], landmarks[39]))
        bot_r = np.array((landmarks[42], landmarks[43], landmarks[44], landmarks[45], qr_pnt))
        top_l = landmarks[17:22]
        top_r = landmarks[22:27]
        # Shift each eyebrow away from the eye line by half the gap.
        landmarks[17:22] = top_l + ((top_l - bot_l) // 2)
        landmarks[22:27] = top_r + ((top_r - bot_r) // 2)
        parts = [
            (landmarks[0:9], landmarks[17:18]),                                      # right jaw
            (landmarks[8:17], landmarks[26:27]),                                     # left jaw
            (landmarks[17:20], landmarks[8:9]),                                      # right cheek
            (landmarks[24:27], landmarks[8:9]),                                      # left cheek
            (landmarks[19:25], landmarks[8:9]),                                      # nose ridge
            (landmarks[17:22], landmarks[27:28], landmarks[31:36], landmarks[8:9]),  # right eye
            (landmarks[22:27], landmarks[27:28], landmarks[31:36], landmarks[8:9]),  # left eye
            (landmarks[27:31], landmarks[31:36]),                                    # nose
        ]
        for group in parts:
            cv2.fillConvexPoly(mask, cv2.convexHull(np.concatenate(group)), 255.0)
        return mask
class facehull(Mask):
    """Basic face hull mask."""

    def build_mask(self):
        mask = np.zeros(self.face.shape[0:2] + (1,), dtype=np.float32)
        points = np.array(self.landmarks).reshape((-1, 2))
        hull = cv2.convexHull(points)
        cv2.fillConvexPoly(mask, hull, 255.0, lineType=cv2.LINE_AA)
        return mask
class random_components(Mask):
    """Components mask over a random subset of the facial regions.

    Same geometry as `extended` (eyebrows pushed up the forehead), but only
    a random subset of the part hulls is painted.
    """

    def build_mask(self):
        mask = np.zeros(self.face.shape[0:2] + (1,), dtype=np.float32)
        landmarks = self.landmarks.copy()
        # Midpoints used to extrapolate the eyebrows up the forehead.
        ml_pnt = (landmarks[36] + landmarks[0]) // 2
        mr_pnt = (landmarks[16] + landmarks[45]) // 2
        ql_pnt = (landmarks[36] + ml_pnt) // 2
        qr_pnt = (landmarks[45] + mr_pnt) // 2
        bot_l = np.array((ql_pnt, landmarks[36], landmarks[37], landmarks[38], landmarks[39]))
        bot_r = np.array((landmarks[42], landmarks[43], landmarks[44], landmarks[45], qr_pnt))
        top_l = landmarks[17:22]
        top_r = landmarks[22:27]
        landmarks[17:22] = top_l + ((top_l - bot_l) // 2)
        landmarks[22:27] = top_r + ((top_r - bot_r) // 2)
        parts = [
            (landmarks[0:9], landmarks[17:18]),                                      # right jaw
            (landmarks[8:17], landmarks[26:27]),                                     # left jaw
            (landmarks[17:20], landmarks[8:9]),                                      # right cheek
            (landmarks[24:27], landmarks[8:9]),                                      # left cheek
            (landmarks[19:25], landmarks[8:9]),                                      # nose ridge
            (landmarks[17:22], landmarks[27:28], landmarks[31:36], landmarks[8:9]),  # right eye
            (landmarks[22:27], landmarks[27:28], landmarks[31:36], landmarks[8:9]),  # left eye
            (landmarks[27:31], landmarks[31:36]),                                    # nose
        ]
        # NOTE(review): randint(0, len(parts) - 1) selects at most 7 of the 8
        # parts (and possibly none) — confirm whether the full set was meant
        # to be reachable.
        chosen = random.sample(parts, random.randint(0, len(parts) - 1))
        for group in chosen:
            cv2.fillConvexPoly(mask, cv2.convexHull(np.concatenate(group)), 255.0)
        return mask
def simple_transform():
    """Minimal augmentation pipeline: just resize to 256x256."""
    return Compose([Resize(256, 256)])
def strong_aug_pixel(p=0.5):
    """Aggressive pixel-level augmentation pipeline, applied with prob `p`."""
    print('[DATA]: strong aug pixel')
    from albumentations import Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue, MultiplicativeNoise, IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, RandomBrightnessContrast, IAAPiecewiseAffine, IAASharpen, IAAEmboss, Flip, OneOf, Compose, JpegCompression, CLAHE
    # Groups are constructed in the same order as before so any RNG use is
    # unchanged; names only make the pipeline readable.
    degrade = OneOf([MultiplicativeNoise(multiplier=[0.5, 1.5], per_channel=True),
                     JpegCompression(quality_lower=39, quality_upper=80)], p=0.2)
    noise = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2)
    blur = OneOf([MotionBlur(p=0.2), MedianBlur(blur_limit=3, p=0.1),
                  Blur(blur_limit=3, p=0.1)], p=0.2)
    contrast = OneOf([CLAHE(clip_limit=2), IAASharpen(), IAAEmboss(),
                      RandomBrightnessContrast()], p=0.3)
    color = HueSaturationValue(p=0.3)
    return Compose([degrade, noise, blur, contrast, color], p=p)
def pixel_aug(p=0.5):
    """Strong pixel-level augmentation pipeline, applied with prob `p`.

    Note: the noise parameters (randint/uniform) are drawn once, at pipeline
    construction time, not per image.
    """
    print('[DATA]: pixel aug')
    from albumentations import JpegCompression, Blur, Downscale, CLAHE, HueSaturationValue, RandomBrightnessContrast, IAAAdditiveGaussianNoise, GaussNoise, GaussianBlur, MedianBlur, MotionBlur, Compose, OneOf
    from random import sample, randint, uniform
    # Groups are built in the original textual order so the `random` stream
    # (randint before uniform) is consumed identically.
    compression = OneOf([JpegCompression(quality_lower=20, quality_upper=99, p=1)], p=0.2)
    noise = OneOf([IAAAdditiveGaussianNoise(loc=randint(1, 9), p=1),
                   GaussNoise(mean=uniform(0, 10.0), p=1)], p=0.3)
    blur = OneOf([GaussianBlur(blur_limit=15, p=1), MotionBlur(blur_limit=19, p=1),
                  Downscale(scale_min=0.3, scale_max=0.99, p=1),
                  Blur(blur_limit=15, p=1), MedianBlur(blur_limit=9, p=1)], p=0.4)
    contrast = OneOf([CLAHE(clip_limit=4.0, p=1), HueSaturationValue(p=1),
                      RandomBrightnessContrast(p=1)], p=0.1)
    return Compose([compression, noise, blur, contrast], p=p)
def spatial_aug(p=0.5):
    """Spatial augmentation pipeline (dropout, crop, flip, rotate)."""
    print('[DATA] spatial aug')
    from albumentations import GridDropout, RandomResizedCrop, Rotate, HorizontalFlip, Compose
    dropout = GridDropout(holes_number_x=3, holes_number_y=3, random_offset=True, p=0.5)
    crop = RandomResizedCrop(256, 256, scale=(0.7, 1.0), p=1.0)
    flip = HorizontalFlip(p=0.5)
    rotate = Rotate(limit=90, p=0.5)
    return Compose([dropout, crop, flip, rotate], p=p)
def pixel_aug_mild(p=0.5):
    """Milder variant of pixel_aug (gentler compression, noise and blur).

    Note: the noise parameters (randint/uniform) are drawn once, at pipeline
    construction time, not per image.
    """
    print('[DATA]: pixel aug mild')
    from albumentations import JpegCompression, Blur, Downscale, CLAHE, HueSaturationValue, RandomBrightnessContrast, IAAAdditiveGaussianNoise, GaussNoise, GaussianBlur, MedianBlur, MotionBlur, Compose, OneOf
    from random import sample, randint, uniform
    # Groups are built in the original textual order so the `random` stream
    # (randint before uniform) is consumed identically.
    compression = OneOf([JpegCompression(quality_lower=60, quality_upper=99, p=1)], p=0.2)
    noise = OneOf([IAAAdditiveGaussianNoise(loc=randint(1, 5), p=1),
                   GaussNoise(mean=uniform(0, 5.0), p=1)], p=0.3)
    blur = OneOf([GaussianBlur(blur_limit=7, p=1), MotionBlur(blur_limit=9, p=1),
                  Downscale(scale_min=0.6, scale_max=0.99, p=1),
                  Blur(blur_limit=7, p=1), MedianBlur(blur_limit=3, p=1)], p=0.4)
    contrast = OneOf([CLAHE(clip_limit=2.0, p=1), HueSaturationValue(p=1),
                      RandomBrightnessContrast(p=1)], p=0.1)
    return Compose([compression, noise, blur, contrast], p=p)
class Augmentator():
    """Thin wrapper that selects one of the augmentation pipelines by name."""

    def __init__(self, augment_fn=''):
        # Build eagerly via if/elif so unknown names never touch the
        # pipeline factories; unknown names raise NotImplementedError.
        if augment_fn == 'pixel_aug':
            pipeline = pixel_aug()
        elif augment_fn == 'simple':
            pipeline = simple_transform()
        elif augment_fn == 'pixel_mild':
            pipeline = pixel_aug_mild()
        elif augment_fn == 'spatial':
            pipeline = spatial_aug()
        else:
            raise NotImplementedError(augment_fn)
        self.augment_fn = pipeline

    def __call__(self, img, mask=None):
        """Apply the pipeline; returns the image, or (image, mask) if a mask is given."""
        if mask is None:
            return self.augment_fn(image=img)['image']
        augmented = self.augment_fn(image=img, mask=mask)
        return (augmented['image'], augmented['mask'])
def data_transform(size=256, normalize=True):
    """Build the input transform: resize, optional ImageNet normalisation, tensor."""
    steps = [Resize(size, size)]
    if normalize:
        steps.append(Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
    steps.append(ToTensor())
    return Compose(steps)
def color_transfer(source, target, clip=True, preserve_paper=True, mask=None):
    """Transfer the colour distribution of `source` onto `target`.

    Loosely follows "Color Transfer between Images" (Reinhard et al., 2001):
    match the per-channel mean and standard deviation in L*a*b* space.

    Parameters
    ----------
    source, target : OpenCV images in BGR colour space; `source` supplies
        the statistics the result should match.
    clip : if True, clip L*a*b* channels into [0, 255] before converting
        back; if False, min-max rescale them instead (keeps brightness
        closer to the input at the cost of fidelity to the source stats).
    preserve_paper : if True use the paper's scaling ratio (std_tar/std_src);
        otherwise use the reciprocal, which can look better.
    mask : optional mask forwarded to image_stats to restrict statistics.

    Returns
    -------
    BGR uint8 image with the transferred colours.

    NOTE(review): a second `color_transfer` defined later in this module
    shadows this implementation at import time.
    """
    source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype('float32')
    target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype('float32')
    (lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source, mask)
    (lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target, mask)
    src_means = (lMeanSrc, aMeanSrc, bMeanSrc)
    src_stds = (lStdSrc, aStdSrc, bStdSrc)
    tar_means = (lMeanTar, aMeanTar, bMeanTar)
    tar_stds = (lStdTar, aStdTar, bStdTar)
    out_channels = []
    for chan, mean_tar, std_tar, mean_src, std_src in zip(
            cv2.split(target), tar_means, tar_stds, src_means, src_stds):
        # Centre on the target mean, rescale by the std ratio, re-centre on
        # the source mean — per channel.
        centred = chan - mean_tar
        if preserve_paper:
            scaled = (std_tar / std_src) * centred
        else:
            scaled = (std_src / std_tar) * centred
        shifted = scaled + mean_src
        out_channels.append(_scale_array(shifted, clip=clip))
    transfer = cv2.merge(out_channels)
    return cv2.cvtColor(transfer.astype('uint8'), cv2.COLOR_LAB2BGR)
def image_stats(image, mask=None):
    """Return (lMean, lStd, aMean, aStd, bMean, bStd) for an L*a*b* image.

    With `mask`, statistics are computed only over the selected pixels.
    NOTE(review): the flattened mask is used for fancy indexing, so it is
    expected to be boolean; an integer 0/255 mask would *index* rather than
    select — confirm against callers.
    """
    (l, a, b) = cv2.split(image)
    if mask is not None:
        flat_mask = mask.reshape(-1)
        l = l.reshape(-1)[flat_mask]
        a = a.reshape(-1)[flat_mask]
        b = b.reshape(-1)[flat_mask]
    return (l.mean(), l.std(), a.mean(), a.std(), b.mean(), b.std())
def _min_max_scale(arr, new_range=(0, 255)): '\n\tPerform min-max scaling to a NumPy array\n\tParameters:\n\t-------\n\tarr: NumPy array to be scaled to [new_min, new_max] range\n\tnew_range: tuple of form (min, max) specifying range of\n\t\ttransformed array\n\tReturns:\n\t-------\n\tNumPy array that has been scaled to be in\n\t[new_range[0], new_range[1]] range\n\t' mn = arr.min() mx = arr.max() if ((mn < new_range[0]) or (mx > new_range[1])): scaled = ((((new_range[1] - new_range[0]) * (arr - mn)) / (mx - mn)) + new_range[0]) else: scaled = arr return scaled
def _scale_array(arr, clip=True): '\n\tTrim NumPy array values to be in [0, 255] range with option of\n\tclipping or scaling.\n\tParameters:\n\t-------\n\tarr: array to be trimmed to [0, 255] range\n\tclip: should array be scaled by np.clip? if False then input\n\t\tarray will be min-max scaled to range\n\t\t[max([arr.min(), 0]), min([arr.max(), 255])]\n\tReturns:\n\t-------\n\tNumPy array that has been scaled to be in [0, 255] range\n\t' if clip: scaled = np.clip(arr, 0, 255) else: scale_range = (max([arr.min(), 0]), min([arr.max(), 255])) scaled = _min_max_scale(arr, new_range=scale_range) return scaled
def colorTransfer(src, dst, mask):
    """Shift the masked pixels of ``dst`` so their mean color matches ``src``.

    Parameters
    ----------
    src, dst : uint8 BGR images of identical shape.
    mask : 2-D array; nonzero entries mark pixels to recolor.

    Returns
    -------
    Copy of ``dst`` with the masked region mean-shifted toward ``src``,
    clipped to [0, 255].
    """
    result = np.copy(dst)
    rows, cols = np.where(mask != 0)[0], np.where(mask != 0)[1]
    src_px = src[rows, cols].astype(np.int32)
    dst_px = dst[rows, cols].astype(np.int32)
    # Subtracting dst's mean and adding src's mean shifts the region's
    # average color without touching its internal contrast.
    shift = np.mean(src_px, axis=0) - np.mean(dst_px, axis=0)
    result[rows, cols] = np.clip(dst_px + shift, 0, 255)
    return result
def color_transfer(source, target, clip=None, preserve_paper=None, mask=None):
    """Mean-shift color transfer from ``source`` onto ``target``.

    ``clip`` and ``preserve_paper`` are accepted for interface compatibility
    with the statistics-based transfer but are ignored by this implementation.
    """
    result = colorTransfer(src=source, dst=target, mask=mask)
    return result
def mkdir_p(path):
    """Create ``path`` and any missing parents, like ``mkdir -p``.

    An already-existing directory is silently accepted; any other failure
    (including a non-directory occupying ``path``) raises OSError.
    """
    # exist_ok=True reproduces the original EEXIST-and-isdir dance: an
    # existing directory passes, while a file at ``path`` still raises
    # FileExistsError (a subclass of OSError).
    os.makedirs(os.path.abspath(path), exist_ok=True)
def files(path, exts=None, r=False):
    """Yield file paths at ``path``.

    Parameters
    ----------
    path : a file (yielded if it matches ``exts``) or a directory to scan.
    exts : optional collection of extensions (with dot) to keep.
    r : recurse into subdirectories when True; top level only otherwise.
    """
    if os.path.isfile(path):
        if exts is None or splitext(path)[-1] in exts:
            yield path
        return
    if not os.path.isdir(path):
        return
    for dirpath, _, names in os.walk(path):
        for name in sorted(names):
            if exts is None or splitext(name)[1] in exts:
                yield join(dirpath, name)
        if not r:
            break
def rect_to_bb(rect):
    """Convert a dlib-style rectangle into an (x, y, w, h) bounding box."""
    left, top = rect.left(), rect.top()
    width = rect.right() - left
    height = rect.bottom() - top
    return (left, top, width, height)
def shape_to_np(shape, dtype='int'):
    """Convert a dlib 68-point landmark shape to a (68, 2) coordinate array.

    A NumPy array passed in is simply cast to ``dtype`` and returned.
    """
    if isinstance(shape, np.ndarray):
        return shape.astype(dtype)
    points = [(shape.part(i).x, shape.part(i).y) for i in range(68)]
    return np.array(points, dtype=dtype)
def shape_to_np(shape, dtype='int'):
    """Convert a dlib 68-point landmark shape to a (68, 2) coordinate array.

    Parameters
    ----------
    shape : dlib landmark object exposing ``part(i).x`` / ``part(i).y``,
        or a NumPy array (returned cast to ``dtype``).
    dtype : output dtype, default 'int'.

    Returns
    -------
    (68, 2) NumPy array of (x, y) coordinates.
    """
    # This later definition shadows the earlier shape_to_np in this file;
    # keep the ndarray pass-through so that capability is not silently lost.
    if isinstance(shape, np.ndarray):
        return shape.astype(dtype)
    coords = np.zeros((68, 2), dtype=dtype)
    for i in range(0, 68):
        coords[i] = (shape.part(i).x, shape.part(i).y)
    return coords
def rot90(v):
    """Rotate a 2-D vector 90 degrees counter-clockwise: (x, y) -> (-y, x)."""
    x, y = v[0], v[1]
    return np.array([-y, x])
def find_face_cvhull(im):
    """Return the convex hull of the first detected face's landmarks, or None.

    Relies on module-level dlib ``detector`` and ``predictor`` models.
    """
    gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
    faces = detector(gray, 1)
    if not faces:
        return None
    landmarks = shape_to_np(predictor(gray, faces[0]))
    return cv2.convexHull(landmarks)
def find_face_landmark(im):
    """Return the 68 landmark points of the first detected face, or None.

    Relies on module-level dlib ``detector`` and ``predictor`` models.
    """
    gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
    faces = detector(gray, 1)
    if not faces:
        return None
    return shape_to_np(predictor(gray, faces[0]))
class Masks4D(object):
    """Expand a batch of 2-D masks into a concatenated 4-D similarity tensor.

    For each mask it builds, per pixel value, a map ``1 - |value - mask|``
    and concatenates those maps along dims 2 (width), 1 (height) and 0
    (mask index).  NOTE(review): the intent of the first-iteration special
    case (appending ``real_mask`` instead of ``curr_mask``) is not evident
    from this file — confirm against the training code that consumes it.
    """

    def __call__(self, masks):
        # Flags marking the first concatenation along each dimension, so the
        # accumulators can be seeded instead of concatenated.
        first_w = True
        first_h = True
        first_c = True
        for (k, mask) in enumerate(masks):
            # h, w are read but otherwise unused below.
            (h, w) = mask.shape
            # Lift the (h, w) mask to shape (1, 1, 1, h, w) for broadcasting.
            real_mask = torch.unsqueeze(torch.unsqueeze(torch.unsqueeze(mask, 0), 0), 0)
            for (i, mask_h) in enumerate(mask):
                for (j, mask_w) in enumerate(mask_h):
                    # Similarity of the scalar pixel value mask_w to every
                    # entry of the full mask: 1 where equal, lower elsewhere.
                    curr_mask = (1 - torch.abs((mask_w - real_mask)))
                    if first_w:
                        # First column seeds the row accumulator.
                        total_mask_w = real_mask
                        first_w = False
                    else:
                        total_mask_w = torch.cat((total_mask_w, curr_mask), dim=2)
                if first_h:
                    # First row seeds the per-mask accumulator.
                    total_mask_h = total_mask_w
                    first_h = False
                else:
                    total_mask_h = torch.cat((total_mask_h, total_mask_w), dim=1)
                first_w = True
            if first_c:
                # First mask seeds the overall accumulator.
                total_mask_c = total_mask_h
                first_c = False
            else:
                total_mask_c = torch.cat((total_mask_c, total_mask_h), dim=0)
            first_h = True
        return total_mask_c
def parse_args():
    """Parse command-line options for Glow-based sampling and echo them."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--model_path', help='path to pretrained model')
    add('--pretrained', help='downloads pretrained model [celebahq]')
    add('--output_path', required=True, help='path to save generated samples')
    add('--num_samples', type=int, default=100, help='number of samples')
    add('--seed', type=int, default=0, help='random seed for sampling')
    add('--batch_size', type=int, default=64, help='batch size for generating samples')
    add('--gpu', default='', type=str, help='GPUs to use (leave blank for CPU only)')
    add('--manipulate', action='store_true', help='add random manipulations to face')
    add('--format', default='jpg', type=str, help='file format to save generated images')
    add('--resize', type=int, help='resizes images to this size before saving')
    opt = parser.parse_args()
    print(opt)
    return opt
def sample(opt):
    """Sample images from the pretrained Glow model and write them to disk.

    Parameters
    ----------
    opt : argparse.Namespace
        Options from parse_args (model_path/pretrained, output_path,
        num_samples, seed, batch_size, manipulate, format, resize).
    """
    # Install a default TF session; presumably model.decode() below relies
    # on it — confirm against the Glow demo code.
    tf.InteractiveSession()
    assert (opt.model_path or opt.pretrained), 'specify weights path or pretrained model'
    if opt.model_path:
        # Loading from an arbitrary checkpoint path is not supported here.
        raise NotImplementedError
    elif opt.pretrained:
        assert (opt.pretrained == 'celebahq')
        # The Glow demo ships its own `model` module; make it importable.
        sys.path.append('resources/glow/demo')
        import model
        eps_std = 0.7  # scale of the latent noise drawn below
        eps_size = model.eps_size
    # Separate RNG streams: `rng` drives the base latents, `attr` drives the
    # manipulations, so --manipulate does not change which samples are drawn.
    rng = np.random.RandomState(opt.seed)
    attr = np.random.RandomState((opt.seed + 1))
    tags = []
    amts = []
    for batch_start in tqdm(range(0, opt.num_samples, opt.batch_size)):
        # The last batch may be smaller than batch_size.
        bs = (min(opt.num_samples, (batch_start + opt.batch_size)) - batch_start)
        feps = rng.normal(scale=eps_std, size=[bs, eps_size])
        if opt.manipulate:
            # Pick a random attribute tag and strength per sample and shift
            # the latent along the model's manipulation direction for it.
            tag = attr.randint(len(model._TAGS), size=bs)
            amt = attr.uniform((- 1), 1, size=(bs, 1))
            dzs = model.z_manipulate[tag]
            feps = (feps + (amt * dzs))
            tags.append(tag)
            amts.append(amt)
        images = model.decode(feps)
        for idx in range(images.shape[0]):
            filename = os.path.join(opt.output_path, ('seed%03d_sample%06d.%s' % (opt.seed, (batch_start + idx), opt.format)))
            im = PIL.Image.fromarray(images[idx], 'RGB')
            if opt.resize:
                im = im.resize((opt.resize, opt.resize), PIL.Image.LANCZOS)
            im.save(filename)
    if opt.manipulate:
        # Record which manipulation (tag, amount) was applied to each sample.
        outfile = os.path.join(opt.output_path, 'manipulations.npz')
        np.savez(outfile, tags=np.concatenate(tags), amts=np.concatenate(amts))
def parse_args():
    """Parse command-line options for StyleGAN sampling (with optional JPEG
    compression) and echo them."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--model_path', required=True, help='path to pretrained model')
    add('--output_path', required=True, help='path to save generated samples')
    add('--num_samples', type=int, default=100, help='number of samples')
    add('--seed', type=int, default=0, help='random seed for sampling')
    add('--batch_size', type=int, default=64, help='batch size for generating samples')
    add('--gpu', default='', type=str, help='GPUs to use (leave blank for CPU only)')
    add('--format', default='jpg', type=str, help='file format to save generated images')
    add('--resize', type=int, help='resizes images to this size before saving')
    add('--quality', type=int, help='compression quality')
    opt = parser.parse_args()
    print(opt)
    return opt
def sample(opt):
    """Sample images from a pickled StyleGAN generator and save them to disk.

    Parameters
    ----------
    opt : argparse.Namespace
        Options from parse_args (model_path, output_path, num_samples, seed,
        batch_size, format, resize, quality).
    """
    # Install a default TF session for the Gs.run() calls below.
    tf.InteractiveSession()
    # NOTE(review): unpickling a checkpoint executes arbitrary code — only
    # load model files from trusted sources.
    with open(opt.model_path, 'rb') as file:
        (G, D, Gs) = pickle.load(file)
    rng = np.random.RandomState(opt.seed)
    for batch_start in tqdm(range(0, opt.num_samples, opt.batch_size)):
        # The last batch may be smaller than batch_size.
        bs = (min(opt.num_samples, (batch_start + opt.batch_size)) - batch_start)
        latents = rng.randn(bs, *Gs.input_shapes[0][1:])
        # All-zero label vectors: unconditional sampling.
        labels = np.zeros(([latents.shape[0]] + Gs.input_shapes[1][1:]))
        images = Gs.run(latents, labels)
        # Map generator output from [-1, 1] to uint8 [0, 255].
        images = np.clip(np.rint((((images + 1.0) / 2.0) * 255.0)), 0.0, 255.0).astype(np.uint8)
        # NCHW -> NHWC so PIL can consume each image.
        images = images.transpose(0, 2, 3, 1)
        for idx in range(images.shape[0]):
            filename = os.path.join(opt.output_path, ('seed%03d_sample%06d.%s' % (opt.seed, (batch_start + idx), opt.format)))
            im = PIL.Image.fromarray(images[idx], 'RGB')
            if opt.resize:
                im = im.resize((opt.resize, opt.resize), PIL.Image.LANCZOS)
            if opt.quality:
                # Apply JPEG artifacts at a fixed 1024x1024 working size,
                # then restore the pre-compression size.
                aug = A.augmentations.transforms.JpegCompression(p=1)
                (w, h) = im.size
                im_np = np.asarray(im.resize((1024, 1024), PIL.Image.LANCZOS))
                im = PIL.Image.fromarray(aug.apply(im_np, quality=opt.quality))
                im = im.resize((w, h), PIL.Image.LANCZOS)
            im.save(filename)
def parse_args():
    """Parse command-line options for StyleGAN sampling (local weights or
    downloaded pretrained models) and echo them."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--model_path', help='path to pretrained model')
    add('--pretrained', help='downloads pretrained model [ffhq, celebahq]')
    add('--output_path', required=True, help='path to save generated samples')
    add('--num_samples', type=int, default=100, help='number of samples')
    add('--seed', type=int, default=0, help='random seed for sampling')
    add('--batch_size', type=int, default=64, help='batch size for generating samples')
    add('--gpu', default='', type=str, help='GPUs to use (leave blank for CPU only)')
    add('--format', default='jpg', type=str, help='file format to save generated images')
    add('--resize', type=int, help='resizes images to this size before saving')
    opt = parser.parse_args()
    print(opt)
    return opt
def sample(opt):
    """Sample images from a StyleGAN generator loaded from a local pickle or
    a downloaded pretrained checkpoint, and save them to disk.

    Parameters
    ----------
    opt : argparse.Namespace
        Options from parse_args (model_path or pretrained, output_path,
        num_samples, seed, batch_size, format, resize).
    """
    # Install a default TF session for the Gs.run() calls below.
    tf.InteractiveSession()
    assert (opt.model_path or opt.pretrained), 'specify weights path or pretrained model'
    if opt.model_path:
        # NOTE(review): unpickling executes arbitrary code — trusted files only.
        with open(opt.model_path, 'rb') as file:
            (G, D, Gs) = pickle.load(file)
    elif opt.pretrained:
        # Pretrained checkpoints hosted on Google Drive, fetched and cached
        # via the repo's dnnlib utilities.
        urls = dict(ffhq='https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ', celebahq='https://drive.google.com/uc?id=1MGqJl28pN4t7SAtSrPdSRJSQJqahkzUf')
        url = urls[opt.pretrained]
        with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:
            (_G, _D, Gs) = pickle.load(f)
    rng = np.random.RandomState(opt.seed)
    for batch_start in tqdm(range(0, opt.num_samples, opt.batch_size)):
        # The last batch may be smaller than batch_size.
        bs = (min(opt.num_samples, (batch_start + opt.batch_size)) - batch_start)
        latents = rng.randn(bs, *Gs.input_shapes[0][1:])
        # All-zero label vectors: unconditional sampling.
        labels = np.zeros(([latents.shape[0]] + Gs.input_shapes[1][1:]))
        images = Gs.run(latents, labels)
        # Map generator output from [-1, 1] to uint8 [0, 255].
        images = np.clip(np.rint((((images + 1.0) / 2.0) * 255.0)), 0.0, 255.0).astype(np.uint8)
        # NCHW -> NHWC so PIL can consume each image.
        images = images.transpose(0, 2, 3, 1)
        for idx in range(images.shape[0]):
            filename = os.path.join(opt.output_path, ('seed%03d_sample%06d.%s' % (opt.seed, (batch_start + idx), opt.format)))
            im = PIL.Image.fromarray(images[idx], 'RGB')
            if opt.resize:
                im = im.resize((opt.resize, opt.resize), PIL.Image.LANCZOS)
            im.save(filename)
def get_transform(opt, for_val=False):
    """Build the torchvision transform pipeline for images.

    Parameters
    ----------
    opt : options object exposing ``loadSize`` and ``fineSize``.
    for_val : bool
        Deterministic validation pipeline when True; augmented training
        pipeline otherwise.

    Returns
    -------
    torchvision.transforms.Compose
    """
    transform_list = []
    if for_val:
        # Deterministic eval path: resize then center-crop at loadSize.
        transform_list.append(transforms.Resize(opt.loadSize, interpolation=PIL.Image.LANCZOS))
        transform_list.append(transforms.CenterCrop(opt.loadSize))
        transform_list.append(transforms.ToTensor())
    else:
        # Training path: crop at fineSize and add photometric augmentations.
        transform_list.append(transforms.Resize(opt.loadSize, interpolation=PIL.Image.LANCZOS))
        transform_list.append(transforms.CenterCrop(opt.fineSize))
        transform_list.append(AllAugmentations())
        transform_list.append(transforms.ToTensor())
    # ImageNet mean/std normalization.
    # NOTE(review): applied to both train and val here — confirm this
    # placement (vs. train-only) against the original formatting.
    transform_list.append(transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)))
    if (not for_val):
        # Random occlusion augmentation, training only.
        transform_list.append(transforms.RandomErasing())
    transform = transforms.Compose(transform_list)
    print(transform)
    logging.info(transform)
    return transform
def get_mask_transform(opt, for_val=False):
    """Transform pipeline for mask images: tensor conversion only.

    ``opt`` and ``for_val`` are accepted for interface symmetry with
    get_transform but do not affect the result.
    """
    return transforms.Compose([transforms.ToTensor()])
class AllAugmentations(object):
    """Random photometric augmentation bundle for PIL images.

    Applies blur, JPEG compression, brightness/contrast and color jitter
    via albumentations, converting PIL <-> NumPy at the boundaries.
    """

    def __init__(self):
        import albumentations
        self.transform = albumentations.Compose([
            albumentations.Blur(blur_limit=3),
            albumentations.JpegCompression(quality_lower=30, quality_upper=100, p=0.5),
            albumentations.RandomBrightnessContrast(),
            albumentations.augmentations.transforms.ColorJitter(),
        ])

    def __call__(self, image):
        augmented = self.transform(image=np.array(image))
        return PIL.Image.fromarray(augmented['image'])
class JPEGCompression(object):
    """Deterministic JPEG compression of a PIL image at a fixed quality level."""

    def __init__(self, level):
        import albumentations as A
        # `level` is the JPEG quality passed to the transform on every call.
        self.level = level
        self.transform = A.augmentations.transforms.JpegCompression(p=1)

    def __call__(self, image):
        compressed = self.transform.apply(np.array(image), quality=self.level)
        return PIL.Image.fromarray(compressed)
class Blur(object):
    """Deterministic blur of a PIL image with a fixed kernel size."""

    def __init__(self, level):
        import albumentations as A
        # Fixing both ends of blur_limit pins the kernel size to `level`.
        self.level = level
        self.transform = A.Blur(blur_limit=(self.level, self.level), always_apply=True)

    def __call__(self, image):
        blurred = self.transform(image=np.array(image))
        return PIL.Image.fromarray(blurred['image'])
class Gamma(object):
    """Deterministic gamma adjustment of a PIL image.

    ``level`` is given in percent; the applied gamma is ``level / 100``.
    """

    def __init__(self, level):
        import albumentations as A
        self.level = level
        self.transform = A.augmentations.transforms.RandomGamma(p=1)

    def __call__(self, image):
        adjusted = self.transform.apply(np.array(image), gamma=(self.level / 100))
        return PIL.Image.fromarray(adjusted)