Delete pretrain_text_dataloader.py
pretrain_text_dataloader.py  +0 -226
DELETED
@@ -1,226 +0,0 @@
# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Loads text dataset for the BERT pretraining task."""
import dataclasses
from typing import List, Mapping, Optional, Text

import tensorflow as tf, tf_keras
import tensorflow_text as tf_text

from official.common import dataset_fn
from official.core import config_definitions as cfg
from official.core import input_reader
from official.nlp.data import data_loader
from official.nlp.data import data_loader_factory
from official.nlp.modeling.ops import segment_extractor


@dataclasses.dataclass
class BertPretrainTextDataConfig(cfg.DataConfig):
  """Data config for BERT pretraining task (tasks/masked_lm) from text."""
  input_path: str = ""
  doc_batch_size: int = 8
  global_batch_size: int = 512
  is_training: bool = True
  seq_length: int = 512
  max_predictions_per_seq: int = 76
  use_next_sentence_label: bool = True
  # The names of the text feature fields. The text features will be
  # concatenated in order.
  # Note: More than 1 field name is not compatible with NSP.
  text_field_names: Optional[List[str]] = dataclasses.field(
      default_factory=lambda: ["text"])
  vocab_file_path: str = ""
  masking_rate: float = 0.15
  use_whole_word_masking: bool = False
  file_type: str = "tfrecord"


_CLS_TOKEN = b"[CLS]"
_SEP_TOKEN = b"[SEP]"
_MASK_TOKEN = b"[MASK]"
_NUM_OOV_BUCKETS = 1
# Accounts for [CLS] and 2 x [SEP] tokens.
_NUM_SPECIAL_TOKENS = 3


@data_loader_factory.register_data_loader_cls(BertPretrainTextDataConfig)
class BertPretrainTextDataLoader(data_loader.DataLoader):
  """A class to load text dataset for the BERT pretraining task."""

  def __init__(self, params):
    """Inits `BertPretrainTextDataLoader` class.

    Args:
      params: A `BertPretrainTextDataConfig` object.
    """
    if len(params.text_field_names) > 1 and params.use_next_sentence_label:
raise ValueError("Currently there is no support for more than text field "
|
| 71 |
-
"while generating next sentence labels.")

    self._params = params
    self._seq_length = params.seq_length
    self._max_predictions_per_seq = params.max_predictions_per_seq
    self._use_next_sentence_label = params.use_next_sentence_label
    self._masking_rate = params.masking_rate
    self._use_whole_word_masking = params.use_whole_word_masking

    lookup_table_init = tf.lookup.TextFileInitializer(
        params.vocab_file_path,
        key_dtype=tf.string,
        key_index=tf.lookup.TextFileIndex.WHOLE_LINE,
        value_dtype=tf.int64,
        value_index=tf.lookup.TextFileIndex.LINE_NUMBER)
    self._vocab_lookup_table = tf.lookup.StaticVocabularyTable(
        lookup_table_init,
        num_oov_buckets=_NUM_OOV_BUCKETS,
        lookup_key_dtype=tf.string)

    self._cls_token = self._vocab_lookup_table.lookup(tf.constant(_CLS_TOKEN))
    self._sep_token = self._vocab_lookup_table.lookup(tf.constant(_SEP_TOKEN))
    self._mask_token = self._vocab_lookup_table.lookup(tf.constant(_MASK_TOKEN))

    # -_NUM_OOV_BUCKETS to offset the unused OOV bucket.
    self._vocab_size = self._vocab_lookup_table.size() - _NUM_OOV_BUCKETS

  def _decode(self, record: tf.Tensor) -> Mapping[Text, tf.Tensor]:
    """Decodes a serialized tf.Example."""
    name_to_features = {}
    for text_field_name in self._params.text_field_names:
      name_to_features[text_field_name] = tf.io.FixedLenFeature([], tf.string)
    return tf.io.parse_single_example(record, name_to_features)

  def _tokenize(self, segments):
    """Tokenizes the input segments."""
    tokenizer = tf_text.BertTokenizer(
        self._vocab_lookup_table, token_out_type=tf.int64)

    if self._use_whole_word_masking:
      # Tokenize the segments, which should have the shape:
      # [num_sentence, (num_words), (num_wordpieces)]
      segments = [tokenizer.tokenize(s) for s in segments]
    else:
      # Tokenize the segments and merge out the token dimension so that each
      # segment has the shape: [num_sentence, (num_wordpieces)]
      segments = [tokenizer.tokenize(s).merge_dims(-2, -1) for s in segments]

    # Truncate inputs.
    trimmer = tf_text.WaterfallTrimmer(
        self._seq_length - _NUM_SPECIAL_TOKENS, axis=-1)
    truncated_segments = trimmer.trim(segments)

    # Combine segments, get segment ids and add special tokens.
    return tf_text.combine_segments(
        truncated_segments,
        start_of_sequence_id=self._cls_token,
        end_of_segment_id=self._sep_token)

  def _bert_preprocess(self, record: Mapping[str, tf.Tensor]):
    """Parses raw tensors into a dict of tensors to be consumed by the model."""
    if self._use_next_sentence_label:
      input_text = record[self._params.text_field_names[0]]
      # Split sentences.
      sentence_breaker = tf_text.RegexSplitter()
      sentences = sentence_breaker.split(input_text)

      # Extract next-sentence-prediction labels and segments.
      next_or_random_segment, is_next = (
          segment_extractor.get_next_sentence_labels(sentences))
      # Merge dims to change shape from [num_docs, (num_segments)] to
      # [total_num_segments].
      is_next = is_next.merge_dims(-2, -1)

      # Construct segments with shape [(num_sentence)].
      segments = [
          sentences.merge_dims(-2, -1),
          next_or_random_segment.merge_dims(-2, -1)
      ]
    else:
      segments = [record[name] for name in self._params.text_field_names]

    segments_combined, segment_ids = self._tokenize(segments)

    # Dynamic masking.
    item_selector = tf_text.RandomItemSelector(
        self._max_predictions_per_seq,
        selection_rate=self._masking_rate,
        unselectable_ids=[self._cls_token, self._sep_token],
        shuffle_fn=(tf.identity if self._params.deterministic else None))
    values_chooser = tf_text.MaskValuesChooser(
        vocab_size=self._vocab_size, mask_token=self._mask_token)
    masked_input_ids, masked_lm_positions, masked_lm_ids = (
        tf_text.mask_language_model(
            segments_combined,
            item_selector=item_selector,
            mask_values_chooser=values_chooser,
        ))

    # Pad out to fixed shape and get input mask.
    seq_lengths = {
        "input_word_ids": self._seq_length,
        "input_type_ids": self._seq_length,
        "masked_lm_positions": self._max_predictions_per_seq,
        "masked_lm_ids": self._max_predictions_per_seq,
    }
    model_inputs = {
        "input_word_ids": masked_input_ids,
        "input_type_ids": segment_ids,
        "masked_lm_positions": masked_lm_positions,
        "masked_lm_ids": masked_lm_ids,
    }
    padded_inputs_and_mask = tf.nest.map_structure(tf_text.pad_model_inputs,
                                                   model_inputs, seq_lengths)
    model_inputs = {
        k: padded_inputs_and_mask[k][0] for k in padded_inputs_and_mask
    }
    model_inputs["masked_lm_weights"] = tf.cast(
        padded_inputs_and_mask["masked_lm_ids"][1], tf.float32)
    model_inputs["input_mask"] = padded_inputs_and_mask["input_word_ids"][1]

    if self._use_next_sentence_label:
      model_inputs["next_sentence_labels"] = is_next

    for name in model_inputs:
      t = model_inputs[name]
      if t.dtype == tf.int64:
        t = tf.cast(t, tf.int32)
      model_inputs[name] = t

    return model_inputs

  def load(self, input_context: Optional[tf.distribute.InputContext] = None):
    """Returns a tf.data.Dataset."""

    def _batch_docs(dataset, input_context):
      per_core_doc_batch_size = (
          input_context.get_per_replica_batch_size(self._params.doc_batch_size)
          if input_context else self._params.doc_batch_size)
      return dataset.batch(per_core_doc_batch_size)

    reader = input_reader.InputReader(
        params=self._params,
        dataset_fn=dataset_fn.pick_dataset_fn(self._params.file_type),
        decoder_fn=self._decode if self._params.input_path else None,
        transform_and_batch_fn=_batch_docs
        if self._use_next_sentence_label else None,
        postprocess_fn=self._bert_preprocess)
    transformed_inputs = reader.read(input_context)
    per_core_example_batch_size = (
        input_context.get_per_replica_batch_size(self._params.global_batch_size)
        if input_context else self._params.global_batch_size)
    batched_inputs = transformed_inputs.unbatch().batch(
        per_core_example_batch_size, self._params.drop_remainder)
    return batched_inputs.prefetch(tf.data.experimental.AUTOTUNE)
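
For context, a minimal sketch of how this (now deleted) loader was typically driven: the `register_data_loader_cls` decorator above maps `BertPretrainTextDataConfig` to `BertPretrainTextDataLoader`, so a caller builds the config and lets the factory resolve the loader from the config type. This assumes `data_loader_factory.get_data_loader` as the factory's lookup entry point; the file paths below are hypothetical placeholders, not real data.

# Hypothetical usage sketch; paths are placeholders and get_data_loader is
# assumed to be the Model Garden factory's dispatch function.
from official.nlp.data import data_loader_factory
from official.nlp.data.pretrain_text_dataloader import BertPretrainTextDataConfig

config = BertPretrainTextDataConfig(
    input_path="/data/corpus-*.tfrecord",  # tf.Examples with a "text" feature
    vocab_file_path="/data/vocab.txt",     # WordPiece vocab, one token per line
    seq_length=128,
    max_predictions_per_seq=20,
)
loader = data_loader_factory.get_data_loader(config)  # -> BertPretrainTextDataLoader
dataset = loader.load()  # tf.data.Dataset yielding padded, masked model inputs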