code stringlengths 17 6.64M |
|---|
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being
    variable-length strings).

    Args:
        word: tuple of str symbols.

    Returns:
        set of (symbol, symbol) tuples for each adjacent pair; empty for
        words of length < 2 (the original raised IndexError on an empty
        word — this version handles it gracefully).
    """
    # zip pairs each symbol with its successor; short words yield set()
    return set(zip(word, word[1:]))
|
def basic_clean(text):
    """Fix text encoding problems via ftfy, undo (possibly double) HTML
    escaping, and strip surrounding whitespace."""
    return html.unescape(html.unescape(ftfy.fix_text(text))).strip()
|
def whitespace_clean(text):
    """Collapse every run of whitespace into a single space and trim the ends."""
    return re.sub(r'\s+', ' ', text).strip()
|
class SimpleTokenizer(object):
    """Byte-level BPE tokenizer (CLIP style).

    Vocabulary layout: 256 byte symbols, the same symbols with a '</w>'
    end-of-word suffix, one entry per merge rule, then the
    '<|startoftext|>' / '<|endoftext|>' special tokens.
    """

    def __init__(self, bpe_path: str=default_bpe()):
        """Load BPE merge rules from `bpe_path` (gzipped text, one merge per line)."""
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        # fix: close the gzip handle instead of leaking it
        with gzip.open(bpe_path) as bpe_file:
            merges = bpe_file.read().decode('utf-8').split('\n')
        # drop the header line, keep exactly 49152 - 256 - 2 merge rules
        # (total vocab budget minus byte symbols minus the two special tokens)
        merges = merges[1:(((49152 - 256) - 2) + 1)]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = (vocab + [(v + '</w>') for v in vocab])
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        # lower rank == higher merge priority
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # special tokens bypass BPE entirely
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        # NOTE(review): \p{...} property classes need the `regex` package —
        # presumably this file does `import regex as re`; confirm.
        self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)
        self.vocab = self.encoder

    def bpe(self, token):
        """Apply BPE merges to one token; returns space-separated subwords."""
        if (token in self.cache):
            return self.cache[token]
        # the final character carries the end-of-word marker
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # merge the highest-priority (lowest-rank) pair still present
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    # fix: catch only the ValueError raised by index(); a bare
                    # `except:` also swallowed KeyboardInterrupt/SystemExit
                    new_word.extend(word[i:])
                    break
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Clean, lowercase, and BPE-encode `text` into a list of token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # map UTF-8 bytes onto the printable byte alphabet first
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens

    def decode(self, tokens):
        """Invert `encode`: token ids back to text (lossy on invalid bytes)."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
        return text

    def tokenize(self, text):
        """Like `encode`, but return subword strings instead of ids."""
        tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return tokens

    def convert_tokens_to_ids(self, tokens):
        """Map subword strings (from `tokenize`) to vocabulary ids."""
        return [self.encoder[bpe_token] for bpe_token in tokens]
|
def get_world_size():
    """Number of distributed processes, or 1 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
|
def get_rank():
    """Rank of this process, or 0 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
|
def is_main_process():
    """True only on rank 0 (or when not running distributed at all)."""
    rank = get_rank()
    return rank == 0
|
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training. No-op when distributed is unavailable,
    uninitialized, or single-process.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() > 1:
        dist.barrier()
|
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors).

    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]
    # serialize this rank's payload into a CUDA byte tensor
    serialized = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(serialized)
    payload = torch.ByteTensor(storage).to('cuda')
    # exchange payload sizes so every rank can allocate receive buffers
    local_size = torch.LongTensor([payload.numel()]).to('cuda')
    size_list = [torch.LongTensor([0]).to('cuda') for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(s.item()) for s in size_list]
    max_size = max(size_list)
    # all_gather requires equal-sized tensors, so pad to the largest payload
    recv_buffers = [torch.ByteTensor(size=(max_size,)).to('cuda') for _ in size_list]
    if local_size != max_size:
        pad = torch.ByteTensor(size=((max_size - local_size),)).to('cuda')
        payload = torch.cat((payload, pad), dim=0)
    dist.all_gather(recv_buffers, payload)
    # strip the padding and deserialize each rank's contribution
    return [
        pickle.loads(buf.cpu().numpy().tobytes()[:size])
        for (size, buf) in zip(size_list, recv_buffers)
    ]
|
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process
    with rank 0 has the averaged results. Returns a dict with the same
    fields as input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        # stable key order so every rank stacks in the same sequence
        keys = sorted(input_dict.keys())
        stacked = torch.stack([input_dict[k] for k in keys], dim=0)
        dist.reduce(stacked, dst=0)
        if average and dist.get_rank() == 0:
            # only rank 0 holds the reduced sum, so only it averages
            stacked /= world_size
        reduced_dict = dict(zip(keys, stacked))
    return reduced_dict
|
def setup_logger(name, save_dir, dist_rank, filename='log.txt'):
    """Create a logger that only emits on rank 0 (stdout plus an optional file).

    Non-zero ranks get the logger locked at ERROR level with no handlers.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.ERROR)
    # non-main ranks keep an effectively silent logger
    if dist_rank > 0:
        return logger
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    fmt = logging.Formatter('[%(asctime)s %(name)s %(lineno)s %(levelname)s]: %(message)s')
    console = logging.StreamHandler(stream=sys.stdout)
    console.setLevel(logging.DEBUG)
    console.setFormatter(fmt)
    logger.addHandler(console)
    if save_dir:
        file_handler = logging.FileHandler(os.path.join(save_dir, filename))
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)
    return logger
|
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20):
        # bounded window for the smoothed stats; full history kept in `series`
        self.deque = deque(maxlen=window_size)
        self.series = []
        self.total = 0.0
        self.count = 0

    def update(self, value):
        """Record one new observation."""
        self.deque.append(value)
        self.series.append(value)
        self.count += 1
        self.total += value

    @property
    def median(self):
        """Median over the recent window."""
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        """Mean over the recent window."""
        return torch.tensor(list(self.deque)).mean().item()

    @property
    def global_avg(self):
        """Mean over every value ever recorded."""
        return self.total / self.count
|
class MetricLogger(object):
    """Aggregate named SmoothedValue meters and render them as one log line."""

    def __init__(self, delimiter='\t'):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Feed scalar values (float/int or 0-d tensors) into their meters."""
        for (name, value) in kwargs.items():
            if isinstance(value, torch.Tensor):
                value = value.item()
            assert isinstance(value, (float, int))
            self.meters[name].update(value)

    def __getattr__(self, attr):
        # expose meters as attributes; only reached when normal lookup fails
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        parts = [
            '{}: {:.4f} ({:.4f})'.format(name, meter.median, meter.global_avg)
            for (name, meter) in self.meters.items()
        ]
        return self.delimiter.join(parts)
|
def input_fn(features, labels, shuffle=True, batch_size=64, repeat=False, seed=None):
    """
    Convert feature/label arrays into a batched tf.data.Dataset for
    training or evaluation.

    inputs:
        features: np.ndarray containing features.
        labels: np.ndarray containing labels for all examples.
        shuffle: bool indicating whether to shuffle (buffer of 100).
        batch_size: int indicating the desired batch size.
        repeat: bool specifying whether to repeat the dataset.
        seed: int seed used for shuffling the dataset.
    outputs:
        ds: dataset ready for training/evaluation.
    """
    shuffle_buffer_size = 100
    ds = tf_data.Dataset.from_tensor_slices((features, labels))
    if shuffle:
        ds = ds.shuffle(shuffle_buffer_size, seed=seed)
    ds = ds.batch(batch_size)
    if repeat:
        ds = ds.repeat()
    return ds
|
def create_model(model_type='state_estimator', model_opt='best_noise_opt'):
    """
    Build and compile a Keras CNN classifier with pre-tuned hyperparameters.

    inputs:
        model_type: str specifying either 'state_estimator' or
            'quality_control' type machine learning model.
        model_opt: str specifying dataset the model parameters were optimized
            on. Valid options for 'state_estimator' model_type:
            'noiseless_opt' or 'best_noise_opt'. Valid options for
            'quality_control' type: 'uniform_noise_dist_opt'.

    returns:
        compiled Keras model (softmax head sized by config.NUM_STATES or
        config.NUM_QUALITY_CLASSES, Adam optimizer, categorical crossentropy).

    raises:
        ValueError: if model_type or model_opt is not recognized.
    """
    valid_model_types = ['state_estimator', 'quality_control']
    if (model_type not in valid_model_types):
        raise ValueError('model_type not recognized: ', model_type, ' Valid values: ', valid_model_types)
    valid_model_opts = {'state_estimator': ['noiseless_opt', 'best_noise_opt'], 'quality_control': ['uniform_noise_dist_opt']}
    if (model_opt not in valid_model_opts[model_type]):
        raise ValueError('model_opt not recognized: ', model_opt, ' Valid values: ', valid_model_opts[model_type])
    # Hyperparameters below are hard-coded results of per-dataset tuning;
    # k_size/n_filters/drop_rates are indexed as [cnn_block][layer_in_stack].
    if ((model_type == 'state_estimator') and (model_opt == 'best_noise_opt')):
        lr = 0.00121
        k_size = [[7, 7], [7, 7]]
        cnn_maxpool = False
        cnn_stack = 2
        n_cnn = 2
        n_filters = [[22, 22], [35, 35]]
        drop_rates = [[0.655, 0.655], [0.194, 0.194]]
        layer_norm = False
        ave_pool = True
        activation = 'relu'
        dense_n = 0
    elif ((model_type == 'state_estimator') and (model_opt == 'noiseless_opt')):
        lr = 0.00345
        k_size = [[5], [5], [5]]
        cnn_maxpool = False
        cnn_stack = 1
        n_cnn = 3
        n_filters = [[23], [7], [18]]
        drop_rates = [[0.12], [0.28], [0.3]]
        layer_norm = True
        ave_pool = True
        activation = 'relu'
        dense_n = 0
    elif ((model_type == 'quality_control') and (model_opt == 'uniform_noise_dist_opt')):
        lr = 0.000265
        k_size = [[7, 3]]
        cnn_maxpool = True
        cnn_stack = 2
        n_cnn = 1
        n_filters = [[184, 249]]
        drop_rates = [[0.05, 0.0]]
        layer_norm = True
        ave_pool = True
        activation = 'swish'
        dense_n = 1
        # dense_dropout/dense_units only defined here; other branches set
        # dense_n = 0 so the dense loop below never reads them.
        dense_dropout = [0.6]
        dense_units = [161]
    # downsample either via max-pooling or via stride-2 convolutions
    if cnn_maxpool:
        cnn_stride = 1
    else:
        cnn_stride = 2
    inputs = tf_layers.Input(shape=(config.SUB_SIZE, config.SUB_SIZE, 1))
    x = inputs
    for i in range(n_cnn):
        for j in range(cnn_stack):
            # only the last conv of each stack downsamples (when not max-pooling)
            if (j == (cnn_stack - 1)):
                stride = cnn_stride
            else:
                stride = 1
            x = tf_layers.Conv2D(filters=n_filters[i][j], kernel_size=k_size[i][j], padding='same', strides=stride)(x)
            x = tf_layers.Dropout(rate=drop_rates[i][j])(x)
            if layer_norm:
                x = tf_layers.LayerNormalization()(x)
            x = tf_layers.Activation(activation)(x)
        if cnn_maxpool:
            x = tf_layers.MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    if ave_pool:
        x = tf_layers.GlobalAvgPool2D()(x)
    x = tf_layers.Flatten()(x)
    for i in range(dense_n):
        x = tf_layers.Dense(units=dense_units[i], activation=activation)(x)
        x = tf_layers.Dropout(rate=dense_dropout[i])(x)
    if (model_type == 'state_estimator'):
        outputs = tf_layers.Dense(units=config.NUM_STATES, activation='softmax')(x)
        model = tf_Model(inputs, outputs, name=('device_state_estimator_' + model_opt))
        model.compile(optimizer=tf_Adam(learning_rate=lr), loss='categorical_crossentropy', metrics=['accuracy'])
    elif (model_type == 'quality_control'):
        outputs = tf_layers.Dense(units=config.NUM_QUALITY_CLASSES, activation='softmax')(x)
        model = tf_Model(inputs=inputs, outputs=outputs, name=('data_quality_control_' + model_opt))
        model.compile(optimizer=tf_Adam(learning_rate=lr), loss='categorical_crossentropy', metrics=['accuracy'])
    return model
|
def get_num_min_class(labels):
    """
    Get the number of examples of the least represented class in a one-hot
    label array. Used for resampling data.

    input:
        labels: np.ndarray of one-hot labels, classes on the last axis.

    outputs:
        num_samples: count of examples in the minimum class.
    """
    class_ids = np.argmax(labels, axis=(- 1))
    per_class_counts = [np.sum((class_ids == i)) for i in range(labels.shape[(- 1)])]
    # cap at the total sample count, mirroring the original accumulator start
    return min([labels.shape[0]] + per_class_counts)
|
def resample_data(features, state_labels, labels=None, seed=None):
    """
    Balance the dataset across state classes by keeping, for each class,
    only as many examples as the least represented class has (classes are
    on the last axis of state_labels). The kept examples are shuffled
    within each class and then globally reordered.

    inputs:
        features: ndarray of features, resampled along the first axis.
        state_labels: one-hot ndarray used to determine class membership.
        labels: optional ndarray of labels resampled alongside features.
        seed: seed for the RNG used for all shuffling.

    outputs:
        (features, labels) when labels is given, otherwise
        (features, state_labels), both resampled and shuffled.
    """
    rng = np.random.default_rng(seed)
    n_keep = get_num_min_class(state_labels)
    class_ids = state_labels.argmax(axis=(- 1))
    feat_parts = []
    state_parts = []
    label_parts = []
    for cls in range(state_labels.shape[(- 1)]):
        mask = (class_ids == cls)
        cls_features = features[mask]
        cls_states = state_labels[mask]
        # shuffle within the class, then truncate to the common count
        order = list(range(cls_features.shape[0]))
        rng.shuffle(order)
        keep = order[:n_keep]
        feat_parts.append(cls_features[keep])
        state_parts.append(cls_states[keep])
        if (labels is not None):
            label_parts.append(labels[mask][keep])
    all_features = np.concatenate(feat_parts, axis=0)
    all_states = np.concatenate(state_parts, axis=0)
    # final global shuffle so classes end up interleaved
    order = list(range(all_features.shape[0]))
    rng.shuffle(order)
    if (labels is not None):
        all_labels = np.concatenate(label_parts, axis=0)
        return (all_features[order], all_labels[order])
    return (all_features[order], all_states[order])
|
def noise_mag_to_class(state_labels, noise_mags, low_thresholds=None, high_thresholds=None):
    """
    Convert noise magnitudes to one-hot noise quality classes, using
    per-state thresholds. Threshold order for states is: no dot, left dot,
    central dot, right dot, double dot. Default low thresholds are the
    linear extrapolation to 100 % accuracy of an average noisy-trained
    model vs. noise_mag; default high thresholds extrapolate to 0 %.

    inputs:
        state_labels: one-hot state labels, shape (num_examples, num_states).
        noise_mags: float noise magnitudes, shape (num_examples,).
        low_thresholds: floats of shape (num_states,) splitting high vs.
            moderate signal-to-noise.
        high_thresholds: floats of shape (num_states,) splitting moderate
            vs. low signal-to-noise.

    returns:
        one-hot quality classes, shape (num_examples, num_quality_classes).
    """
    num_quality_classes = config.NUM_QUALITY_CLASSES
    num_states = config.NUM_STATES
    if (high_thresholds is None):
        high_thresholds = [1.22, 1.0, 1.21, 0.68, 2.0]
    if (low_thresholds is None):
        low_thresholds = [0.31, 0.32, 0.41, 0.05, 0.47]
    low_thresholds = np.array(low_thresholds)
    high_thresholds = np.array(high_thresholds)
    quality_classes = np.zeros((noise_mags.shape + (num_quality_classes,)))
    # num_states from the labels themselves overrides the config value above
    num_states = state_labels.shape[(- 1)]
    per_state_classes = np.zeros(((noise_mags.shape + (num_quality_classes,)) + (num_states,)))
    for i in range(num_states):
        # per state i: class 0 = high SNR, 1 = moderate, 2 = low SNR
        per_state_classes[((noise_mags <= low_thresholds[i]), 0, i)] = 1
        per_state_classes[(((noise_mags > low_thresholds[i]) & (noise_mags <= high_thresholds[i])), 1, i)] = 1
        per_state_classes[((noise_mags > high_thresholds[i]), 2, i)] = 1
    # pick each example's quality one-hot according to its (one-hot) state
    quality_classes = np.einsum('ijk,ik->ij', per_state_classes, state_labels)
    return quality_classes
|
def get_data(f, train_test_split=0.9, dat_key='sensor', label_key='state', resample=True, seed=None, low_thresholds=None, high_thresholds=None):
    """
    Read in the subregion data and convert it to a format useful for
    training. Note that the data is shuffled (by file key) after reading in.

    inputs:
        f: one of:
            str path to .npz file containing cropped data
            dict of cropped data.
        train_test_split: float fraction of data to use for training.
        dat_key: string key for the measured data inside each entry.
        label_key: string key for the data used for the label. One of:
            'data_quality', 'noise_mag_factor', 'state'.
        resample: bool specifying whether to resample data to get even state
            representation.
        seed: int random seed for file shuffling.
        low_thresholds: list of noise levels for the high/moderate
            signal-to-noise threshold (data_quality labels only).
        high_thresholds: list of noise levels for the moderate/low
            signal-to-noise threshold (data_quality labels only).

    outputs:
        train_data: np.ndarray of training data.
        train_labels: np.ndarray of training labels.
        eval_data: np.ndarray of evaluation data.
        eval_labels: np.ndarray of evaluation labels.
    """
    # `f` may be a path (np.load succeeds) or an already-loaded dict
    try:
        dict_of_dicts = np.load(f, allow_pickle=True)
        file_on_disk = True
    except TypeError:
        dict_of_dicts = f
        file_on_disk = False
    files = list(dict_of_dicts.keys())
    random.Random(seed).shuffle(files)
    inp = []
    oup_state = []
    if (label_key != 'state'):
        oup_labels = []
    else:
        oup_labels = None
    train_labels = None
    eval_labels = None
    if (label_key == 'data_quality'):
        # quality classes are derived from noise magnitudes further below
        data_quality = True
        label_key = 'noise_mag_factor'
    else:
        data_quality = False
    for file in files:
        if file_on_disk:
            # npz entries wrap the dict in a 0-d object array
            data_dict = dict_of_dicts[file].item()
        else:
            data_dict = dict_of_dicts[file]
        dat = data_dict[dat_key]
        inp.append(dat.reshape(config.SUB_SIZE, config.SUB_SIZE, 1))
        oup_state.append(data_dict['state'])
        if (oup_labels is not None):
            oup_labels.append(data_dict[label_key])
    inp = np.array(inp)
    oup_state = np.array(oup_state)
    if (oup_labels is not None):
        oup_labels = np.array(oup_labels)
    n_samples = inp.shape[0]
    print('Total number of samples :', n_samples)
    n_train = int((train_test_split * n_samples))
    train_data = inp[:n_train]
    print('Training data info:', train_data.shape)
    train_states = oup_state[:n_train]
    if (oup_labels is not None):
        train_labels = oup_labels[:n_train]
    eval_data = inp[n_train:]
    print('Evaluation data info:', eval_data.shape)
    eval_states = oup_state[n_train:]
    if (oup_labels is not None):
        eval_labels = oup_labels[n_train:]
    if data_quality:
        train_labels = noise_mag_to_class(train_states, train_labels, low_thresholds=low_thresholds, high_thresholds=high_thresholds)
        eval_labels = noise_mag_to_class(eval_states, eval_labels, low_thresholds=low_thresholds, high_thresholds=high_thresholds)
    if resample:
        (train_data, train_labels) = resample_data(train_data, train_states, train_labels)
        (eval_data, eval_labels) = resample_data(eval_data, eval_states, eval_labels)
    elif ((not resample) and (label_key == 'state')):
        train_labels = train_states
        eval_labels = eval_states
    # fix: np.expand_dims returns a new array; the result was previously
    # discarded, so 1-D label vectors were never actually expanded
    if ((oup_labels is not None) and (len(train_labels.shape) == 1)):
        train_labels = np.expand_dims(train_labels, 1)
    if ((oup_labels is not None) and (len(eval_labels.shape) == 1)):
        eval_labels = np.expand_dims(eval_labels, 1)
    return (train_data, train_labels, eval_data, eval_labels)
|
def gradient(x):
    """
    Take the gradient of an ndarray in the x direction. Thin wrapper around
    np.gradient(). Note that x -> axis=1 and y -> axis=0.

    input:
        x: a numpy ndarray to take the gradient of.
    output:
        numpy ndarray containing the gradient in the x direction (axis=1),
        same shape as x.
    """
    return np.gradient(x, axis=1)
|
def apply_threshold(x, threshold_val=10, threshold_to=0):
    """
    Set every entry of `x` below the threshold_val-th percentile magnitude
    to `threshold_to`. Note: mutates `x` in place and also returns it.

    Args:
        x: numpy array with data to be filtered.
        threshold_val: percentile below which values are replaced.
        threshold_to: value assigned to the filtered entries.
    """
    cutoff = np.abs(np.percentile(x.flatten(), threshold_val))
    x[x < cutoff] = threshold_to
    return x
|
def apply_clipping(x, clip_val=3, clip_to='clip_val'):
    """
    Clip input symmetrically at clip_val standard deviations from the mean.
    `x` itself is not z-score normalized; the z-scores are only used to
    locate outliers. Returns a clipped copy of `x`.

    Args:
        x: numpy array with data to be clipped.
        clip_val: number of standard deviations defining an outlier.
        clip_to: 'clip_val' replaces outliers with the boundary value;
            'mean' replaces them with the mean.
    """
    mean = np.mean(x)
    std = np.std(x)
    z = ((x - mean) / std)
    clipped = np.copy(x)
    mode = clip_to.lower()
    if (mode == 'clip_val'):
        clipped[z < (- clip_val)] = (((- clip_val) * std) + mean)
        clipped[z > clip_val] = ((clip_val * std) + mean)
    elif (mode == 'mean'):
        clipped[z < (- clip_val)] = mean
        clipped[z > clip_val] = mean
    else:
        raise KeyError((('"clip_to" option not valid: ' + str(clip_to)) + 'Valid options: clip_val, mean'))
    return clipped
|
def autoflip_skew(data):
    """
    Autoflip a numpy ndarray so its value distribution has non-negative
    skew (effective for gradient data). NOTE: np.sign returns 0 for
    exactly-zero skew, which zeroes the data.
    """
    return (data * np.sign(scipy_skew(np.ravel(data))))
|
def zscore_norm(x):
    """Return a z-score normalized (zero-mean, unit-std) copy of `x`."""
    mu = x.mean()
    sigma = x.std()
    return ((x - mu) / sigma)
|
class Preprocessor():

    def __init__(self, autoflip=False, denoising=None, clip_val=None, thresh_val=None):
        """
        Class for doing preprocessing of data.

        inputs:
            autoflip: bool specifying whether to autoflip data based on
                the skew of its values.
            denoising: list of str specifying denoising to apply to data;
                any subset of ['threshold', 'clip'].
            clip_val: std-dev cutoff for 'clip' denoising; None uses the
                apply_clipping default. Unused if 'clip' not in denoising.
            thresh_val: percentile for 'threshold' denoising; None uses
                the apply_threshold default.
        """
        self.autoflip = autoflip
        # fix: avoid a mutable default argument (was denoising=[])
        denoising = [] if denoising is None else denoising
        valid_denoising = ['threshold', 'clip']
        if not set(denoising).issubset(valid_denoising):
            raise ValueError('invalid denoising ', denoising, ' Valid values:', valid_denoising)
        self.denoising = denoising
        self.clip_val = clip_val
        self.thresh_val = thresh_val

    def proc_subimage(self, x):
        """
        Take the gradient of the measured data, apply the configured
        denoising, z-score normalize, autoflip if specified, and resize
        to (config.SUB_SIZE, config.SUB_SIZE, 1) if necessary.

        Args:
            x: an array with data.
        """
        x = gradient(x)
        if 'threshold' in self.denoising:
            # fix: the attribute is thresh_val (reading self.threshold_val
            # raised AttributeError whenever thresholding was enabled)
            if self.thresh_val is not None:
                x = apply_threshold(x, self.thresh_val)
            else:
                x = apply_threshold(x)
        if 'clip' in self.denoising:
            # fix: feed the denoised result forward — it was previously
            # assigned to grad_x and discarded, and 'clip' without
            # 'threshold' crashed on the undefined grad_x name
            if self.clip_val is not None:
                x = apply_clipping(x, self.clip_val)
            else:
                x = apply_clipping(x)
        x = zscore_norm(x)
        if self.autoflip:
            x = autoflip_skew(x)
        target_shape = (config.SUB_SIZE, config.SUB_SIZE, 1)
        if x.shape != target_shape:
            x = skimage_resize(x, target_shape)
        return x

    def proc_subimage_set(self, x_arr):
        """
        Loop through subimages and apply preprocessing to each one.

        inputs:
            x_arr: full dataset of images; first axis is the example index.
        returns:
            np.ndarray of processed images.
        """
        return np.array([self.proc_subimage(x) for x in x_arr])
|
def cnn_model_fn(features, labels, mode):
    """Model function for CNN (TF1 Estimator API).

    Two conv/pool stages, a 1024-512-256 dense head with dropout, and a
    3-way softmax output; returns an EstimatorSpec for the given mode.
    """
    x = tf.cast(tf.reshape(features['x'], [(- 1), qf.SUB_SIZE, qf.SUB_SIZE, 1]), tf.float32)
    # convolutional feature extractor
    x = tf.layers.conv2d(inputs=x, filters=16, kernel_size=[5, 5], padding='same', activation=tf.nn.relu)
    x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2)
    x = tf.layers.conv2d(inputs=x, filters=32, kernel_size=[5, 5], padding='same', activation=tf.nn.relu)
    x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2)
    x = tf.contrib.layers.flatten(x)
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    # dense head with dropout active only in training mode
    for units, rate in ((1024, 0.5), (512, 0.5), (256, 0.4)):
        x = tf.layers.dense(inputs=x, units=units, activation=tf.nn.relu)
        x = tf.layers.dropout(inputs=x, rate=rate, training=is_training)
    logits = tf.layers.dense(inputs=x, units=3)
    predictions = {'state': tf.argmax(input=logits, axis=1), 'probabilities': tf.cast(tf.nn.softmax(logits, name='softmax_tensor'), tf.float64)}
    if (mode == tf.estimator.ModeKeys.PREDICT):
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # MSE between labels and softmax probabilities (not cross-entropy)
    loss = tf.losses.mean_squared_error(labels=labels, predictions=tf.nn.softmax(logits))
    if (mode == tf.estimator.ModeKeys.TRAIN):
        optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    eval_metric_ops = {'accuracy': tf.metrics.accuracy(labels=tf.argmax(labels, axis=1), predictions=predictions['state'])}
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
|
def load_model(model, args):
    """Instantiate `model` using the model/config classes, config overrides,
    and optional quantization selected by `args`; returns the loaded model.

    Exactly one of the custom-model flags (or none, for the HF Auto classes)
    selects the implementation; RoPE-scaling config is only rewritten for
    the custom classes, except for rerope which monkey-patches LlamaAttention.
    """
    # pick the model/config classes: custom scaled-rope variants or HF Auto*
    if args.custom_model:
        from scaled_rope.modeling_llama_yarn import LlamaForCausalLM
        from scaled_rope.configuration_llama import LlamaConfig
        model_cls = LlamaForCausalLM
        config_cls = LlamaConfig
    elif args.custom_model_together:
        from scaled_rope.modeling_llama_together_yarn import LlamaForCausalLM
        from scaled_rope.configuration_llama import LlamaConfig
        model_cls = LlamaForCausalLM
        config_cls = LlamaConfig
    elif args.custom_model_mistral:
        from scaled_rope.modeling_mistral_yarn import MistralForCausalLM
        from scaled_rope.configuration_mistral import MistralConfig
        model_cls = MistralForCausalLM
        config_cls = MistralConfig
    else:
        model_cls = AutoModelForCausalLM
        config_cls = AutoConfig
    config = config_cls.from_pretrained(model, trust_remote_code=(not args.custom_model))
    # generic config overrides from the CLI
    if args.max_position_embeddings:
        config.max_position_embeddings = args.max_position_embeddings
    if args.factor:
        config.rope_scaling['factor'] = args.factor
    if args.no_use_cache:
        config.use_cache = False
    else:
        config.use_cache = True
    if args.sliding_window_attention:
        config.sliding_window = args.sliding_window_attention
    # RoPE-scaling strategy; only applied to the custom model classes
    # (note: this replaces config.rope_scaling, including any factor set above)
    if (args.custom_model or args.custom_model_together or args.custom_model_mistral):
        if args.linear:
            config.rope_scaling = {'type': 'linear', 'factor': args.linear}
        elif args.dynamic_ntk:
            config.rope_scaling = {'type': 'dynamic', 'factor': args.dynamic_ntk}
        elif args.part_ntk:
            config.rope_scaling = {'type': 'ntk-by-parts', 'factor': args.part_ntk}
        elif args.yarn:
            config.rope_scaling = {'type': 'yarn', 'factor': args.yarn, 'original_max_position_embeddings': args.original_max_position_embeddings}
        elif args.dynamic_yarn:
            # fall back to the pretrained config's values for any CLI arg not given
            config.rope_scaling = {'type': 'dynamic-yarn', 'factor': (args.factor if args.factor else (config.rope_scaling.get('factor', 1.0) if (config.rope_scaling is not None) else 1.0)), 'original_max_position_embeddings': (args.original_max_position_embeddings if args.original_max_position_embeddings else config.rope_scaling['original_max_position_embeddings']), 'finetuned': (args.finetuned if args.finetuned else (config.rope_scaling.get('finetuned', False) if (config.rope_scaling is not None) else False))}
    elif args.rerope:
        # rerope works by monkey-patching stock LlamaAttention instead
        assert ((not args.custom_model) and (not args.custom_model_together))
        from transformers.models.llama.modeling_llama import LlamaAttention
        from scaled_rope.LlamaReRoPE import forward_with_rerope
        LlamaAttention.forward = forward_with_rerope
    # bitsandbytes quantization; bf16 otherwise
    if (args.load_in_8bit or args.load_in_4bit):
        quantization_config = BitsAndBytesConfig(load_in_4bit=args.load_in_4bit, load_in_8bit=args.load_in_8bit, llm_int8_threshold=6.0, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type='nf4')
        torch_dtype = None
        config.pretraining_tp = 1
    else:
        quantization_config = None
        torch_dtype = torch.bfloat16
    loaded = model_cls.from_pretrained(model, torch_dtype=torch_dtype, device_map='auto', trust_remote_code=(not args.custom_model), config=config, quantization_config=quantization_config, use_flash_attention_2=args.flash_attention)
    return loaded
|
def add_args(parser: ArgumentParser):
    """Register all scaled-RoPE / model-loading CLI options on `parser`.

    Returns the same parser so calls can be chained.
    """
    # (flag, kwargs) pairs, registered in the original declaration order
    option_specs = [
        ('--dynamic-linear', {'action': 'store_true'}),
        ('--dynamic-ntk', {'type': float}),
        ('--dynamic-part-ntk', {'action': 'store_true'}),
        ('--dynamic-yarn', {'action': 'store_true'}),
        ('--ntk', {'type': float}),
        ('--part-ntk', {'type': float}),
        ('--linear', {'type': float}),
        ('--yarn', {'type': float}),
        ('--rerope', {'type': float}),
        ('--factor', {'type': float}),
        ('--load-in-8bit', {'action': 'store_true'}),
        ('--load-in-4bit', {'action': 'store_true'}),
        ('--finetuned', {'action': 'store_true'}),
        ('--gpt-neox-max-length', {'type': int}),
        ('--adapter', {'type': str}),
        ('--max-position-embeddings', {'type': int}),
        ('--original-max-position-embeddings', {'type': int}),
        ('--sliding-window-attention', {'type': int}),
        ('--custom-model', {'action': 'store_true'}),
        ('--custom-model-together', {'action': 'store_true'}),
        ('--custom-model-mistral', {'action': 'store_true'}),
        ('--flash-attention', {'action': 'store_true'}),
        ('--no-use-cache', {'action': 'store_true'}),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser
|
def apply_patches(model, args):
    """Apply the rotary-embedding scaling strategy selected by `args` to `model`.

    At most one strategy is applied (first matching flag wins, mirroring the
    elif chain); a PEFT adapter is optionally merged in afterwards.

    Raises:
        RuntimeError: if the selected strategy does not support the model's
            architecture.
    """
    # custom model classes implement their own scaling; only patch stock models
    if ((not args.custom_model) and (not args.custom_model_together) and (not args.custom_model_mistral)):
        if ('GPTNeoXForCausalLM' in model.config.architectures):
            assert (args.gpt_neox_max_length is not None)
            patch_gptneox_for_longer_sequences(model, args.gpt_neox_max_length)
        if args.dynamic_linear:
            if ('GPTNeoXForCausalLM' in model.config.architectures):
                patch_gptneox_for_scaled_rotary_embeddings(model)
            elif ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_dynamic_scaled_rotary_embeddings(model)
            else:
                # fix: corrected "dyanmic" typo in the error messages below
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic linear')
        elif args.dynamic_ntk:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_dynamic_scaled_rotary_embeddings(model, ntk=args.dynamic_ntk)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic ntk')
        elif args.dynamic_part_ntk:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_dynamic_part_ntk_rotary_embeddings(model, args.finetuned)
            elif ('RWForCausalLM' in model.config.architectures):
                patch_falcon_for_dynamic_part_ntk_rotary_embeddings(model)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic part ntk')
        elif args.dynamic_yarn:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_dynamic_yarn_rotary_embeddings(model, args.original_max_position_embeddings, args.finetuned)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic yarn')
        elif args.ntk:
            if ('GPTNeoXForCausalLM' in model.config.architectures):
                patch_gptneox_for_ntk_scaled_rotary_embeddings(model, args.ntk)
            elif ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_ntk_scaled_rotary_embeddings(model, args.ntk)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for ntk')
        elif args.linear:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_linear_scaled_rotary_embeddings(model, scale=args.linear)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for linear')
        elif args.part_ntk:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_part_ntk_scaled_rotary_embeddings(model, scale=args.part_ntk)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for part ntk')
        elif args.yarn:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_yarn_scaled_rotary_embeddings(model, scale=args.yarn, original_max_position_embeddings=args.original_max_position_embeddings)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for YaRN')
        elif args.rerope:
            if ('LlamaForCausalLM' in model.config.architectures):
                training_length = (args.original_max_position_embeddings if args.original_max_position_embeddings else 4096)
                window = args.rerope
                patch_llama_for_rerope(model, training_length=training_length, window=window)
            else:
                # fix: this message wrongly said "YaRN" (copy-paste error)
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for ReRoPE')
    # optionally merge a PEFT adapter into the (possibly patched) model
    if args.adapter:
        from peft import PeftModel
        model = PeftModel.from_pretrained(model, args.adapter)
        model = model.merge_and_unload()
    return model
|
def load_model_and_apply_patches(model, args):
    """Load `model` per `args`, then apply the requested RoPE patches to it."""
    loaded = load_model(model, args)
    return apply_patches(loaded, args)
|
def generate_prompt(n_garbage):
    'Generates a text file and inserts an execute line at a random position.'
    # Split the filler budget randomly between text before and after the key.
    n_garbage_prefix = random.randint(0, n_garbage)
    n_garbage_suffix = n_garbage - n_garbage_prefix
    task_description = 'There is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there.'
    garbage = 'The grass is green. The sky is blue. The sun is yellow. Here we go. There and back again.'
    # One large pool of filler text; both prefix and suffix are sliced from its start.
    garbage_inf = ' '.join([garbage] * 10000)
    assert len(garbage_inf) >= n_garbage
    garbage_prefix = garbage_inf[:n_garbage_prefix]
    garbage_suffix = garbage_inf[:n_garbage_suffix]
    # The needle: a random integer pass key stated twice for emphasis.
    pass_key = random.randint(1, 50000)
    information_line = f'The pass key is {pass_key}. Remember it. {pass_key} is the pass key.'
    final_question = 'What is the pass key? The pass key is'
    parts = [task_description, garbage_prefix, information_line, garbage_suffix, final_question]
    return '\n'.join(parts), pass_key
|
def test_model(pipe, prompt_text, pass_key):
response = pipe(prompt_text, num_return_sequences=1, max_new_tokens=10)[0]['generated_text'][len(prompt_text):]
assert (f'The pass key is {pass_key}' in prompt_text)
try:
pass_key = int(re.search('\\d+', response).group())
except:
pass_key = response[:20]
return pass_key
|
def main(args):
    """Passkey-retrieval sweep: for each model, measure retrieval accuracy at a
    series of prompt lengths and print (and optionally CSV-dump) the results."""
    models = [x[0] for x in args.model]
    tokenizer = AutoTokenizer.from_pretrained(models[0], model_max_length=sys.maxsize, padding_side='right', trust_remote_code=True)
    if args.fixed_length:
        # Single fixed garbage length; report its tokenized prompt size.
        lengths = [args.fixed_length]
        tokens = [len(tokenizer.encode(generate_prompt(args.fixed_length)[0]))]
        print(f'Prompt is {tokens[0]} tokens')
    else:
        if args.tokens_step:
            # Linear sweep of token targets.
            tokens = [x for x in range(args.min_tokens, (args.max_tokens + 1), args.tokens_step)]
        else:
            # No step given: double the target each round up to max_tokens.
            tokens = [args.min_tokens]
            while (args.min_tokens < args.max_tokens):
                point = (tokens[(- 1)] * 2)
                if (point <= args.max_tokens):
                    tokens.append(point)
                else:
                    break
        lengths = []
        last_n = 0
        # For each token target, step the garbage length upward and keep the
        # largest n whose prompt still tokenizes below the target.
        for target in tqdm(tokens, desc='Determining sequence lengths'):
            num_tokens = 0
            n = last_n
            while (num_tokens < target):
                last_n = n
                n += args.length_step
                prompt = generate_prompt(n)[0]
                num_tokens = len(tokenizer.encode(prompt))
            lengths.append(last_n)
    results = []
    for model in tqdm(models, desc='Model', leave=False):
        torch.cuda.empty_cache()
        loaded = load_model_and_apply_patches(model, args)
        pipe = pipeline('text-generation', model=loaded, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id)
        # result[i] accumulates successes, then becomes the success rate at lengths[i].
        result = ([0] * len(lengths))
        for (i, length) in tenumerate(lengths, desc='Lengths', leave=False):
            for _ in trange(0, args.iterations, desc='Iterations', leave=False):
                (prompt_text, pass_key) = generate_prompt(length)
                num_tokens = len(pipe.tokenizer.encode(prompt_text))
                answer = test_model(pipe, prompt_text, pass_key)
                if (answer == pass_key):
                    result[i] += 1
            result[i] /= args.iterations
            print(f'{model}: {tokens[i]}={int((result[i] * 100))}%')
        result.insert(0, model)
        results.append(result)
    if args.output_file:
        # CSV: header row of token counts, then one row per model (name first).
        with open(args.output_file, 'w', encoding='utf-8') as f:
            f.write(f''',{','.join([str(x) for x in tokens])}
''')
            for result in results:
                f.write(f'''{','.join([str(x) for x in result])}
''')
|
def order(i):
    """Return `i` with its English ordinal suffix ('1st', '2nd', '3rd', '11th', ...).

    The teens (…11, …12, …13) always take 'th'; otherwise the suffix follows
    the last digit.
    """
    # Fix: original compared `i % 10` against 11/13 (always false) and had a
    # stray `i % 19` in the 'rd' branch — the teen exclusions need `i % 100`.
    if i % 10 == 1 and i % 100 != 11:
        return str(i) + 'st'
    elif i % 10 == 2 and i % 100 != 12:
        return str(i) + 'nd'
    elif i % 10 == 3 and i % 100 != 13:
        return str(i) + 'rd'
    else:
        return str(i) + 'th'
|
def generate_prompt(docs, num_keys=1):
    """Embed `num_keys` random pass keys at random positions inside the
    pre-tokenized junk text `docs` and ask for one of them.

    Returns (prompt, pass_keys, start_pos, retrieve_number) where
    `retrieve_number` indexes the key the final question asks about.
    """
    task_description = 'There is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there.'
    # Draw the keys, then their (sorted) insertion points, then the quiz target —
    # in that order, so a seeded RNG reproduces the same prompt.
    pass_keys = [random.randint(1, 50000) for _ in range(num_keys)]
    start_pos = sorted([random.randint(1, len(docs)) for _ in range(num_keys)])
    information_lines = [f'The {order(i + 1)} pass key is {pass_key}. Remember it. {pass_key} is the {order(i + 1)} pass key.' for (i, pass_key) in enumerate(pass_keys)]
    retrieve_number = random.randint(0, (num_keys - 1))
    final_question = f'What is the {order(retrieve_number + 1)} pass key? The {order(retrieve_number + 1)} pass key is'
    segments = [task_description]
    cursor = 0
    # Interleave junk spans with the information lines at their positions.
    for (info, pos) in zip(information_lines, start_pos):
        segments.append(''.join(docs[cursor:pos]))
        segments.append(info)
        cursor = pos
    segments.append(''.join(docs[cursor:]))
    segments.append(final_question)
    return ('\n'.join(segments), pass_keys, start_pos, retrieve_number)
|
def test_model(pipe, prompt_text):
response = pipe(prompt_text, num_return_sequences=1, max_new_tokens=10)[0]['generated_text'][len(prompt_text):]
try:
pass_key = int(re.search('\\d+', response).group())
except:
pass_key = response[:20]
return (pass_key, response)
|
def construct_junk(data, length, tokenizer):
    """Sample documents from `data` until roughly `length` tokens of filler
    text are collected; returns the text split into per-token string pieces."""
    target = length or 8192  # default budget when no length is given
    docs = []
    collected = 0
    while collected < target:
        text = random.choice(data)['text']
        encoding = tokenizer(text, return_offsets_mapping=True)
        # Drop zero-width spans (offsets with start >= end carry no text).
        spans = [(a, b) for (a, b) in encoding['offset_mapping'] if a < b]
        take = min(target - collected, len(spans))
        docs.extend(text[a:b] for (a, b) in spans[:take])
        collected += take
    return docs
|
def main(args):
    """Multi-key passkey evaluation over junk text sampled from a dataset.

    Builds one shared junk context, embeds several pass keys per iteration,
    and asks each model to retrieve a randomly chosen one; all prompts and
    outcomes are dumped to JSON when --output-file is given.
    """
    models = [x[0] for x in args.model]
    tokenizer = AutoTokenizer.from_pretrained(models[0], model_max_length=sys.maxsize, padding_side='right', trust_remote_code=True)
    data = load_dataset(args.dataset)[args.split]
    junks = construct_junk(data, args.fixed_length, tokenizer)
    if args.restrict_tokens:
        # Build a boolean mask of every token that is NOT digit-like (plus EOS
        # and whitespace-only tokens) so decoding can be restricted to digits.
        vocab = tokenizer.vocab
        escape_char = '▁'  # SentencePiece word-boundary marker
        digit_tokens = [vocab[a] for a in vocab.keys() if a.lstrip(escape_char).isdigit()]
        digit_tokens.append(vocab[tokenizer.eos_token])
        extra = [vocab[a] for a in vocab.keys() if (a.strip((' \n' + escape_char)) == '')]
        digit_tokens.extend(extra)
        mask = torch.ones(tokenizer.vocab_size, dtype=torch.bool)
        mask[digit_tokens] = 0
        def filter_digits(module, input, output):
            # Forward hook: push logits of masked (non-digit) tokens to -10000
            # so they are effectively never generated.
            output.logits[(..., mask[:output.logits.size((- 1))])] = (- 10000.0)
        print(f'Decoding restricted to {len(digit_tokens)} tokens.')
    results = []
    success_count = 0
    for model in tqdm(models, desc='Model', leave=False):
        torch.cuda.empty_cache()
        loaded = load_model_and_apply_patches(model, args)
        if args.restrict_tokens:
            loaded.register_forward_hook(filter_digits)
        pipe = pipeline('text-generation', model=loaded, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id)
        for _ in trange(0, args.iterations, desc='Iterations', leave=False):
            (prompt_text, pass_keys, start_pos, target) = generate_prompt(junks, args.num_keys)
            num_tokens = len(pipe.tokenizer.encode(prompt_text))
            (answer, return_text) = test_model(pipe, prompt_text)
            # Prefix match: generation may continue past the key's digits.
            passed = str(answer).startswith(str(pass_keys[target]))
            result = {'prompt_text': prompt_text, 'start_pos': start_pos, 'pass_keys': pass_keys, 'return_text': return_text, 'passed': passed}
            success_count += passed
            results.append(result)
    # Record the raw junk context alongside the per-iteration results.
    results.append({'original_prompt': junks})
    print(f'Iteration: {args.iterations}')
    print(f'Successes: {success_count}')
    if args.output_file:
        with open(args.output_file, 'w') as f:
            json.dump(results, f)
|
def main(args):
    """Interactive generation REPL: read a prompt (from stdin or a file) and
    print the model's continuation, looping until interrupted."""
    tokenizer = AutoTokenizer.from_pretrained(args.model, model_max_length=sys.maxsize, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token
    model = load_model_and_apply_patches(args.model, args)
    pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id, temperature=args.temperature, repetition_penalty=args.repetition_penalty, top_k=args.top_k, penalty_alpha=args.penalty_alpha, do_sample=(args.temperature is not None))
    while True:
        if (args.input_file is None):
            prompt_text = input('> ')
        else:
            # Re-read the file each round so it can be edited between turns.
            input(f'Press enter to read {args.input_file} ')
            # Fix: encoding was 'utf=8' (invalid codec name -> LookupError at
            # runtime); also close the handle instead of leaking it.
            with open(args.input_file, encoding='utf-8') as f:
                prompt_text = f.read()
        response = pipe(prompt_text, num_return_sequences=1, max_new_tokens=args.max_new_tokens)[0]['generated_text'][len(prompt_text):]
        print(f'< {response}')
|
def get_prompt(sample):
    """Format a QuALITY sample into the ZeroSCROLLS multiple-choice prompt,
    ending with 'Answer: (' so the model's next token selects a choice letter."""
    opts = sample['options']
    instruction = ZERO_SCROLLS_QUALITY_PROMPT.format(story=sample['article'], question=sample['question'], a=opts[0], b=opts[1], c=opts[2], d=opts[3])
    return f'{instruction}\nAnswer: ('
|
def main(args):
    """Evaluate models on the QuALITY multiple-choice dataset.

    Each sample generates one token after 'Answer: (' and the highest-scoring
    choice-letter token is compared to the gold answer. Running accuracy is
    shown live; final accuracies are optionally written as CSV.
    """
    models = [x[0] for x in args.model]
    tokenizer = AutoTokenizer.from_pretrained(models[0], model_max_length=sys.maxsize, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.pad_token_id = tokenizer.eos_token_id
    dataset = load_dataset('emozilla/quality', split=args.split)
    dataset = dataset.map((lambda sample: {'prompt': get_prompt(sample)}))
    if args.max_tokens:
        # Drop samples whose prompt would overflow the context budget.
        dataset = dataset.filter((lambda sample: (len(tokenizer(sample['prompt']).input_ids) <= args.max_tokens)))
    # First input id of each choice letter token.
    choice_tokens = [x[0] for x in tokenizer(CHOICES, add_special_tokens=False).input_ids]
    decoded_choice = tokenizer.decode(choice_tokens, clean_up_tokenization_spaces=True)
    results = []
    for model in models:
        torch.cuda.empty_cache()
        loaded = load_model_and_apply_patches(model, args)
        correct_answers = 0
        i = 0
        # Fix: renamed `max` -> `total`; the original shadowed the builtin.
        total = (len(dataset) if (args.limit is None) else args.limit)
        bar = tqdm(total=total)
        while (i < total):
            sample = dataset[i]
            tokenized_prompt = tokenizer(sample['prompt'], return_tensors='pt')
            input_ids = tokenized_prompt.input_ids.to('cuda')
            attention_mask = tokenized_prompt.attention_mask.to('cuda')
            output = loaded.generate(input_ids, attention_mask=attention_mask, max_new_tokens=1, return_dict_in_generate=True, output_scores=True, pad_token_id=tokenizer.eos_token_id)
            # Logits of the single generated position; score each choice token.
            scores = output.scores[0][0]
            choice_scores = [x.cpu() for x in [scores[choice_tokens[0]], scores[choice_tokens[1]], scores[choice_tokens[2]], scores[choice_tokens[3]]]]
            selection = numpy.argmax([x.float().cpu() for x in choice_scores])
            correct_answers += (1 if (selection == sample['answer']) else 0)
            if args.print_choices:
                print(f"Choice: {CHOICES[selection]} Correct: {CHOICES[sample['answer']]}")
            i += 1
            percent = ((correct_answers / i) * 100.0)
            bar.desc = f'{model}: {percent:.1f}%'
            bar.update()
        # NOTE(review): stored as a fraction (no *100), unlike the progress-bar
        # percentage above — preserved as-is for output-file compatibility.
        percent = (correct_answers / total)
        results.append(str(percent))
    if args.output_file:
        with open(args.output_file, 'w', encoding='utf-8') as f:
            f.write((','.join(models) + '\n'))
            f.write((','.join(results) + '\n'))
|
def find_all_linear_names(model):
    """Collect the leaf attribute names of every `torch.nn.Linear` in `model`,
    excluding the output head `lm_head` (used to pick LoRA target modules)."""
    linear_names = set()
    for (name, module) in model.named_modules():
        if not isinstance(module, torch.nn.Linear):
            continue
        parts = name.split('.')
        # Nested modules contribute only their last path component.
        linear_names.add(parts[0] if len(parts) == 1 else parts[-1])
    # LoRA should never wrap the LM head.
    linear_names.discard('lm_head')
    return list(linear_names)
|
def main(args):
    """Fine-tune a LLaMA or Mistral model with scaled RoPE (e.g. YaRN) using
    HuggingFace Accelerate; supports DeepSpeed, LoRA, gradient checkpointing,
    resumable state checkpoints, and optional wandb logging."""
    if args.output_dir:
        os.makedirs(args.output_dir, exist_ok=True)
    if args.wandb:
        import wandb
        wandb.login()
    set_seed(args.seed)
    # Very large process-group timeout so slow preprocessing on one rank does
    # not abort the distributed group.
    timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1000000))
    accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulate_every, mixed_precision='bf16', log_with=('wandb' if args.wandb else None), kwargs_handlers=[timeout])
    accelerator.init_trackers(project_name=(args.wandb if args.wandb else 'yarn'))
    accelerator.print(f'Total GPUS: {accelerator.num_processes}')
    # Select architecture-specific config/model classes and the pre-training
    # context window used as the RoPE-scaling reference length.
    if (args.architecture == 'llama'):
        from scaled_rope.modeling_llama_yarn import LlamaForCausalLM
        from scaled_rope.configuration_llama import LlamaConfig
        config_cls = LlamaConfig
        model_cls = LlamaForCausalLM
        original_max_position_embeddings = (args.original_max_position_embeddings if args.original_max_position_embeddings else 4096)
    elif (args.architecture == 'mistral'):
        from scaled_rope.modeling_mistral_yarn import MistralForCausalLM
        from scaled_rope.configuration_mistral import MistralConfig
        config_cls = MistralConfig
        model_cls = MistralForCausalLM
        original_max_position_embeddings = (args.original_max_position_embeddings if args.original_max_position_embeddings else 8192)
    config = config_cls.from_pretrained(args.model)
    config.rope_scaling = {'type': args.scaling_type, 'factor': args.scaling_factor, 'original_max_position_embeddings': original_max_position_embeddings}
    config.rope_theta = args.rope_theta
    # New context length defaults to scaling_factor * original window.
    config.max_position_embeddings = (int((args.scaling_factor * original_max_position_embeddings)) if (not args.max_position_embeddings) else args.max_position_embeddings)
    # Optional comma-separated schedule of sliding-window sizes, cycled per step.
    sliding_window_attention_schedule = ([int(x) for x in args.sliding_window_attention_schedule.split(',')] if args.sliding_window_attention_schedule else None)
    if ((sliding_window_attention_schedule is not None) and (len(sliding_window_attention_schedule) == 1)):
        config.sliding_window = sliding_window_attention_schedule[0]
        accelerator.print(f'Sliding attention window set to {config.sliding_window}')
    model = model_cls.from_pretrained(args.model, torch_dtype=torch.bfloat16, config=config, use_flash_attention_2=True)
    # Dataset may live on the hub or be a saved-to-disk dataset directory.
    try:
        train_dataset = load_dataset(args.dataset)
    except:
        train_dataset = load_from_disk(args.dataset)
    if isinstance(train_dataset, DatasetDict):
        train_dataset = train_dataset['train']
    if ('input_ids' not in train_dataset.column_names):
        raise RuntimeError('Dataset must include an `input_ids` feature')
    # Causal LM: labels default to a copy of input_ids.
    if ('labels' not in train_dataset.column_names):
        def add_labels(sample):
            sample['labels'] = copy.deepcopy(sample['input_ids'])
            return sample
        train_dataset = train_dataset.map(add_labels, desc='Adding labels', num_proc=args.num_proc)
    if ('attention_mask' not in train_dataset.column_names):
        def add_attention_mask(sample):
            sample['attention_mask'] = torch.ones(len(sample['input_ids']), dtype=torch.int8)
            return sample
        train_dataset = train_dataset.map(add_attention_mask, desc='Adding attention mask', num_proc=args.num_proc)
    if args.truncate:
        def truncate(sample):
            sample['input_ids'] = sample['input_ids'][0:args.truncate]
            sample['labels'] = sample['labels'][0:args.truncate]
            sample['attention_mask'] = sample['attention_mask'][0:args.truncate]
            return sample
        train_dataset = train_dataset.map(truncate, desc='Truncating', num_proc=args.num_proc)
    train_loader = DataLoader(train_dataset, collate_fn=default_data_collator, shuffle=True, batch_size=args.batch_size)
    if args.lora:
        from peft import get_peft_model, LoraConfig, TaskType
        # Target every Linear layer except lm_head.
        target_modules = find_all_linear_names(model)
        accelerator.print(f'LoRA target modules: {target_modules}')
        peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM, inference_mode=False, r=16, lora_alpha=64, lora_dropout=0.05, target_modules=target_modules)
        model = get_peft_model(model, peft_config)
        model.print_trainable_parameters()
    if args.deepspeed:
        # DeepSpeed supplies the real optimizer/scheduler from its own config;
        # the Dummy* objects are placeholders Accelerate swaps out.
        optim = DummyOptim(model.parameters(), lr=args.learning_rate)
        scheduler = DummyScheduler(optim, num_training_steps=args.max_train_steps, num_warmup_steps=args.warmup_steps)
        (model, optim, train_loader, scheduler) = accelerator.prepare(model, optim, train_loader, scheduler)
    else:
        model = accelerator.prepare(model)
        optim = torch.optim.AdamW(model.parameters(), lr=args.learning_rate)
        if (args.lr_schedule == 'linear'):
            scheduler = get_linear_schedule_with_warmup(optim, num_training_steps=args.max_train_steps, num_warmup_steps=args.warmup_steps)
        elif (args.lr_schedule == 'constant'):
            scheduler = get_constant_schedule_with_warmup(optim, num_warmup_steps=args.warmup_steps)
        (optim, train_loader, scheduler) = accelerator.prepare(optim, train_loader, scheduler)
    if (not args.lora):
        model.gradient_checkpointing_enable()
    accelerator.register_for_checkpointing(scheduler)
    total_batch_size = ((args.batch_size * accelerator.num_processes) * args.gradient_accumulate_every)
    accelerator.print(f'Max train steps: {args.max_train_steps}')
    accelerator.print(f'Total batch size: {total_batch_size}')
    progress_bar = tqdm(range(args.max_train_steps), disable=(not accelerator.is_local_main_process))
    completed_steps = 0
    if args.resume_from_checkpoint:
        # NOTE(review): with `or` this inner condition is always true here; the
        # outer `if` already guarantees a truthy value. Preserved as-is.
        if ((args.resume_from_checkpoint is not None) or (args.resume_from_checkpoint != '')):
            accelerator.print(f'Resuming from checkpoint {args.resume_from_checkpoint}')
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        # Checkpoint directories are named `step_<n>`.
        training_difference = os.path.splitext(path)[0]
        resume_step = int(training_difference.replace('step_', ''))
    if (args.resume_from_checkpoint and (resume_step is not None)):
        # Skip already-consumed batches so data order lines up with the resume point.
        train_loader = accelerator.skip_first_batches(train_loader, resume_step)
        completed_steps += resume_step
        progress_bar.update(resume_step)
        accelerator.print(f'Resuming training from step {resume_step}')
    # Optional comma-separated loss log; append when resuming, main process only.
    loss_file = (open(args.log_loss, ('a' if args.resume_from_checkpoint else 'w')) if (args.log_loss and accelerator.is_main_process) else None)
    if (not args.save_only):
        model.train()
        for (step, batch) in enumerate(train_loader):
            if (sliding_window_attention_schedule is not None):
                # Cycle through the sliding-window schedule by optimizer step.
                model.config.sliding_window = sliding_window_attention_schedule[(completed_steps % len(sliding_window_attention_schedule))]
            loss_log = None
            with accelerator.accumulate(model):
                loss = model(**batch).loss
                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    # Real optimizer step (accumulation boundary): log the loss.
                    loss_log = {'loss': loss.item()}
                    accelerator.log(loss_log, step=completed_steps)
                    if (loss_file is not None):
                        loss_file.write(f"{loss_log['loss']},")
                        loss_file.flush()
                if isinstance(args.grad_norm, float):
                    accelerator.clip_grad_norm_(model.parameters(), args.grad_norm)
                optim.step()
                scheduler.step()
                optim.zero_grad()
            if accelerator.sync_gradients:
                progress_bar.update(1)
                if (loss_log is not None):
                    progress_bar.set_postfix(loss_log)
                completed_steps += 1
                if (isinstance(args.checkpointing_steps, int) and (completed_steps > 0)):
                    if ((completed_steps % args.checkpointing_steps) == 0):
                        output_dir = f'step_{completed_steps}'
                        if (args.output_dir is not None):
                            output_dir = os.path.join(args.output_dir, output_dir)
                        accelerator.save_state(output_dir)
            if (completed_steps >= args.max_train_steps):
                break
        accelerator.print(f'Training Finished')
        accelerator.end_training()
    if (args.output_dir is not None):
        accelerator.print(f'Saving model to {args.output_dir}')
        accelerator.wait_for_everyone()
        if args.deepspeed:
            state_dict = accelerator.get_state_dict(model)
        else:
            # FSDP: gather a full state dict, offloaded to CPU on rank 0 only.
            full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
            with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_state_dict_config):
                state_dict = accelerator.get_state_dict(model, unwrap=False)
        accelerator.unwrap_model(model).save_pretrained(f'{args.output_dir}', is_main_process=accelerator.is_main_process, save_function=accelerator.save, state_dict=state_dict)
        accelerator.print(f'Saving Finished')
|
def main(args):
    """Print the mean 'acc' metric across all tasks in an evaluation-results
    JSON file (`args.file`)."""
    # Fix: the original leaked the file handle; use a context manager.
    with open(args.file, 'r', encoding='utf-8') as f:
        obj = json.load(f)
    accs = [result['acc'] for result in obj['results'].values()]
    print(numpy.average(accs))
|
def main(args):
    """Plot perplexity-vs-context curves from a CSV (one row per model) and
    save PNG and PDF renderings next to the input file."""
    frame = pd.read_csv(args.csv)
    (fig, ax) = plt.subplots(figsize=(10, 5))
    # First column holds model names; the remaining headers are context sizes.
    contexts = [float(c) for c in frame.columns[1:]]
    for row in frame.values:
        series_label = row[0].replace('NousResearch/', '')
        ax.plot(contexts, [float(v) for v in row[1:]], label=series_label)
    ax.set_xlabel('Context Window')
    ax.set_ylabel('Perplexity (lower is better)')
    ax.set_xlim(args.xmin, args.xmax)
    ax.set_ylim(args.ymin, args.ymax)
    ax.legend(loc='upper right')
    fig.savefig(args.csv + '.png')
    fig.savefig(args.csv + '.pdf', transparent=True)
|
class LlamaConfig(PretrainedConfig):
    """Configuration for a [`LlamaModel`]; defaults match LLaMA-7B.

    Extends the stock HF LLaMA config with a `rope_scaling` dict supporting
    the 'linear', 'dynamic', 'yarn' and 'dynamic-yarn' strategies; the yarn
    variants additionally require `original_max_position_embeddings`.

    Args:
        vocab_size: Vocabulary size (number of distinct input ids).
        hidden_size: Dimension of the hidden representations.
        intermediate_size: Dimension of the MLP representations.
        num_hidden_layers: Number of decoder layers.
        num_attention_heads: Attention heads per layer.
        num_key_value_heads: KV heads for grouped-query attention; defaults
            to `num_attention_heads` (i.e. plain multi-head attention).
        hidden_act: Decoder activation function.
        max_position_embeddings: Maximum sequence length.
        initializer_range: Stddev of the weight initializer.
        rms_norm_eps: Epsilon used by the RMS normalization layers.
        use_cache: Whether the model returns past key/values.
        pretraining_tp: Experimental tensor-parallelism rank used in pretraining.
        tie_word_embeddings: Whether to tie input/output embeddings.
        rope_theta: Base period of the rotary position embeddings.
        rope_scaling: Optional dict `{'type': ..., 'factor': ...}`; validated
            by `_rope_scaling_validation`.
        attention_bias: Whether the QKV/output projections use a bias.
    """
    model_type = 'llama'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000, rope_scaling=None, attention_bias=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # Backward compatibility: omitted KV heads means standard MHA.
        if (num_key_value_heads is None):
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        # Fail fast on malformed rope_scaling dicts.
        self._rope_scaling_validation()
        self.attention_bias = attention_bias
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration (type, factor, and — for
        yarn variants — original_max_position_embeddings)."""
        if (self.rope_scaling is None):
            return
        if (not isinstance(self.rope_scaling, dict)):
            raise ValueError(f'`rope_scaling` must be a dictionary, got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if ((rope_scaling_type is None) or (rope_scaling_type not in ['linear', 'dynamic', 'yarn', 'dynamic-yarn'])):
            raise ValueError(f"`rope_scaling`'s name field must be one of ['linear', 'dynamic', 'yarn', 'dynamic-yarn'], got {rope_scaling_type}")
        # NOTE(review): an int factor (e.g. 2) is rejected here — factor must be a float.
        if ((rope_scaling_factor is None) or (not isinstance(rope_scaling_factor, float)) or (rope_scaling_factor <= 1.0)):
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
        if ((rope_scaling_type == 'yarn') or (rope_scaling_type == 'dynamic-yarn')):
            original_max_position_embeddings = self.rope_scaling.get('original_max_position_embeddings', None)
            if ((original_max_position_embeddings is None) or (not isinstance(original_max_position_embeddings, int))):
                raise ValueError(f'`rope_scaling.original_max_position_embeddings` must be set to an int when using yarn, and dynamic-yarn')
|
class MistralConfig(PretrainedConfig):
    """Configuration for a [`MistralModel`]; defaults match Mistral-7B-v0.1.

    Extends the stock HF Mistral config with a `rope_scaling` dict supporting
    the 'linear', 'dynamic', 'yarn' and 'dynamic-yarn' strategies; the yarn
    variants additionally require `original_max_position_embeddings`.

    Args:
        vocab_size: Vocabulary size (number of distinct input ids).
        hidden_size: Dimension of the hidden representations.
        intermediate_size: Dimension of the MLP representations.
        num_hidden_layers: Number of decoder layers.
        num_attention_heads: Attention heads per layer.
        num_key_value_heads: KV heads for grouped-query attention (default 8).
        hidden_act: Decoder activation function.
        max_position_embeddings: Maximum sequence length (default 4096*32).
        initializer_range: Stddev of the weight initializer.
        rms_norm_eps: Epsilon used by the RMS normalization layers.
        use_cache: Whether the model returns past key/values.
        pad_token_id / bos_token_id / eos_token_id: Special token ids.
        tie_word_embeddings: Whether to tie input/output embeddings.
        rope_scaling: Optional dict `{'type': ..., 'factor': ...}`; validated
            by `_rope_scaling_validation`.
        rope_theta: Base period of the rotary position embeddings.
        sliding_window: Sliding-window attention size (default 4096).
    """
    model_type = 'mistral'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, hidden_act='silu', max_position_embeddings=(4096 * 32), initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_scaling=None, rope_theta=10000.0, sliding_window=4096, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.sliding_window = sliding_window
        # Backward compatibility: omitted KV heads means standard MHA.
        if (num_key_value_heads is None):
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        # Fail fast on malformed rope_scaling dicts.
        self._rope_scaling_validation()
        self.rope_theta = rope_theta
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration (type, factor, and — for
        yarn variants — original_max_position_embeddings)."""
        if (self.rope_scaling is None):
            return
        if (not isinstance(self.rope_scaling, dict)):
            raise ValueError(f'`rope_scaling` must be a dictionary, got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if ((rope_scaling_type is None) or (rope_scaling_type not in ['linear', 'dynamic', 'yarn', 'dynamic-yarn'])):
            raise ValueError(f"`rope_scaling`'s name field must be one of ['linear', 'dynamic', 'yarn', 'dynamic-yarn'], got {rope_scaling_type}")
        # NOTE(review): an int factor (e.g. 2) is rejected here — factor must be a float.
        if ((rope_scaling_factor is None) or (not isinstance(rope_scaling_factor, float)) or (rope_scaling_factor <= 1.0)):
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
        if ((rope_scaling_type == 'yarn') or (rope_scaling_type == 'dynamic-yarn')):
            original_max_position_embeddings = self.rope_scaling.get('original_max_position_embeddings', None)
            if ((original_max_position_embeddings is None) or (not isinstance(original_max_position_embeddings, int))):
                raise ValueError(f'`rope_scaling.original_max_position_embeddings` must be set to an int when using yarn, and dynamic-yarn')
|
def patch_llama_for_dynamic_scaled_rotary_embeddings(model, ntk=False):
    """Replace every Llama attention layer's rotary embedding with the
    dynamically-scaled variant.

    Args:
        model: Llama model whose layers live at ``model.model.layers``.
        ntk: forwarded to ``LlamaDynamicScaledRotaryEmbedding``. Defaults to
            ``False`` so dynamic-linear callers can omit it — the original
            required argument made ``apply_patches``' no-ntk call a TypeError.
    """
    from .LlamaDynamicScaledRotaryEmbedding import LlamaDynamicScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        attn.rotary_emb = LlamaDynamicScaledRotaryEmbedding(attn.head_dim, device=attn.rotary_emb.inv_freq.device, ntk=ntk)
|
def patch_llama_for_dynamic_part_ntk_rotary_embeddings(model, finetuned):
    """Swap every Llama attention layer's rotary embedding for the dynamic
    part-NTK scaled variant."""
    from .LlamaDynamicPartNTKScaledRotaryEmbedding import LlamaDynamicPartNTKScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        attn.rotary_emb = LlamaDynamicPartNTKScaledRotaryEmbedding(
            attn.head_dim,
            finetuned=finetuned,
            device=attn.rotary_emb.inv_freq.device,
        )
|
def patch_llama_for_dynamic_yarn_rotary_embeddings(model, original_max_position_embeddings, finetuned):
    """Swap every Llama attention layer's rotary embedding for the dynamic
    YaRN scaled variant."""
    from .LlamaDynamicYaRNScaledRotaryEmbedding import LlamaDynamicYaRNScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        attn.rotary_emb = LlamaDynamicYaRNScaledRotaryEmbedding(
            attn.head_dim,
            finetuned=finetuned,
            original_max_position_embeddings=original_max_position_embeddings,
            device=attn.rotary_emb.inv_freq.device,
        )
|
def patch_falcon_for_dynamic_part_ntk_rotary_embeddings(model):
    """Swap every Falcon attention layer's rotary embedding for the dynamic
    part-NTK scaled variant (Falcon layers live at ``model.transformer.h``)."""
    from .FalconDynamicPartNTKScaledRotaryEmbedding import FalconDynamicPartNTKScaledRotaryEmbedding
    for layer in model.transformer.h:
        attn = layer.self_attention
        attn.maybe_rotary = FalconDynamicPartNTKScaledRotaryEmbedding(attn.head_dim)
|
def patch_llama_for_ntk_scaled_rotary_embeddings(model, alpha):
    """Swap every Llama attention layer's rotary embedding for the NTK-scaled
    variant with scaling parameter ``alpha``."""
    from .LlamaNTKScaledRotaryEmbedding import LlamaNTKScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        attn.rotary_emb = LlamaNTKScaledRotaryEmbedding(
            attn.head_dim,
            alpha=alpha,
            device=attn.rotary_emb.inv_freq.device,
        )
|
def patch_llama_for_linear_scaled_rotary_embeddings(model, scale):
    """Swap every Llama attention layer's rotary embedding for the linearly
    scaled (position-interpolation) variant."""
    from .LlamaLinearScaledRotaryEmbedding import LlamaLinearScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        attn.rotary_emb = LlamaLinearScaledRotaryEmbedding(
            attn.head_dim,
            scale=scale,
            device=attn.rotary_emb.inv_freq.device,
        )
|
def patch_llama_for_part_ntk_scaled_rotary_embeddings(model, scale):
    """Swap every Llama attention layer's rotary embedding for the part-NTK
    scaled variant."""
    from .LlamaPartNTKScaledRotaryEmbedding import LlamaPartNTKScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        attn.rotary_emb = LlamaPartNTKScaledRotaryEmbedding(
            attn.head_dim,
            scale=scale,
            device=attn.rotary_emb.inv_freq.device,
        )
|
def patch_llama_for_yarn_scaled_rotary_embeddings(model, scale, original_max_position_embeddings):
    """Swap every Llama attention layer's rotary embedding for the YaRN
    scaled variant."""
    from .LlamaYaRNScaledRotaryEmbedding import LlamaYaRNScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        attn.rotary_emb = LlamaYaRNScaledRotaryEmbedding(
            attn.head_dim,
            scale=scale,
            original_max_position_embeddings=original_max_position_embeddings,
            device=attn.rotary_emb.inv_freq.device,
        )
|
def patch_gptneox_for_scaled_rotary_embeddings(model):
    """Swap every GPT-NeoX attention layer's rotary embedding for the dynamic
    scaled variant (layers live at ``model.gpt_neox.layers``)."""
    from .GPTNeoXDynamicScaledRotaryEmbedding import GPTNeoXDynamicScaledRotaryEmbedding
    for layer in model.gpt_neox.layers:
        attn = layer.attention
        attn.rotary_emb = GPTNeoXDynamicScaledRotaryEmbedding(
            attn.rotary_ndims,
            model.config.max_position_embeddings,
            device=attn.rotary_emb.inv_freq.device,
        )
|
def patch_gptneox_for_ntk_scaled_rotary_embeddings(model, alpha):
    """Swap every GPT-NeoX attention layer's rotary embedding for the
    NTK-scaled variant with scaling parameter ``alpha``."""
    from .GPTNeoXNTKScaledRotaryEmbedding import GPTNeoXNTKScaledRotaryEmbedding
    for layer in model.gpt_neox.layers:
        attn = layer.attention
        attn.rotary_emb = GPTNeoXNTKScaledRotaryEmbedding(
            attn.rotary_ndims,
            model.config.max_position_embeddings,
            alpha=alpha,
            device=attn.rotary_emb.inv_freq.device,
        )
|
def patch_gptneox_for_longer_sequences(model, max_positions):
    """Grow every GPT-NeoX layer's causal-attention bias mask to
    ``max_positions`` x ``max_positions`` (lower-triangular, shaped
    ``(1, 1, P, P)``) so longer sequences can be processed."""
    for layer in model.gpt_neox.layers:
        attn = layer.attention
        full = torch.ones((max_positions, max_positions), dtype=attn.bias.dtype, device=attn.bias.device)
        attn.bias = torch.tril(full).view(1, 1, max_positions, max_positions)
|
def patch_llama_for_rerope(model, training_length, window):
    """Patch every Llama attention layer to use ReRoPE attention.

    Args:
        model: Llama model whose layers live at ``model.model.layers``.
        training_length: context length the model was trained with.
        window: ReRoPE window size.
    """
    from .LlamaReRoPE import forward_with_rerope
    for each in model.model.layers:
        # Bind the current layer's attention as a default argument: a plain
        # closure over the loop variable would late-bind so every wrapper saw
        # the last layer. The original also never assigned the wrapper at all,
        # leaving every layer unpatched.
        def forward(*args, _attn=each.self_attn, **kwargs):
            return forward_with_rerope(_attn, *args, **kwargs)
        each.self_attn.forward = forward
        each.self_attn.training_length = int(training_length)
        each.self_attn.window = int(window)
|
def main(args):
    """Tokenize and chunk one or more text datasets into fixed-length blocks.

    Datasets are given as ``path[,train_fraction]`` strings. The tokenized
    corpus is concatenated, split into ``args.sequence_length`` blocks, then
    saved to disk and/or pushed to the hub.
    """
    if (args.dataset is None) or (len(args.dataset[0]) == 0):
        raise RuntimeError('No datasets provided')
    datasets = args.dataset[0]
    # Each entry may carry an optional ",fraction" suffix selecting a train split.
    splits = [(x.split(',')[1] if (len(x.split(',')) == 2) else '') for x in datasets]
    datasets = [x.split(',')[0] for x in datasets]
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    if args.json:
        dataset = load_dataset('json', data_files=datasets)[args.split]
        if any(len(s) > 0 for s in splits):
            if (len(datasets) > 1):
                raise RuntimeError('Can only use splitting on json datasets if there is exactly one input file')
            dataset = dataset.train_test_split(train_size=float(splits[0]), seed=args.seed)['train']
    else:
        to_concatenate = []
        for i in range(len(datasets)):
            try:
                loaded = load_from_disk(datasets[i])
            except Exception:
                # Not a local dataset directory; fall back to loading from the
                # hub. (The original passed the loop index `[i]` instead of
                # the dataset path, which always failed.)
                loaded = load_dataset(datasets[i])[args.split]
            if (len(splits[i]) > 0):
                loaded = loaded.train_test_split(train_size=float(splits[i]), seed=args.seed)['train']
            to_concatenate.append(loaded)
        dataset = concatenate_datasets(to_concatenate)
    # Keep only the text feature, append EOS to every document, and tokenize.
    dataset = dataset.remove_columns([x for x in dataset.column_names if (x not in [args.feature])])
    tokenized_dataset = dataset.map((lambda example: tokenizer([(t + tokenizer.eos_token) for t in example[args.feature]])), batched=True, num_proc=args.num_proc, remove_columns=[args.feature])
    block_size = args.sequence_length
    def group_texts(examples):
        # Concatenate all token sequences, then slice into block_size chunks,
        # dropping the trailing remainder.
        concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        if (total_length >= block_size):
            total_length = ((total_length // block_size) * block_size)
        return {k: [t[i:(i + block_size)] for i in range(0, total_length, block_size)] for (k, t) in concatenated_examples.items()}
    train_dataset = tokenized_dataset.map(group_texts, batched=True, num_proc=args.num_proc)
    if args.output:
        train_dataset.save_to_disk(args.output)
    if args.push_to_hub:
        train_dataset.push_to_hub(args.push_to_hub, private=True)
|
def main(args):
    """Load a tokenized dataset, clip every sample to ``args.truncate`` tokens,
    and save the result to ``args.output``."""
    dataset = load_dataset(args.dataset, split='train')
    def truncate(sample):
        # Clip the aligned features to the same prefix length.
        for key in ('input_ids', 'labels', 'attention_mask'):
            sample[key] = sample[key][0:args.truncate]
        return sample
    dataset = dataset.map(truncate, desc='Truncating', num_proc=args.num_proc)
    dataset.save_to_disk(args.output)
|
def load_model(model, args):
    """Build a config from CLI *args* and load the (optionally custom) causal LM.

    Args:
        model: model name or path passed to ``from_pretrained``.
        args: parsed CLI namespace (see ``add_args``).

    Returns:
        The loaded model, optionally 4/8-bit quantized, placed with
        ``device_map='auto'``.
    """
    # Select the model/config implementation: local YaRN-patched classes, or
    # the stock Auto* classes with trust_remote_code.
    if args.custom_model:
        from scaled_rope.modeling_llama_yarn import LlamaForCausalLM
        from scaled_rope.configuration_llama import LlamaConfig
        model_cls = LlamaForCausalLM
        config_cls = LlamaConfig
    elif args.custom_model_together:
        from scaled_rope.modeling_llama_together_yarn import LlamaForCausalLM
        from scaled_rope.configuration_llama import LlamaConfig
        model_cls = LlamaForCausalLM
        config_cls = LlamaConfig
    elif args.custom_model_mistral:
        from scaled_rope.modeling_mistral_yarn import MistralForCausalLM
        from scaled_rope.configuration_mistral import MistralConfig
        model_cls = MistralForCausalLM
        config_cls = MistralConfig
    else:
        model_cls = AutoModelForCausalLM
        config_cls = AutoConfig
    config = config_cls.from_pretrained(model, trust_remote_code=(not args.custom_model))
    if args.max_position_embeddings:
        config.max_position_embeddings = args.max_position_embeddings
    if args.factor:
        # NOTE(review): assumes the checkpoint already carries a rope_scaling
        # dict; raises TypeError when config.rope_scaling is None — confirm.
        config.rope_scaling['factor'] = args.factor
    if args.no_use_cache:
        config.use_cache = False
    else:
        config.use_cache = True
    if args.sliding_window_attention:
        config.sliding_window = args.sliding_window_attention
    # Custom YaRN model classes read the scaling method from config.rope_scaling
    # rather than needing runtime monkey-patches.
    if (args.custom_model or args.custom_model_together or args.custom_model_mistral):
        if args.linear:
            config.rope_scaling = {'type': 'linear', 'factor': args.linear}
        elif args.dynamic_ntk:
            config.rope_scaling = {'type': 'dynamic', 'factor': args.dynamic_ntk}
        elif args.part_ntk:
            config.rope_scaling = {'type': 'ntk-by-parts', 'factor': args.part_ntk}
        elif args.yarn:
            config.rope_scaling = {'type': 'yarn', 'factor': args.yarn, 'original_max_position_embeddings': args.original_max_position_embeddings}
        elif args.dynamic_yarn:
            # CLI flags win; otherwise fall back to values already present in
            # the checkpoint's rope_scaling (factor defaults to 1.0,
            # finetuned to False).
            config.rope_scaling = {'type': 'dynamic-yarn', 'factor': (args.factor if args.factor else (config.rope_scaling.get('factor', 1.0) if (config.rope_scaling is not None) else 1.0)), 'original_max_position_embeddings': (args.original_max_position_embeddings if args.original_max_position_embeddings else config.rope_scaling['original_max_position_embeddings']), 'finetuned': (args.finetuned if args.finetuned else (config.rope_scaling.get('finetuned', False) if (config.rope_scaling is not None) else False))}
    elif args.rerope:
        # ReRoPE is applied by globally monkey-patching the stock
        # transformers LlamaAttention.forward before loading.
        assert ((not args.custom_model) and (not args.custom_model_together))
        from transformers.models.llama.modeling_llama import LlamaAttention
        from scaled_rope.LlamaReRoPE import forward_with_rerope
        LlamaAttention.forward = forward_with_rerope
    if (args.load_in_8bit or args.load_in_4bit):
        quantization_config = BitsAndBytesConfig(load_in_4bit=args.load_in_4bit, load_in_8bit=args.load_in_8bit, llm_int8_threshold=6.0, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type='nf4')
        torch_dtype = None
        config.pretraining_tp = 1
    else:
        quantization_config = None
        torch_dtype = torch.bfloat16
    loaded = model_cls.from_pretrained(model, torch_dtype=torch_dtype, device_map='auto', trust_remote_code=(not args.custom_model), config=config, quantization_config=quantization_config, use_flash_attention_2=args.flash_attention)
    return loaded
|
def add_args(parser: ArgumentParser):
    """Register the shared RoPE-scaling / model-loading CLI flags on *parser*
    and return it (fluent style)."""
    float_options = ('--dynamic-ntk', '--ntk', '--part-ntk', '--linear',
                     '--yarn', '--rerope', '--factor')
    flag_options = ('--dynamic-linear', '--dynamic-part-ntk', '--dynamic-yarn',
                    '--load-in-8bit', '--load-in-4bit', '--finetuned',
                    '--custom-model', '--custom-model-together',
                    '--custom-model-mistral', '--flash-attention',
                    '--no-use-cache')
    int_options = ('--gpt-neox-max-length', '--max-position-embeddings',
                   '--original-max-position-embeddings',
                   '--sliding-window-attention')
    for opt in float_options:
        parser.add_argument(opt, type=float)
    for opt in flag_options:
        parser.add_argument(opt, action='store_true')
    for opt in int_options:
        parser.add_argument(opt, type=int)
    parser.add_argument('--adapter', type=str)
    return parser
|
def apply_patches(model, args):
    """Apply the RoPE-scaling patch selected by *args* to *model* in place and
    optionally merge a PEFT adapter.

    Custom YaRN model classes already implement scaling via their config, so
    runtime patching only happens for stock architectures.

    Returns:
        The patched (and possibly adapter-merged) model.

    Raises:
        RuntimeError: when the requested scaling method does not support the
            model's architecture.
    """
    if ((not args.custom_model) and (not args.custom_model_together) and (not args.custom_model_mistral)):
        if ('GPTNeoXForCausalLM' in model.config.architectures):
            # GPT-NeoX needs its causal mask grown before any scaling patch.
            assert (args.gpt_neox_max_length is not None)
            patch_gptneox_for_longer_sequences(model, args.gpt_neox_max_length)
        if args.dynamic_linear:
            if ('GPTNeoXForCausalLM' in model.config.architectures):
                patch_gptneox_for_scaled_rotary_embeddings(model)
            elif ('LlamaForCausalLM' in model.config.architectures):
                # Dynamic-linear is the no-NTK case; the original omitted the
                # required `ntk` argument and raised TypeError here.
                patch_llama_for_dynamic_scaled_rotary_embeddings(model, ntk=False)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic linear')
        elif args.dynamic_ntk:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_dynamic_scaled_rotary_embeddings(model, ntk=args.dynamic_ntk)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic ntk')
        elif args.dynamic_part_ntk:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_dynamic_part_ntk_rotary_embeddings(model, args.finetuned)
            elif ('RWForCausalLM' in model.config.architectures):
                patch_falcon_for_dynamic_part_ntk_rotary_embeddings(model)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic part ntk')
        elif args.dynamic_yarn:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_dynamic_yarn_rotary_embeddings(model, args.original_max_position_embeddings, args.finetuned)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic yarn')
        elif args.ntk:
            if ('GPTNeoXForCausalLM' in model.config.architectures):
                patch_gptneox_for_ntk_scaled_rotary_embeddings(model, args.ntk)
            elif ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_ntk_scaled_rotary_embeddings(model, args.ntk)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for ntk')
        elif args.linear:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_linear_scaled_rotary_embeddings(model, scale=args.linear)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for linear')
        elif args.part_ntk:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_part_ntk_scaled_rotary_embeddings(model, scale=args.part_ntk)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for part ntk')
        elif args.yarn:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_yarn_scaled_rotary_embeddings(model, scale=args.yarn, original_max_position_embeddings=args.original_max_position_embeddings)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for YaRN')
        elif args.rerope:
            if ('LlamaForCausalLM' in model.config.architectures):
                training_length = (args.original_max_position_embeddings if args.original_max_position_embeddings else 4096)
                window = args.rerope
                patch_llama_for_rerope(model, training_length=training_length, window=window)
            else:
                # The original message incorrectly said "YaRN" here.
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for ReRoPE')
    if args.adapter:
        # Merge a LoRA/PEFT adapter into the base weights.
        from peft import PeftModel
        model = PeftModel.from_pretrained(model, args.adapter)
        model = model.merge_and_unload()
    return model
|
def load_model_and_apply_patches(model, args):
    """Convenience wrapper: load *model* per *args*, then apply all patches."""
    loaded = load_model(model, args)
    return apply_patches(loaded, args)
|
def generate_prompt(n_garbage):
    """Build a pass-key retrieval prompt padded with ``n_garbage`` characters
    of filler text, the key sentence inserted at a random position.

    Returns:
        Tuple of (prompt_text, pass_key).
    """
    # Split the filler budget at a random point around the key sentence.
    n_garbage_prefix = random.randint(0, n_garbage)
    n_garbage_suffix = n_garbage - n_garbage_prefix
    task_description = 'There is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there.'
    garbage = 'The grass is green. The sky is blue. The sun is yellow. Here we go. There and back again.'
    garbage_inf = ' '.join([garbage] * 10000)
    assert len(garbage_inf) >= n_garbage
    pass_key = random.randint(1, 50000)
    information_line = f'The pass key is {pass_key}. Remember it. {pass_key} is the pass key.'
    final_question = 'What is the pass key? The pass key is'
    prompt = '\n'.join([
        task_description,
        garbage_inf[:n_garbage_prefix],
        information_line,
        garbage_inf[:n_garbage_suffix],
        final_question,
    ])
    return (prompt, pass_key)
|
def test_model(pipe, prompt_text, pass_key):
response = pipe(prompt_text, num_return_sequences=1, max_new_tokens=10)[0]['generated_text'][len(prompt_text):]
assert (f'The pass key is {pass_key}' in prompt_text)
try:
pass_key = int(re.search('\\d+', response).group())
except:
pass_key = response[:20]
return pass_key
|
def main(args):
    """Pass-key retrieval benchmark: for each model, measure retrieval accuracy
    at a series of prompt lengths and optionally write a CSV of results."""
    models = [x[0] for x in args.model]
    tokenizer = AutoTokenizer.from_pretrained(models[0], model_max_length=sys.maxsize, padding_side='right', trust_remote_code=True)
    if args.fixed_length:
        # Single fixed garbage-character length.
        lengths = [args.fixed_length]
        tokens = [len(tokenizer.encode(generate_prompt(args.fixed_length)[0]))]
        print(f'Prompt is {tokens[0]} tokens')
    else:
        # Build the list of target token counts: either a fixed step, or
        # doubling from min_tokens up to max_tokens.
        if args.tokens_step:
            tokens = [x for x in range(args.min_tokens, (args.max_tokens + 1), args.tokens_step)]
        else:
            tokens = [args.min_tokens]
            while (args.min_tokens < args.max_tokens):
                point = (tokens[(- 1)] * 2)
                if (point <= args.max_tokens):
                    tokens.append(point)
                else:
                    break
        # For each token target, search (in length_step increments) for the
        # largest garbage-character count whose prompt stays under the target.
        lengths = []
        last_n = 0
        for target in tqdm(tokens, desc='Determining sequence lengths'):
            num_tokens = 0
            n = last_n
            while (num_tokens < target):
                last_n = n
                n += args.length_step
                prompt = generate_prompt(n)[0]
                num_tokens = len(tokenizer.encode(prompt))
            lengths.append(last_n)
    results = []
    for model in tqdm(models, desc='Model', leave=False):
        torch.cuda.empty_cache()
        loaded = load_model_and_apply_patches(model, args)
        pipe = pipeline('text-generation', model=loaded, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id)
        # result[i] accumulates successes for lengths[i], then becomes a rate.
        result = ([0] * len(lengths))
        for (i, length) in tenumerate(lengths, desc='Lengths', leave=False):
            for _ in trange(0, args.iterations, desc='Iterations', leave=False):
                (prompt_text, pass_key) = generate_prompt(length)
                num_tokens = len(pipe.tokenizer.encode(prompt_text))
                answer = test_model(pipe, prompt_text, pass_key)
                if (answer == pass_key):
                    result[i] += 1
            result[i] /= args.iterations
            print(f'{model}: {tokens[i]}={int((result[i] * 100))}%')
        # First CSV column is the model name.
        result.insert(0, model)
        results.append(result)
    if args.output_file:
        # CSV: header row of token counts, then one row per model.
        with open(args.output_file, 'w', encoding='utf-8') as f:
            f.write(f''',{','.join([str(x) for x in tokens])}
''')
            for result in results:
                f.write(f'''{','.join([str(x) for x in result])}
''')
|
def order(i):
    """Return *i* with its English ordinal suffix ('1st', '2nd', '3rd', '4th', ...).

    The teens (11, 12, 13 — and 111, 112, ... in every hundred) take 'th'.
    The original checked ``i % 10 != 11`` (always true, giving '11st'/'12nd')
    and had a ``i % 19 == 3`` typo in the 'rd' branch.
    """
    if i % 100 in (11, 12, 13):
        return str(i) + 'th'
    last_digit = i % 10
    if last_digit == 1:
        return str(i) + 'st'
    if last_digit == 2:
        return str(i) + 'nd'
    if last_digit == 3:
        return str(i) + 'rd'
    return str(i) + 'th'
|
def generate_prompt(docs, num_keys=1):
    """Insert ``num_keys`` pass-key sentences at random positions between the
    pieces of *docs*, then ask for one of them at random.

    Returns:
        Tuple of (prompt_text, pass_keys, start_pos, retrieve_number).
    """
    task_description = 'There is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there.'
    pass_keys = [random.randint(1, 50000) for _ in range(num_keys)]
    start_pos = sorted([random.randint(1, len(docs)) for _ in range(num_keys)])
    information_lines = [
        f'The {order((i + 1))} pass key is {pass_key}. Remember it. {pass_key} is the {order((i + 1))} pass key.'
        for (i, pass_key) in enumerate(pass_keys)
    ]
    retrieve_number = random.randint(0, (num_keys - 1))
    final_question = f'What is the {order((retrieve_number + 1))} pass key? The {order((retrieve_number + 1))} pass key is'
    # Interleave filler text and key sentences in position order.
    lines = [task_description]
    cursor = 0
    for (info_line, pos) in zip(information_lines, start_pos):
        lines.append(''.join(docs[cursor:pos]))
        lines.append(info_line)
        cursor = pos
    lines.append(''.join(docs[cursor:]))
    lines.append(final_question)
    return ('\n'.join(lines), pass_keys, start_pos, retrieve_number)
|
def test_model(pipe, prompt_text):
response = pipe(prompt_text, num_return_sequences=1, max_new_tokens=10)[0]['generated_text'][len(prompt_text):]
try:
pass_key = int(re.search('\\d+', response).group())
except:
pass_key = response[:20]
return (pass_key, response)
|
def construct_junk(data, length, tokenizer):
    """Sample random documents from *data* and return a list of token-aligned
    text pieces totalling exactly ``length`` tokens (default 8192 when
    *length* is falsy)."""
    target = length or 8192
    docs = []
    token_count = 0
    while token_count < target:
        sample = random.choice(data)['text']
        encoding = tokenizer(sample, return_offsets_mapping=True)
        # Drop zero-width spans (e.g. special tokens map to empty offsets).
        offsets = [(start, end) for (start, end) in encoding['offset_mapping'] if start < end]
        take = min(target - token_count, len(offsets))
        docs.extend(sample[start:end] for (start, end) in offsets[:take])
        token_count += take
    return docs
|
def main(args):
    """Multi-key pass-key retrieval benchmark over junk text sampled from a
    real dataset; optionally restricts decoding to digit tokens and dumps all
    prompts/results to a JSON file."""
    models = [x[0] for x in args.model]
    tokenizer = AutoTokenizer.from_pretrained(models[0], model_max_length=sys.maxsize, padding_side='right', trust_remote_code=True)
    data = load_dataset(args.dataset)[args.split]
    # Token-aligned filler pieces shared by every iteration.
    junks = construct_junk(data, args.fixed_length, tokenizer)
    if args.restrict_tokens:
        vocab = tokenizer.vocab
        # SentencePiece word-boundary marker; tokens are compared with it
        # stripped so '▁42' counts as a digit token.
        escape_char = '▁'
        digit_tokens = [vocab[a] for a in vocab.keys() if a.lstrip(escape_char).isdigit()]
        digit_tokens.append(vocab[tokenizer.eos_token])
        # Also allow pure whitespace tokens.
        extra = [vocab[a] for a in vocab.keys() if (a.strip((' \n' + escape_char)) == '')]
        digit_tokens.extend(extra)
        # Boolean mask of vocab ids to suppress (True = not an allowed token).
        mask = torch.ones(tokenizer.vocab_size, dtype=torch.bool)
        mask[digit_tokens] = 0
        def filter_digits(module, input, output):
            # Forward hook: push all non-digit logits to a large negative
            # value so generation can only emit digits/whitespace/EOS.
            output.logits[(..., mask[:output.logits.size((- 1))])] = (- 10000.0)
        print(f'Decoding restricted to {len(digit_tokens)} tokens.')
    results = []
    success_count = 0
    for model in tqdm(models, desc='Model', leave=False):
        torch.cuda.empty_cache()
        loaded = load_model_and_apply_patches(model, args)
        if args.restrict_tokens:
            loaded.register_forward_hook(filter_digits)
        pipe = pipeline('text-generation', model=loaded, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id)
        for _ in trange(0, args.iterations, desc='Iterations', leave=False):
            (prompt_text, pass_keys, start_pos, target) = generate_prompt(junks, args.num_keys)
            num_tokens = len(pipe.tokenizer.encode(prompt_text))
            (answer, return_text) = test_model(pipe, prompt_text)
            # Prefix match tolerates truncated generations.
            passed = str(answer).startswith(str(pass_keys[target]))
            result = {'prompt_text': prompt_text, 'start_pos': start_pos, 'pass_keys': pass_keys, 'return_text': return_text, 'passed': passed}
            success_count += passed
            results.append(result)
    # Record the filler corpus itself as the final entry.
    results.append({'original_prompt': junks})
    print(f'Iteration: {args.iterations}')
    print(f'Successes: {success_count}')
    if args.output_file:
        with open(args.output_file, 'w') as f:
            json.dump(results, f)
|
def main(args):
    """Interactive generation REPL: read a prompt (from stdin or a file),
    generate a continuation, print it, and repeat until interrupted."""
    tokenizer = AutoTokenizer.from_pretrained(args.model, model_max_length=sys.maxsize, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token
    model = load_model_and_apply_patches(args.model, args)
    pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id, temperature=args.temperature, repetition_penalty=args.repetition_penalty, top_k=args.top_k, penalty_alpha=args.penalty_alpha, do_sample=(args.temperature is not None))
    while True:
        if (args.input_file is None):
            prompt_text = input('> ')
        else:
            input(f'Press enter to read {args.input_file} ')
            # The original passed encoding='utf=8' (a typo), which raises
            # LookupError at read time; it also leaked the file handle.
            with open(args.input_file, encoding='utf-8') as f:
                prompt_text = f.read()
        response = pipe(prompt_text, num_return_sequences=1, max_new_tokens=args.max_new_tokens)[0]['generated_text'][len(prompt_text):]
        print(f'< {response}')
|
def get_prompt(sample):
    """Format a QuALITY multiple-choice sample into the ZeroSCROLLS prompt,
    ending just before the answer letter so the model completes it."""
    options = sample['options']
    instruction = ZERO_SCROLLS_QUALITY_PROMPT.format(
        story=sample['article'],
        question=sample['question'],
        a=options[0],
        b=options[1],
        c=options[2],
        d=options[3],
    )
    return instruction + '\nAnswer: ('
|
def main(args):
    """QuALITY multiple-choice evaluation: score A/B/C/D by comparing the
    logits of the four choice tokens on the first generated position."""
    models = [x[0] for x in args.model]
    tokenizer = AutoTokenizer.from_pretrained(models[0], model_max_length=sys.maxsize, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.pad_token_id = tokenizer.eos_token_id
    dataset = load_dataset('emozilla/quality', split=args.split)
    dataset = dataset.map((lambda sample: {'prompt': get_prompt(sample)}))
    if args.max_tokens:
        # Drop samples whose prompts exceed the context budget.
        dataset = dataset.filter((lambda sample: (len(tokenizer(sample['prompt']).input_ids) <= args.max_tokens)))
    # First token id of each answer letter in CHOICES.
    choice_tokens = [x[0] for x in tokenizer(CHOICES, add_special_tokens=False).input_ids]
    decoded_choice = tokenizer.decode(choice_tokens, clean_up_tokenization_spaces=True)
    results = []
    for model in models:
        torch.cuda.empty_cache()
        loaded = load_model_and_apply_patches(model, args)
        correct_answers = 0
        i = 0
        # NOTE(review): `max` shadows the builtin; it is the sample budget.
        max = (len(dataset) if (args.limit is None) else args.limit)
        bar = tqdm(total=max)
        while (i < max):
            sample = dataset[i]
            tokenized_prompt = tokenizer(sample['prompt'], return_tensors='pt')
            input_ids = tokenized_prompt.input_ids.to('cuda')
            attention_mask = tokenized_prompt.attention_mask.to('cuda')
            # Generate exactly one token and keep its score distribution.
            output = loaded.generate(input_ids, attention_mask=attention_mask, max_new_tokens=1, return_dict_in_generate=True, output_scores=True, pad_token_id=tokenizer.eos_token_id)
            scores = output.scores[0][0]
            # Pick whichever answer letter has the highest logit.
            choice_scores = [x.cpu() for x in [scores[choice_tokens[0]], scores[choice_tokens[1]], scores[choice_tokens[2]], scores[choice_tokens[3]]]]
            selection = numpy.argmax([x.float().cpu() for x in choice_scores])
            correct_answers += (1 if (selection == sample['answer']) else 0)
            if args.print_choices:
                print(f"Choice: {CHOICES[selection]} Correct: {CHOICES[sample['answer']]}")
            i += 1
            percent = ((correct_answers / i) * 100.0)
            bar.desc = f'{model}: {percent:.1f}%'
            bar.update()
        # NOTE(review): the CSV stores a 0-1 fraction while the progress bar
        # shows a 0-100 percentage — confirm which is intended.
        percent = (correct_answers / max)
        results.append(str(percent))
    if args.output_file:
        with open(args.output_file, 'w', encoding='utf-8') as f:
            f.write((','.join(models) + '\n'))
            f.write((','.join(results) + '\n'))
|
def find_all_linear_names(model):
    """Collect the leaf names of all ``torch.nn.Linear`` submodules of *model*
    (for use as LoRA target modules); ``lm_head`` is excluded."""
    linear_names = {
        name.split('.')[(- 1)]
        for (name, module) in model.named_modules()
        if isinstance(module, torch.nn.Linear)
    }
    # Never adapt the output head.
    linear_names.discard('lm_head')
    return list(linear_names)
|
def main(args):
    """Fine-tune a Llama/Mistral model with YaRN RoPE scaling using
    HuggingFace Accelerate (optionally DeepSpeed/FSDP, LoRA, checkpoint
    resume, and a sliding-window attention schedule)."""
    if args.output_dir:
        os.makedirs(args.output_dir, exist_ok=True)
    if args.wandb:
        import wandb
        wandb.login()
    set_seed(args.seed)
    # Very long NCCL timeout so huge-context steps don't kill the process group.
    timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1000000))
    accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulate_every, mixed_precision='bf16', log_with=('wandb' if args.wandb else None), kwargs_handlers=[timeout])
    accelerator.init_trackers(project_name=(args.wandb if args.wandb else 'yarn'))
    accelerator.print(f'Total GPUS: {accelerator.num_processes}')
    # Pick the YaRN-patched model/config classes and the architecture's
    # native pre-scaling context length.
    if (args.architecture == 'llama'):
        from scaled_rope.modeling_llama_yarn import LlamaForCausalLM
        from scaled_rope.configuration_llama import LlamaConfig
        config_cls = LlamaConfig
        model_cls = LlamaForCausalLM
        original_max_position_embeddings = (args.original_max_position_embeddings if args.original_max_position_embeddings else 4096)
    elif (args.architecture == 'mistral'):
        from scaled_rope.modeling_mistral_yarn import MistralForCausalLM
        from scaled_rope.configuration_mistral import MistralConfig
        config_cls = MistralConfig
        model_cls = MistralForCausalLM
        original_max_position_embeddings = (args.original_max_position_embeddings if args.original_max_position_embeddings else 8192)
    config = config_cls.from_pretrained(args.model)
    config.rope_scaling = {'type': args.scaling_type, 'factor': args.scaling_factor, 'original_max_position_embeddings': original_max_position_embeddings}
    config.rope_theta = args.rope_theta
    # Target context = scaling factor x original, unless given explicitly.
    config.max_position_embeddings = (int((args.scaling_factor * original_max_position_embeddings)) if (not args.max_position_embeddings) else args.max_position_embeddings)
    # Comma-separated list of window sizes cycled through during training;
    # a single value is applied statically here.
    sliding_window_attention_schedule = ([int(x) for x in args.sliding_window_attention_schedule.split(',')] if args.sliding_window_attention_schedule else None)
    if ((sliding_window_attention_schedule is not None) and (len(sliding_window_attention_schedule) == 1)):
        config.sliding_window = sliding_window_attention_schedule[0]
        accelerator.print(f'Sliding attention window set to {config.sliding_window}')
    model = model_cls.from_pretrained(args.model, torch_dtype=torch.bfloat16, config=config, use_flash_attention_2=True)
    try:
        train_dataset = load_dataset(args.dataset)
    except:
        train_dataset = load_from_disk(args.dataset)
    if isinstance(train_dataset, DatasetDict):
        train_dataset = train_dataset['train']
    if ('input_ids' not in train_dataset.column_names):
        raise RuntimeError('Dataset must include an `input_ids` feature')
    # Causal-LM default: labels are a copy of the inputs.
    if ('labels' not in train_dataset.column_names):
        def add_labels(sample):
            sample['labels'] = copy.deepcopy(sample['input_ids'])
            return sample
        train_dataset = train_dataset.map(add_labels, desc='Adding labels', num_proc=args.num_proc)
    if ('attention_mask' not in train_dataset.column_names):
        def add_attention_mask(sample):
            sample['attention_mask'] = torch.ones(len(sample['input_ids']), dtype=torch.int8)
            return sample
        train_dataset = train_dataset.map(add_attention_mask, desc='Adding attention mask', num_proc=args.num_proc)
    if args.truncate:
        def truncate(sample):
            # Clip all aligned features to the same prefix length.
            sample['input_ids'] = sample['input_ids'][0:args.truncate]
            sample['labels'] = sample['labels'][0:args.truncate]
            sample['attention_mask'] = sample['attention_mask'][0:args.truncate]
            return sample
        train_dataset = train_dataset.map(truncate, desc='Truncating', num_proc=args.num_proc)
    train_loader = DataLoader(train_dataset, collate_fn=default_data_collator, shuffle=True, batch_size=args.batch_size)
    if args.lora:
        from peft import get_peft_model, LoraConfig, TaskType
        target_modules = find_all_linear_names(model)
        accelerator.print(f'LoRA target modules: {target_modules}')
        peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM, inference_mode=False, r=16, lora_alpha=64, lora_dropout=0.05, target_modules=target_modules)
        model = get_peft_model(model, peft_config)
        model.print_trainable_parameters()
    # DeepSpeed supplies its own optimizer/scheduler via Dummy* placeholders.
    if args.deepspeed:
        optim = DummyOptim(model.parameters(), lr=args.learning_rate)
        scheduler = DummyScheduler(optim, num_training_steps=args.max_train_steps, num_warmup_steps=args.warmup_steps)
        (model, optim, train_loader, scheduler) = accelerator.prepare(model, optim, train_loader, scheduler)
    else:
        model = accelerator.prepare(model)
        optim = torch.optim.AdamW(model.parameters(), lr=args.learning_rate)
        if (args.lr_schedule == 'linear'):
            scheduler = get_linear_schedule_with_warmup(optim, num_training_steps=args.max_train_steps, num_warmup_steps=args.warmup_steps)
        elif (args.lr_schedule == 'constant'):
            scheduler = get_constant_schedule_with_warmup(optim, num_warmup_steps=args.warmup_steps)
        (optim, train_loader, scheduler) = accelerator.prepare(optim, train_loader, scheduler)
    if (not args.lora):
        model.gradient_checkpointing_enable()
    accelerator.register_for_checkpointing(scheduler)
    total_batch_size = ((args.batch_size * accelerator.num_processes) * args.gradient_accumulate_every)
    accelerator.print(f'Max train steps: {args.max_train_steps}')
    accelerator.print(f'Total batch size: {total_batch_size}')
    progress_bar = tqdm(range(args.max_train_steps), disable=(not accelerator.is_local_main_process))
    completed_steps = 0
    if args.resume_from_checkpoint:
        if ((args.resume_from_checkpoint is not None) or (args.resume_from_checkpoint != '')):
            accelerator.print(f'Resuming from checkpoint {args.resume_from_checkpoint}')
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        # Checkpoint directories are named 'step_<N>'.
        training_difference = os.path.splitext(path)[0]
        resume_step = int(training_difference.replace('step_', ''))
    if (args.resume_from_checkpoint and (resume_step is not None)):
        train_loader = accelerator.skip_first_batches(train_loader, resume_step)
        completed_steps += resume_step
        progress_bar.update(resume_step)
        accelerator.print(f'Resuming training from step {resume_step}')
    # Optional per-step loss CSV (append when resuming).
    loss_file = (open(args.log_loss, ('a' if args.resume_from_checkpoint else 'w')) if (args.log_loss and accelerator.is_main_process) else None)
    if (not args.save_only):
        model.train()
        for (step, batch) in enumerate(train_loader):
            # Cycle the sliding window size through the schedule each step.
            if (sliding_window_attention_schedule is not None):
                model.config.sliding_window = sliding_window_attention_schedule[(completed_steps % len(sliding_window_attention_schedule))]
            loss_log = None
            with accelerator.accumulate(model):
                loss = model(**batch).loss
                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    loss_log = {'loss': loss.item()}
                    accelerator.log(loss_log, step=completed_steps)
                    if (loss_file is not None):
                        loss_file.write(f"{loss_log['loss']},")
                        loss_file.flush()
                if isinstance(args.grad_norm, float):
                    accelerator.clip_grad_norm_(model.parameters(), args.grad_norm)
                optim.step()
                scheduler.step()
                optim.zero_grad()
            # Only count a completed optimizer step once per accumulation cycle.
            if accelerator.sync_gradients:
                progress_bar.update(1)
                if (loss_log is not None):
                    progress_bar.set_postfix(loss_log)
                completed_steps += 1
                if (isinstance(args.checkpointing_steps, int) and (completed_steps > 0)):
                    if ((completed_steps % args.checkpointing_steps) == 0):
                        output_dir = f'step_{completed_steps}'
                        if (args.output_dir is not None):
                            output_dir = os.path.join(args.output_dir, output_dir)
                        accelerator.save_state(output_dir)
                if (completed_steps >= args.max_train_steps):
                    break
    accelerator.print(f'Training Finished')
    accelerator.end_training()
    if (args.output_dir is not None):
        accelerator.print(f'Saving model to {args.output_dir}')
        accelerator.wait_for_everyone()
        if args.deepspeed:
            state_dict = accelerator.get_state_dict(model)
        else:
            # FSDP: gather a full state dict on rank 0, offloaded to CPU.
            full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
            with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_state_dict_config):
                state_dict = accelerator.get_state_dict(model, unwrap=False)
        accelerator.unwrap_model(model).save_pretrained(f'{args.output_dir}', is_main_process=accelerator.is_main_process, save_function=accelerator.save, state_dict=state_dict)
        accelerator.print(f'Saving Finished')
|
def main(args):
    """Print the mean accuracy across tasks in an lm-eval style results JSON.

    Args:
        args: namespace with ``file`` — path to a JSON file shaped like
            ``{"results": {task_name: {"acc": float, ...}, ...}}``.
    """
    # Use a context manager so the handle is closed even if parsing fails
    # (the original leaked the file object).
    with open(args.file, 'r', encoding='utf-8') as f:
        obj = json.load(f)
    results = [result['acc'] for result in obj['results'].values()]
    print(numpy.average(results))
|
def main(args):
    """Plot perplexity-vs-context-window curves from a CSV and save PNG/PDF copies.

    The CSV's first column holds model names; the remaining column headers are
    context-window sizes and the cells are perplexities.
    """
    frame = pd.read_csv(args.csv)
    fig, axis = plt.subplots(figsize=(10, 5))
    # Column headers after the model-name column are the x-axis values.
    context_sizes = [float(col) for col in frame.columns[1:]]
    for record in frame.values:
        # Shorten hub names like "NousResearch/Yarn-..." for the legend.
        series_label = record[0].replace('NousResearch/', '')
        axis.plot(context_sizes, [float(v) for v in record[1:]], label=series_label)
    axis.set_xlabel('Context Window')
    axis.set_ylabel('Perplexity (lower is better)')
    axis.set_xlim(args.xmin, args.xmax)
    axis.set_ylim(args.ymin, args.ymax)
    axis.legend(loc='upper right')
    fig.savefig(args.csv + '.png')
    fig.savefig(args.csv + '.pdf', transparent=True)
|
class LlamaConfig(PretrainedConfig):
    """Configuration for a LLaMA model, extended with YaRN RoPE-scaling options.

    Defaults reproduce LLaMA-7B. Inherits output-control behaviour from
    `PretrainedConfig`.

    Args:
        vocab_size: vocabulary size (number of distinct token ids).
        hidden_size: dimension of the hidden representations.
        intermediate_size: dimension of the MLP representations.
        num_hidden_layers: number of decoder layers.
        num_attention_heads: attention heads per layer.
        num_key_value_heads: key/value heads for grouped-query attention;
            ``None`` falls back to ``num_attention_heads`` (plain MHA).
        hidden_act: activation function used in the decoder MLP.
        max_position_embeddings: maximum sequence length the model supports.
        initializer_range: std of the truncated-normal weight initializer.
        rms_norm_eps: epsilon used by the RMSNorm layers.
        use_cache: whether the model returns past key/values.
        pretraining_tp: experimental tensor-parallelism rank used during
            pretraining (needed for exact reproducibility).
        tie_word_embeddings: whether input and output embeddings are tied.
        rope_theta: base period of the rotary embeddings.
        rope_scaling: optional dict ``{"type": ..., "factor": ...}`` choosing a
            RoPE scaling strategy among 'linear', 'dynamic', 'yarn' and
            'dynamic-yarn'; the yarn variants additionally require an int
            ``original_max_position_embeddings`` entry.
        attention_bias: whether q/k/v/o projections use a bias.

    Example:
        >>> configuration = LlamaConfig()
        >>> model = LlamaModel(configuration)
        >>> configuration = model.config
    """

    model_type = 'llama'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008,
                 num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None,
                 hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02,
                 rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, bos_token_id=1,
                 eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False,
                 rope_theta=10000, rope_scaling=None, attention_bias=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # GQA: one KV head per attention head is equivalent to classic MHA.
        self.num_key_value_heads = num_attention_heads if num_key_value_heads is None else num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        self.attention_bias = attention_bias
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id,
                         eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings,
                         **kwargs)

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration (raises ValueError when malformed)."""
        scaling = self.rope_scaling
        if scaling is None:
            return
        if not isinstance(scaling, dict):
            raise ValueError(f'`rope_scaling` must be a dictionary, got {self.rope_scaling}')
        strategy = scaling.get('type', None)
        factor = scaling.get('factor', None)
        if strategy is None or strategy not in ['linear', 'dynamic', 'yarn', 'dynamic-yarn']:
            raise ValueError(f"`rope_scaling`'s name field must be one of ['linear', 'dynamic', 'yarn', 'dynamic-yarn'], got {strategy}")
        if factor is None or not isinstance(factor, float) or factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {factor}")
        # The yarn strategies also need the pre-scaling context length.
        if strategy in ('yarn', 'dynamic-yarn'):
            original_max = scaling.get('original_max_position_embeddings', None)
            if original_max is None or not isinstance(original_max, int):
                raise ValueError('`rope_scaling.original_max_position_embeddings` must be set to an int when using yarn, and dynamic-yarn')
|
class MistralConfig(PretrainedConfig):
    """Configuration for a Mistral model, extended with YaRN RoPE-scaling options.

    Defaults reproduce Mistral-7B-v0.1. Inherits output-control behaviour from
    `PretrainedConfig`.

    Args:
        vocab_size: vocabulary size (number of distinct token ids).
        hidden_size: dimension of the hidden representations.
        intermediate_size: dimension of the MLP representations.
        num_hidden_layers: number of decoder layers.
        num_attention_heads: attention heads per layer.
        num_key_value_heads: key/value heads for grouped-query attention;
            ``None`` falls back to ``num_attention_heads``.
        hidden_act: activation function used in the decoder MLP.
        max_position_embeddings: maximum sequence length (sliding-window
            attention allows up to 4096*32 tokens).
        initializer_range: std of the truncated-normal weight initializer.
        rms_norm_eps: epsilon used by the RMSNorm layers.
        use_cache: whether the model returns past key/values.
        pad_token_id / bos_token_id / eos_token_id: special token ids.
        tie_word_embeddings: whether input and output embeddings are tied.
        rope_scaling: optional dict ``{"type": ..., "factor": ...}`` choosing a
            RoPE scaling strategy among 'linear', 'dynamic', 'yarn' and
            'dynamic-yarn'; the yarn variants additionally require an int
            ``original_max_position_embeddings`` entry.
        rope_theta: base period of the rotary embeddings.
        sliding_window: sliding-window attention window size.

    Example:
        >>> configuration = MistralConfig()
        >>> model = MistralModel(configuration)
        >>> configuration = model.config
    """

    model_type = 'mistral'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=14336,
                 num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8,
                 hidden_act='silu', max_position_embeddings=(4096 * 32), initializer_range=0.02,
                 rms_norm_eps=1e-06, use_cache=True, pad_token_id=None, bos_token_id=1,
                 eos_token_id=2, tie_word_embeddings=False, rope_scaling=None,
                 rope_theta=10000.0, sliding_window=4096, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.sliding_window = sliding_window
        # GQA: one KV head per attention head is equivalent to classic MHA.
        self.num_key_value_heads = num_attention_heads if num_key_value_heads is None else num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        self.rope_theta = rope_theta
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id,
                         eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings,
                         **kwargs)

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration (raises ValueError when malformed)."""
        scaling = self.rope_scaling
        if scaling is None:
            return
        if not isinstance(scaling, dict):
            raise ValueError(f'`rope_scaling` must be a dictionary, got {self.rope_scaling}')
        strategy = scaling.get('type', None)
        factor = scaling.get('factor', None)
        if strategy is None or strategy not in ['linear', 'dynamic', 'yarn', 'dynamic-yarn']:
            raise ValueError(f"`rope_scaling`'s name field must be one of ['linear', 'dynamic', 'yarn', 'dynamic-yarn'], got {strategy}")
        if factor is None or not isinstance(factor, float) or factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {factor}")
        # The yarn strategies also need the pre-scaling context length.
        if strategy in ('yarn', 'dynamic-yarn'):
            original_max = scaling.get('original_max_position_embeddings', None)
            if original_max is None or not isinstance(original_max, int):
                raise ValueError('`rope_scaling.original_max_position_embeddings` must be set to an int when using yarn, and dynamic-yarn')
|
def patch_llama_for_dynamic_scaled_rotary_embeddings(model, ntk):
    """Replace every LLaMA layer's rotary embedding with a dynamically-scaled one.

    Args:
        model: LLaMA model whose decoder layers live at ``model.model.layers``.
        ntk: forwarded to ``LlamaDynamicScaledRotaryEmbedding`` (NTK-aware switch).
    """
    from .LlamaDynamicScaledRotaryEmbedding import LlamaDynamicScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        # Keep the replacement on the same device as the module it replaces.
        attn.rotary_emb = LlamaDynamicScaledRotaryEmbedding(
            attn.head_dim, device=attn.rotary_emb.inv_freq.device, ntk=ntk)
|
def patch_llama_for_dynamic_part_ntk_rotary_embeddings(model, finetuned):
    """Replace every LLaMA layer's rotary embedding with the dynamic part-NTK variant.

    Args:
        model: LLaMA model whose decoder layers live at ``model.model.layers``.
        finetuned: forwarded to the replacement embedding.
    """
    from .LlamaDynamicPartNTKScaledRotaryEmbedding import LlamaDynamicPartNTKScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        # Keep the replacement on the same device as the module it replaces.
        attn.rotary_emb = LlamaDynamicPartNTKScaledRotaryEmbedding(
            attn.head_dim, finetuned=finetuned, device=attn.rotary_emb.inv_freq.device)
|
def patch_llama_for_dynamic_yarn_rotary_embeddings(model, original_max_position_embeddings, finetuned):
    """Replace every LLaMA layer's rotary embedding with the dynamic-YaRN variant.

    Args:
        model: LLaMA model whose decoder layers live at ``model.model.layers``.
        original_max_position_embeddings: pre-scaling context length.
        finetuned: forwarded to the replacement embedding.
    """
    from .LlamaDynamicYaRNScaledRotaryEmbedding import LlamaDynamicYaRNScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        # Keep the replacement on the same device as the module it replaces.
        attn.rotary_emb = LlamaDynamicYaRNScaledRotaryEmbedding(
            attn.head_dim, finetuned=finetuned,
            original_max_position_embeddings=original_max_position_embeddings,
            device=attn.rotary_emb.inv_freq.device)
|
def patch_falcon_for_dynamic_part_ntk_rotary_embeddings(model):
    """Replace every Falcon layer's rotary helper with the dynamic part-NTK variant.

    Args:
        model: Falcon model whose transformer blocks live at ``model.transformer.h``.
    """
    from .FalconDynamicPartNTKScaledRotaryEmbedding import FalconDynamicPartNTKScaledRotaryEmbedding
    for block in model.transformer.h:
        attn = block.self_attention
        attn.maybe_rotary = FalconDynamicPartNTKScaledRotaryEmbedding(attn.head_dim)
|
def patch_llama_for_ntk_scaled_rotary_embeddings(model, alpha):
    """Replace every LLaMA layer's rotary embedding with an NTK-scaled one.

    Args:
        model: LLaMA model whose decoder layers live at ``model.model.layers``.
        alpha: NTK scaling coefficient forwarded to the replacement embedding.
    """
    from .LlamaNTKScaledRotaryEmbedding import LlamaNTKScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        # Keep the replacement on the same device as the module it replaces.
        attn.rotary_emb = LlamaNTKScaledRotaryEmbedding(
            attn.head_dim, alpha=alpha, device=attn.rotary_emb.inv_freq.device)
|
def patch_llama_for_linear_scaled_rotary_embeddings(model, scale):
    """Replace every LLaMA layer's rotary embedding with a linearly-scaled one.

    Args:
        model: LLaMA model whose decoder layers live at ``model.model.layers``.
        scale: position-interpolation factor forwarded to the replacement.
    """
    from .LlamaLinearScaledRotaryEmbedding import LlamaLinearScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        # Keep the replacement on the same device as the module it replaces.
        attn.rotary_emb = LlamaLinearScaledRotaryEmbedding(
            attn.head_dim, scale=scale, device=attn.rotary_emb.inv_freq.device)
|
def patch_llama_for_part_ntk_scaled_rotary_embeddings(model, scale):
    """Replace every LLaMA layer's rotary embedding with the part-NTK-scaled variant.

    Args:
        model: LLaMA model whose decoder layers live at ``model.model.layers``.
        scale: scaling factor forwarded to the replacement embedding.
    """
    from .LlamaPartNTKScaledRotaryEmbedding import LlamaPartNTKScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        # Keep the replacement on the same device as the module it replaces.
        attn.rotary_emb = LlamaPartNTKScaledRotaryEmbedding(
            attn.head_dim, scale=scale, device=attn.rotary_emb.inv_freq.device)
|
def patch_llama_for_yarn_scaled_rotary_embeddings(model, scale, original_max_position_embeddings):
    """Replace every LLaMA layer's rotary embedding with the YaRN-scaled variant.

    Args:
        model: LLaMA model whose decoder layers live at ``model.model.layers``.
        scale: scaling factor forwarded to the replacement embedding.
        original_max_position_embeddings: pre-scaling context length.
    """
    from .LlamaYaRNScaledRotaryEmbedding import LlamaYaRNScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        # Keep the replacement on the same device as the module it replaces.
        attn.rotary_emb = LlamaYaRNScaledRotaryEmbedding(
            attn.head_dim, scale=scale,
            original_max_position_embeddings=original_max_position_embeddings,
            device=attn.rotary_emb.inv_freq.device)
|
def patch_gptneox_for_scaled_rotary_embeddings(model):
    """Replace every GPT-NeoX layer's rotary embedding with a dynamically-scaled one.

    Args:
        model: GPT-NeoX model whose layers live at ``model.gpt_neox.layers``.
    """
    from .GPTNeoXDynamicScaledRotaryEmbedding import GPTNeoXDynamicScaledRotaryEmbedding
    for layer in model.gpt_neox.layers:
        attn = layer.attention
        # Keep the replacement on the same device as the module it replaces.
        attn.rotary_emb = GPTNeoXDynamicScaledRotaryEmbedding(
            attn.rotary_ndims, model.config.max_position_embeddings,
            device=attn.rotary_emb.inv_freq.device)
|
def patch_gptneox_for_ntk_scaled_rotary_embeddings(model, alpha):
    """Replace every GPT-NeoX layer's rotary embedding with an NTK-scaled one.

    Args:
        model: GPT-NeoX model whose layers live at ``model.gpt_neox.layers``.
        alpha: NTK scaling coefficient forwarded to the replacement embedding.
    """
    from .GPTNeoXNTKScaledRotaryEmbedding import GPTNeoXNTKScaledRotaryEmbedding
    for layer in model.gpt_neox.layers:
        attn = layer.attention
        # Keep the replacement on the same device as the module it replaces.
        attn.rotary_emb = GPTNeoXNTKScaledRotaryEmbedding(
            attn.rotary_ndims, model.config.max_position_embeddings,
            alpha=alpha, device=attn.rotary_emb.inv_freq.device)
|
def patch_gptneox_for_longer_sequences(model, max_positions):
    """Enlarge each GPT-NeoX attention layer's causal mask to cover ``max_positions``.

    Args:
        model: GPT-NeoX model whose layers live at ``model.gpt_neox.layers``.
        max_positions: new maximum sequence length for the lower-triangular mask.
    """
    for layer in model.gpt_neox.layers:
        old_bias = layer.attention.bias
        # Rebuild the lower-triangular causal mask with the original dtype/device,
        # shaped (1, 1, max_positions, max_positions) as the attention code expects.
        full = torch.ones((max_positions, max_positions), dtype=old_bias.dtype, device=old_bias.device)
        layer.attention.bias = torch.tril(full).view(1, 1, max_positions, max_positions)
|
def patch_llama_for_rerope(model, training_length, window):
    """Monkey-patch every LLaMA attention layer to run ReRoPE attention.

    Bug fixed: the original built the wrapper closure but never attached it to
    the layer (``each.self_attn.forward`` was never assigned), so the patch was
    a no-op; additionally the closure captured the loop variable late, so every
    wrapper would have targeted the last layer's attention module.

    Args:
        model: LLaMA model whose decoder layers live at ``model.model.layers``.
        training_length: context length the model was trained at.
        window: ReRoPE window size.
    """
    from .LlamaReRoPE import forward_with_rerope
    for each in model.model.layers:
        def forward(*args, _attn=each.self_attn, **kwargs):
            # `_attn` is bound at definition time to avoid late-binding capture.
            return forward_with_rerope(_attn, *args, **kwargs)
        each.self_attn.forward = forward
        each.self_attn.training_length = int(training_length)
        each.self_attn.window = int(window)
|
def main(args):
    """Tokenize one or more text datasets and pack them into fixed-length blocks.

    Each ``args.dataset`` entry is "path" or "path,train_fraction". The texts in
    ``args.feature`` are tokenized (with EOS appended), concatenated, chunked
    into ``args.sequence_length``-token blocks, then saved to disk and/or pushed
    to the Hub.

    Bug fixed: the Hub fallback called ``load_dataset([i])`` with the loop
    index instead of ``load_dataset(datasets[i])``, so any dataset that was not
    already on disk crashed. The bare ``except:`` (which also swallowed
    KeyboardInterrupt) was narrowed to ``Exception``.
    """
    if args.dataset is None or len(args.dataset[0]) == 0:
        raise RuntimeError('No datasets provided')
    datasets = args.dataset[0]
    # Split each "path[,fraction]" entry into its path and optional fraction.
    splits = [(x.split(',')[1] if len(x.split(',')) == 2 else '') for x in datasets]
    datasets = [x.split(',')[0] for x in datasets]
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    if args.json:
        dataset = load_dataset('json', data_files=datasets)[args.split]
        # Any non-empty split fraction triggers subsetting.
        if reduce(lambda x, y: x or len(y) > 0, splits, False):
            if len(datasets) > 1:
                raise RuntimeError('Can only use splitting on json datasets if there is exactly one input file')
            dataset = dataset.train_test_split(train_size=float(splits[0]), seed=args.seed)['train']
    else:
        to_concatenate = []
        for i in range(0, len(datasets)):
            try:
                # Prefer a dataset previously saved with save_to_disk ...
                loaded = load_from_disk(datasets[i])
            except Exception:
                # ... otherwise treat the entry as a Hub dataset name.
                # (Fixed: was `load_dataset([i])`.)
                loaded = load_dataset(datasets[i])[args.split]
            if len(splits[i]) > 0:
                loaded = loaded.train_test_split(train_size=float(splits[i]), seed=args.seed)['train']
            to_concatenate.append(loaded)
        dataset = concatenate_datasets(to_concatenate)
    # Keep only the text feature column.
    dataset = dataset.remove_columns([x for x in dataset.column_names if x not in [args.feature]])
    tokenized_dataset = dataset.map(
        lambda example: tokenizer([t + tokenizer.eos_token for t in example[args.feature]]),
        batched=True, num_proc=args.num_proc, remove_columns=[args.feature])
    block_size = args.sequence_length

    def group_texts(examples):
        # Concatenate all token sequences, then cut into block_size chunks,
        # dropping the trailing remainder shorter than one block.
        concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        if total_length >= block_size:
            total_length = (total_length // block_size) * block_size
        result = {k: [t[i:i + block_size] for i in range(0, total_length, block_size)]
                  for k, t in concatenated_examples.items()}
        return result

    train_dataset = tokenized_dataset.map(group_texts, batched=True, num_proc=args.num_proc)
    if args.output:
        train_dataset.save_to_disk(args.output)
    if args.push_to_hub:
        train_dataset.push_to_hub(args.push_to_hub, private=True)
|
def main(args):
    """Load a tokenized dataset, clip every sample to ``args.truncate`` tokens, save it."""
    dataset = load_dataset(args.dataset, split='train')

    def clip(sample):
        # Shorten the three aligned token-level fields in lockstep.
        limit = args.truncate
        for key in ('input_ids', 'labels', 'attention_mask'):
            sample[key] = sample[key][0:limit]
        return sample

    dataset = dataset.map(clip, desc='Truncating', num_proc=args.num_proc)
    dataset.save_to_disk(args.output)
|
def get_gaussian_dataset(role, size, dim, std):
    """Build a SupervisedDataset of isotropic Gaussian samples with all-zero labels.

    Args:
        role: dataset role tag (e.g. 'train', 'valid', 'test').
        size: number of samples.
        dim: feature dimensionality.
        std: standard deviation of the Gaussian features.
    """
    features = torch.randn(size, dim) * std
    labels = torch.zeros(size).long()
    return SupervisedDataset(f'gaussian-dim{dim}-std{std}', role, features, labels)
|
def get_well_conditioned_gaussian_datasets(dim, std, oos_std):
    """Return (train, valid, [in-dist test, out-of-dist test]) Gaussian datasets.

    Args:
        dim: feature dimensionality shared by all splits.
        std: standard deviation for train/valid and the in-distribution test set.
        oos_std: standard deviation for the out-of-distribution test set.
    """
    def build(role, size, sigma):
        return get_gaussian_dataset(role=role, size=size, dim=dim, std=sigma)

    train_dset = build('train', 50000, std)
    valid_dset = build('valid', 5000, std)
    # Two test sets: one in-distribution, one with shifted scale.
    test_dsets = [build('test', 10000, std), build('test', 10000, oos_std)]
    return (train_dset, valid_dset, test_dsets)
|
def get_linear_gaussian_dataset(role, size):
    """Sample x = A z + b + noise with standard-normal latent z; wrap as SupervisedDataset.

    Args:
        role: dataset role tag (e.g. 'train', 'valid', 'test').
        size: number of samples to draw.
    """
    A = torch.tensor([[-4.0], [1.0]])
    b = torch.tensor([1.0, -3.0])
    sigma = 0.1
    latents = torch.randn(size, A.shape[1], 1)
    projected = torch.matmul(A, latents).view(size, A.shape[0])
    noise = sigma * torch.randn_like(projected)
    return SupervisedDataset(name='linear-gaussian', role=role, x=projected + b + noise)
|
def get_linear_gaussian_datasets():
    """Return (train, valid, test) splits of the linear-Gaussian dataset."""
    split_sizes = {'train': 100000, 'valid': 10000, 'test': 10000}
    train_dset, valid_dset, test_dset = (
        get_linear_gaussian_dataset(role=role, size=size)
        for role, size in split_sizes.items()
    )
    return (train_dset, valid_dset, test_dset)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.