code stringlengths 17 6.64M |
|---|
class LossNN(Module, abc.ABC):
    """Abstract base for loss-oriented neural networks.

    Subclasses implement an algorithm by designing its loss (``criterion``)
    and a ``predict`` method; the forward pass itself is the identity.
    """

    def __init__(self):
        super(LossNN, self).__init__()

    def forward(self, x):
        # Identity pass-through: all behavior lives in `criterion`/`predict`.
        return x

    @abc.abstractmethod
    def criterion(self, X, y):
        pass

    @abc.abstractmethod
    def predict(self):
        pass
|
def timing(func):
    """Decorator that prints the wall-clock duration of every call."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        out = func(*args, **kwargs)
        elapsed = time.time() - start
        print("'{}'".format(func.__name__) + ' took {} s'.format(elapsed))
        return out
    return wrapper
|
class lazy_property():
    """Descriptor that computes an attribute once and caches it.

    On first access the wrapped function runs and its result is stored on
    the instance under the same name, shadowing this descriptor so later
    accesses are plain attribute lookups.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, cls):
        result = self.func(instance)
        # Overwrite the descriptor on the instance: subsequent reads skip us.
        setattr(instance, self.func.__name__, result)
        return result
|
def softmax(x):
    """Numerically stable softmax over the last axis of `x`."""
    shifted = x - np.max(x, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=-1, keepdims=True)
|
def cross_entropy_loss(y_pred, y_label):
    """Cross entropy accepting either soft (one-hot/prob) or index labels.

    When `y_label` matches the shape of `y_pred` it is treated as a
    probability/one-hot target; otherwise it is cast to class indices and
    fed to the standard CrossEntropyLoss.
    """
    if y_pred.size() == y_label.size():
        log_probs = torch.log_softmax(y_pred, dim=-1)
        return torch.mean(-torch.sum(log_probs * y_label, dim=-1))
    return torch.nn.CrossEntropyLoss()(y_pred, y_label.long())
|
def grad(y, x, create_graph=True, keepdim=False):
    """Batched Jacobian dy/dx via torch.autograd.

    y: [N, Ny] or [Ny]
    x: [N, Nx] or [Nx]
    Return dy/dx ([N, Ny, Nx] or [Ny, Nx]); with keepdim=False, leading
    dimensions of size 1 are squeezed out of the result shape.
    """
    # N = batch size (1 when y is unbatched / 1-D).
    N = (y.size(0) if (len(y.size()) == 2) else 1)
    Ny = y.size((- 1))
    Nx = x.size((- 1))
    # Ones as grad_outputs so each autograd.grad call yields one Jacobian
    # "row" d y[..., i] / dx for the whole batch at once.
    z = torch.ones_like(y[(..., 0)])
    dy = []
    for i in range(Ny):
        dy.append(torch.autograd.grad(y[(..., i)], x, grad_outputs=z, create_graph=create_graph)[0])
    # Leading shape: keep [N, Ny] (or [Ny] for 1-D y) when keepdim,
    # otherwise drop any size-1 leading dimensions.
    shape = np.array([N, Ny])[(2 - len(y.size())):]
    shape = (list(shape) if keepdim else list(shape[(shape > 1)]))
    return torch.cat(dy, dim=(- 1)).view((shape + [Nx]))
|
def input_fn(features, labels, shuffle=True, batch_size=64, repeat=False, seed=None):
    """Convert numpy data into a batched tf.data Dataset.

    inputs:
        features: np.ndarray containing features.
        labels: np.ndarray containing labels for all examples.
        shuffle: bool indicating whether to shuffle (buffer of 100 examples).
        batch_size: int indicating the desired batch size.
        repeat: bool specifying whether to repeat the dataset.
        seed: int seed used for shuffling the dataset.
    outputs:
        ds: dataset ready for training/evaluation.
    """
    ds = tf_data.Dataset.from_tensor_slices((features, labels))
    if shuffle:
        ds = ds.shuffle(100, seed=seed)
    ds = ds.batch(batch_size)
    if repeat:
        ds = ds.repeat()
    return ds
|
def create_model(model_type='state_estimator', model_opt='best_noise_opt'):
    """Build and compile a CNN classifier for state estimation or quality control.

    inputs:
        model_type: str specifying either 'state_estimator' or
            'quality_control' type machine learning model.
        model_opt: str specifying dataset the model parameters were optimized
            on. Valid options for 'state_estimator' model_type:
            'noiseless_opt' or 'best_noise_opt'. Valid options for
            'quality_control' type: 'uniform_noise_dist_opt'.
    outputs:
        model: compiled tf.keras Model.
    raises:
        ValueError: if model_type or model_opt is not recognized.
    """
    # Validate the (model_type, model_opt) combination before building.
    valid_model_types = ['state_estimator', 'quality_control']
    if (model_type not in valid_model_types):
        raise ValueError('model_type not recognized: ', model_type, ' Valid values: ', valid_model_types)
    valid_model_opts = {'state_estimator': ['noiseless_opt', 'best_noise_opt'], 'quality_control': ['uniform_noise_dist_opt']}
    if (model_opt not in valid_model_opts[model_type]):
        raise ValueError('model_opt not recognized: ', model_opt, ' Valid values: ', valid_model_opts[model_type])
    # Hyperparameters below come from per-dataset optimization. k_size,
    # n_filters and drop_rates are indexed as [cnn_block][layer_in_stack].
    if ((model_type == 'state_estimator') and (model_opt == 'best_noise_opt')):
        lr = 0.00121
        k_size = [[7, 7], [7, 7]]
        cnn_maxpool = False
        cnn_stack = 2
        n_cnn = 2
        n_filters = [[22, 22], [35, 35]]
        drop_rates = [[0.655, 0.655], [0.194, 0.194]]
        layer_norm = False
        ave_pool = True
        activation = 'relu'
        dense_n = 0
    elif ((model_type == 'state_estimator') and (model_opt == 'noiseless_opt')):
        lr = 0.00345
        k_size = [[5], [5], [5]]
        cnn_maxpool = False
        cnn_stack = 1
        n_cnn = 3
        n_filters = [[23], [7], [18]]
        drop_rates = [[0.12], [0.28], [0.3]]
        layer_norm = True
        ave_pool = True
        activation = 'relu'
        dense_n = 0
    elif ((model_type == 'quality_control') and (model_opt == 'uniform_noise_dist_opt')):
        lr = 0.000265
        k_size = [[7, 3]]
        cnn_maxpool = True
        cnn_stack = 2
        n_cnn = 1
        n_filters = [[184, 249]]
        drop_rates = [[0.05, 0.0]]
        layer_norm = True
        ave_pool = True
        activation = 'swish'
        dense_n = 1
        # NOTE: dense_dropout/dense_units are only defined on this branch;
        # the other branches use dense_n = 0 so the dense loop never runs.
        dense_dropout = [0.6]
        dense_units = [161]
    # Downsampling is done either by max-pooling or by a strided conv on the
    # last layer of each stack (stride 2 when no max-pooling).
    if cnn_maxpool:
        cnn_stride = 1
    else:
        cnn_stride = 2
    inputs = tf_layers.Input(shape=(config.SUB_SIZE, config.SUB_SIZE, 1))
    x = inputs
    # n_cnn blocks, each a stack of cnn_stack conv layers.
    for i in range(n_cnn):
        for j in range(cnn_stack):
            # Only the final conv of a stack gets the downsampling stride.
            if (j == (cnn_stack - 1)):
                stride = cnn_stride
            else:
                stride = 1
            x = tf_layers.Conv2D(filters=n_filters[i][j], kernel_size=k_size[i][j], padding='same', strides=stride)(x)
            x = tf_layers.Dropout(rate=drop_rates[i][j])(x)
            if layer_norm:
                x = tf_layers.LayerNormalization()(x)
            x = tf_layers.Activation(activation)(x)
        if cnn_maxpool:
            x = tf_layers.MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    if ave_pool:
        x = tf_layers.GlobalAvgPool2D()(x)
    x = tf_layers.Flatten()(x)
    # Optional dense head (only used by the quality_control configuration).
    for i in range(dense_n):
        x = tf_layers.Dense(units=dense_units[i], activation=activation)(x)
        x = tf_layers.Dropout(rate=dense_dropout[i])(x)
    # Softmax output head sized by the task; both tasks use categorical CE.
    if (model_type == 'state_estimator'):
        outputs = tf_layers.Dense(units=config.NUM_STATES, activation='softmax')(x)
        model = tf_Model(inputs, outputs, name=('device_state_estimator_' + model_opt))
        model.compile(optimizer=tf_Adam(learning_rate=lr), loss='categorical_crossentropy', metrics=['accuracy'])
    elif (model_type == 'quality_control'):
        outputs = tf_layers.Dense(units=config.NUM_QUALITY_CLASSES, activation='softmax')(x)
        model = tf_Model(inputs=inputs, outputs=outputs, name=('data_quality_control_' + model_opt))
        model.compile(optimizer=tf_Adam(learning_rate=lr), loss='categorical_crossentropy', metrics=['accuracy'])
    return model
|
def get_num_min_class(labels):
    """Get the number of examples in the least represented class.

    Used for resampling data to a balanced class distribution.

    input:
        labels: np.ndarray of one-hot/probability labels with shape
            (num_examples, num_classes); class = argmax over last axis.

    outputs:
        num_samples: int number of samples for the minimum class (0 when a
            class has no examples).
    """
    argmax_labels = np.argmax(labels, axis=(- 1))
    # Count each class in one pass; minlength guarantees one bin per class
    # column so classes with zero examples still contribute a 0 count.
    counts = np.bincount(argmax_labels, minlength=labels.shape[(- 1)])
    return int(counts.min())
|
def resample_data(features, state_labels, labels=None, seed=None):
    """Resample data so classes in state_labels are evenly represented.

    Cuts the number of examples for each class to the number of examples in
    the least represented class (classes assumed to be the last axis of
    state_labels). Shuffles after resampling.

    inputs:
        features: ndarray of features to be resampled along the first axis.
        state_labels: ndarray of one-hot state labels used for balancing.
        labels: optional ndarray of additional labels resampled alongside
            features.
        seed: seed of the random number generator used for shuffling idxs
            during resampling and for the final shuffle.

    outputs:
        (features, labels) when labels is given;
        (features, state_labels) otherwise. Both resampled and shuffled.
    """
    rng = np.random.default_rng(seed)
    num_samples = get_num_min_class(state_labels)
    features_resamp = []
    state_labels_resamp = []
    labels_resamp = []
    # Per class: select that class's rows, shuffle them, keep num_samples.
    for i in range(state_labels.shape[(- 1)]):
        s_idxs = (state_labels.argmax(axis=(- 1)) == i)
        features_s_full = features[s_idxs]
        state_labels_s_full = state_labels[s_idxs]
        if (labels is not None):
            labels_s_full = labels[s_idxs]
        idxs = list(range(features_s_full.shape[0]))
        rng.shuffle(idxs)
        features_resamp.append(features_s_full[idxs[:num_samples]])
        state_labels_resamp.append(state_labels_s_full[idxs[:num_samples]])
        if (labels is not None):
            labels_resamp.append(labels_s_full[idxs[:num_samples]])
    features_resamp_arr = np.concatenate(features_resamp, axis=0)
    state_labels_resamp_arr = np.concatenate(state_labels_resamp, axis=0)
    if (labels is not None):
        labels_resamp_arr = np.concatenate(labels_resamp, axis=0)
    # Final shuffle so examples are not grouped by class.
    idxs = list(range(features_resamp_arr.shape[0]))
    rng.shuffle(idxs)
    if (labels is not None):
        return (features_resamp_arr[idxs], labels_resamp_arr[idxs])
    elif (labels is None):
        return (features_resamp_arr[idxs], state_labels_resamp_arr[idxs])
|
def noise_mag_to_class(state_labels, noise_mags, low_thresholds=None, high_thresholds=None):
    """Convert noise magnitudes to one-hot noise-quality classes.

    Noise class thresholds are defined here. Threshold order over states is:
    no dot, left dot, central dot, right dot, double dot. Default low
    thresholds are the linear extrapolation to 100 % accuracy of an average
    noisy-trained model vs. noise_mag. Default high thresholds are from
    linear extrapolation to 0 % accuracy of an average noisy-trained model
    vs. noise_mag.

    inputs:
        state_labels: state labels, shape assumed (num_examples, num_states).
        noise_mags: float noise magnitudes, shape assumed (num_examples,).
        low_thresholds: floats of shape (num_states,) separating the high
            and moderate signal-to-noise classes.
        high_thresholds: floats of shape (num_states,) separating the
            moderate and low signal-to-noise classes.

    outputs:
        quality_classes: ndarray, shape (num_examples, num_quality_classes).
    """
    num_quality_classes = config.NUM_QUALITY_CLASSES
    num_states = config.NUM_STATES
    if (high_thresholds is None):
        high_thresholds = [1.22, 1.0, 1.21, 0.68, 2.0]
    if (low_thresholds is None):
        low_thresholds = [0.31, 0.32, 0.41, 0.05, 0.47]
    low_thresholds = np.array(low_thresholds)
    high_thresholds = np.array(high_thresholds)
    # NOTE(review): this initialization is unused — it is overwritten by the
    # einsum result below.
    quality_classes = np.zeros((noise_mags.shape + (num_quality_classes,)))
    # Derived from the data; shadows the config value read above.
    num_states = state_labels.shape[(- 1)]
    per_state_classes = np.zeros(((noise_mags.shape + (num_quality_classes,)) + (num_states,)))
    # One-hot per-state class: index 0 = high SNR (<= low threshold),
    # 1 = moderate, 2 = low SNR (> high threshold).
    for i in range(num_states):
        per_state_classes[((noise_mags <= low_thresholds[i]), 0, i)] = 1
        per_state_classes[(((noise_mags > low_thresholds[i]) & (noise_mags <= high_thresholds[i])), 1, i)] = 1
        per_state_classes[((noise_mags > high_thresholds[i]), 2, i)] = 1
    # Weight each state's class assignment by the state label values.
    quality_classes = np.einsum('ijk,ik->ij', per_state_classes, state_labels)
    return quality_classes
|
def get_data(f, train_test_split=0.9, dat_key='sensor', label_key='state', resample=True, seed=None, low_thresholds=None, high_thresholds=None):
    """Read subregion data and convert it to a format useful for training.

    Note that the data is shuffled (by file key) after reading in.

    inputs:
        f: one of:
            str path to .npz file containing cropped data
            dict of cropped data.
        train_test_split: float fraction of data to use for training.
        dat_key: string key for the feature array in each example dict.
        label_key: string key for data used for the label. One of:
            'data_quality', 'noise_mag_factor', 'state'.
        resample: bool specifying whether to resample data to get even state
            representation.
        seed: int random seed for file shuffling.
        low_thresholds: list of noise levels to use for the high/moderate
            signal-to-noise threshold.
        high_thresholds: list of noise levels to use for the moderate/low
            signal-to-noise threshold.

    outputs:
        train_data: np.ndarray of training data.
        train_labels: np.ndarray of training labels.
        eval_data: np.ndarray of evaluation data.
        eval_labels: np.ndarray of evaluation labels.
    """
    # Accept either a path on disk (np.load) or an in-memory dict of dicts.
    try:
        dict_of_dicts = np.load(f, allow_pickle=True)
        file_on_disk = True
    except TypeError:
        dict_of_dicts = f
        file_on_disk = False
    files = list(dict_of_dicts.keys())
    random.Random(seed).shuffle(files)
    inp = []
    oup_state = []
    if (label_key != 'state'):
        oup_labels = []
    else:
        oup_labels = None
        train_labels = None
        eval_labels = None
    # 'data_quality' labels are derived from noise magnitudes further down.
    if (label_key == 'data_quality'):
        data_quality = True
        label_key = 'noise_mag_factor'
    else:
        data_quality = False
    for file in files:
        # np.load wraps each pickled dict in a 0-d array; unwrap with .item().
        if file_on_disk:
            data_dict = dict_of_dicts[file].item()
        else:
            data_dict = dict_of_dicts[file]
        dat = data_dict[dat_key]
        inp.append(dat.reshape(config.SUB_SIZE, config.SUB_SIZE, 1))
        oup_state.append(data_dict['state'])
        if (oup_labels is not None):
            oup_labels.append(data_dict[label_key])
    inp = np.array(inp)
    oup_state = np.array(oup_state)
    if (oup_labels is not None):
        oup_labels = np.array(oup_labels)
    n_samples = inp.shape[0]
    print('Total number of samples :', n_samples)
    n_train = int((train_test_split * n_samples))
    train_data = inp[:n_train]
    print('Training data info:', train_data.shape)
    train_states = oup_state[:n_train]
    if (oup_labels is not None):
        train_labels = oup_labels[:n_train]
    eval_data = inp[n_train:]
    print('Evaluation data info:', eval_data.shape)
    eval_states = oup_state[n_train:]
    if (oup_labels is not None):
        eval_labels = oup_labels[n_train:]
    if data_quality:
        train_labels = noise_mag_to_class(train_states, train_labels, low_thresholds=low_thresholds, high_thresholds=high_thresholds)
        eval_labels = noise_mag_to_class(eval_states, eval_labels, low_thresholds=low_thresholds, high_thresholds=high_thresholds)
    if resample:
        (train_data, train_labels) = resample_data(train_data, train_states, train_labels)
        (eval_data, eval_labels) = resample_data(eval_data, eval_states, eval_labels)
    elif ((not resample) and (label_key == 'state')):
        train_labels = train_states
        eval_labels = eval_states
    # BUG FIX: np.expand_dims returns a new array (it is not in-place); the
    # results were previously discarded, leaving 1-D labels unexpanded.
    if ((oup_labels is not None) and (len(train_labels.shape) == 1)):
        train_labels = np.expand_dims(train_labels, 1)
    if ((oup_labels is not None) and (len(eval_labels.shape) == 1)):
        eval_labels = np.expand_dims(eval_labels, 1)
    return (train_data, train_labels, eval_data, eval_labels)
|
def gradient(x):
    """Gradient of an ndarray along the x direction.

    Thin wrapper around np.gradient(). Also note that x -> axis=1 and
    y -> axis=0.

    input:
        x: a numpy ndarray to take the gradient of.
    output:
        numpy ndarray containing the gradient in the x direction.
    """
    return np.gradient(x, axis=1)
|
def apply_threshold(x, threshold_val=10, threshold_to=0):
    """Zero out (in place) entries of `x` below a percentile cutoff.

    Args:
        x: numpy array with data to be filtered (modified in place).
        threshold_val: percentile whose |value| is used as the cutoff.
        threshold_to: value assigned to entries below the cutoff.
    """
    cutoff = np.abs(np.percentile(x.flatten(), threshold_val))
    x[x < cutoff] = threshold_to
    return x
|
def apply_clipping(x, clip_val=3, clip_to='clip_val'):
    """Symmetrically clip `x` at `clip_val` standard deviations.

    `x` itself is not z-score normalized; the normalized values only decide
    which entries to clip. Returns a clipped copy.

    Args:
        x: numpy array to clip.
        clip_val: number of std devs at which to clip.
        clip_to: 'clip_val' replaces outliers with the +/- clip_val level in
            the original units; 'mean' replaces them with the mean.
    """
    clipped = np.copy(x)
    mu = np.mean(x)
    sigma = np.std(x)
    z = (x - mu) / sigma
    mode = clip_to.lower()
    if mode == 'clip_val':
        clipped[z < -clip_val] = -clip_val * sigma + mu
        clipped[z > clip_val] = clip_val * sigma + mu
    elif mode == 'mean':
        clipped[z < -clip_val] = mu
        clipped[z > clip_val] = mu
    else:
        raise KeyError((('"clip_to" option not valid: ' + str(clip_to)) + 'Valid options: clip_val, mean'))
    return clipped
|
def autoflip_skew(data):
    """Autoflip an array based on the skew of its values.

    Multiplies by the sign of the skew of the flattened data (effective
    for gradient data).
    """
    flip = np.sign(scipy_skew(np.ravel(data)))
    return data * flip
|
def zscore_norm(x):
    """Return a z-score normalized copy of numpy ndarray `x`."""
    centered = x - x.mean()
    return centered / x.std()
|
class Preprocessor():
    """Preprocessing pipeline for subregion images.

    Applies, in order: x-gradient, optional denoising ('threshold' and/or
    'clip'), z-score normalization, optional autoflip by skew, and a resize
    to (config.SUB_SIZE, config.SUB_SIZE, 1) when needed.
    """

    def __init__(self, autoflip=False, denoising=[], clip_val=None, thresh_val=None):
        """
        inputs:
            autoflip: bool specifying whether to autoflip data.
            denoising: list of str specifying denoising to apply to data.
                Valid entries: 'threshold', 'clip'.
            clip_val: value for clipping denoising. Unused if 'clip' not in
                denoising.
            thresh_val: percentile for threshold denoising. Unused if
                'threshold' not in denoising.
        raises:
            ValueError: if denoising contains an unrecognized entry.
        """
        # NOTE: the mutable default `denoising=[]` is kept for interface
        # compatibility; it is never mutated here.
        self.autoflip = autoflip
        valid_denoising = ['threshold', 'clip']
        if (not set(denoising).issubset(valid_denoising)):
            raise ValueError('invalid denoising ', denoising, ' Valid values:', valid_denoising)
        self.denoising = denoising
        self.clip_val = clip_val
        self.thresh_val = thresh_val

    def proc_subimage(self, x):
        """Gradient -> denoise -> normalize -> autoflip -> resize one image.

        Args:
            x: an array with data.
        """
        x = gradient(x)
        if ('threshold' in self.denoising):
            # BUG FIX: previously read the nonexistent attribute
            # `self.threshold_val` (__init__ stores `thresh_val`), raising
            # AttributeError whenever threshold denoising was enabled.
            if (self.thresh_val is not None):
                x = apply_threshold(x, self.thresh_val)
            else:
                x = apply_threshold(x)
        if ('clip' in self.denoising):
            # BUG FIX: the denoised result was previously assigned to a
            # separate `grad_x` and discarded — normalization ran on the
            # un-clipped array. The denoised data now flows through.
            if (self.clip_val is not None):
                x = apply_clipping(x, self.clip_val)
            else:
                x = apply_clipping(x)
        x = zscore_norm(x)
        if self.autoflip:
            x = autoflip_skew(x)
        target_shape = (config.SUB_SIZE, config.SUB_SIZE, 1)
        if (x.shape != target_shape):
            x = skimage_resize(x, target_shape)
        return x

    def proc_subimage_set(self, x_arr):
        """Apply proc_subimage to every image in `x_arr`.

        inputs:
            x_arr: full dataset of images; first axis is the example index.
        returns:
            Full dataset of processed images as one ndarray.
        """
        return np.array([self.proc_subimage(x) for x in x_arr])
|
def cnn_model_fn(features, labels, mode):
    """Model function for CNN (tf.estimator API).

    Two conv/max-pool blocks, three dense/dropout layers, and a 3-way
    softmax head. Note the loss is mean squared error between softmaxed
    logits and the (soft) labels rather than cross entropy.
    """
    # Reshape flat features into NHWC image batches.
    input_layer = tf.cast(tf.reshape(features['x'], [(- 1), qf.SUB_SIZE, qf.SUB_SIZE, 1]), tf.float32)
    conv1 = tf.layers.conv2d(inputs=input_layer, filters=16, kernel_size=[5, 5], padding='same', activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    conv2 = tf.layers.conv2d(inputs=pool1, filters=32, kernel_size=[5, 5], padding='same', activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    pool2_flat = tf.contrib.layers.flatten(pool2)
    # Dropout is only active in TRAIN mode.
    dense0 = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dropout0 = tf.layers.dropout(inputs=dense0, rate=0.5, training=(mode == tf.estimator.ModeKeys.TRAIN))
    dense1 = tf.layers.dense(inputs=dropout0, units=512, activation=tf.nn.relu)
    dropout1 = tf.layers.dropout(inputs=dense1, rate=0.5, training=(mode == tf.estimator.ModeKeys.TRAIN))
    dense2 = tf.layers.dense(inputs=dropout1, units=256, activation=tf.nn.relu)
    dropout2 = tf.layers.dropout(inputs=dense2, rate=0.4, training=(mode == tf.estimator.ModeKeys.TRAIN))
    logits = tf.layers.dense(inputs=dropout2, units=3)
    predictions = {'state': tf.argmax(input=logits, axis=1), 'probabilities': tf.cast(tf.nn.softmax(logits, name='softmax_tensor'), tf.float64)}
    # PREDICT mode: return class/probability predictions only.
    if (mode == tf.estimator.ModeKeys.PREDICT):
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    loss = tf.losses.mean_squared_error(labels=labels, predictions=tf.nn.softmax(logits))
    # TRAIN mode: Adam step on the MSE loss.
    if (mode == tf.estimator.ModeKeys.TRAIN):
        optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    # EVAL mode: report accuracy against argmax of the (one-hot) labels.
    eval_metric_ops = {'accuracy': tf.metrics.accuracy(labels=tf.argmax(labels, axis=1), predictions=predictions['state'])}
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
|
class CosEMA(nn.Module):
    def __init__(self, total_steps, base_decay=0.996):
        """Exponential moving average used in BYOL.

        The effective decay is cosine-annealed from `base_decay` toward 1.0
        over `total_steps` steps.

        :param total_steps: total number of steps for the cosine schedule
        :param base_decay: the base ema decay used to modulate
        :returns: EMA module
        :rtype: nn.Module

        """
        super(CosEMA, self).__init__()
        self.step = 0
        self.total_steps = total_steps
        self.base_decay = base_decay
        # Registered as a buffer so the running mean is saved/moved with the
        # module; lazily initialized to zeros on the first forward.
        self.register_buffer('mean', None)
    def forward(self, x):
        """Takes an input and updates internal running mean.

        The mean is only updated (and the step counter advanced) while the
        module is in training mode.

        :param x: input tensor
        :returns: same input tensor itself [tracks internally]
        :rtype: torch.Tensor

        """
        if (self.mean is None):
            self.mean = torch.zeros_like(x)
        if self.training:
            # Cosine schedule: decay == base_decay at step 0 and approaches
            # 1.0 as step -> total_steps.
            decay = (1 - (((1 - self.base_decay) * (np.cos(((np.pi * self.step) / self.total_steps)) + 1)) / 2.0))
            self.mean = (((1 - decay) * x.detach()) + (decay * self.mean))
            self.step += 1
        return x
|
class BYOL(nn.Module):
    """Simple BYOL implementation.

    Holds an online network (backbone + projection head + predictor), a
    linear probe trained on detached representations, and a target network
    tracked as a cosine-scheduled EMA over this module's parameter vector.
    """
    def __init__(self, base_network_output_size, projection_output_size, classifier_output_size, total_training_steps, base_decay=0.996):
        """BYOL model.

        :param base_network_output_size: output-size of resnet50 embedding
        :param projection_output_size: output size of projection and prediction heads
        :param classifier_output_size: number of classes in classifier problem
        :param total_training_steps: total steps for a single training epoch
        :param base_decay: the decay for the target network
        :returns: BYOL object
        :rtype: nn.Module

        """
        super(BYOL, self).__init__()
        self.base_network_output_size = base_network_output_size
        # Backbone selected from torchvision models via the global args.arch;
        # the last child (classification layer) is stripped off.
        model_fn = models.__dict__[args.arch]
        self.base_network = nn.Sequential(*list(model_fn(pretrained=False).children())[:(- 1)])
        self.head = nn.Sequential(nn.Linear(base_network_output_size, args.head_latent_size), nn.BatchNorm1d(args.head_latent_size), nn.ReLU(), nn.Linear(args.head_latent_size, projection_output_size))
        self.predictor = nn.Sequential(nn.Linear(projection_output_size, args.head_latent_size), nn.BatchNorm1d(args.head_latent_size), nn.ReLU(), nn.Linear(args.head_latent_size, projection_output_size))
        self.linear_classifier = nn.Linear(base_network_output_size, classifier_output_size)
        # EMA over the flattened parameter vector of this whole module.
        self.target_network = CosEMA(total_training_steps, base_decay)
        # Prime the EMA buffer with the current parameters.
        self.target_network(nn.utils.parameters_to_vector(self.parameters()))
    def target_prediction(self, augmentation2):
        """Produce a prediction using the target network.

        Temporarily swaps the EMA parameter vector into the online modules,
        runs `prediction`, then restores the original parameters — the
        statement order here is load EMA -> predict -> restore.

        :param augmentation2: the second augmentation
        :returns: the same outputs as prediction
        :rtype: torch.Tensor, torch.Tensor, torch.Tensor

        """
        mean = self.target_network.mean
        original_params = nn.utils.parameters_to_vector(self.parameters())
        nn.utils.vector_to_parameters(mean, self.parameters())
        preds = self.prediction(augmentation2)
        nn.utils.vector_to_parameters(original_params, self.parameters())
        return preds
    def prediction(self, augmentation):
        """Simple helper to project a single augmentation

        :param augmentation: a single data augmentation
        :returns: representation, projection and prediction
        :rtype: torch.Tensor, torch.Tensor, torch.Tensor

        """
        representation = self.base_network(augmentation).view((- 1), self.base_network_output_size)
        projection = self.head(representation)
        prediction = self.predictor(projection)
        return (representation, projection, prediction)
    def forward(self, augmentation1, augmentation2):
        """Returns the online and target network representations, projections and predictions."""
        (online_representation1, online_projection1, online_prediction1) = self.prediction(augmentation1)
        (online_representation2, online_projection2, online_prediction2) = self.prediction(augmentation2)
        (target_representation1, target_projection1, target_prediction1) = self.target_prediction(augmentation1)
        (target_representation2, target_projection2, target_prediction2) = self.target_prediction(augmentation2)
        # The linear probe sees detached features so its gradients do not
        # flow into the encoder; during training both views are classified.
        repr_to_classifier = (torch.cat([online_representation1, online_representation2], 0) if self.training else online_representation1)
        linear_preds = self.linear_classifier(repr_to_classifier.clone().detach())
        # Update the target EMA with the current online parameters.
        self.target_network(nn.utils.parameters_to_vector(self.parameters()))
        return {'linear_preds': linear_preds, 'online_representation1': online_representation1, 'online_projection1': online_projection1, 'online_prediction1': online_prediction1, 'online_representation2': online_representation2, 'online_projection2': online_projection2, 'online_prediction2': online_prediction2, 'target_representation1': target_representation1, 'target_projection1': target_projection1, 'target_prediction1': target_prediction1, 'target_representation2': target_representation2, 'target_projection2': target_projection2, 'target_prediction2': target_prediction2}
|
def build_lr_schedule(optimizer, last_epoch=(- 1)):
    """Attach a learning-rate scheduler (with optional warmup) to the optimizer.

    :param optimizer: nn.Optimizer
    :returns: scheduler
    :rtype: optim.lr_scheduler

    """
    schedule_name = args.lr_update_schedule
    if schedule_name == 'fixed':
        sched = optim.lr_scheduler.LambdaLR(optimizer, (lambda epoch: 1.0), last_epoch=last_epoch)
    elif schedule_name == 'cosine':
        # Anneal over the epochs remaining after warmup.
        sched = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=(args.epochs - args.warmup), last_epoch=last_epoch)
    else:
        raise NotImplementedError('lr scheduler {} not implemented'.format(args.lr_update_schedule))
    if args.warmup > 0:
        # Wrap the base schedule with a linear warmup phase.
        warmup = scheduler.LinearWarmup(optimizer, warmup_steps=args.warmup, last_epoch=last_epoch)
        sched = scheduler.Scheduler(sched, warmup)
    return sched
|
def build_optimizer(model, last_epoch=(- 1)):
    """Build the optimizer (and its lr schedule) wrapping `model`.

    :param model: the model to wrap
    :returns: (optimizer, scheduler) pair wrapping the model provided
    :rtype: tuple

    """
    optim_map = {'rmsprop': optim.RMSprop, 'adam': optim.Adam, 'adadelta': optim.Adadelta, 'sgd': optim.SGD, 'momentum': functools.partial(optim.SGD, momentum=0.9), 'lbfgs': optim.LBFGS}
    params_to_optimize = layers.add_weight_decay(model, args.weight_decay)
    full_opt_name = args.optimizer.lower().strip()
    is_lars = 'lars' in full_opt_name
    if full_opt_name == 'lamb':
        assert args.half, 'Need fp16 precision to use Apex FusedLAMB.'
        optim_map['lamb'] = optimizers.fused_lamb.FusedLAMB
    # A name like 'lars_sgd' selects the base optimizer after the prefix.
    opt_name = full_opt_name.split('_')[-1] if is_lars else full_opt_name
    print('using {} optimizer {} lars.'.format(opt_name, ('with' if is_lars else 'without')))
    lr = args.lr
    if opt_name in ['momentum', 'sgd']:
        # Linear lr scaling rule for SGD-family optimizers.
        lr = args.lr * ((args.batch_size * args.num_replicas) / 256)
    opt = optim_map[opt_name](params_to_optimize, lr=lr)
    if is_lars:
        opt = LARS(opt, eps=0.0)
    sched = build_lr_schedule(opt, last_epoch=last_epoch)
    return (opt, sched)
|
def build_train_and_test_transforms():
    """Returns torchvision OR nvidia-dali transforms.

    DALI transforms are used when 'dali' appears in args.task; otherwise
    torchvision transforms. Training transforms are BYOL-style
    augmentations (random resized crop, horizontal flip, color jitter,
    grayscale, and — torchvision path only — Gaussian blur); test
    transforms are a plain resize.

    :returns: train_transforms, test_transforms
    :rtype: list, list

    """
    resize_shape = (args.image_size_override, args.image_size_override)
    if ('dali' in args.task):
        # Imported lazily so non-DALI runs do not require nvidia-dali.
        import nvidia.dali.ops as ops
        import nvidia.dali.types as types
        from datasets.dali_imagefolder import ColorJitter, RandomHorizontalFlip, RandomGrayScale
        train_transform = [ops.RandomResizedCrop(device=('gpu' if args.cuda else 'cpu'), size=resize_shape, random_area=(0.08, 1.0), random_aspect_ratio=((3.0 / 4), (4.0 / 3))), RandomHorizontalFlip(prob=0.2, cuda=args.cuda), ColorJitter(brightness=(0.8 * args.color_jitter_strength), contrast=(0.8 * args.color_jitter_strength), saturation=(0.2 * args.color_jitter_strength), hue=(0.2 * args.color_jitter_strength), prob=0.8, cuda=args.cuda), RandomGrayScale(prob=0.2, cuda=args.cuda)]
        test_transform = [ops.Resize(resize_x=resize_shape[0], resize_y=resize_shape[1], device=('gpu' if args.cuda else 'cpu'), image_type=types.RGB, interp_type=types.INTERP_LINEAR)]
    else:
        from datasets.utils import GaussianBlur
        # NOTE(review): flip/saturation probabilities and strengths differ
        # slightly from the DALI branch (e.g. flip p=0.5 here vs 0.2 above)
        # — confirm whether that asymmetry is intentional.
        train_transform = [transforms.RandomResizedCrop((args.image_size_override, args.image_size_override)), transforms.RandomHorizontalFlip(p=0.5), transforms.RandomApply([transforms.ColorJitter(brightness=(0.8 * args.color_jitter_strength), contrast=(0.8 * args.color_jitter_strength), saturation=(0.8 * args.color_jitter_strength), hue=(0.2 * args.color_jitter_strength))], p=0.8), transforms.RandomGrayscale(p=0.2), GaussianBlur(kernel_size=int((0.1 * args.image_size_override)), p=0.5)]
        test_transform = [transforms.Resize(resize_shape)]
    return (train_transform, test_transform)
|
def build_loader_model_grapher(args):
    """builds a model, a dataloader and a grapher

    Side effects: mutates `args` in place with dataset-derived fields
    (input_shape, per-replica sample counts, steps/epoch, total steps).

    :param args: argparse
    :returns: a dataloader, a grapher and a model
    :rtype: tuple

    """
    (train_transform, test_transform) = build_train_and_test_transforms()
    loader_dict = {'train_transform': train_transform, 'test_transform': test_transform, **vars(args)}
    loader = get_loader(**loader_dict)
    # Propagate dataset facts back into args; train/valid counts are
    # divided per replica for distributed training.
    args.input_shape = loader.input_shape
    args.num_train_samples = (loader.num_train_samples // args.num_replicas)
    args.num_test_samples = loader.num_test_samples
    args.num_valid_samples = (loader.num_valid_samples // args.num_replicas)
    args.steps_per_train_epoch = (args.num_train_samples // args.batch_size)
    args.total_train_steps = (args.epochs * args.steps_per_train_epoch)
    network = BYOL(base_network_output_size=args.representation_size, projection_output_size=args.projection_size, classifier_output_size=loader.output_size, total_training_steps=args.total_train_steps, base_decay=args.base_decay)
    network = (nn.SyncBatchNorm.convert_sync_batchnorm(network) if args.convert_to_sync_bn else network)
    network = (network.cuda() if args.cuda else network)
    # Run one forward pass so lazily-built modules exist before init/DDP.
    lazy_generate_modules(network, loader.train_loader)
    network = layers.init_weights(network, init=args.weight_initialization)
    if (args.num_replicas > 1):
        print('wrapping model with DDP...')
        network = layers.DistributedDataParallelPassthrough(network, device_ids=[0], output_device=0, find_unused_parameters=True)
    print(network)
    print('model has {} million parameters.'.format((utils.number_of_parameters(network) / 1000000.0)))
    # Only rank 0 gets a grapher: visdom when a URL is given, else tensorboard.
    grapher = None
    if ((args.visdom_url is not None) and (args.distributed_rank == 0)):
        grapher = Grapher('visdom', env=utils.get_name(args), server=args.visdom_url, port=args.visdom_port, log_folder=args.log_dir)
    elif (args.distributed_rank == 0):
        grapher = Grapher('tensorboard', logdir=os.path.join(args.log_dir, utils.get_name(args)))
    return (loader, network, grapher)
|
def lazy_generate_modules(model, loader):
    """ A helper to build the modules that are lazily compiled

    Runs a single minibatch through the model (in eval mode, no grad) so
    lazily-constructed submodules get instantiated, after validating that
    both augmentations lie in [0, 1].

    :param model: the nn.Module
    :param loader: the dataloader
    :returns: None
    :rtype: None

    """
    model.eval()
    # Only the first batch is used — note the `break` at the end of the loop.
    for (augmentation1, augmentation2, labels) in loader:
        with torch.no_grad():
            print('augmentation1 = {} / {} | augmentation2 = {} / {} | labels = {} / {}'.format(augmentation1.shape, augmentation1.dtype, augmentation2.shape, augmentation2.dtype, labels.shape, labels.dtype))
            (aug1_min, aug1_max) = (augmentation1.min(), augmentation1.max())
            (aug2_min, aug2_max) = (augmentation2.min(), augmentation2.max())
            print('aug1 in range [min: {}, max: {}] | aug2 in range [min: {}, max: {}]'.format(aug1_min, aug1_max, aug2_min, aug2_max))
            # Guard against un-normalized inputs (expected range [0, 1]).
            if ((aug1_max > 1.0) or (aug1_min < 0)):
                raise ValueError('aug1 max > 1.0 or aug1 min < 0. You probably dont want this.')
            if ((aug2_max > 1.0) or (aug2_min < 0)):
                raise ValueError('aug2 max > 1.0 or aug2 min < 0. You probably dont want this.')
            augmentation1 = (augmentation1.cuda(non_blocking=True) if args.cuda else augmentation1)
            augmentation2 = (augmentation2.cuda(non_blocking=True) if args.cuda else augmentation2)
            _ = model(augmentation1, augmentation2)
            break
    # Seed the polyak EMA of the parameters once the model is fully built.
    if (args.polyak_ema > 0):
        layers.polyak_ema_parameters(model, args.polyak_ema)
|
def register_plots(loss, grapher, epoch, prefix='train'):
    """ Registers line plots with grapher.

    Recurses into nested dicts; any key containing 'mean' or 'scalar' is
    logged as a scalar named '<prefix>_<key-without-suffix>'. Only rank 0
    with a live grapher logs anything.

    :param loss: the dict containing '*_mean' or '*_scalar' values
    :param grapher: the grapher object
    :param epoch: the current epoch
    :param prefix: prefix to append to the plot
    :returns: None
    :rtype: None

    """
    if args.distributed_rank != 0 or grapher is None:
        return
    for key, val in loss.items():
        if isinstance(val, dict):
            register_plots(val, grapher, epoch, prefix=prefix)
        if ('mean' in key) or ('scalar' in key):
            plot_name = '-'.join(key.split('_')[:-1])
            scalar = val if isinstance(val, (float, np.float32, np.float64)) else val.item()
            grapher.add_scalar('{}_{}'.format(prefix, plot_name), scalar, epoch)
|
def register_images(output_map, grapher, prefix='train'):
    """ Registers image with grapher. Overwrites the existing image due to space.

    Recurses into nested dicts; any key containing 'img' or 'imgs' has its
    value rendered as a normalized grid and logged under
    '<prefix>_<key-without-suffix>'. Only rank 0 with a live grapher logs.

    :param output_map: the dict containing '*_img' of '*_imgs' as keys
    :param grapher: the grapher object
    :param prefix: prefix to attach to images
    :returns: None
    :rtype: None

    """
    if args.distributed_rank != 0 or grapher is None:
        return
    for key, val in output_map.items():
        if isinstance(val, dict):
            register_images(val, grapher, prefix=prefix)
        if ('img' in key) or ('imgs' in key):
            image_name = '-'.join(key.split('_')[:-1])
            grid = torchvision.utils.make_grid(val, normalize=True, scale_each=True)
            grapher.add_image('{}_{}'.format(prefix, image_name), grid.detach(), global_step=0)
|
def _extract_sum_scalars(v1, v2):
'Simple helper to sum values in a struct using dm_tree.'
def chk(c):
'Helper to check if we have a primitive or tensor'
return (not isinstance(c, (int, float, np.int32, np.int64, np.float32, np.float64)))
v1_detached = (v1.detach() if chk(v1) else v1)
v2_detached = (v2.detach() if chk(v2) else v2)
return (v1_detached + v2_detached)
|
def execute_graph(epoch, model, loader, grapher, optimizer=None, prefix='test'):
    """ execute the graph; when 'train' is in the prefix the model runs the optimizer

    :param epoch: the current epoch number
    :param model: the torch model
    :param loader: the train or **TEST** loader
    :param grapher: the graph writing helper (eg: visdom / tf wrapper)
    :param optimizer: the optimizer (required iff training)
    :param prefix: 'train', 'test' or 'valid'
    :returns: the mean epoch loss as a plain float
    :rtype: float

    """
    start_time = time.time()
    is_eval = ('train' not in prefix)
    (model.eval() if is_eval else model.train())
    # An optimizer must be supplied exactly when training.
    assert ((optimizer is None) if is_eval else (optimizer is not None))
    (loss_map, num_samples) = ({}, 0)
    for (num_minibatches, (augmentation1, augmentation2, labels)) in enumerate(loader):
        # Move the two augmented views (and labels) to GPU when enabled.
        augmentation1 = (augmentation1.cuda(non_blocking=True) if args.cuda else augmentation1)
        augmentation2 = (augmentation2.cuda(non_blocking=True) if args.cuda else augmentation2)
        labels = (labels.cuda(non_blocking=True) if args.cuda else labels)
        # Disable autograd only during evaluation.
        with (torch.no_grad() if is_eval else utils.dummy_context()):
            if (is_eval and (args.polyak_ema > 0)):
                # Evaluate with the polyak-EMA weights when EMA is enabled.
                output_dict = layers.get_polyak_prediction(model, pred_fn=functools.partial(model, augmentation1, augmentation2))
            else:
                output_dict = model(augmentation1, augmentation2)
            # BYOL loss between online predictions and target projections.
            byol_loss = loss_function(online_prediction1=output_dict['online_prediction1'], online_prediction2=output_dict['online_prediction2'], target_projection1=output_dict['target_projection1'], target_projection2=output_dict['target_projection2'])
            # During training the linear head sees both augmentations concatenated,
            # so the labels are duplicated to match.
            classifier_labels = (labels if is_eval else torch.cat([labels, labels], 0))
            classifier_loss = F.cross_entropy(input=output_dict['linear_preds'], target=classifier_labels)
            (acc1, acc5) = metrics.topk(output=output_dict['linear_preds'], target=classifier_labels, topk=(1, 5))
            loss_t = {'loss_mean': (byol_loss + classifier_loss), 'byol_loss_mean': byol_loss, 'linear_loss_mean': classifier_loss, 'top1_mean': acc1, 'top5_mean': acc5}
            # Accumulate per-minibatch scalars (detached inside the helper);
            # averaged over minibatches after the loop.
            loss_map = (loss_t if (not loss_map) else tree.map_structure(_extract_sum_scalars, loss_map, loss_t))
            num_samples += augmentation1.size(0)
        if (not is_eval):
            optimizer.zero_grad()
            if args.half:
                # Mixed-precision backward via AMP loss scaling.
                with amp.scale_loss(loss_t['loss_mean'], optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss_t['loss_mean'].backward()
            if (args.clip > 0):
                nn.utils.clip_grad_value_(model.parameters(), args.clip)
            optimizer.step()
            if (args.polyak_ema > 0):
                # Update the EMA copy of the weights after each optimizer step.
                layers.polyak_ema_parameters(model, args.polyak_ema)
        del loss_t
        if args.debug_step:
            # Debug mode: run a single minibatch per epoch.
            break
    # Average the accumulated sums over the number of minibatches.
    loss_map = tree.map_structure((lambda v: (v / (num_minibatches + 1))), loss_map)
    to_log = '{}-{}[Epoch {}][{} samples][{:.2f} sec]:\t Loss: {:.4f}\tTop-1: {:.4f}\tTop-5: {:.4f}'
    print(to_log.format(prefix, args.distributed_rank, epoch, num_samples, (time.time() - start_time), loss_map['loss_mean'].item(), loss_map['top1_mean'].item(), loss_map['top5_mean'].item()))
    register_plots({**loss_map}, grapher, epoch=epoch, prefix=prefix)
    # Post a small, resized grid of sample augmentations from the last minibatch.
    num_images_to_post = min(64, augmentation1.shape[0])
    image_size_to_post = min(64, augmentation1.shape[(- 1)])
    image_map = {'augmentation1_imgs': F.interpolate(augmentation1[0:num_images_to_post], size=(image_size_to_post, image_size_to_post)), 'augmentation2_imgs': F.interpolate(augmentation2[0:num_images_to_post], size=(image_size_to_post, image_size_to_post))}
    register_images({**image_map}, grapher, prefix=prefix)
    if (grapher is not None):
        grapher.save()
    # Return a plain float and release the (possibly large) dict.
    loss_val = loss_map['loss_mean'].detach().item()
    loss_map.clear()
    return loss_val
|
def train(epoch, model, optimizer, train_loader, grapher, prefix='train'):
    """Run one training epoch by delegating to execute_graph with an optimizer.

    :param epoch: the current epoch
    :param model: the model
    :param optimizer: the optimizer stepped once per minibatch
    :param train_loader: the train data-loader
    :param grapher: the grapher object
    :param prefix: the default prefix; useful if we have multiple training types
    :returns: mean epoch loss scalar
    :rtype: float32
    """
    return execute_graph(epoch, model, train_loader, grapher, optimizer, prefix='train')
|
def test(epoch, model, test_loader, grapher, prefix='test'):
    """Run one evaluation epoch by delegating to execute_graph (no optimizer).

    :param epoch: the current epoch
    :param model: the model
    :param test_loader: the test data-loader
    :param grapher: the grapher object
    :param prefix: the default prefix; useful if we have multiple test types
    :returns: mean epoch loss scalar
    :rtype: float32
    """
    return execute_graph(epoch, model, test_loader, grapher, prefix='test')
|
def init_multiprocessing_and_cuda(rank, args_from_spawn):
    'Sets the appropriate flags for multi-process jobs.'
    if args_from_spawn.multi_gpu_distributed:
        # Pin this replica to a single GPU by masking out all the others.
        os.environ['CUDA_VISIBLE_DEVICES'] = str(rank)
    args_from_spawn.distributed_rank = rank
    args_from_spawn.cuda = ((not args_from_spawn.no_cuda) and torch.cuda.is_available())
    if args_from_spawn.cuda:
        # cudnn autotuner: fine here since input shapes are fixed per run.
        torch.backends.cudnn.benchmark = True
        print('Replica {} / {} using GPU: {}'.format((rank + 1), args_from_spawn.num_replicas, torch.cuda.get_device_name(0)))
    if (args_from_spawn.seed is not None):
        # Seed numpy and torch (and all CUDA devices) for reproducibility.
        print(('setting seed %d' % args_from_spawn.seed))
        np.random.seed(args_from_spawn.seed)
        torch.manual_seed(args_from_spawn.seed)
        if args_from_spawn.cuda:
            torch.cuda.manual_seed_all(args_from_spawn.seed)
    if (args_from_spawn.num_replicas > 1):
        # NCCL process group; MASTER_ADDR is expected to hold the init method URL.
        torch.distributed.init_process_group(backend='nccl', init_method=os.environ['MASTER_ADDR'], world_size=args_from_spawn.num_replicas, rank=rank)
        print('Successfully created DDP process group!')
        # Split the global batch size evenly across replicas.
        args_from_spawn.batch_size = (args_from_spawn.batch_size // args_from_spawn.num_replicas)
    # Publish the per-process args object as the module-level global used
    # throughout this file (register_plots, execute_graph, etc.).
    global args
    args = args_from_spawn
|
def run(rank, args):
    """ Main entry-point into the program

    :param rank: current device rank
    :param args: argparse
    :returns: None
    :rtype: None

    """
    init_multiprocessing_and_cuda(rank, args)
    (loader, model, grapher) = build_loader_model_grapher(args)
    print(pprint.PrettyPrinter(indent=4).pformat(vars(args)))
    (optimizer, scheduler) = build_optimizer(model)
    if args.half:
        # AMP mixed-precision (O2) wraps both model and optimizer.
        (model, optimizer) = amp.initialize(model, optimizer, opt_level='O2')
    model = layers.append_save_and_load_fns(model, optimizer, scheduler, grapher, args)
    # Early stopping only becomes active after a 10%-of-epochs burn-in.
    saver = layers.ModelSaver(model, early_stop=args.early_stop, rank=args.distributed_rank, burn_in_interval=int((0.1 * args.epochs)), larger_is_better=False, max_early_stop_steps=10)
    # Resume from the latest checkpoint if one exists.
    restore_dict = saver.restore()
    init_epoch = restore_dict['epoch']
    for epoch in range(init_epoch, (args.epochs + 1)):
        train(epoch, model, optimizer, loader.train_loader, grapher)
        test_loss = test(epoch, model, loader.test_loader, grapher)
        loader.set_all_epochs(epoch)
        scheduler.step()
        register_plots({'learning_rate_scalar': optimizer.param_groups[0]['lr']}, grapher, epoch)
        if saver(test_loss):
            # Early stop: reload the best checkpoint, evaluate once more, halt.
            saver.restore()
            test_loss = test(epoch, model, loader.test_loader, grapher)
            break
        if ((epoch == 2) and (args.distributed_rank == 0)):
            # Post the full config (plus SLURM job id, if any) to the grapher once.
            config_to_post = vars(args)
            slurm_id = utils.get_slurm_id()
            if (slurm_id is not None):
                config_to_post['slurm_job_id'] = slurm_id
            grapher.add_text('config', pprint.PrettyPrinter(indent=4).pformat(config_to_post), 0)
    if (grapher is not None):
        grapher.close()
|
def regression_loss(x, y):
    """BYOL regression loss: -2 * cosine similarity per sample.

    Fix: the original used `x.norm()` / `y.norm()`, which is the norm of the
    ENTIRE (batched) tensor, so batched inputs were not normalized per-sample
    as the BYOL loss requires. Norms are now taken along the feature axis.

    :param x: online-network predictions, shape [..., D]
    :param y: target-network projections, shape [..., D]
    :returns: per-sample loss, shape [...]
    """
    norm_x = x.norm(dim=(- 1))
    norm_y = y.norm(dim=(- 1))
    return (((- 2) * torch.sum((x * y), dim=(- 1))) / (norm_x * norm_y))
|
def loss_function(online_prediction1, online_prediction2, target_projection1, target_projection2):
    """BYOL loss.

    :param online_prediction1: the output of the final MLP of the online model for augmentation 1
    :param online_prediction2: the output of the final MLP of the online model for augmentation 2
    :param target_projection1: the output of the second-to-last MLP of the target model for augmentation 1
    :param target_projection2: the output of the second-to-last MLP of the target model for augmentation 2
    :returns: scalar loss
    :rtype: float32

    """
    # Symmetrized loss: each online prediction regresses the *other* view's
    # target projection. Targets are detached so gradients only flow through
    # the online network.
    view1_loss = regression_loss(online_prediction1, target_projection2.detach())
    view2_loss = regression_loss(online_prediction2, target_projection1.detach())
    return torch.mean(view1_loss + view2_loss)
|
class LARS(Optimizer):
    """Implements 'LARS (Layer-wise Adaptive Rate Scaling)'__ as a
    :class:`~torch.optim.Optimizer` wrapper.

    __ : https://arxiv.org/abs/1708.03888

    Wraps an arbitrary optimizer like :class:`torch.optim.SGD` to use LARS. If
    you want the same performance obtained with small-batch training when
    you use large-batch training, LARS will be helpful::

    Args:
        optimizer (Optimizer):
            optimizer to wrap
        eps (float, optional):
            epsilon to help with numerical stability while calculating the
            adaptive learning rate
        trust_coef (float, optional):
            trust coefficient for calculating the adaptive learning rate

    Example::
        base_optimizer = optim.SGD(model.parameters(), lr=0.1)
        optimizer = LARS(optimizer=base_optimizer)

        output = model(input)
        loss = loss_fn(output, target)
        loss.backward()

        optimizer.step()

    """

    def __init__(self, optimizer, eps=1e-08, trust_coef=0.001):
        # Validate hyper-parameters up front.
        if (eps < 0.0):
            raise ValueError(('invalid epsilon value: , %f' % eps))
        if (trust_coef < 0.0):
            raise ValueError(('invalid trust coefficient: %f' % trust_coef))
        # NOTE: Optimizer.__init__ is deliberately not called; param_groups and
        # state are proxied to the wrapped optimizer via the properties below.
        self.optim = optimizer
        self.eps = eps
        self.trust_coef = trust_coef

    def __getstate__(self):
        # Pickle as (wrapped optimizer, LARS hyper-parameters).
        lars_dict = {}
        lars_dict['eps'] = self.eps
        lars_dict['trust_coef'] = self.trust_coef
        return (self.optim, lars_dict)

    def __setstate__(self, state):
        (self.optim, lars_dict) = state
        self.eps = lars_dict['eps']
        self.trust_coef = lars_dict['trust_coef']

    def __repr__(self):
        return ('%s(%r)' % (self.__class__.__name__, self.optim))

    @property
    def param_groups(self):
        # Delegate to the wrapped optimizer so schedulers see the real groups.
        return self.optim.param_groups

    @property
    def state(self):
        return self.optim.state

    def state_dict(self):
        return self.optim.state_dict()

    def load_state_dict(self, state_dict):
        self.optim.load_state_dict(state_dict)

    def zero_grad(self):
        self.optim.zero_grad()

    def add_param_group(self, param_group):
        self.optim.add_param_group(param_group)

    def apply_adaptive_lrs(self):
        # Scale each parameter's gradient in place by the LARS adaptive rate.
        with torch.no_grad():
            for group in self.optim.param_groups:
                weight_decay = group['weight_decay']
                # Groups may opt out of LARS scaling via an 'ignore' flag.
                ignore = group.get('ignore', None)
                for p in group['params']:
                    if (p.grad is None):
                        continue
                    if (weight_decay > 0):
                        # Fold weight decay into the gradient before the norms.
                        p.grad = p.grad.add(p, alpha=weight_decay)
                    if ((ignore is not None) and (not ignore)):
                        param_norm = p.norm()
                        grad_norm = p.grad.norm()
                        # Default to no rescaling when either norm is zero.
                        adaptive_lr = 1.0
                        if ((param_norm > 0) and (grad_norm > 0)):
                            adaptive_lr = ((self.trust_coef * param_norm) / (grad_norm + self.eps))
                        p.grad = p.grad.mul(adaptive_lr)

    def step(self, *args, **kwargs):
        self.apply_adaptive_lrs()
        # Weight decay was already folded into the gradients above, so zero it
        # for the inner step and restore the original values afterwards.
        weight_decay_orig = [group['weight_decay'] for group in self.optim.param_groups]
        for group in self.optim.param_groups:
            group['weight_decay'] = 0
        loss = self.optim.step(*args, **kwargs)
        for (group, wo) in zip(self.optim.param_groups, weight_decay_orig):
            group['weight_decay'] = wo
        return loss
|
class Scheduler(object):
    'Simple container dispatching between an optional warmup scheduler and the main one.'

    def __init__(self, normal_schededuler, warmup_scheduler=None):
        # NOTE: 'normal_schededuler' (sic) keeps the original public kwarg name.
        self.warmup = warmup_scheduler
        self.sched = normal_schededuler

    def _warming_up(self):
        'True while a warmup scheduler exists and has not yet completed.'
        return (self.warmup is not None) and (not self.warmup.complete)

    def get_last_lr(self):
        ' Return last computed learning rate by current scheduler.'
        active = self.warmup if self._warming_up() else self.sched
        return active.get_last_lr()

    def state_dict(self):
        'Returns the state of each scheduler as a :class:`dict`.'
        warmup_state = self.warmup.state_dict() if (self.warmup is not None) else {}
        return {'warmup': warmup_state, 'sched': self.sched.state_dict()}

    def load_state_dict(self, state_dict):
        """Loads the schedulers state.

        Arguments:
            state_dict (dict): scheduler state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        if self.warmup:
            self.warmup.load_state_dict(state_dict['warmup'])
        self.sched.load_state_dict(state_dict['sched'])

    def step(self, *args, **kwargs):
        'Step the warmup scheduler until it completes, then the main scheduler.'
        active = self.warmup if self._warming_up() else self.sched
        return active.step(*args, **kwargs)
|
class LinearWarmup(LambdaLR):
    """ Linear warmup and then constant.
    Linearly increases learning rate schedule from 0 to 1 over `warmup_steps` training steps.
    Keeps learning rate schedule equal to 1. after warmup_steps.

    From https://bit.ly/39o2W1f
    """

    def __init__(self, optimizer, warmup_steps, last_epoch=(- 1)):
        self.warmup_steps = warmup_steps
        # Flipped to True the first time lr_lambda is queried past warmup.
        self.complete = False
        super(LinearWarmup, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        'LR multiplier: step/warmup_steps during warmup, 1.0 afterwards.'
        if step >= self.warmup_steps:
            self.complete = True
            return 1.0
        # max(1.0, ...) guards against division by zero when warmup_steps == 0.
        return float(step) / float(max(1.0, self.warmup_steps))
|
def load_model(model, args):
    """Load a causal LM (custom YaRN variants or stock HF) with RoPE-scaling config applied.

    :param model: model name or path passed to `from_pretrained`
    :param args: parsed CLI namespace produced by `add_args`
    :returns: the loaded model, optionally 4/8-bit quantized
    """
    # Select the model/config classes: custom YaRN implementations or HF auto-classes.
    if args.custom_model:
        from scaled_rope.modeling_llama_yarn import LlamaForCausalLM
        from scaled_rope.configuration_llama import LlamaConfig
        model_cls = LlamaForCausalLM
        config_cls = LlamaConfig
    elif args.custom_model_together:
        from scaled_rope.modeling_llama_together_yarn import LlamaForCausalLM
        from scaled_rope.configuration_llama import LlamaConfig
        model_cls = LlamaForCausalLM
        config_cls = LlamaConfig
    elif args.custom_model_mistral:
        from scaled_rope.modeling_mistral_yarn import MistralForCausalLM
        from scaled_rope.configuration_mistral import MistralConfig
        model_cls = MistralForCausalLM
        config_cls = MistralConfig
    else:
        model_cls = AutoModelForCausalLM
        config_cls = AutoConfig
    config = config_cls.from_pretrained(model, trust_remote_code=(not args.custom_model))
    # Apply CLI overrides onto the pretrained config.
    if args.max_position_embeddings:
        config.max_position_embeddings = args.max_position_embeddings
    if args.factor:
        # NOTE(review): assumes config.rope_scaling is already a dict when
        # --factor is given without a scaling-type flag — confirm with callers.
        config.rope_scaling['factor'] = args.factor
    if args.no_use_cache:
        config.use_cache = False
    else:
        config.use_cache = True
    if args.sliding_window_attention:
        config.sliding_window = args.sliding_window_attention
    if (args.custom_model or args.custom_model_together or args.custom_model_mistral):
        # The custom model classes read scaling directly from config.rope_scaling.
        if args.linear:
            config.rope_scaling = {'type': 'linear', 'factor': args.linear}
        elif args.dynamic_ntk:
            config.rope_scaling = {'type': 'dynamic', 'factor': args.dynamic_ntk}
        elif args.part_ntk:
            config.rope_scaling = {'type': 'ntk-by-parts', 'factor': args.part_ntk}
        elif args.yarn:
            config.rope_scaling = {'type': 'yarn', 'factor': args.yarn, 'original_max_position_embeddings': args.original_max_position_embeddings}
        elif args.dynamic_yarn:
            # Fall back to any pre-existing rope_scaling values for omitted flags.
            config.rope_scaling = {'type': 'dynamic-yarn', 'factor': (args.factor if args.factor else (config.rope_scaling.get('factor', 1.0) if (config.rope_scaling is not None) else 1.0)), 'original_max_position_embeddings': (args.original_max_position_embeddings if args.original_max_position_embeddings else config.rope_scaling['original_max_position_embeddings']), 'finetuned': (args.finetuned if args.finetuned else (config.rope_scaling.get('finetuned', False) if (config.rope_scaling is not None) else False))}
    elif args.rerope:
        # ReRoPE is a runtime monkey-patch on the stock Llama attention forward.
        assert ((not args.custom_model) and (not args.custom_model_together))
        from transformers.models.llama.modeling_llama import LlamaAttention
        from scaled_rope.LlamaReRoPE import forward_with_rerope
        LlamaAttention.forward = forward_with_rerope
    if (args.load_in_8bit or args.load_in_4bit):
        # bitsandbytes quantized load; dtype is governed by the quantization config.
        quantization_config = BitsAndBytesConfig(load_in_4bit=args.load_in_4bit, load_in_8bit=args.load_in_8bit, llm_int8_threshold=6.0, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type='nf4')
        torch_dtype = None
        config.pretraining_tp = 1
    else:
        quantization_config = None
        torch_dtype = torch.bfloat16
    loaded = model_cls.from_pretrained(model, torch_dtype=torch_dtype, device_map='auto', trust_remote_code=(not args.custom_model), config=config, quantization_config=quantization_config, use_flash_attention_2=args.flash_attention)
    return loaded
|
def add_args(parser: ArgumentParser):
    'Attach the shared RoPE-scaling / model-loading CLI flags to parser and return it.'
    # (flag, kwargs) pairs; order mirrors the original definition order so
    # --help output is unchanged.
    specs = [
        ('--dynamic-linear', dict(action='store_true')),
        ('--dynamic-ntk', dict(type=float)),
        ('--dynamic-part-ntk', dict(action='store_true')),
        ('--dynamic-yarn', dict(action='store_true')),
        ('--ntk', dict(type=float)),
        ('--part-ntk', dict(type=float)),
        ('--linear', dict(type=float)),
        ('--yarn', dict(type=float)),
        ('--rerope', dict(type=float)),
        ('--factor', dict(type=float)),
        ('--load-in-8bit', dict(action='store_true')),
        ('--load-in-4bit', dict(action='store_true')),
        ('--finetuned', dict(action='store_true')),
        ('--gpt-neox-max-length', dict(type=int)),
        ('--adapter', dict(type=str)),
        ('--max-position-embeddings', dict(type=int)),
        ('--original-max-position-embeddings', dict(type=int)),
        ('--sliding-window-attention', dict(type=int)),
        ('--custom-model', dict(action='store_true')),
        ('--custom-model-together', dict(action='store_true')),
        ('--custom-model-mistral', dict(action='store_true')),
        ('--flash-attention', dict(action='store_true')),
        ('--no-use-cache', dict(action='store_true')),
    ]
    for flag, kwargs in specs:
        parser.add_argument(flag, **kwargs)
    return parser
|
def apply_patches(model, args):
    """Apply the requested RoPE-scaling monkey-patches to a stock HF model.

    Patching is skipped entirely for the custom model classes (they implement
    scaling natively via their config). A PEFT adapter, when given, is merged
    into the base weights at the end.

    Fixes: the `--rerope` branch previously raised "Unsupported ... for YaRN"
    instead of "for ReRoPE", and several error messages misspelled 'dynamic'
    as 'dyanmic'.

    :param model: the loaded causal-LM model
    :param args: parsed CLI namespace produced by `add_args`
    :returns: the patched (and possibly adapter-merged) model
    :raises RuntimeError: when a scaling flag targets an unsupported architecture
    """
    if ((not args.custom_model) and (not args.custom_model_together) and (not args.custom_model_mistral)):
        if ('GPTNeoXForCausalLM' in model.config.architectures):
            # GPT-NeoX needs its buffers resized before any rotary patching.
            assert (args.gpt_neox_max_length is not None)
            patch_gptneox_for_longer_sequences(model, args.gpt_neox_max_length)
        if args.dynamic_linear:
            if ('GPTNeoXForCausalLM' in model.config.architectures):
                patch_gptneox_for_scaled_rotary_embeddings(model)
            elif ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_dynamic_scaled_rotary_embeddings(model)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic linear')
        elif args.dynamic_ntk:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_dynamic_scaled_rotary_embeddings(model, ntk=args.dynamic_ntk)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic ntk')
        elif args.dynamic_part_ntk:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_dynamic_part_ntk_rotary_embeddings(model, args.finetuned)
            elif ('RWForCausalLM' in model.config.architectures):
                patch_falcon_for_dynamic_part_ntk_rotary_embeddings(model)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic part ntk')
        elif args.dynamic_yarn:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_dynamic_yarn_rotary_embeddings(model, args.original_max_position_embeddings, args.finetuned)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic yarn')
        elif args.ntk:
            if ('GPTNeoXForCausalLM' in model.config.architectures):
                patch_gptneox_for_ntk_scaled_rotary_embeddings(model, args.ntk)
            elif ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_ntk_scaled_rotary_embeddings(model, args.ntk)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for ntk')
        elif args.linear:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_linear_scaled_rotary_embeddings(model, scale=args.linear)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for linear')
        elif args.part_ntk:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_part_ntk_scaled_rotary_embeddings(model, scale=args.part_ntk)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for part ntk')
        elif args.yarn:
            if ('LlamaForCausalLM' in model.config.architectures):
                patch_llama_for_yarn_scaled_rotary_embeddings(model, scale=args.yarn, original_max_position_embeddings=args.original_max_position_embeddings)
            else:
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for YaRN')
        elif args.rerope:
            if ('LlamaForCausalLM' in model.config.architectures):
                training_length = (args.original_max_position_embeddings if args.original_max_position_embeddings else 4096)
                window = args.rerope
                patch_llama_for_rerope(model, training_length=training_length, window=window)
            else:
                # Fixed: previously said 'for YaRN' (copy-paste error).
                raise RuntimeError(f'Unsupported architecture {model.config.architectures} for ReRoPE')
    if args.adapter:
        # Merge the LoRA/PEFT adapter weights into the base model.
        from peft import PeftModel
        model = PeftModel.from_pretrained(model, args.adapter)
        model = model.merge_and_unload()
    return model
|
def load_model_and_apply_patches(model, args):
    'Convenience wrapper: load the model, then apply any requested RoPE patches.'
    loaded = load_model(model, args)
    return apply_patches(loaded, args)
|
def generate_prompt(n_garbage):
    'Generates a text file and inserts an execute line at a random position.'
    # RNG call order is significant for reproducibility with a fixed seed:
    # first the split position, then the pass key.
    prefix_len = random.randint(0, n_garbage)
    suffix_len = n_garbage - prefix_len
    task_description = 'There is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there.'
    garbage = 'The grass is green. The sky is blue. The sun is yellow. Here we go. There and back again.'
    filler = ' '.join([garbage] * 10000)
    assert len(filler) >= n_garbage
    pass_key = random.randint(1, 50000)
    information_line = f'The pass key is {pass_key}. Remember it. {pass_key} is the pass key.'
    final_question = 'What is the pass key? The pass key is'
    # Both filler chunks are prefixes of the same repeated garbage string.
    parts = [task_description, filler[:prefix_len], information_line, filler[:suffix_len], final_question]
    return '\n'.join(parts), pass_key
|
def test_model(pipe, prompt_text, pass_key):
response = pipe(prompt_text, num_return_sequences=1, max_new_tokens=10)[0]['generated_text'][len(prompt_text):]
assert (f'The pass key is {pass_key}' in prompt_text)
try:
pass_key = int(re.search('\\d+', response).group())
except:
pass_key = response[:20]
return pass_key
|
def main(args):
    """Pass-key retrieval benchmark: measure each model's retrieval accuracy at
    a series of increasing prompt lengths and optionally write a CSV of results.
    """
    models = [x[0] for x in args.model]
    tokenizer = AutoTokenizer.from_pretrained(models[0], model_max_length=sys.maxsize, padding_side='right', trust_remote_code=True)
    if args.fixed_length:
        # Single fixed garbage length; report its tokenized size.
        lengths = [args.fixed_length]
        tokens = [len(tokenizer.encode(generate_prompt(args.fixed_length)[0]))]
        print(f'Prompt is {tokens[0]} tokens')
    else:
        # Build the list of target token counts: fixed steps or doubling.
        if args.tokens_step:
            tokens = [x for x in range(args.min_tokens, (args.max_tokens + 1), args.tokens_step)]
        else:
            tokens = [args.min_tokens]
            while (args.min_tokens < args.max_tokens):
                point = (tokens[(- 1)] * 2)
                if (point <= args.max_tokens):
                    tokens.append(point)
                else:
                    break
        # For each token target, find the largest garbage length whose prompt
        # stays below the target token count (monotone search from last_n).
        lengths = []
        last_n = 0
        for target in tqdm(tokens, desc='Determining sequence lengths'):
            num_tokens = 0
            n = last_n
            while (num_tokens < target):
                last_n = n
                n += args.length_step
                prompt = generate_prompt(n)[0]
                num_tokens = len(tokenizer.encode(prompt))
            lengths.append(last_n)
    results = []
    for model in tqdm(models, desc='Model', leave=False):
        torch.cuda.empty_cache()
        loaded = load_model_and_apply_patches(model, args)
        pipe = pipeline('text-generation', model=loaded, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id)
        # result[i] = success count (then fraction) at lengths[i].
        result = ([0] * len(lengths))
        for (i, length) in tenumerate(lengths, desc='Lengths', leave=False):
            for _ in trange(0, args.iterations, desc='Iterations', leave=False):
                (prompt_text, pass_key) = generate_prompt(length)
                num_tokens = len(pipe.tokenizer.encode(prompt_text))
                answer = test_model(pipe, prompt_text, pass_key)
                if (answer == pass_key):
                    result[i] += 1
            # Convert success count to an accuracy fraction.
            result[i] /= args.iterations
            print(f'{model}: {tokens[i]}={int((result[i] * 100))}%')
        # Prepend the model name so each row is self-describing in the CSV.
        result.insert(0, model)
        results.append(result)
    if args.output_file:
        # CSV: header row of token counts, then one row per model.
        with open(args.output_file, 'w', encoding='utf-8') as f:
            f.write(f''',{','.join([str(x) for x in tokens])}
''')
            for result in results:
                f.write(f'''{','.join([str(x) for x in result])}
''')
|
def order(i):
    """Return `i` with its English ordinal suffix (1st, 2nd, 3rd, 4th, 11th, ...).

    Fixes two bugs in the original: the teens guards compared `i % 10` against
    11/12/13 (vacuously true, so order(11) -> '11st', order(12) -> '12nd'),
    and the 'rd' branch tested `i % 19` instead of `i % 10`.
    """
    # 11, 12 and 13 take 'th' despite ending in 1/2/3.
    if (i % 100) in (11, 12, 13):
        return str(i) + 'th'
    return str(i) + {1: 'st', 2: 'nd', 3: 'rd'}.get(i % 10, 'th')
|
def generate_prompt(docs, num_keys=1):
    'Hide `num_keys` pass keys at random positions inside `docs` and ask the model for one of them.'
    task_description = 'There is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there.'
    # RNG call order preserved: keys first, then positions, then the query index.
    pass_keys = [random.randint(1, 50000) for _ in range(num_keys)]
    key_positions = sorted([random.randint(1, len(docs)) for _ in range(num_keys)])
    information_lines = [f'The {order((i + 1))} pass key is {pass_key}. Remember it. {pass_key} is the {order((i + 1))} pass key.' for (i, pass_key) in enumerate(pass_keys)]
    retrieve_number = random.randint(0, (num_keys - 1))
    final_question = f'What is the {order((retrieve_number + 1))} pass key? The {order((retrieve_number + 1))} pass key is'
    # Interleave filler chunks with the information lines at their positions.
    chunks = [task_description]
    cursor = 0
    for info_line, insert_at in zip(information_lines, key_positions):
        chunks.append(''.join(docs[cursor:insert_at]))
        chunks.append(info_line)
        cursor = insert_at
    chunks.append(''.join(docs[cursor:]))
    chunks.append(final_question)
    return '\n'.join(chunks), pass_keys, key_positions, retrieve_number
|
def test_model(pipe, prompt_text):
response = pipe(prompt_text, num_return_sequences=1, max_new_tokens=10)[0]['generated_text'][len(prompt_text):]
try:
pass_key = int(re.search('\\d+', response).group())
except:
pass_key = response[:20]
return (pass_key, response)
|
def construct_junk(data, length, tokenizer):
    'Sample random documents until roughly `length` tokens of filler text are collected; returns a list of per-token text pieces.'
    length = length or 8192
    docs = []
    token_count = 0
    while token_count < length:
        sample = random.choice(data)['text']
        toks = tokenizer(sample, return_offsets_mapping=True)
        # Keep only non-empty spans (special tokens map to start == end).
        spans = [(start, end) for (start, end) in toks['offset_mapping'] if start < end]
        take = min(length - token_count, len(spans))
        docs.extend(sample[start:end] for (start, end) in spans[:take])
        token_count += take
    return docs
|
def main(args):
    """Multi-key pass-key retrieval: hide several keys inside sampled filler
    text, ask the model to recall one of them, and optionally restrict decoding
    to digit tokens via a forward hook. Results are dumped as JSON.
    """
    models = [x[0] for x in args.model]
    tokenizer = AutoTokenizer.from_pretrained(models[0], model_max_length=sys.maxsize, padding_side='right', trust_remote_code=True)
    data = load_dataset(args.dataset)[args.split]
    # One shared filler corpus is reused for every iteration and model.
    junks = construct_junk(data, args.fixed_length, tokenizer)
    if args.restrict_tokens:
        # Build the set of token ids allowed during decoding: digits, EOS,
        # and pure whitespace tokens (SentencePiece uses '▁' as word-boundary).
        vocab = tokenizer.vocab
        escape_char = '▁'
        digit_tokens = [vocab[a] for a in vocab.keys() if a.lstrip(escape_char).isdigit()]
        digit_tokens.append(vocab[tokenizer.eos_token])
        extra = [vocab[a] for a in vocab.keys() if (a.strip((' \n' + escape_char)) == '')]
        digit_tokens.extend(extra)
        # mask is True for every token id that must be suppressed.
        mask = torch.ones(tokenizer.vocab_size, dtype=torch.bool)
        mask[digit_tokens] = 0
        def filter_digits(module, input, output):
            # Forward hook: push disallowed logits to a large negative value.
            output.logits[(..., mask[:output.logits.size((- 1))])] = (- 10000.0)
        print(f'Decoding restricted to {len(digit_tokens)} tokens.')
    results = []
    success_count = 0
    for model in tqdm(models, desc='Model', leave=False):
        torch.cuda.empty_cache()
        loaded = load_model_and_apply_patches(model, args)
        if args.restrict_tokens:
            loaded.register_forward_hook(filter_digits)
        pipe = pipeline('text-generation', model=loaded, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id)
        for _ in trange(0, args.iterations, desc='Iterations', leave=False):
            (prompt_text, pass_keys, start_pos, target) = generate_prompt(junks, args.num_keys)
            num_tokens = len(pipe.tokenizer.encode(prompt_text))
            (answer, return_text) = test_model(pipe, prompt_text)
            # startswith() tolerates trailing digits glued to the parsed answer.
            passed = str(answer).startswith(str(pass_keys[target]))
            result = {'prompt_text': prompt_text, 'start_pos': start_pos, 'pass_keys': pass_keys, 'return_text': return_text, 'passed': passed}
            success_count += passed
            results.append(result)
    # Record the shared filler corpus alongside the per-iteration results.
    results.append({'original_prompt': junks})
    print(f'Iteration: {args.iterations}')
    print(f'Successes: {success_count}')
    if args.output_file:
        with open(args.output_file, 'w') as f:
            json.dump(results, f)
|
def main(args):
    """Interactive generation REPL: read a prompt (from stdin or a file) and
    print the model's continuation, looping until interrupted.

    Fixes: the file-read path spelled the encoding 'utf=8' (it only worked via
    codecs name normalization) and leaked the file handle by never closing it.
    """
    tokenizer = AutoTokenizer.from_pretrained(args.model, model_max_length=sys.maxsize, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token
    model = load_model_and_apply_patches(args.model, args)
    # Sampling is enabled iff a temperature was supplied.
    pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id, temperature=args.temperature, repetition_penalty=args.repetition_penalty, top_k=args.top_k, penalty_alpha=args.penalty_alpha, do_sample=(args.temperature is not None))
    while True:
        if (args.input_file is None):
            prompt_text = input('> ')
        else:
            # Re-read the file on every iteration so edits are picked up.
            input(f'Press enter to read {args.input_file} ')
            with open(args.input_file, encoding='utf-8') as prompt_file:
                prompt_text = prompt_file.read()
        # Strip the echoed prompt; print only the continuation.
        response = pipe(prompt_text, num_return_sequences=1, max_new_tokens=args.max_new_tokens)[0]['generated_text'][len(prompt_text):]
        print(f'< {response}')
|
def get_prompt(sample):
    'Build the zero-scrolls QuALITY prompt for one multiple-choice sample, ending mid-answer.'
    opts = sample['options']
    instruction = ZERO_SCROLLS_QUALITY_PROMPT.format(story=sample['article'], question=sample['question'], a=opts[0], b=opts[1], c=opts[2], d=opts[3])
    # The trailing 'Answer: (' primes the model to emit a single choice letter.
    return instruction + '\nAnswer: ('
|
def main(args):
    """Multiple-choice QuALITY evaluation: for each model, pick the answer by
    argmax over the logits of the four choice-letter tokens at the first
    generated position; optionally write accuracies to a CSV.
    """
    models = [x[0] for x in args.model]
    tokenizer = AutoTokenizer.from_pretrained(models[0], model_max_length=sys.maxsize, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.pad_token_id = tokenizer.eos_token_id
    dataset = load_dataset('emozilla/quality', split=args.split)
    dataset = dataset.map((lambda sample: {'prompt': get_prompt(sample)}))
    if args.max_tokens:
        # Drop samples whose prompt would exceed the context budget.
        dataset = dataset.filter((lambda sample: (len(tokenizer(sample['prompt']).input_ids) <= args.max_tokens)))
    # First token id of each choice letter in CHOICES.
    choice_tokens = [x[0] for x in tokenizer(CHOICES, add_special_tokens=False).input_ids]
    decoded_choice = tokenizer.decode(choice_tokens, clean_up_tokenization_spaces=True)
    results = []
    for model in models:
        torch.cuda.empty_cache()
        loaded = load_model_and_apply_patches(model, args)
        correct_answers = 0
        i = 0
        # NOTE(review): 'max' shadows the builtin here; kept byte-identical.
        max = (len(dataset) if (args.limit is None) else args.limit)
        bar = tqdm(total=max)
        while (i < max):
            sample = dataset[i]
            tokenized_prompt = tokenizer(sample['prompt'], return_tensors='pt')
            input_ids = tokenized_prompt.input_ids.to('cuda')
            attention_mask = tokenized_prompt.attention_mask.to('cuda')
            # Generate exactly one token; we only need its score distribution.
            output = loaded.generate(input_ids, attention_mask=attention_mask, max_new_tokens=1, return_dict_in_generate=True, output_scores=True, pad_token_id=tokenizer.eos_token_id)
            scores = output.scores[0][0]
            # Compare the logits of the four choice-letter tokens only.
            choice_scores = [x.cpu() for x in [scores[choice_tokens[0]], scores[choice_tokens[1]], scores[choice_tokens[2]], scores[choice_tokens[3]]]]
            selection = numpy.argmax([x.float().cpu() for x in choice_scores])
            correct_answers += (1 if (selection == sample['answer']) else 0)
            if args.print_choices:
                print(f"Choice: {CHOICES[selection]} Correct: {CHOICES[sample['answer']]}")
            i += 1
            # Live running accuracy in the progress-bar description.
            percent = ((correct_answers / i) * 100.0)
            bar.desc = f'{model}: {percent:.1f}%'
            bar.update()
        # Final accuracy is reported as a fraction (not a percentage).
        percent = (correct_answers / max)
        results.append(str(percent))
    if args.output_file:
        # CSV: one row of model names, one row of accuracies.
        with open(args.output_file, 'w', encoding='utf-8') as f:
            f.write((','.join(models) + '\n'))
            f.write((','.join(results) + '\n'))
|
def find_all_linear_names(model):
    'Collect the leaf module names of every torch.nn.Linear in the model (e.g. for LoRA targeting), excluding lm_head.'
    linear_names = set()
    for name, module in model.named_modules():
        if not isinstance(module, torch.nn.Linear):
            continue
        # The last dotted component is the leaf name (equals the whole name
        # when the module sits at the top level).
        linear_names.add(name.split('.')[(- 1)])
    # The output head is never a LoRA target.
    linear_names.discard('lm_head')
    return list(linear_names)
|
def main(args):
    """Finetune a Llama/Mistral model with YaRN-style RoPE scaling via Accelerate.

    All behavior is driven by the `args` CLI namespace: model/dataset paths,
    RoPE scaling type/factor, LoRA / DeepSpeed toggles, checkpointing, and
    logging. Side effects: trains the model, optionally writes checkpoints and
    the final model under `args.output_dir`, and logs to wandb when enabled.
    """
    if args.output_dir:
        os.makedirs(args.output_dir, exist_ok=True)
    if args.wandb:
        import wandb
        wandb.login()
    set_seed(args.seed)
    # very long process-group timeout so slow long-context steps don't trip NCCL
    timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1000000))
    accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulate_every, mixed_precision='bf16', log_with=('wandb' if args.wandb else None), kwargs_handlers=[timeout])
    accelerator.init_trackers(project_name=(args.wandb if args.wandb else 'yarn'))
    accelerator.print(f'Total GPUS: {accelerator.num_processes}')
    # pick architecture-specific classes plus the pretraining context length
    # that the scaling factor is expressed relative to
    if (args.architecture == 'llama'):
        from scaled_rope.modeling_llama_yarn import LlamaForCausalLM
        from scaled_rope.configuration_llama import LlamaConfig
        config_cls = LlamaConfig
        model_cls = LlamaForCausalLM
        original_max_position_embeddings = (args.original_max_position_embeddings if args.original_max_position_embeddings else 4096)
    elif (args.architecture == 'mistral'):
        from scaled_rope.modeling_mistral_yarn import MistralForCausalLM
        from scaled_rope.configuration_mistral import MistralConfig
        config_cls = MistralConfig
        model_cls = MistralForCausalLM
        original_max_position_embeddings = (args.original_max_position_embeddings if args.original_max_position_embeddings else 8192)
    # NOTE(review): any other args.architecture value leaves config_cls unbound
    # and fails below with NameError — confirm the CLI restricts the choices.
    config = config_cls.from_pretrained(args.model)
    config.rope_scaling = {'type': args.scaling_type, 'factor': args.scaling_factor, 'original_max_position_embeddings': original_max_position_embeddings}
    config.rope_theta = args.rope_theta
    # target context = factor * original length unless explicitly overridden
    config.max_position_embeddings = (int((args.scaling_factor * original_max_position_embeddings)) if (not args.max_position_embeddings) else args.max_position_embeddings)
    # comma-separated list of window sizes, cycled over completed steps below
    sliding_window_attention_schedule = ([int(x) for x in args.sliding_window_attention_schedule.split(',')] if args.sliding_window_attention_schedule else None)
    if ((sliding_window_attention_schedule is not None) and (len(sliding_window_attention_schedule) == 1)):
        config.sliding_window = sliding_window_attention_schedule[0]
        accelerator.print(f'Sliding attention window set to {config.sliding_window}')
    model = model_cls.from_pretrained(args.model, torch_dtype=torch.bfloat16, config=config, use_flash_attention_2=True)
    # args.dataset may be a hub id or a local save_to_disk path; bare except is
    # deliberate best-effort fallback (NOTE(review): consider `except Exception`)
    try:
        train_dataset = load_dataset(args.dataset)
    except:
        train_dataset = load_from_disk(args.dataset)
    if isinstance(train_dataset, DatasetDict):
        train_dataset = train_dataset['train']
    if ('input_ids' not in train_dataset.column_names):
        raise RuntimeError('Dataset must include an `input_ids` feature')
    # causal-LM convention: labels default to a copy of the inputs
    if ('labels' not in train_dataset.column_names):
        def add_labels(sample):
            sample['labels'] = copy.deepcopy(sample['input_ids'])
            return sample
        train_dataset = train_dataset.map(add_labels, desc='Adding labels', num_proc=args.num_proc)
    if ('attention_mask' not in train_dataset.column_names):
        def add_attention_mask(sample):
            sample['attention_mask'] = torch.ones(len(sample['input_ids']), dtype=torch.int8)
            return sample
        train_dataset = train_dataset.map(add_attention_mask, desc='Adding attention mask', num_proc=args.num_proc)
    if args.truncate:
        # hard-truncate every feature to the first args.truncate tokens
        def truncate(sample):
            sample['input_ids'] = sample['input_ids'][0:args.truncate]
            sample['labels'] = sample['labels'][0:args.truncate]
            sample['attention_mask'] = sample['attention_mask'][0:args.truncate]
            return sample
        train_dataset = train_dataset.map(truncate, desc='Truncating', num_proc=args.num_proc)
    train_loader = DataLoader(train_dataset, collate_fn=default_data_collator, shuffle=True, batch_size=args.batch_size)
    if args.lora:
        from peft import get_peft_model, LoraConfig, TaskType
        target_modules = find_all_linear_names(model)
        accelerator.print(f'LoRA target modules: {target_modules}')
        peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM, inference_mode=False, r=16, lora_alpha=64, lora_dropout=0.05, target_modules=target_modules)
        model = get_peft_model(model, peft_config)
        model.print_trainable_parameters()
    if args.deepspeed:
        # DeepSpeed supplies the real optimizer/scheduler from its JSON config;
        # the Dummy* objects are Accelerate placeholders
        optim = DummyOptim(model.parameters(), lr=args.learning_rate)
        scheduler = DummyScheduler(optim, num_training_steps=args.max_train_steps, num_warmup_steps=args.warmup_steps)
        (model, optim, train_loader, scheduler) = accelerator.prepare(model, optim, train_loader, scheduler)
    else:
        model = accelerator.prepare(model)
        optim = torch.optim.AdamW(model.parameters(), lr=args.learning_rate)
        if (args.lr_schedule == 'linear'):
            scheduler = get_linear_schedule_with_warmup(optim, num_training_steps=args.max_train_steps, num_warmup_steps=args.warmup_steps)
        elif (args.lr_schedule == 'constant'):
            scheduler = get_constant_schedule_with_warmup(optim, num_warmup_steps=args.warmup_steps)
        (optim, train_loader, scheduler) = accelerator.prepare(optim, train_loader, scheduler)
    if (not args.lora):
        # gradient checkpointing only enabled for full finetunes here
        # (NOTE(review): presumably conflicts with the PEFT wrapper — confirm)
        model.gradient_checkpointing_enable()
    accelerator.register_for_checkpointing(scheduler)
    total_batch_size = ((args.batch_size * accelerator.num_processes) * args.gradient_accumulate_every)
    accelerator.print(f'Max train steps: {args.max_train_steps}')
    accelerator.print(f'Total batch size: {total_batch_size}')
    progress_bar = tqdm(range(args.max_train_steps), disable=(not accelerator.is_local_main_process))
    completed_steps = 0
    if args.resume_from_checkpoint:
        # NOTE(review): this inner `or` condition is always true (one operand
        # always holds); harmless only because the outer truthiness check
        # already guards the block — should probably be `and`
        if ((args.resume_from_checkpoint is not None) or (args.resume_from_checkpoint != '')):
            accelerator.print(f'Resuming from checkpoint {args.resume_from_checkpoint}')
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
            training_difference = os.path.splitext(path)[0]
            # checkpoint directories are named 'step_<N>'
            resume_step = int(training_difference.replace('step_', ''))
        if (args.resume_from_checkpoint and (resume_step is not None)):
            train_loader = accelerator.skip_first_batches(train_loader, resume_step)
            completed_steps += resume_step
            progress_bar.update(resume_step)
            accelerator.print(f'Resuming training from step {resume_step}')
    # append when resuming so prior loss history is preserved
    loss_file = (open(args.log_loss, ('a' if args.resume_from_checkpoint else 'w')) if (args.log_loss and accelerator.is_main_process) else None)
    if (not args.save_only):
        model.train()
        for (step, batch) in enumerate(train_loader):
            if (sliding_window_attention_schedule is not None):
                # cycle through the sliding-window schedule as steps complete
                model.config.sliding_window = sliding_window_attention_schedule[(completed_steps % len(sliding_window_attention_schedule))]
            loss_log = None
            with accelerator.accumulate(model):
                loss = model(**batch).loss
                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    # only log/clip on real optimizer steps, not accumulation micro-steps
                    loss_log = {'loss': loss.item()}
                    accelerator.log(loss_log, step=completed_steps)
                    if (loss_file is not None):
                        loss_file.write(f"{loss_log['loss']},")
                        loss_file.flush()
                    if isinstance(args.grad_norm, float):
                        accelerator.clip_grad_norm_(model.parameters(), args.grad_norm)
                optim.step()
                scheduler.step()
                optim.zero_grad()
            if accelerator.sync_gradients:
                progress_bar.update(1)
                if (loss_log is not None):
                    progress_bar.set_postfix(loss_log)
                completed_steps += 1
                if (isinstance(args.checkpointing_steps, int) and (completed_steps > 0)):
                    if ((completed_steps % args.checkpointing_steps) == 0):
                        output_dir = f'step_{completed_steps}'
                        if (args.output_dir is not None):
                            output_dir = os.path.join(args.output_dir, output_dir)
                        accelerator.save_state(output_dir)
            if (completed_steps >= args.max_train_steps):
                break
        accelerator.print(f'Training Finished')
        accelerator.end_training()
    if (args.output_dir is not None):
        accelerator.print(f'Saving model to {args.output_dir}')
        accelerator.wait_for_everyone()
        if args.deepspeed:
            state_dict = accelerator.get_state_dict(model)
        else:
            # FSDP path: gather a full, CPU-offloaded, rank0-only state dict
            full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
            with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_state_dict_config):
                state_dict = accelerator.get_state_dict(model, unwrap=False)
        accelerator.unwrap_model(model).save_pretrained(f'{args.output_dir}', is_main_process=accelerator.is_main_process, save_function=accelerator.save, state_dict=state_dict)
        accelerator.print(f'Saving Finished')
|
def main(args):
    """Print the mean 'acc' over all task entries in an eval-results JSON file.

    Args:
        args: namespace with a `file` attribute — path to a JSON file shaped
            like ``{'results': {task_name: {'acc': float, ...}, ...}}``.
    """
    # context manager so the file handle is closed deterministically
    # (the original left the handle to the garbage collector)
    with open(args.file, 'r', encoding='utf-8') as f:
        obj = json.load(f)
    results = [result['acc'] for result in obj['results'].values()]
    print(numpy.average(results))
|
def main(args):
    """Plot perplexity-vs-context-window curves from a CSV and save PNG/PDF.

    The CSV's first column holds model names; the remaining column headers are
    context lengths and each row holds one model's perplexity values. Output
    files are written next to the input as `<csv>.png` and `<csv>.pdf`.
    """
    frame = pd.read_csv(args.csv)
    figure, axes = plt.subplots(figsize=(10, 5))
    context_lengths = [float(col) for col in frame.columns[1:]]
    for record in frame.values:
        # strip the org prefix so legend labels stay short
        series_label = record[0].replace('NousResearch/', '')
        axes.plot(context_lengths, [float(v) for v in record[1:]], label=series_label)
    axes.set_xlabel('Context Window')
    axes.set_ylabel('Perplexity (lower is better)')
    axes.set_xlim(args.xmin, args.xmax)
    axes.set_ylim(args.ymin, args.ymax)
    axes.legend(loc='upper right')
    figure.savefig(args.csv + '.png')
    figure.savefig(args.csv + '.pdf', transparent=True)
|
class LlamaConfig(PretrainedConfig):
    """Configuration class for LLaMA models, extended with YaRN RoPE scaling.

    Stores the hyperparameters of a [`LlamaModel`]; instantiating with the
    defaults yields a configuration similar to LLaMA-7B. Inherits from
    [`PretrainedConfig`]; generic options (token ids, weight tying, ...) are
    forwarded through ``**kwargs``.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the LLaMA model.
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads per attention layer.
        num_key_value_heads (`int`, *optional*):
            Number of key/value heads for Grouped Query Attention; equal to
            `num_attention_heads` means MHA, 1 means MQA, in between is GQA.
            Defaults to `num_attention_heads` when unset.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            Non-linear activation function in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            Maximum sequence length the model may be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Stddev of the truncated-normal weight initializer.
        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
            Epsilon used by the RMS normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model returns the last key/value attentions.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental tensor-parallelism rank used during pretraining;
            needed to exactly reproduce pretraining results.
        rope_theta (`int`, *optional*, defaults to 10000):
            Base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            RoPE scaling configuration, e.g. ``{"type": "yarn", "factor": 2.0,
            "original_max_position_embeddings": 4096}``. `type` must be one of
            'linear', 'dynamic', 'yarn', 'dynamic-yarn' and `factor` a float
            > 1; the yarn variants additionally require an int
            `original_max_position_embeddings` (see `_rope_scaling_validation`).
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the q/k/v and output projections.
    """
    model_type = 'llama'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000, rope_scaling=None, attention_bias=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # GQA: default to one KV head per attention head (plain MHA)
        if (num_key_value_heads is None):
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        # fail fast on a malformed rope_scaling dict before the base class runs
        self._rope_scaling_validation()
        self.attention_bias = attention_bias
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration dict (see class docstring)."""
        if (self.rope_scaling is None):
            return
        if (not isinstance(self.rope_scaling, dict)):
            raise ValueError(f'`rope_scaling` must be a dictionary, got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if ((rope_scaling_type is None) or (rope_scaling_type not in ['linear', 'dynamic', 'yarn', 'dynamic-yarn'])):
            raise ValueError(f"`rope_scaling`'s name field must be one of ['linear', 'dynamic', 'yarn', 'dynamic-yarn'], got {rope_scaling_type}")
        if ((rope_scaling_factor is None) or (not isinstance(rope_scaling_factor, float)) or (rope_scaling_factor <= 1.0)):
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
        # the yarn variants additionally need the pre-finetuning context length
        if ((rope_scaling_type == 'yarn') or (rope_scaling_type == 'dynamic-yarn')):
            original_max_position_embeddings = self.rope_scaling.get('original_max_position_embeddings', None)
            if ((original_max_position_embeddings is None) or (not isinstance(original_max_position_embeddings, int))):
                raise ValueError(f'`rope_scaling.original_max_position_embeddings` must be set to an int when using yarn, and dynamic-yarn')
|
class MistralConfig(PretrainedConfig):
    """Configuration class for Mistral models, extended with YaRN RoPE scaling.

    Stores the hyperparameters of a [`MistralModel`]; instantiating with the
    defaults yields a configuration similar to Mistral-7B-v0.1. Inherits from
    [`PretrainedConfig`]; generic options are forwarded through ``**kwargs``.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000): vocabulary size.
        hidden_size (`int`, *optional*, defaults to 4096): hidden dimension.
        intermediate_size (`int`, *optional*, defaults to 14336): MLP dimension.
        num_hidden_layers (`int`, *optional*, defaults to 32): decoder layers.
        num_attention_heads (`int`, *optional*, defaults to 32): attention heads.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            Number of key/value heads for Grouped Query Attention; falls back
            to `num_attention_heads` (MHA) when explicitly passed as None.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            Decoder activation function.
        max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
            Maximum sequence length the model may be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Stddev of the truncated-normal weight initializer.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            Epsilon used by the RMS normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model returns the last key/value attentions.
        rope_scaling (`Dict`, *optional*):
            RoPE scaling configuration; `type` must be one of 'linear',
            'dynamic', 'yarn', 'dynamic-yarn' and `factor` a float > 1. The
            yarn variants also need an int `original_max_position_embeddings`.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            Base period of the RoPE embeddings.
        sliding_window (`int`, *optional*, defaults to 4096):
            Sliding-window attention window size.
    """
    model_type = 'mistral'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, hidden_act='silu', max_position_embeddings=(4096 * 32), initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_scaling=None, rope_theta=10000.0, sliding_window=4096, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # GQA: fall back to one KV head per attention head when passed None
        self.num_key_value_heads = (num_attention_heads if num_key_value_heads is None else num_key_value_heads)
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.sliding_window = sliding_window
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        # fail fast on a malformed rope_scaling dict before the base class runs
        self._rope_scaling_validation()
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration dict (see class docstring)."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict):
            raise ValueError(f'`rope_scaling` must be a dictionary, got {self.rope_scaling}')
        scaling_type = self.rope_scaling.get('type', None)
        scaling_factor = self.rope_scaling.get('factor', None)
        if scaling_type is None or scaling_type not in ['linear', 'dynamic', 'yarn', 'dynamic-yarn']:
            raise ValueError(f"`rope_scaling`'s name field must be one of ['linear', 'dynamic', 'yarn', 'dynamic-yarn'], got {scaling_type}")
        if scaling_factor is None or not isinstance(scaling_factor, float) or scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {scaling_factor}")
        # the yarn variants additionally need the pre-finetuning context length
        if scaling_type in ('yarn', 'dynamic-yarn'):
            original_max = self.rope_scaling.get('original_max_position_embeddings', None)
            if original_max is None or not isinstance(original_max, int):
                raise ValueError(f'`rope_scaling.original_max_position_embeddings` must be set to an int when using yarn, and dynamic-yarn')
|
def patch_llama_for_dynamic_scaled_rotary_embeddings(model, ntk=False):
    """Swap every Llama self-attention rotary embedding for a dynamically scaled one.

    Args:
        model: a HF Llama-style model exposing `model.model.layers`.
        ntk: forwarded to `LlamaDynamicScaledRotaryEmbedding`. Defaults to
            False so callers that pass only `model` (e.g. the dynamic-linear
            path in `apply_patches`) no longer raise TypeError — the original
            signature made `ntk` required.
    """
    from .LlamaDynamicScaledRotaryEmbedding import LlamaDynamicScaledRotaryEmbedding
    for each in model.model.layers:
        each.self_attn.rotary_emb = LlamaDynamicScaledRotaryEmbedding(each.self_attn.head_dim, device=each.self_attn.rotary_emb.inv_freq.device, ntk=ntk)
|
def patch_llama_for_dynamic_part_ntk_rotary_embeddings(model, finetuned):
    """Replace each Llama layer's rotary embedding with the dynamic part-NTK variant.

    Args:
        model: HF Llama-style model exposing `model.model.layers`.
        finetuned: forwarded to the embedding — presumably selects the
            finetuned vs. zero-shot scaling formula; TODO confirm against
            LlamaDynamicPartNTKScaledRotaryEmbedding.
    """
    from .LlamaDynamicPartNTKScaledRotaryEmbedding import LlamaDynamicPartNTKScaledRotaryEmbedding
    for each in model.model.layers:
        each.self_attn.rotary_emb = LlamaDynamicPartNTKScaledRotaryEmbedding(each.self_attn.head_dim, finetuned=finetuned, device=each.self_attn.rotary_emb.inv_freq.device)
|
def patch_llama_for_dynamic_yarn_rotary_embeddings(model, original_max_position_embeddings, finetuned):
    """Replace each Llama layer's rotary embedding with the dynamic-YaRN variant.

    Args:
        model: HF Llama-style model exposing `model.model.layers`.
        original_max_position_embeddings: pretraining context length that the
            YaRN scaling is computed relative to.
        finetuned: forwarded to the embedding — presumably selects the
            finetuned vs. zero-shot scaling formula; TODO confirm.
    """
    from .LlamaDynamicYaRNScaledRotaryEmbedding import LlamaDynamicYaRNScaledRotaryEmbedding
    for each in model.model.layers:
        each.self_attn.rotary_emb = LlamaDynamicYaRNScaledRotaryEmbedding(each.self_attn.head_dim, finetuned=finetuned, original_max_position_embeddings=original_max_position_embeddings, device=each.self_attn.rotary_emb.inv_freq.device)
|
def patch_falcon_for_dynamic_part_ntk_rotary_embeddings(model):
    """Replace each Falcon layer's rotary embedding with the dynamic part-NTK variant.

    Falcon exposes its layers as `model.transformer.h` and its rotary module
    as `self_attention.maybe_rotary` (unlike the Llama patch helpers above).
    """
    from .FalconDynamicPartNTKScaledRotaryEmbedding import FalconDynamicPartNTKScaledRotaryEmbedding
    for each in model.transformer.h:
        each.self_attention.maybe_rotary = FalconDynamicPartNTKScaledRotaryEmbedding(each.self_attention.head_dim)
|
def patch_llama_for_ntk_scaled_rotary_embeddings(model, alpha):
    """Replace each Llama layer's rotary embedding with the (static) NTK-scaled variant.

    Args:
        model: HF Llama-style model exposing `model.model.layers`.
        alpha: NTK scaling coefficient forwarded to the embedding.
    """
    from .LlamaNTKScaledRotaryEmbedding import LlamaNTKScaledRotaryEmbedding
    for each in model.model.layers:
        each.self_attn.rotary_emb = LlamaNTKScaledRotaryEmbedding(each.self_attn.head_dim, alpha=alpha, device=each.self_attn.rotary_emb.inv_freq.device)
|
def patch_llama_for_linear_scaled_rotary_embeddings(model, scale):
    """Replace each Llama layer's rotary embedding with the linearly scaled variant.

    Args:
        model: HF Llama-style model exposing `model.model.layers`.
        scale: linear position-interpolation factor forwarded to the embedding.
    """
    from .LlamaLinearScaledRotaryEmbedding import LlamaLinearScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        # reuse the device of the embedding being replaced
        attn.rotary_emb = LlamaLinearScaledRotaryEmbedding(attn.head_dim, scale=scale, device=attn.rotary_emb.inv_freq.device)
|
def patch_llama_for_part_ntk_scaled_rotary_embeddings(model, scale):
    """Replace each Llama layer's rotary embedding with the (static) part-NTK variant.

    Args:
        model: HF Llama-style model exposing `model.model.layers`.
        scale: scaling factor forwarded to the embedding.
    """
    from .LlamaPartNTKScaledRotaryEmbedding import LlamaPartNTKScaledRotaryEmbedding
    for each in model.model.layers:
        each.self_attn.rotary_emb = LlamaPartNTKScaledRotaryEmbedding(each.self_attn.head_dim, scale=scale, device=each.self_attn.rotary_emb.inv_freq.device)
|
def patch_llama_for_yarn_scaled_rotary_embeddings(model, scale, original_max_position_embeddings):
    """Replace each Llama layer's rotary embedding with the (static) YaRN variant.

    Args:
        model: HF Llama-style model exposing `model.model.layers`.
        scale: YaRN scaling factor forwarded to the embedding.
        original_max_position_embeddings: pretraining context length the
            scaling is computed relative to.
    """
    from .LlamaYaRNScaledRotaryEmbedding import LlamaYaRNScaledRotaryEmbedding
    for each in model.model.layers:
        each.self_attn.rotary_emb = LlamaYaRNScaledRotaryEmbedding(each.self_attn.head_dim, scale=scale, original_max_position_embeddings=original_max_position_embeddings, device=each.self_attn.rotary_emb.inv_freq.device)
|
def patch_gptneox_for_scaled_rotary_embeddings(model):
    """Replace each GPT-NeoX layer's rotary embedding with the dynamically scaled variant.

    GPT-NeoX exposes layers as `model.gpt_neox.layers` and applies rotary
    embeddings to only `attention.rotary_ndims` dimensions; the model's
    configured `max_position_embeddings` is passed through.
    """
    from .GPTNeoXDynamicScaledRotaryEmbedding import GPTNeoXDynamicScaledRotaryEmbedding
    for each in model.gpt_neox.layers:
        each.attention.rotary_emb = GPTNeoXDynamicScaledRotaryEmbedding(each.attention.rotary_ndims, model.config.max_position_embeddings, device=each.attention.rotary_emb.inv_freq.device)
|
def patch_gptneox_for_ntk_scaled_rotary_embeddings(model, alpha):
    """Replace each GPT-NeoX layer's rotary embedding with the NTK-scaled variant.

    Args:
        model: HF GPT-NeoX model exposing `model.gpt_neox.layers`.
        alpha: NTK scaling coefficient forwarded to the embedding.
    """
    from .GPTNeoXNTKScaledRotaryEmbedding import GPTNeoXNTKScaledRotaryEmbedding
    for each in model.gpt_neox.layers:
        each.attention.rotary_emb = GPTNeoXNTKScaledRotaryEmbedding(each.attention.rotary_ndims, model.config.max_position_embeddings, alpha=alpha, device=each.attention.rotary_emb.inv_freq.device)
|
def patch_gptneox_for_longer_sequences(model, max_positions):
    """Rebuild each GPT-NeoX attention layer's causal mask buffer at a larger size.

    Replaces `attention.bias` with a lower-triangular ones mask of shape
    (1, 1, max_positions, max_positions), preserving the existing buffer's
    dtype and device, so the model can attend over longer sequences.
    """
    for layer in model.gpt_neox.layers:
        old_mask = layer.attention.bias
        full = torch.ones((max_positions, max_positions), dtype=old_mask.dtype, device=old_mask.device)
        layer.attention.bias = torch.tril(full).view(1, 1, max_positions, max_positions)
|
def patch_llama_for_rerope(model, training_length, window):
    """Patch every Llama self-attention layer to use ReRoPE attention.

    Sets the `training_length` / `window` attributes consumed by
    `forward_with_rerope` and rebinds each layer's attention `forward` to the
    ReRoPE implementation.

    BUG FIX: the original defined the per-layer `forward` wrapper but never
    assigned it anywhere, so the patch had no effect; the wrapper also
    captured the loop variable `each` late (all wrappers would have bound the
    last layer). The attention module is now bound eagerly via a keyword-only
    default argument.
    """
    from .LlamaReRoPE import forward_with_rerope
    for each in model.model.layers:
        def forward(*args, _attn=each.self_attn, **kwargs):
            return forward_with_rerope(_attn, *args, **kwargs)
        each.self_attn.forward = forward
        each.self_attn.training_length = int(training_length)
        each.self_attn.window = int(window)
|
def main(args):
    """Tokenize one or more text datasets, pack into fixed-length blocks, and save.

    Each entry of `args.dataset[0]` is either `'path'` or
    `'path,train_fraction'`; the optional fraction selects a seeded train
    split of that size. Non-json datasets are loaded from disk when possible,
    falling back to the hub, and concatenated. Text is tokenized with an EOS
    suffix and regrouped into `args.sequence_length`-sized blocks; the result
    is saved to disk and/or pushed to the hub.
    """
    if (args.dataset is None) or (len(args.dataset[0]) == 0):
        raise RuntimeError('No datasets provided')
    datasets = args.dataset[0]
    # optional ',fraction' suffix selects a train split of that size
    splits = [(x.split(',')[1] if len(x.split(',')) == 2 else '') for x in datasets]
    datasets = [x.split(',')[0] for x in datasets]
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    if args.json:
        dataset = load_dataset('json', data_files=datasets)[args.split]
        if any(len(s) > 0 for s in splits):
            if len(datasets) > 1:
                raise RuntimeError('Can only use splitting on json datasets if there is exactly one input file')
            dataset = dataset.train_test_split(train_size=float(splits[0]), seed=args.seed)['train']
    else:
        to_concatenate = []
        for i in range(0, len(datasets)):
            try:
                loaded = load_from_disk(datasets[i])
            except Exception:
                # BUG FIX: original fell back to `load_dataset([i])`, passing
                # a list containing the loop index instead of the dataset path
                loaded = load_dataset(datasets[i])[args.split]
            if len(splits[i]) > 0:
                loaded = loaded.train_test_split(train_size=float(splits[i]), seed=args.seed)['train']
            to_concatenate.append(loaded)
        dataset = concatenate_datasets(to_concatenate)
    # keep only the text feature being tokenized
    dataset = dataset.remove_columns([x for x in dataset.column_names if x not in [args.feature]])
    tokenized_dataset = dataset.map((lambda example: tokenizer([(t + tokenizer.eos_token) for t in example[args.feature]])), batched=True, num_proc=args.num_proc, remove_columns=[args.feature])
    block_size = args.sequence_length

    def group_texts(examples):
        # concatenate every column, then chop into block_size chunks; the tail
        # remainder is dropped only when at least one full block exists
        concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        if total_length >= block_size:
            total_length = (total_length // block_size) * block_size
        result = {k: [t[i:(i + block_size)] for i in range(0, total_length, block_size)] for (k, t) in concatenated_examples.items()}
        return result

    train_dataset = tokenized_dataset.map(group_texts, batched=True, num_proc=args.num_proc)
    if args.output:
        train_dataset.save_to_disk(args.output)
    if args.push_to_hub:
        train_dataset.push_to_hub(args.push_to_hub, private=True)
|
def main(args):
    """Load a dataset's train split, truncate each sample's features, and save to disk.

    Clips `input_ids`, `labels`, and `attention_mask` to the first
    `args.truncate` tokens, then writes the result to `args.output`.
    """
    dataset = load_dataset(args.dataset, split='train')

    def _clip(sample):
        # truncate every token-level feature in lockstep
        for key in ('input_ids', 'labels', 'attention_mask'):
            sample[key] = sample[key][0:args.truncate]
        return sample

    dataset = dataset.map(_clip, desc='Truncating', num_proc=args.num_proc)
    dataset.save_to_disk(args.output)
|
def load_model(model, args):
    """Load a (possibly RoPE-patched, possibly quantized) causal LM for evaluation.

    Args:
        model: model name or path passed to `from_pretrained`.
        args: CLI namespace from `add_args` — selects the model class
            (custom Llama/Mistral YaRN implementations vs. AutoModel), RoPE
            scaling overrides, quantization, and attention options.

    Returns:
        The loaded model, device-mapped automatically.
    """
    # choose implementation: project-local YaRN classes or plain transformers
    if args.custom_model:
        from scaled_rope.modeling_llama_yarn import LlamaForCausalLM
        from scaled_rope.configuration_llama import LlamaConfig
        model_cls = LlamaForCausalLM
        config_cls = LlamaConfig
    elif args.custom_model_together:
        from scaled_rope.modeling_llama_together_yarn import LlamaForCausalLM
        from scaled_rope.configuration_llama import LlamaConfig
        model_cls = LlamaForCausalLM
        config_cls = LlamaConfig
    elif args.custom_model_mistral:
        from scaled_rope.modeling_mistral_yarn import MistralForCausalLM
        from scaled_rope.configuration_mistral import MistralConfig
        model_cls = MistralForCausalLM
        config_cls = MistralConfig
    else:
        model_cls = AutoModelForCausalLM
        config_cls = AutoConfig
    config = config_cls.from_pretrained(model, trust_remote_code=(not args.custom_model))
    if args.max_position_embeddings:
        config.max_position_embeddings = args.max_position_embeddings
    if args.factor:
        # NOTE(review): assumes config.rope_scaling is already a dict; this
        # raises TypeError when the checkpoint has rope_scaling=None — confirm
        config.rope_scaling['factor'] = args.factor
    if args.no_use_cache:
        config.use_cache = False
    else:
        config.use_cache = True
    if args.sliding_window_attention:
        config.sliding_window = args.sliding_window_attention
    # RoPE scaling overrides only apply to the custom model implementations
    if (args.custom_model or args.custom_model_together or args.custom_model_mistral):
        if args.linear:
            config.rope_scaling = {'type': 'linear', 'factor': args.linear}
        elif args.dynamic_ntk:
            config.rope_scaling = {'type': 'dynamic', 'factor': args.dynamic_ntk}
        elif args.part_ntk:
            config.rope_scaling = {'type': 'ntk-by-parts', 'factor': args.part_ntk}
        elif args.yarn:
            config.rope_scaling = {'type': 'yarn', 'factor': args.yarn, 'original_max_position_embeddings': args.original_max_position_embeddings}
        elif args.dynamic_yarn:
            # each field: CLI override first, then checkpoint value, then default
            config.rope_scaling = {'type': 'dynamic-yarn', 'factor': (args.factor if args.factor else (config.rope_scaling.get('factor', 1.0) if (config.rope_scaling is not None) else 1.0)), 'original_max_position_embeddings': (args.original_max_position_embeddings if args.original_max_position_embeddings else config.rope_scaling['original_max_position_embeddings']), 'finetuned': (args.finetuned if args.finetuned else (config.rope_scaling.get('finetuned', False) if (config.rope_scaling is not None) else False))}
    elif args.rerope:
        # ReRoPE is applied by monkey-patching stock LlamaAttention, so it is
        # incompatible with the custom model classes
        assert ((not args.custom_model) and (not args.custom_model_together))
        from transformers.models.llama.modeling_llama import LlamaAttention
        from scaled_rope.LlamaReRoPE import forward_with_rerope
        LlamaAttention.forward = forward_with_rerope
    if (args.load_in_8bit or args.load_in_4bit):
        quantization_config = BitsAndBytesConfig(load_in_4bit=args.load_in_4bit, load_in_8bit=args.load_in_8bit, llm_int8_threshold=6.0, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type='nf4')
        torch_dtype = None
        # tensor-parallel pretraining slicing is disabled under quantization
        config.pretraining_tp = 1
    else:
        quantization_config = None
        torch_dtype = torch.bfloat16
    loaded = model_cls.from_pretrained(model, torch_dtype=torch_dtype, device_map='auto', trust_remote_code=(not args.custom_model), config=config, quantization_config=quantization_config, use_flash_attention_2=args.flash_attention)
    return loaded
|
def add_args(parser: ArgumentParser):
    """Register the shared RoPE-scaling / model-loading CLI flags on *parser*.

    Flag order matches the original declaration order (so `--help` output is
    unchanged). Returns the same parser to allow chaining.
    """
    flag_specs = [
        ('--dynamic-linear', {'action': 'store_true'}),
        ('--dynamic-ntk', {'type': float}),
        ('--dynamic-part-ntk', {'action': 'store_true'}),
        ('--dynamic-yarn', {'action': 'store_true'}),
        ('--ntk', {'type': float}),
        ('--part-ntk', {'type': float}),
        ('--linear', {'type': float}),
        ('--yarn', {'type': float}),
        ('--rerope', {'type': float}),
        ('--factor', {'type': float}),
        ('--load-in-8bit', {'action': 'store_true'}),
        ('--load-in-4bit', {'action': 'store_true'}),
        ('--finetuned', {'action': 'store_true'}),
        ('--gpt-neox-max-length', {'type': int}),
        ('--adapter', {'type': str}),
        ('--max-position-embeddings', {'type': int}),
        ('--original-max-position-embeddings', {'type': int}),
        ('--sliding-window-attention', {'type': int}),
        ('--custom-model', {'action': 'store_true'}),
        ('--custom-model-together', {'action': 'store_true'}),
        ('--custom-model-mistral', {'action': 'store_true'}),
        ('--flash-attention', {'action': 'store_true'}),
        ('--no-use-cache', {'action': 'store_true'}),
    ]
    for flag, options in flag_specs:
        parser.add_argument(flag, **options)
    return parser
|
def apply_patches(model, args):
    """Apply the RoPE-scaling patch selected by *args* to *model*, then merge any LoRA adapter.

    The scaling flags are treated as mutually exclusive: the first one set, in
    the if/elif order below, wins.  Raises RuntimeError when the model
    architecture does not support the requested patch.  Returns the (possibly
    adapter-merged) model.
    """
    # Custom model classes already embed their scaling; only patch stock models.
    if ((not args.custom_model) and (not args.custom_model_together) and (not args.custom_model_mistral)):
        if ('GPTNeoXForCausalLM' in model.config.architectures):
            assert (args.gpt_neox_max_length is not None)
            patch_gptneox_for_longer_sequences(model, args.gpt_neox_max_length)
    if args.dynamic_linear:
        if ('GPTNeoXForCausalLM' in model.config.architectures):
            patch_gptneox_for_scaled_rotary_embeddings(model)
        elif ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_dynamic_scaled_rotary_embeddings(model)
        else:
            # Fixed typo: 'dyanmic' -> 'dynamic' (here and in the messages below).
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic linear')
    elif args.dynamic_ntk:
        if ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_dynamic_scaled_rotary_embeddings(model, ntk=args.dynamic_ntk)
        else:
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic ntk')
    elif args.dynamic_part_ntk:
        if ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_dynamic_part_ntk_rotary_embeddings(model, args.finetuned)
        elif ('RWForCausalLM' in model.config.architectures):
            patch_falcon_for_dynamic_part_ntk_rotary_embeddings(model)
        else:
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic part ntk')
    elif args.dynamic_yarn:
        if ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_dynamic_yarn_rotary_embeddings(model, args.original_max_position_embeddings, args.finetuned)
        else:
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic yarn')
    elif args.ntk:
        if ('GPTNeoXForCausalLM' in model.config.architectures):
            patch_gptneox_for_ntk_scaled_rotary_embeddings(model, args.ntk)
        elif ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_ntk_scaled_rotary_embeddings(model, args.ntk)
        else:
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for ntk')
    elif args.linear:
        if ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_linear_scaled_rotary_embeddings(model, scale=args.linear)
        else:
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for linear')
    elif args.part_ntk:
        if ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_part_ntk_scaled_rotary_embeddings(model, scale=args.part_ntk)
        else:
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for part ntk')
    elif args.yarn:
        if ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_yarn_scaled_rotary_embeddings(model, scale=args.yarn, original_max_position_embeddings=args.original_max_position_embeddings)
        else:
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for YaRN')
    elif args.rerope:
        if ('LlamaForCausalLM' in model.config.architectures):
            # Default training length matches Llama-2's 4096-token context.
            training_length = (args.original_max_position_embeddings if args.original_max_position_embeddings else 4096)
            window = args.rerope
            patch_llama_for_rerope(model, training_length=training_length, window=window)
        else:
            # Fixed error message: this branch is ReRoPE, not YaRN.
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for ReRoPE')
    if args.adapter:
        from peft import PeftModel
        # Merge the LoRA weights into the base model so inference needs no PEFT wrapper.
        model = PeftModel.from_pretrained(model, args.adapter)
        model = model.merge_and_unload()
    return model
|
def load_model_and_apply_patches(model, args):
    """Load *model* according to *args*, apply the RoPE/adapter patches, and return it."""
    loaded = load_model(model, args)
    return apply_patches(loaded, args)
|
def generate_prompt(n_garbage):
    """Build a passkey-retrieval prompt padded with ~n_garbage characters of filler.

    The pass-key sentence is placed at a uniformly random split point inside
    the filler.  Returns (prompt_text, pass_key).
    """
    # Split the character budget randomly between text before and after the key.
    prefix_len = random.randint(0, n_garbage)
    suffix_len = n_garbage - prefix_len
    task_description = 'There is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there.'
    filler_sentence = 'The grass is green. The sky is blue. The sun is yellow. Here we go. There and back again.'
    filler_pool = ' '.join([filler_sentence] * 10000)
    assert len(filler_pool) >= n_garbage
    pass_key = random.randint(1, 50000)
    information_line = f'The pass key is {pass_key}. Remember it. {pass_key} is the pass key.'
    final_question = 'What is the pass key? The pass key is'
    prompt = '\n'.join([
        task_description,
        filler_pool[:prefix_len],
        information_line,
        filler_pool[:suffix_len],
        final_question,
    ])
    return (prompt, pass_key)
|
def test_model(pipe, prompt_text, pass_key):
response = pipe(prompt_text, num_return_sequences=1, max_new_tokens=10)[0]['generated_text'][len(prompt_text):]
assert (f'The pass key is {pass_key}' in prompt_text)
try:
pass_key = int(re.search('\\d+', response).group())
except:
pass_key = response[:20]
return pass_key
|
def main(args):
    """Run the passkey-retrieval benchmark over one or more models.

    Builds prompts of increasing length (either a fixed size, a linear token
    schedule, or a doubling schedule), asks each model to recover the hidden
    pass key, and reports per-length accuracy, optionally writing a CSV.
    """
    models = [x[0] for x in args.model]
    # One tokenizer shared by all models — assumes their vocabularies match.  TODO confirm.
    tokenizer = AutoTokenizer.from_pretrained(models[0], model_max_length=sys.maxsize, padding_side='right', trust_remote_code=True)
    if args.fixed_length:
        lengths = [args.fixed_length]
        tokens = [len(tokenizer.encode(generate_prompt(args.fixed_length)[0]))]
        print(f'Prompt is {tokens[0]} tokens')
    else:
        if args.tokens_step:
            # Linear schedule: min_tokens .. max_tokens in fixed steps.
            tokens = [x for x in range(args.min_tokens, (args.max_tokens + 1), args.tokens_step)]
        else:
            # Doubling schedule starting at min_tokens, capped at max_tokens.
            tokens = [args.min_tokens]
            # NOTE(review): the while-condition never changes; the loop only exits via break.
            while (args.min_tokens < args.max_tokens):
                point = (tokens[(- 1)] * 2)
                if (point <= args.max_tokens):
                    tokens.append(point)
                else:
                    break
        lengths = []
        last_n = 0
        # For each token target, search (in length_step increments, resuming from
        # the previous target's count) for the largest garbage-character count
        # whose prompt still tokenizes to fewer than `target` tokens.
        for target in tqdm(tokens, desc='Determining sequence lengths'):
            num_tokens = 0
            n = last_n
            while (num_tokens < target):
                last_n = n
                n += args.length_step
                prompt = generate_prompt(n)[0]
                num_tokens = len(tokenizer.encode(prompt))
            lengths.append(last_n)
    results = []
    for model in tqdm(models, desc='Model', leave=False):
        torch.cuda.empty_cache()
        loaded = load_model_and_apply_patches(model, args)
        pipe = pipeline('text-generation', model=loaded, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id)
        # One success counter per prompt length.
        result = ([0] * len(lengths))
        for (i, length) in tenumerate(lengths, desc='Lengths', leave=False):
            for _ in trange(0, args.iterations, desc='Iterations', leave=False):
                (prompt_text, pass_key) = generate_prompt(length)
                num_tokens = len(pipe.tokenizer.encode(prompt_text))
                answer = test_model(pipe, prompt_text, pass_key)
                if (answer == pass_key):
                    result[i] += 1
            # Convert the success count into a fraction of iterations.
            result[i] /= args.iterations
            print(f'{model}: {tokens[i]}={int((result[i] * 100))}%')
        result.insert(0, model)
        results.append(result)
    if args.output_file:
        # CSV: header row of token counts, then one accuracy row per model.
        with open(args.output_file, 'w', encoding='utf-8') as f:
            f.write(f''',{','.join([str(x) for x in tokens])}
''')
            for result in results:
                f.write(f'''{','.join([str(x) for x in result])}
''')
|
def order(i):
    """Return *i* as an English ordinal string ('1st', '2nd', '3rd', '4th', ...).

    Fixes two bugs in the original: the teen guard compared ``i % 10`` (always
    != 11/12/13, so 11 -> '11st'), and the 'rd' branch tested ``i % 19``
    instead of ``i % 10`` (so 23 -> '23th').
    """
    # 11, 12, 13 (and 111, 212, ...) always take 'th'.
    if (i % 100) in (11, 12, 13):
        return str(i) + 'th'
    last_digit = i % 10
    if last_digit == 1:
        return str(i) + 'st'
    if last_digit == 2:
        return str(i) + 'nd'
    if last_digit == 3:
        return str(i) + 'rd'
    return str(i) + 'th'
|
def generate_prompt(docs, num_keys=1):
    """Interleave *num_keys* pass-key sentences into the *docs* pieces and quiz one of them.

    Returns (prompt_text, pass_keys, start_pos, retrieve_number) where
    retrieve_number is the zero-based index of the key being asked for.
    """
    task_description = 'There is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there.'
    pass_keys = [random.randint(1, 50000) for _ in range(num_keys)]
    # Sorted insertion points so keys appear in document order.
    start_pos = sorted(random.randint(1, len(docs)) for _ in range(num_keys))
    key_lines = [
        f'The {order(i + 1)} pass key is {key}. Remember it. {key} is the {order(i + 1)} pass key.'
        for i, key in enumerate(pass_keys)
    ]
    retrieve_number = random.randint(0, (num_keys - 1))
    target_ord = order(retrieve_number + 1)
    final_question = f'What is the {target_ord} pass key? The {target_ord} pass key is'
    lines = [task_description]
    cursor = 0
    # Emit junk up to each insertion point, then the key sentence.
    for key_line, pos in zip(key_lines, start_pos):
        lines.append(''.join(docs[cursor:pos]))
        lines.append(key_line)
        cursor = pos
    lines.append(''.join(docs[cursor:]))
    lines.append(final_question)
    return ('\n'.join(lines), pass_keys, start_pos, retrieve_number)
|
def test_model(pipe, prompt_text):
response = pipe(prompt_text, num_return_sequences=1, max_new_tokens=10)[0]['generated_text'][len(prompt_text):]
try:
pass_key = int(re.search('\\d+', response).group())
except:
pass_key = response[:20]
return (pass_key, response)
|
def construct_junk(data, length, tokenizer):
    """Sample random documents from *data* until roughly *length* tokens of text are collected.

    Returns a list of token-aligned text pieces (each piece is the source text
    of one non-empty token span).  A falsy *length* defaults to 8192 tokens.
    """
    target = length or 8192
    docs = []
    collected = 0
    while collected < target:
        text = random.choice(data)['text']
        encoding = tokenizer(text, return_offsets_mapping=True)
        # Keep only non-empty character spans.
        spans = [(a, b) for a, b in encoding['offset_mapping'] if a < b]
        take = min(target - collected, len(spans))
        docs.extend(text[a:b] for a, b in spans[:take])
        collected += take
    return docs
|
def main(args):
    """Multi-key passkey benchmark: hide several keys in real-document junk and query one.

    Junk text is sampled from a Hub dataset; optionally restricts decoding to
    digit-like tokens via a forward hook.  Per-iteration records are written
    as JSON when --output-file is given.
    """
    models = [x[0] for x in args.model]
    tokenizer = AutoTokenizer.from_pretrained(models[0], model_max_length=sys.maxsize, padding_side='right', trust_remote_code=True)
    data = load_dataset(args.dataset)[args.split]
    # One shared pool of token-aligned junk pieces reused for every prompt.
    junks = construct_junk(data, args.fixed_length, tokenizer)
    if args.restrict_tokens:
        vocab = tokenizer.vocab
        # SentencePiece word-boundary marker — presumably a Llama-style tokenizer; TODO confirm.
        escape_char = '▁'
        digit_tokens = [vocab[a] for a in vocab.keys() if a.lstrip(escape_char).isdigit()]
        digit_tokens.append(vocab[tokenizer.eos_token])
        # Also allow pure whitespace tokens so generation can separate digits / terminate.
        extra = [vocab[a] for a in vocab.keys() if (a.strip((' \n' + escape_char)) == '')]
        digit_tokens.extend(extra)
        # Boolean mask over the vocab: True = disallowed token.
        mask = torch.ones(tokenizer.vocab_size, dtype=torch.bool)
        mask[digit_tokens] = 0
        def filter_digits(module, input, output):
            # Push every disallowed logit far down so decoding only picks allowed tokens.
            output.logits[(..., mask[:output.logits.size((- 1))])] = (- 10000.0)
        print(f'Decoding restricted to {len(digit_tokens)} tokens.')
    results = []
    success_count = 0
    for model in tqdm(models, desc='Model', leave=False):
        torch.cuda.empty_cache()
        loaded = load_model_and_apply_patches(model, args)
        if args.restrict_tokens:
            loaded.register_forward_hook(filter_digits)
        pipe = pipeline('text-generation', model=loaded, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id)
        for _ in trange(0, args.iterations, desc='Iterations', leave=False):
            (prompt_text, pass_keys, start_pos, target) = generate_prompt(junks, args.num_keys)
            num_tokens = len(pipe.tokenizer.encode(prompt_text))
            (answer, return_text) = test_model(pipe, prompt_text)
            # Prefix match tolerates generations truncated mid-number.
            passed = str(answer).startswith(str(pass_keys[target]))
            result = {'prompt_text': prompt_text, 'start_pos': start_pos, 'pass_keys': pass_keys, 'return_text': return_text, 'passed': passed}
            success_count += passed
            results.append(result)
    # Last record stores the shared junk pool for reproducibility.
    results.append({'original_prompt': junks})
    print(f'Iteration: {args.iterations}')
    print(f'Successes: {success_count}')
    if args.output_file:
        with open(args.output_file, 'w') as f:
            json.dump(results, f)
|
def main(args):
    """Interactive generation loop: read a prompt (stdin or --input-file), print the completion."""
    tokenizer = AutoTokenizer.from_pretrained(args.model, model_max_length=sys.maxsize, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token
    model = load_model_and_apply_patches(args.model, args)
    # Sampling is enabled only when a temperature was requested.
    pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id, temperature=args.temperature, repetition_penalty=args.repetition_penalty, top_k=args.top_k, penalty_alpha=args.penalty_alpha, do_sample=(args.temperature is not None))
    while True:
        if (args.input_file is None):
            prompt_text = input('> ')
        else:
            input(f'Press enter to read {args.input_file} ')
            # BUG FIX: encoding was 'utf=8', an invalid codec name that raises
            # LookupError at runtime; also close the handle instead of leaking it.
            with open(args.input_file, encoding='utf-8') as f:
                prompt_text = f.read()
        response = pipe(prompt_text, num_return_sequences=1, max_new_tokens=args.max_new_tokens)[0]['generated_text'][len(prompt_text):]
        print(f'< {response}')
|
def get_prompt(sample):
    """Format a QuALITY multiple-choice sample into the zero-scrolls prompt, ending at 'Answer: ('."""
    opts = sample['options']
    instruction = ZERO_SCROLLS_QUALITY_PROMPT.format(story=sample['article'], question=sample['question'], a=opts[0], b=opts[1], c=opts[2], d=opts[3])
    return f'{instruction}\nAnswer: ('
|
def main(args):
    """Evaluate models on QuALITY multiple choice.

    For each sample the model generates exactly one token after an
    'Answer: (' prompt; the answer is whichever choice-letter token has the
    highest score.  Per-model accuracy is shown live on the progress bar and
    optionally written to a CSV.
    """
    models = [x[0] for x in args.model]
    tokenizer = AutoTokenizer.from_pretrained(models[0], model_max_length=sys.maxsize, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.pad_token_id = tokenizer.eos_token_id
    dataset = load_dataset('emozilla/quality', split=args.split)
    dataset = dataset.map((lambda sample: {'prompt': get_prompt(sample)}))
    if args.max_tokens:
        dataset = dataset.filter((lambda sample: (len(tokenizer(sample['prompt']).input_ids) <= args.max_tokens)))
    # First sub-token of each choice letter; their logits decide the answer.
    choice_tokens = [x[0] for x in tokenizer(CHOICES, add_special_tokens=False).input_ids]
    decoded_choice = tokenizer.decode(choice_tokens, clean_up_tokenization_spaces=True)
    results = []
    for model in models:
        torch.cuda.empty_cache()
        loaded = load_model_and_apply_patches(model, args)
        correct_answers = 0
        i = 0
        # Renamed from `max`, which shadowed the builtin.
        sample_limit = (len(dataset) if (args.limit is None) else args.limit)
        bar = tqdm(total=sample_limit)
        while (i < sample_limit):
            sample = dataset[i]
            tokenized_prompt = tokenizer(sample['prompt'], return_tensors='pt')
            input_ids = tokenized_prompt.input_ids.to('cuda')
            attention_mask = tokenized_prompt.attention_mask.to('cuda')
            # Generate exactly one token and inspect its raw scores.
            output = loaded.generate(input_ids, attention_mask=attention_mask, max_new_tokens=1, return_dict_in_generate=True, output_scores=True, pad_token_id=tokenizer.eos_token_id)
            scores = output.scores[0][0]
            choice_scores = [x.cpu() for x in [scores[choice_tokens[0]], scores[choice_tokens[1]], scores[choice_tokens[2]], scores[choice_tokens[3]]]]
            selection = numpy.argmax([x.float().cpu() for x in choice_scores])
            correct_answers += (1 if (selection == sample['answer']) else 0)
            if args.print_choices:
                print(f"Choice: {CHOICES[selection]} Correct: {CHOICES[sample['answer']]}")
            i += 1
            percent = ((correct_answers / i) * 100.0)
            bar.desc = f'{model}: {percent:.1f}%'
            bar.update()
        # NOTE(review): recorded as a fraction (0-1) while the progress bar shows
        # a percentage — kept as-is for output-file compatibility.
        percent = (correct_answers / sample_limit)
        results.append(str(percent))
    if args.output_file:
        with open(args.output_file, 'w', encoding='utf-8') as f:
            f.write((','.join(models) + '\n'))
            f.write((','.join(results) + '\n'))
|
def find_all_linear_names(model):
    """Collect the leaf-module names of every ``torch.nn.Linear`` in *model*.

    These are the usual LoRA target modules; 'lm_head' is excluded because it
    should not be adapted.
    """
    targets = set()
    for name, module in model.named_modules():
        if not isinstance(module, torch.nn.Linear):
            continue
        parts = name.split('.')
        # For nested modules keep only the final path component.
        targets.add(parts[-1] if len(parts) > 1 else parts[0])
    targets.discard('lm_head')
    return list(targets)
|
def main(args):
    """Fine-tune a Llama or Mistral model with YaRN-style RoPE scaling via accelerate.

    Expects a pre-tokenized dataset exposing an `input_ids` column; `labels`
    and `attention_mask` are synthesized when absent.  Supports LoRA,
    DeepSpeed, checkpoint resume, and an optional per-step sliding-window
    schedule.
    """
    if args.output_dir:
        os.makedirs(args.output_dir, exist_ok=True)
    if args.wandb:
        import wandb
        wandb.login()
    set_seed(args.seed)
    # Very long NCCL timeout so large checkpoint saves don't kill the process group.
    timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1000000))
    accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulate_every, mixed_precision='bf16', log_with=('wandb' if args.wandb else None), kwargs_handlers=[timeout])
    accelerator.init_trackers(project_name=(args.wandb if args.wandb else 'yarn'))
    accelerator.print(f'Total GPUS: {accelerator.num_processes}')
    # Pick architecture-specific config/model classes and the pre-training context length.
    if (args.architecture == 'llama'):
        from scaled_rope.modeling_llama_yarn import LlamaForCausalLM
        from scaled_rope.configuration_llama import LlamaConfig
        config_cls = LlamaConfig
        model_cls = LlamaForCausalLM
        original_max_position_embeddings = (args.original_max_position_embeddings if args.original_max_position_embeddings else 4096)
    elif (args.architecture == 'mistral'):
        from scaled_rope.modeling_mistral_yarn import MistralForCausalLM
        from scaled_rope.configuration_mistral import MistralConfig
        config_cls = MistralConfig
        model_cls = MistralForCausalLM
        original_max_position_embeddings = (args.original_max_position_embeddings if args.original_max_position_embeddings else 8192)
    config = config_cls.from_pretrained(args.model)
    config.rope_scaling = {'type': args.scaling_type, 'factor': args.scaling_factor, 'original_max_position_embeddings': original_max_position_embeddings}
    config.rope_theta = args.rope_theta
    # New context length defaults to scaling_factor * original length unless overridden.
    config.max_position_embeddings = (int((args.scaling_factor * original_max_position_embeddings)) if (not args.max_position_embeddings) else args.max_position_embeddings)
    sliding_window_attention_schedule = ([int(x) for x in args.sliding_window_attention_schedule.split(',')] if args.sliding_window_attention_schedule else None)
    if ((sliding_window_attention_schedule is not None) and (len(sliding_window_attention_schedule) == 1)):
        # A single-entry schedule is just a fixed sliding window.
        config.sliding_window = sliding_window_attention_schedule[0]
        accelerator.print(f'Sliding attention window set to {config.sliding_window}')
    model = model_cls.from_pretrained(args.model, torch_dtype=torch.bfloat16, config=config, use_flash_attention_2=True)
    # Dataset may live on the Hub (load_dataset) or on local disk (load_from_disk).
    try:
        train_dataset = load_dataset(args.dataset)
    except:
        train_dataset = load_from_disk(args.dataset)
    if isinstance(train_dataset, DatasetDict):
        train_dataset = train_dataset['train']
    if ('input_ids' not in train_dataset.column_names):
        raise RuntimeError('Dataset must include an `input_ids` feature')
    # Causal LM: labels are a copy of the inputs when not provided.
    if ('labels' not in train_dataset.column_names):
        def add_labels(sample):
            sample['labels'] = copy.deepcopy(sample['input_ids'])
            return sample
        train_dataset = train_dataset.map(add_labels, desc='Adding labels', num_proc=args.num_proc)
    if ('attention_mask' not in train_dataset.column_names):
        def add_attention_mask(sample):
            # All-ones mask: every position attends (no padding in pre-tokenized data).
            sample['attention_mask'] = torch.ones(len(sample['input_ids']), dtype=torch.int8)
            return sample
        train_dataset = train_dataset.map(add_attention_mask, desc='Adding attention mask', num_proc=args.num_proc)
    if args.truncate:
        def truncate(sample):
            sample['input_ids'] = sample['input_ids'][0:args.truncate]
            sample['labels'] = sample['labels'][0:args.truncate]
            sample['attention_mask'] = sample['attention_mask'][0:args.truncate]
            return sample
        train_dataset = train_dataset.map(truncate, desc='Truncating', num_proc=args.num_proc)
    train_loader = DataLoader(train_dataset, collate_fn=default_data_collator, shuffle=True, batch_size=args.batch_size)
    if args.lora:
        from peft import get_peft_model, LoraConfig, TaskType
        target_modules = find_all_linear_names(model)
        accelerator.print(f'LoRA target modules: {target_modules}')
        peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM, inference_mode=False, r=16, lora_alpha=64, lora_dropout=0.05, target_modules=target_modules)
        model = get_peft_model(model, peft_config)
        model.print_trainable_parameters()
    # DeepSpeed supplies optimizer/scheduler via its own config; use dummies here.
    if args.deepspeed:
        optim = DummyOptim(model.parameters(), lr=args.learning_rate)
        scheduler = DummyScheduler(optim, num_training_steps=args.max_train_steps, num_warmup_steps=args.warmup_steps)
        (model, optim, train_loader, scheduler) = accelerator.prepare(model, optim, train_loader, scheduler)
    else:
        model = accelerator.prepare(model)
        optim = torch.optim.AdamW(model.parameters(), lr=args.learning_rate)
        if (args.lr_schedule == 'linear'):
            scheduler = get_linear_schedule_with_warmup(optim, num_training_steps=args.max_train_steps, num_warmup_steps=args.warmup_steps)
        elif (args.lr_schedule == 'constant'):
            scheduler = get_constant_schedule_with_warmup(optim, num_warmup_steps=args.warmup_steps)
        (optim, train_loader, scheduler) = accelerator.prepare(optim, train_loader, scheduler)
    if (not args.lora):
        # Trade compute for memory on full fine-tunes.
        model.gradient_checkpointing_enable()
    accelerator.register_for_checkpointing(scheduler)
    total_batch_size = ((args.batch_size * accelerator.num_processes) * args.gradient_accumulate_every)
    accelerator.print(f'Max train steps: {args.max_train_steps}')
    accelerator.print(f'Total batch size: {total_batch_size}')
    progress_bar = tqdm(range(args.max_train_steps), disable=(not accelerator.is_local_main_process))
    completed_steps = 0
    if args.resume_from_checkpoint:
        # NOTE(review): this inner condition is always true when reached
        # (an `or` of "is not None" / "!= ''"); likely intended as `and`.
        if ((args.resume_from_checkpoint is not None) or (args.resume_from_checkpoint != '')):
            accelerator.print(f'Resuming from checkpoint {args.resume_from_checkpoint}')
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
            training_difference = os.path.splitext(path)[0]
            # Checkpoint directories are named "step_<N>".
            resume_step = int(training_difference.replace('step_', ''))
    if (args.resume_from_checkpoint and (resume_step is not None)):
        train_loader = accelerator.skip_first_batches(train_loader, resume_step)
        completed_steps += resume_step
        progress_bar.update(resume_step)
        accelerator.print(f'Resuming training from step {resume_step}')
    # Optional per-step loss CSV; append when resuming so history is kept.
    loss_file = (open(args.log_loss, ('a' if args.resume_from_checkpoint else 'w')) if (args.log_loss and accelerator.is_main_process) else None)
    if (not args.save_only):
        model.train()
        for (step, batch) in enumerate(train_loader):
            if (sliding_window_attention_schedule is not None):
                # Cycle through the sliding-window schedule as training progresses.
                model.config.sliding_window = sliding_window_attention_schedule[(completed_steps % len(sliding_window_attention_schedule))]
            loss_log = None
            with accelerator.accumulate(model):
                loss = model(**batch).loss
                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    # Only log on real optimizer steps, not accumulation micro-steps.
                    loss_log = {'loss': loss.item()}
                    accelerator.log(loss_log, step=completed_steps)
                    if (loss_file is not None):
                        loss_file.write(f"{loss_log['loss']},")
                        loss_file.flush()
                    if isinstance(args.grad_norm, float):
                        accelerator.clip_grad_norm_(model.parameters(), args.grad_norm)
                optim.step()
                scheduler.step()
                optim.zero_grad()
            if accelerator.sync_gradients:
                progress_bar.update(1)
                if (loss_log is not None):
                    progress_bar.set_postfix(loss_log)
                completed_steps += 1
                if (isinstance(args.checkpointing_steps, int) and (completed_steps > 0)):
                    if ((completed_steps % args.checkpointing_steps) == 0):
                        output_dir = f'step_{completed_steps}'
                        if (args.output_dir is not None):
                            output_dir = os.path.join(args.output_dir, output_dir)
                        accelerator.save_state(output_dir)
            if (completed_steps >= args.max_train_steps):
                break
        accelerator.print(f'Training Finished')
        accelerator.end_training()
    if (args.output_dir is not None):
        accelerator.print(f'Saving model to {args.output_dir}')
        accelerator.wait_for_everyone()
        if args.deepspeed:
            state_dict = accelerator.get_state_dict(model)
        else:
            # FSDP: gather the full state dict on rank 0, offloaded to CPU.
            full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
            with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_state_dict_config):
                state_dict = accelerator.get_state_dict(model, unwrap=False)
        accelerator.unwrap_model(model).save_pretrained(f'{args.output_dir}', is_main_process=accelerator.is_main_process, save_function=accelerator.save, state_dict=state_dict)
        accelerator.print(f'Saving Finished')
|
def main(args):
    """Print and return the mean 'acc' over all entries in the JSON results file *args.file*.

    The return value is new (previously only printed) and backward-compatible:
    existing callers simply ignore it.
    """
    # Close the file deterministically (previously the handle was leaked).
    with open(args.file, 'r', encoding='utf-8') as f:
        obj = json.load(f)
    results = [result['acc'] for result in obj['results'].values()]
    average = numpy.average(results)
    print(average)
    return average
|
def main(args):
    """Plot perplexity-vs-context-window curves from *args.csv* and save PNG + PDF beside it."""
    data = pd.read_csv(args.csv)
    (fig, ax) = plt.subplots(figsize=(10, 5))
    # First column is the model name; remaining column headers are context lengths.
    x_data = [float(x) for x in data.columns[1:]]
    for row in data.values:
        label = row[0].replace('NousResearch/', '')
        ax.plot(x_data, [float(x) for x in row[1:]], label=label)
    ax.set_xlabel('Context Window')
    ax.set_ylabel('Perplexity (lower is better)')
    ax.set_xlim(args.xmin, args.xmax)
    ax.set_ylim(args.ymin, args.ymax)
    ax.legend(loc='upper right')
    fig.savefig((args.csv + '.png'))
    fig.savefig((args.csv + '.pdf'), transparent=True)
    # Release the figure so repeated calls don't accumulate open figures.
    plt.close(fig)
|
class LlamaConfig(PretrainedConfig):
    """Configuration for a Llama model with extended RoPE-scaling support.

    Mirrors the upstream Hugging Face ``LlamaConfig``, with ``rope_scaling``
    extended to accept the 'yarn' and 'dynamic-yarn' strategies in addition
    to 'linear' and 'dynamic'.  ``rope_scaling`` is a dict of the form
    ``{"type": <strategy>, "factor": <float greater than 1>}``; the yarn
    strategies additionally require an integer
    ``original_max_position_embeddings`` entry.  Defaults reproduce LLaMA-7B
    (32000 vocab, 4096 hidden, 32 layers, 32 heads, rms_norm_eps 1e-6).
    """
    model_type = 'llama'
    # Cache keys dropped from config comparisons at inference time.
    keys_to_ignore_at_inference = ['past_key_values']
    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000, rope_scaling=None, attention_bias=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # GQA: defaulting key/value heads to the attention-head count yields plain MHA.
        if (num_key_value_heads is None):
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        # Fail fast on a malformed rope_scaling dict.
        self._rope_scaling_validation()
        self.attention_bias = attention_bias
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
    def _rope_scaling_validation(self):
        """Validate ``self.rope_scaling``; raise ValueError on any malformed entry."""
        if (self.rope_scaling is None):
            return
        if (not isinstance(self.rope_scaling, dict)):
            raise ValueError(f'`rope_scaling` must be a dictionary, got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if ((rope_scaling_type is None) or (rope_scaling_type not in ['linear', 'dynamic', 'yarn', 'dynamic-yarn'])):
            raise ValueError(f"`rope_scaling`'s name field must be one of ['linear', 'dynamic', 'yarn', 'dynamic-yarn'], got {rope_scaling_type}")
        # NOTE(review): an integer factor (e.g. 2) is rejected by this isinstance
        # check; only a float strictly greater than 1 passes.
        if ((rope_scaling_factor is None) or (not isinstance(rope_scaling_factor, float)) or (rope_scaling_factor <= 1.0)):
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
        if ((rope_scaling_type == 'yarn') or (rope_scaling_type == 'dynamic-yarn')):
            original_max_position_embeddings = self.rope_scaling.get('original_max_position_embeddings', None)
            if ((original_max_position_embeddings is None) or (not isinstance(original_max_position_embeddings, int))):
                raise ValueError(f'`rope_scaling.original_max_position_embeddings` must be set to an int when using yarn, and dynamic-yarn')
|
class MistralConfig(PretrainedConfig):
    r"""
    Configuration class for a [`MistralModel`]. It is used to instantiate a Mistral model
    according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults yields a configuration similar to Mistral-7B-v0.1 or
    Mistral-7B-Instruct-v0.1.

    [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)
    [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the
    model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the Mistral model; the number of distinct tokens in
            `inputs_ids` passed when calling [`MistralModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 14336):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            Number of key/value heads used to implement Grouped Query Attention.
            `num_key_value_heads == num_attention_heads` gives Multi-Head Attention (MHA),
            `num_key_value_heads == 1` gives Multi-Query Attention (MQA), anything in
            between gives GQA. See [this paper](https://arxiv.org/pdf/2305.13245.pdf).
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
            The maximum sequence length this model might ever be used with. Mistral's
            sliding window attention allows sequences of up to 4096*32 tokens.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing
            all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model should return the last key/values attentions (not used by
            all models). Only relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            The id of the padding token.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the "end-of-sequence" token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_scaling (`Dict`, *optional*):
            Scaling configuration for the RoPE embeddings, in the format
            `{"type": strategy name, "factor": scaling factor}`. Supported types are
            `linear`, `dynamic`, `yarn` and `dynamic-yarn`; `factor` must be a float > 1.
            The yarn variants additionally require an int
            `original_max_position_embeddings`.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        sliding_window (`int`, *optional*, defaults to 4096):
            Sliding window attention window size.

    ```python
    >>> from transformers import MistralModel, MistralConfig

    >>> # Initializing a Mistral 7B style configuration
    >>> configuration = MistralConfig()

    >>> # Initializing a model from the Mistral 7B style configuration
    >>> model = MistralModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = 'mistral'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, hidden_act='silu', max_position_embeddings=(4096 * 32), initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_scaling=None, rope_theta=10000.0, sliding_window=4096, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.sliding_window = sliding_window
        # GQA: fall back to plain multi-head attention when no KV head count is given.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Set rope_theta before validating rope_scaling, consistent with the
        # sibling config class in this file.
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)

    def _rope_scaling_validation(self):
        """Validate `self.rope_scaling`; raise ValueError on a malformed config."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict):
            raise ValueError(f'`rope_scaling` must be a dictionary, got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        # BUG FIX: message previously said "name field" although the key
        # inspected is 'type'; also "an float" -> "a float" below.
        if (rope_scaling_type is None) or (rope_scaling_type not in ['linear', 'dynamic', 'yarn', 'dynamic-yarn']):
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic', 'yarn', 'dynamic-yarn'], got {rope_scaling_type}")
        if (rope_scaling_factor is None) or (not isinstance(rope_scaling_factor, float)) or (rope_scaling_factor <= 1.0):
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
        if rope_scaling_type in ('yarn', 'dynamic-yarn'):
            original_max_position_embeddings = self.rope_scaling.get('original_max_position_embeddings', None)
            if (original_max_position_embeddings is None) or (not isinstance(original_max_position_embeddings, int)):
                raise ValueError('`rope_scaling.original_max_position_embeddings` must be set to an int when using yarn, and dynamic-yarn')
|
def patch_llama_for_dynamic_scaled_rotary_embeddings(model, ntk):
    """Replace every decoder layer's rotary embedding with a dynamically scaled one.

    Args:
        model: A Llama model exposing ``model.model.layers``.
        ntk: Forwarded to ``LlamaDynamicScaledRotaryEmbedding``.
    """
    from .LlamaDynamicScaledRotaryEmbedding import LlamaDynamicScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        attn.rotary_emb = LlamaDynamicScaledRotaryEmbedding(
            attn.head_dim,
            device=attn.rotary_emb.inv_freq.device,
            ntk=ntk,
        )
|
def patch_llama_for_dynamic_part_ntk_rotary_embeddings(model, finetuned):
    """Swap in a dynamic part-NTK scaled rotary embedding on every decoder layer.

    Args:
        model: A Llama model exposing ``model.model.layers``.
        finetuned: Forwarded to ``LlamaDynamicPartNTKScaledRotaryEmbedding``.
    """
    from .LlamaDynamicPartNTKScaledRotaryEmbedding import LlamaDynamicPartNTKScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        attn.rotary_emb = LlamaDynamicPartNTKScaledRotaryEmbedding(
            attn.head_dim,
            finetuned=finetuned,
            device=attn.rotary_emb.inv_freq.device,
        )
|
def patch_llama_for_dynamic_yarn_rotary_embeddings(model, original_max_position_embeddings, finetuned):
    """Swap in a dynamic YaRN scaled rotary embedding on every decoder layer.

    Args:
        model: A Llama model exposing ``model.model.layers``.
        original_max_position_embeddings: Pre-extension context length, forwarded
            to ``LlamaDynamicYaRNScaledRotaryEmbedding``.
        finetuned: Forwarded to ``LlamaDynamicYaRNScaledRotaryEmbedding``.
    """
    from .LlamaDynamicYaRNScaledRotaryEmbedding import LlamaDynamicYaRNScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        attn.rotary_emb = LlamaDynamicYaRNScaledRotaryEmbedding(
            attn.head_dim,
            finetuned=finetuned,
            original_max_position_embeddings=original_max_position_embeddings,
            device=attn.rotary_emb.inv_freq.device,
        )
|
def patch_falcon_for_dynamic_part_ntk_rotary_embeddings(model):
    """Swap in a dynamic part-NTK scaled rotary embedding on every Falcon block.

    Args:
        model: A Falcon model exposing ``model.transformer.h``.
    """
    from .FalconDynamicPartNTKScaledRotaryEmbedding import FalconDynamicPartNTKScaledRotaryEmbedding
    for block in model.transformer.h:
        attn = block.self_attention
        attn.maybe_rotary = FalconDynamicPartNTKScaledRotaryEmbedding(attn.head_dim)
|
def patch_llama_for_ntk_scaled_rotary_embeddings(model, alpha):
    """Swap in an NTK-scaled rotary embedding on every decoder layer.

    Args:
        model: A Llama model exposing ``model.model.layers``.
        alpha: NTK scaling factor, forwarded to ``LlamaNTKScaledRotaryEmbedding``.
    """
    from .LlamaNTKScaledRotaryEmbedding import LlamaNTKScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        attn.rotary_emb = LlamaNTKScaledRotaryEmbedding(
            attn.head_dim,
            alpha=alpha,
            device=attn.rotary_emb.inv_freq.device,
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.