def pad(x, width, val=0, batch_ndim=1):
'\n Pad a tensor with a constant value.\n\n Parameters\n ----------\n x : tensor\n\n width : int, iterable of int, or iterable of tuple\n Padding width. If an int, pads each axis symmetrically with the same\n amount in the beginning and end. If an iterable of int, defines the\n symmetric padding width separately for each axis. If an iterable of\n tuples of two ints, defines a separate padding width for each beginning\n and end of each axis.\n\n val : float\n The constant value used for padding\n\n batch_ndim : integer\n The number of leading dimensions that will not be padded.\n\n '
input_shape = x.shape
input_ndim = x.ndim
output_shape = list(input_shape)
indices = [slice(None) for _ in output_shape]
if isinstance(width, int):
widths = ([width] * (input_ndim - batch_ndim))
else:
widths = width
for (k, w) in enumerate(widths):
try:
(l, r) = w
except TypeError:
l = r = w
output_shape[(k + batch_ndim)] += (l + r)
indices[(k + batch_ndim)] = slice(l, (l + input_shape[(k + batch_ndim)]))
if val:
out = (T.ones(output_shape) * val)
else:
out = T.zeros(output_shape)
return T.set_subtensor(out[tuple(indices)], x)
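# A minimal usage sketch for pad() (assumes theano, theano.tensor as T and numpy as
# np are available, as in the code above; the symbol names are illustrative). Pads
# both non-batch axes of a batch of 3x3 maps by one element on each side.
x_sym = T.tensor3('x')
padded = pad(x_sym, width=1, val=0, batch_ndim=1)
pad_fn = theano.function([x_sym], padded)
out = pad_fn(np.ones((2, 3, 3), dtype=theano.config.floatX))
assert out.shape == (2, 5, 5)  # the leading batch axis is left unpadded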
|
def get_or_compute_grads(loss_or_grads, params):
'Helper function returning a list of gradients\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to return the gradients for\n\n Returns\n -------\n list of expressions\n If `loss_or_grads` is a list, it is assumed to be a list of\n gradients and returned as is, unless it does not match the length\n of `params`, in which case a `ValueError` is raised.\n Otherwise, `loss_or_grads` is assumed to be a cost expression and\n the function returns `theano.grad(loss_or_grads, params)`.\n\n Raises\n ------\n ValueError\n If `loss_or_grads` is a list of a different length than `params`, or if\n any element of `params` is not a shared variable (while we could still\n compute its gradient, we can never update it and want to fail early).\n '
if any(((not isinstance(p, theano.compile.SharedVariable)) for p in params)):
raise ValueError('params must contain shared variables only. If it contains arbitrary parameter expressions, then lasagne.utils.collect_shared_vars() may help you.')
if isinstance(loss_or_grads, list):
if (not (len(loss_or_grads) == len(params))):
raise ValueError(('Got %d gradient expressions for %d parameters' % (len(loss_or_grads), len(params))))
return loss_or_grads
else:
return theano.grad(loss_or_grads, params)
|
def sgd(loss_or_grads, params, learning_rate):
'Stochastic Gradient Descent (SGD) updates\n\n Generates update expressions of the form:\n\n * ``param := param - learning_rate * gradient``\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to generate update expressions for\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n '
grads = get_or_compute_grads(loss_or_grads, params)
updates = OrderedDict()
for (param, grad) in zip(params, grads):
updates[param] = (param - (learning_rate * grad))
return updates
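# A minimal sketch of wiring sgd() into a training function (x_sym, y_sym and w are
# illustrative names, not part of the code above). The returned OrderedDict is
# handed to theano.function, so every call performs one gradient step on w.
x_sym = T.matrix('x')
y_sym = T.vector('y')
w = theano.shared(np.zeros(5, dtype=theano.config.floatX), name='w')
loss = T.mean((T.dot(x_sym, w) - y_sym) ** 2)
train_fn = theano.function([x_sym, y_sym], loss,
                           updates=sgd(loss, [w], learning_rate=0.1))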
|
def apply_momentum(updates, params=None, momentum=0.9):
'Returns a modified update dictionary including momentum\n\n Generates update expressions of the form:\n\n * ``velocity := momentum * velocity + updates[param] - param``\n * ``param := param + velocity``\n\n Parameters\n ----------\n updates : OrderedDict\n A dictionary mapping parameters to update expressions\n params : iterable of shared variables, optional\n The variables to apply momentum to. If omitted, will apply\n momentum to all `updates.keys()`.\n momentum : float or symbolic scalar, optional\n The amount of momentum to apply. Higher momentum results in\n smoothing over more update steps. Defaults to 0.9.\n\n Returns\n -------\n OrderedDict\n A copy of `updates` with momentum updates for all `params`.\n\n Notes\n -----\n Higher momentum also results in larger update steps. To counter that,\n you can optionally scale your learning rate by `1 - momentum`.\n\n See Also\n --------\n momentum : Shortcut applying momentum to SGD updates\n '
if (params is None):
params = updates.keys()
updates = OrderedDict(updates)
for param in params:
value = param.get_value(borrow=True)
velocity = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
x = ((momentum * velocity) + updates[param])
updates[velocity] = (x - param)
updates[param] = x
return updates
|
def momentum(loss_or_grads, params, learning_rate, momentum=0.9):
"Stochastic Gradient Descent (SGD) updates with momentum\n\n Generates update expressions of the form:\n\n * ``velocity := momentum * velocity - learning_rate * gradient``\n * ``param := param + velocity``\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to generate update expressions for\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n momentum : float or symbolic scalar, optional\n The amount of momentum to apply. Higher momentum results in\n smoothing over more update steps. Defaults to 0.9.\n\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n\n Notes\n -----\n Higher momentum also results in larger update steps. To counter that,\n you can optionally scale your learning rate by `1 - momentum`.\n\n See Also\n --------\n apply_momentum : Generic function applying momentum to updates\n nesterov_momentum : Nesterov's variant of SGD with momentum\n "
updates = sgd(loss_or_grads, params, learning_rate)
return apply_momentum(updates, momentum=momentum)
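# Sketch showing that momentum() is just sgd() followed by apply_momentum(); both
# calls below build equivalent update dictionaries. Reuses the illustrative loss and
# w symbols from the sgd() example above (an assumption, not part of the original).
updates_a = momentum(loss, [w], learning_rate=0.1, momentum=0.9)
updates_b = apply_momentum(sgd(loss, [w], learning_rate=0.1), momentum=0.9)
# besides w itself, each dictionary also contains an entry for the velocity
# shared variable created by apply_momentum()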
|
def apply_nesterov_momentum(updates, params=None, momentum=0.9):
'Returns a modified update dictionary including Nesterov momentum\n\n Generates update expressions of the form:\n\n * ``velocity := momentum * velocity + updates[param] - param``\n * ``param := param + momentum * velocity + updates[param] - param``\n\n Parameters\n ----------\n updates : OrderedDict\n A dictionary mapping parameters to update expressions\n params : iterable of shared variables, optional\n The variables to apply momentum to. If omitted, will apply\n momentum to all `updates.keys()`.\n momentum : float or symbolic scalar, optional\n The amount of momentum to apply. Higher momentum results in\n smoothing over more update steps. Defaults to 0.9.\n\n Returns\n -------\n OrderedDict\n A copy of `updates` with momentum updates for all `params`.\n\n Notes\n -----\n Higher momentum also results in larger update steps. To counter that,\n you can optionally scale your learning rate by `1 - momentum`.\n\n The classic formulation of Nesterov momentum (or Nesterov accelerated\n gradient) requires the gradient to be evaluated at the predicted next\n position in parameter space. Here, we use the formulation described at\n https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617,\n which allows the gradient to be evaluated at the current parameters.\n\n See Also\n --------\n nesterov_momentum : Shortcut applying Nesterov momentum to SGD updates\n '
if (params is None):
params = updates.keys()
updates = OrderedDict(updates)
for param in params:
value = param.get_value(borrow=True)
velocity = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
x = (((momentum * velocity) + updates[param]) - param)
updates[velocity] = x
updates[param] = ((momentum * x) + updates[param])
return updates
|
def nesterov_momentum(loss_or_grads, params, learning_rate, momentum=0.9):
'Stochastic Gradient Descent (SGD) updates with Nesterov momentum\n\n Generates update expressions of the form:\n\n * ``velocity := momentum * velocity - learning_rate * gradient``\n * ``param := param + momentum * velocity - learning_rate * gradient``\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to generate update expressions for\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n momentum : float or symbolic scalar, optional\n The amount of momentum to apply. Higher momentum results in\n smoothing over more update steps. Defaults to 0.9.\n\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n\n Notes\n -----\n Higher momentum also results in larger update steps. To counter that,\n you can optionally scale your learning rate by `1 - momentum`.\n\n The classic formulation of Nesterov momentum (or Nesterov accelerated\n gradient) requires the gradient to be evaluated at the predicted next\n position in parameter space. Here, we use the formulation described at\n https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617,\n which allows the gradient to be evaluated at the current parameters.\n\n See Also\n --------\n apply_nesterov_momentum : Function applying momentum to updates\n '
updates = sgd(loss_or_grads, params, learning_rate)
return apply_nesterov_momentum(updates, momentum=momentum)
|
def adagrad(loss_or_grads, params, learning_rate=1.0, epsilon=1e-06):
'Adagrad updates\n\n Scale learning rates by dividing with the square root of accumulated\n squared gradients. See [1]_ for further description.\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to generate update expressions for\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n epsilon : float or symbolic scalar\n Small value added for numerical stability\n\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n\n Notes\n -----\n Using step size eta Adagrad calculates the learning rate for feature i at\n time step t as:\n\n .. math:: \\eta_{t,i} = \\frac{\\eta}\n {\\sqrt{\\sum^t_{t^\\prime} g^2_{t^\\prime,i}+\\epsilon}} g_{t,i}\n\n as such the learning rate is monotonically decreasing.\n\n Epsilon is not included in the typical formula, see [2]_.\n\n References\n ----------\n .. [1] Duchi, J., Hazan, E., & Singer, Y. (2011):\n Adaptive subgradient methods for online learning and stochastic\n optimization. JMLR, 12:2121-2159.\n\n .. [2] Chris Dyer:\n Notes on AdaGrad. http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf\n '
grads = get_or_compute_grads(loss_or_grads, params)
updates = OrderedDict()
for (param, grad) in zip(params, grads):
value = param.get_value(borrow=True)
accu = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
accu_new = (accu + (grad ** 2))
updates[accu] = accu_new
updates[param] = (param - ((learning_rate * grad) / T.sqrt((accu_new + epsilon))))
return updates
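# Pure-numpy sketch of the Adagrad scaling used above: the squared-gradient
# accumulator only grows, so the effective step size for each coordinate shrinks
# monotonically even when the gradient itself stays constant.
eta, eps, accu = 1.0, 1e-6, 0.0
for g in (0.5, 0.5, 0.5):
    accu += g ** 2
    print(eta * g / np.sqrt(accu + eps))  # ~1.0, ~0.707, ~0.577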
|
def rmsprop(loss_or_grads, params, learning_rate=1.0, rho=0.9, epsilon=1e-06):
'RMSProp updates\n\n Scale learning rates by dividing with the moving average of the root mean\n squared (RMS) gradients. See [1]_ for further description.\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to generate update expressions for\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n rho : float or symbolic scalar\n Gradient moving average decay factor\n epsilon : float or symbolic scalar\n Small value added for numerical stability\n\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n\n Notes\n -----\n `rho` should be between 0 and 1. A value of `rho` close to 1 will decay the\n moving average slowly and a value close to 0 will decay the moving average\n fast.\n\n Using the step size :math:`\\eta` and a decay factor :math:`\\rho` the\n learning rate :math:`\\eta_t` is calculated as:\n\n .. math::\n r_t &= \\rho r_{t-1} + (1-\\rho)*g^2\\\\\n \\eta_t &= \\frac{\\eta}{\\sqrt{r_t + \\epsilon}}\n\n References\n ----------\n .. [1] Tieleman, T. and Hinton, G. (2012):\n Neural Networks for Machine Learning, Lecture 6.5 - rmsprop.\n Coursera. http://www.youtube.com/watch?v=O3sxAc4hxZU (formula @5:20)\n '
grads = get_or_compute_grads(loss_or_grads, params)
updates = OrderedDict()
for (param, grad) in zip(params, grads):
value = param.get_value(borrow=True)
accu = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
accu_new = ((rho * accu) + ((1 - rho) * (grad ** 2)))
updates[accu] = accu_new
updates[param] = (param - ((learning_rate * grad) / T.sqrt((accu_new + epsilon))))
return updates
|
def adadelta(loss_or_grads, params, learning_rate=1.0, rho=0.95, epsilon=1e-06):
' Adadelta updates\n\n Scale learning rates by a the ratio of accumulated gradients to accumulated\n step sizes, see [1]_ and notes for further description.\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to generate update expressions for\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n rho : float or symbolic scalar\n Squared gradient moving average decay factor\n epsilon : float or symbolic scalar\n Small value added for numerical stability\n\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n\n Notes\n -----\n rho should be between 0 and 1. A value of rho close to 1 will decay the\n moving average slowly and a value close to 0 will decay the moving average\n fast.\n\n rho = 0.95 and epsilon=1e-6 are suggested in the paper and reported to\n work for multiple datasets (MNIST, speech).\n\n In the paper, no learning rate is considered (so learning_rate=1.0).\n Probably best to keep it at this value.\n epsilon is important for the very first update (so the numerator does\n not become 0).\n\n Using the step size eta and a decay factor rho the learning rate is\n calculated as:\n\n .. math::\n r_t &= \\rho r_{t-1} + (1-\\rho)*g^2\\\\\n \\eta_t &= \\eta \\frac{\\sqrt{s_{t-1} + \\epsilon}}\n {\\sqrt{r_t + \\epsilon}}\\\\\n s_t &= \\rho s_{t-1} + (1-\\rho)*(\\eta_t*g)^2\n\n References\n ----------\n .. [1] Zeiler, M. D. (2012):\n ADADELTA: An Adaptive Learning Rate Method.\n arXiv Preprint arXiv:1212.5701.\n '
grads = get_or_compute_grads(loss_or_grads, params)
updates = OrderedDict()
for (param, grad) in zip(params, grads):
value = param.get_value(borrow=True)
accu = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
delta_accu = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
accu_new = ((rho * accu) + ((1 - rho) * (grad ** 2)))
updates[accu] = accu_new
update = ((grad * T.sqrt((delta_accu + epsilon))) / T.sqrt((accu_new + epsilon)))
updates[param] = (param - (learning_rate * update))
delta_accu_new = ((rho * delta_accu) + ((1 - rho) * (update ** 2)))
updates[delta_accu] = delta_accu_new
return updates
|
def adam(loss_or_grads, params, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08):
'Adam updates\n\n Adam updates implemented as in [1]_.\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to generate update expressions for\n learning_rate : float\n Learning rate\n beta1 : float\n Exponential decay rate for the first moment estimates.\n beta2 : float\n Exponential decay rate for the second moment estimates.\n epsilon : float\n Constant for numerical stability.\n\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n\n Notes\n -----\n The paper [1]_ includes an additional hyperparameter lambda. This is only\n needed to prove convergence of the algorithm and has no practical use\n (personal communication with the authors), it is therefore omitted here.\n\n References\n ----------\n .. [1] Kingma, Diederik, and Jimmy Ba (2014):\n Adam: A Method for Stochastic Optimization.\n arXiv preprint arXiv:1412.6980.\n '
all_grads = get_or_compute_grads(loss_or_grads, params)
t_prev = theano.shared(utils.floatX(0.0))
updates = OrderedDict()
t = (t_prev + 1)
a_t = ((learning_rate * T.sqrt((1 - (beta2 ** t)))) / (1 - (beta1 ** t)))
for (param, g_t) in zip(params, all_grads):
value = param.get_value(borrow=True)
m_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
v_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
m_t = ((beta1 * m_prev) + ((1 - beta1) * g_t))
v_t = ((beta2 * v_prev) + ((1 - beta2) * (g_t ** 2)))
step = ((a_t * m_t) / (T.sqrt(v_t) + epsilon))
updates[m_prev] = m_t
updates[v_prev] = v_t
updates[param] = (param - step)
updates[t_prev] = t
return updates
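# Pure-numpy sketch of the bias-correction factor a_t computed above: because the
# moment estimates m_t and v_t start at zero, the raw learning rate is rescaled at
# early steps, and a_t approaches the plain learning rate as t grows.
learning_rate, beta1, beta2 = 0.001, 0.9, 0.999
for t in (1, 10, 100, 1000):
    a_t = learning_rate * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
    print(t, a_t)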
|
def adamax(loss_or_grads, params, learning_rate=0.002, beta1=0.9, beta2=0.999, epsilon=1e-08):
'Adamax updates\n\n Adamax updates implemented as in [1]_. This is a variant of the Adam\n algorithm based on the infinity norm.\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to generate update expressions for\n learning_rate : float\n Learning rate\n beta1 : float\n Exponential decay rate for the first moment estimates.\n beta2 : float\n Exponential decay rate for the weighted infinity norm estimates.\n epsilon : float\n Constant for numerical stability.\n\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n\n References\n ----------\n .. [1] Kingma, Diederik, and Jimmy Ba (2014):\n Adam: A Method for Stochastic Optimization.\n arXiv preprint arXiv:1412.6980.\n '
all_grads = get_or_compute_grads(loss_or_grads, params)
t_prev = theano.shared(utils.floatX(0.0))
updates = OrderedDict()
t = (t_prev + 1)
a_t = (learning_rate / (1 - (beta1 ** t)))
for (param, g_t) in zip(params, all_grads):
value = param.get_value(borrow=True)
m_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
u_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
m_t = ((beta1 * m_prev) + ((1 - beta1) * g_t))
u_t = T.maximum((beta2 * u_prev), abs(g_t))
step = ((a_t * m_t) / (u_t + epsilon))
updates[m_prev] = m_t
updates[u_prev] = u_t
updates[param] = (param - step)
updates[t_prev] = t
return updates
|
def norm_constraint(tensor_var, max_norm, norm_axes=None, epsilon=1e-07):
'Max weight norm constraints and gradient clipping\n\n This takes a TensorVariable and rescales it so that incoming weight\n norms are below a specified constraint value. Vectors violating the\n constraint are rescaled so that they are within the allowed range.\n\n Parameters\n ----------\n tensor_var : TensorVariable\n Theano expression for update, gradient, or other quantity.\n max_norm : scalar\n This value sets the maximum allowed value of any norm in\n `tensor_var`.\n norm_axes : sequence (list or tuple)\n The axes over which to compute the norm. This overrides the\n default norm axes defined for the number of dimensions\n in `tensor_var`. When this is not specified and `tensor_var` is a\n matrix (2D), this is set to `(0,)`. If `tensor_var` is a 3D, 4D or\n 5D tensor, it is set to a tuple listing all axes but axis 0. The\n former default is useful for working with dense layers, the latter\n is useful for 1D, 2D and 3D convolutional layers.\n (Optional)\n epsilon : scalar, optional\n Value used to prevent numerical instability when dividing by\n very small or zero norms.\n\n Returns\n -------\n TensorVariable\n Input `tensor_var` with rescaling applied to weight vectors\n that violate the specified constraints.\n\n Examples\n --------\n >>> param = theano.shared(\n ... np.random.randn(100, 200).astype(theano.config.floatX))\n >>> update = param + 100\n >>> update = norm_constraint(update, 10)\n >>> func = theano.function([], [], updates=[(param, update)])\n >>> # Apply constrained update\n >>> _ = func()\n >>> from lasagne.utils import compute_norms\n >>> norms = compute_norms(param.get_value())\n >>> np.isclose(np.max(norms), 10)\n True\n\n Notes\n -----\n When `norm_axes` is not specified, the axes over which the norm is\n computed depend on the dimensionality of the input variable. If it is\n 2D, it is assumed to come from a dense layer, and the norm is computed\n over axis 0. If it is 3D, 4D or 5D, it is assumed to come from a\n convolutional layer and the norm is computed over all trailing axes\n beyond axis 0. For other uses, you should explicitly specify the axes\n over which to compute the norm using `norm_axes`.\n '
ndim = tensor_var.ndim
if (norm_axes is not None):
sum_over = tuple(norm_axes)
elif (ndim == 2):
sum_over = (0,)
elif (ndim in [3, 4, 5]):
sum_over = tuple(range(1, ndim))
else:
raise ValueError('Unsupported tensor dimensionality {}. Must specify `norm_axes`'.format(ndim))
dtype = np.dtype(theano.config.floatX).type
norms = T.sqrt(T.sum(T.sqr(tensor_var), axis=sum_over, keepdims=True))
target_norms = T.clip(norms, 0, dtype(max_norm))
constrained_output = (tensor_var * (target_norms / (dtype(epsilon) + norms)))
return constrained_output
|
def total_norm_constraint(tensor_vars, max_norm, epsilon=1e-07, return_norm=False):
'Rescales a list of tensors based on their combined norm\n\n If the combined norm of the input tensors exceeds the threshold then all\n tensors are rescaled such that the combined norm is equal to the threshold.\n\n Scaling the norms of the gradients is often used when training recurrent\n neural networks [1]_.\n\n Parameters\n ----------\n tensor_vars : List of TensorVariables.\n Tensors to be rescaled.\n max_norm : float\n Threshold value for total norm.\n epsilon : scalar, optional\n Value used to prevent numerical instability when dividing by\n very small or zero norms.\n return_norm : bool\n If true the total norm is also returned.\n\n Returns\n -------\n tensor_vars_scaled : list of TensorVariables\n The scaled tensor variables.\n norm : Theano scalar\n The combined norms of the input variables prior to rescaling,\n only returned if ``return_norms=True``.\n\n Examples\n --------\n >>> from lasagne.layers import InputLayer, DenseLayer\n >>> import lasagne\n >>> from lasagne.updates import sgd, total_norm_constraint\n >>> x = T.matrix()\n >>> y = T.ivector()\n >>> l_in = InputLayer((5, 10))\n >>> l1 = DenseLayer(l_in, num_units=7, nonlinearity=T.nnet.softmax)\n >>> output = lasagne.layers.get_output(l1, x)\n >>> cost = T.mean(T.nnet.categorical_crossentropy(output, y))\n >>> all_params = lasagne.layers.get_all_params(l1)\n >>> all_grads = T.grad(cost, all_params)\n >>> scaled_grads = total_norm_constraint(all_grads, 5)\n >>> updates = sgd(scaled_grads, all_params, learning_rate=0.1)\n\n Notes\n -----\n The total norm can be used to monitor training.\n\n References\n ----------\n .. [1] Sutskever, I., Vinyals, O., & Le, Q. V. (2014): Sequence to sequence\n learning with neural networks. In Advances in Neural Information\n Processing Systems (pp. 3104-3112).\n '
norm = T.sqrt(sum((T.sum((tensor ** 2)) for tensor in tensor_vars)))
dtype = np.dtype(theano.config.floatX).type
target_norm = T.clip(norm, 0, dtype(max_norm))
multiplier = (target_norm / (dtype(epsilon) + norm))
tensor_vars_scaled = [(step * multiplier) for step in tensor_vars]
if return_norm:
return (tensor_vars_scaled, norm)
else:
return tensor_vars_scaled
|
def floatX(arr):
'Converts data to a numpy array of dtype ``theano.config.floatX``.\n\n Parameters\n ----------\n arr : array_like\n The data to be converted.\n\n Returns\n -------\n numpy ndarray\n The input array in the ``floatX`` dtype configured for Theano.\n If `arr` is an ndarray of correct dtype, it is returned as is.\n '
return np.asarray(arr, dtype=theano.config.floatX)
|
def shared_empty(dim=2, dtype=None):
'Creates empty Theano shared variable.\n\n Shortcut to create an empty Theano shared variable with\n the specified number of dimensions.\n\n Parameters\n ----------\n dim : int, optional\n The number of dimensions for the empty variable, defaults to 2.\n dtype : a numpy data-type, optional\n The desired dtype for the variable. Defaults to the Theano\n ``floatX`` dtype.\n\n Returns\n -------\n Theano shared variable\n An empty Theano shared variable of dtype ``dtype`` with\n `dim` dimensions.\n '
if (dtype is None):
dtype = theano.config.floatX
shp = tuple(([1] * dim))
return theano.shared(np.zeros(shp, dtype=dtype))
|
def as_theano_expression(input):
'Wrap as Theano expression.\n\n Wraps the given input as a Theano constant if it is not\n a valid Theano expression already. Useful to transparently\n handle numpy arrays and Python scalars, for example.\n\n Parameters\n ----------\n input : number, numpy array or Theano expression\n Expression to be converted to a Theano constant.\n\n Returns\n -------\n Theano symbolic constant\n Theano constant version of `input`.\n '
if isinstance(input, theano.gof.Variable):
return input
else:
try:
return theano.tensor.constant(input)
except Exception as e:
raise TypeError(('Input of type %s is not a Theano expression and cannot be wrapped as a Theano constant (original exception: %s)' % (type(input), e)))
|
def collect_shared_vars(expressions):
'Returns all shared variables the given expression(s) depend on.\n\n Parameters\n ----------\n expressions : Theano expression or iterable of Theano expressions\n The expressions to collect shared variables from.\n\n Returns\n -------\n list of Theano shared variables\n All shared variables the given expression(s) depend on, in fixed order\n (as found by a left-recursive depth-first search). If some expressions\n are shared variables themselves, they are included in the result.\n '
if isinstance(expressions, theano.Variable):
expressions = [expressions]
return [v for v in theano.gof.graph.inputs(reversed(expressions)) if isinstance(v, theano.compile.SharedVariable)]
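# Minimal sketch for collect_shared_vars(): given an expression, it returns the
# shared variables the graph depends on, e.g. to feed them to get_or_compute_grads()
# above (w and b are illustrative names).
w = theano.shared(np.ones(3, dtype=theano.config.floatX), name='w')
b = theano.shared(np.zeros(3, dtype=theano.config.floatX), name='b')
expr = T.sum(w * 2 + b)
collect_shared_vars(expr)  # typically [w, b], in graph traversal order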
|
def one_hot(x, m=None):
'One-hot representation of integer vector.\n\n Given a vector of integers from 0 to m-1, returns a matrix\n with a one-hot representation, where each row corresponds\n to an element of x.\n\n Parameters\n ----------\n x : integer vector\n The integer vector to convert to a one-hot representation.\n m : int, optional\n The number of different columns for the one-hot representation. This\n needs to be strictly greater than the maximum value of `x`.\n Defaults to ``max(x) + 1``.\n\n Returns\n -------\n Theano tensor variable\n A Theano tensor variable of shape (``n``, `m`), where ``n`` is the\n length of `x`, with the one-hot representation of `x`.\n\n Notes\n -----\n If your integer vector represents target class memberships, and you wish to\n compute the cross-entropy between predictions and the target class\n memberships, then there is no need to use this function, since the function\n :func:`lasagne.objectives.categorical_crossentropy()` can compute the\n cross-entropy from the integer vector directly.\n\n '
if (m is None):
m = T.cast((T.max(x) + 1), 'int32')
return T.eye(m)[T.cast(x, 'int32')]
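# Minimal sketch for one_hot(): each integer selects one row of an m x m identity
# matrix (names are illustrative; assumes the usual theano/T/np imports above).
labels = T.ivector('labels')
onehot_fn = theano.function([labels], one_hot(labels, m=4))
onehot_fn(np.array([0, 2, 3], dtype='int32'))
# -> [[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]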
|
def unique(l):
'Filters duplicates from an iterable.\n\n Create a new list from l with duplicate entries removed,\n while preserving the original order.\n\n Parameters\n ----------\n l : iterable\n Input iterable to filter duplicates from.\n\n Returns\n -------\n list\n A list of elements of `l` without duplicates and in the same order.\n '
new_list = []
seen = set()
for el in l:
if (el not in seen):
new_list.append(el)
seen.add(el)
return new_list
|
def as_tuple(x, N, t=None):
'\n Coerce a value to a tuple of given length (and possibly given type).\n\n Parameters\n ----------\n x : value or iterable\n N : integer\n length of the desired tuple\n t : type, optional\n required type for all elements\n\n Returns\n -------\n tuple\n ``tuple(x)`` if `x` is iterable, ``(x,) * N`` otherwise.\n\n Raises\n ------\n TypeError\n if `type` is given and `x` or any of its elements do not match it\n ValueError\n if `x` is iterable, but does not have exactly `N` elements\n '
try:
X = tuple(x)
except TypeError:
X = ((x,) * N)
if ((t is not None) and (not all((isinstance(v, t) for v in X)))):
raise TypeError('expected a single value or an iterable of {0}, got {1} instead'.format(t.__name__, x))
if (len(X) != N):
raise ValueError('expected a single value or an iterable with length {0}, got {1} instead'.format(N, x))
return X
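# Quick illustration of as_tuple(): scalars are broadcast to the requested length,
# while iterables are validated against it (and optionally against the type t).
as_tuple(3, 2)             # -> (3, 3)
as_tuple((1, 2), 2, int)   # -> (1, 2)
# as_tuple((1, 2, 3), 2) raises ValueError; as_tuple((1.0, 2.0), 2, int) raises TypeError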
|
def compute_norms(array, norm_axes=None):
' Compute incoming weight vector norms.\n\n Parameters\n ----------\n array : ndarray\n Weight array.\n norm_axes : sequence (list or tuple)\n The axes over which to compute the norm. This overrides the\n default norm axes defined for the number of dimensions\n in `array`. When this is not specified and `array` is a 2D array,\n this is set to `(0,)`. If `array` is a 3D, 4D or 5D array, it is\n set to a tuple listing all axes but axis 0. The former default is\n useful for working with dense layers, the latter is useful for 1D,\n 2D and 3D convolutional layers.\n (Optional)\n\n Returns\n -------\n norms : 1D array\n 1D array of incoming weight vector norms.\n\n Examples\n --------\n >>> array = np.random.randn(100, 200)\n >>> norms = compute_norms(array)\n >>> norms.shape\n (200,)\n\n >>> norms = compute_norms(array, norm_axes=(1,))\n >>> norms.shape\n (100,)\n '
ndim = array.ndim
if (norm_axes is not None):
sum_over = tuple(norm_axes)
elif (ndim == 2):
sum_over = (0,)
elif (ndim in [3, 4, 5]):
sum_over = tuple(range(1, ndim))
else:
raise ValueError('Unsupported tensor dimensionality {}. Must specify `norm_axes`'.format(array.ndim))
norms = np.sqrt(np.sum((array ** 2), axis=sum_over))
return norms
|
def create_param(spec, shape, name=None):
'\n Helper method to create Theano shared variables for layer parameters\n and to initialize them.\n\n Parameters\n ----------\n spec : numpy array, Theano expression, or callable\n Either of the following:\n\n * a numpy array with the initial parameter values\n * a Theano expression or shared variable representing the parameters\n * a function or callable that takes the desired shape of\n the parameter array as its single argument and returns\n a numpy array.\n\n shape : iterable of int\n a tuple or other iterable of integers representing the desired\n shape of the parameter array.\n\n name : string, optional\n If a new variable is created, the name to give to the parameter\n variable. This is ignored if `spec` is already a Theano expression\n or shared variable.\n\n Returns\n -------\n Theano shared variable or Theano expression\n A Theano shared variable or expression representing layer parameters.\n If a numpy array was provided, a shared variable is initialized to\n contain this array. If a shared variable or expression was provided,\n it is simply returned. If a callable was provided, it is called, and\n its output is used to initialize a shared variable.\n\n Notes\n -----\n This function is called by :meth:`Layer.add_param()` in the constructor\n of most :class:`Layer` subclasses. This enables those layers to\n support initialization with numpy arrays, existing Theano shared variables\n or expressions, and callables for generating initial parameter values.\n '
shape = tuple(shape)
if any(((d <= 0) for d in shape)):
raise ValueError(('Cannot create param with a non-positive shape dimension. Tried to create param with shape=%r, name=%r' % (shape, name)))
if isinstance(spec, theano.Variable):
if (spec.ndim != len(shape)):
raise RuntimeError(('parameter variable has %d dimensions, should be %d' % (spec.ndim, len(shape))))
return spec
elif isinstance(spec, np.ndarray):
if (spec.shape != shape):
raise RuntimeError(('parameter array has shape %s, should be %s' % (spec.shape, shape)))
return theano.shared(spec, name=name)
elif hasattr(spec, '__call__'):
arr = spec(shape)
try:
arr = floatX(arr)
except Exception:
raise RuntimeError('cannot initialize parameters: the provided callable did not return an array-like value')
if (arr.shape != shape):
raise RuntimeError('cannot initialize parameters: the provided callable did not return a value with the correct shape')
return theano.shared(arr, name=name)
else:
raise RuntimeError("cannot initialize parameters: 'spec' is not a numpy array, a Theano expression, or a callable")
|
def unroll_scan(fn, sequences, outputs_info, non_sequences, n_steps, go_backwards=False):
'\n Helper function to unroll for loops. Can be used to unroll theano.scan.\n The parameter names are identical to theano.scan, please refer to the\n theano.scan documentation for more information.\n\n Note that this function does not support the truncate_gradient\n setting from theano.scan.\n\n Parameters\n ----------\n\n fn : function\n Function that defines calculations at each step.\n\n sequences : TensorVariable or list of TensorVariables\n List of TensorVariable with sequence data. The function iterates\n over the first dimension of each TensorVariable.\n\n outputs_info : list of TensorVariables\n List of tensors specifying the initial values for each recurrent\n value.\n\n non_sequences: list of TensorVariables\n List of theano.shared variables that are used in the step function.\n\n n_steps: int\n Number of steps to unroll.\n\n go_backwards: bool\n If true the recursion starts at sequences[-1] and iterates\n backwards.\n\n Returns\n -------\n List of TensorVariables. Each element in the list gives the recurrent\n values at each time step.\n\n '
if (not isinstance(sequences, (list, tuple))):
sequences = [sequences]
counter = range(n_steps)
if go_backwards:
counter = counter[::(- 1)]
output = []
prev_vals = outputs_info
for i in counter:
step_input = (([s[i] for s in sequences] + prev_vals) + non_sequences)
out_ = fn(*step_input)
if isinstance(out_, T.TensorVariable):
out_ = [out_]
if isinstance(out_, tuple):
out_ = list(out_)
output.append(out_)
prev_vals = output[(- 1)]
output_scan = []
for i in range(len(output[0])):
l = map((lambda x: x[i]), output)
output_scan.append(T.stack(*l))
return output_scan
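# Minimal sketch for unroll_scan(): a cumulative sum over the leading axis of a
# sequence, with the loop unrolled into the graph instead of using theano.scan
# (illustrative; the compiled function expects inputs with at least 4 rows of 3).
seq = T.matrix('seq')
step = lambda s_t, acc: s_t + acc
outs = unroll_scan(step, sequences=[seq], outputs_info=[T.zeros((3,))],
                   non_sequences=[], n_steps=4)
cumsum_fn = theano.function([seq], outs[0])  # output shape (4, 3)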
|
class CustomInstall(install):
def run(self):
install.run(self)
os.system('pip3 install -r requirements.txt --ignore-installed')
os.system('pip3 uninstall transformers -y')
os.system('pip install git+https://github.com/jordiclive/transformers.git@controlprefixes --ignore-installed')
os.system('pip3 install torchtext==0.8.0 torch==1.7.1')
|
def count_trainable_parameters(model):
model_parameters = filter((lambda p: p.requires_grad), model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return params
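# Hedged sketch for count_trainable_parameters() (assumes torch is imported, as in
# the surrounding training code): only parameters with requires_grad are counted.
linear = torch.nn.Linear(10, 2)
linear.bias.requires_grad = False
count_trainable_parameters(linear)  # -> 20 (the 10x2 weight matrix; bias excluded)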
|
class Seq2SeqLoggingCallback(pl.Callback):
def on_batch_end(self, trainer, pl_module):
lrs = {f'lr_group_{i}': param['lr'] for (i, param) in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(lrs)
@rank_zero_only
def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****')
metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for (k, v) in metrics.items() if (k not in ['log', 'progress_bar', 'preds'])})
od = Path(pl_module.hparams.output_dir)
if (type_path == 'test'):
results_file = (od / 'test_results.txt')
generations_file = (od / 'test_generations.txt')
else:
results_file = (od / f'{type_path}_results/{trainer.global_step:05d}.txt')
generations_file = (od / f'{type_path}_generations/{trainer.global_step:05d}.txt')
results_file.parent.mkdir(exist_ok=True)
generations_file.parent.mkdir(exist_ok=True)
with open(results_file, 'a+') as writer:
for key in sorted(metrics):
if (key in ['log', 'progress_bar', 'preds']):
continue
val = metrics[key]
if isinstance(val, torch.Tensor):
val = val.item()
msg = f'''{key}: {val:.6f}
'''
writer.write(msg)
if (not save_generations):
return
if ('preds' in metrics):
content = '\n'.join(metrics['preds'])
generations_file.open('w+').write(content)
@rank_zero_only
def on_train_start(self, trainer, pl_module):
try:
npars = pl_module.model.model.num_parameters()
except AttributeError:
npars = pl_module.model.num_parameters()
n_trainable_pars = count_trainable_parameters(pl_module)
trainer.logger.log_metrics({'n_params': npars, 'mp': (npars / 1000000.0), 'grad_mp': (n_trainable_pars / 1000000.0)})
@rank_zero_only
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
save_json(pl_module.metrics, pl_module.metrics_save_path)
return self._write_logs(trainer, pl_module, 'test')
@rank_zero_only
def on_validation_end(self, trainer: pl.Trainer, pl_module):
save_json(pl_module.metrics, pl_module.metrics_save_path)
|
def bespoke_scheduler(optimizer, num_warmup_steps, num_training_steps, last_epoch=(- 1)):
'\n Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after\n a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. Two lambdas are\n returned, one per optimizer parameter group (so the optimizer is expected to have two groups): the first decays to\n 0 over ``num_training_steps``, the second decays more slowly, reaching 0 only after three times as many steps.\n\n Args:\n optimizer (:class:`~torch.optim.Optimizer`):\n The optimizer for which to schedule the learning rate.\n num_warmup_steps (:obj:`int`):\n The number of steps for the warmup phase.\n num_training_steps (:obj:`int`):\n The total number of training steps.\n last_epoch (:obj:`int`, `optional`, defaults to -1):\n The index of the last epoch when resuming training.\n\n Return:\n :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedules.\n '
def lr_lambda(current_step: int):
if (current_step < num_warmup_steps):
return (float(current_step) / float(max(1, num_warmup_steps)))
return max(0.0, (float((num_training_steps - current_step)) / float(max(1, (num_training_steps - num_warmup_steps)))))
def lr_lambda2(current_step: int):
if (current_step < num_warmup_steps):
return (float(current_step) / float(max(1, num_warmup_steps)))
return max(0.0, (float(((num_training_steps * 3) - current_step)) / float(max(1, ((num_training_steps * 3) - num_warmup_steps)))))
return LambdaLR(optimizer, [lr_lambda, lr_lambda2], last_epoch)
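# Sketch of how bespoke_scheduler() is meant to be used (illustrative module and
# optimizer, not from the original code): LambdaLR pairs the two lambdas with two
# optimizer parameter groups, so the second group's learning rate decays roughly
# three times more slowly than the first.
net = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW([
    {'params': [net.weight], 'lr': 1e-4},
    {'params': [net.bias], 'lr': 1e-3},
])
scheduler = bespoke_scheduler(optimizer, num_warmup_steps=100, num_training_steps=1000)
# call scheduler.step() after each optimizer.step(), as with any LambdaLR schedule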
|
class PrefixModule(PrefixTransformer):
mode = 'datatotext'
loss_names = ['loss']
metric_names = ['sacrebleu']
default_val_metric = 'bleu'
def __init__(self, hparams, **kwargs):
if (hparams.sortish_sampler and (hparams.gpus > 1)):
hparams.replace_sampler_ddp = False
elif (hparams.max_tokens_per_batch is not None):
if (hparams.gpus > 1):
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')
super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
use_task_specific_params(self.model, 'datatotext')
self.metrics_save_path = (Path(self.output_dir) / 'metrics.json')
self.hparams_save_path = (Path(self.output_dir) / 'hparams.pkl')
pickle_save(self.hparams, self.hparams_save_path)
self.step_count = 0
self.metrics = defaultdict(list)
self.model_type = self.config.model_type
self.vocab_size = (self.config.tgt_vocab_size if (self.model_type == 'fsmt') else self.config.vocab_size)
if self.hparams.T5_preamble:
p = 'translate Graph to English:'
else:
p = None
self.dataset_kwargs: dict = dict(data_dir=self.hparams.data_dir, max_source_length=self.hparams.max_source_length, prefix=p)
n_observations_per_split = {'train': self.hparams.n_train, 'val': self.hparams.n_val, 'test_seen': self.hparams.n_test, 'test_unseen': self.hparams.n_test, 'test_both': self.hparams.n_test}
self.n_obs = {k: (v if (v >= 0) else None) for (k, v) in n_observations_per_split.items()}
self.target_lens = {'train': self.hparams.max_target_length, 'val': self.hparams.val_max_target_length, 'test': self.hparams.test_max_target_length, 'test_seen': self.hparams.test_max_target_length, 'test_unseen': self.hparams.test_max_target_length, 'test_both': self.hparams.test_max_target_length}
assert (self.target_lens['train'] <= self.target_lens['val']), f'target_lens: {self.target_lens}'
assert (self.target_lens['train'] <= self.target_lens['test']), f'target_lens: {self.target_lens}'
freeze_params(self.seq2seq_model)
assert_all_frozen(self.seq2seq_model)
if self.hparams.new_tokens:
self.seq2seq_model.resize_token_embeddings(len(self.tokenizer))
make_new_embeddings_learnable(self.seq2seq_model, len(self.tokenizer), self.new_token_len)
freeze_params(self.seq2seq_model)
assert_all_frozen(self.seq2seq_model)
self.seq2seq_model.shared.trainable_weight.requires_grad = True
if self.hparams.freeze_base:
rank_zero_info('Freezing Base')
freeze_prefix(self.model)
self.num_workers = hparams.num_workers
self.decoder_start_token_id = None
self.dataset_class = (Seq2SeqDataset if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else None)
if self.hparams.DART:
self.dataset_class = (Seq2SeqDatasetSingle if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else None)
self.eval_beams = (self.model.config.num_beams if (self.hparams.eval_beams is None) else self.hparams.eval_beams)
assert (self.eval_beams >= 1), f'got self.eval_beams={self.eval_beams}. Need an integer >= 1'
if (self.hparams.eval_max_gen_length is not None):
self.eval_max_length = self.hparams.eval_max_gen_length
else:
self.eval_max_length = self.model.config.max_length
self.val_metric = (self.default_val_metric if (self.hparams.val_metric is None) else self.hparams.val_metric)
self.training_acc_across_batches_at_curr_epoch = []
self.eval_min_length = self.hparams.eval_min_length
rank_zero_info('for decoding, eval_max_length={}, eval_min_length={}, eval_beams={}'.format(self.eval_max_length, self.eval_min_length, self.eval_beams))
if self.hparams.restart_with_embed:
self.seq2seq_model.shared.trainable_weight = self.model.es.trainable_weight
self.seq2seq_model.encoder.embed_tokens.trainable_weight = self.model.es.trainable_weight
def freeze_embeds(self):
'Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.'
if (self.model_type == 't5'):
freeze_params(self.model.shared)
for d in [self.model.encoder, self.model.decoder]:
freeze_params(d.embed_tokens)
elif (self.model_type == 'fsmt'):
for d in [self.model.model.encoder, self.model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
else:
freeze_params(self.model.model.shared)
for d in [self.model.model.encoder, self.model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
def forward(self, input_ids, **kwargs):
return self.model(input_ids, frozen_model=self.seq2seq_model, **kwargs)
def ids_to_clean_text(self, generated_ids: List[int]):
gen_text = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
return lmap(str.strip, gen_text)
def _step(self, batch: dict) -> Tuple:
pad_token_id = self.tokenizer.pad_token_id
(src_ids, src_mask) = (batch['input_ids'], batch['attention_mask'])
tgt_ids = batch['labels']
decoder_input_ids = self.seq2seq_model._shift_right(tgt_ids)
if self.hparams.DART:
outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False, use_prefix=True, conditional_info={'sources': batch['sources']})
else:
outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False, use_prefix=True, conditional_info={'cats': batch['cats'], 'sources': batch['sources']})
lm_logits = outputs[0]
if (self.hparams.label_smoothing == 0):
ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id)
loss = ce_loss_fct(lm_logits.view((- 1), lm_logits.shape[(- 1)]), tgt_ids.view((- 1)))
else:
lprobs = torch.nn.functional.log_softmax(lm_logits, dim=(- 1))
(loss, nll_loss) = label_smoothed_nll_loss(lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id)
return (loss,)
@property
def pad(self) -> int:
return self.tokenizer.pad_token_id
def training_step(self, batch, batch_idx) -> Dict:
if (batch_idx == 0):
if self.hparams.DART:
print(batch['sources'])
else:
print(batch['cats'])
print(batch['sources'])
print('Trainable', self.seq2seq_model.encoder.embed_tokens.trainable_weight)
rank_zero_info(f'step {self.step_count}')
loss_tensors = self._step(batch)
logs = {name: loss for (name, loss) in zip(self.loss_names, loss_tensors)}
logs['tpb'] = (batch['input_ids'].ne(self.pad).sum() + batch['labels'].ne(self.pad).sum())
logs['bs'] = batch['input_ids'].shape[0]
logs['src_pad_tok'] = batch['input_ids'].eq(self.pad).sum()
logs['src_pad_frac'] = batch['input_ids'].eq(self.pad).float().mean()
self.training_acc_across_batches_at_curr_epoch.append(loss_tensors[0].item())
self.log_dict(logs)
loss = loss_tensors[0]
return {'loss': loss}
def on_epoch_end(self):
train_acc_mean = np.mean(self.training_acc_across_batches_at_curr_epoch)
self.log_dict({'train_loss': train_acc_mean})
rank_zero_info('train_loss = {}'.format(train_acc_mean))
self.training_acc_across_batches_at_curr_epoch = []
def validation_step(self, batch, batch_idx) -> Dict:
if self.hparams.hf_checkpoint:
print(self.model.es.trainable_weight)
print('SEQ', self.seq2seq_model.shared.trainable_weight)
self.model.es.trainable_weight = self.seq2seq_model.shared.trainable_weight
rank_zero_info(f'Prefix_stored_weight {self.model.es.trainable_weight}')
save_path = Path(self.hparams.save_hf)
save_path = save_path.joinpath('checkpoint-curr_best')
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
rank_zero_info('SAVING TO checkpoint {}'.format(save_path))
raise ValueError('just_saving')
return self._generative_step(batch)
def validation_epoch_end(self, outputs, prefix='val') -> Dict:
self.step_count += 1
val_outputs_folder = 'val_outputs'
os.system(('mkdir -p ' + os.path.join(self.hparams.output_dir, val_outputs_folder)))
if (prefix == 'val'):
output_test_predictions_file = os.path.join(self.hparams.output_dir, val_outputs_folder, (('validation_predictions_' + str(self.step_count)) + '.txt'))
output_test_targets_file = os.path.join(self.hparams.output_dir, val_outputs_folder, (('validation_targets_' + str(self.step_count)) + '.txt'))
output_no_process = os.path.join(self.hparams.output_dir, val_outputs_folder, (('output_no_process_' + str(self.step_count)) + '.txt'))
with open(output_test_predictions_file, 'w') as p_writer, open(output_test_targets_file, 'w') as t_writer, open(output_no_process, 'w') as v_writer:
for output_batch in outputs:
p_writer.writelines(((convert_text(s) + '\n') for s in output_batch['preds']))
t_writer.writelines(((convert_text(s) + '\n') for s in output_batch['target']))
v_writer.writelines(((s + '\n') for s in output_batch['preds']))
p_writer.close()
t_writer.close()
v_writer.close()
bleu_info = eval_bleu(self.hparams.data_dir, output_test_predictions_file, 'val')
rank_zero_info(f'bleu_info at step {self.step_count}: {bleu_info}')
if (bleu_info == (- 1)):
bleu_info = float(bleu_info)
else:
bleu_info = float(bleu_info.split(',')[0].split('BLEU = ')[1])
losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
loss = losses['loss']
generative_metrics = {k: np.array([x[k] for x in outputs]).mean() for k in (self.metric_names + ['gen_time', 'gen_len'])}
generative_metrics['bleu'] = bleu_info
metric_val = (generative_metrics[self.val_metric] if (self.val_metric in generative_metrics) else losses[self.val_metric])
self.log('bleu', bleu_info)
self.log('VAL_LOSS', loss)
metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
generative_metrics.update({k: v.item() for (k, v) in losses.items()})
losses.update(generative_metrics)
all_metrics = {f'{prefix}_avg_{k}': x for (k, x) in losses.items()}
all_metrics['step_count'] = self.step_count
self.metrics[prefix].append(all_metrics)
preds = flatten_list([x['preds'] for x in outputs])
if (prefix == 'val'):
self.log_dict({'log': all_metrics, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor})
return {'bleu': bleu_info, 'log': all_metrics, 'preds': preds, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor}
else:
data_logs = {}
for output in outputs:
dataset_idx = output[0]['dataloader_idx']
if (dataset_idx == 0):
dataset_name = 'test_both'
elif (dataset_idx == 1):
dataset_name = 'test_seen'
else:
dataset_name = 'test_unseen'
if (output[0]['bleu'] == (- 1)):
bleu_info = float(output[0]['bleu'])
else:
bleu_info = float(output[0]['bleu'].split(',')[0].split('BLEU = ')[1])
losses = {k: torch.stack([x[k] for x in output]).mean() for k in self.loss_names}
loss = losses['loss']
generative_metrics = {k: np.array([x[k] for x in output]).mean() for k in (self.metric_names + ['gen_time', 'gen_len'])}
generative_metrics['bleu'] = bleu_info
metric_val = (generative_metrics[self.val_metric] if (self.val_metric in generative_metrics) else losses[self.val_metric])
metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
generative_metrics.update({k: v.item() for (k, v) in losses.items()})
losses.update(generative_metrics)
all_metrics = {f'{prefix}_avg_{k}': x for (k, x) in losses.items()}
all_metrics['step_count'] = self.step_count
self.metrics[prefix].append(all_metrics)
preds = flatten_list([x['preds'] for x in output])
data_logs.update({(('log' + '_') + dataset_name): all_metrics, (('preds' + '_') + dataset_name): preds, ((f'{prefix}_loss' + '_') + dataset_name): loss, ((f'{prefix}_{self.val_metric}' + '_') + dataset_name): metric_tensor})
return data_logs
def calc_generative_metrics(self, preds, target) -> Dict:
return calculate_bleu(preds, target)
def _generative_step(self, batch: dict, batch_idx=None, dataloader_idx=None) -> dict:
t0 = time.time()
bsz = batch['input_ids'].size(0)
if self.hparams.DART:
prefix_prompt = self.model.get_prompt(bsz=bsz, sample_size=self.eval_beams, conditional_info={'sources': batch['sources']})
else:
prefix_prompt = self.model.get_prompt(bsz=bsz, sample_size=self.eval_beams, conditional_info={'cats': batch['cats'], 'sources': batch['sources']})
generated_ids = self.seq2seq_model.generate(batch['input_ids'], past_key_values=prefix_prompt, attention_mask=batch['attention_mask'], use_cache=True, length_penalty=self.hparams.length_penalty, use_prefix=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, min_length=self.eval_min_length, max_length=self.eval_max_length)
gen_time = ((time.time() - t0) / batch['input_ids'].shape[0])
preds: List[str] = self.ids_to_clean_text(generated_ids)
target: List[str] = self.ids_to_clean_text(batch['labels'])
loss_tensors = self._step(batch)
base_metrics = {name: loss for (name, loss) in zip(self.loss_names, loss_tensors)}
rouge: Dict = self.calc_generative_metrics(preds, target)
summ_len = np.mean(lmap(len, generated_ids))
base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
if (dataloader_idx is not None):
base_metrics.update(batch_idx=batch_idx, dataloader_idx=dataloader_idx)
return base_metrics
def test_step(self, batch, batch_idx, dataloader_idx):
if (batch_idx == 0):
rank_zero_info(f'Trainable {self.seq2seq_model.shared.trainable_weight}')
return self._generative_step(batch, batch_idx, dataloader_idx)
def test_epoch_end(self, outputs_all_testsets):
pickle_save(outputs_all_testsets, 'outputs_all_testsets.pkl')
val_outputs_folder = 'val_outputs'
os.system(('mkdir -p ' + os.path.join(self.hparams.output_dir, val_outputs_folder)))
for outputs in outputs_all_testsets:
dataset_idx = outputs[0]['dataloader_idx']
if (dataset_idx == 0):
file_name = 'test_both_predictions.txt'
file_name_tgt = 'test_both_targets.txt'
dataset_name = 'test_both'
elif (dataset_idx == 1):
file_name = 'test_seen_predictions.txt'
file_name_tgt = 'test_seen_targets.txt'
dataset_name = 'test_seen'
else:
file_name = 'test_unseen_predictions.txt'
file_name_tgt = 'test_unseen_targets.txt'
dataset_name = 'test_unseen'
file_name += '.debug'
file_name_tgt += '.debug'
output_test_predictions_file = os.path.join(self.hparams.output_dir, val_outputs_folder, file_name)
output_test_targets_file = os.path.join(self.hparams.output_dir, val_outputs_folder, file_name_tgt)
output_no_process = os.path.join(self.hparams.output_dir, val_outputs_folder, (file_name + 'output_no_process'))
with open(output_test_predictions_file, 'w') as p_writer, open(output_test_targets_file, 'w') as t_writer, open(output_no_process, 'w') as v_writer:
for output_batch in outputs:
p_writer.writelines(((convert_text(s) + '\n') for s in output_batch['preds']))
t_writer.writelines(((convert_text(s) + '\n') for s in output_batch['target']))
v_writer.writelines(((s + '\n') for s in output_batch['preds']))
p_writer.close()
t_writer.close()
v_writer.close()
bleu_info = eval_bleu(self.hparams.data_dir, output_test_predictions_file, dataset_name)
meteor_info = eval_meteor_test_webnlg(self.hparams.data_dir, output_test_predictions_file, dataset_name)
chrf_info = eval_chrf_test_webnlg(self.hparams.data_dir, output_test_predictions_file, dataset_name)
print(f' {dataset_name} - bleu_info: {bleu_info}')
print(f' {dataset_name} - meteor_info: {meteor_info}')
print(f' {dataset_name} - chrf_info: {chrf_info}')
outputs[0]['bleu'] = bleu_info
return self.validation_epoch_end(outputs_all_testsets, prefix='test')
def get_dataset(self, type_path):
n_obs = self.n_obs[type_path]
max_target_length = self.target_lens[type_path]
dataset = self.dataset_class(self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs)
return dataset
def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool=False) -> DataLoader:
dataset = self.get_dataset(type_path)
if (self.hparams.sortish_sampler and (type_path != 'test')):
sampler = dataset.make_sortish_sampler(batch_size, distributed=(self.hparams.gpus > 1))
return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler)
elif ((self.hparams.max_tokens_per_batch is not None) and (type_path != 'test')):
batch_sampler = dataset.make_dynamic_sampler(self.hparams.max_tokens_per_batch, distributed=(self.hparams.gpus > 1))
return DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers)
else:
return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None)
def train_dataloader(self) -> DataLoader:
dataloader = self.get_dataloader('train', batch_size=self.hparams.train_batch_size, shuffle=True)
return dataloader
def val_dataloader(self) -> DataLoader:
return self.get_dataloader('val', batch_size=self.hparams.eval_batch_size)
def test_dataloader(self) -> List[DataLoader]:
test_dataloader = self.get_dataloader('test_both', batch_size=self.hparams.eval_batch_size)
if self.hparams.DART:
return [test_dataloader]
test_seen_dataloader = self.get_dataloader('test_seen', batch_size=self.hparams.eval_batch_size)
test_unseen_dataloader = self.get_dataloader('test_unseen', batch_size=self.hparams.eval_batch_size)
return [test_dataloader, test_seen_dataloader, test_unseen_dataloader]
@staticmethod
def add_model_specific_args(parser, root_dir):
PrefixTransformer.add_model_specific_args(parser, root_dir)
add_generic_args(parser, root_dir)
parser.add_argument('--max_source_length', default=512, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--max_target_length', default=60, type=int, help='The maximum total target sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--val_dir', default='', type=str, help='The directory for validation')
parser.add_argument('--skip_train', type=bool, default=False)
parser.add_argument('--val_max_target_length', default=60, type=int, help='The maximum total validation target length specified for generation')
parser.add_argument('--test_max_target_length', default=100, type=int, help='The maximum total test target length specified for generation')
parser.add_argument('--freeze_encoder', action='store_true')
parser.add_argument('--freeze_embeds', action='store_true')
parser.add_argument('--sortish_sampler', action='store_true', default=False)
parser.add_argument('--max_tokens_per_batch', type=int, default=None)
parser.add_argument('--logger_name', type=str, choices=['default', 'wandb', 'wandb_shared'], default='default')
parser.add_argument('--n_train', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--n_val', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--n_test', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--task_mode', type=str, default='datatotext', required=False, help='task mode, e.g. datatotext.')
parser.add_argument('--label_smoothing', type=float, default=0.0, required=False)
parser.add_argument('--src_lang', type=str, default='', required=False)
parser.add_argument('--save_hf', type=str, default='', required=False)
parser.add_argument('--tgt_lang', type=str, default='', required=False)
parser.add_argument('--eval_beams', type=int, default=6, required=False)
parser.add_argument('--eval_min_length', type=int, default=10, required=False)
parser.add_argument('--skip_val', type=bool, default=False, required=False)
parser.add_argument('--val_metric', type=str, default=None, required=False)
parser.add_argument('--eval_max_gen_length', type=int, default=60, help='never generate more than n tokens')
parser.add_argument('--length_penalty', type=float, default=1.0, help='length penalty specified for beam search')
parser.add_argument('--save_top_k', type=int, default=1, required=False, help='How many checkpoints to save')
parser.add_argument('--wb_project', type=str, default='', help='wandb project name')
parser.add_argument('--git', type=bool, default=True)
parser.add_argument('--dev', type=bool, default=False)
parser.add_argument('--freeze_base', type=bool, default=False)
parser.add_argument('--wb_name', type=str, default='', help='wandb run name')
parser.add_argument('--wb_entity', type=str, default='', help='wandb entity')
parser.add_argument('--id', type=str, default=None, help='wandb id if continuing a run')
parser.add_argument('--DART', default=False, type=bool, help='if running on the DART dataset rather than WebNLG, only one test dataloader is required')
parser.add_argument('--early_stopping_patience', type=int, default=(- 1), required=False, help='-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So val_check_interval will effect it.')
parser.add_argument('--T5_preamble', type=bool, default=False, required=False, help='Add the T5 preamble, e.g. "translate graph to text", to every input')
parser.add_argument('--restart_with_embed', type=bool, default=False, required=False, help='Set to True if working with special tokens. These methods use a fixed LM, so the embedding matrix is frozen apart from a few special tokens, e.g. <H>, <R>, <T>. Important if continuing from a checkpoint.')
return parser
|
def eval(args, model=None):
if (model is None):
if ('datatotext' in args.task_mode):
if (args.tuning_mode == 'prefixtune'):
model = PrefixModule(args)
rank_zero_info('the length penalty is {}'.format(args.length_penalty))
with torch.no_grad():
model.eval()
model = model.cuda()
data_loader = model.test_dataloader()
rank_zero_info('DATALOADER_LEN {}'.format(len(data_loader)))
out_lst = []
for (batch_idx, batch) in enumerate(data_loader):
batch = model.transfer_batch_to_device(batch, model.device)
out = model.test_step(batch, batch_idx)
out_lst.append(out)
if ((batch_idx % 50) == 0):
rank_zero_info(model.test_epoch_end(out_lst))
rank_zero_info(out['preds'])
result = model.test_epoch_end(out_lst)
for (k, v) in result.items():
if (k != 'preds'):
rank_zero_info('FINAL_RESULTS')
rank_zero_info('{}: {}'.format(k, v))
out_path = os.path.join(args.output_dir, 'test_beam_{}'.format(args.length_penalty))
rank_zero_info(f'writing the test results to {out_path}')
with open(out_path, 'w') as f:
for preds in result['preds']:
print(preds, file=f)
|
def main(args, model=None):
Path(args.output_dir).mkdir(exist_ok=True)
if (model is None):
if ('datatotext' in args.task_mode):
if (args.tuning_mode == 'prefixtune'):
model = PrefixModule(args)
pickle_save(model.hparams, (model.output_dir / 'hparams.pkl'))
if ((args.logger_name == 'default') or args.fast_dev_run or str(args.output_dir).startswith('/tmp') or str(args.output_dir).startswith('/var')):
logger = True
elif (args.logger_name == 'wandb'):
from pytorch_lightning.loggers import WandbLogger
if (args.id is not None):
id_ = args.id
else:
id_ = wandb.util.generate_id()
rank_zero_info(f'ID {id_}')
logger = WandbLogger(id=id_, name=args.wb_name, project=args.wb_project, entity=args.wb_entity)
if args.skip_train:
print('ES', model.model.es.trainable_weight)
print('Seq', model.seq2seq_model.shared.trainable_weight)
model.seq2seq_model.shared.trainable_weight = model.model.es.trainable_weight
trainer = pl.Trainer(gpus=1, precision=32)
trainer.test(model)
print('ES', model.model.es.trainable_weight)
return model
trainer: pl.Trainer = generic_train(model, args, logging_callback=Seq2SeqLoggingCallback(), logger=logger)
pickle_save(model.hparams, (model.output_dir / 'hparams.pkl'))
if (not args.do_predict):
return model
if (args.test_checkpoint is not None):
checkpoints = [args.test_checkpoint]
model.hparams.test_checkpoint = checkpoints[(- 1)]
trainer.resume_from_checkpoint = checkpoints[(- 1)]
if (args.do_predict and args.skip_train):
checkpoint = checkpoints[(- 1)]
rank_zero_info(checkpoint)
trainer.test(model, ckpt_path=checkpoint)
return model
trainer.test()
return model
|
class PrefixTransformer(pl.LightningModule):
def __init__(self, hparams: argparse.Namespace, num_labels=None, config=None, tokenizer=None, seq2seq_model=None, **config_kwargs):
'Initialize a model, tokenizer and config.'
super().__init__()
self.save_hyperparameters(hparams)
self.step_count = 0
self.output_dir = Path(self.hparams.output_dir)
cache_dir = (self.hparams.cache_dir if self.hparams.cache_dir else None)
rank_zero_info('the cache dir is {}'.format(cache_dir))
if (config is None):
self.config = AutoConfig.from_pretrained((self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path), **({'num_labels': num_labels} if (num_labels is not None) else {}), cache_dir=cache_dir, **config_kwargs)
else:
self.config: PretrainedConfig = config
extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams, p, None):
assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
setattr(self.config, p, getattr(self.hparams, p))
if (tokenizer is None):
self.tokenizer = AutoTokenizer.from_pretrained((self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path), cache_dir=cache_dir)
if self.hparams.new_tokens:
new_tokens = ['<H>', '<R>', '<T>']
if self.hparams.control_token_DART:
new_tokens.extend(['<e2e>', '<webnlg_old>', '<WikiTableQuestions_lily>', '<WikiSQL_decl_sents>', '<WikiTableQuestions_mturk>', '<WikiSQL_lily>'])
self.new_token_len = len(new_tokens)
new_tokens_vocab = {}
new_tokens_vocab['additional_special_tokens'] = []
for (idx, t) in enumerate(new_tokens):
new_tokens_vocab['additional_special_tokens'].append(t)
num_added_toks = self.tokenizer.add_special_tokens(new_tokens_vocab)
rank_zero_info('We have added %s tokens', num_added_toks)
else:
self.tokenizer: PreTrainedTokenizer = tokenizer
self.config.preseqlen = self.hparams.preseqlen
if self.hparams.control_prefixes:
if self.hparams.DART:
self.config.preseqlen += self.hparams.m_prefix_len
else:
self.config.preseqlen += (self.hparams.m_prefix_len * 2)
self.config.use_prefix = True
self.seq2seq_model_type = AutoModel
if (seq2seq_model is None):
self.seq2seq_model = T5ForConditionalGeneration.from_pretrained(self.hparams.model_name_or_path, from_tf=bool(('.ckpt' in self.hparams.model_name_or_path)), config=self.config, cache_dir=cache_dir)
else:
self.seq2seq_model = seq2seq_model
self.seq2seq_model.resize_token_embeddings(len(self.tokenizer))
config_prefix = AutoConfig.from_pretrained(self.hparams.model_name_or_path, cache_dir=cache_dir)
self.model_type = config_prefix.model_type
if (self.hparams.optim_prefix == 'yes'):
optim_prefix_bool = True
elif (self.hparams.optim_prefix == 'no'):
optim_prefix_bool = False
else:
assert False, 'model_args.optim_prefix should be either yes or no'
rank_zero_info(self.model_type)
config_prefix._my_arg_tune_mode = self.hparams.tuning_mode
config_prefix._my_arg_task_mode = self.hparams.task_mode
config_prefix._my_arg_control = True
config_prefix.train_weights = False
config_prefix.optim_prefix = optim_prefix_bool
config_prefix.preseqlen = self.hparams.preseqlen
config_prefix.use_infix = (self.hparams.format_mode == 'infix')
config_prefix.format_mode = self.hparams.format_mode
config_prefix.prefix_dropout = self.hparams.prefix_dropout
config_prefix.vocab_size = len(self.tokenizer)
config_prefix.DART = self.hparams.DART
config_prefix.lowdata = ('lowdata' in self.hparams.output_dir)
if (config_prefix.lowdata and (self.hparams.use_lowdata_token == 'yes')):
config_prefix.lowdata_token = self.tokenizer([self.hparams.lowdata_token], add_prefix_space=True)['input_ids']
rank_zero_info(self.hparams.lowdata_token)
rank_zero_info(config_prefix.lowdata_token)
rank_zero_info(self.tokenizer.pad_token_id)
config_prefix.mid_dim = self.hparams.mid_dim
config_prefix.new_token_len = getattr(self, 'new_token_len', 0)
if self.hparams.control_prefixes:
config_prefix.m_prefix_mid_dim = self.hparams.m_prefix_mid_dim
config_prefix.m_prefix_len = self.hparams.m_prefix_len
if self.hparams.unseen:
config_prefix.unseen = True
if (self.hparams.prefixModel_name_or_path is not None):
rank_zero_info('loading from {}'.format(hparams.prefixModel_name_or_path))
self.model = ControlPrefixes.from_pretrained(self.hparams.prefixModel_name_or_path, from_tf=bool(('.ckpt' in self.hparams.prefixModel_name_or_path)), cache_dir=cache_dir, config=config_prefix)
else:
self.model = ControlPrefixes(config_prefix)
def load_hf_checkpoint(self, *args, **kwargs):
assert False, 'why need to load model here?'
self.model = self.model_type.from_pretrained(*args, **kwargs)
def get_lr_scheduler(self):
get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
scheduler = get_schedule_func(self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps)
rank_zero_info(f'warm up {self.hparams.warmup_steps}')
scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def configure_optimizers(self):
'Prepare optimizer and schedule (linear warmup and decay)'
if self.hparams.different_scheduler:
cefr_params = [p for (n, p) in self.named_parameters() if any(((nd in n) for nd in ['CEFR_matrices.wte']))]
no_cefr_params = [p for (n, p) in self.named_parameters() if (not any(((nd in n) for nd in ['CEFR_matrices.wte'])))]
optimizer_grouped_parameters = [{'params': no_cefr_params, 'weight_decay': self.hparams.weight_decay}, {'params': cefr_params, 'weight_decay': self.hparams.weight_decay}]
else:
optimizer_grouped_parameters = [{'params': [p for p in self.parameters() if p.requires_grad], 'weight_decay': self.hparams.weight_decay}]
if self.hparams.adafactor:
optimizer = Adafactor(optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
self.opt = optimizer
if self.hparams.different_scheduler:
scheduler = bespoke_scheduler(self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps)
rank_zero_info(f'warm up {self.hparams.warmup_steps}')
scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
else:
scheduler = self.get_lr_scheduler()
return ([optimizer], [scheduler])
def test_step(self, batch, batch_nb):
return self.validation_step(batch, batch_nb)
def test_epoch_end(self, outputs):
return self.validation_epoch_end(outputs)
@property
def total_steps(self) -> int:
'The number of total training steps that will be run. Used for lr scheduler purposes.'
num_devices = max(1, self.hparams.gpus)
if (self.hparams.original_batch_size is not None):
effective_batch_size = ((self.hparams.original_batch_size * self.hparams.accumulate_grad_batches) * num_devices)
else:
effective_batch_size = ((self.hparams.train_batch_size * self.hparams.accumulate_grad_batches) * num_devices)
dataset_size = len(self.train_loader.dataset)
return int((dataset_size / effective_batch_size) * self.hparams.max_epochs)
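# Worked example (illustrative numbers, not from the repo): with 18,000 training examples,
# train_batch_size=8, accumulate_grad_batches=4, 1 GPU and max_epochs=30, the effective
# batch size is 8 * 4 * 1 = 32, so total_steps = (18000 / 32) * 30 = 16875 optimizer steps
# for the lr scheduler.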
def setup(self, mode):
if (mode == 'fit'):
self.train_loader = self.get_dataloader('train', self.hparams.train_batch_size, shuffle=True)
def get_dataloader(self, type_path, batch_size, shuffle=False):
raise NotImplementedError('You must implement this for your task')
def train_dataloader(self):
return self.train_loader
def val_dataloader(self):
return self.get_dataloader('dev', self.hparams.eval_batch_size, shuffle=False)
def test_dataloader(self):
return self.get_dataloader('test', self.hparams.eval_batch_size, shuffle=False)
def _feature_file(self, mode):
return os.path.join(self.hparams.data_dir, 'cached_{}_{}_{}'.format(mode, list(filter(None, self.hparams.model_name_or_path.split('/'))).pop(), str(self.hparams.max_seq_length)))
@pl.utilities.rank_zero_only
def save_checkpoint(self, trainer) -> None:
rank_zero_info('Saving the checkpoint.')
return
@pl.utilities.rank_zero_only
def on_save_checkpoint(self, checkpoint: Dict[(str, Any)], filepath=None) -> None:
rank_zero_info('SEQ {}'.format(self.seq2seq_model.shared.trainable_weight))
self.model.es.trainable_weight = self.seq2seq_model.shared.trainable_weight
rank_zero_info('Prefix_stored_weight {}'.format(self.model.es.trainable_weight))
save_path = self.output_dir.joinpath('checkpoint-curr_best')
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
rank_zero_info('SAVING TO checkpoint {}'.format(save_path))
@staticmethod
def add_model_specific_args(parser, root_dir):
parser.add_argument('--model_name_or_path', default='t5-large', type=str, required=False, help='Path to pretrained model or model identifier from huggingface.co/models')
parser.add_argument('--prefixModel_name_or_path', default=None, type=str, help='Path to pretrained prefix model or model identifier from huggingface.co/models')
parser.add_argument('--prefix_mode', default='activation', type=str, help='embedding or activation')
parser.add_argument('--preseqlen', default=200, type=int, help='the length of the prefix.')
parser.add_argument('--optim_prefix', default='yes', type=str, help='use the task specific optimization of the prefix.')
parser.add_argument('--different_scheduler', default=False, type=bool, help='use a different lr scheduler for control prefixes and main prefix')
parser.add_argument('--tuning_mode', default='prefixtune', type=str, help='Could be prefixtune or finetune')
parser.add_argument('--prefix_dropout', default=0.0, type=float, help='the dropout rate for our prefix model.')
parser.add_argument('--use_dropout', default='no', type=str, help='whether to dropout the main model during training. ')
parser.add_argument('--mid_dim', default=800, type=int, help='the dimension of the intermediate layer of the main prefix reparameterization')
parser.add_argument('--m_prefix_mid_dim', default=512, type=int, help='the dimension of the intermediate layer of the control prefix reparameterizations')
parser.add_argument('--m_prefix_len', default=1, type=int, help='the control prefix length')
parser.add_argument('--unseen', default=False, type=bool, help='Initializing a control prefix for unseen categories to zero')
parser.add_argument('--format_mode', default='cat', type=str, help='whether to look at the input again, including [infix, cat, peek, nopeek]')
parser.add_argument('--use_lowdata_token', default='yes', type=str, help='whether or not to use the lowdata token, ')
parser.add_argument('--lowdata_token', default='summarize', type=str, help='the low data token to use. ')
parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', default=None, type=str, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--cache_dir', default='/content/gdrive/MyDrive/cache_dir', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--encoder_layerdrop', type=float, help='Encoder layer dropout probability (Optional). Goes into model.config')
parser.add_argument('--decoder_layerdrop', type=float, help='Decoder layer dropout probability (Optional). Goes into model.config')
parser.add_argument('--dropout', type=float, help='Dropout probability (Optional). Goes into model.config')
parser.add_argument('--control_prefixes', type=bool, default=False, help='if using control prefixes')
parser.add_argument('--new_tokens', type=bool, default=False, help='if using demarcation tokens <H>, <R>, <T> that need to be learnable')
parser.add_argument('--attention_dropout', type=float, help='Attention dropout probability (Optional). Goes into model.config')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The target learning rate.')
parser.add_argument('--lr_scheduler', default='linear', choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help='Learning rate scheduler')
parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
parser.add_argument('--num_workers', default=4, type=int, help='kwarg passed to DataLoader')
parser.add_argument('--num_train_epochs', dest='max_epochs', default=30, type=int)
parser.add_argument('--original_batch_size', default=None, type=int)
parser.add_argument('--hf_checkpoint', default=False, type=bool, help='if want to save a hf model checkpoint from a lightning ckpt')
parser.add_argument('--test_checkpoint', default=None, type=str)
parser.add_argument('--train_batch_size', default=8, type=int)
parser.add_argument('--eval_batch_size', default=6, type=int)
parser.add_argument('--adafactor', action='store_true')
|
def add_generic_args(parser, root_dir) -> None:
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
parser.add_argument('--n_tpu_cores', dest='tpu_cores', type=int)
parser.add_argument('--max_grad_norm', dest='gradient_clip_val', default=1.0, type=float, help='Max gradient norm')
parser.add_argument('--do_predict', default=True, type=bool, help='Whether to run predictions on the test set.')
parser.add_argument('--gradient_accumulation_steps', dest='accumulate_grad_batches', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--seed', type=int, default=101, help='random seed for initialization')
parser.add_argument('--control_token_DART', type=bool, default=False, help='if using control tokens for DART source')
parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir.')
|
def generic_train(model, args: argparse.Namespace, early_stopping_callback=False, logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs):
pl.seed_everything(args.seed)
odir = Path(model.hparams.output_dir)
odir.mkdir(exist_ok=True)
checkpoint_callback = pl.callbacks.ModelCheckpoint(dirpath=args.output_dir, monitor=('val_' + args.val_metric), mode='max', save_top_k=args.save_top_k, save_last=True)
if (early_stopping_callback is not False):
extra_callbacks.append(early_stopping_callback)
rank_zero_info('the max number of epochs is {}'.format(args.max_epochs))
rank_zero_info('early stopping: {}'.format(early_stopping_callback))
rank_zero_info('checkpoint_callback: {}'.format(checkpoint_callback))
rank_zero_info('logging: {}'.format(logging_callback))
trainer = pl.Trainer.from_argparse_args(args, max_epochs=args.max_epochs, weights_summary=None, callbacks=([logging_callback] + extra_callbacks), logger=logger, checkpoint_callback=checkpoint_callback)
print('args.do_Train:', (not args.skip_train))
if (not args.skip_train):
trainer.fit(model)
return trainer
|
class PartiallyFixedEmbedding(torch.nn.Module):
def __init__(self, fixed_weights, num_to_learn, padding_idx=1):
super().__init__()
self.num_fixed = fixed_weights.size(0)
self.num_to_learn = num_to_learn
weight = torch.empty((self.num_fixed + num_to_learn), fixed_weights.size(1))
weight[:self.num_fixed] = fixed_weights
self.trainable_weight = torch.nn.Parameter(torch.empty(num_to_learn, fixed_weights.size(1)))
torch.nn.init.kaiming_uniform_(self.trainable_weight)
weight[self.num_fixed:] = self.trainable_weight
self.register_buffer('weight', weight)
self.padding_idx = padding_idx
def forward(self, inp):
self.weight.detach_()
self.weight[self.num_fixed:] = self.trainable_weight
return torch.nn.functional.embedding(inp, self.weight, self.padding_idx, None, 2.0, False, False)
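# Hedged usage sketch (not part of the original code): shows how PartiallyFixedEmbedding
# keeps the pretrained rows frozen while only the newly appended rows receive gradients.
# All sizes below are illustrative assumptions.
def _example_partially_fixed_embedding():
    import torch
    fixed = torch.randn(10, 4)                    # stand-in for a pretrained embedding table (10 tokens, dim 4)
    emb = PartiallyFixedEmbedding(fixed, num_to_learn=2)
    out = emb(torch.tensor([[0, 9, 10, 11]]))     # ids 10 and 11 hit the two trainable rows
    out.sum().backward()
    assert emb.trainable_weight.grad is not None  # gradients reach only the appended rows
    return out.shape                              # torch.Size([1, 4, 4])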
|
def make_new_embeddings_learnable(model, tokenizer_len, num_to_learn):
print('fixed_embeds', (tokenizer_len - num_to_learn))
fixed_weights = model.shared.weight[:(tokenizer_len - num_to_learn)]
new_embed_layer = PartiallyFixedEmbedding(fixed_weights, num_to_learn)
model.decoder.embed_tokens = new_embed_layer
model.encoder.embed_tokens = new_embed_layer
model.shared = new_embed_layer
|
def run_experiment(yaml_file):
with open(yaml_file, 'r') as stream:
parsed_yaml = yaml.safe_load(stream)
args = ''
for (arg, value) in parsed_yaml.items():
args += f'--{arg} {value} '
os.system(f'python finetune.py {args}--adafactor')
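# Hedged example of the expected YAML layout (keys are illustrative; any value-taking
# argparse flag accepted by finetune.py can appear here and is forwarded as "--key value"):
#   output_dir: runs/webnlg_prefix
#   data_dir: data/webnlg
#   train_batch_size: 8
# run_experiment('experiment.yaml') would then shell out to
# `python finetune.py --output_dir runs/webnlg_prefix --data_dir data/webnlg --train_batch_size 8 --adafactor`.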
|
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=(- 100)):
'From fairseq'
if (target.dim() == (lprobs.dim() - 1)):
target = target.unsqueeze((- 1))
nll_loss = (- lprobs.gather(dim=(- 1), index=target))
smooth_loss = (- lprobs.sum(dim=(- 1), keepdim=True))
if (ignore_index is not None):
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze((- 1))
smooth_loss = smooth_loss.squeeze((- 1))
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = (epsilon / lprobs.size((- 1)))
loss = (((1.0 - epsilon) * nll_loss) + (eps_i * smooth_loss))
return (loss, nll_loss)
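# Hedged sketch (not part of the original code): label smoothing on a toy batch.
# Shapes, epsilon and the pad id are illustrative assumptions.
def _example_label_smoothed_loss():
    import torch
    pad_id = 0
    lprobs = torch.log_softmax(torch.randn(2, 3, 7), dim=-1)   # (batch, seq_len, vocab)
    target = torch.randint(1, 7, (2, 3))
    target[0, -1] = pad_id                                     # pretend one padded position
    loss, nll_loss = label_smoothed_nll_loss(lprobs, target, epsilon=0.1, ignore_index=pad_id)
    # loss blends the NLL of the gold token with the mean NLL over the whole vocabulary,
    # and padded positions are masked out of both terms.
    return loss, nll_loss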
|
def lmap(f: Callable, x: Iterable) -> List:
'list(map(f, x))'
return list(map(f, x))
|
def calculate_bleu(output_lns, refs_lns) -> dict:
"Uses sacrebleu's corpus_bleu implementation."
return {'sacrebleu': round(corpus_bleu(output_lns, [refs_lns]).score, 4)}
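# e.g. calculate_bleu(['the cat sat on the mat'], ['the cat sat on the mat'])
# returns {'sacrebleu': 100.0}; each prediction is scored against the reference
# at the same position (a single reference stream).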
|
class AbstractSeq2SeqDataset(Dataset):
def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path='train', n_obs=None, prefix='', **dataset_kwargs):
super().__init__()
self.src_file = Path(data_dir).joinpath((type_path + '.source'))
self.tgt_file = Path(data_dir).joinpath((type_path + '.target'))
self.len_file = Path(data_dir).joinpath((type_path + '.len'))
self.cat_file = list(np.load(Path(data_dir).joinpath((type_path + '.source_cat.npy'))))
self.source_file = list(np.load(Path(data_dir).joinpath((type_path + '.source.npy'))))
if os.path.exists(self.len_file):
self.src_lens = pickle_load(self.len_file)
self.used_char_len = False
else:
self.src_lens = self.get_char_lens(self.src_file)
self.used_char_len = True
self.max_source_length = max_source_length
self.max_target_length = max_target_length
assert (min(self.src_lens) > 0), f'found empty line in {self.src_file}'
self.tokenizer = tokenizer
self.prefix = (prefix if (prefix is not None) else '')
if (n_obs is not None):
self.src_lens = self.src_lens[:n_obs]
self.pad_token_id = self.tokenizer.pad_token_id
self.dataset_kwargs = dataset_kwargs
dataset_kwargs.update(({'add_prefix_space': True} if isinstance(self.tokenizer, BartTokenizer) else {}))
def __len__(self):
return len(self.src_lens)
@staticmethod
def get_char_lens(data_file):
return [len(x) for x in Path(data_file).open().readlines()]
@cached_property
def tgt_lens(self):
'Length in characters of target documents'
return self.get_char_lens(self.tgt_file)
def make_sortish_sampler(self, batch_size, distributed=False, shuffle=True, **kwargs):
if distributed:
return DistributedSortishSampler(self, batch_size, shuffle=shuffle, **kwargs)
else:
return SortishSampler(self.src_lens, batch_size, shuffle=shuffle)
def make_dynamic_sampler(self, max_tokens_per_batch=1024, **kwargs):
assert FAIRSEQ_AVAILABLE, 'Dynamic batch size requires `pip install fairseq`'
assert (not self.used_char_len), 'You must call python make_len_file.py before calling make_dynamic_sampler'
sorted_indices = list(self.make_sortish_sampler(1024, shuffle=False))
def num_tokens_in_example(i):
return min(self.src_lens[i], self.max_target_length)
batch_sampler: List[List[int]] = batch_by_size(sorted_indices, num_tokens_fn=num_tokens_in_example, max_tokens=max_tokens_per_batch, required_batch_size_multiple=64)
shuffled_batches = [batch_sampler[i] for i in np.random.permutation(range(len(batch_sampler)))]
approximate_toks_per_batch = [(max((self.src_lens[i] for i in batch)) * len(batch)) for batch in shuffled_batches]
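# Move the (approximately) largest batch to the front so that any out-of-memory
# failure surfaces on the very first step instead of partway through an epoch.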
largest_batch_idx = np.argmax(approximate_toks_per_batch)
(shuffled_batches[0], shuffled_batches[largest_batch_idx]) = (shuffled_batches[largest_batch_idx], shuffled_batches[0])
return shuffled_batches
def __getitem__(self, item):
raise NotImplementedError('You must implement this')
def collate_fn(self, batch):
raise NotImplementedError('You must implement this')
|
class Seq2SeqDataset(AbstractSeq2SeqDataset):
'A dataset that calls prepare_seq2seq_batch.'
def __getitem__(self, index) -> Dict[(str, str)]:
index = (index + 1)
source_line = (self.prefix + linecache.getline(str(self.src_file), index).rstrip('\n'))
tgt_line = linecache.getline(str(self.tgt_file), index).rstrip('\n')
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
return {'tgt_texts': tgt_line, 'src_texts': source_line, 'id': (index - 1), 'category': self.cat_file[(index - 1)], 'source': self.source_file[(index - 1)]}
def collate_fn(self, batch):
'Call prepare_seq2seq_batch.'
batch_encoding: Dict[(str, torch.Tensor)] = self.tokenizer.prepare_seq2seq_batch([x['src_texts'] for x in batch], tgt_texts=[x['tgt_texts'] for x in batch], max_length=self.max_source_length, max_target_length=self.max_target_length, return_tensors='pt', **self.dataset_kwargs).data
batch_encoding['ids'] = torch.tensor([x['id'] for x in batch])
batch_encoding['cats'] = torch.tensor([x['category'] for x in batch])
batch_encoding['sources'] = torch.tensor([x['source'] for x in batch])
return batch_encoding
|
class SortishSampler(Sampler):
'Go through the text data by order of src length with a bit of randomness. From fastai repo.'
def __init__(self, data, batch_size, shuffle=True):
(self.data, self.bs, self.shuffle) = (data, batch_size, shuffle)
def __len__(self) -> int:
return len(self.data)
def __iter__(self):
return iter(sortish_sampler_indices(self.data, self.bs, shuffle=self.shuffle))
|
def sortish_sampler_indices(data: List, bs: int, shuffle=True) -> np.array:
'Go through the text data by order of src length with a bit of randomness. From fastai repo.'
if (not shuffle):
return np.argsort((np.array(data) * (- 1)))
def key_fn(i):
return data[i]
idxs = np.random.permutation(len(data))
sz = (bs * 50)
ck_idx = [idxs[i:(i + sz)] for i in range(0, len(idxs), sz)]
sort_idx = np.concatenate([sorted(s, key=key_fn, reverse=True) for s in ck_idx])
sz = bs
ck_idx = [sort_idx[i:(i + sz)] for i in range(0, len(sort_idx), sz)]
max_ck = np.argmax([key_fn(ck[0]) for ck in ck_idx])
(ck_idx[0], ck_idx[max_ck]) = (ck_idx[max_ck], ck_idx[0])
sort_idx = (np.concatenate(np.random.permutation(ck_idx[1:])) if (len(ck_idx) > 1) else np.array([], dtype=int))
sort_idx = np.concatenate((ck_idx[0], sort_idx))
return sort_idx
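# Hedged illustration (not part of the original code): the sampler shuffles indices,
# sorts each megabatch of bs * 50 items by length (longest first), re-chunks into
# batches of bs, and finally places the chunk containing the longest example first.
# For data = [5, 1, 3, 2, 4] (lengths) and bs = 2, one possible output order is
# [0, 4, 2, 3, 1] (lengths 5, 4, 3, 2, 1): the longest items lead, and the remaining
# chunks appear in a random order.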
|
class DistributedSortishSampler(Sampler):
'Copied from torch DistributedSampler'
def __init__(self, dataset, batch_size, num_replicas=None, rank=None, add_extra_examples=True, shuffle=True):
if (num_replicas is None):
if (not dist.is_available()):
raise RuntimeError('Requires distributed package to be available')
num_replicas = dist.get_world_size()
if (rank is None):
if (not dist.is_available()):
raise RuntimeError('Requires distributed package to be available')
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
if add_extra_examples:
self.num_samples = int(math.ceil(((len(self.dataset) * 1.0) / self.num_replicas)))
self.total_size = (self.num_samples * self.num_replicas)
else:
self.total_size = len(dataset)
self.num_samples = len(self.available_indices)
self.batch_size = batch_size
self.add_extra_examples = add_extra_examples
self.shuffle = shuffle
def __iter__(self) -> Iterable:
g = torch.Generator()
g.manual_seed(self.epoch)
sortish_data = [self.dataset.src_lens[i] for i in self.available_indices]
sortish_indices = sortish_sampler_indices(sortish_data, self.batch_size, shuffle=self.shuffle)
indices = [self.available_indices[i] for i in sortish_indices]
assert (len(indices) == self.num_samples)
return iter(indices)
@cached_property
def available_indices(self) -> np.array:
indices = list(range(len(self.dataset)))
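# Pad by wrapping around so every replica receives exactly num_samples indices.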
indices += indices[:(self.total_size - len(indices))]
assert (len(indices) == self.total_size)
available_indices = indices[self.rank:self.total_size:self.num_replicas]
return available_indices
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
|
def use_task_specific_params(model, task):
'Update config with task-specific params.'
task_specific_params = model.config.task_specific_params
if (task_specific_params is not None):
pars = task_specific_params.get(task, {})
logger.info(f'using task specific params for {task}: {pars}')
model.config.update(pars)
|
def pickle_load(path):
'pickle.load(path)'
with open(path, 'rb') as f:
return pickle.load(f)
|
def pickle_save(obj, path):
'pickle.dump(obj, path)'
with open(path, 'wb') as f:
return pickle.dump(obj, f)
|
def flatten_list(summary_ids: List[List]):
return [x for x in itertools.chain.from_iterable(summary_ids)]
|
def freeze_params(model: nn.Module):
'Set requires_grad=False for each of model.parameters()'
for par in model.parameters():
par.requires_grad = False
|
def freeze_embeds(model):
'Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.'
model_type = model.config.model_type
if (model_type == 't5'):
freeze_params(model.shared)
for d in [model.encoder, model.decoder]:
freeze_params(d.embed_tokens)
elif (model_type == 'fsmt'):
for d in [model.model.encoder, model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
else:
freeze_params(model.model.shared)
for d in [model.model.encoder, model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
|
def assert_all_frozen(model):
model_grads: List[bool] = list(grad_status(model))
n_require_grad = sum(lmap(int, model_grads))
npars = len(model_grads)
assert (not any(model_grads)), f'{(n_require_grad / npars):.1%} of {npars} weights require grad'
|
def grad_status(model: nn.Module) -> Iterable:
return (par.requires_grad for par in model.parameters())
|
def convert_text(text):
text = text.lower()
text = ' '.join(re.split('(\\W)', text))
text = ' '.join(text.split())
return text
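# e.g. convert_text('Hello,  World!') -> 'hello , world !' (lower-cased, punctuation
# split off, whitespace collapsed), typically used to normalise hypothesis and
# reference text consistently before scoring.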
|
def eval_meteor_test_webnlg(folder_data, pred_file, dataset):
dir_path = os.path.dirname(os.path.realpath(__file__))
folder_data_before = (dir_path + '/utils')
cmd_string = ((((((((('java -jar ' + folder_data_before) + '/meteor-1.5.jar ') + pred_file) + ' ') + folder_data) + '/') + dataset) + '.target_eval_meteor -l en -norm -r 3 > ') + pred_file.replace('txt', 'meteor'))
print(cmd_string)
os.system(cmd_string)
meteor_info = open(pred_file.replace('txt', 'meteor'), 'r').readlines()[(- 1)].strip()
return meteor_info
|
def eval_chrf_test_webnlg(folder_data, pred_file, dataset):
dir_path = os.path.dirname(os.path.realpath(__file__))
folder_data_before = (dir_path + '/utils')
cmd_string = ((((((((('python ' + folder_data_before) + '/chrf++.py -H ') + pred_file) + ' -R ') + folder_data) + '/') + dataset) + '.target_eval_crf > ') + pred_file.replace('txt', 'chrf'))
os.system(cmd_string)
chrf_lines = open(pred_file.replace('txt', 'chrf'), 'r').readlines()
chrf_info_1 = chrf_lines[1].strip()
chrf_info_2 = chrf_lines[2].strip()
return ((chrf_info_1 + ' ') + chrf_info_2)
|
def eval_bleu(folder_data, pred_file, dataset):
dir_path = os.path.dirname(os.path.realpath(__file__))
cmd_string = ((((((((((((((((('perl ' + dir_path) + '/multi-bleu.perl -lc ') + folder_data) + '/') + dataset) + '.target_eval ') + folder_data) + '/') + dataset) + '.target2_eval ') + folder_data) + '/') + dataset) + '.target3_eval < ') + pred_file) + ' > ') + pred_file.replace('txt', 'bleu'))
print(cmd_string)
os.system(cmd_string)
try:
bleu_info = open(pred_file.replace('txt', 'bleu'), 'r').readlines()[0].strip()
except (IOError, IndexError):
bleu_info = (- 1)
return bleu_info
|
def eval_bleu_sents_tok(pred_file, folder_data, dataset):
dir_path = os.path.dirname(os.path.realpath(__file__))
folder_data_before = (dir_path + '/utils')
cmd_string = (((((('perl ' + folder_data_before) + '/tokenizer.perl -threads 4 -no-escape < ') + pred_file) + ' > ') + pred_file) + '_tok')
os.system(cmd_string)
cmd_string = ((((((((((('perl ' + folder_data_before) + '/multi-bleu.perl -lc ') + folder_data) + '/') + dataset) + '.target.tok') + ' < ') + pred_file) + '_tok') + ' > ') + pred_file.replace('txt', 'bleu_data'))
os.system(cmd_string)
try:
bleu_info_data = open(pred_file.replace('txt', 'bleu_data'), 'r').readlines()[0].strip()
except (IOError, IndexError):
bleu_info_data = 'no data'
return bleu_info_data
|
def eval_meteor(ref_file, pred_file):
dir_path = os.path.dirname(os.path.realpath(__file__))
folder_data_before = (dir_path + '/utils')
cmd_string = ((((((('java -jar ' + folder_data_before) + '/meteor-1.5.jar ') + pred_file) + ' ') + ref_file) + ' > ') + pred_file.replace('txt', 'meteor'))
os.system(cmd_string)
meteor_info = open(pred_file.replace('txt', 'meteor'), 'r').readlines()[(- 1)].strip()
return meteor_info
|
def eval_chrf(ref_file, pred_file):
dir_path = os.path.dirname(os.path.realpath(__file__))
folder_data_before = (dir_path + '/utils')
cmd_string = ((((((('python ' + folder_data_before) + '/chrf++.py -H ') + pred_file) + ' -R ') + ref_file) + ' > ') + pred_file.replace('txt', 'chrf'))
os.system(cmd_string)
try:
chrf_info_1 = open(pred_file.replace('txt', 'chrf'), 'r').readlines()[1].strip()
chrf_info_2 = open(pred_file.replace('txt', 'chrf'), 'r').readlines()[2].strip()
chrf_data = ((chrf_info_1 + ' ') + chrf_info_2)
except (IOError, IndexError):
chrf_data = 'no data'
return chrf_data
|
def save_json(content, path, indent=4, **json_dump_kwargs):
with open(path, 'w') as f:
json.dump(content, f, indent=indent, **json_dump_kwargs)
|
def freeze_prefix(model):
params = [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in ['CEFR_matrices'])))]
for par in params:
par.requires_grad = False
|
class AbstractSeq2SeqDatasetSingle(Dataset):
def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path='train', n_obs=None, prefix='', **dataset_kwargs):
super().__init__()
self.src_file = Path(data_dir).joinpath((type_path + '.source'))
self.tgt_file = Path(data_dir).joinpath((type_path + '.target'))
self.len_file = Path(data_dir).joinpath((type_path + '.len'))
self.source_file = list(np.load(Path(data_dir).joinpath((type_path + '.source.npy'))))
if os.path.exists(self.len_file):
self.src_lens = pickle_load(self.len_file)
self.used_char_len = False
else:
self.src_lens = self.get_char_lens(self.src_file)
self.used_char_len = True
self.max_source_length = max_source_length
self.max_target_length = max_target_length
assert (min(self.src_lens) > 0), f'found empty line in {self.src_file}'
self.tokenizer = tokenizer
self.prefix = (prefix if (prefix is not None) else '')
if (n_obs is not None):
self.src_lens = self.src_lens[:n_obs]
self.pad_token_id = self.tokenizer.pad_token_id
self.dataset_kwargs = dataset_kwargs
dataset_kwargs.update(({'add_prefix_space': True} if isinstance(self.tokenizer, BartTokenizer) else {}))
def __len__(self):
return len(self.src_lens)
@staticmethod
def get_char_lens(data_file):
return [len(x) for x in Path(data_file).open().readlines()]
@cached_property
def tgt_lens(self):
'Length in characters of target documents'
return self.get_char_lens(self.tgt_file)
def make_sortish_sampler(self, batch_size, distributed=False, shuffle=True, **kwargs):
if distributed:
return DistributedSortishSampler(self, batch_size, shuffle=shuffle, **kwargs)
else:
return SortishSampler(self.src_lens, batch_size, shuffle=shuffle)
def make_dynamic_sampler(self, max_tokens_per_batch=1024, **kwargs):
assert FAIRSEQ_AVAILABLE, 'Dynamic batch size requires `pip install fairseq`'
assert (not self.used_char_len), 'You must call python make_len_file.py before calling make_dynamic_sampler'
sorted_indices = list(self.make_sortish_sampler(1024, shuffle=False))
def num_tokens_in_example(i):
return min(self.src_lens[i], self.max_target_length)
batch_sampler: List[List[int]] = batch_by_size(sorted_indices, num_tokens_fn=num_tokens_in_example, max_tokens=max_tokens_per_batch, required_batch_size_multiple=64)
shuffled_batches = [batch_sampler[i] for i in np.random.permutation(range(len(batch_sampler)))]
approximate_toks_per_batch = [(max((self.src_lens[i] for i in batch)) * len(batch)) for batch in shuffled_batches]
largest_batch_idx = np.argmax(approximate_toks_per_batch)
(shuffled_batches[0], shuffled_batches[largest_batch_idx]) = (shuffled_batches[largest_batch_idx], shuffled_batches[0])
return shuffled_batches
def __getitem__(self, item):
raise NotImplementedError('You must implement this')
def collate_fn(self, batch):
raise NotImplementedError('You must implement this')
|
class Seq2SeqDatasetSingle(AbstractSeq2SeqDatasetSingle):
'A dataset that calls prepare_seq2seq_batch.'
def __getitem__(self, index) -> Dict[(str, str)]:
index = (index + 1)
source_line = (self.prefix + linecache.getline(str(self.src_file), index).rstrip('\n'))
tgt_line = linecache.getline(str(self.tgt_file), index).rstrip('\n')
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
return {'tgt_texts': tgt_line, 'src_texts': source_line, 'id': (index - 1), 'source': self.source_file[(index - 1)]}
def collate_fn(self, batch):
'Call prepare_seq2seq_batch.'
batch_encoding: Dict[(str, torch.Tensor)] = self.tokenizer.prepare_seq2seq_batch([x['src_texts'] for x in batch], tgt_texts=[x['tgt_texts'] for x in batch], max_length=self.max_source_length, max_target_length=self.max_target_length, return_tensors='pt', **self.dataset_kwargs).data
batch_encoding['ids'] = torch.tensor([x['id'] for x in batch])
batch_encoding['sources'] = torch.tensor([x['source'] for x in batch])
return batch_encoding
|
def count_trainable_parameters(model):
model_parameters = filter((lambda p: p.requires_grad), model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return params
|
class Seq2SeqLoggingCallback(pl.Callback):
def on_batch_end(self, trainer, pl_module):
lrs = {f'lr_group_{i}': param['lr'] for (i, param) in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(lrs)
@rank_zero_only
def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****')
metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for (k, v) in metrics.items() if (k not in ['log', 'progress_bar', 'preds'])})
od = Path(pl_module.hparams.output_dir)
if (type_path == 'test'):
results_file = (od / 'test_results.txt')
generations_file = (od / 'test_generations.txt')
else:
results_file = (od / f'{type_path}_results/{trainer.global_step:05d}.txt')
generations_file = (od / f'{type_path}_generations/{trainer.global_step:05d}.txt')
results_file.parent.mkdir(exist_ok=True)
generations_file.parent.mkdir(exist_ok=True)
with open(results_file, 'a+') as writer:
for key in sorted(metrics):
if (key in ['log', 'progress_bar', 'preds']):
continue
val = metrics[key]
if isinstance(val, torch.Tensor):
val = val.item()
msg = f'''{key}: {val:.6f}
'''
writer.write(msg)
if (not save_generations):
return
if ('preds' in metrics):
content = '\n'.join(metrics['preds'])
generations_file.open('w+').write(content)
@rank_zero_only
def on_train_start(self, trainer, pl_module):
try:
npars = pl_module.model.model.num_parameters()
except AttributeError:
npars = pl_module.model.num_parameters()
n_trainable_pars = count_trainable_parameters(pl_module)
trainer.logger.log_metrics({'n_params': npars, 'mp': (npars / 1000000.0), 'grad_mp': (n_trainable_pars / 1000000.0)})
@rank_zero_only
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
save_json(pl_module.metrics, pl_module.metrics_save_path)
return self._write_logs(trainer, pl_module, 'test')
@rank_zero_only
def on_validation_end(self, trainer: pl.Trainer, pl_module):
save_json(pl_module.metrics, pl_module.metrics_save_path)
|
def bespoke_scheduler(optimizer, num_warmup_steps, num_training_steps, last_epoch=(- 1)):
'\n Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after\n a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.\n\n Args:\n optimizer (:class:`~torch.optim.Optimizer`):\n The optimizer for which to schedule the learning rate.\n num_warmup_steps (:obj:`int`):\n The number of steps for the warmup phase.\n num_training_steps (:obj:`int`):\n The total number of training steps.\n last_epoch (:obj:`int`, `optional`, defaults to -1):\n The index of the last epoch when resuming training.\n\n Return:\n :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.\n '
def lr_lambda(current_step: int):
if (current_step < num_warmup_steps):
return (float(current_step) / float(max(1, num_warmup_steps)))
return max(0.0, (float((num_training_steps - current_step)) / float(max(1, (num_training_steps - num_warmup_steps)))))
def lr_lambda2(current_step: int):
if (current_step < num_warmup_steps):
return (float(current_step) / float(max(1, num_warmup_steps)))
return max(0.0, (float(((num_training_steps * 3) - current_step)) / float(max(1, ((num_training_steps * 3) - num_warmup_steps)))))
return LambdaLR(optimizer, [lr_lambda, lr_lambda2], last_epoch)
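# Hedged usage sketch (illustrative parameter grouping, not the repo's exact setup):
# the first param group follows the standard linear-warmup/linear-decay schedule,
# while the second group decays three times more slowly (it reaches zero only at
# 3 * num_training_steps), which is what the two lr_lambda functions above encode.
def _example_bespoke_scheduler():
    import torch
    fast = torch.nn.Linear(4, 4)
    slow = torch.nn.Embedding(10, 4)
    optimizer = torch.optim.AdamW([
        {'params': fast.parameters(), 'lr': 5e-5},
        {'params': slow.parameters(), 'lr': 5e-5},
    ])
    scheduler = bespoke_scheduler(optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(20):
        optimizer.step()
        scheduler.step()
    # After warmup, group 0 decays towards 0 at step 100; group 1 decays towards 0 at step 300.
    return scheduler.get_last_lr()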
|
class PrefixSummarizationModule(PrefixTransformer):
mode = 'summarization'
loss_names = ['loss']
metric_names = ROUGE_KEYS
default_val_metric = 'rouge2'
def __init__(self, hparams, **kwargs):
if (hparams.sortish_sampler and (hparams.gpus > 1)):
hparams.replace_sampler_ddp = False
elif (hparams.max_tokens_per_batch is not None):
if (hparams.gpus > 1):
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')
super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
use_task_specific_params(self.model, 'summarization')
self.metrics_save_path = (Path(self.output_dir) / 'metrics.json')
self.hparams_save_path = (Path(self.output_dir) / 'hparams.pkl')
pickle_save(self.hparams, self.hparams_save_path)
self.step_count = 0
self.metrics = defaultdict(list)
self.model_type = self.config.model_type
self.vocab_size = (self.config.tgt_vocab_size if (self.model_type == 'fsmt') else self.config.vocab_size)
self.val_dir = self.hparams.output_dir
self.dataset_kwargs: dict = dict(data_dir=self.hparams.data_dir, max_source_length=self.hparams.max_source_length, prefix=(self.model.config.prefix or ''))
n_observations_per_split = {'train': self.hparams.n_train, 'val': self.hparams.n_val, 'test': self.hparams.n_test}
self.n_obs = {k: (v if (v >= 0) else None) for (k, v) in n_observations_per_split.items()}
self.target_lens = {'train': self.hparams.max_target_length, 'val': self.hparams.val_max_target_length, 'test': self.hparams.test_max_target_length}
assert (self.target_lens['train'] <= self.target_lens['val']), f'target_lens: {self.target_lens}'
assert (self.target_lens['train'] <= self.target_lens['test']), f'target_lens: {self.target_lens}'
if (not self.hparams.finetune):
freeze_params(self.seq2seq_model)
assert_all_frozen(self.seq2seq_model)
print('FREEZING ENTIRE seq2seq model.')
else:
print('FINE-TUNING')
self.freeze_embeds()
self.num_workers = hparams.num_workers
self.decoder_start_token_id = None
if ((self.model.config.decoder_start_token_id is None) and isinstance(self.tokenizer, MBartTokenizer)):
self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
self.model.config.decoder_start_token_id = self.decoder_start_token_id
self.dataset_class = (Seq2SeqDataset if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else LegacySeq2SeqDataset)
self.eval_beams = (self.model.config.num_beams if (self.hparams.eval_beams is None) else self.hparams.eval_beams)
assert (self.eval_beams >= 1), f'got self.eval_beams={self.eval_beams}. Need an integer >= 1'
if (self.hparams.eval_max_gen_length is not None):
self.eval_max_length = self.hparams.eval_max_gen_length
else:
self.eval_max_length = self.model.config.max_length
self.val_metric = (self.default_val_metric if (self.hparams.val_metric is None) else self.hparams.val_metric)
self.training_acc_across_batches_at_curr_epoch = []
self.eval_max_length = 60
self.eval_min_length = 10
self.eval_beams = 6
print('for decoding, eval_max_length={}, eval_min_length={}, eval_beams={}'.format(self.eval_max_length, self.eval_min_length, self.eval_beams))
def freeze_embeds(self):
'Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.'
freeze_params(self.seq2seq_model.model.shared)
for d in [self.seq2seq_model.model.encoder, self.seq2seq_model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
def forward(self, input_ids, **kwargs):
return self.model(input_ids, frozen_model=self.seq2seq_model, **kwargs)
def ids_to_clean_text(self, generated_ids: List[int]):
gen_text = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
return lmap(str.strip, gen_text)
def _step(self, batch: dict) -> Tuple:
pad_token_id = self.tokenizer.pad_token_id
(src_ids, src_mask) = (batch['input_ids'], batch['attention_mask'])
tgt_ids = batch['labels']
if isinstance(self.model, T5ForConditionalGeneration):
decoder_input_ids = self.model._shift_right(tgt_ids)
else:
decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False, use_prefix=True, conditional_info={'cats': batch['cats'], 'sport': batch['sport']})
lm_logits = outputs[0]
if (self.hparams.label_smoothing == 0):
ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id)
assert (lm_logits.shape[(- 1)] == self.vocab_size)
loss = ce_loss_fct(lm_logits.view((- 1), lm_logits.shape[(- 1)]), tgt_ids.view((- 1)))
else:
lprobs = torch.nn.functional.log_softmax(lm_logits, dim=(- 1))
(loss, nll_loss) = label_smoothed_nll_loss(lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id)
return (loss,)
@property
def pad(self) -> int:
return self.tokenizer.pad_token_id
def training_step(self, batch, batch_idx) -> Dict:
loss_tensors = self._step(batch)
logs = {name: loss for (name, loss) in zip(self.loss_names, loss_tensors)}
logs['tpb'] = (batch['input_ids'].ne(self.pad).sum() + batch['labels'].ne(self.pad).sum())
logs['bs'] = batch['input_ids'].shape[0]
logs['src_pad_tok'] = batch['input_ids'].eq(self.pad).sum()
logs['src_pad_frac'] = batch['input_ids'].eq(self.pad).float().mean()
self.training_acc_across_batches_at_curr_epoch.append(loss_tensors[0].item())
self.log_dict(logs)
loss = loss_tensors[0]
return {'loss': loss}
def on_epoch_end(self):
train_acc_mean = np.mean(self.training_acc_across_batches_at_curr_epoch)
self.log_dict({'train_loss': train_acc_mean})
print('train_loss = {}'.format(train_acc_mean))
self.training_acc_across_batches_per_epoch = []
def validation_step(self, batch, batch_idx) -> Dict:
if (self.current_epoch < 1):
return 1
if self.hparams.skip_val:
return 1
if self.hparams.hf_checkpoint:
save_path = Path(self.hparams.save_hf)
save_path = save_path.joinpath('checkpoint-curr_best')
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
print('SAVING TO checkpoint {}'.format(save_path))
raise ValueError('just_saving')
return self._generative_step(batch)
def validation_epoch_end(self, outputs, prefix='val') -> Dict:
if (self.current_epoch < 1):
logg = 0.1
self.log('val_rouge2', logg)
return 1
if self.hparams.skip_val:
logg = 0.1
self.log('val_rouge2', logg)
return 1
self.step_count += 1
losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
loss = losses['loss']
generative_metrics = {k: np.array([x[k] for x in outputs]).mean() for k in (self.metric_names + ['gen_time', 'gen_len'])}
metric_val = (generative_metrics[self.val_metric] if (self.val_metric in generative_metrics) else losses[self.val_metric])
metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
print('ROUGE2', metric_tensor)
print('VAL_LOSS', loss)
generative_metrics.update({k: v.item() for (k, v) in losses.items()})
losses.update(generative_metrics)
all_metrics = {f'{prefix}_avg_{k}': x for (k, x) in losses.items()}
all_metrics['step_count'] = self.step_count
self.metrics[prefix].append(all_metrics)
self.log_dict({'log': all_metrics, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor})
preds = flatten_list([x['preds'] for x in outputs])
return {'log': all_metrics, 'preds': preds, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor}
def calc_generative_metrics(self, preds, target) -> Dict:
return calculate_rouge(preds, target)
def _generative_step(self, batch: dict) -> dict:
t0 = time.time()
bsz = batch['input_ids'].size(0)
prefix_prompt = self.model.get_prompt(bsz=bsz, sample_size=self.eval_beams, conditional_info={'cats': batch['cats'], 'sport': batch['sport']})
generated_ids = self.seq2seq_model.generate(batch['input_ids'], past_key_values=prefix_prompt, attention_mask=batch['attention_mask'], use_cache=True, length_penalty=self.hparams.length_penalty, use_prefix=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, min_length=self.eval_min_length, max_length=self.eval_max_length, no_repeat_ngram_size=3)
gen_time = ((time.time() - t0) / batch['input_ids'].shape[0])
preds: List[str] = self.ids_to_clean_text(generated_ids)
target: List[str] = self.ids_to_clean_text(batch['labels'])
loss_tensors = self._step(batch)
base_metrics = {name: loss for (name, loss) in zip(self.loss_names, loss_tensors)}
rouge: Dict = self.calc_generative_metrics(preds, target)
summ_len = np.mean(lmap(len, generated_ids))
base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
return base_metrics
def test_step(self, batch, batch_idx):
return self._generative_step(batch)
def test_epoch_end(self, outputs):
return self.validation_epoch_end(outputs, prefix='test')
def get_dataset(self, type_path) -> Seq2SeqDataset:
n_obs = self.n_obs[type_path]
max_target_length = self.target_lens[type_path]
dataset = self.dataset_class(self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs)
return dataset
def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool=False) -> DataLoader:
dataset = self.get_dataset(type_path)
if (self.hparams.sortish_sampler and (type_path != 'test')):
sampler = dataset.make_sortish_sampler(batch_size, distributed=(self.hparams.gpus > 1))
return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler)
elif ((self.hparams.max_tokens_per_batch is not None) and (type_path != 'test')):
batch_sampler = dataset.make_dynamic_sampler(self.hparams.max_tokens_per_batch, distributed=(self.hparams.gpus > 1))
return DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers)
else:
return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None)
def train_dataloader(self) -> DataLoader:
dataloader = self.get_dataloader('train', batch_size=self.hparams.train_batch_size, shuffle=True)
return dataloader
def val_dataloader(self) -> DataLoader:
return self.get_dataloader('val', batch_size=self.hparams.eval_batch_size)
def test_dataloader(self) -> DataLoader:
return self.get_dataloader('test', batch_size=self.hparams.eval_batch_size)
@staticmethod
def add_model_specific_args(parser, root_dir):
PrefixTransformer.add_model_specific_args(parser, root_dir)
add_generic_args(parser, root_dir)
parser.add_argument('--max_source_length', default=512, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--max_target_length', default=60, type=int, help='The maximum total target sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--val_dir', default='', type=str, help='The directory for validation')
parser.add_argument('--val_max_target_length', default=60, type=int, help='The maximum total validation target length specified for generation')
parser.add_argument('--test_max_target_length', default=100, type=int, help='The maximum total test target length specified for generation')
parser.add_argument('--freeze_encoder', action='store_true')
parser.add_argument('--freeze_embeds', action='store_true')
parser.add_argument('--sortish_sampler', action='store_true', default=False)
parser.add_argument('--max_tokens_per_batch', type=int, default=None)
parser.add_argument('--logger_name', type=str, choices=['default', 'wandb', 'wandb_shared'], default='default')
parser.add_argument('--n_train', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--n_val', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--n_test', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--task_mode', type=str, default='summarization', required=False, help='task mode, e.g. summarization.')
parser.add_argument('--label_smoothing', type=float, default=0.0, required=False)
parser.add_argument('--src_lang', type=str, default='', required=False)
parser.add_argument('--save_hf', type=str, default='', required=False)
parser.add_argument('--tgt_lang', type=str, default='', required=False)
parser.add_argument('--eval_beams', type=int, default=6, required=False)
parser.add_argument('--eval_min_length', type=int, default=10, required=False)
parser.add_argument('--skip_val', type=bool, default=False, required=False)
parser.add_argument('--val_metric', type=str, default=None, required=False)
parser.add_argument('--eval_max_gen_length', type=int, default=60, help='never generate more than n tokens')
parser.add_argument('--length_penalty', type=float, default=1.0, help='length penalty specified for beam search')
parser.add_argument('--save_top_k', type=int, default=1, required=False, help='How many checkpoints to save')
parser.add_argument('--wb_project', type=str, default='')
parser.add_argument('--finetune', type=bool, default=False)
parser.add_argument('--git', type=bool, default=True)
parser.add_argument('--dev', type=bool, default=False)
parser.add_argument('--freeze_base', type=bool, default=False)
parser.add_argument('--wb_name', type=str, default='')
parser.add_argument('--id', type=str, default=None, help='wandb id if continuing a run')
parser.add_argument('--early_stopping_patience', type=int, default=(- 1), required=False, help='-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So val_check_interval will effect it.')
return parser
|
def eval(args, model=None):
if (model is None):
if ('summarization' in args.task_mode):
if (args.tuning_mode == 'prefixtune'):
model = PrefixSummarizationModule(args)
print('the length penalty is {}'.format(args.length_penalty))
with torch.no_grad():
model.eval()
model = model.cuda()
data_loader = model.test_dataloader()
print('DATALOADER_LEN', len(data_loader))
out_lst = []
for (batch_idx, batch) in enumerate(data_loader):
batch = model.transfer_batch_to_device(batch, model.device)
out = model.test_step(batch, batch_idx)
out_lst.append(out)
if ((batch_idx % 50) == 0):
print(model.test_epoch_end(out_lst))
print(out['preds'])
result = model.test_epoch_end(out_lst)
for (k, v) in result.items():
if (k != 'preds'):
print('FINAL_RESULTS')
print(k, v)
out_path = os.path.join(args.output_dir, 'test_beam_{}'.format(args.length_penalty))
print('writing the test results to ', out_path)
with open(out_path, 'w') as f:
for preds in result['preds']:
print(preds, file=f)
|
def main(args, model=None):
Path(args.output_dir).mkdir(exist_ok=True)
if (model is None):
if ('summarization' in args.task_mode):
if (args.tuning_mode == 'prefixtune'):
model = PrefixSummarizationModule(args)
pickle_save(args, os.path.join(args.output_dir, 'args.pkl'))
pickle_save(model.hparams, (model.output_dir / 'hparams.pkl'))
dataset = Path(args.data_dir).name
print(dataset)
if ((args.logger_name == 'default') or args.fast_dev_run or str(args.output_dir).startswith('/tmp') or str(args.output_dir).startswith('/var')):
logger = True
elif (args.logger_name == 'wandb'):
from pytorch_lightning.loggers import WandbLogger
if args.id:
id_ = args.id
else:
id_ = wandb.util.generate_id()
print('ID', id_)
logger = WandbLogger(id=id_, name=args.wb_name, project=args.wb_project, entity='jordiclive')
if (args.early_stopping_patience >= 0):
es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
else:
es_callback = False
trainer: pl.Trainer = generic_train(model, args, logging_callback=Seq2SeqLoggingCallback(), early_stopping_callback=es_callback, logger=logger)
pickle_save(model.hparams, (model.output_dir / 'hparams.pkl'))
return model
|
class PrefixTransformer(pl.LightningModule):
def __init__(self, hparams: argparse.Namespace, num_labels=None, mode='base', config=None, tokenizer=None, seq2seq_model=None, **config_kwargs):
'Initialize a model, tokenizer and config.'
super().__init__()
self.save_hyperparameters(hparams)
self.step_count = 0
self.output_dir = Path(self.hparams.output_dir)
cache_dir = (self.hparams.cache_dir if self.hparams.cache_dir else None)
print('the cache dir is {}'.format(cache_dir))
if (config is None):
self.config = AutoConfig.from_pretrained((self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path), **({'num_labels': num_labels} if (num_labels is not None) else {}), cache_dir=cache_dir, **config_kwargs)
else:
self.config: PretrainedConfig = config
extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams, p, None):
assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
setattr(self.config, p, getattr(self.hparams, p))
if (tokenizer is None):
self.tokenizer = AutoTokenizer.from_pretrained((self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path), cache_dir=cache_dir)
else:
self.tokenizer: PreTrainedTokenizer = tokenizer
self.config.preseqlen = 204
self.config.use_prefix = True
self.seq2seq_model_type = AutoModel
if (seq2seq_model is None):
self.seq2seq_model = BartForConditionalGeneration.from_pretrained(self.hparams.model_name_or_path, from_tf=bool(('.ckpt' in self.hparams.model_name_or_path)), config=self.config, cache_dir=cache_dir)
else:
self.seq2seq_model = seq2seq_model
config_prefix = AutoConfig.from_pretrained(self.hparams.model_name_or_path, cache_dir=cache_dir)
self.model_type = config_prefix.model_type
if (self.hparams.optim_prefix == 'yes'):
optim_prefix_bool = True
elif (self.hparams.optim_prefix == 'no'):
optim_prefix_bool = False
else:
assert False, 'model_args.optim_prefix should be either yes or no'
print(self.model_type)
config_prefix._my_arg_tune_mode = self.hparams.tuning_mode
config_prefix._my_arg_task_mode = self.hparams.task_mode
config_prefix._my_arg_control = True
config_prefix.train_weights = False
config_prefix.optim_prefix = optim_prefix_bool
config_prefix.preseqlen = self.hparams.preseqlen
config_prefix.use_infix = (self.hparams.format_mode == 'infix')
config_prefix.format_mode = self.hparams.format_mode
config_prefix.prefix_dropout = self.hparams.prefix_dropout
config_prefix.vocab_size = len(self.tokenizer)
config_prefix.lowdata = ('lowdata' in self.hparams.output_dir)
if (config_prefix.lowdata and (self.hparams.use_lowdata_token == 'yes')):
config_prefix.lowdata_token = self.tokenizer([self.hparams.lowdata_token], add_prefix_space=True)['input_ids']
print(self.hparams.lowdata_token)
print(config_prefix.lowdata_token)
print(self.tokenizer.pad_token_id)
config_prefix.mid_dim = self.hparams.mid_dim
if (self.hparams.prefixModel_name_or_path is not None):
print('LOADING FROM {}'.format(hparams.prefixModel_name_or_path))
self.model = PrefixTuning.from_pretrained(self.hparams.prefixModel_name_or_path, from_tf=bool(('.ckpt' in self.hparams.prefixModel_name_or_path)), cache_dir=cache_dir, config=config_prefix)
else:
self.model = PrefixTuning(config_prefix)
def load_hf_checkpoint(self, *args, **kwargs):
assert False, 'why need to load model here?'
self.model = self.model_type.from_pretrained(*args, **kwargs)
def get_lr_scheduler(self):
get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
scheduler = get_schedule_func(self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps)
scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def configure_optimizers(self):
'Prepare optimizer and schedule (linear warmup and decay)'
model = self.model
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': self.hparams.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
if self.hparams.adafactor:
optimizer = Adafactor(optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
self.opt = optimizer
scheduler = self.get_lr_scheduler()
return ([optimizer], [scheduler])
def test_step(self, batch, batch_nb):
return self.validation_step(batch, batch_nb)
def test_epoch_end(self, outputs):
return self.validation_end(outputs)
@property
def total_steps(self) -> int:
'The number of total training steps that will be run. Used for lr scheduler purposes.'
num_devices = max(1, self.hparams.gpus)
if (self.hparams.original_batch_size is not None):
effective_batch_size = ((self.hparams.original_batch_size * self.hparams.accumulate_grad_batches) * num_devices)
else:
effective_batch_size = ((self.hparams.train_batch_size * self.hparams.accumulate_grad_batches) * num_devices)
dataset_size = len(self.train_loader.dataset)
return int((dataset_size / effective_batch_size) * self.hparams.max_epochs)
def setup(self, mode):
if (mode == 'fit'):
self.train_loader = self.get_dataloader('train', self.hparams.train_batch_size, shuffle=True)
def get_dataloader(self, type_path, batch_size, shuffle=False):
raise NotImplementedError('You must implement this for your task')
def train_dataloader(self):
return self.train_loader
def val_dataloader(self):
return self.get_dataloader('dev', self.hparams.eval_batch_size, shuffle=False)
def test_dataloader(self):
return self.get_dataloader('test', self.hparams.eval_batch_size, shuffle=False)
def _feature_file(self, mode):
return os.path.join(self.hparams.data_dir, 'cached_{}_{}_{}'.format(mode, list(filter(None, self.hparams.model_name_or_path.split('/'))).pop(), str(self.hparams.max_seq_length)))
@pl.utilities.rank_zero_only
def save_checkpoint(self, trainer) -> None:
print('Saving the checkpoint.')
return
@pl.utilities.rank_zero_only
def on_save_checkpoint(self, checkpoint: Dict[(str, Any)], filepath=None) -> None:
save_path = self.output_dir.joinpath('checkpoint-curr_best')
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
print('SAVING TO checkpoint {}'.format(save_path))
@staticmethod
def add_model_specific_args(parser, root_dir):
parser.add_argument('--model_name_or_path', default='facebook/bart-large', type=str, required=False, help='Path to pretrained model or model identifier from huggingface.co/models')
parser.add_argument('--prefixModel_name_or_path', default=None, type=str, help='Path to pretrained prefix model or model identifier from huggingface.co/models')
parser.add_argument('--prefix_mode', default='activation', type=str, help='embedding or activation')
parser.add_argument('--preseqlen', default=200, type=int, help='the length of the prefix.')
parser.add_argument('--optim_prefix', default='yes', type=str, help='use the task specific optimization of the prefix.')
parser.add_argument('--different_scheduler', default=False, type=bool, help='.')
parser.add_argument('--tuning_mode', default='prefixtune', type=str, help='Could be prefixtune or finetune')
parser.add_argument('--prefix_dropout', default=0.0, type=float, help='the dropout rate for our prefix model.')
parser.add_argument('--use_dropout', default='no', type=str, help='whether to dropout the main model during training. ')
parser.add_argument('--mid_dim', default=800, type=int, help='the dimension of the intermediate layer.')
parser.add_argument('--cefr_mid_dim', default=100, type=int, help='the dimension of the intermediate layer.')
parser.add_argument('--cefr_length', default=2, type=int, help='the length of the CEFR control prefix.')
parser.add_argument('--CEFR_single_reparam', type=bool, default=True)
parser.add_argument('--same_CEFR_intialization', type=bool, default=False)
parser.add_argument('--format_mode', default='cat', type=str, help='whether to look at the input again, including [infix, cat, peek, nopeek]')
parser.add_argument('--use_lowdata_token', default='yes', type=str, help='whether or not to use the lowdata token, ')
parser.add_argument('--lowdata_token', default='summarize', type=str, help='the low data token to use. ')
parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', default=None, type=str, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--cache_dir', default='/content/gdrive/MyDrive/cache_dir', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--encoder_layerdrop', type=float, help='Encoder layer dropout probability (Optional). Goes into model.config')
parser.add_argument('--decoder_layerdrop', type=float, help='Decoder layer dropout probability (Optional). Goes into model.config')
parser.add_argument('--dropout', type=float, help='Dropout probability (Optional). Goes into model.config')
parser.add_argument('--attention_dropout', type=float, help='Attention dropout probability (Optional). Goes into model.config')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
parser.add_argument('--lr_scheduler', default='linear', choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help='Learning rate scheduler')
parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
parser.add_argument('--num_workers', default=4, type=int, help='kwarg passed to DataLoader')
parser.add_argument('--num_train_epochs', dest='max_epochs', default=30, type=int)
parser.add_argument('--original_batch_size', default=None, type=int)
parser.add_argument('--hf_checkpoint', default=False, type=bool)
parser.add_argument('--train_batch_size', default=8, type=int)
parser.add_argument('--eval_batch_size', default=6, type=int)
parser.add_argument('--adafactor', action='store_true')
|
def add_generic_args(parser, root_dir) -> None:
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
parser.add_argument('--n_tpu_cores', dest='tpu_cores', type=int)
parser.add_argument('--max_grad_norm', dest='gradient_clip_val', default=1.0, type=float, help='Max gradient norm')
parser.add_argument('--do_train', default=True, action='store_true', help='Whether to run training.')
parser.add_argument('--do_predict', default=False, type=bool, help='Whether to run predictions.')
parser.add_argument('--gradient_accumulation_steps', dest='accumulate_grad_batches', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--seed', type=int, default=101, help='random seed for initialization')
parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.')
|
def generic_train(model, args: argparse.Namespace, early_stopping_callback=False, logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs):
pl.seed_everything(args.seed)
odir = Path(model.hparams.output_dir)
odir.mkdir(exist_ok=True)
checkpoint_callback = pl.callbacks.ModelCheckpoint(dirpath=args.output_dir, monitor=('val_' + args.val_metric), mode='max', save_top_k=args.save_top_k, save_last=True)
if (early_stopping_callback is not False):
extra_callbacks.append(early_stopping_callback)
print('the max number of epochs is {}'.format(args.max_epochs))
print('early stopping', early_stopping_callback)
print('checkpoint_callback', checkpoint_callback)
print('logging', logging_callback)
trainer = pl.Trainer.from_argparse_args(args, max_epochs=args.max_epochs, weights_summary=None, callbacks=([logging_callback] + extra_callbacks), logger=logger, checkpoint_callback=checkpoint_callback)
print('args.do_train:', args.do_train)
if args.do_train:
trainer.fit(model)
return trainer
|
def run_experiment(yaml_file):
with open(yaml_file, 'r') as stream:
parsed_yaml = yaml.safe_load(stream)
args = ''
for (arg, value) in parsed_yaml.items():
args += f'--{arg} {value} '
os.system(f'python finetune.py {args}')
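# A minimal sketch of the YAML layout run_experiment expects (hypothetical keys and values,
# not shipped with this code): every top-level key/value pair becomes a '--key value' flag
# appended to 'python finetune.py'.
#
#   output_dir: /tmp/prefix_run
#   data_dir: ./data/summarization
#   train_batch_size: 8
#   learning_rate: 5e-05
#
# Note that store_true flags (e.g. --adafactor) and values containing spaces are not handled
# by this simple '--{arg} {value}' concatenation.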
|
def count_trainable_parameters(model):
model_parameters = filter((lambda p: p.requires_grad), model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return params
|
class Seq2SeqLoggingCallback(pl.Callback):
def on_batch_end(self, trainer, pl_module):
lrs = {f'lr_group_{i}': param['lr'] for (i, param) in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(lrs)
@rank_zero_only
def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****')
metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for (k, v) in metrics.items() if (k not in ['log', 'progress_bar', 'preds'])})
od = Path(pl_module.hparams.output_dir)
if (type_path == 'test'):
results_file = (od / 'test_results.txt')
generations_file = (od / 'test_generations.txt')
else:
results_file = (od / f'{type_path}_results/{trainer.global_step:05d}.txt')
generations_file = (od / f'{type_path}_generations/{trainer.global_step:05d}.txt')
results_file.parent.mkdir(exist_ok=True)
generations_file.parent.mkdir(exist_ok=True)
with open(results_file, 'a+') as writer:
for key in sorted(metrics):
if (key in ['log', 'progress_bar', 'preds']):
continue
val = metrics[key]
if isinstance(val, torch.Tensor):
val = val.item()
msg = f'{key}: {val:.6f}\n'
writer.write(msg)
if (not save_generations):
return
if ('preds' in metrics):
content = '\n'.join(metrics['preds'])
generations_file.open('w+').write(content)
@rank_zero_only
def on_train_start(self, trainer, pl_module):
try:
npars = pl_module.model.model.num_parameters()
except AttributeError:
npars = pl_module.model.num_parameters()
n_trainable_pars = count_trainable_parameters(pl_module)
trainer.logger.log_metrics({'n_params': npars, 'mp': (npars / 1000000.0), 'grad_mp': (n_trainable_pars / 1000000.0)})
@rank_zero_only
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
save_json(pl_module.metrics, pl_module.metrics_save_path)
return self._write_logs(trainer, pl_module, 'test')
@rank_zero_only
def on_validation_end(self, trainer: pl.Trainer, pl_module):
save_json(pl_module.metrics, pl_module.metrics_save_path)
|
def bespoke_scheduler(optimizer, num_warmup_steps, num_training_steps, last_epoch=(- 1)):
'\n Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after\n a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.\n\n Args:\n optimizer (:class:`~torch.optim.Optimizer`):\n The optimizer for which to schedule the learning rate.\n num_warmup_steps (:obj:`int`):\n The number of steps for the warmup phase.\n num_training_steps (:obj:`int`):\n The total number of training steps.\n last_epoch (:obj:`int`, `optional`, defaults to -1):\n The index of the last epoch when resuming training.\n\n Return:\n :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.\n '
def lr_lambda(current_step: int):
if (current_step < num_warmup_steps):
return (float(current_step) / float(max(1, num_warmup_steps)))
return max(0.0, (float((num_training_steps - current_step)) / float(max(1, (num_training_steps - num_warmup_steps)))))
def lr_lambda2(current_step: int):
if (current_step < num_warmup_steps):
return (float(current_step) / float(max(1, num_warmup_steps)))
return max(0.0, (float(((num_training_steps * 3) - current_step)) / float(max(1, ((num_training_steps * 3) - num_warmup_steps)))))
return LambdaLR(optimizer, [lr_lambda, lr_lambda2], last_epoch)
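# Usage sketch (hypothetical parameter groups, not part of this file): LambdaLR applies one
# lambda per optimizer param group, so bespoke_scheduler expects exactly two groups; the
# second group decays three times more slowly, reaching zero only at 3 * num_training_steps.
#
#   optimizer = AdamW([{'params': prefix_params}, {'params': base_params}], lr=5e-05)
#   scheduler = bespoke_scheduler(optimizer, num_warmup_steps=500, num_training_steps=10000)
#   optimizer.step()
#   scheduler.step()  # call once per training step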
|
class PrefixSummarizationModule(PrefixTransformer):
mode = 'summarization'
loss_names = ['loss']
metric_names = ROUGE_KEYS
default_val_metric = 'rouge2'
def __init__(self, hparams, **kwargs):
if (hparams.sortish_sampler and (hparams.gpus > 1)):
hparams.replace_sampler_ddp = False
elif (hparams.max_tokens_per_batch is not None):
if (hparams.gpus > 1):
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')
super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
use_task_specific_params(self.model, 'summarization')
self.metrics_save_path = (Path(self.output_dir) / 'metrics.json')
self.hparams_save_path = (Path(self.output_dir) / 'hparams.pkl')
pickle_save(self.hparams, self.hparams_save_path)
self.step_count = 0
self.metrics = defaultdict(list)
self.model_type = self.config.model_type
self.vocab_size = (self.config.tgt_vocab_size if (self.model_type == 'fsmt') else self.config.vocab_size)
self.val_dir = self.hparams.output_dir
self.dataset_kwargs: dict = dict(data_dir=self.hparams.data_dir, max_source_length=self.hparams.max_source_length, prefix=(self.model.config.prefix or ''))
n_observations_per_split = {'train': self.hparams.n_train, 'val': self.hparams.n_val, 'test': self.hparams.n_test}
self.n_obs = {k: (v if (v >= 0) else None) for (k, v) in n_observations_per_split.items()}
self.target_lens = {'train': self.hparams.max_target_length, 'val': self.hparams.val_max_target_length, 'test': self.hparams.test_max_target_length}
assert (self.target_lens['train'] <= self.target_lens['val']), f'target_lens: {self.target_lens}'
assert (self.target_lens['train'] <= self.target_lens['test']), f'target_lens: {self.target_lens}'
if (not self.hparams.finetune):
freeze_params(self.seq2seq_model)
assert_all_frozen(self.seq2seq_model)
print('FREEZING ENTIRE seq2seq model.')
else:
print('FINE-TUNING')
self.freeze_embeds()
self.num_workers = hparams.num_workers
self.decoder_start_token_id = None
if ((self.model.config.decoder_start_token_id is None) and isinstance(self.tokenizer, MBartTokenizer)):
self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
self.model.config.decoder_start_token_id = self.decoder_start_token_id
self.dataset_class = (Seq2SeqDataset if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else LegacySeq2SeqDataset)
self.eval_beams = (self.model.config.num_beams if (self.hparams.eval_beams is None) else self.hparams.eval_beams)
assert (self.eval_beams >= 1), f'got self.eval_beams={self.eval_beams}. Need an integer >= 1'
if (self.hparams.eval_max_gen_length is not None):
self.eval_max_length = self.hparams.eval_max_gen_length
else:
self.eval_max_length = self.model.config.max_length
self.val_metric = (self.default_val_metric if (self.hparams.val_metric is None) else self.hparams.val_metric)
self.training_acc_across_batches_at_curr_epoch = []
self.eval_max_length = 60
self.eval_min_length = 10
self.eval_beams = 6
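# NOTE: the three assignments above hard-code the decoding settings and override the
# hparams-derived values computed a few lines earlier.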
print('for decoding, eval_max_length={}, eval_min_length={}, eval_beams={}'.format(self.eval_max_length, self.eval_min_length, self.eval_beams))
def freeze_embeds(self):
'Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.'
freeze_params(self.seq2seq_model.model.shared)
for d in [self.seq2seq_model.model.encoder, self.seq2seq_model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
def forward(self, input_ids, **kwargs):
return self.model(input_ids, frozen_model=self.seq2seq_model, **kwargs)
def ids_to_clean_text(self, generated_ids: List[int]):
gen_text = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
return lmap(str.strip, gen_text)
def _step(self, batch: dict) -> Tuple:
pad_token_id = self.tokenizer.pad_token_id
(src_ids, src_mask) = (batch['input_ids'], batch['attention_mask'])
tgt_ids = batch['labels']
if isinstance(self.model, T5ForConditionalGeneration):
decoder_input_ids = self.model._shift_right(tgt_ids)
else:
decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False, use_prefix=True, conditional_info={'cats': batch['cats'], 'sport': batch['sport']})
lm_logits = outputs[0]
if (self.hparams.label_smoothing == 0):
ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id)
assert (lm_logits.shape[(- 1)] == self.vocab_size)
loss = ce_loss_fct(lm_logits.view((- 1), lm_logits.shape[(- 1)]), tgt_ids.view((- 1)))
else:
lprobs = torch.nn.functional.log_softmax(lm_logits, dim=(- 1))
(loss, nll_loss) = label_smoothed_nll_loss(lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id)
return (loss,)
@property
def pad(self) -> int:
return self.tokenizer.pad_token_id
def training_step(self, batch, batch_idx) -> Dict:
loss_tensors = self._step(batch)
logs = {name: loss for (name, loss) in zip(self.loss_names, loss_tensors)}
logs['tpb'] = (batch['input_ids'].ne(self.pad).sum() + batch['labels'].ne(self.pad).sum())
logs['bs'] = batch['input_ids'].shape[0]
logs['src_pad_tok'] = batch['input_ids'].eq(self.pad).sum()
logs['src_pad_frac'] = batch['input_ids'].eq(self.pad).float().mean()
self.training_acc_across_batches_at_curr_epoch.append(loss_tensors[0].item())
self.log_dict(logs)
loss = loss_tensors[0]
return {'loss': loss}
def on_epoch_end(self):
train_acc_mean = np.mean(self.training_acc_across_batches_at_curr_epoch)
self.log_dict({'train_loss': train_acc_mean})
print('train_loss = {}'.format(train_acc_mean))
self.training_acc_across_batches_per_epoch = []
def validation_step(self, batch, batch_idx) -> Dict:
if (self.current_epoch < 1):
return 1
if self.hparams.skip_val:
return 1
if self.hparams.hf_checkpoint:
save_path = Path(self.hparams.save_hf)
save_path = save_path.joinpath('checkpoint-curr_best')
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
print('SAVING TO checkpoint {}'.format(save_path))
raise ValueError('just_saving')
return self._generative_step(batch)
def validation_epoch_end(self, outputs, prefix='val') -> Dict:
if (self.current_epoch < 1):
logg = 0.1
self.log('val_rouge2', logg)
return 1
if self.hparams.skip_val:
logg = 0.1
self.log('val_rouge2', logg)
return 1
self.step_count += 1
losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
loss = losses['loss']
generative_metrics = {k: np.array([x[k] for x in outputs]).mean() for k in (self.metric_names + ['gen_time', 'gen_len'])}
metric_val = (generative_metrics[self.val_metric] if (self.val_metric in generative_metrics) else losses[self.val_metric])
metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
rank_zero_info(f'Rouge2: {metric_tensor}')
rank_zero_info(f'val_loss: {loss}')
generative_metrics.update({k: v.item() for (k, v) in losses.items()})
losses.update(generative_metrics)
all_metrics = {f'{prefix}_avg_{k}': x for (k, x) in losses.items()}
all_metrics['step_count'] = self.step_count
self.metrics[prefix].append(all_metrics)
self.log_dict({'log': all_metrics, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor})
preds = flatten_list([x['preds'] for x in outputs])
return {'log': all_metrics, 'preds': preds, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor}
def calc_generative_metrics(self, preds, target) -> Dict:
return calculate_rouge(preds, target)
def _generative_step(self, batch: dict) -> dict:
t0 = time.time()
bsz = batch['input_ids'].size(0)
prefix_prompt = self.model.get_prompt(bsz=bsz, sample_size=self.eval_beams, conditional_info={'cats': batch['cats'], 'sport': batch['sport']})
generated_ids = self.seq2seq_model.generate(batch['input_ids'], past_key_values=prefix_prompt, attention_mask=batch['attention_mask'], use_cache=True, length_penalty=self.hparams.length_penalty, use_prefix=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, min_length=self.eval_min_length, max_length=self.eval_max_length, no_repeat_ngram_size=3)
gen_time = ((time.time() - t0) / batch['input_ids'].shape[0])
preds: List[str] = self.ids_to_clean_text(generated_ids)
target: List[str] = self.ids_to_clean_text(batch['labels'])
loss_tensors = self._step(batch)
base_metrics = {name: loss for (name, loss) in zip(self.loss_names, loss_tensors)}
rouge: Dict = self.calc_generative_metrics(preds, target)
summ_len = np.mean(lmap(len, generated_ids))
base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
return base_metrics
def test_step(self, batch, batch_idx):
return self._generative_step(batch)
def test_epoch_end(self, outputs):
return self.validation_epoch_end(outputs, prefix='test')
def get_dataset(self, type_path) -> Seq2SeqDataset:
n_obs = self.n_obs[type_path]
max_target_length = self.target_lens[type_path]
dataset = self.dataset_class(self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs)
return dataset
def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool=False) -> DataLoader:
dataset = self.get_dataset(type_path)
if (self.hparams.sortish_sampler and (type_path != 'test')):
sampler = dataset.make_sortish_sampler(batch_size, distributed=(self.hparams.gpus > 1))
return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler)
elif ((self.hparams.max_tokens_per_batch is not None) and (type_path != 'test')):
batch_sampler = dataset.make_dynamic_sampler(self.hparams.max_tokens_per_batch, distributed=(self.hparams.gpus > 1))
return DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers)
else:
return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None)
def train_dataloader(self) -> DataLoader:
dataloader = self.get_dataloader('train', batch_size=self.hparams.train_batch_size, shuffle=True)
return dataloader
def val_dataloader(self) -> DataLoader:
return self.get_dataloader('val', batch_size=self.hparams.eval_batch_size)
def test_dataloader(self) -> DataLoader:
return self.get_dataloader('test', batch_size=self.hparams.eval_batch_size)
@staticmethod
def add_model_specific_args(parser, root_dir):
PrefixTransformer.add_model_specific_args(parser, root_dir)
add_generic_args(parser, root_dir)
parser.add_argument('--max_source_length', default=512, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--max_target_length', default=60, type=int, help='The maximum total target sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--val_dir', default='', type=str, help='The directory for validation')
parser.add_argument('--val_max_target_length', default=60, type=int, help='The maximum total target sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--test_max_target_length', default=100, type=int, help='The maximum total test target length specified for generation')
parser.add_argument('--freeze_encoder', action='store_true')
parser.add_argument('--freeze_embeds', action='store_true')
parser.add_argument('--sortish_sampler', action='store_true', default=False)
parser.add_argument('--max_tokens_per_batch', type=int, default=None)
parser.add_argument('--logger_name', type=str, choices=['default', 'wandb', 'wandb_shared'], default='default')
parser.add_argument('--n_train', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--n_val', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--n_test', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--task_mode', type=str, default='summarization', required=False, help='The task, e.g. summarization.')
parser.add_argument('--label_smoothing', type=float, default=0.0, required=False)
parser.add_argument('--src_lang', type=str, default='', required=False)
parser.add_argument('--save_hf', type=str, default='', required=False)
parser.add_argument('--tgt_lang', type=str, default='', required=False)
parser.add_argument('--eval_beams', type=int, default=6, required=False)
parser.add_argument('--eval_min_length', type=int, default=10, required=False)
parser.add_argument('--skip_val', type=bool, default=False, required=False)
parser.add_argument('--val_metric', type=str, default=None, required=False)
parser.add_argument('--eval_max_gen_length', type=int, default=60, help='never generate more than n tokens')
parser.add_argument('--length_penalty', type=float, default=1.0, help='Length penalty applied at generation time.')
parser.add_argument('--save_top_k', type=int, default=1, required=False, help='How many checkpoints to save')
parser.add_argument('--wb_project', type=str, default='', help='wandb project name')
parser.add_argument('--finetune', type=bool, default=False)
parser.add_argument('--git', type=bool, default=True)
parser.add_argument('--dev', type=bool, default=False)
parser.add_argument('--freeze_base', type=bool, default=False)
parser.add_argument('--wb_entity', type=str, default='', help='wandb entity')
parser.add_argument('--wb_name', type=str, default='', help='wandb run name')
parser.add_argument('--id', type=str, default='', help='wandb id if continuing a run')
parser.add_argument('--early_stopping_patience', type=int, default=(- 1), required=False, help='-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So val_check_interval will effect it.')
return parser
|
def eval(args, model=None):
if (model is None):
if ('summarization' in args.task_mode):
if (args.tuning_mode == 'prefixtune'):
model = PrefixSummarizationModule(args)
print('the length penalty is {}'.format(args.length_penalty))
with torch.no_grad():
model.eval()
model = model.cuda()
data_loader = model.test_dataloader()
print('DATALOADER_LEN', len(data_loader))
out_lst = []
for (batch_idx, batch) in enumerate(data_loader):
batch = model.transfer_batch_to_device(batch, model.device)
out = model.test_step(batch, batch_idx)
out_lst.append(out)
if ((batch_idx % 50) == 0):
print(model.test_epoch_end(out_lst))
print(out['preds'])
result = model.test_epoch_end(out_lst)
for (k, v) in result.items():
if (k != 'preds'):
print('FINAL_RESULTS')
print(k, v)
out_path = os.path.join(args.output_dir, 'test_beam_{}'.format(args.length_penalty))
print('writing the test results to ', out_path)
with open(out_path, 'w') as f:
for preds in result['preds']:
print(preds, file=f)
|
def main(args, model=None):
Path(args.output_dir).mkdir(exist_ok=True)
if (model is None):
if ('summarization' in args.task_mode):
if (args.tuning_mode == 'prefixtune'):
model = PrefixSummarizationModule(args)
pickle_save(args, os.path.join(args.output_dir, 'args.pkl'))
pickle_save(model.hparams, (model.output_dir / 'hparams.pkl'))
dataset = Path(args.data_dir).name
print(dataset)
if ((args.logger_name == 'default') or args.fast_dev_run or str(args.output_dir).startswith('/tmp') or str(args.output_dir).startswith('/var')):
logger = True
elif (args.logger_name == 'wandb'):
from pytorch_lightning.loggers import WandbLogger
if args.id:
id_ = args.id
else:
id_ = wandb.util.generate_id()
print('ID', id_)
logger = WandbLogger(id=id_, name=args.wb_name, project=args.wb_project, entity=args.wb_entity)
if (args.early_stopping_patience >= 0):
es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
else:
es_callback = False
trainer: pl.Trainer = generic_train(model, args, logging_callback=Seq2SeqLoggingCallback(), early_stopping_callback=es_callback, logger=logger)
pickle_save(model.hparams, (model.output_dir / 'hparams.pkl'))
return model
|
class PrefixTransformer(pl.LightningModule):
def __init__(self, hparams: argparse.Namespace, num_labels=None, config=None, tokenizer=None, seq2seq_model=None, **config_kwargs):
'Initialize a model, tokenizer and config.'
super().__init__()
self.save_hyperparameters(hparams)
self.step_count = 0
self.output_dir = Path(self.hparams.output_dir)
cache_dir = (self.hparams.cache_dir if self.hparams.cache_dir else None)
print('the cache dir is {}'.format(cache_dir))
if (config is None):
self.config = AutoConfig.from_pretrained((self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path), **({'num_labels': num_labels} if (num_labels is not None) else {}), cache_dir=cache_dir, **config_kwargs)
else:
self.config: PretrainedConfig = config
extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams, p, None):
assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
setattr(self.config, p, getattr(self.hparams, p))
if (tokenizer is None):
self.tokenizer = AutoTokenizer.from_pretrained((self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path), cache_dir=cache_dir)
else:
self.tokenizer: PreTrainedTokenizer = tokenizer
self.config.use_prefix = True
self.config.preseqlen = self.hparams.preseqlen
if self.hparams.control_prefixes:
self.config.preseqlen += (self.hparams.m_prefix_len * 2)
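# Assumption (not stated in the original code): the extra 2 * m_prefix_len positions reserve
# one control-prefix block per conditioning attribute, matching the two attributes ('cats'
# and 'sport') carried in the batches below.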
self.seq2seq_model_type = AutoModel
if (seq2seq_model is None):
self.seq2seq_model = BartForConditionalGeneration.from_pretrained(self.hparams.model_name_or_path, from_tf=bool(('.ckpt' in self.hparams.model_name_or_path)), config=self.config, cache_dir=cache_dir)
else:
self.seq2seq_model = seq2seq_model
config_prefix = AutoConfig.from_pretrained(self.hparams.model_name_or_path, cache_dir=cache_dir)
self.model_type = config_prefix.model_type
if (self.hparams.optim_prefix == 'yes'):
optim_prefix_bool = True
elif (self.hparams.optim_prefix == 'no'):
optim_prefix_bool = False
else:
assert False, 'model_args.optim_prefix should be either yes or no'
print(self.model_type)
config_prefix._my_arg_tune_mode = self.hparams.tuning_mode
config_prefix._my_arg_task_mode = self.hparams.task_mode
config_prefix._my_arg_control = True
config_prefix.train_weights = False
config_prefix.optim_prefix = optim_prefix_bool
config_prefix.preseqlen = self.hparams.preseqlen
config_prefix.use_infix = (self.hparams.format_mode == 'infix')
config_prefix.format_mode = self.hparams.format_mode
config_prefix.prefix_dropout = self.hparams.prefix_dropout
config_prefix.vocab_size = len(self.tokenizer)
config_prefix.lowdata = ('lowdata' in self.hparams.output_dir)
if (config_prefix.lowdata and (self.hparams.use_lowdata_token == 'yes')):
config_prefix.lowdata_token = self.tokenizer([self.hparams.lowdata_token], add_prefix_space=True)['input_ids']
print(self.hparams.lowdata_token)
print(config_prefix.lowdata_token)
print(self.tokenizer.pad_token_id)
config_prefix.mid_dim = self.hparams.mid_dim
if (self.hparams.prefixModel_name_or_path is not None):
print('LOADING FROM {}'.format(hparams.prefixModel_name_or_path))
self.model = ControlPrefixes.from_pretrained(self.hparams.prefixModel_name_or_path, from_tf=bool(('.ckpt' in self.hparams.prefixModel_name_or_path)), cache_dir=cache_dir, config=config_prefix)
else:
self.model = ControlPrefixes(config_prefix)
def load_hf_checkpoint(self, *args, **kwargs):
assert False, 'why need to load model here?'
self.model = self.model_type.from_pretrained(*args, **kwargs)
def get_lr_scheduler(self):
get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
scheduler = get_schedule_func(self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps)
scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def configure_optimizers(self):
'Prepare optimizer and schedule (linear warmup and decay)'
model = self.model
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': self.hparams.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
if self.hparams.adafactor:
optimizer = Adafactor(optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
self.opt = optimizer
scheduler = self.get_lr_scheduler()
return ([optimizer], [scheduler])
def test_step(self, batch, batch_nb):
return self.validation_step(batch, batch_nb)
def test_epoch_end(self, outputs):
return self.validation_end(outputs)
@property
def total_steps(self) -> int:
'The number of total training steps that will be run. Used for lr scheduler purposes.'
num_devices = max(1, self.hparams.gpus)
if (self.hparams.original_batch_size is not None):
effective_batch_size = ((self.hparams.original_batch_size * self.hparams.accumulate_grad_batches) * num_devices)
else:
effective_batch_size = ((self.hparams.train_batch_size * self.hparams.accumulate_grad_batches) * num_devices)
dataset_size = len(self.train_loader.dataset)
return int((dataset_size / effective_batch_size) * self.hparams.max_epochs)
def setup(self, mode):
if (mode == 'fit'):
self.train_loader = self.get_dataloader('train', self.hparams.train_batch_size, shuffle=True)
def get_dataloader(self, type_path, batch_size, shuffle=False):
raise NotImplementedError('You must implement this for your task')
def train_dataloader(self):
return self.train_loader
def val_dataloader(self):
return self.get_dataloader('dev', self.hparams.eval_batch_size, shuffle=False)
def test_dataloader(self):
return self.get_dataloader('test', self.hparams.eval_batch_size, shuffle=False)
def _feature_file(self, mode):
return os.path.join(self.hparams.data_dir, 'cached_{}_{}_{}'.format(mode, list(filter(None, self.hparams.model_name_or_path.split('/'))).pop(), str(self.hparams.max_seq_length)))
@pl.utilities.rank_zero_only
def save_checkpoint(self, trainer) -> None:
print('Saving the checkpoint.')
return
@pl.utilities.rank_zero_only
def on_save_checkpoint(self, checkpoint: Dict[(str, Any)], filepath=None) -> None:
save_path = self.output_dir.joinpath('checkpoint-curr_best')
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
print('SAVING TO checkpoint {}'.format(save_path))
@staticmethod
def add_model_specific_args(parser, root_dir):
parser.add_argument('--model_name_or_path', default='facebook/bart-large', type=str, required=False, help='Path to pretrained model or model identifier from huggingface.co/models')
parser.add_argument('--prefixModel_name_or_path', default=None, type=str, help='Path to pretrained prefix model or model identifier from huggingface.co/models')
parser.add_argument('--prefix_mode', default='activation', type=str, help='embedding or activation')
parser.add_argument('--preseqlen', default=200, type=int, help='the length of the prefix.')
parser.add_argument('--optim_prefix', default='yes', type=str, help='use the task specific optimization of the prefix.')
parser.add_argument('--different_scheduler', default=False, type=bool, help='.')
parser.add_argument('--tuning_mode', default='prefixtune', type=str, help='Could be prefixtune or finetune')
parser.add_argument('--prefix_dropout', default=0.0, type=float, help='the dropout rate for our prefix model.')
parser.add_argument('--use_dropout', default='no', type=str, help='whether to dropout the main model during training. ')
parser.add_argument('--mid_dim', default=800, type=int, help='the dimension of the intermediate layer.')
parser.add_argument('--m_prefix_len', default=1, type=int, help='the control prefix length')
parser.add_argument('--format_mode', default='cat', type=str, help='whether to look at the input again, including [infix, cat, peek, nopeek]')
parser.add_argument('--use_lowdata_token', default='yes', type=str, help='whether or not to use the lowdata token, ')
parser.add_argument('--lowdata_token', default='summarize', type=str, help='the low data token to use. ')
parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', default=None, type=str, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--cache_dir', default='/content/gdrive/MyDrive/cache_dir', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--encoder_layerdrop', type=float, help='Encoder layer dropout probability (Optional). Goes into model.config')
parser.add_argument('--decoder_layerdrop', type=float, help='Decoder layer dropout probability (Optional). Goes into model.config')
parser.add_argument('--dropout', type=float, help='Dropout probability (Optional). Goes into model.config')
parser.add_argument('--control_prefixes', type=bool, default=False, help='if using control prefixes')
parser.add_argument('--attention_dropout', type=float, help='Attention dropout probability (Optional). Goes into model.config')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The target learning rate.')
parser.add_argument('--lr_scheduler', default='linear', choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help='Learning rate scheduler')
parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
parser.add_argument('--num_workers', default=4, type=int, help='kwarg passed to DataLoader')
parser.add_argument('--num_train_epochs', dest='max_epochs', default=30, type=int)
parser.add_argument('--original_batch_size', default=None, type=int)
parser.add_argument('--hf_checkpoint', default=False, type=bool)
parser.add_argument('--train_batch_size', default=8, type=int)
parser.add_argument('--eval_batch_size', default=6, type=int)
parser.add_argument('--adafactor', action='store_true')
|
def add_generic_args(parser, root_dir) -> None:
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
parser.add_argument('--n_tpu_cores', dest='tpu_cores', type=int)
parser.add_argument('--max_grad_norm', dest='gradient_clip_val', default=1.0, type=float, help='Max gradient norm')
parser.add_argument('--do_train', default=True, action='store_true', help='Whether to run training.')
parser.add_argument('--do_predict', default=False, type=bool, help='Whether to run predictions.')
parser.add_argument('--gradient_accumulation_steps', dest='accumulate_grad_batches', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--seed', type=int, default=101, help='random seed for initialization')
parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.')
|
def generic_train(model, args: argparse.Namespace, early_stopping_callback=False, logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs):
pl.seed_everything(args.seed)
odir = Path(model.hparams.output_dir)
odir.mkdir(exist_ok=True)
checkpoint_callback = pl.callbacks.ModelCheckpoint(dirpath=args.output_dir, monitor=('val_' + args.val_metric), mode='max', save_top_k=args.save_top_k, save_last=True)
if (early_stopping_callback is not False):
extra_callbacks.append(early_stopping_callback)
print('the max number of epochs is {}'.format(args.max_epochs))
print('early stopping', early_stopping_callback)
print('checkpoint_callback', checkpoint_callback)
print('logging', logging_callback)
trainer = pl.Trainer.from_argparse_args(args, max_epochs=args.max_epochs, weights_summary=None, callbacks=([logging_callback] + extra_callbacks), logger=logger, checkpoint_callback=checkpoint_callback)
print('args.do_train:', args.do_train)
if args.do_train:
trainer.fit(model)
return trainer
|
def run_experiment(yaml_file):
with open(yaml_file, 'r') as stream:
parsed_yaml = yaml.safe_load(stream)
args = ''
for (arg, value) in parsed_yaml.items():
args += f'--{arg} {value} '
os.system(f'python finetune.py {args}')
|
class LiviaSoftmax(LiviaNet3DConvLayer):
' Final Classification layer with Softmax '
def __init__(self, rng, layerID, inputSample_Train, inputSample_Test, inputToLayerShapeTrain, inputToLayerShapeTest, filterShape, applyBatchNorm, applyBatchNormNumberEpochs, maxPoolingParameters, weights_initialization, weights, activationType=0, dropoutRate=0.0, softmaxTemperature=1.0):
LiviaNet3DConvLayer.__init__(self, rng, layerID, inputSample_Train, inputSample_Test, inputToLayerShapeTrain, inputToLayerShapeTest, filterShape, applyBatchNorm, applyBatchNormNumberEpochs, maxPoolingParameters, weights_initialization, weights, activationType, dropoutRate)
self._numberOfOutputClasses = None
self._bClassLayer = None
self._softmaxTemperature = None
self._numberOfOutputClasses = filterShape[0]
self._softmaxTemperature = softmaxTemperature
outputOfConvTrain = self.outputTrain
outputOfConvTest = self.outputTest
outputOfConvShapeTrain = self.outputShapeTrain
outputOfConvShapeTest = self.outputShapeTest
b_values = np.zeros(self._numberOfFeatureMaps, dtype='float32')
self._bClassLayer = theano.shared(value=b_values, borrow=True)
inputToSoftmaxTrain = applyBiasToFeatureMaps(self._bClassLayer, outputOfConvTrain)
inputToSoftmaxTest = applyBiasToFeatureMaps(self._bClassLayer, outputOfConvTest)
self.params = (self.params + [self._bClassLayer])
(self.p_y_given_x_train, self.y_pred_train) = applySoftMax(inputToSoftmaxTrain, outputOfConvShapeTrain, self._numberOfOutputClasses, softmaxTemperature)
(self.p_y_given_x_test, self.y_pred_test) = applySoftMax(inputToSoftmaxTest, outputOfConvShapeTest, self._numberOfOutputClasses, softmaxTemperature)
def negativeLogLikelihoodWeighted(self, y, weightPerClass):
e1 = np.finfo(np.float32).tiny
addTinyProbMatrix = (T.lt(self.p_y_given_x_train, (4 * e1)) * e1)
weights = weightPerClass.dimshuffle('x', 0, 'x', 'x', 'x')
log_p_y_given_x_train = T.log((self.p_y_given_x_train + addTinyProbMatrix))
weighted_log_probs = (log_p_y_given_x_train * weights)
wShape = weighted_log_probs.shape
idx0 = T.arange(wShape[0]).dimshuffle(0, 'x', 'x', 'x')
idx2 = T.arange(wShape[2]).dimshuffle('x', 0, 'x', 'x')
idx3 = T.arange(wShape[3]).dimshuffle('x', 'x', 0, 'x')
idx4 = T.arange(wShape[4]).dimshuffle('x', 'x', 'x', 0)
return (- T.mean(weighted_log_probs[(idx0, y, idx2, idx3, idx4)]))
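# The advanced indexing above selects, for every (batch, x, y, z) position, the log-probability
# of its ground-truth class y, so the result is the class-weighted mean negative log-likelihood.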
def predictionProbabilities(self):
return self.p_y_given_x_test
|
def computeDice(autoSeg, groundTruth):
' Returns\n -------\n DiceArray : floats array\n \n Dice coefficient as a float on range [0,1].\n Maximum similarity = 1\n No similarity = 0 '
n_classes = int((np.max(groundTruth) + 1))
DiceArray = []
for c_i in xrange(1, n_classes):
idx_Auto = np.where((autoSeg.flatten() == c_i))[0]
idx_GT = np.where((groundTruth.flatten() == c_i))[0]
autoArray = np.zeros(autoSeg.size, dtype=np.bool)
autoArray[idx_Auto] = 1
gtArray = np.zeros(autoSeg.size, dtype=np.bool)
gtArray[idx_GT] = 1
dsc = dice(autoArray, gtArray)
DiceArray.append(dsc)
return DiceArray
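# computeDice skips class 0 (assumed to be background) and returns one Dice score per
# foreground class, comparing binary masks built from the flattened segmentations.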
|
def dice(im1, im2):
'\n Computes the Dice coefficient\n ----------\n im1 : boolean array\n im2 : boolean array\n \n If they are not boolean, they will be converted.\n \n -------\n It returns the Dice coefficient as a float on the range [0,1].\n 1: Perfect overlapping \n 0: Not overlapping \n '
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if (im1.size != im2.size):
raise ValueError('Size mismatch between input arrays!!!')
im_sum = (im1.sum() + im2.sum())
if (im_sum == 0):
return 1.0
intersection = np.logical_and(im1, im2)
return ((2.0 * intersection.sum()) / im_sum)
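# Worked example (illustrative masks, not from the dataset): two 4-voxel masks that overlap
# in 2 voxels give 2 * 2 / (3 + 3) = 0.666...
#
#   a = np.array([1, 1, 1, 0], dtype=bool)
#   b = np.array([0, 1, 1, 1], dtype=bool)
#   dice(a, b)  # -> 0.666...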
|
def applyActivationFunction_Sigmoid(inputData):
' inputData is a tensor5D with shape:\n (batchSize,\n Number of feature Maps,\n convolvedImageShape[0],\n convolvedImageShape[1],\n convolvedImageShape[2]) '
outputData = T.nnet.sigmoid(inputData)
return outputData
|
def applyActivationFunction_Tanh(inputData):
'inputData is a tensor5D with shape:\n # (batchSize,\n # Number of feature Maps,\n # convolvedImageShape[0],\n # convolvedImageShape[1],\n # convolvedImageShape[2])'
outputData = T.tanh(inputData)
return outputData
|
def applyActivationFunction_ReLU_v1(inputData):
' inputData is a tensor5D with shape:\n # (batchSize,\n # Number of feature Maps,\n # convolvedImageShape[0],\n # convolvedImageShape[1],\n # convolvedImageShape[2]) '
return T.maximum(inputData, 0)
|
def applyActivationFunction_ReLU_v2(inputData):
return T.switch((inputData < 0.0), 0.0, inputData)
|
def applyActivationFunction_ReLU_v3(inputData):
return ((inputData + abs(inputData)) / 2.0)
|
def applyActivationFunction_ReLU_v4(inputData):
return (((T.sgn(inputData) + 1) * inputData) * 0.5)
|
def applyActivationFunction_LeakyReLU(inputData, leakiness):
'leakiness : float\n Slope for negative input, usually between 0 and 1.\n A leakiness of 0 will lead to the standard rectifier,\n a leakiness of 1 will lead to a linear activation function,\n and any value in between will give a leaky rectifier.\n \n [1] Maas et al. (2013):\n Rectifier Nonlinearities Improve Neural Network Acoustic Models,\n http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf\n \n \n - The input is a tensor of shape (batchSize, FeatMaps, xDim, yDim, zDim) '
pos = (0.5 * (1 + leakiness))
neg = (0.5 * (1 - leakiness))
output = ((pos * inputData) + (neg * abs(inputData)))
return output
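# Sanity check of the algebra above (plain NumPy stand-in for the symbolic expression):
# 0.5 * (1 + leakiness) * x + 0.5 * (1 - leakiness) * |x| equals x for x >= 0 and
# leakiness * x for x < 0, i.e. the usual leaky-ReLU definition.
#
#   x = np.array([-2.0, -0.5, 0.0, 1.5])
#   out = 0.5 * (1 + 0.1) * x + 0.5 * (1 - 0.1) * np.abs(x)
#   np.allclose(out, np.where(x < 0, 0.1 * x, x))  # -> True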
|
def applyActivationFunction_PReLU(inputData, PreluActivations):
'Parametric Rectified Linear Unit.\n It follows:\n `f(x) = alpha * x for x < 0`,\n `f(x) = x for x >= 0`,\n where `alpha` is a learned array with the same shape as x.\n \n - The input is a tensor of shape (batchSize, FeatMaps, xDim, yDim, zDim) '
preluActivationsAsRow = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
pos = T.maximum(0, inputData)
neg = ((preluActivationsAsRow * (inputData - abs(inputData))) * 0.5)
output = (pos + neg)
return output
|
def applyActivationFunction_PReLU_v2(inputData, PreluActivations):
' inputData is a tensor5D with shape:\n (batchSize,\n Number of feature Maps,\n convolvedImageShape[0],\n convolvedImageShape[1],\n convolvedImageShape[2]) '
preluActivationsAsRow = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
pos = ((inputData + abs(inputData)) / 2.0)
neg = (preluActivationsAsRow * ((inputData - abs(inputData)) / 2.0))
output = (pos + neg)
return output
|
def applyActivationFunction_PReLU_v3(inputData, PreluActivations):
' inputData is a tensor5D with shape:\n (batchSize,\n Number of feature Maps,\n convolvedImageShape[0],\n convolvedImageShape[1],\n convolvedImageShape[2]) '
preluActivationsAsRow = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
pos = (0.5 * (1 + preluActivationsAsRow))
neg = (0.5 * (1 - preluActivationsAsRow))
output = ((pos * inputData) + (neg * abs(inputData)))
return output
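# Note: applyActivationFunction_PReLU, _v2 and _v3 are algebraically equivalent; each reduces
# to max(0, x) + alpha * min(0, x) and they differ only in how the positive and negative parts
# are expressed.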
|
def apply_Dropout(rng, dropoutRate, inputShape, inputData, task):
' Task:\n # 0: Training\n # 1: Validation\n # 2: Testing '
outputData = inputData
if (dropoutRate > 0.001):
activationRate = (1 - dropoutRate)
srng = T.shared_randomstreams.RandomStreams(rng.randint(999999))
dropoutMask = srng.binomial(n=1, size=inputShape, p=activationRate, dtype=theano.config.floatX)
if (task == 0):
outputData = (inputData * dropoutMask)
else:
outputData = (inputData * activationRate)
return outputData
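# This is classic (non-inverted) dropout: the Bernoulli mask is applied only during training
# (task == 0), and activations are instead scaled by (1 - dropoutRate) at validation/test time
# so that expected magnitudes match. Equivalent NumPy sketch (illustrative values):
#
#   x = np.ones((4, 3), dtype='float32')
#   train_out = x * np.random.binomial(n=1, p=1 - 0.5, size=x.shape)  # task == 0
#   test_out = x * (1 - 0.5)                                          # task 1 or 2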
|
def convolveWithKernel(W, filter_shape, inputSample, inputSampleShape):
wReshapedForConv = W.dimshuffle(0, 4, 1, 2, 3)
wReshapedForConvShape = (filter_shape[0], filter_shape[4], filter_shape[1], filter_shape[2], filter_shape[3])
inputSampleReshaped = inputSample.dimshuffle(0, 4, 1, 2, 3)
inputSampleReshapedShape = (inputSampleShape[0], inputSampleShape[4], inputSampleShape[1], inputSampleShape[2], inputSampleShape[3])
convolved_Output = T.nnet.conv3d2d.conv3d(inputSampleReshaped, wReshapedForConv, inputSampleReshapedShape, wReshapedForConvShape, border_mode='valid')
output = convolved_Output.dimshuffle(0, 2, 3, 4, 1)
outputShape = [inputSampleShape[0], filter_shape[0], ((inputSampleShape[2] - filter_shape[2]) + 1), ((inputSampleShape[3] - filter_shape[3]) + 1), ((inputSampleShape[4] - filter_shape[4]) + 1)]
return (output, outputShape)
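# conv3d2d.conv3d expects signals and filters laid out as (batch, depth, channels, height,
# width), so the kernel and the sample are dimshuffled from the (batch, fms, x, y, z) layout
# used throughout this code and the result is shuffled back afterwards; with 'valid' mode each
# spatial dimension shrinks by (kernel size - 1), as reflected in outputShape.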
|
def applyBn(numberEpochApplyRolling, inputTrain, inputTest, inputShapeTrain):
numberOfChannels = inputShapeTrain[1]
gBn_values = np.ones(numberOfChannels, dtype='float32')
gBn = theano.shared(value=gBn_values, borrow=True)
bBn_values = np.zeros(numberOfChannels, dtype='float32')
bBn = theano.shared(value=bBn_values, borrow=True)
muArray = theano.shared(np.zeros((numberEpochApplyRolling, numberOfChannels), dtype='float32'), borrow=True)
varArray = theano.shared(np.ones((numberEpochApplyRolling, numberOfChannels), dtype='float32'), borrow=True)
sharedNewMu_B = theano.shared(np.zeros(numberOfChannels, dtype='float32'), borrow=True)
sharedNewVar_B = theano.shared(np.ones(numberOfChannels, dtype='float32'), borrow=True)
e1 = np.finfo(np.float32).tiny
mu_B = inputTrain.mean(axis=[0, 2, 3, 4])
mu_B = T.unbroadcast(mu_B, 0)
var_B = inputTrain.var(axis=[0, 2, 3, 4])
var_B = T.unbroadcast(var_B, 0)
var_B_plusE = (var_B + e1)
mu_RollingAverage = muArray.mean(axis=0)
effectiveSize = (((inputShapeTrain[0] * inputShapeTrain[2]) * inputShapeTrain[3]) * inputShapeTrain[4])
var_RollingAverage = ((effectiveSize / (effectiveSize - 1)) * varArray.mean(axis=0))
var_RollingAverage_plusE = (var_RollingAverage + e1)
normXi_train = ((inputTrain - mu_B.dimshuffle('x', 0, 'x', 'x', 'x')) / T.sqrt(var_B_plusE.dimshuffle('x', 0, 'x', 'x', 'x')))
normYi_train = ((gBn.dimshuffle('x', 0, 'x', 'x', 'x') * normXi_train) + bBn.dimshuffle('x', 0, 'x', 'x', 'x'))
normXi_test = ((inputTest - mu_RollingAverage.dimshuffle('x', 0, 'x', 'x', 'x')) / T.sqrt(var_RollingAverage_plusE.dimshuffle('x', 0, 'x', 'x', 'x')))
normYi_test = ((gBn.dimshuffle('x', 0, 'x', 'x', 'x') * normXi_test) + bBn.dimshuffle('x', 0, 'x', 'x', 'x'))
return (normYi_train, normYi_test, gBn, bBn, muArray, varArray, sharedNewMu_B, sharedNewVar_B, mu_B, var_B)
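# applyBn keeps a rolling window of numberEpochApplyRolling per-channel means and variances in
# muArray/varArray; training normalises with the current mini-batch statistics (mu_B, var_B),
# while inference uses the rolling averages with an unbiased-variance correction based on the
# effective sample size. The shared buffers are returned, presumably so the training loop can
# update them between epochs.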
|