_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def _tensor_product(t1, t2):
  """Returns the batched outer product of two vectors.

  Args:
    t1: A `tf.Tensor` of shape `[..., n]`.
    t2: A `tf.Tensor` of shape `[..., m]`.

  Returns:
    A `tf.Tensor` `r` of shape `[..., n, m]` with matching batch dimensions,
    whose components are `r[..., i, j] = t1[..., i] * t2[..., j]`.
  """
  # Lift `t1` to a column matrix `[..., n, 1]` and `t2` to a row matrix
  # `[..., 1, m]`; their matrix product is exactly the outer product.
  column = tf.expand_dims(t1, axis=-1)
  row = tf.expand_dims(t2, axis=-2)
  return tf.matmul(column, row)
def _batch_transpose(mat):
  """Transposes the two trailing dimensions of a possibly batched matrix.

  Args:
    mat: A `tf.Tensor` of shape `[..., n, m]`.

  Returns:
    A `tf.Tensor` of shape `[..., m, n]` with matching batch dimensions.
  """
  rank = distribution_util.prefer_static_rank(mat)
  axes = tf.range(rank)
  # Keep all batch axes in place and swap only the last two.
  swapped = tf.concat([axes[:-2], [axes[-1], axes[-2]]], axis=0)
  return tf.transpose(a=mat, perm=swapped)
def pad_shape_right_with_ones(x, ndims):
  """Maybe add `ndims` ones to `x.shape` on the right.

  If `ndims` is zero, this is a no-op; otherwise, we will create and return a
  new `Tensor` whose shape is that of `x` with `ndims` ones concatenated on
  the right side. If the shape of `x` is known statically, the shape of the
  return value will be as well.

  Args:
    x: The `Tensor` we'll return a reshaping of.
    ndims: Python `integer` number of ones to pad onto `x.shape`.

  Returns:
    If `ndims` is zero, `x`; otherwise, a `Tensor` whose shape is that of `x`
    with `ndims` ones concatenated on the right side. If possible, returns a
    `Tensor` whose shape is known statically.

  Raises:
    ValueError: if `ndims` is not a Python `integer` greater than or equal to
      zero.
  """
  if not (isinstance(ndims, int) and ndims >= 0):
    # The validated condition is `ndims >= 0`; the message must agree with it
    # (it previously said 'greater than zero', contradicting the check).
    raise ValueError(
        '`ndims` must be a Python `integer` greater than or equal to zero.'
        ' Got: {}'.format(ndims))
  if ndims == 0:
    return x
  x = tf.convert_to_tensor(value=x)
  original_shape = x.shape
  # Reshape using the dynamic shape, then re-attach the static shape
  # information (the reshape alone would lose it).
  new_shape = distribution_util.pad(
      tf.shape(input=x), axis=0, back=True, value=1, count=ndims)
  x = tf.reshape(x, new_shape)
  x.set_shape(original_shape.concatenate([1]*ndims))
  return x
def sum_rightmost_ndims_preserving_shape(x, ndims):
  """Return `Tensor` with right-most ndims summed.

  Args:
    x: the `Tensor` whose right-most `ndims` dimensions to sum.
    ndims: number of right-most dimensions to sum.

  Returns:
    A `Tensor` resulting from calling `reduce_sum` on the `ndims` right-most
    dimensions. If the shape of `x` is statically known, the result will also
    have statically known shape. Otherwise, the resulting shape will only be
    known at runtime.
  """
  x = tf.convert_to_tensor(value=x)
  static_rank = x.shape.ndims
  if static_rank is None:
    # Rank unknown at graph-build time; fall back to the dynamic rank.
    dynamic_rank = tf.rank(x)
    reduce_axes = tf.range(dynamic_rank - ndims, dynamic_rank)
  else:
    reduce_axes = tf.range(static_rank - ndims, static_rank)
  return tf.reduce_sum(input_tensor=x, axis=reduce_axes)
def sqrt_with_finite_grads(x, name=None):
  """A sqrt function whose gradient at zero is very large but finite.

  Args:
    x: a `Tensor` whose sqrt is to be computed.
    name: a Python `str` prefixed to all ops created by this function.
      Default `None` (i.e., "sqrt_with_finite_grads").

  Returns:
    sqrt: the square root of `x`, with an overridden gradient at zero
    grad: a gradient function, which is the same as sqrt's gradient everywhere
      except at zero, where it is given a large finite value, instead of `inf`.

  Raises:
    TypeError: if `tf.convert_to_tensor(x)` is not a `float` type.

  Often in kernel functions, we need to compute the L2 norm of the difference
  between two vectors, `x` and `y`: `sqrt(sum_i((x_i - y_i) ** 2))`. In the
  case where `x` and `y` are identical, e.g., on the diagonal of a kernel
  matrix, we get `NaN`s when we take gradients with respect to the inputs. To
  see, this consider the forward pass:

  ```
  [x_1 ... x_N]  -->  [x_1 ** 2 ... x_N ** 2]  -->
      (x_1 ** 2 + ... + x_N ** 2)  -->  sqrt((x_1 ** 2 + ... + x_N ** 2))
  ```

  When we backprop through this forward pass, the `sqrt` yields an `inf`
  because `grad_z(sqrt(z)) = 1 / (2 * sqrt(z))`. Continuing the backprop to
  the left, at the `x ** 2` term, we pick up a `2 * x`, and when `x` is zero,
  we get `0 * inf`, which is `NaN`.

  We'd like to avoid these `NaN`s, since they infect the rest of the connected
  computation graph. Practically, when two inputs to a kernel function are
  equal, we are in one of two scenarios:

   1. We are actually computing k(x, x), in which case norm(x - x) is
      identically zero, independent of x. In this case, we'd like the
      gradient to reflect this independence: it should be zero.
   2. We are computing k(x, y), and x just *happens* to have the same value
      as y. The gradient at such inputs is in fact ill-defined (there is a
      cusp in the sqrt((x - y) ** 2) surface along the line x = y). There are,
      however, an infinite number of sub-gradients, all of which are valid at
      all such inputs. By symmetry, there is exactly one which is "special":
      zero, and we elect to use that value here. In practice, having two
      identical inputs to a kernel matrix is probably a pathological
      situation to be avoided, but that is better resolved at a higher level
      than this.

  To avoid the infinite gradient at zero, we use tf.custom_gradient to
  redefine the gradient at zero. We assign it to be a very large value,
  specifically the sqrt of the max value of the floating point dtype of the
  input. We use the sqrt (as opposed to just using the max floating point
  value) to avoid potential overflow when combining this value with others
  downstream.
  """
  with tf.compat.v1.name_scope(name, 'sqrt_with_finite_grads', [x]):
    x = tf.convert_to_tensor(value=x, name='x')
    if not x.dtype.is_floating:
      raise TypeError('Input `x` must be floating type.')
    def grad(grad_ys):
      # sqrt(finfo.max), not finfo.max itself, so that combining this large
      # gradient with other downstream factors does not overflow.
      large_float_like_x = np.sqrt(np.finfo(x.dtype.as_numpy_dtype()).max)
      # NOTE: `tf.where` evaluates both branches, so `rsqrt(0) = inf` is
      # computed but then discarded in favor of the large finite value at
      # `x == 0`. `0.5 * rsqrt(x)` is exactly d/dx sqrt(x) elsewhere.
      safe_grads = tf.where(
          tf.equal(x, 0), tf.fill(tf.shape(input=x), large_float_like_x),
          0.5 * tf.math.rsqrt(x))
      return grad_ys * safe_grads
    # NOTE(review): the docstring refers to `tf.custom_gradient`, but no
    # decorator is visible on this function -- presumably the caller applies
    # it (or it was lost in extraction); confirm before relying on the
    # overridden gradient taking effect.
    return tf.sqrt(x), grad
def maybe_get_common_dtype(arg_list):
  """Return common dtype of arg_list, or None.

  Args:
    arg_list: an iterable of items which are either `None` or have a `dtype`
      property.

  Returns:
    dtype: The common dtype of items in `arg_list`, or `None` if the list is
      empty or all items are `None`.
  """
  # `any` over an empty iterable is False, so an empty `arg_list` (like an
  # all-None one) yields None.
  if any(a is not None for a in arg_list):
    return dtype_util.common_dtype(arg_list, tf.float32)
  return None
def minimize(value_and_gradients_function,
             initial_position,
             num_correction_pairs=10,
             tolerance=1e-8,
             x_tolerance=0,
             f_relative_tolerance=0,
             initial_inverse_hessian_estimate=None,
             max_iterations=50,
             parallel_iterations=1,
             stopping_condition=None,
             name=None):
  """Applies the L-BFGS algorithm to minimize a differentiable function.

  Performs unconstrained minimization of a differentiable function using the
  L-BFGS scheme. See [Nocedal and Wright(2006)][1] for details of the
  algorithm.

  ### Usage:

  The following example demonstrates the L-BFGS optimizer attempting to find
  the minimum for a simple high-dimensional quadratic objective function.

  ```python
  # A high-dimensional quadratic bowl.
  ndims = 60
  minimum = np.ones([ndims], dtype='float64')
  scales = np.arange(ndims, dtype='float64') + 1.0

  # The objective function and the gradient.
  def quadratic(x):
    value = tf.reduce_sum(scales * (x - minimum) ** 2)
    return value, tf.gradients(value, x)[0]

  start = np.arange(ndims, 0, -1, dtype='float64')
  optim_results = tfp.optimizer.lbfgs_minimize(
      quadratic, initial_position=start, num_correction_pairs=10,
      tolerance=1e-8)

  with tf.Session() as session:
    results = session.run(optim_results)
  # Check that the search converged
  assert(results.converged)
  # Check that the argmin is close to the actual value.
  np.testing.assert_allclose(results.position, minimum)
  ```

  ### References:

  [1] Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series
      in Operations Research. pp 176-180. 2006
      http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf

  Args:
    value_and_gradients_function: A Python callable that accepts a point as a
      real `Tensor` and returns a tuple of `Tensor`s of real dtype containing
      the value of the function and its gradient at that point. The function
      to be minimized. The input is of shape `[..., n]`, where `n` is the size
      of the domain of input points, and all others are batching dimensions.
      The first component of the return value is a real `Tensor` of matching
      shape `[...]`. The second component (the gradient) is also of shape
      `[..., n]` like the input value to the function.
    initial_position: Real `Tensor` of shape `[..., n]`. The starting point,
      or points when using batching dimensions, of the search procedure. At
      these points the function value and the gradient norm should be finite.
    num_correction_pairs: Positive integer. Specifies the maximum number of
      (position_delta, gradient_delta) correction pairs to keep as implicit
      approximation of the Hessian matrix.
    tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance
      for the procedure. If the supremum norm of the gradient vector is below
      this number, the algorithm is stopped.
    x_tolerance: Scalar `Tensor` of real dtype. If the absolute change in the
      position between one iteration and the next is smaller than this number,
      the algorithm is stopped.
    f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative
      change in the objective value between one iteration and the next is
      smaller than this value, the algorithm is stopped.
    initial_inverse_hessian_estimate: None. Option currently not supported.
    max_iterations: Scalar positive int32 `Tensor`. The maximum number of
      iterations for L-BFGS updates.
    parallel_iterations: Positive integer. The number of iterations allowed
      to run in parallel.
    stopping_condition: (Optional) A Python function that takes as input two
      Boolean tensors of shape `[...]`, and returns a Boolean scalar tensor.
      The input tensors are `converged` and `failed`, indicating the current
      status of each respective batch member; the return value states whether
      the algorithm should stop. The default is tfp.optimizer.converged_all
      which only stops when all batch members have either converged or
      failed. An alternative is tfp.optimizer.converged_any which stops as
      soon as one batch member has converged, or when all have failed.
    name: (Optional) Python str. The name prefixed to the ops created by this
      function. If not supplied, the default name 'minimize' is used.

  Returns:
    optimizer_results: A namedtuple containing the following items:
      converged: Scalar boolean tensor indicating whether the minimum was
        found within tolerance.
      failed:  Scalar boolean tensor indicating whether a line search
        step failed to find a suitable step size satisfying Wolfe
        conditions. In the absence of any constraints on the
        number of objective evaluations permitted, this value will
        be the complement of `converged`. However, if there is
        a constraint and the search stopped due to available
        evaluations being exhausted, both `failed` and `converged`
        will be simultaneously False.
      num_objective_evaluations: The total number of objective
        evaluations performed.
      position: A tensor containing the last argument value found
        during the search. If the search converged, then
        this value is the argmin of the objective function.
      objective_value: A tensor containing the value of the objective
        function at the `position`. If the search converged, then this is
        the (local) minimum of the objective function.
      objective_gradient: A tensor containing the gradient of the objective
        function at the `position`. If the search converged the
        max-norm of this tensor should be below the tolerance.
      position_deltas: A tensor encoding information about the latest
        changes in `position` during the algorithm execution.
      gradient_deltas: A tensor encoding information about the latest
        changes in `objective_gradient` during the algorithm execution.
  """
  if initial_inverse_hessian_estimate is not None:
    raise NotImplementedError(
        'Support of initial_inverse_hessian_estimate arg not yet implemented')

  if stopping_condition is None:
    stopping_condition = bfgs_utils.converged_all

  with tf.compat.v1.name_scope(name, 'minimize', [initial_position, tolerance]):
    initial_position = tf.convert_to_tensor(
        value=initial_position, name='initial_position')
    dtype = initial_position.dtype.base_dtype
    # Convert all tolerances to tensors of the position's dtype so the
    # while-loop body compares like with like.
    tolerance = tf.convert_to_tensor(
        value=tolerance, dtype=dtype, name='grad_tolerance')
    f_relative_tolerance = tf.convert_to_tensor(
        value=f_relative_tolerance, dtype=dtype, name='f_relative_tolerance')
    x_tolerance = tf.convert_to_tensor(
        value=x_tolerance, dtype=dtype, name='x_tolerance')
    max_iterations = tf.convert_to_tensor(
        value=max_iterations, name='max_iterations')

    # The `state` here is a `LBfgsOptimizerResults` tuple with values for the
    # current state of the algorithm computation.
    def _cond(state):
      """Continue if iterations remain and stopping condition is not met."""
      return ((state.num_iterations < max_iterations) &
              tf.logical_not(stopping_condition(state.converged, state.failed)))

    def _body(current_state):
      """Main optimization loop."""
      search_direction = _get_search_direction(current_state)

      # TODO(b/120134934): Check if the derivative at the start point is not
      # negative, if so then reset position/gradient deltas and recompute
      # search direction.

      next_state = bfgs_utils.line_search_step(
          current_state,
          value_and_gradients_function, search_direction,
          tolerance, f_relative_tolerance, x_tolerance, stopping_condition)

      # If not failed or converged, update the Hessian estimate: push this
      # iteration's position/gradient deltas into the correction-pair queues.
      should_update = ~(next_state.converged | next_state.failed)
      state_after_inv_hessian_update = bfgs_utils.update_fields(
          next_state,
          position_deltas=_queue_push(
              current_state.position_deltas, should_update,
              next_state.position - current_state.position),
          gradient_deltas=_queue_push(
              current_state.gradient_deltas, should_update,
              next_state.objective_gradient - current_state.objective_gradient))
      return [state_after_inv_hessian_update]

    initial_state = _get_initial_state(value_and_gradients_function,
                                       initial_position,
                                       num_correction_pairs,
                                       tolerance)
    return tf.while_loop(
        cond=_cond,
        body=_body,
        loop_vars=[initial_state],
        parallel_iterations=parallel_iterations)[0]
def _get_initial_state(value_and_gradients_function,
                       initial_position,
                       num_correction_pairs,
                       tolerance):
  """Create LBfgsOptimizerResults with initial state of search procedure."""
  # Start from the generic BFGS initial state, then attach zero-filled
  # queues that will hold the L-BFGS correction pairs.
  state_kwargs = bfgs_utils.get_initial_state_args(
      value_and_gradients_function,
      initial_position,
      tolerance)
  deltas_queue = _make_empty_queue_for(num_correction_pairs, initial_position)
  state_kwargs.update(
      position_deltas=deltas_queue, gradient_deltas=deltas_queue)
  return LBfgsOptimizerResults(**state_kwargs)
def _get_search_direction(state):
  """Computes the search direction to follow at the current state.

  On the `k`-th iteration of the main L-BFGS algorithm, the state has
  collected the most recent `m` correction pairs in position_deltas and
  gradient_deltas, where `k = state.num_iterations` and
  `m = min(k, num_correction_pairs)`.

  Assuming these, the code below is an implementation of the L-BFGS two-loop
  recursion algorithm given by [Nocedal and Wright(2006)][1]:

  ```None
  q_direction = objective_gradient
  for i in reversed(range(m)):  # First loop.
    inv_rho[i] = gradient_deltas[i]^T * position_deltas[i]
    alpha[i] = position_deltas[i]^T * q_direction / inv_rho[i]
    q_direction = q_direction - alpha[i] * gradient_deltas[i]

  kth_inv_hessian_factor = (gradient_deltas[-1]^T * position_deltas[-1] /
                            gradient_deltas[-1]^T * gradient_deltas[-1])
  r_direction = kth_inv_hessian_factor * I * q_direction

  for i in range(m):  # Second loop.
    beta = gradient_deltas[i]^T * r_direction / inv_rho[i]
    r_direction = r_direction + position_deltas[i] * (alpha[i] - beta)

  return -r_direction  # Approximates - H_k * objective_gradient.
  ```

  Args:
    state: A `LBfgsOptimizerResults` tuple with the current state of the
      search procedure.

  Returns:
    A real `Tensor` of the same shape as the `state.position`. The direction
    along which to perform line search.
  """
  # The number of correction pairs that have been collected so far.
  num_elements = tf.minimum(
      state.num_iterations,
      distribution_util.prefer_static_shape(state.position_deltas)[0])

  def _two_loop_algorithm():
    """L-BFGS two-loop algorithm."""
    # Correction pairs are always appended to the end, so only the latest
    # `num_elements` vectors have valid position/gradient deltas.
    position_deltas = state.position_deltas[-num_elements:]
    gradient_deltas = state.gradient_deltas[-num_elements:]

    # Pre-compute all `inv_rho[i]`s.
    inv_rhos = tf.reduce_sum(
        input_tensor=gradient_deltas * position_deltas, axis=-1)

    def first_loop(acc, args):
      _, q_direction = acc
      position_delta, gradient_delta, inv_rho = args
      alpha = tf.reduce_sum(
          input_tensor=position_delta * q_direction, axis=-1) / inv_rho
      direction_delta = tf.expand_dims(alpha, axis=-1) * gradient_delta
      return (alpha, q_direction - direction_delta)

    # Run first loop body computing and collecting `alpha[i]`s, while also
    # computing the updated `q_direction` at each step. `reverse=True`
    # realizes the reversed iteration order of the pseudocode's first loop.
    zero = tf.zeros_like(inv_rhos[0])
    alphas, q_directions = tf.scan(
        first_loop, [position_deltas, gradient_deltas, inv_rhos],
        initializer=(zero, state.objective_gradient), reverse=True)

    # We use `H^0_k = gamma_k * I` as an estimate for the initial inverse
    # hessian for the k-th iteration; then `r_direction = H^0_k * q_direction`.
    gamma_k = inv_rhos[-1] / tf.reduce_sum(
        input_tensor=gradient_deltas[-1] * gradient_deltas[-1], axis=-1)
    # `q_directions[0]` is the final q of the (reversed) first loop.
    r_direction = tf.expand_dims(gamma_k, axis=-1) * q_directions[0]

    def second_loop(r_direction, args):
      alpha, position_delta, gradient_delta, inv_rho = args
      beta = tf.reduce_sum(
          input_tensor=gradient_delta * r_direction, axis=-1) / inv_rho
      direction_delta = tf.expand_dims(alpha - beta, axis=-1) * position_delta
      return r_direction + direction_delta

    # Finally, run second loop body computing the updated `r_direction` at
    # each step.
    r_directions = tf.scan(
        second_loop, [alphas, position_deltas, gradient_deltas, inv_rhos],
        initializer=r_direction)
    return -r_directions[-1]

  # With no correction pairs collected yet (first iteration), fall back to
  # steepest descent: minus the gradient.
  return prefer_static.cond(tf.equal(num_elements, 0),
                            (lambda: -state.objective_gradient),
                            _two_loop_algorithm)
def _make_empty_queue_for(k, element):
  """Creates a `tf.Tensor` suitable to hold `k` element-shaped tensors.

  For example:

  ```python
  element = tf.constant([[0., 1., 2., 3., 4.],
                         [5., 6., 7., 8., 9.]])

  # A queue capable of holding 3 elements.
  _make_empty_queue_for(3, element)
  # => [[[ 0.,  0.,  0.,  0.,  0.],
  #      [ 0.,  0.,  0.,  0.,  0.]],
  #
  #     [[ 0.,  0.,  0.,  0.,  0.],
  #      [ 0.,  0.,  0.,  0.,  0.]],
  #
  #     [[ 0.,  0.,  0.,  0.,  0.],
  #      [ 0.,  0.,  0.,  0.,  0.]]]
  ```

  Args:
    k: A positive scalar integer, number of elements that each queue will
      hold.
    element: A `tf.Tensor`, only its shape and dtype information are relevant.

  Returns:
    A zero-filed `tf.Tensor` of shape `(k,) + tf.shape(element)` and same
    dtype as `element`.
  """
  # Prepend the queue length `k` to the element's (preferably static) shape.
  element_shape = distribution_util.prefer_static_shape(element)
  queue_shape = tf.concat([[k], element_shape], axis=0)
  return tf.zeros(queue_shape, dtype=element.dtype.base_dtype)
def _queue_push(queue, should_update, new_vecs):
  """Conditionally push new vectors into a batch of first-in-first-out queues.

  The `queue` of shape `[k, ..., n]` can be thought of as a batch of queues,
  each holding `k` n-D vectors; while `new_vecs` of shape `[..., n]` is a
  fresh new batch of n-D vectors. The `should_update` batch of Boolean
  scalars, i.e. shape `[...]`, indicates batch members whose corresponding
  n-D vector in `new_vecs` should be added at the back of its queue, pushing
  out the corresponding n-D vector from the front. Batch members in
  `new_vecs` for which `should_update` is False are ignored.

  Note: the choice of placing `k` at the dimension 0 of the queue is
  constrained by the L-BFGS two-loop algorithm above. The algorithm uses
  tf.scan to iterate over the `k` correction pairs simulatneously across all
  batches, and tf.scan itself can only iterate over dimension 0.

  For example:

  ```python
  k, b, n = (3, 2, 5)
  queue = tf.reshape(tf.range(30), (k, b, n))
  # => [[[ 0,  1,  2,  3,  4],
  #      [ 5,  6,  7,  8,  9]],
  #
  #     [[10, 11, 12, 13, 14],
  #      [15, 16, 17, 18, 19]],
  #
  #     [[20, 21, 22, 23, 24],
  #      [25, 26, 27, 28, 29]]]

  element = tf.reshape(tf.range(30, 40), (b, n))
  # => [[30, 31, 32, 33, 34],
  #     [35, 36, 37, 38, 39]]

  should_update = tf.constant([True, False])  # Shape: (b,)

  _queue_add(should_update, queue, element)
  # => [[[10, 11, 12, 13, 14],
  #      [ 5,  6,  7,  8,  9]],
  #
  #     [[20, 21, 22, 23, 24],
  #      [15, 16, 17, 18, 19]],
  #
  #     [[30, 31, 32, 33, 34],
  #      [25, 26, 27, 28, 29]]]
  ```

  Args:
    queue: A `tf.Tensor` of shape `[k, ..., n]`; a batch of queues each with
      `k` n-D vectors.
    should_update: A Boolean `tf.Tensor` of shape `[...]` indicating batch
      members where new vectors should be added to their queues.
    new_vecs: A `tf.Tensor` of shape `[..., n]`; a batch of n-D vectors to
      add at the end of their respective queues, pushing out the first
      element from each.

  Returns:
    A new `tf.Tensor` of shape `[k, ..., n]`.
  """
  # Drop the front element and append the new vectors at the back.
  pushed = tf.concat([queue[1:], [new_vecs]], axis=0)
  # Broadcast the per-batch Boolean over the queue axis (front) and the
  # vector axis (back) so it lines up with the queue's full shape.
  mask = tf.broadcast_to(
      should_update[tf.newaxis, ..., tf.newaxis],
      distribution_util.prefer_static_shape(queue))
  return tf.where(mask, pushed, queue)
def _psd_mask(x):
  """Computes whether each square matrix in the input is positive semi-definite.

  Args:
    x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.

  Returns:
    mask: A floating-point `Tensor` of shape `[B1, ... Bn]`. Each
      scalar is 1 if the corresponding matrix was PSD, otherwise 0.
  """
  # Testing PSD-ness by attempting a Cholesky factorization would be cheaper
  # (https://scicomp.stackexchange.com/questions/12979), but TensorFlow's
  # Cholesky raises if *any* batch member fails, without identifying which,
  # so we instead compute all eigenvalues explicitly and check that the
  # smallest one is non-negative.
  #
  # Also note that treating PSD-ness as binary is somewhat dangerous in
  # floating point: a factorization can 'look' fine while the matrix has an
  # exponential condition number.
  eigenvalues, _ = tf.linalg.eigh(x)
  smallest = tf.reduce_min(input_tensor=eigenvalues, axis=-1)
  return tf.cast(smallest >= 0, dtype=x.dtype)
def _det_large_enough_mask(x, det_bounds):
  """Returns whether the input matches the given determinant limit.

  Args:
    x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.
    det_bounds: A floating-point `Tensor` that must broadcast to shape
      `[B1, ..., Bn]`, giving the desired lower bound on the
      determinants in `x`.

  Returns:
    mask: A floating-point `Tensor` of shape [B1, ..., Bn]. Each
      scalar is 1 if the corresponding matrix had determinant above
      the corresponding bound, otherwise 0.
  """
  # A Cholesky-based determinant would likely be about twice as fast for the
  # PSD matrices this is applied to (Cholesky is roughly half the cost of
  # Gaussian elimination with partial pivoting), but the plain determinant
  # keeps the code simple and this is not the dominant cost here.
  exceeds_bound = tf.linalg.det(x) > det_bounds
  return tf.cast(exceeds_bound, dtype=x.dtype)
def _uniform_correlation_like_matrix(num_rows, batch_shape, dtype, seed):
  """Returns a uniformly random `Tensor` of "correlation-like" matrices.

  A "correlation-like" matrix is a symmetric square matrix with all entries
  between -1 and 1 (inclusive) and 1s on the main diagonal. Of these,
  the ones that are positive semi-definite are exactly the correlation
  matrices.

  Args:
    num_rows: Python `int` dimension of the correlation-like matrices.
    batch_shape: `Tensor` or Python `tuple` of `int` shape of the
      batch to return.
    dtype: `dtype` of the `Tensor` to return.
    seed: Random seed.

  Returns:
    matrices: A `Tensor` of shape `batch_shape + [num_rows, num_rows]`
      and dtype `dtype`. Each entry is in [-1, 1], and each matrix
      along the bottom two dimensions is symmetric and has 1s on the
      main diagonal.
  """
  # Use integer (floor) division: under Python 3, `/` yields a float and
  # `tf.ones(shape=[<float>])` raises. The product of two consecutive
  # integers is always even, so `//` is exact.
  num_entries = num_rows * (num_rows + 1) // 2
  ones = tf.ones(shape=[num_entries], dtype=dtype)
  # It seems wasteful to generate random values for the diagonal since
  # they are discarded by `set_diag` below, but `fill_triangular` fills the
  # diagonal, so they are needed. (Filling a whole random matrix and using
  # `matrix_band_part` might be an alternative, at the cost of generating
  # almost twice as many uniforms.)
  unifs = uniform.Uniform(-ones, ones).sample(batch_shape, seed=seed)
  tril = util.fill_triangular(unifs)
  # Symmetrize, then overwrite the (doubled) diagonal with exact 1s.
  symmetric = tril + tf.linalg.matrix_transpose(tril)
  diagonal_ones = tf.ones(
      shape=util.pad(batch_shape, axis=0, back=True, value=num_rows),
      dtype=dtype)
  return tf.linalg.set_diag(symmetric, diagonal_ones)
def correlation_matrix_volume_rejection_samples(
    det_bounds, dim, sample_shape, dtype, seed):
  """Returns rejection samples from trying to get good correlation matrices.

  The proposal being rejected from is the uniform distribution on
  "correlation-like" matrices. We say a matrix is "correlation-like"
  if it is a symmetric square matrix with all entries between -1 and 1
  (inclusive) and 1s on the main diagonal. Of these, the ones that
  are positive semi-definite are exactly the correlation matrices.

  The rejection algorithm, then, is to sample a `Tensor` of
  `sample_shape` correlation-like matrices of dimensions `dim` by
  `dim`, and check each one for (i) being a correlation matrix (i.e.,
  PSD), and (ii) having determinant at least the corresponding entry
  of `det_bounds`.

  Args:
    det_bounds: A `Tensor` of lower bounds on the determinants of
      acceptable matrices. The shape must broadcast with `sample_shape`.
    dim: A Python `int` dimension of correlation matrices to sample.
    sample_shape: Python `tuple` of `int` shape of the samples to
      compute, excluding the two matrix dimensions.
    dtype: The `dtype` in which to do the computation.
    seed: Random seed.

  Returns:
    weights: A `Tensor` of shape `sample_shape`. Each entry is 0 if the
      corresponding matrix was not a correlation matrix, or had too
      small of a determinant. Otherwise, the entry is the
      multiplicative inverse of the density of proposing that matrix
      uniformly, i.e., the volume of the set of `dim` by `dim`
      correlation-like matrices.
    volume: The volume of the set of `dim` by `dim` correlation-like
      matrices.
  """
  with tf.compat.v1.name_scope("rejection_sampler"):
    proposals = _uniform_correlation_like_matrix(
        dim, sample_shape, dtype, seed=seed)
    # Volume of the proposal set: each of the dim*(dim-1)/2 free
    # off-diagonal entries ranges over an interval of length 2.
    proposal_volume = 2. ** (dim * (dim - 1) / 2.)
    # The density of proposing any given point is 1 / proposal_volume, so
    # each accepted point gets weight 1 / density = proposal_volume; the
    # masks zero out proposals that are not PSD or fail the determinant
    # bound.
    weights = (proposal_volume
               * _psd_mask(proposals)
               * _det_large_enough_mask(proposals, det_bounds))
    return weights, proposal_volume
q266815 | _clopper_pearson_confidence_interval | test | def _clopper_pearson_confidence_interval(samples, error_rate):
"""Computes a confidence interval for the mean of the given 1-D distribution.
Assumes (and checks) that the given distribution is Bernoulli, i.e.,
takes only two values. This licenses using the CDF of the binomial
distribution for the confidence, which is tighter (for extreme
probabilities) than the DKWM inequality. The method is known as the
[Clopper-Pearson method]
(https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval).
Assumes:
- The given samples were drawn iid from the distribution of interest.
- The given distribution is a Bernoulli, i.e., supported only on
low and high.
Guarantees:
- The probability (over the randomness of drawing the given sample)
that the true mean is outside the returned interval is no more
than the given error_rate.
Args:
samples: `np.ndarray` of samples drawn iid from the distribution
of interest.
error_rate: Python `float` admissible rate of mistakes.
Returns:
low: Lower bound of confidence interval.
high: Upper bound of confidence interval.
Raises:
ValueError: If `samples` has rank other than 1 (batch semantics
are not implemented), or if `samples` contains values other than
`low` or `high` (as that makes the distribution not Bernoulli).
"""
# TODO(b/78025336) Migrate this confidence interval function
# to statistical_testing.py. In order to do that
# - Get the binomial CDF from the Binomial distribution
# - Implement scalar root finding in TF. Batch bisection search
# shouldn't be too hard, and is definitely good enough for this
# problem. Batching the Brent algorithm (from scipy) that is used
# here may be more involved, but may also not be necessary---it's
# only used here because scipy made it convenient. In particular,
# robustness is more important than speed here, which may make
# bisection search actively better.
# - The rest is just a matter of rewriting in the appropriate style.
if optimize is None or stats is None:
raise ValueError(
"Scipy is required for computing Clopper-Pearson confidence intervals")
if len(samples.shape) != 1:
raise ValueError("Batch semantics not implemented")
n = len(samples)
low = np.amin(samples)
high = np.amax(samples)
successes = np.count_nonzero(samples - low)
failures = np.count_nonzero(samples - high)
if successes + failures != n:
uniques = np.unique(samples)
msg = ("Purportedly Bernoulli distribution had distinct samples"
" {}, {}, and {}".format(uniques[0], uniques[1], uniques[2]))
raise ValueError(msg)
def p_small_enough(p):
prob = stats.binom.logcdf(successes, n, p)
return prob - np.log(error_rate / 2.)
def p_big_enough(p):
prob = stats.binom.logsf(successes, n, p)
return prob - np.log(error_rate / 2.)
high_p = optimize.brentq(
p_small_enough, float(successes) / n, 1., rtol=1e-9)
low_p = optimize.brentq(
p_big_enough, 0., float(successes) / n, rtol=1e-9)
low_interval = low + (high - low) * low_p
high_interval = low + (high - low) * high_p
return (low_interval, high_interval) | python | {
"resource": ""
} |
def compute_true_volumes(
    det_bounds, dim, num_samples, error_rate=1e-6, seed=42):
  """Returns confidence intervals for the desired correlation matrix volumes.

  The confidence intervals are computed by the [Clopper-Pearson method]
  (https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval).

  Args:
    det_bounds: A rank-1 numpy array of lower bounds on the
      determinants of acceptable matrices. Entries must be unique.
    dim: A Python `int` dimension of correlation matrices to sample.
    num_samples: The number of samples to draw.
    error_rate: The statistical significance of the returned
      confidence intervals. The significance is broadcast: Each
      returned interval separately may be incorrect with probability
      (under the sample of correlation-like matrices drawn internally)
      at most `error_rate`.
    seed: Random seed.

  Returns:
    bounds: A Python `dict` mapping each determinant bound to the low, high
      tuple giving the confidence interval.
  """
  bounds = {}
  with tf.compat.v1.Session() as sess:
    # One column of rejection weights per determinant bound.
    weights_t, _ = correlation_matrix_volume_rejection_samples(
        det_bounds, dim, [num_samples, len(det_bounds)], np.float32, seed=seed)
    weights = sess.run(weights_t)
    for weights_for_bound, bound in zip(np.rollaxis(weights, 1), det_bounds):
      template = ("Estimating volume of {}x{} correlation "
                  "matrices with determinant >= {}.")
      print(template.format(dim, dim, bound))
      sys.stdout.flush()
      bounds[bound] = _clopper_pearson_confidence_interval(
          weights_for_bound, error_rate=error_rate)
  return bounds
def _von_mises_cdf_series(x, concentration, num_terms, dtype):
  """Computes the von Mises CDF and its derivative via series expansion.

  The series is evaluated with a backward recurrence: the loop counts `n`
  down from `num_terms` to 1, accumulating the CDF correction term `vn` and,
  in parallel, its derivative with respect to `concentration` (obtained by
  differentiating each recurrence step by hand).

  Args:
    x: Tensor of points at which to evaluate the CDF.
    concentration: Tensor of von Mises concentration parameters; assumed
      broadcast-compatible with `x` -- TODO confirm against callers.
    num_terms: Scalar count of series terms; expected to be a small integer.
    dtype: Floating-point dtype in which to compute.

  Returns:
    A tuple `(cdf_clipped, dcdf_dconcentration)` where `cdf_clipped` is the
    CDF clipped to [0, 1] and `dcdf_dconcentration` is its derivative w.r.t.
    `concentration`, zeroed wherever the clipping was active.
  """
  # Keep the number of terms as a float. It should be a small integer, so
  # exactly representable as a float.
  num_terms = tf.cast(num_terms, dtype=dtype)

  def loop_body(n, rn, drn_dconcentration, vn, dvn_dconcentration):
    """One iteration of the series loop."""
    # Backward recurrence for the ratio term `rn`; `drn_dconcentration` is
    # the same recurrence differentiated w.r.t. `concentration`.
    denominator = 2. * n / concentration + rn
    ddenominator_dk = -2. * n / concentration ** 2 + drn_dconcentration
    rn = 1. / denominator
    drn_dconcentration = -ddenominator_dk / denominator ** 2
    # Fold the n-th sine term into the running value and its derivative.
    multiplier = tf.sin(n * x) / n + vn
    vn = rn * multiplier
    dvn_dconcentration = (drn_dconcentration * multiplier +
                          rn * dvn_dconcentration)
    n -= 1.
    return n, rn, drn_dconcentration, vn, dvn_dconcentration

  (_, _, _, vn, dvn_dconcentration) = tf.while_loop(
      cond=lambda n, *_: n > 0.,
      body=loop_body,
      loop_vars=(
          num_terms,  # n: loop counter, counts down to 0.
          tf.zeros_like(x, name="rn"),
          tf.zeros_like(x, name="drn_dconcentration"),
          tf.zeros_like(x, name="vn"),
          tf.zeros_like(x, name="dvn_dconcentration"),
      ),
  )

  cdf = .5 + x / (2. * np.pi) + vn / np.pi
  dcdf_dconcentration = dvn_dconcentration / np.pi
  # Clip the result to [0, 1].
  cdf_clipped = tf.clip_by_value(cdf, 0., 1.)
  # The clipped values do not depend on concentration anymore, so set their
  # derivative to zero.
  dcdf_dconcentration *= tf.cast((cdf >= 0.) & (cdf <= 1.), dtype)
  return cdf_clipped, dcdf_dconcentration
def _von_mises_cdf_normal(x, concentration, dtype):
  """Computes the von Mises CDF and its derivative via Normal approximation.

  NOTE(review): presumably this branch is used for large concentrations,
  where the approximation is accurate -- confirm against the dispatching
  caller; nothing here enforces a concentration range.

  Args:
    x: Tensor of points at which to evaluate the CDF.
    concentration: Tensor of von Mises concentration parameters.
    dtype: Floating-point dtype in which to compute.

  Returns:
    A tuple `(cdf, dcdf_dconcentration)`: the approximate CDF values and
    their derivative w.r.t. `concentration`, the latter computed by automatic
    differentiation through `value_and_gradient`.
  """

  def cdf_func(concentration):
    """A helper function that is passed to value_and_gradient."""
    # z is an "almost Normally distributed" random variable.
    z = ((np.sqrt(2. / np.pi) / tf.math.bessel_i0e(concentration)) *
         tf.sin(.5 * x))
    # This is the correction described in [1] which reduces the error
    # of the Normal approximation.
    z2 = z ** 2
    z3 = z2 * z
    z4 = z2 ** 2
    c = 24. * concentration
    c1 = 56.
    xi = z - z3 / ((c - 2. * z2 - 16.) / 3. -
                   (z4 + (7. / 4.) * z2 + 167. / 2.) / (c - c1 - z2 + 3.)) ** 2
    # Map the corrected variable through the standard Normal CDF.
    distrib = normal.Normal(tf.cast(0., dtype), tf.cast(1., dtype))
    return distrib.cdf(xi)

  return value_and_gradient(cdf_func, concentration)
def one_step(
    objective_function,
    population,
    population_values=None,
    differential_weight=0.5,
    crossover_prob=0.9,
    seed=None,
    name=None):
  """Performs one step of the differential evolution algorithm.

  Args:
    objective_function: A Python callable that accepts a batch of possible
      solutions and returns the values of the objective function at those
      arguments as a rank 1 real `Tensor`. This specifies the function to be
      minimized. The input to this callable may be either a single `Tensor`
      or a Python `list` of `Tensor`s. The signature must match the format of
      the argument `population`. (i.e. objective_function(*population) must
      return the value of the function to be minimized).
    population: `Tensor` or Python `list` of `Tensor`s representing the
      current population vectors. Each `Tensor` must be of the same real dtype.
      The first dimension indexes individual population members while the
      rest of the dimensions are consumed by the value function. For example,
      if the population is a single `Tensor` of shape [n, m1, m2], then `n` is
      the population size and the output of `objective_function` applied to
      the population is a `Tensor` of shape [n]. If the population is a python
      list of `Tensor`s then each `Tensor` in the list should have the first
      axis of a common size, say `n` and `objective_function(*population)`
      should return a `Tensor` of shape [n]. The population must have at least
      4 members for the algorithm to work correctly.
    population_values: A `Tensor` of rank 1 and real dtype. The result of
      applying `objective_function` to the `population`. If not supplied it is
      computed using the `objective_function`.
      Default value: None.
    differential_weight: Real scalar `Tensor`. Must be positive and less than
      2.0. The parameter controlling the strength of mutation.
      Default value: 0.5
    crossover_prob: Real scalar `Tensor`. Must be between 0 and 1. The
      probability of recombination per site.
      Default value: 0.9
    seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
      applied.
      Default value: None.
    name: (Optional) Python str. The name prefixed to the ops created by this
      function. If not supplied, the default name 'one_step' is used.
      Default value: None

  Returns:
    A sequence containing the following elements (in order):
    next_population: A `Tensor` or Python `list` of `Tensor`s of the same
      structure as the input population. The population at the next
      generation.
    next_population_values: A `Tensor` of same shape and dtype as input
      `population_values`. The function values for the `next_population`.
  """
  with tf.compat.v1.name_scope(
      name, 'one_step',
      [population, population_values, differential_weight, crossover_prob]):
    population, _ = _ensure_list(population)
    if population_values is None:
      population_values = objective_function(*population)
    population_size = tf.shape(input=population[0])[0]
    seed_stream = distributions.SeedStream(seed, salt='one_step')
    mixing_indices = _get_mixing_indices(population_size, seed=seed_stream())
    # Construct the mutated solution vectors. There is one for each member of
    # the population.
    mutants = _get_mutants(population,
                           population_size,
                           mixing_indices,
                           differential_weight)
    # Perform recombination between the parents and the mutants.
    candidates = _binary_crossover(population,
                                   population_size,
                                   mutants,
                                   crossover_prob,
                                   seed=seed_stream())
    candidate_values = objective_function(*candidates)
    # NOTE: a second `if population_values is None` recomputation used to sit
    # here; it was unreachable (the value is always filled in above) and, had
    # it run, would have redundantly re-evaluated the objective.
    # Treat NaN objective values as +inf so the corresponding members always
    # lose the comparison against their candidates.
    infinity = tf.zeros_like(population_values) + np.inf
    population_values = tf.where(
        tf.math.is_nan(population_values), x=infinity, y=population_values)
    # Greedy selection: a candidate replaces its parent iff it is strictly
    # better.
    to_replace = candidate_values < population_values
    next_population = [
        tf.where(to_replace, x=candidates_part, y=population_part)
        for candidates_part, population_part in zip(candidates, population)
    ]
    next_values = tf.where(to_replace, x=candidate_values, y=population_values)
    return next_population, next_values
def minimize(objective_function,
             initial_population=None,
             initial_position=None,
             population_size=50,
             population_stddev=1.,
             max_iterations=100,
             func_tolerance=0,
             position_tolerance=1e-8,
             differential_weight=0.5,
             crossover_prob=0.9,
             seed=None,
             name=None):
  """Applies the Differential evolution algorithm to minimize a function.

  Differential Evolution is an evolutionary optimization algorithm which works
  on a set of candidate solutions called the population. It iteratively
  improves the population by applying genetic operators of mutation and
  recombination. The objective function `f` supplies the fitness of each
  candidate. A candidate `s_1` is considered better than `s_2` if
  `f(s_1) < f(s_2)`.

  This method allows the user to either specify an initial population or a
  single candidate solution. If a single solution is specified, a population
  of the specified size is initialized by adding independent normal noise
  to the candidate solution.

  The implementation also supports a multi-part specification of the state.
  For example, consider the objective function:

  ```python
  # x is a tensor of shape [n, m] while y is of shape [n].
  def objective(x, y):
    return tf.math.reduce_sum(x ** 2, axis=-1) + y ** 2
  ```
  The state in this case is specified by two input tensors `x` and `y`. To
  apply the algorithm to this objective function, one would need to specify
  an initial population as a list of two tensors of shapes
  `[population_size, k]` and `[population_size]` (or, alternatively, a single
  seed position of matching structure). The following code shows the
  complete example:

  ```python
  population_size = 40

  # With an initial population and a multi-part state.
  initial_population = (tf.random.normal([population_size]),
                        tf.random.normal([population_size]))
  def easom_fn(x, y):
    return -(tf.math.cos(x) * tf.math.cos(y) *
             tf.math.exp(-(x-np.pi)**2 - (y-np.pi)**2))

  optim_results = tfp.optimizers.differential_evolution_minimize(
      easom_fn,
      initial_population=initial_population,
      seed=43210)

  print (optim_results.converged)
  print (optim_results.position)  # Should be (close to) [pi, pi].
  print (optim_results.objective_value)  # Should be -1.

  # With a single starting point
  initial_position = (tf.constant(1.0), tf.constant(1.0))

  optim_results = tfp.optimizers.differential_evolution_minimize(
      easom_fn,
      initial_position=initial_position,
      population_size=40,
      population_stddev=2.0,
      seed=43210)
  ```

  Args:
    objective_function: A Python callable that accepts a batch of possible
      solutions and returns the values of the objective function at those
      arguments as a rank 1 real `Tensor`. This specifies the function to be
      minimized. The input to this callable may be either a single `Tensor`
      or a Python `list` of `Tensor`s. The signature must match the format of
      the argument `population`. (i.e. objective_function(*population) must
      return the value of the function to be minimized).
    initial_population: A real `Tensor` or Python list of `Tensor`s.
      If a list, each `Tensor` must be of rank at least 1 and with a common
      first dimension. The first dimension indexes into the candidate
      solutions while the rest of the dimensions (if any) index into an
      individual solution. The size of the population must be at least 4.
      This is a requirement of the DE algorithm.
    initial_position: A real `Tensor` of any shape. The seed solution used
      to initialize the population of solutions. If this parameter is
      specified then `initial_population` must not be specified.
    population_size: A positive scalar int32 `Tensor` greater than 4. The
      size of the population to evolve. This parameter is ignored if
      `initial_population` is specified.
      Default value: 50.
    population_stddev: A positive scalar real `Tensor` of the same dtype
      as `initial_position`. This parameter is ignored if
      `initial_population` is specified. Used to generate the population from
      the `initial_position` by adding random normal noise with zero mean and
      the specified standard deviation.
      Default value: 1.0
    max_iterations: Positive scalar int32 `Tensor`. The maximum number of
      generations to evolve the population for.
      Default value: 100
    func_tolerance: Scalar `Tensor` of the same dtype as the output of the
      `objective_function`. The algorithm stops if the absolute difference
      between the largest and the smallest objective function value in the
      population is below this number.
      Default value: 0
    position_tolerance: Scalar `Tensor` of the same real dtype as
      `initial_position` or `initial_population`. The algorithm terminates if
      the largest absolute difference between the coordinates of the
      population members is below this threshold.
      Default value: 1e-8
    differential_weight: Real scalar `Tensor`. Must be positive and less than
      2.0. The parameter controlling the strength of mutation in the
      algorithm.
      Default value: 0.5
    crossover_prob: Real scalar `Tensor`. Must be between 0 and 1. The
      probability of recombination per site.
      Default value: 0.9
    seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
      applied.
      Default value: None.
    name: (Optional) Python str. The name prefixed to the ops created by this
      function. If not supplied, the default name
      'differential_evolution_minimize' is used.
      Default value: None

  Returns:
    optimizer_results: An object containing the following attributes:
      converged: Scalar boolean `Tensor` indicating whether the minimum was
        found within the specified tolerances.
      num_objective_evaluations: The total number of objective
        evaluations performed.
      position: A `Tensor` containing the best point found during the search.
        If the search converged, then this value is the argmin of the
        objective function within the specified tolerances.
      objective_value: A `Tensor` containing the value of the objective
        function at the `position`. If the search converged, then this is
        the (local) minimum of the objective function.
      final_population: The final state of the population.
      final_objective_values: The objective function evaluated at the
        final population.
      initial_population: The starting population.
      initial_objective_values: The objective function evaluated at the
        initial population.
      num_iterations: The number of iterations of the main algorithm body.

  Raises:
    ValueError: If neither the initial population, nor the initial position
      are specified or if both are specified.
  """
  if initial_population is None and initial_position is None:
    raise ValueError('Either the initial population or the initial position '
                     'must be specified.')
  if initial_population is not None and initial_position is not None:
    raise ValueError('Only one of initial population or initial position '
                     'should be specified')

  with tf.compat.v1.name_scope(
      name,
      default_name='minimize',
      values=[
          initial_population, initial_position, population_size,
          population_stddev, max_iterations, func_tolerance,
          position_tolerance, differential_weight, crossover_prob
      ]):
    # Normalize all inputs: builds the starting population (if needed),
    # converts tolerances/weights to tensors and records whether the caller
    # passed a list-structured state.
    (
        was_iterable,
        population,
        population_values,
        max_iterations,
        func_tolerance,
        position_tolerance,
        differential_weight,
        crossover_prob
    ) = _get_initial_args(objective_function,
                          initial_population,
                          initial_position,
                          population_size,
                          population_stddev,
                          max_iterations,
                          func_tolerance,
                          position_tolerance,
                          differential_weight,
                          crossover_prob,
                          seed)

    def evolve_body(loop_vars):
      """Performs one step of the evolution."""
      next_population, next_population_values = one_step(
          objective_function,
          loop_vars.population,
          population_values=loop_vars.population_values,
          differential_weight=differential_weight,
          crossover_prob=crossover_prob,
          seed=seed)
      converged = _check_convergence(next_population,
                                     next_population_values,
                                     func_tolerance,
                                     position_tolerance)
      failed = _check_failure(next_population_values)
      return [_MinimizeLoopVars(
          converged=converged,
          failed=failed,
          num_iterations=loop_vars.num_iterations+1,
          population=next_population,
          population_values=next_population_values)]

    def evolve_cond(loop_vars):
      # Continue while the run has neither failed, converged, nor exhausted
      # its iteration budget (when a budget was given).
      should_stop = (
          loop_vars.failed |
          loop_vars.converged |
          (max_iterations is not None and
           loop_vars.num_iterations >= max_iterations))
      return ~should_stop

    initial_vars = _MinimizeLoopVars(
        converged=tf.convert_to_tensor(value=False),
        failed=tf.convert_to_tensor(value=False),
        num_iterations=tf.convert_to_tensor(value=0),
        population=population,
        population_values=population_values)
    final_state = tf.while_loop(
        cond=evolve_cond, body=evolve_body, loop_vars=(initial_vars,))[0]
    best_position, best_values = _find_best_in_population(
        final_state.population,
        final_state.population_values)
    # Ensure we return a similar structure to what the user supplied.
    final_population = final_state.population
    if not was_iterable:
      final_population = final_population[0]
      best_position = best_position[0]
    return DifferentialEvolutionOptimizerResults(
        converged=final_state.converged,
        failed=final_state.failed,
        position=best_position,
        objective_value=best_values,
        final_population=final_population,
        final_objective_values=final_state.population_values,
        initial_population=population,
        initial_objective_values=population_values,
        num_iterations=final_state.num_iterations)
def _get_initial_args(objective_function,
                      initial_population,
                      initial_position,
                      population_size,
                      population_stddev,
                      max_iterations,
                      func_tolerance,
                      position_tolerance,
                      differential_weight,
                      crossover_prob,
                      seed):
  """Normalizes the arguments of `minimize` into tensor form.

  Wraps non-list states in lists, synthesizes the starting population when
  only a seed position was given, evaluates the objective once on the
  starting population, and converts the scalar controls to tensors of
  appropriate dtypes.

  Returns:
    A tuple of (was_iterable, population, population_values, max_iterations,
    func_tolerance, position_tolerance, differential_weight, crossover_prob)
    where `was_iterable` records whether the caller supplied a list/tuple
    structured state (so `minimize` can mirror that structure on output).
  """
  was_iterable = False
  if initial_position is not None:
    initial_position, was_iterable = _ensure_list(initial_position)

  if initial_population is not None:
    initial_population, was_iterable = _ensure_list(initial_population)

  population = _get_starting_population(initial_population,
                                        initial_position,
                                        population_size,
                                        population_stddev,
                                        seed=seed)

  differential_weight = tf.convert_to_tensor(
      value=differential_weight, dtype=population[0].dtype.base_dtype)

  crossover_prob = tf.convert_to_tensor(value=crossover_prob)

  population_values = objective_function(*population)

  if max_iterations is not None:
    max_iterations = tf.convert_to_tensor(value=max_iterations)

  # Tolerances take their dtypes from the objects they will be compared to.
  func_tolerance = tf.convert_to_tensor(
      value=func_tolerance, dtype=population_values.dtype.base_dtype)

  position_tolerance = tf.convert_to_tensor(
      value=position_tolerance, dtype=population[0].dtype.base_dtype)

  return (was_iterable,
          population,
          population_values,
          max_iterations,
          func_tolerance,
          position_tolerance,
          differential_weight,
          crossover_prob)
def _find_best_in_population(population, values):
  """Returns the population member achieving the minimum of `values`.

  Args:
    population: Python list of `Tensor`s whose common first dimension indexes
      population members.
    values: Rank-1 `Tensor` of objective values, one per member.

  Returns:
    A tuple `(best_parts, best_value)`: the parts of the winning member and
    the scalar minimum objective value.
  """
  minimum = tf.math.reduce_min(input_tensor=values)
  winner = tf.where(tf.math.equal(values, minimum))[0, 0]
  best_parts = [part[winner] for part in population]
  return best_parts, minimum
def _check_convergence(population,
                       population_values,
                       func_tolerance,
                       position_tolerance):
  """Returns True if either convergence criterion has been met.

  Two criteria are checked: (a) the spread of objective values across the
  population is within `func_tolerance`; (b) all members are close together
  in position space, within `position_tolerance`.
  """
  # Criterion (a): spread of the objective values.
  spread = tf.math.abs(
      tf.math.reduce_max(input_tensor=population_values) -
      tf.math.reduce_min(input_tensor=population_values))
  converged_by_value = spread <= func_tolerance

  # Criterion (b). Checking every pair of members would be quadratic in the
  # population size, so we only compare each member against member 0. By the
  # triangle inequality |a - b| <= |x0 - a| + |x0 - b|, so if every member is
  # within position_tolerance / 2 of member 0, then every pair of members is
  # within position_tolerance of each other, which is a (conservative)
  # sufficient condition.
  half_tol = position_tolerance / 2

  def _close_to_first(part):
    deviation = tf.math.abs(part - part[0])
    return tf.math.reduce_max(input_tensor=deviation) <= half_tol

  converged_by_position = tf.math.reduce_all(
      input_tensor=[_close_to_first(part) for part in population])
  return converged_by_value | converged_by_position
def _get_starting_population(initial_population,
                             initial_position,
                             population_size,
                             population_stddev,
                             seed):
  """Builds the initial population of candidate solutions.

  When `initial_population` is supplied it is used verbatim (converted to
  tensors). Otherwise a population is synthesized around `initial_position`:
  the given position is kept as member 0, and `population_size - 1` further
  members are drawn by adding zero-mean normal noise with standard deviation
  `population_stddev` to it.

  Args:
    initial_population: None or a list of `Tensor`s giving the population.
    initial_position: None or a list of `Tensor`s giving the seed solution.
      Must not be None when `initial_population` is None.
    population_size: Scalar integer `Tensor`; the number of members to
      create. Ignored when `initial_population` is supplied.
    population_stddev: Positive scalar real `Tensor` of the same dtype as the
      position parts; the scale of the synthesizing noise. Ignored when
      `initial_population` is supplied.
    seed: Seed for random number generation.

  Returns:
    A list of `Tensor`s: the initial population.
  """
  if initial_population is not None:
    return [tf.convert_to_tensor(value=part) for part in initial_population]

  seed_stream = distributions.SeedStream(seed, salt='get_starting_population')
  population = []
  for position_part in initial_position:
    position_part = tf.convert_to_tensor(value=position_part)
    event_shape = tf.shape(input=position_part)
    # Only population_size - 1 noisy members are drawn: the supplied position
    # itself is prepended as member 0, guaranteeing it is in the population.
    noise_shape = tf.concat([[population_size - 1], event_shape], axis=0)
    noisy_members = tf.random.normal(noise_shape,
                                     stddev=population_stddev,
                                     dtype=position_part.dtype.base_dtype,
                                     seed=seed_stream())
    noisy_members += position_part
    population.append(
        tf.concat([[position_part], noisy_members], axis=0))
  return population
def _binary_crossover(population,
                      population_size,
                      mutants,
                      crossover_prob,
                      seed):
  """Performs recombination by binary crossover for the current population.

  Let v_i denote the i'th component of the member v and m_i the corresponding
  component of the mutant vector corresponding to v. Then the crossed over
  vector w_i is determined by setting w_i =
  (m_i with probability=crossover_prob else v_i). In addition, DE requires
  that at least one of the components is crossed over (otherwise we end
  up with no change). This is done by choosing one index say k randomly where
  a forced crossover is performed (i.e. w_k = m_k). This is the scheme
  implemented in this function.

  Args:
    population: A Python list of `Tensor`s where each `Tensor` in the list
      must be of rank at least 1 and all the elements must have a common
      first dimension. The base population to cross over.
    population_size: A scalar integer `Tensor`. The number of elements in the
      population (i.e. size of the first dimension of any member of
      `population`).
    mutants: A Python list of `Tensor`s with the same structure as
      `population`. The mutated population.
    crossover_prob: A positive real scalar `Tensor` bounded above by 1.0. The
      probability of a crossover being performed for each axis.
    seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
      applied.

  Returns:
    A list of `Tensor`s of the same structure, dtype and shape as
    `population`. The recombined population.
  """
  sizes = [tf.cast(tf.size(input=x), dtype=tf.float64) for x in population]
  seed_stream = distributions.SeedStream(seed, salt='binary_crossover')
  # For each member, pick which part of the (possibly multi-part) state hosts
  # the forced crossover site; parts are chosen proportionally to their size.
  force_crossover_group = distributions.Categorical(sizes).sample(
      [population_size, 1], seed=seed_stream())
  recombinants = []
  for i, population_part in enumerate(population):
    # Work on a flattened [population_size, part_size] view of this part.
    pop_part_flat = tf.reshape(population_part, [population_size, -1])
    mutant_part_flat = tf.reshape(mutants[i], [population_size, -1])
    part_size = tf.size(input=population_part) // population_size
    # One-hot boolean mask with a single random forced-crossover site per
    # member; it only takes effect if this part won the group draw above.
    force_crossovers = tf.one_hot(
        tf.random.uniform([population_size],
                          minval=0,
                          maxval=part_size,
                          dtype=tf.int32,
                          seed=seed_stream()),
        part_size,
        on_value=True,
        off_value=False,
        dtype=tf.bool)  # Tensor of shape [population_size, size]
    group_mask = tf.math.equal(force_crossover_group, i)
    force_crossovers &= group_mask
    # Independent per-site Bernoulli(crossover_prob) draws, then OR in the
    # forced site so at least one component changes.
    do_binary_crossover = tf.random.uniform(
        [population_size, part_size],
        dtype=crossover_prob.dtype.base_dtype,
        seed=seed_stream()) < crossover_prob
    do_binary_crossover |= force_crossovers
    recombinant_flat = tf.where(
        do_binary_crossover,
        x=mutant_part_flat,
        y=pop_part_flat)
    # Restore the part's original (unflattened) shape.
    recombinant = tf.reshape(recombinant_flat,
                             tf.shape(input=population_part))
    recombinants.append(recombinant)
  return recombinants
def _get_mutants(population,
                 population_size,
                 mixing_indices,
                 differential_weight):
  """Computes the mutated vectors for each population member.

  Args:
    population: Python `list` of `Tensor`s representing the
      current population vectors. Each `Tensor` must be of the same real
      dtype. The first dimension of each `Tensor` indexes individual
      population members. For example, if the population is a list with a
      single `Tensor` of shape [n, m1, m2], then `n` is the population size
      and the shape of an individual solution is [m1, m2].
      If there is more than one element in the population, then each `Tensor`
      in the list should have the first axis of the same size.
    population_size: Scalar integer `Tensor`. The size of the population.
    mixing_indices: `Tensor` of integral dtype and shape [n, 3] where `n` is
      the number of members in the population. Each element of the `Tensor`
      must be a valid index into the first dimension of the population (i.e
      range between `0` and `n-1` inclusive).
    differential_weight: Real scalar `Tensor`. Must be positive and less than
      2.0. The parameter controlling the strength of mutation.

  Returns:
    mutants: Python `list` of `Tensor`s of the same dtype as the input
      population. NOTE(review): each part appears to come back flattened to
      shape [population_size, -1] (see the reshape below), not the original
      part shape -- downstream `_binary_crossover` flattens anyway, but
      confirm before relying on the shape.
  """
  mixing_indices = tf.reshape(mixing_indices, [-1])
  # Mutant = x_a + F * (x_b - x_c): the three donors of each member are
  # combined with weights [1, F, -F] respectively.
  weights = tf.stack([1.0, differential_weight, -differential_weight])
  def _mutant_part(population_part):
    # Gather the 3 donors for every member, arrange them along the last axis
    # ([population_size, part_size, 3]) and take the weighted sum.
    donors = tf.gather(population_part, mixing_indices)
    donors = tf.transpose(
        a=tf.reshape(donors, [population_size, 3, -1]), perm=[0, 2, 1])
    return tf.math.reduce_sum(input_tensor=donors * weights, axis=-1)
  return [_mutant_part(population_part) for population_part in population]
def _get_mixing_indices(size, seed=None, name=None):
  """Generates an array of indices suitable for mutation operation.

  The mutation operation in differential evolution requires that for every
  element of the population, three distinct other elements be chosen to
  produce a trial candidate. This function generates an array of shape
  [size, 3] satisfying the properties that:
    (a). array[i, :] does not contain the index 'i'.
    (b). array[i, :] does not contain any overlapping indices.
    (c). All elements in the array are between 0 and size - 1 inclusive.

  Args:
    size: Scalar integer `Tensor`. The number of samples as well as the range
      of the indices to sample from.
    seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
      applied.
      Default value: `None`.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: 'get_mixing_indices'.

  Returns:
    sample: A `Tensor` of shape [size, 3] and same dtype as `size` containing
      samples without replacement between 0 and size - 1 (inclusive) with the
      `i`th row not including the number `i`.
  """
  with tf.compat.v1.name_scope(
      name, default_name='get_mixing_indices', values=[size]):
    size = tf.convert_to_tensor(value=size)
    dtype = size.dtype
    seed_stream = distributions.SeedStream(seed, salt='get_mixing_indices')
    # Draw the three indices from successively smaller ranges; each is then
    # shifted past the previously drawn values so the triple is distinct.
    first = tf.random.uniform([size],
                              maxval=size-1,
                              dtype=dtype,
                              seed=seed_stream())
    second = tf.random.uniform([size],
                               maxval=size-2,
                               dtype=dtype,
                               seed=seed_stream())
    third = tf.random.uniform([size],
                              maxval=size-3,
                              dtype=dtype,
                              seed=seed_stream())

    # Shift second if it is on top of or to the right of first.
    # BUGFIX: the condition used to be `first < second` (i.e. shift when
    # second <= first), which collides when second == first - 1: e.g.
    # first=1, second=0 produced second=1 == first. Shifting when
    # second >= first (as done for `third` below) maps second uniformly onto
    # [0, size-2] \ {first}.
    second = tf.where(second < first, x=second, y=second + 1)
    smaller = tf.math.minimum(first, second)
    larger = tf.math.maximum(first, second)
    # Shift the third one so it does not coincide with either the first or
    # the second number. Assuming first < second, shift by 1 if the number is
    # in [first, second) and by 2 if the number is greater than or equal to
    # the second.
    third = tf.where(third < smaller, x=third, y=third + 1)
    third = tf.where(third < larger, x=third, y=third + 1)
    sample = tf.stack([first, second, third], axis=1)
    # Finally exclude the row index i itself: shift every value >= i up by 1,
    # mapping the triple onto [0, size-1] \ {i}.
    to_avoid = tf.expand_dims(tf.range(size), axis=-1)
    sample = tf.where(sample < to_avoid, x=sample, y=sample + 1)
    return sample
q266828 | _ensure_list | test | def _ensure_list(tensor_or_list):
"""Converts the input arg to a list if it is not a list already.
Args:
tensor_or_list: A `Tensor` or a Python list of `Tensor`s. The argument to
convert to a list of `Tensor`s.
Returns:
A tuple of two elements. The first is a Python list of `Tensor`s containing
the original arguments. The second is a boolean indicating whether
the original argument was a list or tuple already.
"""
if isinstance(tensor_or_list, (list, tuple)):
return list(tensor_or_list), True
return [tensor_or_list], False | python | {
"resource": ""
} |
def _get_tol(tol, dtype, validate_args):
  """Converts `tol` to a `dtype` Tensor (zero when None), with optional check.

  Args:
    tol: Scalar tolerance value, or `None` to mean a tolerance of zero.
    dtype: Dtype of the returned `Tensor`.
    validate_args: Python `bool`. When True, attach a runtime assertion that
      the tolerance is non-negative.

  Returns:
    A `Tensor` of dtype `dtype` holding the tolerance.
  """
  if tol is None:
    return tf.convert_to_tensor(value=0, dtype=dtype)
  result = tf.convert_to_tensor(value=tol, dtype=dtype)
  if not validate_args:
    return result
  assertion = assert_util.assert_non_negative(
      result, message="Argument 'tol' must be non-negative")
  return distribution_util.with_dependencies([assertion], result)
def soft_threshold(x, threshold, name=None):
  """Soft Thresholding operator.

  This operator is defined coordinate-wise by

  ```none
                               { x[i] - gamma,  x[i] >   gamma
  SoftThreshold(x, gamma)[i] = { 0,             x[i] ==  gamma
                               { x[i] + gamma,  x[i] <  -gamma
  ```

  It equals the proximity operator of the scaled L1 norm,
  `prox_{gamma L1}(x)`, where `prox_r(x) = argmin{ r(z) + 0.5 ||x - z||_2**2
  : z }`. This makes it the update step of proximal gradient descent for an
  objective `L(x) + ||x||_1` with `L` smooth: one repeatedly applies
  `x^{(k+1)} = SoftThreshold(x^{(k)} - gamma grad L(x^{(k)}), gamma)`,
  possibly with a step size `gamma` that varies per iteration.

  Args:
    x: `float` `Tensor` representing the input to the SoftThreshold function.
    threshold: nonnegative scalar, `float` `Tensor` representing the radius
      of the interval on which each coordinate of SoftThreshold takes the
      value zero. Denoted `gamma` above.
    name: Python string indicating the name of the TensorFlow operation.
      Default value: `'soft_threshold'`.

  Returns:
    softthreshold: `float` `Tensor` with the same shape and dtype as `x`,
      representing the value of the SoftThreshold function.

  #### References

  [1]: Yu, Yao-Liang. The Proximity Operator.
       https://www.cs.cmu.edu/~suvrit/teach/yaoliang_proximity.pdf

  [2]: Wikipedia Contributors. Proximal gradient methods for learning.
       _Wikipedia, The Free Encyclopedia_, 2018.
       https://en.wikipedia.org/wiki/Proximal_gradient_methods_for_learning
  """
  # https://math.stackexchange.com/questions/471339/derivation-of-soft-thresholding-operator
  with tf.compat.v1.name_scope(name, 'soft_threshold', [x, threshold]):
    x = tf.convert_to_tensor(value=x, name='x')
    threshold = tf.convert_to_tensor(
        value=threshold, dtype=x.dtype, name='threshold')
    # Shrink magnitudes toward zero, clamping at zero, then restore signs.
    shrunk_magnitude = tf.maximum(tf.abs(x) - threshold, 0.)
    return tf.sign(x) * shrunk_magnitude
def clip_by_value_preserve_gradient(t, clip_value_min, clip_value_max,
                                    name=None):
  """Clips `t` to `[clip_value_min, clip_value_max]` with an identity gradient.

  The forward value is identical to `tf.clip_by_value`, but the
  straight-through construction `t + stop_gradient(clipped - t)` makes the
  gradient with respect to `t` behave as if no clipping occurred:

  ```python
  tf.gradients(tfp.math.clip_by_value_preserve_gradient(x), x)[0]
  # ==> ones_like(x)
  ```

  Note: `clip_value_min` needs to be smaller or equal to `clip_value_max` for
  correct results.

  Args:
    t: A `Tensor`.
    clip_value_min: A scalar `Tensor`, or a `Tensor` with the same shape as
      `t`; the lower clip bound.
    clip_value_max: A scalar `Tensor`, or a `Tensor` with the same shape as
      `t`; the upper clip bound.
    name: A name for the operation (optional).
      Default value: `'clip_by_value_preserve_gradient'`.

  Returns:
    clipped_t: A clipped `Tensor` whose gradient w.r.t. `t` is the identity.
  """
  with tf.compat.v1.name_scope(name, 'clip_by_value_preserve_gradient',
                               [t, clip_value_min, clip_value_max]):
    t = tf.convert_to_tensor(value=t, name='t')
    clipped = tf.clip_by_value(t, clip_value_min, clip_value_max)
    # Forward: t + (clipped - t) == clipped.
    # Backward: the stop_gradient term contributes nothing, so d/dt == 1.
    return t + tf.stop_gradient(clipped - t)
"resource": ""
} |
def build_input_pipeline(train_images, batch_size):
  """Return a tensor that yields shuffled, repeated training batches."""
  batched = (
      tf.data.Dataset.from_tensor_slices(train_images)
      .shuffle(50000, reshuffle_each_iteration=True)
      .repeat()
      .batch(batch_size))
  # A one-shot iterator needs no explicit initialization in TF1 graphs.
  iterator = tf.compat.v1.data.make_one_shot_iterator(batched)
  return iterator.get_next()
"resource": ""
} |
def plot_generated_images(images, fname):
  """Save a 4x4 grid of synthetic images as a PNG file.

  Args:
    images: Iterable of at most 16 images sampled from the generative
      network; each is reshaped to `IMAGE_SHAPE[:-1]` for display.
    fname: Python `str`, filename to save the plot to.
  """
  fig = plt.figure(figsize=(4, 4))
  canvas = backend_agg.FigureCanvasAgg(fig)
  for i, image in enumerate(images):
    ax = fig.add_subplot(4, 4, i + 1)
    plt.axis('off')
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.imshow(image.reshape(IMAGE_SHAPE[:-1]), cmap='Greys_r')
  fig.tight_layout()
  plt.subplots_adjust(wspace=0.05, hspace=0.05)
  canvas.print_figure(fname, format='png')
  # BUG FIX: pyplot keeps a reference to every figure created via
  # `plt.figure`, so repeated calls (e.g. once per epoch) leak memory unless
  # the figure is explicitly closed.
  plt.close(fig)
"resource": ""
} |
def convert_to_string(self, productions):
  """Converts a sequence of productions into a string of terminal symbols.

  Args:
    productions: Tensor of shape [1, num_productions, num_production_rules].
      Slices along the `num_productions` dimension represent one-hot vectors.

  Returns:
    str that concatenates all terminal symbols from `productions`.

  Raises:
    ValueError: If the first production rule does not begin with
      `self.start_symbol`.
  """
  symbols = []
  for production in tf.unstack(productions, axis=1):
    # NOTE(review): `tf.argmax(...)` is used directly as a Python list index,
    # which only resolves to a concrete integer under eager execution --
    # presumably this method is intended to run eagerly; confirm.
    lhs, rhs = self.production_rules[tf.argmax(input=production, axis=-1)]
    if not symbols:  # first iteration
      if lhs != self.start_symbol:
        raise ValueError("`productions` must begin with `self.start_symbol`.")
      symbols = rhs
    else:
      # Greedily unroll the nonterminal symbols based on the first occurrence
      # in a linear sequence.
      index = symbols.index(lhs)
      # Splice the rule's right-hand side in place of the expanded
      # nonterminal.
      symbols = symbols[:index] + rhs + symbols[index + 1:]
  string = "".join(symbols)
  return string
"resource": ""
} |
def call(self, inputs):
  """Runs the model forward to generate a sequence of productions.

  Args:
    inputs: Unused.

  Returns:
    productions: Tensor of shape [1, num_productions, num_production_rules].
      Slices along the `num_productions` dimension represent one-hot vectors.
  """
  del inputs  # unused
  latent_code = ed.MultivariateNormalDiag(loc=tf.zeros(self.latent_size),
                                          sample_shape=1,
                                          name="latent_code")
  state = self.lstm.zero_state(1, dtype=tf.float32)
  t = 0
  productions = []
  # Depth-first expansion of the grammar: pop a nonterminal, sample one
  # production for it, and push any nonterminals from its right-hand side.
  stack = [self.grammar.start_symbol]
  while stack:
    symbol = stack.pop()
    net, state = self.lstm(latent_code, state)
    # Rules whose left-hand side is not `symbol` get a large negative logit,
    # so the categorical below only samples productions valid for `symbol`.
    logits = (self.output_layer(net) +
              self.grammar.mask(symbol, on_value=0., off_value=-1e9))
    production = ed.OneHotCategorical(logits=logits,
                                      name="production_" + str(t))
    # NOTE(review): `tf.argmax` is used as a Python list index, implying
    # eager-style execution here -- confirm.
    _, rhs = self.grammar.production_rules[tf.argmax(
        input=production, axis=-1)]
    for symbol in rhs:
      if symbol in self.grammar.nonterminal_symbols:
        stack.append(symbol)
    productions.append(production)
    t += 1
  return tf.stack(productions, axis=1)
"resource": ""
} |
def call(self, inputs):
  """Runs the encoder forward to return a stochastic latent code.

  Args:
    inputs: Tensor of shape [1, num_productions, num_production_rules]. It is
      a sequence of productions of length `num_productions`, each a one-hot
      vector over the production rules.

  Returns:
    latent_code_posterior: A random variable capturing a sample from the
      variational distribution, of shape [1, self.latent_size].
  """
  encoded = self.encoder_net(tf.cast(inputs, tf.float32))
  # The encoder emits 2 * latent_size units: the first half parameterizes
  # the posterior mean, the second half its (pre-softplus) scale.
  posterior_loc = encoded[..., :self.latent_size]
  posterior_scale = tf.nn.softplus(encoded[..., self.latent_size:])
  return ed.MultivariateNormalDiag(
      loc=posterior_loc,
      scale_diag=posterior_scale,
      name="latent_code_posterior")
"resource": ""
} |
def _hat_integral(self, x):
  """Integral of the `hat` function, used for sampling.

  The `hat` function h(x) = x^(-power) is a continuous (unnormalized)
  density touching each positive integer at the (unnormalized) pmf. This
  method evaluates its upper-tail integral H(x) = int_x^inf h(t) dt, which
  the sampler requires.

  Arguments:
    x: A Tensor of points x at which to evaluate H(x).

  Returns:
    A Tensor containing evaluation H(x) at x.
  """
  x = tf.cast(x, self.power.dtype)
  power_minus_one = self.power - 1.
  # H(x) = (1 + x)^(1 - power) / (power - 1), computed in log-space for
  # numerical stability.
  return tf.exp((-power_minus_one) * tf.math.log1p(x) -
                tf.math.log(power_minus_one))
"resource": ""
} |
def _hat_integral_inverse(self, x):
  """Inverse of `_hat_integral`, i.e. solves H(y) = x for y."""
  x = tf.cast(x, self.power.dtype)
  power_minus_one = self.power - 1.
  # y = (t * x)^(-1 / t) - 1 with t = power - 1, evaluated via expm1 of a
  # log-space expression for numerical stability.
  return tf.math.expm1(
      -(tf.math.log(power_minus_one) + tf.math.log(x)) / power_minus_one)
"resource": ""
} |
def matrix_rank(a, tol=None, validate_args=False, name=None):
  """Compute the matrix rank; the number of non-zero SVD singular values.

  Arguments:
    a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be
      pseudo-inverted.
    tol: Threshold below which the singular value is counted as "zero".
      Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`).
    validate_args: When `True`, additional assertions might be embedded in the
      graph.
      Default value: `False` (i.e., no graph assertions are added).
    name: Python `str` prefixed to ops created by this function.
      Default value: "matrix_rank".

  Returns:
    matrix_rank: (Batch of) `int32` scalars representing the number of non-zero
      singular values.
  """
  with tf.compat.v1.name_scope(name, 'matrix_rank', [a, tol]):
    a = tf.convert_to_tensor(value=a, dtype_hint=tf.float32, name='a')
    assertions = _maybe_validate_matrix(a, validate_args)
    if assertions:
      with tf.control_dependencies(assertions):
        a = tf.identity(a)
    s = tf.linalg.svd(a, compute_uv=False)
    if tol is None:
      # Prefer a static max(rows, cols) when the trailing dims are known so
      # part of the tolerance stays a Python constant.
      if a.shape[-2:].is_fully_defined():
        m = np.max(a.shape[-2:].as_list())
      else:
        m = tf.reduce_max(input_tensor=tf.shape(input=a)[-2:])
      eps = np.finfo(a.dtype.as_numpy_dtype).eps
      # Default tolerance is scaled by the largest singular value of each
      # matrix in the batch (hence keepdims, to broadcast against `s`).
      tol = (eps * tf.cast(m, a.dtype) *
             tf.reduce_max(input_tensor=s, axis=-1, keepdims=True))
    return tf.reduce_sum(input_tensor=tf.cast(s > tol, tf.int32), axis=-1)
"resource": ""
} |
def pinv(a, rcond=None, validate_args=False, name=None):
  """Compute the Moore-Penrose pseudo-inverse of a matrix.

  Calculate the [generalized inverse of a matrix](
  https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its
  singular-value decomposition (SVD) and including all large singular values.

  The pseudo-inverse of a matrix `A`, is defined as: "the matrix that 'solves'
  [the least-squares problem] `A @ x = b`," i.e., if `x_hat` is a solution, then
  `A_pinv` is the matrix such that `x_hat = A_pinv @ b`. It can be shown that if
  `U @ Sigma @ V.T = A` is the singular value decomposition of `A`, then
  `A_pinv = V @ inv(Sigma) U^T`. [(Strang, 1980)][1]

  This function is analogous to [`numpy.linalg.pinv`](
  https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html).
  It differs only in default value of `rcond`. In `numpy.linalg.pinv`, the
  default `rcond` is `1e-15`. Here the default is
  `10. * max(num_rows, num_cols) * np.finfo(dtype).eps`.

  Args:
    a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be
      pseudo-inverted.
    rcond: `Tensor` of small singular value cutoffs. Singular values smaller
      (in modulus) than `rcond` * largest_singular_value (again, in modulus) are
      set to zero. Must broadcast against `tf.shape(a)[:-2]`.
      Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`.
    validate_args: When `True`, additional assertions might be embedded in the
      graph.
      Default value: `False` (i.e., no graph assertions are added).
    name: Python `str` prefixed to ops created by this function.
      Default value: "pinv".

  Returns:
    a_pinv: The pseudo-inverse of input `a`. Has same shape as `a` except
      rightmost two dimensions are transposed.

  Raises:
    TypeError: if input `a` does not have `float`-like `dtype`.
    ValueError: if input `a` has fewer than 2 dimensions.

  #### Examples

  ```python
  import tensorflow as tf
  import tensorflow_probability as tfp

  a = tf.constant([[1.,  0.4,  0.5],
                   [0.4, 0.2,  0.25],
                   [0.5, 0.25, 0.35]])
  tf.matmul(tfp.math.pinv(a), a)
  # ==> array([[1., 0., 0.],
               [0., 1., 0.],
               [0., 0., 1.]], dtype=float32)

  a = tf.constant([[1.,  0.4,  0.5,  1.],
                   [0.4, 0.2,  0.25, 2.],
                   [0.5, 0.25, 0.35, 3.]])
  tf.matmul(tfp.math.pinv(a), a)
  # ==> array([[ 0.76,  0.37,  0.21, -0.02],
               [ 0.37,  0.43, -0.33,  0.02],
               [ 0.21, -0.33,  0.81,  0.01],
               [-0.02,  0.02,  0.01,  1.  ]], dtype=float32)
  ```

  #### References

  [1]: G. Strang. "Linear Algebra and Its Applications, 2nd Ed." Academic Press,
       Inc., 1980, pp. 139-142.
  """
  with tf.compat.v1.name_scope(name, 'pinv', [a, rcond]):
    a = tf.convert_to_tensor(value=a, name='a')

    assertions = _maybe_validate_matrix(a, validate_args)
    if assertions:
      with tf.control_dependencies(assertions):
        a = tf.identity(a)

    dtype = a.dtype.as_numpy_dtype

    if rcond is None:
      def get_dim_size(dim):
        # Prefer the static dimension when available so `rcond` can stay a
        # Python float instead of a graph tensor.
        if tf.compat.dimension_value(a.shape[dim]) is not None:
          return tf.compat.dimension_value(a.shape[dim])
        return tf.shape(input=a)[dim]

      num_rows = get_dim_size(-2)
      num_cols = get_dim_size(-1)
      if isinstance(num_rows, int) and isinstance(num_cols, int):
        max_rows_cols = float(max(num_rows, num_cols))
      else:
        max_rows_cols = tf.cast(tf.maximum(num_rows, num_cols), dtype)
      rcond = 10. * max_rows_cols * np.finfo(dtype).eps

    rcond = tf.convert_to_tensor(value=rcond, dtype=dtype, name='rcond')

    # Calculate pseudo inverse via SVD.
    # Note: if a is symmetric then u == v. (We might observe additional
    # performance by explicitly setting `v = u` in such cases.)
    [
        singular_values,         # Sigma
        left_singular_vectors,   # U
        right_singular_vectors,  # V
    ] = tf.linalg.svd(a, full_matrices=False, compute_uv=True)

    # Saturate small singular values to inf. This has the effect of make
    # `1. / s = 0.` while not resulting in `NaN` gradients.
    cutoff = rcond * tf.reduce_max(input_tensor=singular_values, axis=-1)
    singular_values = tf.where(
        singular_values > cutoff[..., tf.newaxis], singular_values,
        tf.fill(tf.shape(input=singular_values), np.array(np.inf, dtype)))

    # Although `a == tf.matmul(u, s * v, transpose_b=True)` we swap
    # `u` and `v` here so that `tf.matmul(pinv(A), A) = tf.eye()`, i.e.,
    # a matrix inverse has "transposed" semantics.
    a_pinv = tf.matmul(
        right_singular_vectors / singular_values[..., tf.newaxis, :],
        left_singular_vectors,
        adjoint_b=True)

    if a.shape.ndims is not None:
      # Statically: pinv swaps the trailing two dims of `a`.
      a_pinv.set_shape(a.shape[:-2].concatenate([a.shape[-1], a.shape[-2]]))

    return a_pinv
"resource": ""
} |
def lu_solve(lower_upper, perm, rhs,
             validate_args=False,
             name=None):
  """Solves systems of linear eqns `A X = RHS`, given LU factorizations.

  Note: this function does not verify the implied matrix is actually invertible
  nor is this condition checked even when `validate_args=True`.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if
      `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linag.lu`, i.e., if
      `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`.
    rhs: Matrix-shaped float `Tensor` representing targets for which to solve;
      `A X = RHS`. To handle vector cases, use:
      `lu_solve(..., rhs[..., tf.newaxis])[..., 0]`.
    validate_args: Python `bool` indicating whether arguments should be checked
      for correctness. Note: this function does not verify the implied matrix is
      actually invertible, even when `validate_args=True`.
      Default value: `False` (i.e., don't validate arguments).
    name: Python `str` name given to ops managed by this object.
      Default value: `None` (i.e., "lu_solve").

  Returns:
    x: The `X` in `A @ X = RHS`.

  #### Examples

  ```python
  import numpy as np
  import tensorflow as tf
  import tensorflow_probability as tfp

  x = [[[1., 2],
        [3, 4]],
       [[7, 8],
        [3, 4]]]
  inv_x = tfp.math.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2))
  tf.assert_near(tf.matrix_inverse(x), inv_x)
  # ==> True
  ```
  """
  with tf.compat.v1.name_scope(name, 'lu_solve', [lower_upper, perm, rhs]):
    lower_upper = tf.convert_to_tensor(
        value=lower_upper, dtype_hint=tf.float32, name='lower_upper')
    perm = tf.convert_to_tensor(value=perm, dtype_hint=tf.int32, name='perm')
    rhs = tf.convert_to_tensor(
        value=rhs, dtype_hint=lower_upper.dtype, name='rhs')

    assertions = _lu_solve_assertions(lower_upper, perm, rhs, validate_args)
    if assertions:
      with tf.control_dependencies(assertions):
        lower_upper = tf.identity(lower_upper)
        perm = tf.identity(perm)
        rhs = tf.identity(rhs)

    if rhs.shape.ndims == 2 and perm.shape.ndims == 1:
      # Both rhs and perm have scalar batch_shape.
      permuted_rhs = tf.gather(rhs, perm, axis=-2)
    else:
      # Either rhs or perm have non-scalar batch_shape or we can't determine
      # this information statically.
      rhs_shape = tf.shape(input=rhs)
      broadcast_batch_shape = tf.broadcast_dynamic_shape(
          rhs_shape[:-2],
          tf.shape(input=perm)[:-1])
      d, m = rhs_shape[-2], rhs_shape[-1]
      rhs_broadcast_shape = tf.concat([broadcast_batch_shape, [d, m]], axis=0)

      # Tile out rhs.
      broadcast_rhs = tf.broadcast_to(rhs, rhs_broadcast_shape)
      broadcast_rhs = tf.reshape(broadcast_rhs, [-1, d, m])

      # Tile out perm and add batch indices, so a single gather_nd performs a
      # per-batch row permutation over the flattened batch.
      broadcast_perm = tf.broadcast_to(perm, rhs_broadcast_shape[:-1])
      broadcast_perm = tf.reshape(broadcast_perm, [-1, d])
      broadcast_batch_size = tf.reduce_prod(input_tensor=broadcast_batch_shape)
      broadcast_batch_indices = tf.broadcast_to(
          tf.range(broadcast_batch_size)[:, tf.newaxis],
          [broadcast_batch_size, d])
      broadcast_perm = tf.stack([broadcast_batch_indices, broadcast_perm],
                                axis=-1)

      permuted_rhs = tf.gather_nd(broadcast_rhs, broadcast_perm)
      permuted_rhs = tf.reshape(permuted_rhs, rhs_broadcast_shape)

    # Recover L from the packed L + U - eye representation: strictly lower
    # triangle of `lower_upper` with a unit diagonal.
    lower = tf.linalg.set_diag(
        tf.linalg.band_part(lower_upper, num_lower=-1, num_upper=0),
        tf.ones(tf.shape(input=lower_upper)[:-1], dtype=lower_upper.dtype))
    # Solve L y = P rhs, then U x = y.
    return linear_operator_util.matrix_triangular_solve_with_broadcast(
        lower_upper,  # Only upper is accessed.
        linear_operator_util.matrix_triangular_solve_with_broadcast(
            lower, permuted_rhs),
        lower=False)
"resource": ""
} |
def lu_matrix_inverse(lower_upper, perm, validate_args=False, name=None):
  """Computes a matrix inverse given the matrix's LU decomposition.

  This op is conceptually identical to,

  ```python
  inv_X = tfp.math.lu_matrix_inverse(*tf.linalg.lu(X))
  tf.assert_near(tf.matrix_inverse(X), inv_X)
  # ==> True
  ```

  Note: this function does not verify the implied matrix is actually invertible
  nor is this condition checked even when `validate_args=True`.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if
      `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linag.lu`, i.e., if
      `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`.
    validate_args: Python `bool` indicating whether arguments should be checked
      for correctness. Note: this function does not verify the implied matrix is
      actually invertible, even when `validate_args=True`.
      Default value: `False` (i.e., don't validate arguments).
    name: Python `str` name given to ops managed by this object.
      Default value: `None` (i.e., "lu_matrix_inverse").

  Returns:
    inv_x: The matrix_inv, i.e.,
      `tf.matrix_inverse(tfp.math.lu_reconstruct(lu, perm))`.

  #### Examples

  ```python
  import numpy as np
  import tensorflow as tf
  import tensorflow_probability as tfp

  x = [[[3., 4], [1, 2]],
       [[7., 8], [3, 4]]]
  inv_x = tfp.math.lu_matrix_inverse(*tf.linalg.lu(x))
  tf.assert_near(tf.matrix_inverse(x), inv_x)
  # ==> True
  ```
  """
  with tf.compat.v1.name_scope(name, 'lu_matrix_inverse', [lower_upper, perm]):
    lower_upper = tf.convert_to_tensor(
        value=lower_upper, dtype_hint=tf.float32, name='lower_upper')
    perm = tf.convert_to_tensor(value=perm, dtype_hint=tf.int32, name='perm')
    assertions = _lu_reconstruct_assertions(lower_upper, perm, validate_args)
    if assertions:
      with tf.control_dependencies(assertions):
        lower_upper = tf.identity(lower_upper)
        perm = tf.identity(perm)
    shape = tf.shape(input=lower_upper)
    # The inverse is the solution of A X = I; inputs were already validated
    # above, so skip re-validation inside `lu_solve`.
    return lu_solve(
        lower_upper, perm,
        rhs=tf.eye(shape[-1], batch_shape=shape[:-2], dtype=lower_upper.dtype),
        validate_args=False)
"resource": ""
} |
def _lu_reconstruct_assertions(lower_upper, perm, validate_args):
  """Returns list of assertions related to `lu_reconstruct` assumptions.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`; must be (a batch of)
      square matrices, rank >= 2.
    perm: `p` as returned by `tf.linalg.lu`; its rank must be exactly one
      less than `lower_upper`'s.
    validate_args: Python `bool`. When `True`, checks that cannot be decided
      statically are added to the graph as assertion ops.

  Returns:
    assertions: Python `list` of assertion ops (possibly empty).

  Raises:
    ValueError: If a statically checkable assumption is violated.
  """
  assertions = []

  message = 'Input `lower_upper` must have at least 2 dimensions.'
  if lower_upper.shape.ndims is not None:
    if lower_upper.shape.ndims < 2:
      raise ValueError(message)
  elif validate_args:
    assertions.append(
        tf.compat.v1.assert_rank_at_least(lower_upper, rank=2, message=message))

  message = '`rank(lower_upper)` must equal `rank(perm) + 1`'
  if lower_upper.shape.ndims is not None and perm.shape.ndims is not None:
    if lower_upper.shape.ndims != perm.shape.ndims + 1:
      raise ValueError(message)
  elif validate_args:
    assertions.append(
        tf.compat.v1.assert_rank(
            lower_upper, rank=tf.rank(perm) + 1, message=message))

  message = '`lower_upper` must be square.'
  # BUG FIX: squareness concerns the trailing two (event) dimensions, so the
  # static check must inspect `shape[-2:]`; the previous `shape[:-2]` looked
  # at the batch dimensions instead.
  if lower_upper.shape[-2:].is_fully_defined():
    if lower_upper.shape[-2] != lower_upper.shape[-1]:
      raise ValueError(message)
  elif validate_args:
    m, n = tf.split(tf.shape(input=lower_upper)[-2:], num_or_size_splits=2)
    assertions.append(tf.compat.v1.assert_equal(m, n, message=message))

  return assertions
"resource": ""
} |
def _lu_solve_assertions(lower_upper, perm, rhs, validate_args):
  """Returns list of assertions related to `lu_solve` assumptions.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`.
    perm: `p` as returned by `tf.linalg.lu`.
    rhs: Matrix-shaped `Tensor` of targets; its row count must match
      `lower_upper`'s trailing dimension.
    validate_args: Python `bool`. When `True`, checks that cannot be decided
      statically are added to the graph as assertion ops.

  Returns:
    assertions: Python `list` of assertion ops (possibly empty).

  Raises:
    ValueError: If a statically checkable assumption is violated.
  """
  assertions = _lu_reconstruct_assertions(lower_upper, perm, validate_args)

  message = 'Input `rhs` must have at least 2 dimensions.'
  if rhs.shape.ndims is not None:
    if rhs.shape.ndims < 2:
      raise ValueError(message)
  elif validate_args:
    assertions.append(
        tf.compat.v1.assert_rank_at_least(rhs, rank=2, message=message))

  # BUG FIX: the check below compares against `rhs.shape[-2]` (the number of
  # rows of the right-hand side), so the message must say `[-2]`, not `[-1]`.
  message = '`lower_upper.shape[-1]` must equal `rhs.shape[-2]`.'
  if (tf.compat.dimension_value(lower_upper.shape[-1]) is not None and
      tf.compat.dimension_value(rhs.shape[-2]) is not None):
    if lower_upper.shape[-1] != rhs.shape[-2]:
      raise ValueError(message)
  elif validate_args:
    assertions.append(
        tf.compat.v1.assert_equal(
            tf.shape(input=lower_upper)[-1],
            tf.shape(input=rhs)[-2],
            message=message))

  return assertions
"resource": ""
} |
def _sparse_block_diag(sp_a):
  """Returns a block diagonal rank 2 SparseTensor from a batch of SparseTensors.

  Args:
    sp_a: A rank 3 `SparseTensor` representing a batch of matrices.

  Returns:
    sp_block_diag_a: matrix-shaped, `float` `SparseTensor` with the same dtype
      as `sparse_or_matrix`, of shape [B * M, B * N] where `sp_a` has shape
      [B, M, N]. Each [M, N] batch of `sp_a` is lined up along the diagonal.
  """
  # Construct the matrix [[M, N], [1, 0], [0, 1]] which would map the index
  # (b, i, j) to (Mb + i, Nb + j). This effectively creates a block-diagonal
  # matrix of dense shape [B * M, B * N].
  # Note that this transformation doesn't increase the number of non-zero
  # entries in the SparseTensor.
  sp_a_shape = tf.convert_to_tensor(value=_get_shape(sp_a, tf.int64))
  ind_mat = tf.concat([[sp_a_shape[-2:]], tf.eye(2, dtype=tf.int64)], axis=0)
  # Each row of `sp_a.indices` is (b, i, j); multiplying by `ind_mat` maps it
  # to the 2-D index (Mb + i, Nb + j).
  indices = tf.matmul(sp_a.indices, ind_mat)
  # Elementwise B * [M, N] gives the dense shape [B * M, B * N].
  dense_shape = sp_a_shape[0] * sp_a_shape[1:]
  return tf.SparseTensor(
      indices=indices, values=sp_a.values, dense_shape=dense_shape)
"resource": ""
} |
def _maybe_validate_matrix(a, validate_args):
  """Checks that `a` is a `float`-typed matrix; returns graph assertions."""
  assertions = []
  if not a.dtype.is_floating:
    raise TypeError('Input `a` must have `float`-like `dtype` '
                    '(saw {}).'.format(a.dtype.name))
  ndims = a.shape.ndims
  if ndims is not None:
    # Rank is statically known: validate eagerly; no graph op required.
    if ndims < 2:
      raise ValueError('Input `a` must have at least 2 dimensions '
                       '(saw: {}).'.format(ndims))
  elif validate_args:
    # Rank unknown at graph-construction time: defer the check to runtime.
    assertions.append(tf.compat.v1.assert_rank_at_least(
        a, rank=2, message='Input `a` must have at least 2 dimensions.'))
  return assertions
"resource": ""
} |
def _grad_neg_log_likelihood_and_fim(model_matrix, linear_response, response,
                                     model):
  """Computes the neg-log-likelihood gradient and Fisher information for a GLM.

  Note that Fisher information is related to the Hessian of the log-likelihood
  by the equation

  ```none
  FisherInfo = E[Hessian with respect to model_coefficients of -LogLikelihood(
      Y | model_matrix, model_coefficients)]
  ```

  where `LogLikelihood` is the log-likelihood of a generalized linear model
  parameterized by `model_matrix` and `model_coefficients`, and the expectation
  is taken over Y, distributed according to the same GLM with the same parameter
  values.

  Args:
    model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor`
      where each row represents a sample's features. Has shape `[N, n]` where
      `N` is the number of data samples and `n` is the number of features per
      sample.
    linear_response: (Batch of) vector-shaped `Tensor` with the same dtype as
      `model_matrix`, equal to `model_matix @ model_coefficients` where
      `model_coefficients` are the coefficients of the linear component of the
      GLM.
    response: (Batch of) vector-shaped `Tensor` with the same dtype as
      `model_matrix` where each element represents a sample's observed response
      (to the corresponding row of features).
    model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link
      function and distribution of the GLM, and thus characterizes the negative
      log-likelihood. Must have sufficient statistic equal to the response, that
      is, `T(y) = y`.

  Returns:
    grad_neg_log_likelihood: (Batch of) vector-shaped `Tensor` with the same
      shape and dtype as a single row of `model_matrix`, representing the
      gradient of the negative log likelihood of `response` given linear
      response `linear_response`.
    fim_middle: (Batch of) vector-shaped `Tensor` with the same shape and dtype
      as a single column of `model_matrix`, satisfying the equation
      `Fisher information =
      Transpose(model_matrix)
      @ diag(fim_middle)
      @ model_matrix`.
  """
  # TODO(b/111926503): Determine whether there are some practical cases where it
  # is computationally favorable to compute the full FIM.
  mean, variance, grad_mean = model(linear_response)

  # Entries with a non-finite or zero link gradient, or non-finite or
  # non-positive variance, would poison the update; mask them below.
  is_valid = (
      tf.math.is_finite(grad_mean) & tf.not_equal(grad_mean, 0.)
      & tf.math.is_finite(variance) & (variance > 0.))

  def _mask_if_invalid(x, mask):
    # Replace entries of `x` at invalid positions with the constant `mask`.
    mask = tf.fill(
        tf.shape(input=x), value=np.array(mask, x.dtype.as_numpy_dtype))
    return tf.where(is_valid, x, mask)

  # TODO(b/111923449): Link to derivation once it's available.
  # Masking variance to +inf zeroes the contribution of invalid entries.
  v = (response - mean) * _mask_if_invalid(grad_mean, 1) / _mask_if_invalid(
      variance, np.inf)
  grad_log_likelihood = sparse_or_dense_matvecmul(
      model_matrix, v, adjoint_a=True)
  fim_middle = _mask_if_invalid(grad_mean, 0.)**2 / _mask_if_invalid(
      variance, np.inf)
  return -grad_log_likelihood, fim_middle
"resource": ""
} |
def fit_sparse(model_matrix,
               response,
               model,
               model_coefficients_start,
               tolerance,
               l1_regularizer,
               l2_regularizer=None,
               maximum_iterations=None,
               maximum_full_sweeps_per_iteration=1,
               learning_rate=None,
               name=None):
  r"""Fits a GLM using coordinate-wise FIM-informed proximal gradient descent.

  This function uses an L1- and L2-regularized, second-order quasi-Newton
  method to find maximum-likelihood parameters for the given model and
  observed data. The second-order approximations use negative Fisher
  information in place of the Hessian, that is,

  ```none
  FisherInfo = E_Y[Hessian with respect to model_coefficients of
      -LogLikelihood(Y | model_matrix, current value of model_coefficients)]
  ```

  For large, sparse data sets, `model_matrix` should be supplied as a
  `SparseTensor`.

  Args:
    model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor`
      of shape `[N, n]` where each of the `N` rows represents a sample's `n`
      features.
    response: (Batch of) vector-shaped `Tensor` with the same dtype as
      `model_matrix`; each element is a sample's observed response.
    model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link
      function and distribution of the GLM, and thus characterizes the
      negative log-likelihood which will be minimized. Must have sufficient
      statistic equal to the response, that is, `T(y) = y`.
    model_coefficients_start: (Batch of) vector-shaped, `float` `Tensor` with
      the same dtype as `model_matrix`, of shape `[n]`; the initial
      coefficient values.
    tolerance: scalar, `float` `Tensor` representing the tolerance for each
      optimization step; see the `tolerance` argument of
      `fit_sparse_one_step`.
    l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1
      regularization term.
    l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2
      regularization term.
      Default value: `None` (i.e., no L2 regularization).
    maximum_iterations: Python integer specifying the maximum number of
      iterations of the outer loop (i.e., calls to `fit_sparse_one_step`);
      after this many iterations the algorithm terminates even without
      convergence.
      Default value: `1`.
    maximum_full_sweeps_per_iteration: Python integer specifying the maximum
      number of coordinate descent sweeps allowed in each iteration.
      Default value: `1`.
    learning_rate: scalar, `float` `Tensor` representing a multiplicative
      factor used to dampen the proximal gradient descent steps.
      Default value: `None` (i.e., factor is conceptually `1`).
    name: Python string representing the name of the TensorFlow operation.
      The default name is `"fit_sparse"`.

  Returns:
    model_coefficients: (Batch of) `Tensor` of the same shape and dtype as
      `model_coefficients_start`; the computed coefficients which minimize
      the regularized negative log-likelihood.
    is_converged: scalar, `bool` `Tensor` indicating whether the minimization
      converged across all batches within the specified number of iterations.
    iter: scalar, `int` `Tensor` indicating the actual number of outer-loop
      iterations performed.

  #### References

  [1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths
       for Generalized Linear Models via Coordinate Descent. _Journal of
       Statistical Software_, 33(1), 2010.
       https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf

  [2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for
       L1-regularized Logistic Regression. _Journal of Machine Learning
       Research_, 13, 2012.
       http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf
  """
  graph_deps = [
      model_matrix,
      response,
      model_coefficients_start,
      l1_regularizer,
      l2_regularizer,
      maximum_iterations,
      maximum_full_sweeps_per_iteration,
      # TODO(b/111925792): Replace `tolerance` arg with something like
      # `convergence_criteria_fn`.
      tolerance,
      learning_rate,
  ]
  with tf.compat.v1.name_scope(name, 'fit_sparse', graph_deps):
    # TODO(b/111922388): Include dispersion and offset parameters.
    def _neg_log_likelihood_grad_and_fim(coefficients):
      """Gradient of -log-likelihood and factored FIM at `coefficients`."""
      linear_response = sparse_or_dense_matvecmul(model_matrix, coefficients)
      grad, fim_middle = _grad_neg_log_likelihood_and_fim(
          model_matrix, linear_response, response, model)
      # The optimizer consumes the FIM in factored form:
      # FIM = transpose(model_matrix) @ diag(fim_middle) @ model_matrix.
      return grad, model_matrix, fim_middle

    return tfp.optimizer.proximal_hessian_sparse_minimize(
        _neg_log_likelihood_grad_and_fim,
        x_start=model_coefficients_start,
        l1_regularizer=l1_regularizer,
        l2_regularizer=l2_regularizer,
        maximum_iterations=maximum_iterations,
        maximum_full_sweeps_per_iteration=maximum_full_sweeps_per_iteration,
        learning_rate=learning_rate,
        tolerance=tolerance,
        name=name)
"resource": ""
} |
def _gen_slices(num_blocks, n_in, n_out, mask_type=MASK_EXCLUSIVE):
  """Generate the slices for building an autoregressive mask."""
  # TODO(b/67594795): Better support of dynamic shape.
  block_in = n_in // num_blocks
  block_out = n_out // num_blocks
  # In the exclusive case the first block of rows is skipped, zeroing the
  # "diagonal" connections.
  first_row = block_out if mask_type == MASK_EXCLUSIVE else 0
  result = []
  for k in range(num_blocks):
    # Rows from the k-th output block downward may see the k-th input block.
    row_slice = slice(first_row + k * block_out, None)
    col_slice = slice(k * block_in, (k + 1) * block_in)
    result.append([row_slice, col_slice])
  return result
"resource": ""
} |
def _gen_mask(num_blocks,
              n_in,
              n_out,
              mask_type=MASK_EXCLUSIVE,
              dtype=tf.float32):
  """Generate the mask for building an autoregressive dense layer."""
  # TODO(b/67594795): Better support of dynamic shape.
  result = np.zeros([n_out, n_in], dtype=dtype.as_numpy_dtype())
  # Turn on the allowed (autoregressive) connections block by block.
  for rows, cols in _gen_slices(num_blocks, n_in, n_out, mask_type=mask_type):
    result[rows, cols] = 1
  return result
"resource": ""
} |
def masked_dense(inputs,
                 units,
                 num_blocks=None,
                 exclusive=False,
                 kernel_initializer=None,
                 reuse=None,
                 name=None,
                 *args,  # pylint: disable=keyword-arg-before-vararg
                 **kwargs):
  """An autoregressively masked dense layer. Analogous to `tf.layers.dense`.
  See [Germain et al. (2015)][1] for detailed explanation.
  Arguments:
    inputs: Tensor input.
    units: Python `int` scalar representing the dimensionality of the output
      space.
    num_blocks: Python `int` scalar representing the number of blocks for the
      MADE masks.
    exclusive: Python `bool` scalar representing whether to zero the diagonal of
      the mask, used for the first layer of a MADE.
    kernel_initializer: Initializer function for the weight matrix.
      If `None` (default), weights are initialized using the
      `tf.glorot_random_initializer`.
    reuse: Python `bool` scalar representing whether to reuse the weights of a
      previous layer by the same name.
    name: Python `str` used to describe ops managed by this function.
    *args: `tf.layers.dense` arguments.
    **kwargs: `tf.layers.dense` keyword arguments.
  Returns:
    Output tensor.
  Raises:
    NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
      graph execution.
  #### References
  [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE:
       Masked Autoencoder for Distribution Estimation. In _International
       Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
  """
  # TODO(b/67594795): Better support of dynamic shape.
  input_depth = tf.compat.dimension_value(
      tensorshape_util.with_rank_at_least(inputs.shape, 1)[-1])
  if input_depth is None:
    raise NotImplementedError(
        "Rightmost dimension must be known prior to graph execution.")
  # Transposed to [input_depth, units] to match the kernel layout used by
  # `tf.layers.Dense`.
  mask = _gen_mask(num_blocks, input_depth, units,
                   MASK_EXCLUSIVE if exclusive else MASK_INCLUSIVE).T
  if kernel_initializer is None:
    kernel_initializer = tf.compat.v1.glorot_normal_initializer()
  def masked_initializer(shape, dtype=None, partition_info=None):
    # Zero out disallowed weights at initialization; the `kernel_constraint`
    # below re-applies the mask after every update so autoregressivity is
    # preserved throughout training.
    return mask * kernel_initializer(shape, dtype, partition_info)
  with tf.compat.v2.name_scope(name or "masked_dense"):
    layer = tf.compat.v1.layers.Dense(
        units,
        kernel_initializer=masked_initializer,
        kernel_constraint=lambda x: mask * x,
        name=name,
        dtype=dtype_util.base_dtype(inputs.dtype),
        _scope=name,
        _reuse=reuse,
        *args,  # pylint: disable=keyword-arg-before-vararg
        **kwargs)
    return layer.apply(inputs)
"resource": ""
} |
q266852 | _create_input_order | test | def _create_input_order(input_size, input_order="left-to-right"):
"""Returns a degree vectors for the input."""
if isinstance(input_order, six.string_types):
if input_order == "left-to-right":
return np.arange(start=1, stop=input_size + 1)
elif input_order == "right-to-left":
return np.arange(start=input_size, stop=0, step=-1)
elif input_order == "random":
ret = np.arange(start=1, stop=input_size + 1)
np.random.shuffle(ret)
return ret
elif np.all(np.sort(input_order) == np.arange(1, input_size + 1)):
return np.array(input_order)
raise ValueError("Invalid input order: '{}'.".format(input_order)) | python | {
"resource": ""
} |
def _create_degrees(input_size,
                    hidden_units=None,
                    input_order="left-to-right",
                    hidden_degrees="equal"):
  """Returns a list of degree vectors, one for each input and hidden layer.
  A unit with degree d can only receive input from units with degree < d. Output
  units always have the same degree as their associated input unit.
  Args:
    input_size: Number of inputs.
    hidden_units: list with the number of hidden units per layer. It does not
      include the output layer. Each hidden unit size must be at least the size
      of length (otherwise autoregressivity is not possible).
    input_order: Order of degrees to the input units: 'random', 'left-to-right',
      'right-to-left', or an array of an explicit order. For example,
      'left-to-right' builds an autoregressive model
      p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
    hidden_degrees: Method for assigning degrees to the hidden units:
      'equal', 'random'. If 'equal', hidden units in each layer are allocated
      equally (up to a remainder term) to each degree. Default: 'equal'.
  Returns:
    A list of `1 + len(hidden_units)` integer numpy vectors: the input degree
    vector followed by one degree vector per hidden layer.
  Raises:
    ValueError: invalid input order.
    ValueError: invalid hidden degrees.
  """
  input_order = _create_input_order(input_size, input_order)
  degrees = [input_order]
  if hidden_units is None:
    hidden_units = []
  for units in hidden_units:
    # `six.string_types` is just `(str,)` on Python 3; use `str` directly.
    if isinstance(hidden_degrees, str):
      if hidden_degrees == "random":
        # samples from: [low, high)
        degrees.append(
            np.random.randint(low=min(np.min(degrees[-1]), input_size - 1),
                              high=input_size,
                              size=units))
      elif hidden_degrees == "equal":
        min_degree = min(np.min(degrees[-1]), input_size - 1)
        degrees.append(np.maximum(
            min_degree,
            # Evenly divide the range `[1, input_size - 1]` in to `units + 1`
            # segments, and pick the boundaries between the segments as degrees.
            np.ceil(np.arange(1, units + 1)
                    * (input_size - 1) / float(units + 1)).astype(np.int32)))
      else:
        # Fixed: this message previously said "Invalid hidden order", which
        # conflated it with the `input_order` error above.
        raise ValueError(
            'Invalid hidden degrees: "{}".'.format(hidden_degrees))
  return degrees
"resource": ""
} |
q266854 | _create_masks | test | def _create_masks(degrees):
"""Returns a list of binary mask matrices enforcing autoregressivity."""
return [
# Create input->hidden and hidden->hidden masks.
inp[:, np.newaxis] <= out
for inp, out in zip(degrees[:-1], degrees[1:])
] + [
# Create hidden->output mask.
degrees[-1][:, np.newaxis] < degrees[0]
] | python | {
"resource": ""
} |
def _make_masked_initializer(mask, initializer):
  """Returns a masked version of the given initializer.

  Args:
    mask: Binary mask (numpy array or Tensor) applied elementwise to the
      initializer's output; must broadcast with the kernel shape.
    initializer: Any Keras initializer identifier accepted by
      `tf.keras.initializers.get` (instance, string name, or config dict).

  Returns:
    An initializer-compatible callable whose output equals the wrapped
    initializer's output multiplied elementwise by `mask`.
  """
  initializer = tf.keras.initializers.get(initializer)
  def masked_initializer(shape, dtype=None, partition_info=None):
    # If no `partition_info` is given, then don't pass it to `initializer`, as
    # `initializer` may be a `tf.compat.v2.initializers.Initializer` (which
    # don't accept a `partition_info` argument).
    if partition_info is None:
      x = initializer(shape, dtype)
    else:
      x = initializer(shape, dtype, partition_info)
    return tf.cast(mask, x.dtype) * x
  return masked_initializer
"resource": ""
} |
def build(self, input_shape):
  """See tfkl.Layer.build.

  Constructs the masked MADE sub-network realizing this layer.

  Args:
    input_shape: `TensorShape` of the input. Its final dimension defines the
      event size when `event_shape` was not given at `__init__`, and must
      match it otherwise.

  Raises:
    ValueError: if the final dimension of `input_shape` conflicts with the
      event shape declared at `__init__`.
  """
  if self._event_shape is None:
    # `event_shape` wasn't specified at __init__, so infer from `input_shape`.
    self._event_shape = [tf.compat.dimension_value(input_shape[-1])]
  self._event_size = self._event_shape[-1]
  self._event_ndims = len(self._event_shape)
  # Should we throw if input_shape has rank > 2?
  if input_shape[-1] != self._event_shape[-1]:
    raise ValueError("Invalid final dimension of `input_shape`. "
                     "Expected `{!r}`, but got `{!r}`".format(
                         self._event_shape[-1], input_shape[-1]))
  # Construct the masks.
  self._input_order = _create_input_order(
      self._event_size, self._input_order_param)
  self._masks = _create_masks(_create_degrees(
      input_size=self._event_size,
      hidden_units=self._hidden_units,
      input_order=self._input_order,
      hidden_degrees=self._hidden_degrees))
  # In the final layer, we will produce `self._params` outputs for each of the
  # `self._event_size` inputs to `AutoregressiveLayer`. But `masks[-1]` has
  # shape `[self._hidden_units[-1], self._event_size]`. Thus, we need to
  # expand the mask to `[hidden_units[-1], event_size * self._params]` such
  # that all units for the same input are masked identically. In particular,
  # we tile the mask so the j-th element of `tf.unstack(output, axis=-1)` is a
  # tensor of the j-th parameter/unit for each input.
  #
  # NOTE: Other orderings of the output could be faster -- should benchmark.
  self._masks[-1] = np.reshape(
      np.tile(self._masks[-1][..., tf.newaxis], [1, 1, self._params]),
      [self._masks[-1].shape[0], self._event_size * self._params])
  self._network = tf.keras.Sequential([
      # Starting this model with an `InputLayer` ensures that Keras will build
      # and propagate our `dtype` to each layer we add.
      tf.keras.layers.InputLayer((self._event_size,), dtype=self.dtype)
  ])
  # Input-to-hidden, hidden-to-hidden, and hidden-to-output layers:
  #  [..., self._event_size] -> [..., self._hidden_units[0]].
  #  [..., self._hidden_units[k-1]] -> [..., self._hidden_units[k]].
  #  [..., self._hidden_units[-1]] -> [..., event_size * self._params].
  layer_output_sizes = self._hidden_units + [self._event_size * self._params]
  for k in range(len(self._masks)):
    # Only hidden layers get the activation; the final (parameter-producing)
    # layer is left linear.
    self._network.add(tf.keras.layers.Dense(
        layer_output_sizes[k],
        kernel_initializer=_make_masked_initializer(
            self._masks[k], self._kernel_initializer),
        kernel_constraint=_make_masked_constraint(self._masks[k]),
        activation=self._activation if k + 1 < len(self._masks) else None,
        use_bias=self._use_bias,
        **self._kwargs))
  # Record that the layer has been built.
  super(AutoregressiveLayer, self).build(input_shape)
"resource": ""
} |
def call(self, x):
  """See tfkl.Layer.call."""
  with tf.compat.v2.name_scope(self.name or "AutoregressiveLayer_call"):
    x = tf.convert_to_tensor(value=x, dtype=self.dtype, name="x")
    # Remember the dynamic input shape; the output appends a params axis.
    input_shape = tf.shape(input=x)
    # TODO(b/67594795): Better support for dynamic shapes.
    if tensorshape_util.rank(x.shape) == 1:
      x = x[tf.newaxis, ...]
    flat_output = self._network(x)
    output_shape = tf.concat([input_shape, [self._params]], axis=0)
    return tf.reshape(flat_output, output_shape)
"resource": ""
} |
def draw_sample(num_samples, num_classes, logits, num_trials, dtype, seed):
  """Sample a multinomial.
  The batch shape is given by broadcasting num_trials with
  remove_last_dimension(logits).
  Args:
    num_samples: Python int or singleton integer Tensor: number of multinomial
      samples to draw.
    num_classes: Python int or singleton integer Tensor: number of classes.
    logits: Floating Tensor with last dimension k, of (unnormalized) logit
      probabilities per class.
    num_trials: Tensor of number of categorical trials each multinomial consists
      of.  num_trials[..., tf.newaxis] must broadcast with logits.
    dtype: dtype at which to emit samples.
    seed: Random seed.
  Returns:
    samples: Tensor of given dtype and shape [n] + batch_shape + [k].
  """
  with tf.name_scope("multinomial.draw_sample"):
    # broadcast the num_trials and logits to same shape
    num_trials = tf.ones_like(
        logits[..., 0], dtype=num_trials.dtype) * num_trials
    logits = tf.ones_like(
        num_trials[..., tf.newaxis], dtype=logits.dtype) * logits
    # flatten the total_count and logits
    # flat_logits has shape [B1B2...Bm, num_classes]
    flat_logits = tf.reshape(logits, [-1, num_classes])
    # Each batch member draws `num_samples` multinomials of `num_trials`
    # trials each, so it needs `num_samples * num_trials` categorical draws.
    flat_num_trials = num_samples * tf.reshape(num_trials, [-1])  # [B1B2...Bm]
    # Computes each logits and num_trials situation by map_fn.
    # Using just one batch tf.random.categorical call doesn't work because that
    # requires num_trials to be the same across all members of the batch of
    # logits.  This restriction makes sense for tf.random.categorical because
    # for it, num_trials is part of the returned shape.  However, the
    # multinomial sampler does not need that restriction, because it sums out
    # exactly that dimension.
    # One possibility would be to draw a batch categorical whose sample count is
    # max(num_trials) and mask out the excess ones.  However, if the elements of
    # num_trials vary widely, this can be wasteful of memory.
    # TODO(b/123763054, b/112152209): Revisit the possibility of writing this
    # with a batch categorical followed by batch unsorted_segment_sum, once both
    # of those work and are memory-efficient enough.
    def _sample_one_batch_member(args):
      logits, num_cat_samples = args[0], args[1]  # [K], []
      # x has shape [1, num_cat_samples = num_samples * num_trials]
      x = tf.random.categorical(
          logits[tf.newaxis, ...], num_cat_samples, seed=seed)
      x = tf.reshape(x, shape=[num_samples, -1])  # [num_samples, num_trials]
      x = tf.one_hot(
          x, depth=num_classes)  # [num_samples, num_trials, num_classes]
      # Summing the one-hot trial axis yields per-class counts.
      x = tf.reduce_sum(input_tensor=x, axis=-2)  # [num_samples, num_classes]
      return tf.cast(x, dtype=dtype)
    x = tf.map_fn(
        _sample_one_batch_member, [flat_logits, flat_num_trials],
        dtype=dtype)  # [B1B2...Bm, num_samples, num_classes]
    # reshape the results to proper shape
    x = tf.transpose(a=x, perm=[1, 0, 2])  # [num_samples, B1B2...Bm, k]
    final_shape = tf.concat([[num_samples],
                             tf.shape(input=num_trials), [num_classes]],
                            axis=0)
    x = tf.reshape(x, final_shape)
    return x
"resource": ""
} |
def _zero_dimensional_mvndiag(dtype):
  """Build a zero-dimensional MVNDiag object."""
  mvndiag = tfd.MultivariateNormalDiag(scale_diag=tf.ones([0], dtype=dtype))
  # Patch in a trivial `covariance` method so the dummy object can stand in
  # where a full covariance matrix is expected.
  mvndiag.covariance = lambda: mvndiag.variance()[..., tf.newaxis]
  return mvndiag
"resource": ""
} |
def _observe_timeseries_fn(timeseries):
  """Build an observation_noise_fn that observes a Tensor timeseries."""
  def observation_noise_fn(t):
    loc = timeseries[..., t, :]
    # Zero scale: the distribution is deterministic at the observed values.
    return tfd.MultivariateNormalDiag(loc=loc, scale_diag=tf.zeros_like(loc))
  return observation_noise_fn
"resource": ""
} |
def params_to_weights(self,
                      global_scale_variance,
                      global_scale_noncentered,
                      local_scale_variances,
                      local_scales_noncentered,
                      weights_noncentered):
  """Build regression weights from model parameters."""
  # Recombine each non-centered parameter with its scale factor.
  # NOTE(review): presumably a non-centered shrinkage-prior parameterization
  # of the weights -- confirm against the model definition.
  unscaled_global = global_scale_noncentered * tf.sqrt(global_scale_variance)
  global_scale = unscaled_global * self.weights_prior_scale
  local_scales = local_scales_noncentered * tf.sqrt(local_scale_variances)
  return weights_noncentered * local_scales * global_scale[..., tf.newaxis]
"resource": ""
} |
def _depth(g):
  """Computes the number of edges on longest path from node to root."""
  annotated = {name: _Node(name, parents) for name, parents in g.items()}
  def _explore(node):
    # `depth < 0` marks "not yet computed"; the result is memoized on the
    # node itself.
    if node.depth < 0:
      if node.parents:
        node.depth = 1 + max(
            [_explore(annotated[p]) for p in node.parents] + [-1])
      else:
        node.depth = 0
    return node.depth
  for node in annotated.values():
    _explore(node)
  return annotated
"resource": ""
} |
def _best_order(g):
  """Creates tuple of str tuple-str pairs representing resolved & sorted DAG."""
  def _explore(u):
    """Recursive function to ascend up through unvisited dependencies."""
    if u.depth < 0:
      return  # Already visited.
    if not u.parents:
      # Root node: emitted with its (empty) parent list as-is.
      result.append((u.name, u.parents))
      u.depth = -1  # Mark visited.
      return
    b = (u.name, [])
    result.append(b)
    u.depth = -1  # Mark visited.
    d = 0
    # Visit parents shallowest-first; `b[1]` accumulates this node's
    # argument-name list in place (the tuple holds a mutable list).
    for v in sorted((g.get(p) for p in u.parents), key=lambda v: v.depth):
      n0 = len(result)
      _explore(v)
      n1 = len(result)
      # '_' entries are positional placeholders for nodes emitted while
      # exploring the previous parent; consumers drop them via
      # `kwargs.pop('_', ...)` (see `_prob_chain_rule_flatten`).
      # NOTE(review): relies on the result-length bookkeeping below --
      # confirm against JointDistributionNamed's tests before altering.
      b[1].extend(['_']*d + [v.name])
      d = n1 - n0 - 1
  g = _depth(g)
  result = []
  # Start from the deepest nodes so ancestors precede descendants in the
  # final (reversed) ordering.
  for u in sorted(g.values(), key=lambda v: v.depth, reverse=True):
    _explore(u)
  return tuple(reversed(result))
"resource": ""
} |
def _prob_chain_rule_flatten(named_makers):
  """Creates lists of callables suitable for JDSeq.

  Args:
    named_makers: dict-like mapping of names to either distribution instances
      or callables returning distributions, whose keyword argument names
      identify their parent variables.

  Returns:
    dist_fn: tuple of the original makers, in resolved topological order.
    dist_fn_wrapped: tuple of uniform-signature wrappers around the makers.
    dist_fn_args: tuple of parent-name tuples (`None` for plain instances).
    dist_fn_name: tuple of the corresponding variable names.
  """
  def _make(dist_fn, args):
    # Wrap `dist_fn` in a uniform calling convention that accepts all
    # previously sampled values.
    if args is None:
      return lambda *_: dist_fn  # Already a distribution instance.
    if not args:
      return lambda *_: dist_fn()  # Maker takes no arguments.
    def _fn(*xs):
      # The trailing `len(args)` values, reversed, line up with `args` per
      # the ordering produced by `_best_order`.
      kwargs = dict(zip(args, reversed(xs[-len(args):])))
      kwargs.pop('_', None)  # Drop positional '_' placeholders.
      return dist_fn(**kwargs)
    return _fn
  named_makers = _convert_to_dict(named_makers)
  # `None` marks "already a distribution instance"; otherwise inspect the
  # callable for its required argument names (the node's parents).
  g = {k: (None if distribution_util.is_distribution_instance(v)
           else joint_distribution_sequential._get_required_args(v))  # pylint: disable=protected-access
       for k, v in named_makers.items()}
  g = _best_order(g)
  dist_fn_name, dist_fn_args = zip(*g)
  dist_fn_args = tuple(None if a is None else tuple(a) for a in dist_fn_args)
  dist_fn_wrapped = tuple(_make(named_makers[name], parents)
                          for (name, parents) in g)
  dist_fn = tuple(named_makers.get(n) for n in dist_fn_name)
  return dist_fn, dist_fn_wrapped, dist_fn_args, dist_fn_name
"resource": ""
} |
def _build(self, model):
  """Creates `dist_fn`, `dist_fn_wrapped`, `dist_fn_args`, `dist_fn_name`."""
  if not _is_dict_like(model):
    raise TypeError('`model` must be convertible to `dict` (saw: {}).'.format(
        type(model).__name__))
  flattened = _prob_chain_rule_flatten(model)
  # Note: JointDistributionSequential does not carry `_dist_fn_name`.
  (self._dist_fn,
   self._dist_fn_wrapped,
   self._dist_fn_args,
   self._dist_fn_name) = flattened
"resource": ""
} |
def variational_loss(self,
                     observations,
                     observation_index_points=None,
                     kl_weight=1.,
                     name='variational_loss'):
  """Variational loss for the VGP.
  Given `observations` and `observation_index_points`, compute the
  negative variational lower bound as specified in [Hensman, 2013][1].
  Args:
    observations: `float` `Tensor` representing collection, or batch of
      collections, of observations corresponding to
      `observation_index_points`. Shape has the form `[b1, ..., bB, e]`, which
      must be brodcastable with the batch and example shapes of
      `observation_index_points`. The batch shape `[b1, ..., bB]` must be
      broadcastable with the shapes of all other batched parameters
      (`kernel.batch_shape`, `observation_index_points`, etc.).
    observation_index_points: `float` `Tensor` representing finite (batch of)
      vector(s) of points where observations are defined. Shape has the
      form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature
      dimensions and must equal `kernel.feature_ndims` and `e1` is the number
      (size) of index points in each batch (we denote it `e1` to distinguish
      it from the numer of inducing index points, denoted `e2` below). If
      set to `None` uses `index_points` as the origin for observations.
      Default value: None.
    kl_weight: Amount by which to scale the KL divergence loss between prior
      and posterior.
      Default value: 1.
    name: Python `str` name prefixed to Ops created by this class.
      Default value: "GaussianProcess".
  Returns:
    loss: Scalar tensor representing the negative variational lower bound.
      Can be directly used in a `tf.Optimizer`.
  Raises:
    ValueError: if `mean_fn` is not `None` and is not callable.
  #### References
  [1]: Hensman, J., Lawrence, N. "Gaussian Processes for Big Data", 2013
       https://arxiv.org/abs/1309.6835
  """
  with tf.name_scope(name or 'variational_gp_loss'):
    if observation_index_points is None:
      observation_index_points = self._index_points
    observation_index_points = tf.convert_to_tensor(
        value=observation_index_points, dtype=self._dtype,
        name='observation_index_points')
    observations = tf.convert_to_tensor(
        value=observations, dtype=self._dtype, name='observations')
    kl_weight = tf.convert_to_tensor(
        value=kl_weight, dtype=self._dtype,
        name='kl_weight')
    # The variational loss is a negative ELBO. The ELBO can be broken down
    # into three terms:
    #  1. a likelihood term
    #  2. a trace term arising from the covariance of the posterior predictive
    #  3. a KL divergence term between the variational posterior over the
    #     inducing point values and the GP prior at the inducing points
    #     (computed below as `kl_term`).
    kzx = self.kernel.matrix(self._inducing_index_points,
                             observation_index_points)
    kzx_linop = tf.linalg.LinearOperatorFullMatrix(kzx)
    loc = (self._mean_fn(observation_index_points) +
           kzx_linop.matvec(self._kzz_inv_varloc, adjoint=True))
    likelihood = independent.Independent(
        normal.Normal(
            loc=loc,
            scale=tf.sqrt(self._observation_noise_variance + self._jitter),
            name='NormalLikelihood'),
        reinterpreted_batch_ndims=1)
    obs_ll = likelihood.log_prob(observations)
    chol_kzz_linop = tf.linalg.LinearOperatorLowerTriangular(self._chol_kzz)
    chol_kzz_inv_kzx = chol_kzz_linop.solve(kzx)
    kzz_inv_kzx = chol_kzz_linop.solve(chol_kzz_inv_kzx, adjoint=True)
    kxx_diag = tf.linalg.diag_part(
        self.kernel.matrix(
            observation_index_points, observation_index_points))
    # Trace of Ktilde = Kxx - Kxz Kzz^-1 Kzx, computed without materializing
    # the full matrix.
    ktilde_trace_term = (
        tf.reduce_sum(input_tensor=kxx_diag, axis=-1) -
        tf.reduce_sum(input_tensor=chol_kzz_inv_kzx ** 2, axis=[-2, -1]))
    # Tr(SB)
    # where S = A A.T, A = variational_inducing_observations_scale
    # and B = Kzz^-1 Kzx Kzx.T Kzz^-1
    #
    # Now Tr(SB) = Tr(A A.T Kzz^-1 Kzx Kzx.T Kzz^-1)
    #            = Tr(A.T Kzz^-1 Kzx Kzx.T Kzz^-1 A)
    #            = sum_ij (A.T Kzz^-1 Kzx)_{ij}^2
    other_trace_term = tf.reduce_sum(
        input_tensor=(
            self._variational_inducing_observations_posterior.scale.matmul(
                kzz_inv_kzx) ** 2),
        axis=[-2, -1])
    trace_term = (.5 * (ktilde_trace_term + other_trace_term) /
                  self._observation_noise_variance)
    inducing_prior = gaussian_process.GaussianProcess(
        kernel=self._kernel,
        mean_fn=self._mean_fn,
        index_points=self._inducing_index_points,
        observation_noise_variance=self._observation_noise_variance)
    kl_term = kl_weight * kullback_leibler.kl_divergence(
        self._variational_inducing_observations_posterior,
        inducing_prior)
    lower_bound = (obs_ll - trace_term - kl_term)
    # Negate and average over the batch so the result can be minimized.
    return -tf.reduce_mean(input_tensor=lower_bound)
"resource": ""
} |
def optimal_variational_posterior(
    kernel,
    inducing_index_points,
    observation_index_points,
    observations,
    observation_noise_variance,
    mean_fn=None,
    jitter=1e-6,
    name=None):
  """Model selection for optimal variational hyperparameters.
  Given the full training set (parameterized by `observations` and
  `observation_index_points`), compute the optimal variational
  location and scale for the VGP. This is based of the method suggested
  in [Titsias, 2009][1].
  Args:
    kernel: `PositiveSemidefiniteKernel`-like instance representing the
      GP's covariance function.
    inducing_index_points: `float` `Tensor` of locations of inducing points in
      the index set. Shape has the form `[b1, ..., bB, e2, f1, ..., fF]`, just
      like `observation_index_points`. The batch shape components needn't be
      identical to those of `observation_index_points`, but must be broadcast
      compatible with them.
    observation_index_points: `float` `Tensor` representing finite (batch of)
      vector(s) of points where observations are defined. Shape has the
      form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature
      dimensions and must equal `kernel.feature_ndims` and `e1` is the number
      (size) of index points in each batch (we denote it `e1` to distinguish
      it from the numer of inducing index points, denoted `e2` below).
    observations: `float` `Tensor` representing collection, or batch of
      collections, of observations corresponding to
      `observation_index_points`. Shape has the form `[b1, ..., bB, e]`, which
      must be brodcastable with the batch and example shapes of
      `observation_index_points`. The batch shape `[b1, ..., bB]` must be
      broadcastable with the shapes of all other batched parameters
      (`kernel.batch_shape`, `observation_index_points`, etc.).
    observation_noise_variance: `float` `Tensor` representing the variance
      of the noise in the Normal likelihood distribution of the model. May be
      batched, in which case the batch shape must be broadcastable with the
      shapes of all other batched parameters (`kernel.batch_shape`,
      `index_points`, etc.).
      Default value: `0.`
    mean_fn: Python `callable` that acts on index points to produce a (batch
      of) vector(s) of mean values at those index points. Takes a `Tensor` of
      shape `[b1, ..., bB, f1, ..., fF]` and returns a `Tensor` whose shape is
      (broadcastable with) `[b1, ..., bB]`. Default value: `None` implies
      constant zero function.
    jitter: `float` scalar `Tensor` added to the diagonal of the covariance
      matrix to ensure positive definiteness of the covariance matrix.
      Default value: `1e-6`.
    name: Python `str` name prefixed to Ops created by this class.
      Default value: "optimal_variational_posterior".
  Returns:
    loc, scale: Tuple representing the variational location and scale.
  Raises:
    ValueError: if `mean_fn` is not `None` and is not callable.
  #### References
  [1]: Titsias, M. "Variational Model Selection for Sparse Gaussian Process
       Regression", 2009.
       http://proceedings.mlr.press/v5/titsias09a/titsias09a.pdf
  """
  # NOTE(review): this function takes no `self`/`cls`; presumably declared
  # as a `@staticmethod` in the enclosing class -- confirm in full source.
  with tf.name_scope(name or 'optimal_variational_posterior'):
    dtype = dtype_util.common_dtype(
        [inducing_index_points,
         observation_index_points,
         observations,
         observation_noise_variance,
         jitter], tf.float32)
    inducing_index_points = tf.convert_to_tensor(
        value=inducing_index_points,
        dtype=dtype, name='inducing_index_points')
    observation_index_points = tf.convert_to_tensor(
        value=observation_index_points, dtype=dtype,
        name='observation_index_points')
    observations = tf.convert_to_tensor(
        value=observations, dtype=dtype, name='observations')
    observation_noise_variance = tf.convert_to_tensor(
        value=observation_noise_variance,
        dtype=dtype,
        name='observation_noise_variance')
    jitter = tf.convert_to_tensor(
        value=jitter, dtype=dtype, name='jitter')
    # Default to a constant zero function.
    if mean_fn is None:
      mean_fn = lambda x: tf.zeros([1], dtype=dtype)
    else:
      if not callable(mean_fn):
        raise ValueError('`mean_fn` must be a Python callable')
    # z are the inducing points and x are the observation index points.
    kzz = kernel.matrix(inducing_index_points, inducing_index_points)
    kzx = kernel.matrix(inducing_index_points, observation_index_points)
    noise_var_inv = tf.math.reciprocal(observation_noise_variance)
    # Sigma^-1 = Kzz + Kzx Kzx' / noise_variance (cf. [Titsias, 2009]),
    # jittered for numerical stability before factoring.
    sigma_inv = _add_diagonal_shift(
        kzz + noise_var_inv * tf.matmul(kzx, kzx, adjoint_b=True),
        jitter)
    chol_sigma_inv = tf.linalg.cholesky(sigma_inv)
    kzx_lin_op = tf.linalg.LinearOperatorFullMatrix(kzx)
    kzx_obs = kzx_lin_op.matvec(
        observations - mean_fn(observation_index_points))
    kzz_lin_op = tf.linalg.LinearOperatorFullMatrix(kzz)
    loc = (mean_fn(inducing_index_points) +
           noise_var_inv * kzz_lin_op.matvec(
               _solve_cholesky_factored_system_vec(chol_sigma_inv, kzx_obs)))
    chol_sigma_inv_lin_op = tf.linalg.LinearOperatorLowerTriangular(
        chol_sigma_inv)
    scale = chol_sigma_inv_lin_op.solve(kzz)
    return loc, scale
"resource": ""
} |
def build_is_last_day_of_season(num_steps_per_season):
  """Build utility method to compute whether the season is changing."""
  cycle_length = np.sum(num_steps_per_season)
  # Zero-indexed final step of each season within one full cycle.
  season_ends = np.cumsum(np.ravel(num_steps_per_season)) - 1
  def is_last_day_of_season(t):
    static_t = dist_util.maybe_get_static_value(t)
    if static_t is None:
      # Dynamic case: must compute with TF ops.
      position = tf.math.floormod(t, cycle_length)
      return tf.reduce_any(
          input_tensor=tf.equal(position, season_ends))
    # Static case: compute directly in numpy/Python.
    return any(static_t % cycle_length == season_ends)
  return is_last_day_of_season
"resource": ""
} |
def build_effects_to_residuals_matrix(num_seasons, dtype):
  """Build change-of-basis matrices for constrained seasonal effects.

  This method builds the matrix that transforms seasonal effects into
  effect residuals (differences from the mean effect), and additionally
  projects these residuals onto the subspace where the mean effect is zero.
  See `ConstrainedSeasonalStateSpaceModel` for mathematical details.

  Args:
    num_seasons: scalar `int` number of seasons.
    dtype: TensorFlow `dtype` for the returned values.

  Returns:
    effects_to_residuals: `Tensor` of shape
      `[num_seasons-1, num_seasons]`, such that `differences_from_mean_effect =
      matmul(effects_to_residuals, seasonal_effects)`.  In the
      notation of `ConstrainedSeasonalStateSpaceModel`, this is
      `effects_to_residuals = P * R`.
    residuals_to_effects: the (pseudo)-inverse of the above; a
      `Tensor` of shape `[num_seasons, num_seasons-1]`.  In the
      notation of `ConstrainedSeasonalStateSpaceModel`, this is
      `residuals_to_effects = R^{-1} * P'`.
  """
  # Full-rank change of basis: rows 0..num_seasons-2 compute differences from
  # the mean effect; the final row computes the mean effect itself, which
  # makes the transformation invertible.
  forward_fullrank = np.eye(num_seasons) - 1. / num_seasons
  forward_fullrank[-1, :] = 1. / num_seasons
  backward_fullrank = np.linalg.inv(forward_fullrank)
  # Project out the mean-effect dimension (the final row / column),
  # effectively constraining the mean effect to zero, and emit Tensors of the
  # requested dtype.
  effects_to_residuals = tf.cast(
      forward_fullrank[:-1, :], dtype=dtype, name='effects_to_residuals')
  residuals_to_effects = tf.cast(
      backward_fullrank[:, :-1], dtype=dtype, name='residuals_to_effects')
  return effects_to_residuals, residuals_to_effects
"resource": ""
} |
def build_seasonal_transition_matrix(
    num_seasons, is_last_day_of_season, dtype,
    basis_change_matrix=None, basis_change_matrix_inv=None):
  """Build a function computing transitions for a seasonal effect model.

  Args:
    num_seasons: scalar Python `int` number of seasons.
    is_last_day_of_season: callable taking a timestep `t` and returning a
      boolean (possibly a scalar `Tensor`) for whether the season changes
      after step `t`.
    dtype: TensorFlow `dtype` of the transition matrices.
    basis_change_matrix: optional matrix mapping seasonal effects into the
      constrained-residual basis (see `ConstrainedSeasonalStateSpaceModel`).
      If given, the permutation is conjugated into that basis.
    basis_change_matrix_inv: optional inverse of `basis_change_matrix`; must
      be provided together with it.

  Returns:
    seasonal_transition_matrix: callable taking a timestep `t` and returning
      the `LinearOperator` transition matrix for that step.
  """
  with tf.compat.v1.name_scope('build_seasonal_transition_matrix'):
    # If the season is changing, the transition matrix permutes the latent
    # state to shift all seasons up by a dimension, and sends the current
    # season's effect to the bottom.
    seasonal_permutation = np.concatenate(
        [np.arange(1, num_seasons), [0]], axis=0)
    seasonal_permutation_matrix = tf.constant(
        np.eye(num_seasons)[seasonal_permutation], dtype=dtype)
    # Optionally transform the transition matrix into a reparameterized space,
    # enforcing the zero-sum constraint for ConstrainedSeasonalStateSpaceModel.
    if basis_change_matrix is not None:
      seasonal_permutation_matrix = tf.matmul(
          basis_change_matrix,
          tf.matmul(seasonal_permutation_matrix, basis_change_matrix_inv))
    identity_matrix = tf.eye(
        tf.shape(input=seasonal_permutation_matrix)[-1], dtype=dtype)
    def seasonal_transition_matrix(t):
      # Permute at season boundaries; otherwise the state is unchanged.
      return tf.linalg.LinearOperatorFullMatrix(
          matrix=dist_util.pick_scalar_condition(
              is_last_day_of_season(t),
              seasonal_permutation_matrix,
              identity_matrix))
  return seasonal_transition_matrix
"resource": ""
} |
def build_seasonal_transition_noise(
    drift_scale, num_seasons, is_last_day_of_season):
  """Build the transition noise model for a SeasonalStateSpaceModel."""
  # Scale vector placing drift only on the final (just-ended) seasonal
  # effect; all other entries are noiseless.
  zero_scales = [tf.zeros_like(drift_scale)] * (num_seasons - 1)
  drift_scale_diag = tf.stack(zero_scales + [drift_scale], axis=-1)
  def seasonal_transition_noise(t):
    # Inject drift noise only at season boundaries; otherwise use zero scale.
    scale_diag = dist_util.pick_scalar_condition(
        is_last_day_of_season(t),
        drift_scale_diag,
        tf.zeros_like(drift_scale_diag))
    return tfd.MultivariateNormalDiag(
        loc=tf.zeros(num_seasons, dtype=drift_scale.dtype),
        scale_diag=scale_diag)
  return seasonal_transition_noise
"resource": ""
} |
q266872 | build_constrained_seasonal_transition_noise | test | def build_constrained_seasonal_transition_noise(
drift_scale, num_seasons, is_last_day_of_season):
"""Build transition noise distribution for a ConstrainedSeasonalSSM."""
# Conceptually, this method takes the noise covariance on effects L @ L'
# computed by `build_seasonal_transition_noise`, with scale factor
# L = [ 0, 0, ..., 0
# ...
# 0, 0, ..., drift_scale],
# and transforms it to act on the constrained-residual representation.
#
# The resulting noise covariance M @ M' is equivalent to
# M @ M' = effects_to_residuals @ LL' @ residuals_to_effects
# where `@` is matrix multiplication. However because this matrix is
# rank-deficient, we can't take its Cholesky decomposition directly, so we'll
# construct its lower-triangular scale factor `M` by hand instead.
#
# Concretely, let `M = P @ R @ L` be the scale factor in the
# transformed space, with matrices `R`, `P` applying the reparameterization
# and zero-mean constraint respectively as defined in the
# "Mathematical Details" section of `ConstrainedSeasonalStateSpaceModel`. It's
# easy to see (*) that the implied covariance
# `M @ M' = P @ R @ L @ L' @ R' @ P'` is just the constant matrix
# `M @ M' = [ 1, 1, ..., 1, 0
# 1, 1, ..., 1, 0
# ...
# 1, 1, ..., 1, 0
# 0, 0, ..., 0, 0] * (drift_scale / num_seasons)**2`
# with zeros in the final row and column. So we can directly construct
# the lower-triangular factor
# `Q = [ 1, 0, ... 0
# 1, 0, ..., 0
# ...
# 1, 0, ..., 0
# 0, 0, ..., 0 ] * drift_scale/num_seasons`
# such that Q @ Q' = M @ M'. In practice, we don't reify the final row and
# column full of zeroes, i.e., we construct
# `Q[:num_seasons-1, :num_seasons-1]` as the scale-TriL covariance factor.
#
# (*) Argument: `L` is zero everywhere but the last column, so `R @ L` will be
# too. Since the last column of `R` is the constant `-1/num_seasons`, `R @ L`
# is simply the matrix with constant `-drift_scale/num_seasons` in the final
# column (except the final row, which is negated) and zero in all other
# columns, and `M = P @ R @ L` additionally zeroes out the final row. Then
# M @ M' is just the outer product of that final column with itself (since all
# other columns are zero), which gives the matrix shown above.
drift_scale_tril_nonzeros = tf.concat([
tf.ones([num_seasons - 1, 1], dtype=drift_scale.dtype),
tf.zeros([num_seasons - 1, num_seasons - 2], dtype=drift_scale.dtype)],
axis=-1)
drift_scale_tril = (drift_scale_tril_nonzeros *
drift_scale[..., tf.newaxis, tf.newaxis] / num_seasons)
# Inject transition noise iff it is the last day of the season.
def seasonal_transition_noise(t):
noise_scale_tril = dist_util.pick_scalar_condition(
is_last_day_of_season(t),
drift_scale_tril,
tf.zeros_like(drift_scale_tril))
return tfd.MultivariateNormalTriL(
loc=tf.zeros(num_seasons-1, dtype=drift_scale.dtype),
scale_tril=noise_scale_tril)
return seasonal_transition_noise | python | {
"resource": ""
} |
q266873 | _is_empty_observation_data | test | def _is_empty_observation_data(
feature_ndims, observation_index_points, observations):
"""Returns `True` if given observation data is empty.
Emptiness means either
1. Both `observation_index_points` and `observations` are `None`, or
2. the "number of observations" shape is 0. The shape of
`observation_index_points` is `[..., N, f1, ..., fF]`, where `N` is the
number of observations and the `f`s are feature dims. Thus, we look at the
shape element just to the left of the leftmost feature dim. If that shape is
zero, we consider the data empty.
We don't check the shape of observations; validations are checked elsewhere in
the calling code, to ensure these shapes are consistent.
Args:
feature_ndims: the number of feature dims, as reported by the GP kernel.
observation_index_points: the observation data locations in the index set.
observations: the observation data.
Returns:
is_empty: True if the data were deemed to be empty.
"""
# If both input locations and observations are `None`, we consider this
# "empty" observation data.
if observation_index_points is None and observations is None:
return True
num_obs = tf.compat.dimension_value(
observation_index_points.shape[-(feature_ndims + 1)])
if num_obs is not None and num_obs == 0:
return True
return False | python | {
"resource": ""
} |
q266874 | _validate_observation_data | test | def _validate_observation_data(
kernel, observation_index_points, observations):
"""Ensure that observation data and locations have consistent shapes.
This basically means that the batch shapes are broadcastable. We can only
ensure this when those shapes are fully statically defined.
Args:
kernel: The GP kernel.
observation_index_points: the observation data locations in the index set.
observations: the observation data.
Raises:
ValueError: if the observations' batch shapes are not broadcastable.
"""
# Check that observation index points and observation counts broadcast.
ndims = kernel.feature_ndims
if (tensorshape_util.is_fully_defined(
observation_index_points.shape[:-ndims]) and
tensorshape_util.is_fully_defined(observations.shape)):
index_point_count = observation_index_points.shape[:-ndims]
observation_count = observations.shape
try:
tf.broadcast_static_shape(index_point_count, observation_count)
except ValueError:
# Re-raise with our own more contextual error message.
raise ValueError(
'Observation index point and observation counts are not '
'broadcastable: {} and {}, respectively.'.format(
index_point_count, observation_count)) | python | {
"resource": ""
} |
q266875 | SequentialSchedule.add | test | def add(self, scheduler, max_iteration, bigdl_type="float"):
"""
Add a learning rate scheduler to the contained `schedules`
:param scheduler: learning rate scheduler to be add
:param max_iteration: iteration numbers this scheduler will run
"""
return callBigDlFunc(bigdl_type, "addScheduler", self.value, scheduler, max_iteration) | python | {
"resource": ""
} |
q266876 | BaseOptimizer.set_checkpoint | test | def set_checkpoint(self, checkpoint_trigger,
checkpoint_path, isOverWrite=True):
"""
Configure checkpoint settings.
:param checkpoint_trigger: the interval to write snapshots
:param checkpoint_path: the path to write snapshots into
:param isOverWrite: whether to overwrite existing snapshots in path.default is True
"""
if not os.path.exists(checkpoint_path):
mkpath(checkpoint_path)
callBigDlFunc(self.bigdl_type, "setCheckPoint", self.value,
checkpoint_trigger, checkpoint_path, isOverWrite) | python | {
"resource": ""
} |
q266877 | BaseOptimizer.set_gradclip_const | test | def set_gradclip_const(self, min_value, max_value):
"""
Configure constant clipping settings.
:param min_value: the minimum value to clip by
:param max_value: the maxmimum value to clip by
"""
callBigDlFunc(self.bigdl_type, "setConstantClip", self.value, min_value, max_value) | python | {
"resource": ""
} |
q266878 | BaseOptimizer.optimize | test | def optimize(self):
"""
Do an optimization.
"""
jmodel = callJavaFunc(self.value.optimize)
from bigdl.nn.layer import Layer
return Layer.of(jmodel) | python | {
"resource": ""
} |
q266879 | BaseOptimizer.set_train_summary | test | def set_train_summary(self, summary):
"""
Set train summary. A TrainSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of TrainSummary.
:param summary: a TrainSummary object
"""
callBigDlFunc(self.bigdl_type, "setTrainSummary", self.value,
summary)
return self | python | {
"resource": ""
} |
q266880 | BaseOptimizer.set_val_summary | test | def set_val_summary(self, summary):
"""
Set validation summary. A ValidationSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of ValidationSummary.
:param summary: a ValidationSummary object
"""
callBigDlFunc(self.bigdl_type, "setValSummary", self.value,
summary)
return self | python | {
"resource": ""
} |
q266881 | Optimizer.create | test | def create(model,
training_set,
criterion,
end_trigger=None,
batch_size=32,
optim_method=None,
cores=None,
bigdl_type="float"):
"""
Create an optimizer.
Depend on the input type, the returning optimizer can be a local optimizer \
or a distributed optimizer.
:param model: the neural net model
:param training_set: (features, label) for local mode. RDD[Sample] for distributed mode.
:param criterion: the loss function
:param optim_method: the algorithm to use for optimization,
e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
:param end_trigger: when to end the optimization. default value is MapEpoch(1)
:param batch_size: training batch size
:param cores: This is for local optimizer only and use total physical cores as the default value
"""
if not end_trigger:
end_trigger = MaxEpoch(1)
if not optim_method:
optim_method = SGD()
if isinstance(training_set, RDD) or isinstance(training_set, DataSet):
return DistriOptimizer(model=model,
training_rdd=training_set,
criterion=criterion,
end_trigger=end_trigger,
batch_size=batch_size,
optim_method=optim_method,
bigdl_type=bigdl_type)
elif isinstance(training_set, tuple) and len(training_set) == 2:
x, y = training_set
return LocalOptimizer(X=x,
Y=y,
model=model,
criterion=criterion,
end_trigger=end_trigger,
batch_size=batch_size,
optim_method=optim_method,
cores=cores,
bigdl_type="float")
else:
raise Exception("Not supported training set: %s" % type(training_set)) | python | {
"resource": ""
} |
q266882 | Optimizer.set_traindata | test | def set_traindata(self, training_rdd, batch_size):
"""
Set new training dataset, for optimizer reuse
:param training_rdd: the training dataset
:param batch_size: training batch size
:return:
"""
callBigDlFunc(self.bigdl_type, "setTrainData", self.value,
training_rdd, batch_size) | python | {
"resource": ""
} |
q266883 | TrainSummary.set_summary_trigger | test | def set_summary_trigger(self, name, trigger):
"""
Set the interval of recording for each indicator.
:param tag: tag name. Supported tag names are "LearningRate", "Loss","Throughput", "Parameters". "Parameters" is an umbrella tag thatincludes weight, bias, gradWeight, gradBias, and some running status(eg. runningMean and runningVar in BatchNormalization). If youdidn't set any triggers, we will by default record Loss and Throughputin each iteration, while *NOT* recording LearningRate and Parameters,as recording parameters may introduce substantial overhead when themodel is very big, LearningRate is not a public attribute for allOptimMethod.
:param trigger: trigger
"""
return callBigDlFunc(self.bigdl_type, "summarySetTrigger", self.value,
name, trigger) | python | {
"resource": ""
} |
q266884 | read_data_sets | test | def read_data_sets(train_dir, data_type="train"):
"""
Parse or download mnist data if train_dir is empty.
:param: train_dir: The directory storing the mnist data
:param: data_type: Reading training set or testing set.It can be either "train" or "test"
:return:
```
(ndarray, ndarray) representing (features, labels)
features is a 4D unit8 numpy array [index, y, x, depth] representing each pixel valued from 0 to 255.
labels is 1D unit8 nunpy array representing the label valued from 0 to 9.
```
"""
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
if data_type == "train":
local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
SOURCE_URL + TRAIN_IMAGES)
with open(local_file, 'rb') as f:
train_images = extract_images(f)
local_file = base.maybe_download(TRAIN_LABELS, train_dir,
SOURCE_URL + TRAIN_LABELS)
with open(local_file, 'rb') as f:
train_labels = extract_labels(f)
return train_images, train_labels
else:
local_file = base.maybe_download(TEST_IMAGES, train_dir,
SOURCE_URL + TEST_IMAGES)
with open(local_file, 'rb') as f:
test_images = extract_images(f)
local_file = base.maybe_download(TEST_LABELS, train_dir,
SOURCE_URL + TEST_LABELS)
with open(local_file, 'rb') as f:
test_labels = extract_labels(f)
return test_images, test_labels | python | {
"resource": ""
} |
q266885 | get_news20 | test | def get_news20(source_dir="./data/news20/"):
"""
Parse or download news20 if source_dir is empty.
:param source_dir: The directory storing news data.
:return: A list of (tokens, label)
"""
news_dir = download_news20(source_dir)
texts = [] # list of text samples
label_id = 0
for name in sorted(os.listdir(news_dir)):
path = os.path.join(news_dir, name)
label_id += 1
if os.path.isdir(path):
for fname in sorted(os.listdir(path)):
if fname.isdigit():
fpath = os.path.join(path, fname)
if sys.version_info < (3,):
f = open(fpath)
else:
f = open(fpath, encoding='latin-1')
content = f.read()
texts.append((content, label_id))
f.close()
print('Found %s texts.' % len(texts))
return texts | python | {
"resource": ""
} |
q266886 | get_glove_w2v | test | def get_glove_w2v(source_dir="./data/news20/", dim=100):
"""
Parse or download the pre-trained glove word2vec if source_dir is empty.
:param source_dir: The directory storing the pre-trained word2vec
:param dim: The dimension of a vector
:return: A dict mapping from word to vector
"""
w2v_dir = download_glove_w2v(source_dir)
w2v_path = os.path.join(w2v_dir, "glove.6B.%sd.txt" % dim)
if sys.version_info < (3,):
w2v_f = open(w2v_path)
else:
w2v_f = open(w2v_path, encoding='latin-1')
pre_w2v = {}
for line in w2v_f.readlines():
items = line.split(" ")
pre_w2v[items[0]] = [float(i) for i in items[1:]]
w2v_f.close()
return pre_w2v | python | {
"resource": ""
} |
q266887 | KerasModel.compile | test | def compile(self, optimizer, loss, metrics=None):
"""
Configures the learning process. Must be called before fit or evaluate.
# Arguments
optimizer: Optimization method to be used. One can alternatively pass in the corresponding
string representation, such as 'sgd'.
loss: Criterion to be used. One can alternatively pass in the corresponding string
representation, such as 'mse'.
metrics: List of validation methods to be used. Default is None. One can alternatively use ['accuracy'].
"""
if isinstance(optimizer, six.string_types):
optimizer = self.__convert_optim_method(optimizer)
if isinstance(loss, six.string_types):
loss = self.__convert_criterion(loss)
if all(isinstance(metric, six.string_types) for metric in metrics):
metrics = self.__convert_metrics(metrics)
callBigDlFunc(self.bigdl_type, "compile",
self.value,
optimizer,
loss,
metrics) | python | {
"resource": ""
} |
q266888 | KerasModel.fit | test | def fit(self, x, y=None, batch_size=32, nb_epoch=10, validation_data=None, distributed=True):
"""
Train a model for a fixed number of epochs on a dataset.
# Arguments
x: Input data. A Numpy array or RDD of Sample or Image DataSet.
y: Labels. A Numpy array. Default is None if x is already RDD of Sample or Image DataSet.
batch_size: Number of samples per gradient update.
nb_epoch: Number of iterations to train.
validation_data: Tuple (x_val, y_val) where x_val and y_val are both Numpy arrays.
Or RDD of Sample. Default is None if no validation is involved.
distributed: Boolean. Whether to train the model in distributed mode or local mode.
Default is True. In local mode, x and y must both be Numpy arrays.
"""
if distributed:
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
training_data = to_sample_rdd(x, y)
if validation_data:
validation_data = to_sample_rdd(*validation_data)
elif (isinstance(x, RDD) or isinstance(x, DataSet)) and not y:
training_data = x
else:
raise TypeError("Unsupported training data type: %s" % type(x))
callBigDlFunc(self.bigdl_type, "fit",
self.value,
training_data,
batch_size,
nb_epoch,
validation_data)
else:
if validation_data:
val_x = [JTensor.from_ndarray(x) for x in to_list(validation_data[0])]
val_y = JTensor.from_ndarray(validation_data[1])
else:
val_x, val_y = None, None
callBigDlFunc(self.bigdl_type, "fit",
self.value,
[JTensor.from_ndarray(x) for x in to_list(x)],
JTensor.from_ndarray(y),
batch_size,
nb_epoch,
val_x,
val_y,
multiprocessing.cpu_count()) | python | {
"resource": ""
} |
q266889 | KerasModel.evaluate | test | def evaluate(self, x, y=None, batch_size=32):
"""
Evaluate a model on a given dataset in distributed mode.
# Arguments
x: Input data. A Numpy array or RDD of Sample.
y: Labels. A Numpy array. Default is None if x is already RDD of Sample.
batch_size: Number of samples per gradient update.
"""
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
evaluation_data = to_sample_rdd(x, y)
elif isinstance(x, RDD) and not y:
evaluation_data = x
else:
raise TypeError("Unsupported evaluation data type: %s" % type(x))
return callBigDlFunc(self.bigdl_type, "evaluate",
self.value,
evaluation_data,
batch_size) | python | {
"resource": ""
} |
q266890 | KerasModel.predict | test | def predict(self, x, distributed=True):
"""
Use a model to do prediction.
# Arguments
x: Input data. A Numpy array or RDD of Sample.
distributed: Boolean. Whether to do prediction in distributed mode or local mode.
Default is True. In local mode, x must be a Numpy array.
"""
if is_distributed:
if isinstance(x, np.ndarray):
features = to_sample_rdd(x, np.zeros([x.shape[0]]))
elif isinstance(x, RDD):
features = x
else:
raise TypeError("Unsupported prediction data type: %s" % type(x))
return self.predict_distributed(features)
else:
if isinstance(x, np.ndarray):
return self.predict_local(x)
else:
raise TypeError("Unsupported prediction data type: %s" % type(x)) | python | {
"resource": ""
} |
q266891 | get_mnist | test | def get_mnist(sc, data_type="train", location="/tmp/mnist"):
"""
Get mnist dataset and parallelize into RDDs.
Data would be downloaded automatically if it doesn't present at the specific location.
:param sc: SparkContext.
:param data_type: "train" for training data and "test" for testing data.
:param location: Location to store mnist dataset.
:return: RDD of (features: ndarray, label: ndarray).
"""
(images, labels) = mnist.read_data_sets(location, data_type)
images = sc.parallelize(images)
labels = sc.parallelize(labels + 1) # Target start from 1 in BigDL
record = images.zip(labels)
return record | python | {
"resource": ""
} |
q266892 | preprocess_mnist | test | def preprocess_mnist(sc, options):
"""
Preprocess mnist dataset.
Normalize and transform into Sample of RDDs.
"""
train_data = get_mnist(sc, "train", options.dataPath)\
.map(lambda rec_tuple: (normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),
rec_tuple[1]))\
.map(lambda t: Sample.from_ndarray(t[0], t[1]))
test_data = get_mnist(sc, "test", options.dataPath)\
.map(lambda rec_tuple: (normalizer(rec_tuple[0], mnist.TEST_MEAN, mnist.TEST_STD),
rec_tuple[1]))\
.map(lambda t: Sample.from_ndarray(t[0], t[1]))
return train_data, test_data | python | {
"resource": ""
} |
q266893 | get_end_trigger | test | def get_end_trigger(options):
"""
When to end the optimization based on input option.
"""
if options.endTriggerType.lower() == "epoch":
return MaxEpoch(options.endTriggerNum)
else:
return MaxIteration(options.endTriggerNum) | python | {
"resource": ""
} |
q266894 | validate_optimizer | test | def validate_optimizer(optimizer, test_data, options):
"""
Set validation and checkpoint for distributed optimizer.
"""
optimizer.set_validation(
batch_size=options.batchSize,
val_rdd=test_data,
trigger=EveryEpoch(),
val_method=[Top1Accuracy()]
)
optimizer.set_checkpoint(EveryEpoch(), options.checkpointPath) | python | {
"resource": ""
} |
q266895 | ModelBroadcast.value | test | def value(self):
""" Return the broadcasted value
"""
if not hasattr(self, "_value") and self._path is not None:
self._value = self._load(self._path)
return self._value | python | {
"resource": ""
} |
q266896 | callBigDlFunc | test | def callBigDlFunc(bigdl_type, name, *args):
""" Call API in PythonBigDL """
gateway = _get_gateway()
error = Exception("Cannot find function: %s" % name)
for jinvoker in JavaCreator.instance(bigdl_type, gateway).value:
# hasattr(jinvoker, name) always return true here,
# so you need to invoke the method to check if it exist or not
try:
api = getattr(jinvoker, name)
result = callJavaFunc(api, *args)
except Exception as e:
error = e
if "does not exist" not in str(e):
raise e
else:
return result
raise error | python | {
"resource": ""
} |
q266897 | callJavaFunc | test | def callJavaFunc(func, *args):
""" Call Java Function """
gateway = _get_gateway()
args = [_py2java(gateway, a) for a in args]
result = func(*args)
return _java2py(gateway, result) | python | {
"resource": ""
} |
q266898 | _to_java_object_rdd | test | def _to_java_object_rdd(rdd):
""" Return a JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever
the RDD is serialized in batch or not.
"""
rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
return \
rdd.ctx._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.pythonToJava(
rdd._jrdd, True) | python | {
"resource": ""
} |
q266899 | _py2java | test | def _py2java(gateway, obj):
""" Convert Python object into Java """
if isinstance(obj, RDD):
obj = _to_java_object_rdd(obj)
elif isinstance(obj, DataFrame):
obj = obj._jdf
elif isinstance(obj, SparkContext):
obj = obj._jsc
elif isinstance(obj, (list, tuple)):
obj = ListConverter().convert([_py2java(gateway, x) for x in obj],
gateway._gateway_client)
elif isinstance(obj, dict):
result = {}
for (key, value) in obj.items():
result[key] = _py2java(gateway, value)
obj = MapConverter().convert(result, gateway._gateway_client)
elif isinstance(obj, JavaValue):
obj = obj.value
elif isinstance(obj, JavaObject):
pass
elif isinstance(obj, (int, long, float, bool, bytes, unicode)):
pass
else:
data = bytearray(PickleSerializer().dumps(obj))
obj = gateway.jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.loads(data)
return obj | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.