# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Constraints: functions that impose constraints on weight values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import six

from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.constraints.Constraint')
class Constraint(object):

  def __call__(self, w):
    return w

  def get_config(self):
    return {}


@keras_export('keras.constraints.MaxNorm', 'keras.constraints.max_norm')
class MaxNorm(Constraint):
  """MaxNorm weight constraint.

  Constrains the weights incident to each hidden unit
  to have a norm less than or equal to a desired value.

  Arguments:
    max_value: the maximum norm for the incoming weights.
    axis: integer, axis along which to calculate weight norms.
      For instance, in a `Dense` layer the weight matrix
      has shape `(input_dim, output_dim)`,
      set `axis` to `0` to constrain each weight vector
      of length `(input_dim,)`.
      In a `Conv2D` layer with `data_format="channels_last"`,
      the weight tensor has shape
      `(rows, cols, input_depth, output_depth)`,
      set `axis` to `[0, 1, 2]`
      to constrain the weights of each filter tensor of size
      `(rows, cols, input_depth)`.
  """

  def __init__(self, max_value=2, axis=0):
    self.max_value = max_value
    self.axis = axis

  def __call__(self, w):
    norms = K.sqrt(
        math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True))
    desired = K.clip(norms, 0, self.max_value)
    return w * (desired / (K.epsilon() + norms))

  def get_config(self):
    return {'max_value': self.max_value, 'axis': self.axis}


@keras_export('keras.constraints.NonNeg', 'keras.constraints.non_neg')
class NonNeg(Constraint):
  """Constrains the weights to be non-negative."""

  def __call__(self, w):
    return w * math_ops.cast(math_ops.greater_equal(w, 0.), K.floatx())


@keras_export('keras.constraints.UnitNorm', 'keras.constraints.unit_norm')
class UnitNorm(Constraint):
  """Constrains the weights incident to each hidden unit to have unit norm.

  Arguments:
    axis: integer, axis along which to calculate weight norms.
      For instance, in a `Dense` layer the weight matrix
      has shape `(input_dim, output_dim)`,
      set `axis` to `0` to constrain each weight vector
      of length `(input_dim,)`.
      In a `Conv2D` layer with `data_format="channels_last"`,
      the weight tensor has shape
      `(rows, cols, input_depth, output_depth)`,
      set `axis` to `[0, 1, 2]`
      to constrain the weights of each filter tensor of size
      `(rows, cols, input_depth)`.
  """

  def __init__(self, axis=0):
    self.axis = axis

  def __call__(self, w):
    return w / (
        K.epsilon() + K.sqrt(
            math_ops.reduce_sum(
                math_ops.square(w), axis=self.axis, keepdims=True)))

  def get_config(self):
    return {'axis': self.axis}


@keras_export('keras.constraints.MinMaxNorm', 'keras.constraints.min_max_norm')
class MinMaxNorm(Constraint):
  """MinMaxNorm weight constraint.

  Constrains the weights incident to each hidden unit
  to have the norm between a lower bound and an upper bound.

  Arguments:
    min_value: the minimum norm for the incoming weights.
    max_value: the maximum norm for the incoming weights.
    rate: rate for enforcing the constraint: weights will be
      rescaled to yield
      `(1 - rate) * norm + rate * norm.clip(min_value, max_value)`.
      Effectively, this means that rate=1.0 stands for strict
      enforcement of the constraint, while rate<1.0 means that
      weights will be rescaled at each step to slowly move
      towards a value inside the desired interval.
    axis: integer, axis along which to calculate weight norms.
      For instance, in a `Dense` layer the weight matrix
      has shape `(input_dim, output_dim)`,
      set `axis` to `0` to constrain each weight vector
      of length `(input_dim,)`.
      In a `Conv2D` layer with `data_format="channels_last"`,
      the weight tensor has shape
      `(rows, cols, input_depth, output_depth)`,
      set `axis` to `[0, 1, 2]`
      to constrain the weights of each filter tensor of size
      `(rows, cols, input_depth)`.
  """

  def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0):
    self.min_value = min_value
    self.max_value = max_value
    self.rate = rate
    self.axis = axis

  def __call__(self, w):
    norms = K.sqrt(
        math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True))
    desired = (
        self.rate * K.clip(norms, self.min_value, self.max_value) +
        (1 - self.rate) * norms)
    return w * (desired / (K.epsilon() + norms))

  def get_config(self):
    return {
        'min_value': self.min_value,
        'max_value': self.max_value,
        'rate': self.rate,
        'axis': self.axis
    }


@keras_export('keras.constraints.RadialConstraint',
              'keras.constraints.radial_constraint')
class RadialConstraint(Constraint):
  """Constrains `Conv2D` kernel weights to be the same for each radius.

  For example, the desired output for the following 4-by-4 kernel::

  ```
      kernel = [[v_00, v_01, v_02, v_03],
                [v_10, v_11, v_12, v_13],
                [v_20, v_21, v_22, v_23],
                [v_30, v_31, v_32, v_33]]
  ```

  is this::

  ```
      kernel = [[v_11, v_11, v_11, v_11],
                [v_11, v_33, v_33, v_11],
                [v_11, v_33, v_33, v_11],
                [v_11, v_11, v_11, v_11]]
  ```

  This constraint can be applied to any `Conv2D` layer version, including
  `Conv2DTranspose` and `SeparableConv2D`, and with either `"channels_last"`
  or `"channels_first"` data format. The method assumes the weight tensor is
  of shape `(rows, cols, input_depth, output_depth)`.
  """

  def __call__(self, w):
    w_shape = w.shape
    if w_shape.rank is None or w_shape.rank != 4:
      raise ValueError(
          'The weight tensor must be of rank 4, but is of shape: %s' % w_shape)

    height, width, channels, kernels = w_shape
    w = K.reshape(w, (height, width, channels * kernels))
    # TODO(cpeter): Switch map_fn for a faster tf.vectorized_map once K.switch
    # is supported.
    w = K.map_fn(
        self._kernel_constraint,
        K.stack(array_ops.unstack(w, axis=-1), axis=0))
    return K.reshape(K.stack(array_ops.unstack(w, axis=0), axis=-1),
                     (height, width, channels, kernels))

  def _kernel_constraint(self, kernel):
    """Radially constrains a kernel with shape (height, width, channels)."""
    padding = K.constant([[1, 1], [1, 1]], dtype='int32')

    kernel_shape = K.shape(kernel)[0]
    start = K.cast(kernel_shape / 2, 'int32')

    kernel_new = K.switch(
        K.cast(math_ops.floormod(kernel_shape, 2), 'bool'),
        lambda: kernel[start - 1:start, start - 1:start],
        lambda: kernel[start - 1:start, start - 1:start] + K.zeros(  # pylint: disable=g-long-lambda
            (2, 2), dtype=kernel.dtype))
    index = K.switch(
        K.cast(math_ops.floormod(kernel_shape, 2), 'bool'),
        lambda: K.constant(0, dtype='int32'),
        lambda: K.constant(1, dtype='int32'))
    while_condition = lambda index, *args: K.less(index, start)

    def body_fn(i, array):
      return i + 1, array_ops.pad(
          array,
          padding,
          constant_values=kernel[start + i, start + i])

    _, kernel_new = control_flow_ops.while_loop(
        while_condition,
        body_fn, [index, kernel_new],
        shape_invariants=[index.get_shape(),
                          tensor_shape.TensorShape([None, None])])
    return kernel_new


# Aliases.

max_norm = MaxNorm
non_neg = NonNeg
unit_norm = UnitNorm
min_max_norm = MinMaxNorm
radial_constraint = RadialConstraint

# Legacy aliases.
maxnorm = max_norm
nonneg = non_neg
unitnorm = unit_norm


@keras_export('keras.constraints.serialize')
def serialize(constraint):
  return serialize_keras_object(constraint)


@keras_export('keras.constraints.deserialize')
def deserialize(config, custom_objects=None):
  return deserialize_keras_object(
      config,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='constraint')


@keras_export('keras.constraints.get')
def get(identifier):
  if identifier is None:
    return None
  if isinstance(identifier, dict):
    return deserialize(identifier)
  elif isinstance(identifier, six.string_types):
    config = {'class_name': str(identifier), 'config': {}}
    return deserialize(config)
  elif callable(identifier):
    return identifier
  else:
    raise ValueError('Could not interpret constraint identifier: ' +
                     str(identifier))
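A brief usage sketch (not part of the file above; the shapes and values are illustrative assumptions): applying a constraint directly to a weight tensor, and attaching one to a layer via `kernel_constraint`, which Keras re-applies after every optimizer update.

import numpy as np
import tensorflow as tf

# Rescale each column (axis=0) of a kernel so its L2 norm is at most 2.0.
w = tf.constant(np.random.rand(4, 8) * 10.0, dtype=tf.float32)
constrained = tf.keras.constraints.MaxNorm(max_value=2.0, axis=0)(w)
norms = np.sqrt(np.square(tf.keras.backend.eval(constrained)).sum(axis=0))
assert np.all(norms <= 2.0 + 1e-5)

# A layer applies the constraint to its kernel after each gradient step.
layer = tf.keras.layers.Dense(8, kernel_constraint=tf.keras.constraints.max_norm(2.))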
repo_name: tensorflow-r1.15.5-nv23.03
file_path: tensorflow/python/keras/constraints.py
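Before the test file below, a minimal NumPy sketch (an illustrative addition; the values come from the `test_loss_wrapper` comments in that file) of the weighted-reduction arithmetic the tests assert for `keras.losses.MeanSquaredError`:

import numpy as np

y_true = np.array([[1., 9.], [2., 5.]])
y_pred = np.array([[4., 8.], [12., 3.]])
sample_weight = np.array([1.2, 0.5])

# Per-sample MSE over the last axis: [5., 52.]
mse = np.mean((y_pred - y_true) ** 2, axis=-1)
# Weighted: [6., 26.]; SUM_OVER_BATCH_SIZE-style reduction: (6 + 26) / 2 = 16.
assert np.isclose(np.mean(mse * sample_weight), 16.0)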
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras loss functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil import numpy as np from tensorflow.python import keras from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras.utils import losses_utils from tensorflow.python.platform import test try: import h5py # pylint:disable=g-import-not-at-top except ImportError: h5py = None ALL_LOSSES = [keras.losses.mean_squared_error, keras.losses.mean_absolute_error, keras.losses.mean_absolute_percentage_error, keras.losses.mean_squared_logarithmic_error, keras.losses.squared_hinge, keras.losses.hinge, keras.losses.categorical_crossentropy, keras.losses.binary_crossentropy, keras.losses.kullback_leibler_divergence, keras.losses.poisson, keras.losses.cosine_similarity, keras.losses.logcosh, keras.losses.categorical_hinge] class _MSEMAELoss(object): """Loss function with internal state, for testing serialization code.""" def __init__(self, mse_fraction): self.mse_fraction = mse_fraction def __call__(self, y_true, y_pred, sample_weight=None): return (self.mse_fraction * keras.losses.mse(y_true, y_pred) + (1 - self.mse_fraction) * keras.losses.mae(y_true, y_pred)) def get_config(self): return {'mse_fraction': self.mse_fraction} class KerasLossesTest(test.TestCase): def test_objective_shapes_3d(self): with self.cached_session(): y_a = keras.backend.variable(np.random.random((5, 6, 7))) y_b = keras.backend.variable(np.random.random((5, 6, 7))) for obj in ALL_LOSSES: objective_output = obj(y_a, y_b) self.assertListEqual(objective_output.shape.as_list(), [5, 6]) def test_objective_shapes_2d(self): with self.cached_session(): y_a = keras.backend.variable(np.random.random((6, 7))) y_b = keras.backend.variable(np.random.random((6, 7))) for obj in ALL_LOSSES: objective_output = obj(y_a, y_b) self.assertListEqual(objective_output.shape.as_list(), [ 6, ]) def test_cce_one_hot(self): with self.cached_session(): y_a = keras.backend.variable(np.random.randint(0, 7, (5, 6))) y_b = keras.backend.variable(np.random.random((5, 6, 7))) objective_output = keras.losses.sparse_categorical_crossentropy(y_a, y_b) assert keras.backend.eval(objective_output).shape == (5, 6) y_a = keras.backend.variable(np.random.randint(0, 7, (6,))) y_b = keras.backend.variable(np.random.random((6, 7))) objective_output = keras.losses.sparse_categorical_crossentropy(y_a, y_b) assert keras.backend.eval(objective_output).shape == (6,) @test_util.run_in_graph_and_eager_modes def test_categorical_crossentropy_loss(self): target = keras.backend.variable(np.random.randint(0, 1, (5, 1))) logits = keras.backend.variable(np.random.random((5, 1))) softmax_output = 
keras.backend.softmax(logits) output_from_logit = keras.losses.categorical_crossentropy( target, logits, from_logits=True) output_from_softmax = keras.losses.categorical_crossentropy( target, softmax_output) np.testing.assert_allclose( keras.backend.eval(output_from_logit), keras.backend.eval(output_from_softmax), atol=1e-5) @test_util.run_in_graph_and_eager_modes def test_categorical_crossentropy_loss_with_unknown_rank_tensor(self): t = keras.backend.placeholder() p = keras.backend.placeholder() o = keras.losses.categorical_crossentropy(t, p) t_val = ops.convert_to_tensor([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) p_val = ops.convert_to_tensor([[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]]) f = keras.backend.function([t, p], o) result = f([t_val, p_val]) self.assertArrayNear(result, [.105, .116, .062], 1e-3) # from logits p_val = ops.convert_to_tensor([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) o = keras.losses.categorical_crossentropy(t, p, from_logits=True) f = keras.backend.function([t, p], o) result = f([t_val, p_val]) self.assertArrayNear(result, [.002, 0, .17], 1e-3) @test_util.run_in_graph_and_eager_modes def test_sparse_categorical_crossentropy_loss(self): target = keras.backend.variable(np.random.randint(0, 1, (5, 1))) logits = keras.backend.variable(np.random.random((5, 1))) softmax_output = keras.backend.softmax(logits) output_from_logit = keras.losses.sparse_categorical_crossentropy( target, logits, from_logits=True) output_from_softmax = keras.losses.sparse_categorical_crossentropy( target, softmax_output) np.testing.assert_allclose( keras.backend.eval(output_from_logit), keras.backend.eval(output_from_softmax), atol=1e-5) @test_util.run_in_graph_and_eager_modes def test_sparse_categorical_crossentropy_loss_with_unknown_rank_tensor(self): t = keras.backend.placeholder() p = keras.backend.placeholder() o = keras.losses.sparse_categorical_crossentropy(t, p) t_val = ops.convert_to_tensor([0, 1, 2]) p_val = ops.convert_to_tensor([[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]]) f = keras.backend.function([t, p], o) result = f([t_val, p_val]) self.assertArrayNear(result, [.105, .116, .062], 1e-3) # from logits p_val = ops.convert_to_tensor([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) o = keras.losses.sparse_categorical_crossentropy(t, p, from_logits=True) f = keras.backend.function([t, p], o) result = f([t_val, p_val]) self.assertArrayNear(result, [.002, 0, .17], 1e-3) @test_util.run_in_graph_and_eager_modes def test_binary_crossentropy_loss(self): target = keras.backend.variable(np.random.randint(0, 1, (5, 1))) logits = keras.backend.variable(np.random.random((5, 1))) sigmoid_output = keras.backend.sigmoid(logits) output_from_logit = keras.losses.binary_crossentropy( target, logits, from_logits=True) output_from_sigmoid = keras.losses.binary_crossentropy( target, sigmoid_output) np.testing.assert_allclose( keras.backend.eval(output_from_logit), keras.backend.eval(output_from_sigmoid), atol=1e-5) def test_serialization(self): fn = keras.losses.get('mse') config = keras.losses.serialize(fn) new_fn = keras.losses.deserialize(config) self.assertEqual(fn, new_fn) def test_categorical_hinge(self): y_pred = keras.backend.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]])) y_true = keras.backend.variable(np.array([[0, 1, 0], [1, 0, 0]])) expected_loss = ((0.3 - 0.2 + 1) + (0.7 - 0.1 + 1)) / 2.0 loss = keras.backend.eval(keras.losses.categorical_hinge(y_true, y_pred)) self.assertAllClose(expected_loss, np.mean(loss)) def test_serializing_loss_class(self): orig_loss_class = 
_MSEMAELoss(0.3) with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}): serialized = keras.losses.serialize(orig_loss_class) with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}): deserialized = keras.losses.deserialize(serialized) assert isinstance(deserialized, _MSEMAELoss) assert deserialized.mse_fraction == 0.3 def test_serializing_model_with_loss_class(self): tmpdir = self.get_temp_dir() self.addCleanup(shutil.rmtree, tmpdir) model_filename = os.path.join(tmpdir, 'custom_loss.h5') with self.cached_session(): with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}): loss = _MSEMAELoss(0.3) inputs = keras.layers.Input((2,)) outputs = keras.layers.Dense(1, name='model_output')(inputs) model = keras.models.Model(inputs, outputs) model.compile(optimizer='sgd', loss={'model_output': loss}) model.fit(np.random.rand(256, 2), np.random.rand(256, 1)) if h5py is None: return model.save(model_filename) with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}): loaded_model = keras.models.load_model(model_filename) loaded_model.predict(np.random.rand(128, 2)) def test_loss_wrapper(self): loss_fn = keras.losses.get('mse') mse_obj = keras.losses.LossFunctionWrapper(loss_fn, name=loss_fn.__name__) self.assertEqual(mse_obj.name, 'mean_squared_error') self.assertEqual(mse_obj.reduction, losses_utils.ReductionV2.AUTO) y_true = constant_op.constant([[1., 9.], [2., 5.]]) y_pred = constant_op.constant([[4., 8.], [12., 3.]]) sample_weight = constant_op.constant([1.2, 0.5]) loss = mse_obj(y_true, y_pred, sample_weight=sample_weight) # mse = [((4 - 1)^2 + (8 - 9)^2) / 2, ((12 - 2)^2 + (3 - 5)^2) / 2] # mse = [5, 52] # weighted_mse = [5 * 1.2, 52 * 0.5] = [6, 26] # reduced_weighted_mse = (6 + 26) / 2 = self.assertAllClose(self.evaluate(loss), 16, 1e-2) def test_invalid_reduction(self): with self.assertRaisesRegexp(ValueError, 'Invalid Reduction Key Foo.'): keras.losses.MeanSquaredError(reduction='Foo') mse_obj = keras.losses.MeanSquaredError() y = constant_op.constant([1]) mse_obj.reduction = 'Bar' with self.assertRaisesRegexp(ValueError, 'Invalid Reduction Key Bar.'): mse_obj(y, y) @test_util.run_all_in_graph_and_eager_modes class MeanSquaredErrorTest(test.TestCase): def test_config(self): mse_obj = keras.losses.MeanSquaredError( reduction=losses_utils.ReductionV2.SUM, name='mse_1') self.assertEqual(mse_obj.name, 'mse_1') self.assertEqual(mse_obj.reduction, losses_utils.ReductionV2.SUM) def test_all_correct_unweighted(self): mse_obj = keras.losses.MeanSquaredError() y_true = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3)) loss = mse_obj(y_true, y_true) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_unweighted(self): mse_obj = keras.losses.MeanSquaredError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = mse_obj(y_true, y_pred) self.assertAlmostEqual(self.evaluate(loss), 49.5, 3) def test_scalar_weighted(self): mse_obj = keras.losses.MeanSquaredError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = mse_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), 113.85, 3) def test_sample_weighted(self): mse_obj = keras.losses.MeanSquaredError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) 
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1)) loss = mse_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 767.8 / 6, 3) def test_timestep_weighted(self): mse_obj = keras.losses.MeanSquaredError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1), dtype=dtypes.float32) sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3)) loss = mse_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 587 / 6, 3) def test_zero_weighted(self): mse_obj = keras.losses.MeanSquaredError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = mse_obj(y_true, y_pred, sample_weight=0) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_invalid_sample_weight(self): mse_obj = keras.losses.MeanSquaredError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1)) sample_weight = constant_op.constant([3, 6, 5, 0], shape=(2, 2)) with self.assertRaisesRegexp(ValueError, 'weights can not be broadcast to values'): mse_obj(y_true, y_pred, sample_weight=sample_weight) def test_no_reduction(self): mse_obj = keras.losses.MeanSquaredError( reduction=losses_utils.ReductionV2.NONE) y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = mse_obj(y_true, y_pred, sample_weight=2.3) loss = self.evaluate(loss) self.assertArrayNear(loss, [84.3333, 143.3666], 1e-3) def test_sum_reduction(self): mse_obj = keras.losses.MeanSquaredError( reduction=losses_utils.ReductionV2.SUM) y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = mse_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), 227.69998, 3) @test_util.run_all_in_graph_and_eager_modes class MeanAbsoluteErrorTest(test.TestCase): def test_config(self): mae_obj = keras.losses.MeanAbsoluteError( reduction=losses_utils.ReductionV2.SUM, name='mae_1') self.assertEqual(mae_obj.name, 'mae_1') self.assertEqual(mae_obj.reduction, losses_utils.ReductionV2.SUM) def test_all_correct_unweighted(self): mae_obj = keras.losses.MeanAbsoluteError() y_true = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3)) loss = mae_obj(y_true, y_true) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_unweighted(self): mae_obj = keras.losses.MeanAbsoluteError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = mae_obj(y_true, y_pred) self.assertAlmostEqual(self.evaluate(loss), 5.5, 3) def test_scalar_weighted(self): mae_obj = keras.losses.MeanAbsoluteError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = mae_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), 12.65, 3) def test_sample_weighted(self): mae_obj = keras.losses.MeanAbsoluteError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) sample_weight = 
constant_op.constant([1.2, 3.4], shape=(2, 1)) loss = mae_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 81.4 / 6, 3) def test_timestep_weighted(self): mae_obj = keras.losses.MeanAbsoluteError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1), dtype=dtypes.float32) sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3)) loss = mae_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 83 / 6, 3) def test_zero_weighted(self): mae_obj = keras.losses.MeanAbsoluteError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = mae_obj(y_true, y_pred, sample_weight=0) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_invalid_sample_weight(self): mae_obj = keras.losses.MeanAbsoluteError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1)) sample_weight = constant_op.constant([3, 6, 5, 0], shape=(2, 2)) with self.assertRaisesRegexp(ValueError, 'weights can not be broadcast to values'): mae_obj(y_true, y_pred, sample_weight=sample_weight) def test_no_reduction(self): mae_obj = keras.losses.MeanAbsoluteError( reduction=losses_utils.ReductionV2.NONE) y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = mae_obj(y_true, y_pred, sample_weight=2.3) loss = self.evaluate(loss) self.assertArrayNear(loss, [10.7333, 14.5666], 1e-3) def test_sum_reduction(self): mae_obj = keras.losses.MeanAbsoluteError( reduction=losses_utils.ReductionV2.SUM) y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = mae_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), 25.29999, 3) @test_util.run_all_in_graph_and_eager_modes class MeanAbsolutePercentageErrorTest(test.TestCase): def test_config(self): mape_obj = keras.losses.MeanAbsolutePercentageError( reduction=losses_utils.ReductionV2.SUM, name='mape_1') self.assertEqual(mape_obj.name, 'mape_1') self.assertEqual(mape_obj.reduction, losses_utils.ReductionV2.SUM) def test_all_correct_unweighted(self): mape_obj = keras.losses.MeanAbsolutePercentageError() y_true = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = mape_obj(y_true, y_true) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_unweighted(self): mape_obj = keras.losses.MeanAbsolutePercentageError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = mape_obj(y_true, y_pred) self.assertAlmostEqual(self.evaluate(loss), 211.8518, 3) def test_scalar_weighted(self): mape_obj = keras.losses.MeanAbsolutePercentageError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = mape_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), 487.259, 3) def test_sample_weighted(self): mape_obj = keras.losses.MeanAbsolutePercentageError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = 
constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1)) loss = mape_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 422.8888, 3) def test_timestep_weighted(self): mape_obj = keras.losses.MeanAbsolutePercentageError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1), dtype=dtypes.float32) sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3)) loss = mape_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 694.4445, 3) def test_zero_weighted(self): mape_obj = keras.losses.MeanAbsolutePercentageError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = mape_obj(y_true, y_pred, sample_weight=0) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_no_reduction(self): mape_obj = keras.losses.MeanAbsolutePercentageError( reduction=losses_utils.ReductionV2.NONE) y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = mape_obj(y_true, y_pred, sample_weight=2.3) loss = self.evaluate(loss) self.assertArrayNear(loss, [621.8518, 352.6666], 1e-3) @test_util.run_all_in_graph_and_eager_modes class MeanSquaredLogarithmicErrorTest(test.TestCase): def test_config(self): msle_obj = keras.losses.MeanSquaredLogarithmicError( reduction=losses_utils.ReductionV2.SUM, name='mape_1') self.assertEqual(msle_obj.name, 'mape_1') self.assertEqual(msle_obj.reduction, losses_utils.ReductionV2.SUM) def test_unweighted(self): msle_obj = keras.losses.MeanSquaredLogarithmicError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = msle_obj(y_true, y_pred) self.assertAlmostEqual(self.evaluate(loss), 1.4370, 3) def test_scalar_weighted(self): msle_obj = keras.losses.MeanSquaredLogarithmicError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = msle_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), 3.3051, 3) def test_sample_weighted(self): msle_obj = keras.losses.MeanSquaredLogarithmicError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1)) loss = msle_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 3.7856, 3) def test_timestep_weighted(self): msle_obj = keras.losses.MeanSquaredLogarithmicError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1), dtype=dtypes.float32) sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3)) loss = msle_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 2.6473, 3) def test_zero_weighted(self): msle_obj = keras.losses.MeanSquaredLogarithmicError() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = 
msle_obj(y_true, y_pred, sample_weight=0) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) @test_util.run_all_in_graph_and_eager_modes class CosineSimilarityTest(test.TestCase): def l2_norm(self, x, axis): epsilon = 1e-12 square_sum = np.sum(np.square(x), axis=axis, keepdims=True) x_inv_norm = 1 / np.sqrt(np.maximum(square_sum, epsilon)) return np.multiply(x, x_inv_norm) def setup(self, axis=1): self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32) self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32) y_true = self.l2_norm(self.np_y_true, axis) y_pred = self.l2_norm(self.np_y_pred, axis) self.expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(axis,)) self.y_true = constant_op.constant(self.np_y_true) self.y_pred = constant_op.constant(self.np_y_pred) def test_config(self): cosine_obj = keras.losses.CosineSimilarity( axis=2, reduction=losses_utils.ReductionV2.SUM, name='cosine_loss') self.assertEqual(cosine_obj.name, 'cosine_loss') self.assertEqual(cosine_obj.reduction, losses_utils.ReductionV2.SUM) def test_unweighted(self): self.setup() cosine_obj = keras.losses.CosineSimilarity() loss = cosine_obj(self.y_true, self.y_pred) expected_loss = np.mean(self.expected_loss) self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_scalar_weighted(self): self.setup() cosine_obj = keras.losses.CosineSimilarity() sample_weight = 2.3 loss = cosine_obj(self.y_true, self.y_pred, sample_weight=sample_weight) expected_loss = np.mean(self.expected_loss * sample_weight) self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_sample_weighted(self): self.setup() cosine_obj = keras.losses.CosineSimilarity() sample_weight = np.asarray([1.2, 3.4]) loss = cosine_obj( self.y_true, self.y_pred, sample_weight=constant_op.constant(sample_weight)) expected_loss = np.mean(self.expected_loss * sample_weight) self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_timestep_weighted(self): self.setup() cosine_obj = keras.losses.CosineSimilarity() np_y_true = self.np_y_true.reshape((2, 3, 1)) np_y_pred = self.np_y_pred.reshape((2, 3, 1)) sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape((2, 3)) y_true = self.l2_norm(np_y_true, 2) y_pred = self.l2_norm(np_y_pred, 2) expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(2,)) y_true = constant_op.constant(np_y_true) y_pred = constant_op.constant(np_y_pred) loss = cosine_obj( y_true, y_pred, sample_weight=constant_op.constant(sample_weight)) expected_loss = np.mean(expected_loss * sample_weight) self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_zero_weighted(self): self.setup() cosine_obj = keras.losses.CosineSimilarity() loss = cosine_obj(self.y_true, self.y_pred, sample_weight=0) self.assertAlmostEqual(self.evaluate(loss), 0., 3) def test_axis(self): self.setup(axis=1) cosine_obj = keras.losses.CosineSimilarity(axis=1) loss = cosine_obj(self.y_true, self.y_pred) expected_loss = np.mean(self.expected_loss) self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) @test_util.run_all_in_graph_and_eager_modes class BinaryCrossentropyTest(test.TestCase): def test_config(self): bce_obj = keras.losses.BinaryCrossentropy( reduction=losses_utils.ReductionV2.SUM, name='bce_1') self.assertEqual(bce_obj.name, 'bce_1') self.assertEqual(bce_obj.reduction, losses_utils.ReductionV2.SUM) def test_all_correct_unweighted(self): y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=dtypes.float32) bce_obj = keras.losses.BinaryCrossentropy() loss = 
bce_obj(y_true, y_true) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) # Test with logits. logits = constant_op.constant([[100.0, -100.0, -100.0], [-100.0, 100.0, -100.0], [-100.0, -100.0, 100.0]]) bce_obj = keras.losses.BinaryCrossentropy(from_logits=True) loss = bce_obj(y_true, logits) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_unweighted(self): y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2]) y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2]) bce_obj = keras.losses.BinaryCrossentropy() loss = bce_obj(y_true, y_pred) # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999 # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON) # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON] # Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON)) # = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON), # -log(Y_MAX + EPSILON), -log(1)] # = [0, 15.33, 0, 0] # Reduced loss = 15.33 / 4 self.assertAlmostEqual(self.evaluate(loss), 3.833, 3) # Test with logits. y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]]) logits = constant_op.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]]) bce_obj = keras.losses.BinaryCrossentropy(from_logits=True) loss = bce_obj(y_true, logits) # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x))) # (where x = logits and z = y_true) # = [((100 - 100 * 1 + log(1 + exp(-100))) + # (0 + 100 * 0 + log(1 + exp(-100))) + # (100 - 100 * 1 + log(1 + exp(-100))), # ((100 - 100 * 0 + log(1 + exp(-100))) + # (100 - 100 * 1 + log(1 + exp(-100))) + # (0 + 100 * 1 + log(1 + exp(-100))))] # = [(0 + 0 + 0) / 3, 200 / 3] # Reduced loss = (0 + 66.666) / 2 self.assertAlmostEqual(self.evaluate(loss), 33.333, 3) def test_scalar_weighted(self): bce_obj = keras.losses.BinaryCrossentropy() y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2]) y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2]) loss = bce_obj(y_true, y_pred, sample_weight=2.3) # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999 # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON) # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON] # Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON)) # = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON), # -log(Y_MAX + EPSILON), -log(1)] # = [0, 15.33, 0, 0] # Weighted loss = [0, 15.33 * 2.3, 0, 0] # Reduced loss = 15.33 * 2.3 / 4 self.assertAlmostEqual(self.evaluate(loss), 8.817, 3) # Test with logits. y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]]) logits = constant_op.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]]) bce_obj = keras.losses.BinaryCrossentropy(from_logits=True) loss = bce_obj(y_true, logits, sample_weight=2.3) # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x))) # (where x = logits and z = y_true) # Loss = [(0 + 0 + 0) / 3, 200 / 3] # Weighted loss = [0 * 2.3, 66.666 * 2.3] # Reduced loss = (0 + 66.666 * 2.3) / 2 self.assertAlmostEqual(self.evaluate(loss), 76.667, 3) def test_sample_weighted(self): bce_obj = keras.losses.BinaryCrossentropy() y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2]) y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2]) sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1)) loss = bce_obj(y_true, y_pred, sample_weight=sample_weight) # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999 # y` = clip_ops.clip_by_value(output, EPSILON, 1. 
- EPSILON) # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON] # Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON)) # = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON), # -log(Y_MAX + EPSILON), -log(1)] # = [0, 15.33, 0, 0] # Reduced loss = 15.33 * 1.2 / 4 self.assertAlmostEqual(self.evaluate(loss), 4.6, 3) # Test with logits. y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]]) logits = constant_op.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]]) weights = constant_op.constant([4, 3]) bce_obj = keras.losses.BinaryCrossentropy(from_logits=True) loss = bce_obj(y_true, logits, sample_weight=weights) # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x))) # (where x = logits and z = y_true) # Loss = [(0 + 0 + 0)/3, 200 / 3] # Weighted loss = [0 * 4, 66.666 * 3] # Reduced loss = (0 + 66.666 * 3) / 2 self.assertAlmostEqual(self.evaluate(loss), 100, 3) def test_no_reduction(self): y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]]) logits = constant_op.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]]) bce_obj = keras.losses.BinaryCrossentropy( from_logits=True, reduction=losses_utils.ReductionV2.NONE) loss = bce_obj(y_true, logits) # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x))) # (where x = logits and z = y_true) # Loss = [(0 + 0 + 0)/3, (200)/3] self.assertAllClose((0., 66.6666), self.evaluate(loss), 3) def test_label_smoothing(self): logits = constant_op.constant([[100.0, -100.0, -100.0]]) y_true = constant_op.constant([[1, 0, 1]]) label_smoothing = 0.1 # Loss: max(x, 0) - x * z + log(1 + exp(-abs(x))) # (where x = logits and z = y_true) # Label smoothing: z' = z * (1 - L) + 0.5L # 1 = 1 - 0.5L # 0 = 0.5L # Applying the above two fns to the given input: # (100 - 100 * (1 - 0.5 L) + 0 + # 0 + 100 * (0.5 L) + 0 + # 0 + 100 * (1 - 0.5 L) + 0) * (1/3) # = (100 + 50L) * 1/3 bce_obj = keras.losses.BinaryCrossentropy( from_logits=True, label_smoothing=label_smoothing) loss = bce_obj(y_true, logits) expected_value = (100.0 + 50.0 * label_smoothing) / 3.0 self.assertAlmostEqual(self.evaluate(loss), expected_value, 3) @test_util.run_all_in_graph_and_eager_modes class CategoricalCrossentropyTest(test.TestCase): def test_config(self): cce_obj = keras.losses.CategoricalCrossentropy( reduction=losses_utils.ReductionV2.SUM, name='bce_1') self.assertEqual(cce_obj.name, 'bce_1') self.assertEqual(cce_obj.reduction, losses_utils.ReductionV2.SUM) def test_all_correct_unweighted(self): y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=dtypes.int64) y_pred = constant_op.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=dtypes.float32) cce_obj = keras.losses.CategoricalCrossentropy() loss = cce_obj(y_true, y_pred) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) # Test with logits. logits = constant_op.constant([[10., 0., 0.], [0., 10., 0.], [0., 0., 10.]]) cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True) loss = cce_obj(y_true, logits) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_unweighted(self): cce_obj = keras.losses.CategoricalCrossentropy() y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) y_pred = constant_op.constant( [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32) loss = cce_obj(y_true, y_pred) self.assertAlmostEqual(self.evaluate(loss), .3239, 3) # Test with logits. 
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True) loss = cce_obj(y_true, logits) self.assertAlmostEqual(self.evaluate(loss), .0573, 3) def test_scalar_weighted(self): cce_obj = keras.losses.CategoricalCrossentropy() y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) y_pred = constant_op.constant( [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32) loss = cce_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), .7449, 3) # Test with logits. logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True) loss = cce_obj(y_true, logits, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), .1317, 3) def test_sample_weighted(self): cce_obj = keras.losses.CategoricalCrossentropy() y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) y_pred = constant_op.constant( [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32) sample_weight = constant_op.constant([[1.2], [3.4], [5.6]], shape=(3, 1)) loss = cce_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3) # Test with logits. logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True) loss = cce_obj(y_true, logits, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3) def test_no_reduction(self): y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) cce_obj = keras.losses.CategoricalCrossentropy( from_logits=True, reduction=losses_utils.ReductionV2.NONE) loss = cce_obj(y_true, logits) self.assertAllClose((0.001822, 0.000459, 0.169846), self.evaluate(loss), 3) def test_label_smoothing(self): logits = constant_op.constant([[100.0, -100.0, -100.0]]) y_true = constant_op.constant([[1, 0, 0]]) label_smoothing = 0.1 # Softmax Cross Entropy Loss: -\sum_i p_i \log q_i # where for a softmax activation # \log q_i = x_i - \log \sum_j \exp x_j # = x_i - x_max - \log \sum_j \exp (x_j - x_max) # For our activations, [100, -100, -100] # \log ( exp(0) + exp(-200) + exp(-200) ) = 0 # so our log softmaxes become: [0, -200, -200] # Label smoothing: z' = z * (1 - L) + L/n # 1 = 1 - L + L/n # 0 = L/n # Applying the above two fns to the given input: # -0 * (1 - L + L/n) + 200 * L/n + 200 * L/n = 400 L/n cce_obj = keras.losses.CategoricalCrossentropy( from_logits=True, label_smoothing=label_smoothing) loss = cce_obj(y_true, logits) expected_value = 400.0 * label_smoothing / 3.0 self.assertAlmostEqual(self.evaluate(loss), expected_value, 3) @test_util.run_all_in_graph_and_eager_modes class SparseCategoricalCrossentropyTest(test.TestCase): def test_config(self): cce_obj = keras.losses.SparseCategoricalCrossentropy( reduction=losses_utils.ReductionV2.SUM, name='scc') self.assertEqual(cce_obj.name, 'scc') self.assertEqual(cce_obj.reduction, losses_utils.ReductionV2.SUM) def test_all_correct_unweighted(self): y_true = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64) y_pred = constant_op.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=dtypes.float32) cce_obj = keras.losses.SparseCategoricalCrossentropy() loss = cce_obj(y_true, y_pred) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) # Test with logits. 
logits = constant_op.constant([[10., 0., 0.], [0., 10., 0.], [0., 0., 10.]]) cce_obj = keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss = cce_obj(y_true, logits) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_unweighted(self): cce_obj = keras.losses.SparseCategoricalCrossentropy() y_true = constant_op.constant([0, 1, 2]) y_pred = constant_op.constant( [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32) loss = cce_obj(y_true, y_pred) self.assertAlmostEqual(self.evaluate(loss), .3239, 3) # Test with logits. logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) cce_obj = keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss = cce_obj(y_true, logits) self.assertAlmostEqual(self.evaluate(loss), .0573, 3) def test_scalar_weighted(self): cce_obj = keras.losses.SparseCategoricalCrossentropy() y_true = constant_op.constant([[0], [1], [2]]) y_pred = constant_op.constant( [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32) loss = cce_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), .7449, 3) # Test with logits. logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) cce_obj = keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss = cce_obj(y_true, logits, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), .1317, 3) def test_sample_weighted(self): cce_obj = keras.losses.SparseCategoricalCrossentropy() y_true = constant_op.constant([[0], [1], [2]]) y_pred = constant_op.constant( [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32) sample_weight = constant_op.constant([[1.2], [3.4], [5.6]], shape=(3, 1)) loss = cce_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3) # Test with logits. 
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) cce_obj = keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss = cce_obj(y_true, logits, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3) def test_no_reduction(self): y_true = constant_op.constant([[0], [1], [2]]) logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]]) cce_obj = keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=losses_utils.ReductionV2.NONE) loss = cce_obj(y_true, logits) self.assertAllClose((0.001822, 0.000459, 0.169846), self.evaluate(loss), 3) @test_util.run_all_in_graph_and_eager_modes class HingeTest(test.TestCase): def test_config(self): hinge_obj = keras.losses.Hinge( reduction=losses_utils.ReductionV2.SUM, name='hinge_loss') self.assertEqual(hinge_obj.name, 'hinge_loss') self.assertEqual(hinge_obj.reduction, losses_utils.ReductionV2.SUM) def test_unweighted(self): hinge_obj = keras.losses.Hinge() y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]]) y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]]) # loss = max(0, 1-y_true * y_pred), where y_true is -1/1 # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]] # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]] # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]] # loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4] # = [0.6, 0.4125] # reduced loss = (0.6 + 0.4125) / 2 loss = hinge_obj(y_true, y_pred) self.assertAllClose(0.506, self.evaluate(loss), atol=1e-3) def test_scalar_weighted(self): hinge_obj = keras.losses.Hinge() y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]]) y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]]) # loss = max(0, 1-y_true * y_pred), where y_true is -1/1 # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]] # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]] # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]] # loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4] # = [0.6, 0.4125] # weighted_loss = [0.6 * 2.3, 0.4125 * 2.3] # reduced loss = (0.6 + 0.4125) * 2.3 / 2 loss = hinge_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), 1.164, 3) # Verify we get the same output when the same input is given loss_2 = hinge_obj(y_true, y_pred, sample_weight=2.3) self.assertAllClose(self.evaluate(loss), self.evaluate(loss_2), 1e-3) def test_sample_weighted(self): hinge_obj = keras.losses.Hinge() y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]]) y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]]) # loss = max(0, 1-y_true * y_pred), where y_true is -1/1 # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]] # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]] # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]] # loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4] # = [0.6, 0.4125] # weighted loss = [0.6 * 1.2, 0.4125 * 3.4] # reduced loss = (0.6 * 1.2 + 0.4125 * 3.4) / 2 sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1)) loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(self.evaluate(loss), 1.061, 1e-3) def test_timestep_weighted(self): hinge_obj = keras.losses.Hinge() y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]], shape=(2, 4, 1)) y_pred = constant_op.constant( [[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]], shape=(2, 4, 1)) sample_weight = 
constant_op.constant([3, 6, 5, 0, 4, 2, 1, 3], shape=(2, 4)) # loss = max(0, 1-y_true * y_pred), where y_true is -1/1 # y_true = [[[-1], [1], [-1], [1]], [[-1], [-1], [1], [1]]] # y_true * y_pred = [[[0.3], [0.2], [0.1], [1.6]], # [[0.25], [1], [0.5], [0.6]]] # 1 - y_true * y_pred = [[[0.7], [0.8], [0.9], [-0.6]], # [[0.75], [0], [0.5], [0.4]]] # loss = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]] # weighted loss = [[2.1, 4.8, 4.5, 0], [3, 0, 0.5, 1.2]] # reduced loss = (2.1 + 4.8 + 4.5 + 0 + 3 + 0 + 0.5 + 1.2) / 8 loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(self.evaluate(loss), 2.012, 1e-3) def test_zero_weighted(self): hinge_obj = keras.losses.Hinge() y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]]) y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]]) loss = hinge_obj(y_true, y_pred, sample_weight=0) self.assertAllClose(self.evaluate(loss), 0., 1e-3) @test_util.run_all_in_graph_and_eager_modes class SquaredHingeTest(test.TestCase): def test_config(self): sq_hinge_obj = keras.losses.SquaredHinge( reduction=losses_utils.ReductionV2.SUM, name='sq_hinge_loss') self.assertEqual(sq_hinge_obj.name, 'sq_hinge_loss') self.assertEqual(sq_hinge_obj.reduction, losses_utils.ReductionV2.SUM) def test_unweighted(self): sq_hinge_obj = keras.losses.SquaredHinge() y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]]) y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]]) # loss = max(0, 1-y_true * y_pred), where y_true is -1/1 # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]] # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]] # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]] # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]] # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0], # [0.5625, 0, 0.25, 0.16]] # loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4] # = [0.485, 0.2431] # reduced loss = (0.485 + 0.2431) / 2 loss = sq_hinge_obj(y_true, y_pred) self.assertAllClose(self.evaluate(loss), 0.364, 1e-3) def test_scalar_weighted(self): sq_hinge_obj = keras.losses.SquaredHinge() y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]]) y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]]) # loss = max(0, 1-y_true * y_pred), where y_true is -1/1 # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]] # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]] # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]] # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]] # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0], # [0.5625, 0, 0.25, 0.16]] # loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4] # = [0.485, 0.2431] # weighted loss = [0.485 * 2.3, 0.2431 * 2.3] # reduced loss = (0.485 + 0.2431) * 2.3 / 2 loss = sq_hinge_obj(y_true, y_pred, sample_weight=2.3) self.assertAllClose(self.evaluate(loss), 0.837, 1e-3) # Verify we get the same output when the same input is given loss_2 = sq_hinge_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3) def test_sample_weighted(self): sq_hinge_obj = keras.losses.SquaredHinge() y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]]) y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]]) # loss = max(0, 1-y_true * y_pred), where y_true is -1/1 # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]] # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], 
[0.25, 1, 0.5, 0.6]] # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]] # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]] # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0], # [0.5625, 0, 0.25, 0.16]] # loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4] # = [0.485, 0.2431] # weighted loss = [0.485 * 1.2, 0.2431 * 3.4] # reduced loss = (0.485 * 1.2 + 0.2431 * 3.4) / 2 sample_weight = constant_op.constant([1.2, 3.4]) loss = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(self.evaluate(loss), 0.704, 1e-3) def test_timestep_weighted(self): sq_hinge_obj = keras.losses.SquaredHinge() y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]], shape=(2, 4, 1)) y_pred = constant_op.constant( [[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]], shape=(2, 4, 1)) sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2, 1, 3], shape=(2, 4)) # loss = max(0, 1-y_true * y_pred), where y_true is -1/1 # y_true = [[[-1], [1], [-1], [1]], [[-1], [-1], [1], [1]]] # y_true * y_pred = [[[0.3], [0.2], [0.1], [1.6]], # [[0.25], [1], [0.5], [0.6]]] # 1 - y_true * y_pred = [[[0.7], [0.8], [0.9], [-0.6]], # [[0.75], [0], [0.5], [0.4]]] # loss = [[0.49, 0.64, 0.81, 0], [0.5625, 0, 0.25, 0.16]] # weighted loss = [[1.47, 3.84, 4.05, 0], [2.25, 0, 0.25, 0.48]] # reduced loss = (1.47 + 3.84 + 4.05 + 0 + 2.25 + 0 + 0.25 + 0.48) / 8 loss = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(self.evaluate(loss), 1.542, 1e-3) def test_zero_weighted(self): sq_hinge_obj = keras.losses.SquaredHinge() y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]]) y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]]) loss = sq_hinge_obj(y_true, y_pred, sample_weight=0) self.assertAllClose(self.evaluate(loss), 0., 1e-3) @test_util.run_all_in_graph_and_eager_modes class CategoricalHingeTest(test.TestCase): def test_config(self): cat_hinge_obj = keras.losses.CategoricalHinge( reduction=losses_utils.ReductionV2.SUM, name='cat_hinge_loss') self.assertEqual(cat_hinge_obj.name, 'cat_hinge_loss') self.assertEqual(cat_hinge_obj.reduction, losses_utils.ReductionV2.SUM) def test_unweighted(self): cat_hinge_obj = keras.losses.CategoricalHinge() y_true = constant_op.constant([1, 9, 2, -5], shape=(2, 2)) y_pred = constant_op.constant([4, 8, 12, 8], shape=(2, 2), dtype=dtypes.float32) loss = cat_hinge_obj(y_true, y_pred) # pos = reduce_sum(y_true * y_pred) = [1*4+8*9, 12*2+8*-5] = [76, -16] # neg = reduce_max((1. - y_true) * y_pred) = [[0, -64], [-12, 48]] = [0, 48] # cat_hinge = max(0., neg - pos + 1.) 
= [0, 65] # reduced_loss = (0 + 65)/2 = 32.5 self.assertAlmostEqual(self.evaluate(loss), 32.5, 3) def test_scalar_weighted(self): cat_hinge_obj = keras.losses.CategoricalHinge() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = cat_hinge_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), 83.95, 3) # Verify we get the same output when the same input is given loss_2 = cat_hinge_obj(y_true, y_pred, sample_weight=2.3) self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3) def test_sample_weighted(self): cat_hinge_obj = keras.losses.CategoricalHinge() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1)) loss = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 124.1, 3) def test_timestep_weighted(self): cat_hinge_obj = keras.losses.CategoricalHinge() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1), dtype=dtypes.float32) sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3)) loss = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 4.0, 3) def test_zero_weighted(self): cat_hinge_obj = keras.losses.CategoricalHinge() y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=dtypes.float32) loss = cat_hinge_obj(y_true, y_pred, sample_weight=0) self.assertAlmostEqual(self.evaluate(loss), 0., 3) @test_util.run_all_in_graph_and_eager_modes class LogCoshTest(test.TestCase): def setup(self): y_pred = np.asarray([1, 9, 2, -5, -2, 6]).reshape((2, 3)) y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3)) self.batch_size = 6 error = y_pred - y_true self.expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2) self.y_pred = constant_op.constant(y_pred, dtype=dtypes.float32) self.y_true = constant_op.constant(y_true) def test_config(self): logcosh_obj = keras.losses.LogCosh( reduction=losses_utils.ReductionV2.SUM, name='logcosh_loss') self.assertEqual(logcosh_obj.name, 'logcosh_loss') self.assertEqual(logcosh_obj.reduction, losses_utils.ReductionV2.SUM) def test_unweighted(self): self.setup() logcosh_obj = keras.losses.LogCosh() loss = logcosh_obj(self.y_true, self.y_pred) expected_loss = np.sum(self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_scalar_weighted(self): self.setup() logcosh_obj = keras.losses.LogCosh() sample_weight = 2.3 loss = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight) expected_loss = sample_weight * np.sum( self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) # Verify we get the same output when the same input is given loss_2 = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3) def test_sample_weighted(self): self.setup() logcosh_obj = keras.losses.LogCosh() sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1)) loss = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight) expected_loss = np.multiply( self.expected_losses, np.asarray([1.2, 1.2, 1.2, 
3.4, 3.4, 3.4]).reshape((2, 3)))
    expected_loss = np.sum(expected_loss) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_timestep_weighted(self):
    self.setup()
    logcosh_obj = keras.losses.LogCosh()
    y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
    y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
    error = y_pred - y_true
    expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2)
    sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))

    y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
    y_true = constant_op.constant(y_true)
    loss = logcosh_obj(
        y_true,
        y_pred,
        sample_weight=constant_op.constant(sample_weight, shape=(2, 3)))
    expected_loss = np.sum(expected_losses * sample_weight) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_zero_weighted(self):
    self.setup()
    logcosh_obj = keras.losses.LogCosh()
    sample_weight = 0
    loss = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 0., 3)


@test_util.run_all_in_graph_and_eager_modes
class PoissonTest(test.TestCase):

  def setup(self):
    self.np_y_pred = np.asarray([1, 9, 2, 5, 2, 6]).reshape((2, 3))
    self.np_y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))

    self.batch_size = 6
    self.expected_losses = self.np_y_pred - np.multiply(self.np_y_true,
                                                        np.log(self.np_y_pred))

    self.y_pred = constant_op.constant(self.np_y_pred, dtype=dtypes.float32)
    self.y_true = constant_op.constant(self.np_y_true)

  def test_config(self):
    poisson_obj = keras.losses.Poisson(
        reduction=losses_utils.ReductionV2.SUM, name='poisson')
    self.assertEqual(poisson_obj.name, 'poisson')
    self.assertEqual(poisson_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_unweighted(self):
    self.setup()
    poisson_obj = keras.losses.Poisson()
    loss = poisson_obj(self.y_true, self.y_pred)
    expected_loss = np.sum(self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_scalar_weighted(self):
    self.setup()
    poisson_obj = keras.losses.Poisson()
    sample_weight = 2.3
    loss = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    expected_loss = sample_weight * np.sum(
        self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

    # Verify we get the same output when the same input is given
    loss_2 = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)

  def test_sample_weighted(self):
    self.setup()
    poisson_obj = keras.losses.Poisson()
    sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    expected_loss = np.multiply(
        self.expected_losses,
        np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
    expected_loss = np.sum(expected_loss) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_timestep_weighted(self):
    self.setup()
    poisson_obj = keras.losses.Poisson()
    y_true = self.np_y_true.reshape(2, 3, 1)
    y_pred = self.np_y_pred.reshape(2, 3, 1)
    sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3, 1)
    expected_losses = y_pred - np.multiply(y_true, np.log(y_pred))

    y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
    y_true = constant_op.constant(y_true)
    loss = poisson_obj(
        y_true,
        y_pred,
        sample_weight=constant_op.constant(sample_weight, shape=(2, 3)))
    expected_loss =
np.sum(expected_losses * sample_weight) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_zero_weighted(self): self.setup() poisson_obj = keras.losses.Poisson() loss = poisson_obj(self.y_true, self.y_pred, sample_weight=0) self.assertAlmostEqual(self.evaluate(loss), 0., 3) @test_util.run_all_in_graph_and_eager_modes class KLDivergenceTest(test.TestCase): def setup(self): self.np_y_pred = np.asarray([.4, .9, .12, .36, .3, .4]).reshape((2, 3)) self.np_y_true = np.asarray([.5, .8, .12, .7, .43, .8]).reshape((2, 3)) self.batch_size = 2 self.expected_losses = np.multiply(self.np_y_true, np.log(self.np_y_true / self.np_y_pred)) self.y_pred = constant_op.constant(self.np_y_pred, dtype=dtypes.float32) self.y_true = constant_op.constant(self.np_y_true) def test_config(self): k_obj = keras.losses.KLDivergence( reduction=losses_utils.ReductionV2.SUM, name='kld') self.assertEqual(k_obj.name, 'kld') self.assertEqual(k_obj.reduction, losses_utils.ReductionV2.SUM) def test_unweighted(self): self.setup() k_obj = keras.losses.KLDivergence() loss = k_obj(self.y_true, self.y_pred) expected_loss = np.sum(self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_scalar_weighted(self): self.setup() k_obj = keras.losses.KLDivergence() sample_weight = 2.3 loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight) expected_loss = sample_weight * np.sum( self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) # Verify we get the same output when the same input is given loss_2 = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3) def test_sample_weighted(self): self.setup() k_obj = keras.losses.KLDivergence() sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1)) loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight) expected_loss = np.multiply( self.expected_losses, np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape(2, 3)) expected_loss = np.sum(expected_loss) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_timestep_weighted(self): self.setup() k_obj = keras.losses.KLDivergence() y_true = self.np_y_true.reshape(2, 3, 1) y_pred = self.np_y_pred.reshape(2, 3, 1) sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3) expected_losses = np.sum( np.multiply(y_true, np.log(y_true / y_pred)), axis=-1) y_pred = constant_op.constant(y_pred, dtype=dtypes.float32) y_true = constant_op.constant(y_true) loss = k_obj( y_true, y_pred, sample_weight=constant_op.constant(sample_weight)) num_timesteps = 3 expected_loss = np.sum(expected_losses * sample_weight) / ( self.batch_size * num_timesteps) self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3) def test_zero_weighted(self): self.setup() k_obj = keras.losses.KLDivergence() loss = k_obj(self.y_true, self.y_pred, sample_weight=0) self.assertAlmostEqual(self.evaluate(loss), 0., 3) @test_util.run_all_in_graph_and_eager_modes class HuberLossTest(test.TestCase): def huber_loss(self, y_true, y_pred, delta=1.0): error = y_pred - y_true abs_error = np.abs(error) quadratic = np.minimum(abs_error, delta) linear = np.subtract(abs_error, quadratic) return np.add( np.multiply(0.5, np.multiply(quadratic, quadratic)), np.multiply(delta, linear)) def setup(self, delta=1.0): self.np_y_pred = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3)) self.np_y_true = np.asarray([1., 0., 1., 1., 0., 
0.]).reshape((2, 3)) self.batch_size = 6 self.expected_losses = self.huber_loss(self.np_y_true, self.np_y_pred, delta) self.y_pred = constant_op.constant(self.np_y_pred) self.y_true = constant_op.constant(self.np_y_true) def test_config(self): h_obj = keras.losses.Huber( reduction=losses_utils.ReductionV2.SUM, name='huber') self.assertEqual(h_obj.name, 'huber') self.assertEqual(h_obj.reduction, losses_utils.ReductionV2.SUM) def test_all_correct(self): self.setup() h_obj = keras.losses.Huber() loss = h_obj(self.y_true, self.y_true) self.assertAlmostEqual(self.evaluate(loss), 0.0, 3) def test_unweighted(self): self.setup() h_obj = keras.losses.Huber() loss = h_obj(self.y_true, self.y_pred) actual_loss = np.sum(self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3) def test_scalar_weighted(self): self.setup() h_obj = keras.losses.Huber() sample_weight = 2.3 loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight) actual_loss = sample_weight * np.sum(self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3) # Verify we get the same output when the same input is given loss_2 = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3) def test_sample_weighted(self): self.setup() h_obj = keras.losses.Huber() sample_weight = constant_op.constant((1.2, 3.4), shape=(2, 1)) loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight) actual_loss = np.multiply( self.expected_losses, np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))) actual_loss = np.sum(actual_loss) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3) def test_timestep_weighted(self): self.setup() h_obj = keras.losses.Huber() y_pred = self.np_y_pred.reshape((2, 3, 1)) y_true = self.np_y_true.reshape((2, 3, 1)) expected_losses = self.huber_loss(y_true, y_pred) y_pred = constant_op.constant(y_pred) y_true = constant_op.constant(y_true) sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1)) loss = h_obj( y_true, y_pred, sample_weight=constant_op.constant(sample_weight, shape=(2, 3))) actual_loss = np.multiply(expected_losses, sample_weight) actual_loss = np.sum(actual_loss) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3) def test_zero_weighted(self): self.setup() h_obj = keras.losses.Huber() sample_weight = 0 loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight) self.assertAlmostEqual(self.evaluate(loss), 0., 3) def test_non_default_delta(self): self.setup(delta=0.8) h_obj = keras.losses.Huber(delta=0.8) sample_weight = 2.3 loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight) actual_loss = sample_weight * np.sum(self.expected_losses) / self.batch_size self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/losses_test.py
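The hand-computed traces in the comments of losses_test.py above can be checked outside the test harness. Below is a minimal NumPy sketch (not part of the TensorFlow sources; the function name is illustrative) that reproduces the squared-hinge numbers from the test_sample_weighted comment trace:

```python
import numpy as np

def squared_hinge_loss(y_true, y_pred, sample_weight=None):
  # Labels in {0, 1} are mapped to {-1, 1}, as the comment trace assumes.
  y_true = 2. * np.asarray(y_true, dtype=float) - 1.
  # Per-sample mean of square(max(0, 1 - y_true * y_pred)).
  per_sample = np.mean(
      np.square(np.maximum(0., 1. - y_true * y_pred)), axis=-1)
  if sample_weight is not None:
    per_sample = per_sample * np.asarray(sample_weight, dtype=float)
  return per_sample.mean()

y_true = [[0, 1, 0, 1], [0, 0, 1, 1]]
y_pred = [[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]]
# Matches the reduced loss traced in the test comments: ~0.704.
print(squared_hinge_loss(y_true, y_pred, sample_weight=[1.2, 3.4]))
```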
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for custom training loops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.eager import backprop from tensorflow.python.eager import def_function from tensorflow.python.framework import ops from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class LayerWithLosses(keras.layers.Layer): def build(self, input_shape): self.v = self.add_weight( name='hey', shape=(), initializer='ones', regularizer=keras.regularizers.l1(100)) def call(self, inputs): self.add_loss(math_ops.reduce_sum(inputs)) return self.v * inputs class LayerWithMetrics(keras.layers.Layer): def build(self, input_shape): self.mean = keras.metrics.Mean(name='mean_object') def call(self, inputs): self.add_metric( math_ops.reduce_mean(inputs), name='mean_tensor', aggregation='mean') self.add_metric(self.mean(inputs)) return inputs class LayerWithTrainingArg(keras.layers.Layer): def call(self, inputs, training=None): self.training = training if training: return inputs else: return 0. 
* inputs def add_loss_step(defun): optimizer = keras.optimizer_v2.adam.Adam() model = testing_utils.get_model_from_layers([LayerWithLosses()], input_shape=(10,)) def train_step(x): with backprop.GradientTape() as tape: model(x) assert len(model.losses) == 2 loss = math_ops.reduce_sum(model.losses) gradients = tape.gradient(loss, model.trainable_weights) optimizer.apply_gradients(zip(gradients, model.trainable_weights)) return loss if defun: train_step = def_function.function(train_step) x = array_ops.ones((10, 10)) return train_step(x) def batch_norm_step(defun): optimizer = keras.optimizer_v2.adadelta.Adadelta() model = testing_utils.get_model_from_layers([ keras.layers.BatchNormalization(momentum=0.9), keras.layers.Dense(1, kernel_initializer='zeros', activation='softmax') ], input_shape=(10,)) def train_step(x, y): with backprop.GradientTape() as tape: y_pred = model(x, training=True) loss = keras.losses.binary_crossentropy(y, y_pred) gradients = tape.gradient(loss, model.trainable_weights) optimizer.apply_gradients(zip(gradients, model.trainable_weights)) return loss, model(x, training=False) if defun: train_step = def_function.function(train_step) x, y = array_ops.ones((10, 10)), array_ops.ones((10, 1)) return train_step(x, y) def add_metric_step(defun): optimizer = keras.optimizer_v2.rmsprop.RMSprop() model = testing_utils.get_model_from_layers([ LayerWithMetrics(), keras.layers.Dense(1, kernel_initializer='zeros', activation='softmax') ], input_shape=(10,)) def train_step(x, y): with backprop.GradientTape() as tape: y_pred_1 = model(x) y_pred_2 = model(2 * x) y_pred = y_pred_1 + y_pred_2 loss = keras.losses.mean_squared_error(y, y_pred) gradients = tape.gradient(loss, model.trainable_weights) optimizer.apply_gradients(zip(gradients, model.trainable_weights)) assert len(model.metrics) == 2 return [m.result() for m in model.metrics] if defun: train_step = def_function.function(train_step) x, y = array_ops.ones((10, 10)), array_ops.zeros((10, 1)) metrics = train_step(x, y) assert np.allclose(metrics[0], 1.5) assert np.allclose(metrics[1], 1.5) return metrics @keras_parameterized.run_with_all_model_types class CustomTrainingLoopTest(keras_parameterized.TestCase): @parameterized.named_parameters(('add_loss_step', add_loss_step), ('add_metric_step', add_metric_step), ('batch_norm_step', batch_norm_step)) def test_eager_and_tf_function(self, train_step): eager_result = train_step(defun=False) fn_result = train_step(defun=True) self.assertAllClose(eager_result, fn_result) @parameterized.named_parameters(('eager', False), ('defun', True)) def test_training_arg_propagation(self, defun): model = testing_utils.get_model_from_layers([LayerWithTrainingArg()], input_shape=(1,)) def train_step(x): return model(x), model(x, training=False), model(x, training=True) if defun: train_step = def_function.function(train_step) x = array_ops.ones((1, 1)) results = train_step(x) self.assertAllClose(results[0], array_ops.zeros((1, 1))) self.assertAllClose(results[1], array_ops.zeros((1, 1))) self.assertAllClose(results[2], array_ops.ones((1, 1))) @parameterized.named_parameters(('eager', False), ('defun', True)) def test_learning_phase_propagation(self, defun): class MyModel(keras.layers.Layer): def __init__(self): super(MyModel, self).__init__() self.layer = LayerWithTrainingArg() def call(self, inputs): return self.layer(inputs) model = MyModel() def train_step(x): no_learning_phase_out = model(x) self.assertIsNone(model.layer.training) with keras.backend.learning_phase_scope(0): inf_learning_phase_out = 
model(x) self.assertEqual(model.layer.training, 0) with keras.backend.learning_phase_scope(1): train_learning_phase_out = model(x) self.assertEqual(model.layer.training, 1) return [ no_learning_phase_out, inf_learning_phase_out, train_learning_phase_out ] if defun: train_step = def_function.function(train_step) x = array_ops.ones((1, 1)) results = train_step(x) self.assertAllClose(results[0], array_ops.zeros((1, 1))) self.assertAllClose(results[1], array_ops.zeros((1, 1))) self.assertAllClose(results[2], array_ops.ones((1, 1))) @parameterized.named_parameters(('eager', False), ('defun', True)) def test_training_arg_priorities(self, defun): class MyModel(keras.layers.Layer): def __init__(self): super(MyModel, self).__init__() self.layer = LayerWithTrainingArg() def call(self, inputs, training=False): return self.layer(inputs) model = MyModel() def train_step(x): explicit_out = model(x, training=True) default_out = model(x) with keras.backend.learning_phase_scope(1): parent_out = model(x, training=False) lr_out = model(x) return [explicit_out, default_out, parent_out, lr_out] if defun: train_step = def_function.function(train_step) x = array_ops.ones((1, 1)) results = train_step(x) self.assertAllClose(results[0], array_ops.ones((1, 1))) self.assertAllClose(results[1], array_ops.zeros((1, 1))) self.assertAllClose(results[2], array_ops.zeros((1, 1))) self.assertAllClose(results[3], array_ops.ones((1, 1))) if __name__ == '__main__': ops.enable_eager_execution() test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/custom_training_loop_test.py
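custom_training_loop_test.py above drives optimization by hand with backprop.GradientTape, optionally wrapping the step in def_function.function. A rough public-API equivalent of that pattern (a sketch assuming TF 2.x-style tf.keras and tf.function, not the test's internal helpers) looks like:

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
optimizer = tf.keras.optimizers.Adam()

@tf.function  # plays the role of the `defun` switch in the tests above
def train_step(x, y):
  with tf.GradientTape() as tape:
    y_pred = model(x, training=True)
    loss = tf.reduce_mean(tf.keras.losses.mean_squared_error(y, y_pred))
  grads = tape.gradient(loss, model.trainable_weights)
  optimizer.apply_gradients(zip(grads, model.trainable_weights))
  return loss

loss = train_step(tf.ones((10, 10)), tf.zeros((10, 1)))
```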
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for compiled Model subclassing.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import model_subclassing_test_util as model_util from tensorflow.python.keras import testing_utils from tensorflow.python.ops import math_ops from tensorflow.python.platform import test try: import h5py # pylint:disable=g-import-not-at-top except ImportError: h5py = None @keras_parameterized.run_all_keras_modes class ModelSubclassCompiledTest(keras_parameterized.TestCase): def test_single_io_workflow_with_np_arrays(self): num_classes = 2 num_samples = 100 input_dim = 50 model = model_util.SimpleTestModel( num_classes=num_classes, use_dp=True, use_bn=True) model.compile( loss='mse', optimizer='rmsprop', metrics=['acc', keras.metrics.CategoricalAccuracy()], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones((num_samples, input_dim)) y = np.zeros((num_samples, num_classes)) model.fit(x, y, epochs=2, batch_size=32, verbose=0) _ = model.evaluate(x, y, verbose=0) def test_multi_io_workflow_with_np_arrays(self): num_classes = (2, 3) num_samples = 1000 input_dim = 50 model = model_util.MultiIOTestModel( num_classes=num_classes, use_dp=True, use_bn=True) model.compile( loss='mse', optimizer='rmsprop', metrics=['acc'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0) _ = model.evaluate([x1, x2], [y1, y2], verbose=0) def test_single_io_workflow_with_datasets(self): num_classes = 2 num_samples = 10 input_dim = 50 with self.cached_session(): model = model_util.SimpleTestModel( num_classes=num_classes, use_dp=True, use_bn=True) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones((num_samples, input_dim), dtype=np.float32) y = np.zeros((num_samples, num_classes), dtype=np.float32) dataset = dataset_ops.Dataset.from_tensor_slices((x, y)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=2, steps_per_epoch=10, verbose=0) _ = model.evaluate(dataset, steps=10, verbose=0) def test_attributes(self): # layers, weights, trainable_weights, non_trainable_weights, inputs, outputs num_classes = (2, 3) num_samples = 100 input_dim = 50 model = 
model_util.MultiIOTestModel(num_classes=num_classes, use_bn=True) x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) self.assertEqual(model.name, 'test_model') self.assertEqual(model.built, False) self.assertEqual(len(model.weights), 0) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch([x1, x2], [y1, y2]) self.assertEqual(model.built, True) self.assertEqual(len(model.layers), 4) self.assertEqual(len(model.weights), 10) self.assertEqual(len(model.trainable_weights), 8) self.assertEqual(len(model.non_trainable_weights), 2) self.assertEqual(len(model.inputs), 2) self.assertEqual(len(model.outputs), 2) def test_updates(self): # test that updates get run during training num_samples = 100 input_dim = 50 class BNNet(keras.Model): def __init__(self): super(BNNet, self).__init__() self.bn = keras.layers.BatchNormalization(beta_initializer='ones', gamma_initializer='ones') def call(self, inputs): return self.bn(inputs) x = np.ones((num_samples, input_dim)) y = np.ones((num_samples, input_dim)) model = BNNet() model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) y_ref = model.predict(x) model.train_on_batch(x, y) y_new = model.predict(x) self.assertGreater(np.sum(np.abs(y_ref - y_new)), 0.1) def test_training_and_inference_behavior(self): # test that dropout is applied in training and not inference num_samples = 100 input_dim = 50 class DPNet(keras.Model): def __init__(self): super(DPNet, self).__init__() self.dp = keras.layers.Dropout(0.5) self.dense = keras.layers.Dense(1, use_bias=False, kernel_initializer='ones') def call(self, inputs): x = self.dp(inputs) return self.dense(x) model = DPNet() x = np.ones((num_samples, input_dim)) y = model.predict(x) self.assertEqual(np.sum(y), np.sum(x)) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) loss = model.train_on_batch(x, y) self.assertGreater(loss, 0.1) def test_training_methods(self): # test fit, train_on_batch # on different input types: list, dict num_classes = (2, 3) num_samples = 100 input_dim = 50 x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) model = model_util.MultiIOTestModel(num_classes=num_classes, use_bn=True) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0) model.fit({'input_1': x1, 'input_2': x2}, {'output_1': y1, 'output_2': y2}, epochs=2, batch_size=32) model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0, validation_data=([x1, x2], [y1, y2])) model = model_util.MultiIOTestModel(num_classes=num_classes, use_bn=True) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch([x1, x2], [y1, y2]) model.train_on_batch({'input_1': x1, 'input_2': x2}, {'output_1': y1, 'output_2': y2}) def test_inference_methods(self): # test predict, 
evaluate, test_on_batch, predict_on_batch # on different input types: list, dict num_classes = (2, 3) num_samples = 100 input_dim = 50 x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) model = model_util.MultiIOTestModel(num_classes=num_classes, use_bn=True) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.evaluate([x1, x2], [y1, y2]) model.test_on_batch([x1, x2], [y1, y2]) model = model_util.MultiIOTestModel(num_classes=num_classes, use_bn=True) model.predict([x1, x2]) model = model_util.MultiIOTestModel(num_classes=num_classes, use_bn=True) model.predict_on_batch([x1, x2]) def test_saving(self): num_classes = (2, 3) num_samples = 100 input_dim = 50 x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) model = model_util.MultiIOTestModel(num_classes=num_classes, use_bn=True) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0) y_ref_1, y_ref_2 = model.predict([x1, x2]) tf_format_name = os.path.join(self.get_temp_dir(), 'ckpt') model.save_weights(tf_format_name) if h5py is not None: hdf5_format_name = os.path.join(self.get_temp_dir(), 'weights.h5') model.save_weights(hdf5_format_name) model = model_util.MultiIOTestModel(num_classes=num_classes, use_bn=True) if h5py is not None: with self.assertRaises(ValueError): model.load_weights(hdf5_format_name) model.load_weights(tf_format_name) y1, y2 = model.predict([x1, x2]) self.assertAllClose(y_ref_1, y1, atol=1e-5) self.assertAllClose(y_ref_2, y2, atol=1e-5) if h5py is not None: model.load_weights(hdf5_format_name) y1, y2 = model.predict([x1, x2]) self.assertAllClose(y_ref_1, y1, atol=1e-5) self.assertAllClose(y_ref_2, y2, atol=1e-5) def test_subclass_nested_in_subclass(self): num_classes = 2 num_samples = 100 input_dim = 50 model = model_util.NestedTestModel1(num_classes=num_classes) model.compile( loss='mse', optimizer='rmsprop', metrics=['acc'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones((num_samples, input_dim)) y = np.zeros((num_samples, num_classes)) model.fit(x, y, epochs=2, batch_size=32, verbose=0) _ = model.evaluate(x, y, verbose=0) self.assertEqual(len(model.weights), 8 + len(model.test_net.weights)) self.assertEqual(len(model.non_trainable_weights), 2 + len(model.test_net.non_trainable_weights)) self.assertEqual(len(model.trainable_weights), 6 + len(model.test_net.trainable_weights)) def test_graph_nested_in_subclass(self): num_classes = 2 num_samples = 100 input_dim = 50 model = model_util.NestedTestModel2(num_classes=num_classes) model.compile( loss='mse', optimizer='rmsprop', metrics=['acc'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones((num_samples, input_dim)) y = np.zeros((num_samples, num_classes)) model.fit(x, y, epochs=2, batch_size=32, verbose=0) _ = model.evaluate(x, y, verbose=0) self.assertEqual(len(model.weights), 8 + len(model.test_net.weights)) self.assertEqual(len(model.non_trainable_weights), 2 + 
len(model.test_net.non_trainable_weights)) self.assertEqual(len(model.trainable_weights), 6 + len(model.test_net.trainable_weights)) def test_subclass_nested_in_graph(self): num_classes = 2 num_samples = 100 input_dim = 50 model = model_util.get_nested_model_3( input_dim=input_dim, num_classes=num_classes) model.compile( loss='mse', optimizer='rmsprop', metrics=['acc'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones((num_samples, input_dim)) y = np.zeros((num_samples, num_classes)) model.fit(x, y, epochs=2, batch_size=32, verbose=0) _ = model.evaluate(x, y, verbose=0) self.assertEqual(len(model.weights), 16) self.assertEqual(len(model.non_trainable_weights), 4) self.assertEqual(len(model.trainable_weights), 12) def test_subclass_nested_in_sequential(self): num_classes = 2 num_samples = 100 input_dim = 50 class Inner(keras.Model): def __init__(self): super(Inner, self).__init__() self.dense1 = keras.layers.Dense(32, activation='relu') self.dense2 = keras.layers.Dense(num_classes, activation='relu') self.bn = keras.layers.BatchNormalization() def call(self, inputs): x = self.dense1(inputs) x = self.dense2(x) return self.bn(x) model = keras.Sequential([Inner()]) model.compile( loss='mse', optimizer='rmsprop', metrics=['acc'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones((num_samples, input_dim)) y = np.zeros((num_samples, num_classes)) model.fit(x, y, epochs=2, batch_size=32, verbose=0) _ = model.evaluate(x, y, verbose=0) self.assertEqual(len(model.weights), 8) self.assertEqual(len(model.non_trainable_weights), 2) self.assertEqual(len(model.trainable_weights), 6) def test_support_for_manual_training_arg(self): # In most cases, the `training` argument is left unspecified, in which # case it defaults to value corresponding to the Model method being used # (fit -> True, predict -> False, etc). # If the user writes their model `call` method to take # an explicit `training` argument, we must check that the correct value # is being passed to the model for each method call. class DPNet(keras.Model): def __init__(self): super(DPNet, self).__init__() self.dp = keras.layers.Dropout(0.5) self.dense = keras.layers.Dense(1, use_bias=False, kernel_initializer='ones') def call(self, inputs, training=False): x = self.dp(inputs, training=training) return self.dense(x) model = DPNet() x = np.ones((10, 10)) y = model.predict(x) self.assertEqual(np.sum(y), np.sum(x)) model.compile( loss='mse', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) loss = model.train_on_batch(x, y) self.assertGreater(loss, 0.1) def test_no_loss_in_compile(self): class InternalLossModel(keras.Model): def __init__(self): super(InternalLossModel, self).__init__() self.dense = keras.layers.Dense(1) def call(self, inputs): out = self.dense(inputs) self.add_loss(math_ops.reduce_sum(out)) return out model = InternalLossModel() x = np.ones((10, 10)) model.predict(x) model.compile( optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x) model.evaluate(x) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/model_subclassing_compiled_test.py
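The compiled-subclassing tests above build their models through model_util helpers that are not shown in this file. For orientation, here is a minimal self-contained sketch of the same compile/fit workflow, using a hypothetical stand-in rather than model_util.SimpleTestModel itself:

```python
import numpy as np
import tensorflow as tf

class SmallTestModel(tf.keras.Model):
  """Illustrative subclassed model exercising the workflow tested above."""

  def __init__(self, num_classes=2):
    super(SmallTestModel, self).__init__()
    self.dense1 = tf.keras.layers.Dense(16, activation='relu')
    self.dense2 = tf.keras.layers.Dense(num_classes, activation='softmax')

  def call(self, inputs):
    return self.dense2(self.dense1(inputs))

model = SmallTestModel()
model.compile(loss='mse', optimizer='rmsprop', metrics=['acc'])
x = np.ones((100, 50))
y = np.zeros((100, 2))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
model.evaluate(x, y, verbose=0)
```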
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Integration tests for Keras.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import random import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.ops import nn_ops as nn from tensorflow.python.ops import rnn_cell from tensorflow.python.platform import test class KerasIntegrationTest(keras_parameterized.TestCase): def _save_and_reload_model(self, model): self.temp_dir = self.get_temp_dir() fpath = os.path.join(self.temp_dir, 'test_model_%s' % (random.randint(0, 1e7),)) if context.executing_eagerly(): save_format = 'tf' else: if (not isinstance(model, keras.Sequential) and not model._is_graph_network): return model # Not supported save_format = 'h5' model.save(fpath, save_format=save_format) model = keras.models.load_model(fpath) return model @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes class VectorClassificationIntegrationTest(keras_parameterized.TestCase): def test_vector_classification(self): np.random.seed(1337) (x_train, y_train), _ = testing_utils.get_test_data( train_samples=100, test_samples=0, input_shape=(10,), num_classes=2) y_train = keras.utils.to_categorical(y_train) model = testing_utils.get_model_from_layers( [keras.layers.Dense(16, activation='relu'), keras.layers.Dropout(0.1), keras.layers.Dense(y_train.shape[-1], activation='softmax')], input_shape=x_train.shape[1:]) model.compile( loss='categorical_crossentropy', optimizer=keras.optimizer_v2.adam.Adam(0.005), metrics=['acc'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) history = model.fit(x_train, y_train, epochs=10, batch_size=10, validation_data=(x_train, y_train), verbose=2) self.assertGreater(history.history['val_acc'][-1], 0.7) _, val_acc = model.evaluate(x_train, y_train) self.assertAlmostEqual(history.history['val_acc'][-1], val_acc) predictions = model.predict(x_train) self.assertEqual(predictions.shape, (x_train.shape[0], 2)) def test_vector_classification_shared_model(self): # Test that Sequential models that feature internal updates # and internal losses can be shared. 
np.random.seed(1337) (x_train, y_train), _ = testing_utils.get_test_data( train_samples=100, test_samples=0, input_shape=(10,), num_classes=2) y_train = keras.utils.to_categorical(y_train) base_model = testing_utils.get_model_from_layers( [keras.layers.Dense(16, activation='relu', kernel_regularizer=keras.regularizers.l2(1e-5), bias_regularizer=keras.regularizers.l2(1e-5)), keras.layers.BatchNormalization()], input_shape=x_train.shape[1:]) x = keras.layers.Input(x_train.shape[1:]) y = base_model(x) y = keras.layers.Dense(y_train.shape[-1], activation='softmax')(y) model = keras.models.Model(x, y) model.compile( loss='categorical_crossentropy', optimizer=keras.optimizer_v2.adam.Adam(0.005), metrics=['acc'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) if not testing_utils.should_run_eagerly(): self.assertEqual(len(model.get_losses_for(None)), 2) self.assertEqual(len(model.get_updates_for(x)), 2) history = model.fit(x_train, y_train, epochs=10, batch_size=10, validation_data=(x_train, y_train), verbose=2) self.assertGreater(history.history['val_acc'][-1], 0.7) _, val_acc = model.evaluate(x_train, y_train) self.assertAlmostEqual(history.history['val_acc'][-1], val_acc) predictions = model.predict(x_train) self.assertEqual(predictions.shape, (x_train.shape[0], 2)) @keras_parameterized.run_all_keras_modes class SequentialIntegrationTest(KerasIntegrationTest): def test_sequential_save_and_pop(self): # Test the following sequence of actions: # - construct a Sequential model and train it # - save it # - load it # - pop its last layer and add a new layer instead # - continue training np.random.seed(1337) (x_train, y_train), _ = testing_utils.get_test_data( train_samples=100, test_samples=0, input_shape=(10,), num_classes=2) y_train = keras.utils.to_categorical(y_train) model = keras.Sequential([ keras.layers.Dense(16, activation='relu'), keras.layers.Dropout(0.1), keras.layers.Dense(y_train.shape[-1], activation='softmax') ]) model.compile( loss='categorical_crossentropy', optimizer=keras.optimizer_v2.adam.Adam(0.005), metrics=['acc'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x_train, y_train, epochs=1, batch_size=10, validation_data=(x_train, y_train), verbose=2) model = self._save_and_reload_model(model) # TODO(b/134537740): model.pop doesn't update model outputs properly when # model.outputs is already defined, so just set to `None` for now. model.inputs = None model.outputs = None model.pop() model.add(keras.layers.Dense(y_train.shape[-1], activation='softmax')) # TODO(b/134523282): There is an bug with Sequential models, so the model # must be marked as compiled=False to ensure the next compile goes through. 
model._is_compiled = False model.compile( loss='categorical_crossentropy', optimizer=keras.optimizer_v2.adam.Adam(0.005), metrics=['acc'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) history = model.fit(x_train, y_train, epochs=10, batch_size=10, validation_data=(x_train, y_train), verbose=2) self.assertGreater(history.history['val_acc'][-1], 0.7) model = self._save_and_reload_model(model) _, val_acc = model.evaluate(x_train, y_train) self.assertAlmostEqual(history.history['val_acc'][-1], val_acc) predictions = model.predict(x_train) self.assertEqual(predictions.shape, (x_train.shape[0], 2)) # See b/122473407 @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class TimeseriesClassificationIntegrationTest(keras_parameterized.TestCase): @keras_parameterized.run_with_all_model_types def test_timeseries_classification(self): np.random.seed(1337) (x_train, y_train), _ = testing_utils.get_test_data( train_samples=100, test_samples=0, input_shape=(4, 10), num_classes=2) y_train = keras.utils.to_categorical(y_train) layers = [ keras.layers.LSTM(5, return_sequences=True), keras.layers.GRU(y_train.shape[-1], activation='softmax') ] model = testing_utils.get_model_from_layers( layers, input_shape=x_train.shape[1:]) model.compile( loss='categorical_crossentropy', optimizer=keras.optimizer_v2.adam.Adam(0.005), metrics=['acc'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) history = model.fit(x_train, y_train, epochs=15, batch_size=10, validation_data=(x_train, y_train), verbose=2) self.assertGreater(history.history['val_acc'][-1], 0.7) _, val_acc = model.evaluate(x_train, y_train) self.assertAlmostEqual(history.history['val_acc'][-1], val_acc) predictions = model.predict(x_train) self.assertEqual(predictions.shape, (x_train.shape[0], 2)) def test_timeseries_classification_sequential_tf_rnn(self): np.random.seed(1337) (x_train, y_train), _ = testing_utils.get_test_data( train_samples=100, test_samples=0, input_shape=(4, 10), num_classes=2) y_train = keras.utils.to_categorical(y_train) model = keras.models.Sequential() model.add(keras.layers.RNN(rnn_cell.LSTMCell(5), return_sequences=True, input_shape=x_train.shape[1:])) model.add(keras.layers.RNN(rnn_cell.GRUCell(y_train.shape[-1], activation='softmax', dtype=dtypes.float32))) model.compile( loss='categorical_crossentropy', optimizer=keras.optimizer_v2.adam.Adam(0.005), metrics=['acc'], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) history = model.fit(x_train, y_train, epochs=15, batch_size=10, validation_data=(x_train, y_train), verbose=2) self.assertGreater(history.history['val_acc'][-1], 0.7) _, val_acc = model.evaluate(x_train, y_train) self.assertAlmostEqual(history.history['val_acc'][-1], val_acc) predictions = model.predict(x_train) self.assertEqual(predictions.shape, (x_train.shape[0], 2)) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes class ImageClassificationIntegrationTest(keras_parameterized.TestCase): def test_image_classification(self): np.random.seed(1337) (x_train, y_train), _ = testing_utils.get_test_data( train_samples=100, test_samples=0, input_shape=(10, 10, 3), num_classes=2) y_train = keras.utils.to_categorical(y_train) layers = [ keras.layers.Conv2D(4, 3, padding='same', activation='relu'), keras.layers.Conv2D(8, 3, padding='same'), keras.layers.BatchNormalization(), 
keras.layers.Conv2D(8, 3, padding='same'),
        keras.layers.Flatten(),
        keras.layers.Dense(y_train.shape[-1], activation='softmax')
    ]
    model = testing_utils.get_model_from_layers(
        layers, input_shape=x_train.shape[1:])
    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizer_v2.adam.Adam(0.005),
        metrics=['acc'],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    history = model.fit(x_train, y_train, epochs=10, batch_size=10,
                        validation_data=(x_train, y_train),
                        verbose=2)
    self.assertGreater(history.history['val_acc'][-1], 0.7)
    _, val_acc = model.evaluate(x_train, y_train)
    self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
    predictions = model.predict(x_train)
    self.assertEqual(predictions.shape, (x_train.shape[0], 2))


@keras_parameterized.run_all_keras_modes
class ActivationV2IntegrationTest(keras_parameterized.TestCase):
  """Tests activation function V2 in model exporting and loading.

  This test verifies that in TF 2.x, when 'tf.nn.softmax' is used as an
  activation function, model exporting and loading work as expected.
  Check b/123041942 for details.
  """

  def test_serialization_v2_model(self):
    np.random.seed(1337)
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=100,
        test_samples=0,
        input_shape=(10,),
        num_classes=2)
    y_train = keras.utils.to_categorical(y_train)

    model = keras.Sequential([
        keras.layers.Flatten(input_shape=x_train.shape[1:]),
        keras.layers.Dense(10, activation=nn.relu),
        # To mimic 'tf.nn.softmax' used in TF 2.x.
        keras.layers.Dense(y_train.shape[-1], activation=nn.softmax_v2),
    ])

    # Check if 'softmax' is in model.get_config().
    last_layer_activation = model.get_layer(index=2).get_config()['activation']
    self.assertEqual(last_layer_activation, 'softmax')

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizer_v2.adam.Adam(0.005),
        metrics=['accuracy'],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.fit(x_train, y_train, epochs=2, batch_size=10,
              validation_data=(x_train, y_train),
              verbose=2)

    output_path = os.path.join(self.get_temp_dir(), 'tf_keras_saved_model')
    model.save(output_path, save_format='tf')
    loaded_model = keras.models.load_model(output_path)
    self.assertEqual(model.summary(), loaded_model.summary())


if __name__ == '__main__':
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/integration_test.py
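_save_and_reload_model in integration_test.py above round-trips a model through save/load, choosing the 'tf' SavedModel format when executing eagerly. A stand-alone sketch of that round-trip with a prediction-consistency check (assuming the public tf.keras API):

```python
import os
import tempfile

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential(
    [tf.keras.layers.Dense(2, activation='softmax', input_shape=(10,))])
model.compile(loss='categorical_crossentropy', optimizer='adam')

x = np.random.random((4, 10)).astype(np.float32)
before = model.predict(x)

fpath = os.path.join(tempfile.mkdtemp(), 'test_model')
model.save(fpath, save_format='tf')  # the eager branch in the helper above
reloaded = tf.keras.models.load_model(fpath)

# The reloaded model should produce the same predictions.
np.testing.assert_allclose(before, reloaded.predict(x), atol=1e-5)
```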
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for unit-testing Keras.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import itertools import unittest from absl.testing import parameterized from tensorflow.python import keras from tensorflow.python import tf2 from tensorflow.python.eager import context from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test from tensorflow.python.util import nest from tensorflow.python.util.compat import collections_abc class TestCase(test.TestCase, parameterized.TestCase): def tearDown(self): keras.backend.clear_session() super(TestCase, self).tearDown() # TODO(kaftan): Possibly enable 'subclass_custom_build' when tests begin to pass # it. Or perhaps make 'subclass' always use a custom build method. def run_with_all_model_types( test_or_class=None, exclude_models=None): """Execute the decorated test with all Keras model types. This decorator is intended to be applied either to individual test methods in a `keras_parameterized.TestCase` class, or directly to a test class that extends it. Doing so will cause the contents of the individual test method (or all test methods in the class) to be executed multiple times - once for each Keras model type. The Keras model types are: ['functional', 'subclass', 'sequential'] Note: if stacking this decorator with absl.testing's parameterized decorators, those should be at the bottom of the stack. Various methods in `testing_utils` to get models will auto-generate a model of the currently active Keras model type. This allows unittests to confirm the equivalence between different Keras models. For example, consider the following unittest: ```python class MyTests(testing_utils.KerasTestCase): @testing_utils.run_with_all_model_types( exclude_models = ['sequential']) def test_foo(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' metrics = ['mae'] model.compile(optimizer, loss, metrics=metrics) inputs = np.zeros((10, 3)) targets = np.zeros((10, 4)) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) if __name__ == "__main__": tf.test.main() ``` This test tries building a small mlp as both a functional model and as a subclass model. 
We can also annotate the whole class if we want this to apply to all tests in the class: ```python @testing_utils.run_with_all_model_types(exclude_models = ['sequential']) class MyTests(testing_utils.KerasTestCase): def test_foo(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' metrics = ['mae'] model.compile(optimizer, loss, metrics=metrics) inputs = np.zeros((10, 3)) targets = np.zeros((10, 4)) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) if __name__ == "__main__": tf.test.main() ``` Args: test_or_class: test method or class to be annotated. If None, this method returns a decorator that can be applied to a test method or test class. If it is not None this returns the decorator applied to the test or class. exclude_models: A collection of Keras model types to not run. (May also be a single model type not wrapped in a collection). Defaults to None. Returns: Returns a decorator that will run the decorated test method multiple times: once for each desired Keras model type. Raises: ImportError: If abseil parameterized is not installed or not included as a target dependency. """ model_types = ['functional', 'subclass', 'sequential'] params = [('_%s' % model, model) for model in model_types if model not in nest.flatten(exclude_models)] def single_method_decorator(f): """Decorator that constructs the test cases.""" # Use named_parameters so it can be individually run from the command line @parameterized.named_parameters(*params) @functools.wraps(f) def decorated(self, model_type, *args, **kwargs): """A run of a single test case w/ the specified model type.""" if model_type == 'functional': _test_functional_model_type(f, self, *args, **kwargs) elif model_type == 'subclass': _test_subclass_model_type(f, self, *args, **kwargs) elif model_type == 'sequential': _test_sequential_model_type(f, self, *args, **kwargs) else: raise ValueError('Unknown model type: %s' % (model_type,)) return decorated return _test_or_class_decorator(test_or_class, single_method_decorator) def _test_functional_model_type(f, test_or_class, *args, **kwargs): with testing_utils.model_type_scope('functional'): f(test_or_class, *args, **kwargs) def _test_subclass_model_type(f, test_or_class, *args, **kwargs): with testing_utils.model_type_scope('subclass'): f(test_or_class, *args, **kwargs) def _test_sequential_model_type(f, test_or_class, *args, **kwargs): with testing_utils.model_type_scope('sequential'): f(test_or_class, *args, **kwargs) def run_all_keras_modes(test_or_class=None, config=None, always_skip_v1=False, always_skip_eager=False): """Execute the decorated test with all keras execution modes. This decorator is intended to be applied either to individual test methods in a `keras_parameterized.TestCase` class, or directly to a test class that extends it. Doing so will cause the contents of the individual test method (or all test methods in the class) to be executed multiple times - once executing in legacy graph mode, once running eagerly and with `should_run_eagerly` returning True, and once running eagerly with `should_run_eagerly` returning False. If Tensorflow v2 behavior is enabled, legacy graph mode will be skipped, and the test will only run twice. Note: if stacking this decorator with absl.testing's parameterized decorators, those should be at the bottom of the stack. 
  For example, consider the following unittest:

  ```python
  class MyTests(testing_utils.KerasTestCase):

    @testing_utils.run_all_keras_modes
    def test_foo(self):
      model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
      optimizer = RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      metrics = ['mae']
      model.compile(
          optimizer, loss, metrics=metrics,
          run_eagerly=testing_utils.should_run_eagerly(),
          experimental_run_tf_function=testing_utils.should_run_tf_function())

      inputs = np.zeros((10, 3))
      targets = np.zeros((10, 4))
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)

      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)

  if __name__ == "__main__":
    tf.test.main()
  ```

  This test will try compiling & fitting the small functional mlp using all
  three Keras execution modes.

  Args:
    test_or_class: test method or class to be annotated. If None,
      this method returns a decorator that can be applied to a test method or
      test class. If it is not None this returns the decorator applied to the
      test or class.
    config: An optional config_pb2.ConfigProto to use to configure the session
      when executing graphs.
    always_skip_v1: If True, does not try running the legacy graph mode even
      when Tensorflow v2 behavior is not enabled.
    always_skip_eager: If True, does not execute the decorated test with eager
      execution modes.

  Returns:
    Returns a decorator that will run the decorated test method multiple
    times.

  Raises:
    ImportError: If abseil parameterized is not installed or not included as
      a target dependency.
  """
  params = [('_v2_function', 'v2_function'),
            ('_v2_funcgraph', 'v2_funcgraph')]
  if not always_skip_eager:
    params.append(('_v2_eager', 'v2_eager'))
  if not (always_skip_v1 or tf2.enabled()):
    params.append(('_v1_session', 'v1_session'))

  def single_method_decorator(f):
    """Decorator that constructs the test cases."""

    # Use named_parameters so it can be individually run from the command line
    @parameterized.named_parameters(*params)
    @functools.wraps(f)
    def decorated(self, run_mode, *args, **kwargs):
      """A run of a single test case w/ specified run mode."""
      if run_mode == 'v1_session':
        _v1_session_test(f, self, config, *args, **kwargs)
      elif run_mode == 'v2_funcgraph':
        _v2_graph_functions_test(f, self, *args, **kwargs)
      elif run_mode == 'v2_eager':
        _v2_eager_test(f, self, *args, **kwargs)
      elif run_mode == 'v2_function':
        _v2_function_test(f, self, *args, **kwargs)
      else:
        raise ValueError('Unknown run mode %s' % run_mode)

    return decorated

  return _test_or_class_decorator(test_or_class, single_method_decorator)


def _v1_session_test(f, test_or_class, config, *args, **kwargs):
  with context.graph_mode(), testing_utils.run_eagerly_scope(False):
    with testing_utils.experimental_run_tf_function_scope(False):
      with test_or_class.test_session(use_gpu=True, config=config):
        f(test_or_class, *args, **kwargs)


def _v2_graph_functions_test(f, test_or_class, *args, **kwargs):
  with context.eager_mode():
    with testing_utils.run_eagerly_scope(False):
      with testing_utils.experimental_run_tf_function_scope(False):
        f(test_or_class, *args, **kwargs)


def _v2_eager_test(f, test_or_class, *args, **kwargs):
  with context.eager_mode():
    with testing_utils.run_eagerly_scope(True):
      with testing_utils.experimental_run_tf_function_scope(True):
        f(test_or_class, *args, **kwargs)


def _v2_function_test(f, test_or_class, *args, **kwargs):
  with context.eager_mode():
    with testing_utils.run_eagerly_scope(False):
      with testing_utils.experimental_run_tf_function_scope(True):
        f(test_or_class, *args,
**kwargs) def _test_or_class_decorator(test_or_class, single_method_decorator): """Decorate a test or class with a decorator intended for one method. If the test_or_class is a class: This will apply the decorator to all test methods in the class. If the test_or_class is an iterable of already-parameterized test cases: This will apply the decorator to all the cases, and then flatten the resulting cross-product of test cases. This allows stacking the Keras parameterized decorators w/ each other, and to apply them to test methods that have already been marked with an absl parameterized decorator. Otherwise, treat the obj as a single method and apply the decorator directly. Args: test_or_class: A test method (that may have already been decorated with a parameterized decorator, or a test class that extends keras_parameterized.TestCase single_method_decorator: A parameterized decorator intended for a single test method. Returns: The decorated result. """ def _decorate_test_or_class(obj): if isinstance(obj, collections_abc.Iterable): return itertools.chain.from_iterable( single_method_decorator(method) for method in obj) if isinstance(obj, type): cls = obj for name, value in cls.__dict__.copy().items(): if callable(value) and name.startswith( unittest.TestLoader.testMethodPrefix): setattr(cls, name, single_method_decorator(value)) cls = type(cls).__new__(type(cls), cls.__name__, cls.__bases__, cls.__dict__.copy()) return cls return single_method_decorator(obj) if test_or_class is not None: return _decorate_test_or_class(test_or_class) return _decorate_test_or_class
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/keras_parameterized.py
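The class branch of _test_or_class_decorator in keras_parameterized.py above wraps every attribute whose name starts with unittest's testMethodPrefix. The mechanics can be seen in isolation with this stripped-down sketch (run_twice and apply_to_test_methods are illustrative names, not part of the module):

```python
import unittest

def run_twice(f):
  """Stand-in for a single-method decorator like those built above."""
  def decorated(self, *args, **kwargs):
    for _ in range(2):
      f(self, *args, **kwargs)
  return decorated

def apply_to_test_methods(cls):
  # Mirrors the `isinstance(obj, type)` branch of _test_or_class_decorator:
  # rebind every test method of the class to its decorated version.
  for name, value in cls.__dict__.copy().items():
    if callable(value) and name.startswith(
        unittest.TestLoader.testMethodPrefix):
      setattr(cls, name, run_twice(value))
  return cls

@apply_to_test_methods
class MyTests(unittest.TestCase):

  def test_something(self):
    self.assertTrue(True)

if __name__ == '__main__':
  unittest.main()
```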
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Wrappers for Keras models, providing compatibility with other frameworks.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras.wrappers import scikit_learn del absolute_import del division del print_function
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/wrappers/__init__.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Scikit-learn API wrapper.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test INPUT_DIM = 5 HIDDEN_DIM = 5 TRAIN_SAMPLES = 10 TEST_SAMPLES = 5 NUM_CLASSES = 2 BATCH_SIZE = 5 EPOCHS = 1 def build_fn_clf(hidden_dim): model = keras.models.Sequential() model.add(keras.layers.Dense(INPUT_DIM, input_shape=(INPUT_DIM,))) model.add(keras.layers.Activation('relu')) model.add(keras.layers.Dense(hidden_dim)) model.add(keras.layers.Activation('relu')) model.add(keras.layers.Dense(NUM_CLASSES)) model.add(keras.layers.Activation('softmax')) model.compile( optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy']) return model def assert_classification_works(clf): np.random.seed(42) (x_train, y_train), (x_test, _) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) clf.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS) score = clf.score(x_train, y_train, batch_size=BATCH_SIZE) assert np.isscalar(score) and np.isfinite(score) preds = clf.predict(x_test, batch_size=BATCH_SIZE) assert preds.shape == (TEST_SAMPLES,) for prediction in np.unique(preds): assert prediction in range(NUM_CLASSES) proba = clf.predict_proba(x_test, batch_size=BATCH_SIZE) assert proba.shape == (TEST_SAMPLES, NUM_CLASSES) assert np.allclose(np.sum(proba, axis=1), np.ones(TEST_SAMPLES)) def build_fn_reg(hidden_dim): model = keras.models.Sequential() model.add(keras.layers.Dense(INPUT_DIM, input_shape=(INPUT_DIM,))) model.add(keras.layers.Activation('relu')) model.add(keras.layers.Dense(hidden_dim)) model.add(keras.layers.Activation('relu')) model.add(keras.layers.Dense(1)) model.add(keras.layers.Activation('linear')) model.compile( optimizer='sgd', loss='mean_absolute_error', metrics=['accuracy']) return model def assert_regression_works(reg): np.random.seed(42) (x_train, y_train), (x_test, _) = testing_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) reg.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS) score = reg.score(x_train, y_train, batch_size=BATCH_SIZE) assert np.isscalar(score) and np.isfinite(score) preds = reg.predict(x_test, batch_size=BATCH_SIZE) assert preds.shape == (TEST_SAMPLES,) class ScikitLearnAPIWrapperTest(test.TestCase): def test_classify_build_fn(self): with self.cached_session(): clf = keras.wrappers.scikit_learn.KerasClassifier( build_fn=build_fn_clf, hidden_dim=HIDDEN_DIM, batch_size=BATCH_SIZE, epochs=EPOCHS) assert_classification_works(clf) def test_classify_class_build_fn(self): class ClassBuildFnClf(object): def 
__call__(self, hidden_dim): return build_fn_clf(hidden_dim) with self.cached_session(): clf = keras.wrappers.scikit_learn.KerasClassifier( build_fn=ClassBuildFnClf(), hidden_dim=HIDDEN_DIM, batch_size=BATCH_SIZE, epochs=EPOCHS) assert_classification_works(clf) def test_classify_inherit_class_build_fn(self): class InheritClassBuildFnClf(keras.wrappers.scikit_learn.KerasClassifier): def __call__(self, hidden_dim): return build_fn_clf(hidden_dim) with self.cached_session(): clf = InheritClassBuildFnClf( build_fn=None, hidden_dim=HIDDEN_DIM, batch_size=BATCH_SIZE, epochs=EPOCHS) assert_classification_works(clf) def test_regression_build_fn(self): with self.cached_session(): reg = keras.wrappers.scikit_learn.KerasRegressor( build_fn=build_fn_reg, hidden_dim=HIDDEN_DIM, batch_size=BATCH_SIZE, epochs=EPOCHS) assert_regression_works(reg) def test_regression_class_build_fn(self): class ClassBuildFnReg(object): def __call__(self, hidden_dim): return build_fn_reg(hidden_dim) with self.cached_session(): reg = keras.wrappers.scikit_learn.KerasRegressor( build_fn=ClassBuildFnReg(), hidden_dim=HIDDEN_DIM, batch_size=BATCH_SIZE, epochs=EPOCHS) assert_regression_works(reg) def test_regression_inherit_class_build_fn(self): class InheritClassBuildFnReg(keras.wrappers.scikit_learn.KerasRegressor): def __call__(self, hidden_dim): return build_fn_reg(hidden_dim) with self.cached_session(): reg = InheritClassBuildFnReg( build_fn=None, hidden_dim=HIDDEN_DIM, batch_size=BATCH_SIZE, epochs=EPOCHS) assert_regression_works(reg) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/wrappers/scikit_learn_test.py
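The tests above drive the wrapper through its `build_fn` hook. For context, here is a minimal standalone sketch of the same workflow from the scikit-learn side (not part of the test file; it assumes scikit-learn is installed, and `make_clf_model` is a hypothetical build function, not a name from this repo):

import numpy as np
from sklearn.model_selection import cross_val_score
from tensorflow.python import keras

def make_clf_model(hidden_dim=8):
  # Hypothetical build_fn: returns a compiled Sequential classifier.
  model = keras.models.Sequential()
  model.add(keras.layers.Dense(hidden_dim, input_shape=(5,), activation='relu'))
  model.add(keras.layers.Dense(2, activation='softmax'))
  model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
  return model

clf = keras.wrappers.scikit_learn.KerasClassifier(
    build_fn=make_clf_model, hidden_dim=8, epochs=1, batch_size=5, verbose=0)
x = np.random.random((20, 5)).astype('float32')
y = np.random.randint(0, 2, size=(20,))
# cross_val_score clones the estimator through get_params()/set_params()
# and reports KerasClassifier.score(), i.e. mean accuracy, per fold.
print(cross_val_score(clf, x, y, cv=2))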
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Wrapper for using the Scikit-Learn API with Keras models. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import types import numpy as np from tensorflow.python.keras import losses from tensorflow.python.keras.models import Sequential from tensorflow.python.keras.utils.generic_utils import has_arg from tensorflow.python.keras.utils.np_utils import to_categorical from tensorflow.python.util.tf_export import keras_export class BaseWrapper(object): """Base class for the Keras scikit-learn wrapper. Warning: This class should not be used directly. Use descendant classes instead. Arguments: build_fn: callable function or class instance **sk_params: model parameters & fitting parameters The `build_fn` should construct, compile and return a Keras model, which will then be used to fit/predict. One of the following three values could be passed to `build_fn`: 1. A function 2. An instance of a class that implements the `__call__` method 3. None. This means you implement a class that inherits from either `KerasClassifier` or `KerasRegressor`. The `__call__` method of the present class will then be treated as the default `build_fn`. `sk_params` takes both model parameters and fitting parameters. Legal model parameters are the arguments of `build_fn`. Note that like all other estimators in scikit-learn, `build_fn` should provide default values for its arguments, so that you could create the estimator without passing any values to `sk_params`. `sk_params` could also accept parameters for calling `fit`, `predict`, `predict_proba`, and `score` methods (e.g., `epochs`, `batch_size`). Fitting (predicting) parameters are selected in the following order: 1. Values passed to the dictionary arguments of `fit`, `predict`, `predict_proba`, and `score` methods 2. Values passed to `sk_params` 3. The default values of the `keras.models.Sequential` `fit`, `predict`, `predict_proba` and `score` methods When using scikit-learn's `grid_search` API, legal tunable parameters are those you could pass to `sk_params`, including fitting parameters. In other words, you could use `grid_search` to search for the best `batch_size` or `epochs` as well as the model parameters. """ def __init__(self, build_fn=None, **sk_params): self.build_fn = build_fn self.sk_params = sk_params self.check_params(sk_params) def check_params(self, params): """Checks for user typos in `params`. Arguments: params: dictionary; the parameters to be checked Raises: ValueError: if any member of `params` is not a valid argument. 
""" legal_params_fns = [ Sequential.fit, Sequential.predict, Sequential.predict_classes, Sequential.evaluate ] if self.build_fn is None: legal_params_fns.append(self.__call__) elif (not isinstance(self.build_fn, types.FunctionType) and not isinstance(self.build_fn, types.MethodType)): legal_params_fns.append(self.build_fn.__call__) else: legal_params_fns.append(self.build_fn) for params_name in params: for fn in legal_params_fns: if has_arg(fn, params_name): break else: if params_name != 'nb_epoch': raise ValueError('{} is not a legal parameter'.format(params_name)) def get_params(self, **params): # pylint: disable=unused-argument """Gets parameters for this estimator. Arguments: **params: ignored (exists for API compatibility). Returns: Dictionary of parameter names mapped to their values. """ res = copy.deepcopy(self.sk_params) res.update({'build_fn': self.build_fn}) return res def set_params(self, **params): """Sets the parameters of this estimator. Arguments: **params: Dictionary of parameter names mapped to their values. Returns: self """ self.check_params(params) self.sk_params.update(params) return self def fit(self, x, y, **kwargs): """Constructs a new model with `build_fn` & fits the model to `(x, y)`. Arguments: x : array-like, shape `(n_samples, n_features)` Training samples where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)` True labels for `x`. **kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.fit` Returns: history : object details about the training history at each epoch. """ if self.build_fn is None: self.model = self.__call__(**self.filter_sk_params(self.__call__)) elif (not isinstance(self.build_fn, types.FunctionType) and not isinstance(self.build_fn, types.MethodType)): self.model = self.build_fn( **self.filter_sk_params(self.build_fn.__call__)) else: self.model = self.build_fn(**self.filter_sk_params(self.build_fn)) if (losses.is_categorical_crossentropy(self.model.loss) and len(y.shape) != 2): y = to_categorical(y) fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit)) fit_args.update(kwargs) history = self.model.fit(x, y, **fit_args) return history def filter_sk_params(self, fn, override=None): """Filters `sk_params` and returns those in `fn`'s arguments. Arguments: fn : arbitrary function override: dictionary, values to override `sk_params` Returns: res : dictionary containing variables in both `sk_params` and `fn`'s arguments. """ override = override or {} res = {} for name, value in self.sk_params.items(): if has_arg(fn, name): res.update({name: value}) res.update(override) return res @keras_export('keras.wrappers.scikit_learn.KerasClassifier') class KerasClassifier(BaseWrapper): """Implementation of the scikit-learn classifier API for Keras. """ def fit(self, x, y, **kwargs): """Constructs a new model with `build_fn` & fits the model to `(x, y)`. Arguments: x : array-like, shape `(n_samples, n_features)` Training samples where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)` True labels for `x`. **kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.fit` Returns: history : object details about the training history at each epoch. Raises: ValueError: In case of invalid shape for `y` argument. 
""" y = np.array(y) if len(y.shape) == 2 and y.shape[1] > 1: self.classes_ = np.arange(y.shape[1]) elif (len(y.shape) == 2 and y.shape[1] == 1) or len(y.shape) == 1: self.classes_ = np.unique(y) y = np.searchsorted(self.classes_, y) else: raise ValueError('Invalid shape for y: ' + str(y.shape)) self.n_classes_ = len(self.classes_) return super(KerasClassifier, self).fit(x, y, **kwargs) def predict(self, x, **kwargs): """Returns the class predictions for the given test data. Arguments: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. **kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.predict_classes`. Returns: preds: array-like, shape `(n_samples,)` Class predictions. """ kwargs = self.filter_sk_params(Sequential.predict_classes, kwargs) classes = self.model.predict_classes(x, **kwargs) return self.classes_[classes] def predict_proba(self, x, **kwargs): """Returns class probability estimates for the given test data. Arguments: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. **kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.predict_proba`. Returns: proba: array-like, shape `(n_samples, n_outputs)` Class probability estimates. In the case of binary classification, to match the scikit-learn API, will return an array of shape `(n_samples, 2)` (instead of `(n_samples, 1)` as in Keras). """ kwargs = self.filter_sk_params(Sequential.predict_proba, kwargs) probs = self.model.predict_proba(x, **kwargs) # check if binary classification if probs.shape[1] == 1: # first column is probability of class 0 and second is of class 1 probs = np.hstack([1 - probs, probs]) return probs def score(self, x, y, **kwargs): """Returns the mean accuracy on the given test data and labels. Arguments: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. y: array-like, shape `(n_samples,)` or `(n_samples, n_outputs)` True labels for `x`. **kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.evaluate`. Returns: score: float Mean accuracy of predictions on `x` wrt. `y`. Raises: ValueError: If the underlying model isn't configured to compute accuracy. You should pass `metrics=["accuracy"]` to the `.compile()` method of the model. """ y = np.searchsorted(self.classes_, y) kwargs = self.filter_sk_params(Sequential.evaluate, kwargs) loss_name = self.model.loss if hasattr(loss_name, '__name__'): loss_name = loss_name.__name__ if loss_name == 'categorical_crossentropy' and len(y.shape) != 2: y = to_categorical(y) outputs = self.model.evaluate(x, y, **kwargs) if not isinstance(outputs, list): outputs = [outputs] for name, output in zip(self.model.metrics_names, outputs): if name in ['accuracy', 'acc']: return output raise ValueError('The model is not configured to compute accuracy. ' 'You should pass `metrics=["accuracy"]` to ' 'the `model.compile()` method.') @keras_export('keras.wrappers.scikit_learn.KerasRegressor') class KerasRegressor(BaseWrapper): """Implementation of the scikit-learn regressor API for Keras. """ def predict(self, x, **kwargs): """Returns predictions for the given test data. Arguments: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. 
**kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.predict`. Returns: preds: array-like, shape `(n_samples,)` Predictions. """ kwargs = self.filter_sk_params(Sequential.predict, kwargs) return np.squeeze(self.model.predict(x, **kwargs)) def score(self, x, y, **kwargs): """Returns the mean loss on the given test data and labels. Arguments: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. y: array-like, shape `(n_samples,)` True labels for `x`. **kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.evaluate`. Returns: score: float Mean loss of predictions on `x` wrt. `y`, negated so that larger values indicate a better fit (scikit-learn convention). """ kwargs = self.filter_sk_params(Sequential.evaluate, kwargs) loss = self.model.evaluate(x, y, **kwargs) if isinstance(loss, list): return -loss[0] return -loss
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/wrappers/scikit_learn.py
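As the `BaseWrapper` docstring above notes, everything placed in `sk_params`, model arguments of `build_fn` as well as fitting arguments such as `epochs` and `batch_size`, is tunable through scikit-learn's grid-search machinery. A hedged sketch of that use (assumes scikit-learn is installed; `make_clf_model` is again a hypothetical build function):

import numpy as np
from sklearn.model_selection import GridSearchCV
from tensorflow.python import keras

def make_clf_model(hidden_dim=8):
  # Hypothetical build_fn; its arguments become legal model parameters.
  model = keras.models.Sequential()
  model.add(keras.layers.Dense(hidden_dim, input_shape=(5,), activation='relu'))
  model.add(keras.layers.Dense(2, activation='softmax'))
  model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
  return model

clf = keras.wrappers.scikit_learn.KerasClassifier(
    build_fn=make_clf_model, verbose=0)
param_grid = {
    'hidden_dim': [4, 8],   # model parameter: an argument of build_fn
    'batch_size': [5, 10],  # fitting parameter: an argument of Sequential.fit
    'epochs': [1, 2],
}
x = np.random.random((20, 5)).astype('float32')
y = np.random.randint(0, 2, size=(20,))
search = GridSearchCV(clf, param_grid, cv=2)
search.fit(x, y)
print(search.best_params_)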
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Attention layers that can be used in sequence DNN/CNN models. This file follows the terminology of https://arxiv.org/abs/1706.03762 Figure 2. Attention is formed by three tensors: Query, Key and Value. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.util.tf_export import keras_export class BaseDenseAttention(Layer): """Base Attention class for Dense networks. This class is suitable for Dense or CNN networks, and not for RNN networks. Implementations of attention mechanisms should inherit from this class, and reuse the `_apply_scores()` method. Args: causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such that position `i` cannot attend to positions `j > i`. This prevents the flow of information from the future towards the past. Call Arguments: inputs: List of the following tensors: * query: Query `Tensor` of shape `[batch_size, Tq, dim]`. * value: Value `Tensor` of shape `[batch_size, Tv, dim]`. * key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not given, will use `value` for both `key` and `value`, which is the most common case. mask: List of the following tensors: * query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`. If given, the output will be zero at the positions where `mask==False`. * value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`. If given, will apply the mask such that values at positions where `mask==False` do not contribute to the result. Output shape: Attention outputs of shape `[batch_size, Tq, dim]`. """ def __init__(self, causal=False, **kwargs): super(BaseDenseAttention, self).__init__(**kwargs) self.causal = causal self.supports_masking = True def _calculate_scores(self, query, key): """Calculates attention scores. Args: query: Query tensor of shape `[batch_size, Tq, dim]`. key: Key tensor of shape `[batch_size, Tv, dim]`. Returns: Tensor of shape `[batch_size, Tq, Tv]`. """ raise NotImplementedError def _apply_scores(self, scores, value, scores_mask=None): """Applies attention scores to the given value tensor. To use this method in your attention layer, follow the steps: * Use `query` tensor of shape `[batch_size, Tq, dim]` and `key` tensor of shape `[batch_size, Tv, dim]` to calculate the attention `scores`. * Pass `scores` and `value` tensors to this method. 
The method applies `scores_mask`, calculates `attention_distribution = softmax(scores)`, then returns `matmul(attention_distribution, value)`. * Apply `query_mask` and return the result. Args: scores: Scores float tensor of shape `[batch_size, Tq, Tv]`. value: Value tensor of shape `[batch_size, Tv, dim]`. scores_mask: A boolean mask `Tensor` of shape `[batch_size, 1, Tv]` or `[batch_size, Tq, Tv]`. If given, scores at positions where `scores_mask==False` do not contribute to the result. It must contain at least one `True` value in each row along the last dimension. Returns: Tensor of shape `[batch_size, Tq, dim]`. """ if scores_mask is not None: padding_mask = math_ops.logical_not(scores_mask) # Bias so padding positions do not contribute to attention distribution. scores -= 1.e9 * math_ops.cast(padding_mask, dtype=K.floatx()) attention_distribution = nn.softmax(scores) return math_ops.matmul(attention_distribution, value) # TODO(b/125916026): Consider exposing a __call__ method with named args. def call(self, inputs, mask=None): self._validate_call_args(inputs=inputs, mask=mask) q = inputs[0] v = inputs[1] k = inputs[2] if len(inputs) > 2 else v q_mask = mask[0] if mask else None v_mask = mask[1] if mask else None scores = self._calculate_scores(query=q, key=k) if v_mask is not None: # Mask of shape [batch_size, 1, Tv]. v_mask = array_ops.expand_dims(v_mask, axis=-2) if self.causal: # Creates a lower triangular mask, so position i cannot attend to # positions j>i. This prevents the flow of information from the future # into the past. scores_shape = array_ops.shape(scores) # causal_mask_shape = [1, Tq, Tv]. causal_mask_shape = array_ops.concat( [array_ops.ones_like(scores_shape[:-2]), scores_shape[-2:]], axis=0) causal_mask = _lower_triangular_mask(causal_mask_shape) else: causal_mask = None scores_mask = _merge_masks(v_mask, causal_mask) result = self._apply_scores(scores=scores, value=v, scores_mask=scores_mask) if q_mask is not None: # Mask of shape [batch_size, Tq, 1]. q_mask = array_ops.expand_dims(q_mask, axis=-1) result *= math_ops.cast(q_mask, dtype=result.dtype) return result def compute_mask(self, inputs, mask=None): self._validate_call_args(inputs=inputs, mask=mask) if mask: q_mask = mask[0] if q_mask is None: return None return ops.convert_to_tensor(q_mask) return None def _validate_call_args(self, inputs, mask): """Validates arguments of the call method.""" class_name = self.__class__.__name__ if not isinstance(inputs, list): raise ValueError( '{} layer must be called on a list of inputs, namely [query, value] ' 'or [query, value, key].'.format(class_name)) if len(inputs) < 2 or len(inputs) > 3: raise ValueError( '{} layer accepts inputs list of length 2 or 3, ' 'namely [query, value] or [query, value, key]. ' 'Given length: {}'.format(class_name, len(inputs))) if mask: if not isinstance(mask, list): raise ValueError( '{} layer mask must be a list, ' 'namely [query_mask, value_mask].'.format(class_name)) if len(mask) != 2: raise ValueError( '{} layer mask must be a list of length 2, namely [query_mask, ' 'value_mask]. Given length: {}'.format(class_name, len(mask))) def get_config(self): config = {'causal': self.causal} base_config = super(BaseDenseAttention, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.Attention') class Attention(BaseDenseAttention): """Dot-product attention layer, a.k.a. Luong-style attention. 
Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of shape `[batch_size, Tv, dim]` and `key` tensor of shape `[batch_size, Tv, dim]`. The calculation follows the steps: 1. Calculate scores with shape `[batch_size, Tq, Tv]` as a `query`-`key` dot product: `scores = tf.matmul(query, key, transpose_b=True)`. 2. Use scores to calculate a distribution with shape `[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`. 3. Use `distribution` to create a linear combination of `value` with shape `[batch_size, Tq, dim]`: `return tf.matmul(distribution, value)`. Args: use_scale: If `True`, will create a scalar variable to scale the attention scores. causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such that position `i` cannot attend to positions `j > i`. This prevents the flow of information from the future towards the past. Call Arguments: inputs: List of the following tensors: * query: Query `Tensor` of shape `[batch_size, Tq, dim]`. * value: Value `Tensor` of shape `[batch_size, Tv, dim]`. * key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not given, will use `value` for both `key` and `value`, which is the most common case. mask: List of the following tensors: * query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`. If given, the output will be zero at the positions where `mask==False`. * value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`. If given, will apply the mask such that values at positions where `mask==False` do not contribute to the result. Output shape: Attention outputs of shape `[batch_size, Tq, dim]`. The meaning of `query`, `value` and `key` depends on the application. In the case of text similarity, for example, `query` is the sequence embeddings of the first piece of text and `value` is the sequence embeddings of the second piece of text. `key` is usually the same tensor as `value`. Here is a code example for using `Attention` in a CNN+Attention network: ```python # Variable-length int sequences. query_input = tf.keras.Input(shape=(None,), dtype='int32') value_input = tf.keras.Input(shape=(None,), dtype='int32') # Embedding lookup. token_embedding = tf.keras.layers.Embedding(max_tokens, dimension) # Query embeddings of shape [batch_size, Tq, dimension]. query_embeddings = token_embedding(query_input) # Value embeddings of shape [batch_size, Tv, dimension]. value_embeddings = token_embedding(value_input) # CNN layer. cnn_layer = tf.keras.layers.Conv1D( filters=100, kernel_size=4, # Use 'same' padding so outputs have the same shape as inputs. padding='same') # Query encoding of shape [batch_size, Tq, filters]. query_seq_encoding = cnn_layer(query_embeddings) # Value encoding of shape [batch_size, Tv, filters]. value_seq_encoding = cnn_layer(value_embeddings) # Query-value attention of shape [batch_size, Tq, filters]. query_value_attention_seq = tf.keras.layers.Attention()( [query_seq_encoding, value_seq_encoding]) # Reduce over the sequence axis to produce encodings of shape # [batch_size, filters]. query_encoding = tf.keras.layers.GlobalAveragePooling1D()( query_seq_encoding) query_value_attention = tf.keras.layers.GlobalAveragePooling1D()( query_value_attention_seq) # Concatenate query and document encodings to produce a DNN input layer. input_layer = tf.keras.layers.Concatenate()( [query_encoding, query_value_attention]) # Add DNN layers, and create Model. # ... 
``` """ def __init__(self, use_scale=False, **kwargs): super(Attention, self).__init__(**kwargs) self.use_scale = use_scale def build(self, input_shape): """Creates scale variable if use_scale==True.""" if self.use_scale: self.scale = self.add_weight( name='scale', shape=(), initializer=init_ops.ones_initializer(), dtype=self.dtype, trainable=True) else: self.scale = None super(Attention, self).build(input_shape) def _calculate_scores(self, query, key): """Calculates attention scores as a query-key dot product. Args: query: Query tensor of shape `[batch_size, Tq, dim]`. key: Key tensor of shape `[batch_size, Tv, dim]`. Returns: Tensor of shape `[batch_size, Tq, Tv]`. """ scores = math_ops.matmul(query, key, transpose_b=True) if self.scale is not None: scores *= self.scale return scores def get_config(self): config = {'use_scale': self.use_scale} base_config = super(Attention, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.AdditiveAttention') class AdditiveAttention(BaseDenseAttention): """Additive attention layer, a.k.a. Bahdanau-style attention. Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of shape `[batch_size, Tv, dim]` and `key` tensor of shape `[batch_size, Tv, dim]`. The calculation follows the steps: 1. Reshape `query` and `key` into shapes `[batch_size, Tq, 1, dim]` and `[batch_size, 1, Tv, dim]` respectively. 2. Calculate scores with shape `[batch_size, Tq, Tv]` as a non-linear sum: `scores = tf.reduce_sum(tf.tanh(query + key), axis=-1)` 3. Use scores to calculate a distribution with shape `[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`. 4. Use `distribution` to create a linear combination of `value` with shape `[batch_size, Tq, dim]`: `return tf.matmul(distribution, value)`. Args: use_scale: If `True`, will create a variable to scale the attention scores. causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such that position `i` cannot attend to positions `j > i`. This prevents the flow of information from the future towards the past. Call Arguments: inputs: List of the following tensors: * query: Query `Tensor` of shape `[batch_size, Tq, dim]`. * value: Value `Tensor` of shape `[batch_size, Tv, dim]`. * key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not given, will use `value` for both `key` and `value`, which is the most common case. mask: List of the following tensors: * query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`. If given, the output will be zero at the positions where `mask==False`. * value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`. If given, will apply the mask such that values at positions where `mask==False` do not contribute to the result. Output shape: Attention outputs of shape `[batch_size, Tq, dim]`. The meaning of `query`, `value` and `key` depends on the application. In the case of text similarity, for example, `query` is the sequence embeddings of the first piece of text and `value` is the sequence embeddings of the second piece of text. `key` is usually the same tensor as `value`. Here is a code example for using `AdditiveAttention` in a CNN+Attention network: ```python # Variable-length int sequences. query_input = tf.keras.Input(shape=(None,), dtype='int32') value_input = tf.keras.Input(shape=(None,), dtype='int32') # Embedding lookup. token_embedding = tf.keras.layers.Embedding(max_tokens, dimension) # Query embeddings of shape [batch_size, Tq, dimension]. 
query_embeddings = token_embedding(query_input) # Value embeddings of shape [batch_size, Tv, dimension]. value_embeddings = token_embedding(value_input) # CNN layer. cnn_layer = tf.keras.layers.Conv1D( filters=100, kernel_size=4, # Use 'same' padding so outputs have the same shape as inputs. padding='same') # Query encoding of shape [batch_size, Tq, filters]. query_seq_encoding = cnn_layer(query_embeddings) # Value encoding of shape [batch_size, Tv, filters]. value_seq_encoding = cnn_layer(value_embeddings) # Query-value attention of shape [batch_size, Tq, filters]. query_value_attention_seq = tf.keras.layers.AdditiveAttention()( [query_seq_encoding, value_seq_encoding]) # Reduce over the sequence axis to produce encodings of shape # [batch_size, filters]. query_encoding = tf.keras.layers.GlobalAveragePooling1D()( query_seq_encoding) query_value_attention = tf.keras.layers.GlobalAveragePooling1D()( query_value_attention_seq) # Concatenate query and document encodings to produce a DNN input layer. input_layer = tf.keras.layers.Concatenate()( [query_encoding, query_value_attention]) # Add DNN layers, and create Model. # ... ``` """ def __init__(self, use_scale=True, **kwargs): super(AdditiveAttention, self).__init__(**kwargs) self.use_scale = use_scale def build(self, input_shape): v_shape = tensor_shape.TensorShape(input_shape[1]) dim = v_shape[-1] if isinstance(dim, tensor_shape.Dimension): dim = dim.value if self.use_scale: self.scale = self.add_weight( name='scale', shape=[dim], initializer=init_ops.glorot_uniform_initializer(), dtype=self.dtype, trainable=True) else: self.scale = None super(AdditiveAttention, self).build(input_shape) def _calculate_scores(self, query, key): """Calculates attention scores as a nonlinear sum of query and key. Args: query: Query tensor of shape `[batch_size, Tq, dim]`. key: Key tensor of shape `[batch_size, Tv, dim]`. Returns: Tensor of shape `[batch_size, Tq, Tv]`. """ # Reshape tensors to enable broadcasting. # Reshape into [batch_size, Tq, 1, dim]. q_reshaped = array_ops.expand_dims(query, axis=-2) # Reshape into [batch_size, 1, Tv, dim]. k_reshaped = array_ops.expand_dims(key, axis=-3) if self.use_scale: scale = self.scale else: scale = 1. return math_ops.reduce_sum( scale * math_ops.tanh(q_reshaped + k_reshaped), axis=-1) def get_config(self): config = {'use_scale': self.use_scale} base_config = super(AdditiveAttention, self).get_config() return dict(list(base_config.items()) + list(config.items())) def _lower_triangular_mask(shape): """Creates a lower-triangular boolean mask over the last 2 dimensions.""" row_index = math_ops.cumsum( array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-2) col_index = math_ops.cumsum( array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-1) return math_ops.greater_equal(row_index, col_index) def _merge_masks(x, y): if x is None: return y if y is None: return x return math_ops.logical_and(x, y)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/dense_attention.py
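To make the three-step calculation in the `Attention` docstring concrete, the same math can be re-derived in plain NumPy (a sketch under the documented shapes, independent of the layer implementation): a query-key dot product gives the scores, a softmax over the `Tv` axis gives the distribution, and a final matmul takes the weighted combination of `value`.

import numpy as np

def dot_product_attention(query, key, value):
  # 1. Scores as a query-key dot product: [batch_size, Tq, Tv].
  scores = np.matmul(query, np.transpose(key, (0, 2, 1)))
  # 2. Softmax over the Tv axis gives the attention distribution.
  e = np.exp(scores - scores.max(axis=-1, keepdims=True))
  distribution = e / e.sum(axis=-1, keepdims=True)
  # 3. Linear combination of value rows: [batch_size, Tq, dim].
  return np.matmul(distribution, value)

q = np.random.random((2, 3, 4)).astype('float32')
v = np.random.random((2, 5, 4)).astype('float32')
out = dot_product_attention(q, v, v)  # key defaults to value
assert out.shape == (2, 3, 4)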
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for advanced activation layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test @keras_parameterized.run_all_keras_modes class AdvancedActivationsTest(keras_parameterized.TestCase): def test_leaky_relu(self): for alpha in [0., .5, -1.]: testing_utils.layer_test(keras.layers.LeakyReLU, kwargs={'alpha': alpha}, input_shape=(2, 3, 4)) def test_prelu(self): testing_utils.layer_test(keras.layers.PReLU, kwargs={}, input_shape=(2, 3, 4)) def test_prelu_share(self): testing_utils.layer_test(keras.layers.PReLU, kwargs={'shared_axes': 1}, input_shape=(2, 3, 4)) def test_elu(self): for alpha in [0., .5, -1.]: testing_utils.layer_test(keras.layers.ELU, kwargs={'alpha': alpha}, input_shape=(2, 3, 4)) def test_thresholded_relu(self): testing_utils.layer_test(keras.layers.ThresholdedReLU, kwargs={'theta': 0.5}, input_shape=(2, 3, 4)) def test_softmax(self): testing_utils.layer_test(keras.layers.Softmax, kwargs={'axis': 1}, input_shape=(2, 3, 4)) def test_relu(self): testing_utils.layer_test(keras.layers.ReLU, kwargs={'max_value': 10}, input_shape=(2, 3, 4)) x = keras.backend.ones((3, 4)) if not context.executing_eagerly(): # Test that we use `leaky_relu` when appropriate in graph mode. self.assertTrue( 'LeakyRelu' in keras.layers.ReLU(negative_slope=0.2)(x).name) # Test that we use `relu` when appropriate in graph mode. self.assertTrue('Relu' in keras.layers.ReLU()(x).name) # Test that we use `relu6` when appropriate in graph mode. self.assertTrue('Relu6' in keras.layers.ReLU(max_value=6)(x).name) def test_relu_with_invalid_arg(self): with self.assertRaisesRegexp( ValueError, 'max_value of Relu layer cannot be negative value: -10'): testing_utils.layer_test(keras.layers.ReLU, kwargs={'max_value': -10}, input_shape=(2, 3, 4)) with self.assertRaisesRegexp( ValueError, 'negative_slope of Relu layer cannot be negative value: -2'): with self.cached_session(): testing_utils.layer_test( keras.layers.ReLU, kwargs={'negative_slope': -2}, input_shape=(2, 3, 4)) @keras_parameterized.run_with_all_model_types def test_layer_as_activation(self): layer = keras.layers.Dense(1, activation=keras.layers.ReLU()) model = testing_utils.get_model_from_layers([layer], input_shape=(10,)) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/advanced_activations_test.py
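For reference, the `ReLU` semantics these tests exercise (`max_value`, `negative_slope`, `threshold`) can be written as a small NumPy function; this is a sketch of the documented behavior, not the layer's actual implementation:

import numpy as np

def relu_reference(x, max_value=None, negative_slope=0.0, threshold=0.0):
  # f(x) = negative_slope * (x - threshold) for x < threshold,
  # f(x) = x for x >= threshold, capped at max_value when given.
  y = np.where(x >= threshold, x, negative_slope * (x - threshold))
  if max_value is not None:
    y = np.minimum(y, max_value)
  return y

x = np.linspace(-3., 3., 7)
print(relu_reference(x))                      # plain ReLU
print(relu_reference(x, max_value=6.))        # the 'Relu6' case checked above
print(relu_reference(x, negative_slope=0.2))  # the 'LeakyRelu' case checked above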
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for recurrent layers functionality other than GRU, LSTM, SimpleRNN. See also: lstm_test.py, gru_test.py, simplernn_test.py. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import random_seed from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.keras.layers import recurrent as rnn_v1 from tensorflow.python.keras.layers import recurrent_v2 as rnn_v2 from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import rnn_cell from tensorflow.python.ops import special_math_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables as variables_lib from tensorflow.python.platform import test from tensorflow.python.training.tracking import util as trackable_util from tensorflow.python.util import nest from tensorflow.python.util import object_identity # Used for nested input/output/state RNN test. NestedInput = collections.namedtuple('NestedInput', ['t1', 't2']) NestedState = collections.namedtuple('NestedState', ['s1', 's2']) @keras_parameterized.run_all_keras_modes class RNNTest(keras_parameterized.TestCase): def test_minimal_rnn_cell_non_layer(self): class MinimalRNNCell(object): def __init__(self, units, input_dim): self.units = units self.state_size = units self.kernel = keras.backend.variable( np.random.random((input_dim, units))) def call(self, inputs, states): prev_output = states[0] output = keras.backend.dot(inputs, self.kernel) + prev_output return output, [output] # Basic test case. cell = MinimalRNNCell(32, 5) x = keras.Input((None, 5)) layer = keras.layers.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. 
cells = [MinimalRNNCell(8, 5), MinimalRNNCell(32, 8), MinimalRNNCell(32, 32)] layer = keras.layers.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) def test_minimal_rnn_cell_non_layer_multiple_states(self): class MinimalRNNCell(object): def __init__(self, units, input_dim): self.units = units self.state_size = (units, units) self.kernel = keras.backend.variable( np.random.random((input_dim, units))) def call(self, inputs, states): prev_output_1 = states[0] prev_output_2 = states[1] output = keras.backend.dot(inputs, self.kernel) output += prev_output_1 output -= prev_output_2 return output, [output * 2, output * 3] # Basic test case. cell = MinimalRNNCell(32, 5) x = keras.Input((None, 5)) layer = keras.layers.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. cells = [MinimalRNNCell(8, 5), MinimalRNNCell(16, 8), MinimalRNNCell(32, 16)] layer = keras.layers.RNN(cells) self.assertEqual(layer.cell.state_size, ((8, 8), (16, 16), (32, 32))) self.assertEqual(layer.cell.output_size, 32) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) def test_minimal_rnn_cell_layer(self): class MinimalRNNCell(keras.layers.Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super(MinimalRNNCell, self).__init__(**kwargs) def build(self, input_shape): self.kernel = self.add_weight(shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.built = True def call(self, inputs, states): prev_output = states[0] h = keras.backend.dot(inputs, self.kernel) output = h + keras.backend.dot(prev_output, self.recurrent_kernel) return output, [output] def get_config(self): config = {'units': self.units} base_config = super(MinimalRNNCell, self).get_config() return dict(list(base_config.items()) + list(config.items())) # Test basic case. x = keras.Input((None, 5)) cell = MinimalRNNCell(32) layer = keras.layers.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test basic case serialization. x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}): layer = keras.layers.RNN.from_config(config) y = layer(x) model = keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) self.assertAllClose(y_np, y_np_2, atol=1e-4) # Test stacking. 
cells = [MinimalRNNCell(8), MinimalRNNCell(12), MinimalRNNCell(32)] layer = keras.layers.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacked RNN serialization. x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}): layer = keras.layers.RNN.from_config(config) y = layer(x) model = keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) self.assertAllClose(y_np, y_np_2, atol=1e-4) def test_minimal_rnn_cell_abstract_rnn_cell(self): class MinimalRNNCell(keras.layers.AbstractRNNCell): def __init__(self, units, **kwargs): self.units = units super(MinimalRNNCell, self).__init__(**kwargs) @property def state_size(self): return self.units def build(self, input_shape): self.kernel = self.add_weight(shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.built = True def call(self, inputs, states): prev_output = states[0] h = keras.backend.dot(inputs, self.kernel) output = h + keras.backend.dot(prev_output, self.recurrent_kernel) return output, output @property def output_size(self): return self.units cell = MinimalRNNCell(32) x = keras.Input((None, 5)) layer = keras.layers.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer="rmsprop", loss="mse", run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. cells = [MinimalRNNCell(8), MinimalRNNCell(16), MinimalRNNCell(32)] layer = keras.layers.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) def test_rnn_with_time_major(self): batch = 10 time_step = 5 embedding_dim = 4 units = 3 # Test basic case. x = keras.Input((time_step, embedding_dim)) time_major_x = keras.layers.Lambda( lambda t: array_ops.transpose(t, [1, 0, 2]))(x) layer = keras.layers.SimpleRNN( units, time_major=True, return_sequences=True) self.assertEqual( layer.compute_output_shape((time_step, None, embedding_dim)).as_list(), [time_step, None, units]) y = layer(time_major_x) self.assertEqual(layer.output_shape, (time_step, None, units)) y = keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2]))(y) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch( np.zeros((batch, time_step, embedding_dim)), np.zeros((batch, time_step, units))) # Test stacking. 
x = keras.Input((time_step, embedding_dim)) time_major_x = keras.layers.Lambda( lambda t: array_ops.transpose(t, [1, 0, 2]))(x) cell_units = [10, 8, 6] cells = [keras.layers.SimpleRNNCell(cell_units[i]) for i in range(3)] layer = keras.layers.RNN(cells, time_major=True, return_sequences=True) y = layer(time_major_x) self.assertEqual(layer.output_shape, (time_step, None, cell_units[-1])) y = keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2]))(y) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch( np.zeros((batch, time_step, embedding_dim)), np.zeros((batch, time_step, cell_units[-1]))) # Test masking. x = keras.Input((time_step, embedding_dim)) time_major = keras.layers.Lambda( lambda t: array_ops.transpose(t, [1, 0, 2]))(x) mask = keras.layers.Masking()(time_major) rnn = keras.layers.SimpleRNN( units, time_major=True, return_sequences=True)(mask) y = keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2]))(rnn) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch( np.zeros((batch, time_step, embedding_dim)), np.zeros((batch, time_step, units))) # Test layer output x = keras.Input((time_step, embedding_dim)) rnn_1 = keras.layers.SimpleRNN(units, return_sequences=True) y = rnn_1(x) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch( np.zeros((batch, time_step, embedding_dim)), np.zeros((batch, time_step, units))) x_np = np.random.random((batch, time_step, embedding_dim)) y_np_1 = model.predict(x_np) time_major = keras.layers.Lambda( lambda t: array_ops.transpose(t, [1, 0, 2]))(x) rnn_2 = keras.layers.SimpleRNN( units, time_major=True, return_sequences=True) y_2 = rnn_2(time_major) y_2 = keras.layers.Lambda( lambda t: array_ops.transpose(t, [1, 0, 2]))(y_2) model_2 = keras.models.Model(x, y_2) rnn_2.set_weights(rnn_1.get_weights()) y_np_2 = model_2.predict(x_np) self.assertAllClose(y_np_1, y_np_2, atol=1e-4) def test_rnn_cell_with_constants_layer(self): # Test basic case. x = keras.Input((None, 5)) c = keras.Input((3,)) cell = RNNCellWithConstants(32, constant_size=3) layer = keras.layers.RNN(cell) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32)) ) # Test basic case serialization. x_np = np.random.random((6, 5, 5)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, c_np]) weights = model.get_weights() config = layer.get_config() custom_objects = {'RNNCellWithConstants': RNNCellWithConstants} with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.RNN.from_config(config.copy()) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, c_np]) self.assertAllClose(y_np, y_np_2, atol=1e-4) # test flat list inputs. 
with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.RNN.from_config(config.copy()) y = layer([x, c]) model = keras.models.Model([x, c], y) model.set_weights(weights) y_np_3 = model.predict([x_np, c_np]) self.assertAllClose(y_np, y_np_3, atol=1e-4) # Test stacking. cells = [keras.layers.recurrent.GRUCell(8), RNNCellWithConstants(12, constant_size=3), RNNCellWithConstants(32, constant_size=3)] layer = keras.layers.recurrent.RNN(cells) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32)) ) # Test GRUCell reset_after property. x = keras.Input((None, 5)) c = keras.Input((3,)) cells = [keras.layers.recurrent.GRUCell(32, reset_after=True)] layer = keras.layers.recurrent.RNN(cells) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32)) ) # Test stacked RNN serialization x_np = np.random.random((6, 5, 5)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, c_np]) weights = model.get_weights() config = layer.get_config() with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.recurrent.RNN.from_config(config.copy()) y = layer(x, constants=c) model = keras.models.Model([x, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, c_np]) self.assertAllClose(y_np, y_np_2, atol=1e-4) def test_rnn_cell_with_non_keras_constants(self): # Test basic case. x = keras.Input((None, 5)) c = array_ops.zeros([6, 3], dtype=dtypes.float32) cell = RNNCellWithConstants(32, constant_size=3) layer = keras.layers.RNN(cell) y = layer(x, constants=c) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. cells = [keras.layers.recurrent.GRUCell(8), RNNCellWithConstants(12, constant_size=3), RNNCellWithConstants(32, constant_size=3)] layer = keras.layers.recurrent.RNN(cells) y = layer(x, constants=c) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) def test_rnn_cell_with_constants_layer_passing_initial_state(self): # Test basic case. x = keras.Input((None, 5)) c = keras.Input((3,)) s = keras.Input((32,)) cell = RNNCellWithConstants(32, constant_size=3) layer = keras.layers.RNN(cell) y = layer(x, initial_state=s, constants=c) model = keras.models.Model([x, s, c], y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 3))], np.zeros((6, 32)) ) # Test basic case serialization. 
x_np = np.random.random((6, 5, 5)) s_np = np.random.random((6, 32)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, s_np, c_np]) weights = model.get_weights() config = layer.get_config() custom_objects = {'RNNCellWithConstants': RNNCellWithConstants} with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.RNN.from_config(config.copy()) y = layer(x, initial_state=s, constants=c) model = keras.models.Model([x, s, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, s_np, c_np]) self.assertAllClose(y_np, y_np_2, atol=1e-4) # verify that state is used y_np_2_different_s = model.predict([x_np, s_np + 10., c_np]) with self.assertRaises(AssertionError): self.assertAllClose(y_np, y_np_2_different_s, atol=1e-4) # test flat list inputs with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.RNN.from_config(config.copy()) y = layer([x, s, c]) model = keras.models.Model([x, s, c], y) model.set_weights(weights) y_np_3 = model.predict([x_np, s_np, c_np]) self.assertAllClose(y_np, y_np_3, atol=1e-4) def test_rnn_cell_with_non_keras_constants_and_initial_state(self): # Test basic case. x = keras.Input((None, 5)) c = array_ops.zeros([6, 3], dtype=dtypes.float32) s = array_ops.zeros([6, 32], dtype=dtypes.float32) cell = RNNCellWithConstants(32, constant_size=3) layer = keras.layers.RNN(cell) y = layer(x, initial_state=s, constants=c) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) # Test stacking. cells = [keras.layers.recurrent.GRUCell(8), RNNCellWithConstants(12, constant_size=3), RNNCellWithConstants(32, constant_size=3)] layer = keras.layers.recurrent.RNN(cells) s = [array_ops.zeros([6, 8], dtype=dtypes.float32), array_ops.zeros([6, 12], dtype=dtypes.float32), array_ops.zeros([6, 32], dtype=dtypes.float32)] y = layer(x, initial_state=s, constants=c) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32))) def test_stacked_rnn_attributes(self): if context.executing_eagerly(): self.skipTest('reduce_sum is not available in eager mode.') cells = [keras.layers.LSTMCell(1), keras.layers.LSTMCell(1)] layer = keras.layers.RNN(cells) layer.build((None, None, 1)) # Test weights self.assertEqual(len(layer.trainable_weights), 6) cells[0].trainable = False self.assertEqual(len(layer.trainable_weights), 3) self.assertEqual(len(layer.non_trainable_weights), 3) # Test `get_losses_for` and `losses` x = keras.Input((None, 1)) loss_1 = math_ops.reduce_sum(x) loss_2 = math_ops.reduce_sum(cells[0].kernel) cells[0].add_loss(loss_1, inputs=x) cells[0].add_loss(loss_2) self.assertEqual(len(layer.losses), 2) self.assertEqual(layer.get_losses_for(None), [loss_2]) self.assertEqual(layer.get_losses_for(x), [loss_1]) # Test `get_updates_for` and `updates` cells = [keras.layers.LSTMCell(1), keras.layers.LSTMCell(1)] layer = keras.layers.RNN(cells) x = keras.Input((None, 1)) _ = layer(x) update_1 = state_ops.assign_add(cells[0].kernel, x[0, 0, 0] * cells[0].kernel) update_2 = state_ops.assign_add(cells[0].kernel, array_ops.ones_like(cells[0].kernel)) # TODO(b/128682878): Remove when RNNCells are __call__'d. 
with base_layer_utils.call_context().enter(layer, x, True, None): cells[0].add_update(update_1, inputs=x) cells[0].add_update(update_2) self.assertEqual(len(layer.updates), 2) self.assertEqual(len(layer.get_updates_for(None)), 1) self.assertEqual(len(layer.get_updates_for(x)), 1) def test_rnn_dynamic_trainability(self): layer_class = keras.layers.SimpleRNN embedding_dim = 4 units = 3 layer = layer_class(units) layer.build((None, None, embedding_dim)) self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.trainable_weights), 3) self.assertEqual(len(layer.non_trainable_weights), 0) layer.trainable = False self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.trainable_weights), 0) self.assertEqual(len(layer.non_trainable_weights), 3) layer.trainable = True self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.trainable_weights), 3) self.assertEqual(len(layer.non_trainable_weights), 0) def test_state_reuse_with_dropout(self): layer_class = keras.layers.SimpleRNN embedding_dim = 4 units = 3 timesteps = 2 num_samples = 2 input1 = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim)) layer = layer_class(units, return_state=True, return_sequences=True, dropout=0.2) state = layer(input1)[1:] input2 = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim)) output = layer_class(units)(input2, initial_state=state) model = keras.Model([input1, input2], output) inputs = [np.random.random((num_samples, timesteps, embedding_dim)), np.random.random((num_samples, timesteps, embedding_dim))] model.predict(inputs) def test_builtin_rnn_cell_serialization(self): for cell_class in [keras.layers.SimpleRNNCell, keras.layers.GRUCell, keras.layers.LSTMCell]: # Test basic case. x = keras.Input((None, 5)) cell = cell_class(32) layer = keras.layers.RNN(cell) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # Test basic case serialization. x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() layer = keras.layers.RNN.from_config(config) y = layer(x) model = keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) self.assertAllClose(y_np, y_np_2, atol=1e-4) # Test stacking. cells = [cell_class(8), cell_class(12), cell_class(32)] layer = keras.layers.RNN(cells) y = layer(x) model = keras.models.Model(x, y) model.compile( optimizer='rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # Test stacked RNN serialization. 
x_np = np.random.random((6, 5, 5)) y_np = model.predict(x_np) weights = model.get_weights() config = layer.get_config() layer = keras.layers.RNN.from_config(config) y = layer(x) model = keras.models.Model(x, y) model.set_weights(weights) y_np_2 = model.predict(x_np) self.assertAllClose(y_np, y_np_2, atol=1e-4) @parameterized.named_parameters( *test_util.generate_combinations_with_testcase_name( layer=[rnn_v1.SimpleRNN, rnn_v1.GRU, rnn_v1.LSTM, rnn_v2.GRU, rnn_v2.LSTM], unroll=[True, False])) def test_rnn_dropout(self, layer, unroll): rnn_layer = layer(3, dropout=0.1, recurrent_dropout=0.1, unroll=unroll) if not unroll: x = keras.Input((None, 5)) else: x = keras.Input((5, 5)) y = rnn_layer(x) model = keras.models.Model(x, y) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x_np = np.random.random((6, 5, 5)) y_np = np.random.random((6, 3)) model.train_on_batch(x_np, y_np) @parameterized.named_parameters( *test_util.generate_combinations_with_testcase_name( cell=[keras.layers.SimpleRNNCell, keras.layers.GRUCell, keras.layers.LSTMCell], unroll=[True, False])) def test_stacked_rnn_dropout(self, cell, unroll): cells = [cell(3, dropout=0.1, recurrent_dropout=0.1), cell(3, dropout=0.1, recurrent_dropout=0.1)] layer = keras.layers.RNN(cells, unroll=unroll) if not unroll: x = keras.Input((None, 5)) else: x = keras.Input((5, 5)) y = layer(x) model = keras.models.Model(x, y) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x_np = np.random.random((6, 5, 5)) y_np = np.random.random((6, 3)) model.train_on_batch(x_np, y_np) def test_dropout_mask_reuse(self): # The layer is created with recurrent_initializer = zero, so that the # the recurrent state won't affect the output. By doing this, we can verify # the output and see if the same mask is applied to for each timestep. rnn = keras.layers.SimpleRNN(3, dropout=0.5, kernel_initializer='ones', recurrent_initializer='zeros', return_sequences=True, unroll=True) inputs = constant_op.constant(1.0, shape=(6, 2, 5)) out = rnn(inputs, training=True) if not context.executing_eagerly(): self.evaluate(variables_lib.global_variables_initializer()) batch_1 = self.evaluate(out) batch_1_t0, batch_1_t1 = batch_1[:, 0, :], batch_1[:, 1, :] self.assertAllClose(batch_1_t0, batch_1_t1) # This simulate the layer called with multiple batches in eager mode if context.executing_eagerly(): out2 = rnn(inputs, training=True) else: out2 = out batch_2 = self.evaluate(out2) batch_2_t0, batch_2_t1 = batch_2[:, 0, :], batch_2[:, 1, :] self.assertAllClose(batch_2_t0, batch_2_t1) # Also validate that different dropout is used by between batches. self.assertNotAllClose(batch_1_t0, batch_2_t0) self.assertNotAllClose(batch_1_t1, batch_2_t1) def test_stacked_rnn_compute_output_shape(self): cells = [keras.layers.LSTMCell(3), keras.layers.LSTMCell(6)] embedding_dim = 4 timesteps = 2 layer = keras.layers.RNN(cells, return_state=True, return_sequences=True) output_shape = layer.compute_output_shape((None, timesteps, embedding_dim)) expected_output_shape = [(None, timesteps, 6), (None, 3), (None, 3), (None, 6), (None, 6)] self.assertEqual( [tuple(o.as_list()) for o in output_shape], expected_output_shape) # Test reverse_state_order = True for stacked cell. 
    stacked_cell = keras.layers.StackedRNNCells(
        cells, reverse_state_order=True)
    layer = keras.layers.RNN(
        stacked_cell, return_state=True, return_sequences=True)
    output_shape = layer.compute_output_shape((None, timesteps, embedding_dim))
    expected_output_shape = [(None, timesteps, 6),
                             (None, 6),
                             (None, 6),
                             (None, 3),
                             (None, 3)]
    self.assertEqual(
        [tuple(o.as_list()) for o in output_shape],
        expected_output_shape)

  def test_trackable_dependencies(self):
    rnn = keras.layers.SimpleRNN
    x = np.random.random((2, 2, 2))
    y = np.random.random((2, 2))
    model = keras.models.Sequential()
    model.add(rnn(2))
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.fit(x, y, epochs=1, batch_size=1)

    # Check whether the model variables are present in the
    # trackable list of objects.
    checkpointed_objects = object_identity.ObjectIdentitySet(
        trackable_util.list_objects(model))
    for v in model.variables:
      self.assertIn(v, checkpointed_objects)

  def test_high_dimension_RNN(self):
    # Basic test case.
    unit_a = 10
    unit_b = 20
    input_a = 5
    input_b = 10
    batch = 32
    time_step = 4

    cell = Minimal2DRNNCell(unit_a, unit_b)
    x = keras.Input((None, input_a, input_b))
    layer = keras.layers.RNN(cell)
    y = layer(x)

    self.assertEqual(cell.state_size.as_list(), [unit_a, unit_b])

    if not context.executing_eagerly():
      init_state = layer.get_initial_state(x)
      self.assertEqual(len(init_state), 1)
      self.assertEqual(init_state[0].shape.as_list(), [None, unit_a, unit_b])

    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.train_on_batch(
        np.zeros((batch, time_step, input_a, input_b)),
        np.zeros((batch, unit_a, unit_b)))
    self.assertEqual(model.output_shape, (None, unit_a, unit_b))

    # Test stacking.
    cells = [
        Minimal2DRNNCell(unit_a, unit_b),
        Minimal2DRNNCell(unit_a * 2, unit_b * 2),
        Minimal2DRNNCell(unit_a * 4, unit_b * 4)
    ]
    layer = keras.layers.RNN(cells)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.train_on_batch(
        np.zeros((batch, time_step, input_a, input_b)),
        np.zeros((batch, unit_a * 4, unit_b * 4)))
    self.assertEqual(model.output_shape, (None, unit_a * 4, unit_b * 4))

  def test_high_dimension_RNN_with_init_state(self):
    unit_a = 10
    unit_b = 20
    input_a = 5
    input_b = 10
    batch = 32
    time_step = 4

    # Basic test case.
    cell = Minimal2DRNNCell(unit_a, unit_b)
    x = keras.Input((None, input_a, input_b))
    s = keras.Input((unit_a, unit_b))
    layer = keras.layers.RNN(cell)
    y = layer(x, initial_state=s)

    model = keras.models.Model([x, s], y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.train_on_batch([
        np.zeros((batch, time_step, input_a, input_b)),
        np.zeros((batch, unit_a, unit_b))
    ], np.zeros((batch, unit_a, unit_b)))
    self.assertEqual(model.output_shape, (None, unit_a, unit_b))

    # Bad init state shape.
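    # Added note: the initial state below is deliberately shaped
    # (2 * unit_a, 2 * unit_b), which disagrees with `cell.state_size`;
    # calling the layer with it should raise a ValueError that mentions
    # the expected `cell.state_size`.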
    bad_shape_a = unit_a * 2
    bad_shape_b = unit_b * 2
    cell = Minimal2DRNNCell(unit_a, unit_b)
    x = keras.Input((None, input_a, input_b))
    s = keras.Input((bad_shape_a, bad_shape_b))
    layer = keras.layers.RNN(cell)
    with self.assertRaisesWithPredicateMatch(ValueError,
                                             'however `cell.state_size` is'):
      layer(x, initial_state=s)

  def test_inconsistent_output_state_size(self):
    batch = 32
    time_step = 4
    state_size = 5
    input_size = 6
    cell = PlusOneRNNCell(state_size)
    x = keras.Input((None, input_size))
    layer = keras.layers.RNN(cell)
    y = layer(x)

    self.assertEqual(cell.state_size, state_size)
    if not context.executing_eagerly():
      init_state = layer.get_initial_state(x)
      self.assertEqual(len(init_state), 1)
      self.assertEqual(init_state[0].shape.as_list(), [None, state_size])

    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.train_on_batch(
        np.zeros((batch, time_step, input_size)),
        np.zeros((batch, input_size)))
    self.assertEqual(model.output_shape, (None, input_size))

  def test_get_initial_state(self):
    cell = keras.layers.SimpleRNNCell(5)
    with self.assertRaisesRegexp(ValueError,
                                 'batch_size and dtype cannot be None'):
      cell.get_initial_state(None, None, None)

    if not context.executing_eagerly():
      inputs = keras.Input((None, 10))
      initial_state = cell.get_initial_state(inputs, None, None)
      self.assertEqual(initial_state.shape.as_list(), [None, 5])
      self.assertEqual(initial_state.dtype, inputs.dtype)

      batch = array_ops.shape(inputs)[0]
      dtype = inputs.dtype
      initial_state = cell.get_initial_state(None, batch, dtype)
      self.assertEqual(initial_state.shape.as_list(), [None, 5])
      self.assertEqual(initial_state.dtype, inputs.dtype)
    else:
      batch = 8
      inputs = np.random.random((batch, 10))
      initial_state = cell.get_initial_state(inputs, None, None)
      self.assertEqual(initial_state.shape.as_list(), [8, 5])
      self.assertEqual(initial_state.dtype, inputs.dtype)

      dtype = inputs.dtype
      initial_state = cell.get_initial_state(None, batch, dtype)
      self.assertEqual(initial_state.shape.as_list(), [batch, 5])
      self.assertEqual(initial_state.dtype, inputs.dtype)

  def test_nested_input_output(self):
    batch = 10
    t = 5
    i1, i2, i3 = 3, 4, 5
    o1, o2, o3 = 2, 3, 4

    cell = NestedCell(o1, o2, o3)
    rnn = keras.layers.RNN(cell)

    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))

    outputs = rnn((input_1, input_2))

    self.assertEqual(len(outputs), 2)
    self.assertEqual(outputs[0].shape.as_list(), [None, o1])
    self.assertEqual(outputs[1].shape.as_list(), [None, o2, o3])

    model = keras.models.Model((input_1, input_2), outputs)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.train_on_batch(
        [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
        [np.zeros((batch, o1)), np.zeros((batch, o2, o3))])
    self.assertEqual(model.output_shape, [(None, o1), (None, o2, o3)])

    cell = NestedCell(o1, o2, o3, use_tuple=True)
    rnn = keras.layers.RNN(cell)

    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))

    outputs = rnn(NestedInput(t1=input_1, t2=input_2))

    self.assertEqual(len(outputs), 2)
    self.assertEqual(outputs[0].shape.as_list(), [None, o1])
    self.assertEqual(outputs[1].shape.as_list(), [None, o2, o3])

    model = keras.models.Model([input_1, input_2], outputs)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.train_on_batch(
        [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
        [np.zeros((batch, o1)), np.zeros((batch, o2, o3))])
    self.assertEqual(model.output_shape, [(None, o1), (None, o2, o3)])

  def test_nested_input_output_with_state(self):
    batch = 10
    t = 5
    i1, i2, i3 = 3, 4, 5
    o1, o2, o3 = 2, 3, 4

    cell = NestedCell(o1, o2, o3)
    rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)

    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))

    output1, output2, s1, s2 = rnn((input_1, input_2))

    self.assertEqual(output1.shape.as_list(), [None, t, o1])
    self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
    self.assertEqual(s1.shape.as_list(), [None, o1])
    self.assertEqual(s2.shape.as_list(), [None, o2, o3])

    model = keras.models.Model([input_1, input_2], [output1, output2])
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.train_on_batch(
        [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
        [np.zeros((batch, t, o1)), np.zeros((batch, t, o2, o3))])
    self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])

    cell = NestedCell(o1, o2, o3, use_tuple=True)
    rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)

    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))

    output1, output2, s1, s2 = rnn(NestedInput(t1=input_1, t2=input_2))

    self.assertEqual(output1.shape.as_list(), [None, t, o1])
    self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
    self.assertEqual(s1.shape.as_list(), [None, o1])
    self.assertEqual(s2.shape.as_list(), [None, o2, o3])

    model = keras.models.Model([input_1, input_2], [output1, output2])
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.train_on_batch(
        [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
        [np.zeros((batch, t, o1)), np.zeros((batch, t, o2, o3))])
    self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])

  def test_nest_input_output_with_init_state(self):
    batch = 10
    t = 5
    i1, i2, i3 = 3, 4, 5
    o1, o2, o3 = 2, 3, 4

    cell = NestedCell(o1, o2, o3)
    rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)

    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))
    init_s1 = keras.Input((o1,))
    init_s2 = keras.Input((o2, o3))

    output1, output2, s1, s2 = rnn((input_1, input_2),
                                   initial_state=(init_s1, init_s2))

    self.assertEqual(output1.shape.as_list(), [None, t, o1])
    self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
    self.assertEqual(s1.shape.as_list(), [None, o1])
    self.assertEqual(s2.shape.as_list(), [None, o2, o3])

    model = keras.models.Model([input_1, input_2, init_s1, init_s2],
                               [output1, output2])
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.train_on_batch(
        [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3)),
         np.zeros((batch, o1)), np.zeros((batch, o2, o3))],
        [np.zeros((batch, t, o1)), np.zeros((batch, t, o2, o3))])
    self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])

    cell = NestedCell(o1, o2, o3, use_tuple=True)
    rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)

    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))
    init_s1 = keras.Input((o1,))
    init_s2 = keras.Input((o2, o3))
    init_state = NestedState(s1=init_s1, s2=init_s2)

    output1, output2, s1, s2 = rnn(NestedInput(t1=input_1, t2=input_2),
                                   initial_state=init_state)

    self.assertEqual(output1.shape.as_list(), [None, t, o1])
    self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
    self.assertEqual(s1.shape.as_list(), [None, o1])
    self.assertEqual(s2.shape.as_list(), [None, o2, o3])

    model = keras.models.Model([input_1, input_2, init_s1, init_s2],
                               [output1, output2])
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.train_on_batch(
        [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3)),
         np.zeros((batch, o1)), np.zeros((batch, o2, o3))],
        [np.zeros((batch, t, o1)), np.zeros((batch, t, o2, o3))])
    self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])

  def test_peephole_lstm_cell(self):

    def _run_cell(cell_fn, **kwargs):
      inputs = array_ops.one_hot([1, 2, 3, 4], 4)
      cell = cell_fn(5, **kwargs)
      cell.build(inputs.shape)
      initial_state = cell.get_initial_state(
          inputs=inputs, batch_size=4, dtype=dtypes.float32)
      inputs, _ = cell(inputs, initial_state)
      output = inputs
      if not context.executing_eagerly():
        self.evaluate(variables_lib.global_variables_initializer())
        output = self.evaluate(output)
      return output

    random_seed.set_random_seed(12345)
    # `recurrent_activation` kwarg is set to sigmoid as that is hardcoded into
    # rnn_cell.LSTMCell.
    no_peephole_output = _run_cell(
        keras.layers.LSTMCell,
        kernel_initializer='ones',
        recurrent_activation='sigmoid',
        implementation=1)
    first_implementation_output = _run_cell(
        keras.layers.PeepholeLSTMCell,
        kernel_initializer='ones',
        recurrent_activation='sigmoid',
        implementation=1)
    second_implementation_output = _run_cell(
        keras.layers.PeepholeLSTMCell,
        kernel_initializer='ones',
        recurrent_activation='sigmoid',
        implementation=2)
    tf_lstm_cell_output = _run_cell(
        rnn_cell.LSTMCell,
        use_peepholes=True,
        initializer=init_ops.ones_initializer)
    self.assertNotAllClose(first_implementation_output, no_peephole_output)
    self.assertAllClose(first_implementation_output,
                        second_implementation_output)
    self.assertAllClose(first_implementation_output, tf_lstm_cell_output)

  def test_masking_rnn_with_output_and_states(self):

    class Cell(keras.layers.Layer):

      def __init__(self):
        self.state_size = None
        self.output_size = None
        super(Cell, self).__init__()

      def build(self, input_shape):
        self.state_size = input_shape[-1]
        self.output_size = input_shape[-1]

      def call(self, inputs, states):
        return inputs, [s + 1 for s in states]

    x = keras.Input((3, 1), name='x')
    x_masked = keras.layers.Masking()(x)
    s_0 = keras.Input((1,), name='s_0')
    y, s = keras.layers.RNN(
        Cell(), return_state=True)(x_masked, initial_state=s_0)
    model = keras.models.Model([x, s_0], [y, s])
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    # Last time step masked.
    x_np = np.array([[[1.], [2.], [0.]]])
    s_0_np = np.array([[10.]])
    y_np, s_np = model.predict([x_np, s_0_np])

    # 1 is added to the initial state two times.
    self.assertAllClose(s_np, s_0_np + 2)
    # Expect last output to be the same as last output before masking.
    self.assertAllClose(y_np, x_np[:, 1, :])

  def test_zero_output_for_masking(self):

    for unroll in [True, False]:
      cell = keras.layers.SimpleRNNCell(5)
      x = keras.Input((5, 5))
      mask = keras.layers.Masking()
      layer = keras.layers.RNN(
          cell,
          return_sequences=True,
          zero_output_for_mask=True,
          unroll=unroll)
      masked_input = mask(x)
      y = layer(masked_input)
      model = keras.models.Model(x, y)
      model.compile(
          optimizer='rmsprop',
          loss='mse',
          run_eagerly=testing_utils.should_run_eagerly(),
          experimental_run_tf_function=testing_utils.should_run_tf_function())

      np_x = np.ones((6, 5, 5))
      result_1 = model.predict(np_x)

      # Set timesteps 4 and 5 of the last record to zero (masked).
      np_x[5, 3:] = 0
      result_2 = model.predict(np_x)

      # Expect result_2 to have the same output, except at timesteps 4 and 5
      # of the last record.
      result_1[5, 3:] = 0
      self.assertAllClose(result_1, result_2)

  def test_unroll_single_step(self):
    """Even if the time dimension is only one, we should be able to unroll."""
    cell = keras.layers.SimpleRNNCell(5)
    x = keras.Input((1, 5))
    layer = keras.layers.RNN(cell, return_sequences=True, unroll=True)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    np_x = np.ones((6, 1, 5))
    result = model.predict(np_x)
    self.assertEqual((6, 1, 5), result.shape)

  def test_unroll_zero_step(self):
    """If the time dimension is None, we should fail to unroll."""
    cell = keras.layers.SimpleRNNCell(5)
    x = keras.Input((None, 5))
    layer = keras.layers.RNN(cell, return_sequences=True, unroll=True)
    with self.assertRaisesRegexp(ValueError, 'Cannot unroll a RNN.*'):
      layer(x)

  def test_full_input_spec(self):
    # See https://github.com/tensorflow/tensorflow/issues/25985
    inputs = keras.layers.Input(batch_shape=(1, 1, 1))
    state_h = keras.layers.Input(batch_shape=(1, 1))
    state_c = keras.layers.Input(batch_shape=(1, 1))
    states = [state_h, state_c]

    decoder_out = keras.layers.LSTM(1, stateful=True)(
        inputs, initial_state=states)
    model = keras.Model([inputs, state_h, state_c], decoder_out)
    model.reset_states()

  def test_reset_states(self):
    # See https://github.com/tensorflow/tensorflow/issues/25852
    with self.assertRaisesRegexp(ValueError,
                                 'it needs to know its batch size'):
      simple_rnn = keras.layers.SimpleRNN(1, stateful=True)
      simple_rnn.reset_states()

    with self.assertRaisesRegexp(ValueError,
                                 'it needs to know its batch size'):
      cell = Minimal2DRNNCell(1, 2)
      custom_rnn = keras.layers.RNN(cell, stateful=True)
      custom_rnn.reset_states()

  @parameterized.parameters(
      [keras.layers.SimpleRNNCell, keras.layers.GRUCell,
       keras.layers.LSTMCell])
  def test_stateful_rnn_with_stacking(self, cell):
    # See https://github.com/tensorflow/tensorflow/issues/28614.
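    # Added note: this test builds a stateful stacked RNN with a fixed batch
    # size, then exercises both flavors of state reset: `model.reset_states()`
    # (back to zeros) and `layer.reset_states(new_states)` with explicit
    # per-cell values.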
    batch = 12
    timesteps = 10
    input_dim = 8
    output_dim = 64
    cells = [cell(32), cell(64)]

    x = keras.Input(batch_shape=(batch, None, input_dim))
    layer = keras.layers.RNN(cells, stateful=True)
    y = layer(x)

    model = keras.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.train_on_batch(
        np.zeros((batch, timesteps, input_dim)),
        np.zeros((batch, output_dim)))
    model.predict(np.ones((batch, timesteps, input_dim)))

    model.reset_states()
    model.predict(np.ones((batch, timesteps, input_dim)))

    new_states = nest.map_structure(lambda s: np.ones((batch, s)),
                                    layer.cell.state_size)
    layer.reset_states(new_states)
    model.predict(np.ones((batch, timesteps, input_dim)))

  def test_input_dim_length(self):
    simple_rnn = keras.layers.SimpleRNN(5, input_length=10, input_dim=8)
    self.assertEqual(simple_rnn._batch_input_shape, (None, 10, 8))

    simple_rnn = keras.layers.SimpleRNN(5, input_dim=8)
    self.assertEqual(simple_rnn._batch_input_shape, (None, None, 8))

    simple_rnn = keras.layers.SimpleRNN(5, input_length=10)
    self.assertEqual(simple_rnn._batch_input_shape, (None, 10, None))

  @parameterized.parameters(
      [keras.layers.SimpleRNNCell, keras.layers.GRUCell,
       keras.layers.LSTMCell])
  def test_state_spec_with_stack_cell(self, cell):
    # See https://github.com/tensorflow/tensorflow/issues/27817 for more
    # detail.
    batch = 12
    timesteps = 10
    input_dim = 8
    output_dim = 8

    def create_cell():
      return [cell(output_dim),
              cell(output_dim),
              cell(output_dim)]

    inputs = keras.Input((timesteps, input_dim))
    encoder_output = keras.layers.RNN(create_cell(), return_state=True)(inputs)

    states = encoder_output[1:]

    decoder_output = keras.layers.RNN(
        create_cell())(inputs, initial_state=states)

    model = keras.models.Model(inputs, decoder_output)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.train_on_batch(
        np.zeros((batch, timesteps, input_dim)),
        np.zeros((batch, output_dim)))
    model.predict(np.ones((batch, timesteps, input_dim)))


class RNNCellWithConstants(keras.layers.Layer):

  def __init__(self, units, constant_size, **kwargs):
    self.units = units
    self.state_size = units
    self.constant_size = constant_size
    super(RNNCellWithConstants, self).__init__(**kwargs)

  def build(self, input_shape):
    self.input_kernel = self.add_weight(
        shape=(input_shape[-1], self.units),
        initializer='uniform',
        name='kernel')
    self.recurrent_kernel = self.add_weight(
        shape=(self.units, self.units),
        initializer='uniform',
        name='recurrent_kernel')
    self.constant_kernel = self.add_weight(
        shape=(self.constant_size, self.units),
        initializer='uniform',
        name='constant_kernel')
    self.built = True

  def call(self, inputs, states, constants):
    [prev_output] = states
    [constant] = constants
    h_input = keras.backend.dot(inputs, self.input_kernel)
    h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
    h_const = keras.backend.dot(constant, self.constant_kernel)
    output = h_input + h_state + h_const
    return output, [output]

  def get_config(self):
    config = {'units': self.units, 'constant_size': self.constant_size}
    base_config = super(RNNCellWithConstants, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


class Minimal2DRNNCell(keras.layers.Layer):
  """The minimal 2D RNN cell is a simple combination of 2 1-D RNN cells.

  Both internal state and output have 2 dimensions and are orthogonal
  to each other.
""" def __init__(self, unit_a, unit_b, **kwargs): self.unit_a = unit_a self.unit_b = unit_b self.state_size = tensor_shape.as_shape([unit_a, unit_b]) self.output_size = tensor_shape.as_shape([unit_a, unit_b]) super(Minimal2DRNNCell, self).__init__(**kwargs) def build(self, input_shape): input_a = input_shape[-2] input_b = input_shape[-1] self.kernel = self.add_weight( shape=(input_a, input_b, self.unit_a, self.unit_b), initializer='uniform', name='kernel') self.recurring_kernel = self.add_weight( shape=(self.unit_a, self.unit_b, self.unit_a, self.unit_b), initializer='uniform', name='recurring_kernel') self.bias = self.add_weight( shape=(self.unit_a, self.unit_b), initializer='uniform', name='bias') self.built = True def call(self, inputs, states): prev_output = states[0] h = special_math_ops.einsum('bij,ijkl->bkl', inputs, self.kernel) h += array_ops.expand_dims(self.bias, axis=0) output = h + special_math_ops.einsum('bij,ijkl->bkl', prev_output, self.recurring_kernel) return output, [output] class PlusOneRNNCell(keras.layers.Layer): """Add one to the input and state. This cell is used for testing state_size and output_size.""" def __init__(self, num_unit, **kwargs): self.state_size = num_unit super(PlusOneRNNCell, self).__init__(**kwargs) def build(self, input_shape): self.output_size = input_shape[-1] def call(self, inputs, states): return inputs + 1, [states[0] + 1] class NestedCell(keras.layers.Layer): def __init__(self, unit_1, unit_2, unit_3, use_tuple=False, **kwargs): self.unit_1 = unit_1 self.unit_2 = unit_2 self.unit_3 = unit_3 self.use_tuple = use_tuple super(NestedCell, self).__init__(**kwargs) # A nested state. if use_tuple: self.state_size = NestedState( s1=unit_1, s2=tensor_shape.TensorShape([unit_2, unit_3])) else: self.state_size = (unit_1, tensor_shape.TensorShape([unit_2, unit_3])) self.output_size = (unit_1, tensor_shape.TensorShape([unit_2, unit_3])) def build(self, inputs_shape): # expect input_shape to contain 2 items, [(batch, i1), (batch, i2, i3)] if self.use_tuple: input_1 = inputs_shape.t1[1] input_2, input_3 = inputs_shape.t2[1:] else: input_1 = inputs_shape[0][1] input_2, input_3 = inputs_shape[1][1:] self.kernel_1 = self.add_weight( shape=(input_1, self.unit_1), initializer='uniform', name='kernel_1') self.kernel_2_3 = self.add_weight( shape=(input_2, input_3, self.unit_2, self.unit_3), initializer='uniform', name='kernel_2_3') def call(self, inputs, states): # inputs should be in [(batch, input_1), (batch, input_2, input_3)] # state should be in shape [(batch, unit_1), (batch, unit_2, unit_3)] flatten_inputs = nest.flatten(inputs) s1, s2 = states output_1 = math_ops.matmul(flatten_inputs[0], self.kernel_1) output_2_3 = special_math_ops.einsum('bij,ijkl->bkl', flatten_inputs[1], self.kernel_2_3) state_1 = s1 + output_1 state_2_3 = s2 + output_2_3 output = [output_1, output_2_3] new_states = NestedState(s1=state_1, s2=state_2_3) return output, new_states if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/recurrent_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for merge layers."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python import keras
from tensorflow.python.keras import backend as K
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test


@keras_parameterized.run_all_keras_modes
class MergeLayersTest(keras_parameterized.TestCase):

  def test_merge_add(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    i3 = keras.layers.Input(shape=(4, 5))

    add_layer = keras.layers.Add()
    o = add_layer([i1, i2, i3])
    self.assertListEqual(o.shape.as_list(), [None, 4, 5])
    model = keras.models.Model([i1, i2, i3], o)
    model.run_eagerly = testing_utils.should_run_eagerly()
    model._experimental_run_tf_function = testing_utils.should_run_tf_function()

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    x3 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2, x3])
    self.assertEqual(out.shape, (2, 4, 5))
    self.assertAllClose(out, x1 + x2 + x3, atol=1e-4)

    self.assertEqual(
        add_layer.compute_mask([i1, i2, i3], [None, None, None]), None)
    self.assertTrue(
        np.all(
            K.eval(
                add_layer.compute_mask(
                    [i1, i2], [K.variable(x1), K.variable(x2)]))))

    with self.assertRaisesRegexp(ValueError, "`mask` should be a list."):
      add_layer.compute_mask([i1, i2, i3], x1)
    with self.assertRaisesRegexp(ValueError, "`inputs` should be a list."):
      add_layer.compute_mask(i1, [None, None, None])
    with self.assertRaisesRegexp(ValueError, " should have the same length."):
      add_layer.compute_mask([i1, i2, i3], [None, None])

  def test_merge_subtract(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    i3 = keras.layers.Input(shape=(4, 5))

    subtract_layer = keras.layers.Subtract()
    o = subtract_layer([i1, i2])
    self.assertListEqual(o.shape.as_list(), [None, 4, 5])

    model = keras.models.Model([i1, i2], o)
    model.run_eagerly = testing_utils.should_run_eagerly()
    model._experimental_run_tf_function = testing_utils.should_run_tf_function()

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    self.assertEqual(out.shape, (2, 4, 5))
    self.assertAllClose(out, x1 - x2, atol=1e-4)

    self.assertEqual(subtract_layer.compute_mask([i1, i2], [None, None]), None)
    self.assertTrue(
        np.all(
            K.eval(
                subtract_layer.compute_mask(
                    [i1, i2], [K.variable(x1), K.variable(x2)]))))

    with self.assertRaisesRegexp(ValueError, "`mask` should be a list."):
      subtract_layer.compute_mask([i1, i2], x1)
    with self.assertRaisesRegexp(ValueError, "`inputs` should be a list."):
      subtract_layer.compute_mask(i1, [None, None])
    with self.assertRaisesRegexp(ValueError,
                                 "layer should be called on exactly 2 inputs"):
      subtract_layer([i1, i2, i3])
    with self.assertRaisesRegexp(ValueError,
                                 "layer should be called on exactly 2 inputs"):
      subtract_layer([i1])

  def test_merge_multiply(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    i3 = keras.layers.Input(shape=(4, 5))
    o = keras.layers.multiply([i1, i2, i3])
    self.assertListEqual(o.shape.as_list(), [None, 4, 5])
    model = keras.models.Model([i1, i2, i3], o)
    model.run_eagerly = testing_utils.should_run_eagerly()
    model._experimental_run_tf_function = testing_utils.should_run_tf_function()

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    x3 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2, x3])
    self.assertEqual(out.shape, (2, 4, 5))
    self.assertAllClose(out, x1 * x2 * x3, atol=1e-4)

  def test_merge_average(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    o = keras.layers.average([i1, i2])
    self.assertListEqual(o.shape.as_list(), [None, 4, 5])
    model = keras.models.Model([i1, i2], o)
    model.run_eagerly = testing_utils.should_run_eagerly()
    model._experimental_run_tf_function = testing_utils.should_run_tf_function()

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    self.assertEqual(out.shape, (2, 4, 5))
    self.assertAllClose(out, 0.5 * (x1 + x2), atol=1e-4)

  def test_merge_maximum(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    o = keras.layers.maximum([i1, i2])
    self.assertListEqual(o.shape.as_list(), [None, 4, 5])
    model = keras.models.Model([i1, i2], o)
    model.run_eagerly = testing_utils.should_run_eagerly()
    model._experimental_run_tf_function = testing_utils.should_run_tf_function()

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    self.assertEqual(out.shape, (2, 4, 5))
    self.assertAllClose(out, np.maximum(x1, x2), atol=1e-4)

  def test_merge_minimum(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    o = keras.layers.minimum([i1, i2])
    self.assertListEqual(o.shape.as_list(), [None, 4, 5])
    model = keras.models.Model([i1, i2], o)
    model.run_eagerly = testing_utils.should_run_eagerly()
    model._experimental_run_tf_function = testing_utils.should_run_tf_function()

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    self.assertEqual(out.shape, (2, 4, 5))
    self.assertAllClose(out, np.minimum(x1, x2), atol=1e-4)

  def test_merge_concatenate(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    concat_layer = keras.layers.Concatenate(axis=1)
    o = concat_layer([i1, i2])
    self.assertListEqual(o.shape.as_list(), [None, 8, 5])
    model = keras.models.Model([i1, i2], o)
    model.run_eagerly = testing_utils.should_run_eagerly()
    model._experimental_run_tf_function = testing_utils.should_run_tf_function()

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    self.assertEqual(out.shape, (2, 8, 5))
    self.assertAllClose(out, np.concatenate([x1, x2], axis=1), atol=1e-4)

    self.assertEqual(concat_layer.compute_mask([i1, i2], [None, None]), None)
    self.assertTrue(
        np.all(
            K.eval(
                concat_layer.compute_mask(
                    [i1, i2], [K.variable(x1), K.variable(x2)]))))

    with self.assertRaisesRegexp(ValueError, "`mask` should be a list."):
      concat_layer.compute_mask([i1, i2], x1)
    with self.assertRaisesRegexp(ValueError, "`inputs` should be a list."):
      concat_layer.compute_mask(i1, [None, None])
    with self.assertRaisesRegexp(ValueError, "should have the same length"):
      concat_layer.compute_mask([i1, i2], [None])
    with self.assertRaisesRegexp(ValueError,
                                 "layer should be called on a list of inputs"):
      concat_layer(i1)

  def test_merge_dot(self):
    i1 = keras.layers.Input(shape=(4,))
    i2 = keras.layers.Input(shape=(4,))
    o = keras.layers.dot([i1, i2], axes=1)
    self.assertListEqual(o.shape.as_list(), [None, 1])
    model = keras.models.Model([i1, i2], o)
    model.run_eagerly = testing_utils.should_run_eagerly()
    model._experimental_run_tf_function = testing_utils.should_run_tf_function()
    _ = keras.layers.Dot(axes=1).get_config()

    x1 = np.random.random((2, 4))
    x2 = np.random.random((2, 4))
    out = model.predict([x1, x2])
    self.assertEqual(out.shape, (2, 1))
    expected = np.zeros((2, 1))
    expected[0, 0] = np.dot(x1[0], x2[0])
    expected[1, 0] = np.dot(x1[1], x2[1])
    self.assertAllClose(out, expected, atol=1e-4)

    # Test with negative tuple of axes.
    o = keras.layers.dot([i1, i2], axes=(-1, -1))
    self.assertListEqual(o.shape.as_list(), [None, 1])
    model = keras.models.Model([i1, i2], o)
    model.run_eagerly = testing_utils.should_run_eagerly()
    model._experimental_run_tf_function = testing_utils.should_run_tf_function()
    out = model.predict([x1, x2])
    self.assertEqual(out.shape, (2, 1))
    self.assertAllClose(out, expected, atol=1e-4)

    # Test compute_output_shape.
    layer = keras.layers.Dot(axes=-1)
    self.assertEqual(layer.compute_output_shape([(4, 5), (4, 5)]), (4, 1))


@tf_test_util.run_all_in_graph_and_eager_modes
class MergeLayersTestNoExecution(test.TestCase):

  def test_merge_elementwise_errors(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 6))
    with self.assertRaises(ValueError):
      keras.layers.add([i1, i2])
    with self.assertRaises(ValueError):
      keras.layers.add([i1])
    with self.assertRaises(ValueError):
      keras.layers.add(i1)
    with self.assertRaises(ValueError):
      keras.layers.add([i1])

  def test_concatenate_errors(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(3, 5))
    with self.assertRaisesRegexp(ValueError, 'inputs with matching shapes'):
      keras.layers.concatenate([i1, i2], axis=-1)
    with self.assertRaisesRegexp(ValueError, 'called on a list'):
      keras.layers.concatenate(i1, axis=-1)
    with self.assertRaisesRegexp(ValueError, 'called on a list'):
      keras.layers.concatenate([i1], axis=-1)

  def test_dot_errors(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 6))
    i3 = keras.layers.Input(shape=(4, 6))
    with self.assertRaises(ValueError):
      keras.layers.dot([i1, i2], axes=-1)
    with self.assertRaises(ValueError):
      keras.layers.dot(i1, axes=-1)
    with self.assertRaises(ValueError):
      keras.layers.dot([i1], axes=-1)
    with self.assertRaises(ValueError):
      keras.layers.dot([i1, i2, i3], axes=-1)
    with self.assertRaises(ValueError):
      dot = keras.layers.Dot(1)
      dot.compute_output_shape(1)

  def test_merge_subtract(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    y = keras.layers.subtract([i1, i2])
    self.assertEqual(y.shape.as_list(), [None, 4, 5])

    # Test invalid use cases.
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(3, 5))
    with self.assertRaises(ValueError):
      keras.layers.subtract([i1, i2])
    with self.assertRaises(ValueError):
      keras.layers.subtract([i1, i1, i1])

  def test_merge_add_masking(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    m1 = keras.layers.Masking()(i1)
    layer = keras.layers.Add()
    o = layer([m1, i2])
    self.assertListEqual(o.shape.as_list(), [None, 4, 5])
    mask = layer.output_mask
    self.assertListEqual(mask.shape.as_list(), [None, 4])
  def test_merge_add_dynamic_shape(self):
    i1 = keras.Input(batch_shape=(4, None), dtype='float32')
    i2 = keras.Input(batch_shape=(4, 5), dtype='float32')
    layer = keras.layers.Add()
    o = layer([i1, i2])
    self.assertListEqual(o.shape.as_list(), [4, 5])

  def test_merge_concatenate_masking(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    m1 = keras.layers.Masking()(i1)
    layer = keras.layers.Concatenate()
    o = layer([m1, i2])
    self.assertListEqual(o.shape.as_list(), [None, 4, 10])
    mask = layer.output_mask
    self.assertListEqual(mask.shape.as_list(), [None, 4])


if __name__ == '__main__':
  test.main()
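
# --- Illustrative sketch (added; not part of the original test suite) ---
# A hedged functional-API example of the merge pattern tested above,
# including the rank-broadcasting behavior of the merge layers. Shown as
# comments so the module's behavior is unchanged.
#
#   a = keras.layers.Input(shape=(4, 5))
#   b = keras.layers.Input(shape=(5,))   # lower-rank input is broadcast
#   summed = keras.layers.Add()([a, b])  # output shape: (None, 4, 5)
#   model = keras.models.Model([a, b], summed)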
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/merge_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for locally-connected layers."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from absl.testing import parameterized

from tensorflow.python import keras
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer

_DATA_FORMAT_PADDING_IMPLEMENTATION = [
    {'data_format': 'channels_first', 'padding': 'valid', 'implementation': 1},
    {'data_format': 'channels_first', 'padding': 'same', 'implementation': 1},
    {'data_format': 'channels_last', 'padding': 'valid', 'implementation': 1},
    {'data_format': 'channels_last', 'padding': 'same', 'implementation': 1},
    {'data_format': 'channels_first', 'padding': 'valid', 'implementation': 2},
    {'data_format': 'channels_first', 'padding': 'same', 'implementation': 2},
    {'data_format': 'channels_last', 'padding': 'valid', 'implementation': 2},
    {'data_format': 'channels_last', 'padding': 'same', 'implementation': 2},
    {'data_format': 'channels_first', 'padding': 'valid', 'implementation': 3},
    {'data_format': 'channels_first', 'padding': 'same', 'implementation': 3},
    {'data_format': 'channels_last', 'padding': 'valid', 'implementation': 3},
    {'data_format': 'channels_last', 'padding': 'same', 'implementation': 3},
]


@tf_test_util.run_all_in_graph_and_eager_modes
class LocallyConnected1DLayersTest(test.TestCase, parameterized.TestCase):

  @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
  def test_locallyconnected_1d(self, data_format, padding, implementation):
    with self.cached_session():
      num_samples = 2
      num_steps = 8
      input_dim = 5
      filter_length = 3
      filters = 4

      for strides in [1]:
        if padding == 'same' and strides != 1:
          continue
        kwargs = {
            'filters': filters,
            'kernel_size': filter_length,
            'padding': padding,
            'strides': strides,
            'data_format': data_format,
            'implementation': implementation
        }

        if padding == 'same' and implementation == 1:
          self.assertRaises(ValueError,
                            keras.layers.LocallyConnected1D,
                            **kwargs)
        else:
          testing_utils.layer_test(
              keras.layers.LocallyConnected1D,
              kwargs=kwargs,
              input_shape=(num_samples, num_steps, input_dim))

  @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
  def test_locallyconnected_1d_regularization(self, data_format, padding,
                                              implementation):
    num_samples = 2
    num_steps = 8
    input_dim = 5
    filter_length = 3
    filters = 4
    kwargs = {
        'filters': filters,
        'kernel_size': filter_length,
        'kernel_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'data_format': data_format,
        'implementation': implementation,
        'padding': padding
    }

    if padding == 'same' and implementation == 1:
      self.assertRaises(ValueError,
                        keras.layers.LocallyConnected1D,
                        **kwargs)
    else:
      with self.cached_session():
        layer = keras.layers.LocallyConnected1D(**kwargs)
        layer.build((num_samples, num_steps, input_dim))
        self.assertEqual(len(layer.losses), 2)
        layer(
            keras.backend.variable(
                np.ones((num_samples, num_steps, input_dim))))
        self.assertEqual(len(layer.losses), 3)

      k_constraint = keras.constraints.max_norm(0.01)
      b_constraint = keras.constraints.max_norm(0.01)
      kwargs = {
          'filters': filters,
          'kernel_size': filter_length,
          'kernel_constraint': k_constraint,
          'bias_constraint': b_constraint,
      }
      with self.cached_session():
        layer = keras.layers.LocallyConnected1D(**kwargs)
        layer.build((num_samples, num_steps, input_dim))
        self.assertEqual(layer.kernel.constraint, k_constraint)
        self.assertEqual(layer.bias.constraint, b_constraint)


@tf_test_util.run_all_in_graph_and_eager_modes
class LocallyConnected2DLayersTest(test.TestCase, parameterized.TestCase):

  @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
  def test_locallyconnected_2d(self, data_format, padding, implementation):
    with self.cached_session():
      num_samples = 8
      filters = 3
      stack_size = 4
      num_row = 6
      num_col = 10

      for strides in [(1, 1), (2, 2)]:
        if padding == 'same' and strides != (1, 1):
          continue

        kwargs = {
            'filters': filters,
            'kernel_size': 3,
            'padding': padding,
            'kernel_regularizer': 'l2',
            'bias_regularizer': 'l2',
            'strides': strides,
            'data_format': data_format,
            'implementation': implementation
        }

        if padding == 'same' and implementation == 1:
          self.assertRaises(ValueError,
                            keras.layers.LocallyConnected2D,
                            **kwargs)
        else:
          testing_utils.layer_test(
              keras.layers.LocallyConnected2D,
              kwargs=kwargs,
              input_shape=(num_samples, num_row, num_col, stack_size))

  @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
  def test_locallyconnected_2d_channels_first(self, data_format, padding,
                                              implementation):
    with self.cached_session():
      num_samples = 8
      filters = 3
      stack_size = 4
      num_row = 6
      num_col = 10
      kwargs = {
          'filters': filters,
          'kernel_size': 3,
          'data_format': data_format,
          'implementation': implementation,
          'padding': padding
      }

      if padding == 'same' and implementation == 1:
        self.assertRaises(ValueError,
                          keras.layers.LocallyConnected2D,
                          **kwargs)
      else:
        testing_utils.layer_test(
            keras.layers.LocallyConnected2D,
            kwargs=kwargs,
            input_shape=(num_samples, num_row, num_col, stack_size))

  @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
  def test_locallyconnected_2d_regularization(self, data_format, padding,
                                              implementation):
    num_samples = 2
    filters = 3
    stack_size = 4
    num_row = 6
    num_col = 7
    kwargs = {
        'filters': filters,
        'kernel_size': 3,
        'kernel_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'implementation': implementation,
        'padding': padding,
        'data_format': data_format
    }

    if padding == 'same' and implementation == 1:
      self.assertRaises(ValueError,
                        keras.layers.LocallyConnected2D,
                        **kwargs)
    else:
      with self.cached_session():
        layer = keras.layers.LocallyConnected2D(**kwargs)
        layer.build((num_samples, num_row, num_col, stack_size))
        self.assertEqual(len(layer.losses), 2)
        layer(
            keras.backend.variable(
                np.ones((num_samples, num_row, num_col, stack_size))))
        self.assertEqual(len(layer.losses), 3)

      k_constraint = keras.constraints.max_norm(0.01)
      b_constraint = keras.constraints.max_norm(0.01)
      kwargs = {
          'filters': filters,
          'kernel_size': 3,
          'kernel_constraint': k_constraint,
          'bias_constraint': b_constraint,
      }
      with self.cached_session():
        layer = keras.layers.LocallyConnected2D(**kwargs)
        layer.build((num_samples, num_row, num_col, stack_size))
        self.assertEqual(layer.kernel.constraint, k_constraint)
        self.assertEqual(layer.bias.constraint, b_constraint)


@tf_test_util.run_all_in_graph_and_eager_modes
class LocallyConnectedImplementationModeTest(test.TestCase,
                                             parameterized.TestCase):

  @parameterized.parameters([
      {'width': 1, 'data_format': 'channels_first'},
      {'width': 1, 'data_format': 'channels_last'},
      {'width': 6, 'data_format': 'channels_first'},
      {'width': 6, 'data_format': 'channels_last'},
  ])
  def test_locallyconnected_implementation(self, width, data_format):
    with self.cached_session():
      num_samples = 4
      num_classes = 3
      num_epochs = 2

      np.random.seed(1)
      targets = np.random.randint(0, num_classes, (num_samples,))

      height = 7
      filters = 2
      inputs = get_inputs(data_format, filters, height, num_samples, width)

      kernel_x = (3,)
      kernel_y = () if width == 1 else (2,)
      stride_x = (1,)
      stride_y = () if width == 1 else (3,)
      layers = 2

      kwargs = {
          'layers': layers,
          'filters': filters,
          'kernel_size': kernel_x + kernel_y,
          'strides': stride_x + stride_y,
          'data_format': data_format,
          'num_classes': num_classes
      }

      model_1 = get_model(implementation=1, **kwargs)
      model_2 = get_model(implementation=2, **kwargs)
      model_3 = get_model(implementation=3, **kwargs)

      # Build models.
      model_1.train_on_batch(inputs, targets)
      model_2.train_on_batch(inputs, targets)
      model_3.train_on_batch(inputs, targets)

      # Copy weights.
      copy_model_weights(model_from=model_2, model_to=model_1)
      copy_model_weights(model_from=model_2, model_to=model_3)

      # Compare outputs at initialization.
      out_1 = model_1.call(inputs)
      out_2 = model_2.call(inputs)
      out_3 = model_3.call(inputs)
      self.assertAllCloseAccordingToType(out_2, out_1, rtol=1e-5, atol=1e-5)
      self.assertAllCloseAccordingToType(out_2, out_3, rtol=1e-5, atol=1e-5)
      self.assertAllCloseAccordingToType(out_1, out_3, rtol=1e-5, atol=1e-5)

      # Train.
      model_1.fit(x=inputs, y=targets, epochs=num_epochs,
                  batch_size=num_samples)
      model_2.fit(x=inputs, y=targets, epochs=num_epochs,
                  batch_size=num_samples)
      model_3.fit(x=inputs, y=targets, epochs=num_epochs,
                  batch_size=num_samples)

      # Compare outputs after a few training steps.
      out_1 = model_1.call(inputs)
      out_2 = model_2.call(inputs)
      out_3 = model_3.call(inputs)
      self.assertAllCloseAccordingToType(out_2, out_1, atol=2e-4)
      self.assertAllCloseAccordingToType(out_2, out_3, atol=2e-4)
      self.assertAllCloseAccordingToType(out_1, out_3, atol=2e-4)

  def test_make_2d(self):
    input_shapes = [
        (0,), (0, 0), (1,), (2,), (3,), (1, 0), (0, 3), (1, 1), (1, 2),
        (3, 1), (2, 2), (3, 3), (1, 0, 1), (5, 2, 3), (3, 5, 6, 7, 0),
        (3, 2, 2, 4, 4), (1, 2, 3, 4, 7, 2),
    ]
    np.random.seed(1)

    for input_shape in input_shapes:
      inputs = np.random.normal(0, 1, input_shape)
      inputs_tf = keras.backend.variable(inputs)

      split_dim = np.random.randint(0, inputs.ndim + 1)
      shape_2d = (int(np.prod(inputs.shape[:split_dim])),
                  int(np.prod(inputs.shape[split_dim:])))
      inputs_2d = np.reshape(inputs, shape_2d)

      inputs_2d_tf = keras.layers.local.make_2d(inputs_tf, split_dim)
      inputs_2d_tf = keras.backend.get_value(inputs_2d_tf)

      self.assertAllCloseAccordingToType(inputs_2d, inputs_2d_tf)


def get_inputs(data_format, filters, height, num_samples, width):
  if data_format == 'channels_first':
    if width == 1:
      input_shape = (filters, height)
    else:
      input_shape = (filters, height, width)
  elif data_format == 'channels_last':
    if width == 1:
      input_shape = (height, filters)
    else:
      input_shape = (height, width, filters)
  else:
    raise NotImplementedError(data_format)

  inputs = np.random.normal(
      0, 1, (num_samples,) + input_shape).astype(np.float32)
  return inputs


def xent(y_true, y_pred):
  y_true = keras.backend.cast(
      keras.backend.reshape(y_true, (-1,)),
      keras.backend.dtypes_module.int32)
  return keras.backend.nn.sparse_softmax_cross_entropy_with_logits(
      labels=y_true, logits=y_pred)


def get_model(implementation, filters, kernel_size, strides, layers,
              num_classes, data_format):
  model = keras.Sequential()

  if len(kernel_size) == 1:
    lc_layer = keras.layers.LocallyConnected1D
  elif len(kernel_size) == 2:
    lc_layer = keras.layers.LocallyConnected2D
  else:
    raise NotImplementedError(kernel_size)

  for _ in range(layers):
    model.add(lc_layer(
        padding='valid',
        kernel_initializer=keras.initializers.random_normal(),
        bias_initializer=keras.initializers.random_normal(),
        filters=filters,
        strides=strides,
        kernel_size=kernel_size,
        activation=keras.activations.relu,
        data_format=data_format,
        implementation=implementation))

  model.add(keras.layers.Flatten())
  model.add(keras.layers.Dense(num_classes))
  model.compile(
      optimizer=RMSPropOptimizer(0.01),
      metrics=[keras.metrics.categorical_accuracy],
      loss=xent)
  return model


def copy_lc_weights_2_to_1(lc_layer_2_from, lc_layer_1_to):
  lc_2_kernel, lc_2_bias = lc_layer_2_from.weights
  lc_2_kernel_masked = lc_2_kernel * lc_layer_2_from.kernel_mask

  data_format = lc_layer_2_from.data_format

  if data_format == 'channels_first':
    if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D):
      permutation = (3, 0, 1, 2)
    elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D):
      permutation = (4, 5, 0, 1, 2, 3)
    else:
      raise NotImplementedError(lc_layer_2_from)

  elif data_format == 'channels_last':
    if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D):
      permutation = (2, 0, 1, 3)
    elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D):
      permutation = (3, 4, 0, 1, 2, 5)
    else:
      raise NotImplementedError(lc_layer_2_from)

  else:
    raise NotImplementedError(data_format)

  lc_2_kernel_masked = keras.backend.permute_dimensions(
      lc_2_kernel_masked, permutation)

  lc_2_kernel_mask = keras.backend.math_ops.not_equal(
      lc_2_kernel_masked, 0)
  lc_2_kernel_flat = keras.backend.array_ops.boolean_mask(
      lc_2_kernel_masked, lc_2_kernel_mask)
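  # Added note: implementation 2 stores a dense kernel plus a mask, while
  # implementation 1 stores only the active entries; the permute/mask steps
  # above and the reshape below repack the dense kernel into implementation
  # 1's compact layout without changing the layer's function.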
  lc_2_kernel_reshaped = keras.backend.reshape(lc_2_kernel_flat,
                                               lc_layer_1_to.kernel.shape)

  lc_2_kernel_reshaped = keras.backend.get_value(lc_2_kernel_reshaped)
  lc_2_bias = keras.backend.get_value(lc_2_bias)

  lc_layer_1_to.set_weights([lc_2_kernel_reshaped, lc_2_bias])


def copy_lc_weights_2_to_3(lc_layer_2_from, lc_layer_3_to):
  lc_2_kernel, lc_2_bias = lc_layer_2_from.weights
  lc_2_kernel_masked = lc_2_kernel * lc_layer_2_from.kernel_mask

  lc_2_kernel_masked = keras.layers.local.make_2d(
      lc_2_kernel_masked,
      split_dim=keras.backend.ndim(lc_2_kernel_masked) // 2)
  lc_2_kernel_masked = keras.backend.transpose(lc_2_kernel_masked)
  lc_2_kernel_mask = keras.backend.math_ops.not_equal(lc_2_kernel_masked, 0)
  lc_2_kernel_flat = keras.backend.array_ops.boolean_mask(
      lc_2_kernel_masked, lc_2_kernel_mask)

  lc_2_kernel_flat = keras.backend.get_value(lc_2_kernel_flat)
  lc_2_bias = keras.backend.get_value(lc_2_bias)

  lc_layer_3_to.set_weights([lc_2_kernel_flat, lc_2_bias])


def copy_model_weights(model_from, model_to):
  for l in range(len(model_from.layers)):
    layer_from = model_from.layers[l]
    layer_to = model_to.layers[l]

    if (isinstance(layer_from,
                   (keras.layers.LocallyConnected2D,
                    keras.layers.LocallyConnected1D)) and
        isinstance(layer_to,
                   (keras.layers.LocallyConnected2D,
                    keras.layers.LocallyConnected1D))):
      if layer_from.implementation == 2:
        if layer_to.implementation == 1:
          copy_lc_weights_2_to_1(layer_from, layer_to)
        elif layer_to.implementation == 3:
          copy_lc_weights_2_to_3(layer_from, layer_to)
        else:
          raise NotImplementedError
      else:
        raise NotImplementedError

    elif isinstance(layer_from, keras.layers.Dense):
      weights_2, bias_2 = layer_from.weights
      weights_2 = keras.backend.get_value(weights_2)
      bias_2 = keras.backend.get_value(bias_2)
      layer_to.set_weights([weights_2, bias_2])

    else:
      continue


if __name__ == '__main__':
  test.main()
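
# --- Illustrative note (added; not part of the original test suite) ---
# The weight-copy helpers above suggest how the `implementation` modes store
# their kernels: mode 1 keeps only the active per-position entries, mode 2
# keeps a dense kernel plus a mask, and mode 3 appears to keep a flattened
# vector of the non-zero entries (see `copy_lc_weights_2_to_3`). All three
# compute the same function, which is what these tests verify. For example:
#
#   layer = keras.layers.LocallyConnected2D(
#       filters=2, kernel_size=(3, 3), implementation=2)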
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/local_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=not-callable
# pylint: disable=redefined-builtin
"""Layers that can merge several inputs into one.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import keras_export


class _Merge(Layer):
  """Generic merge layer for elementwise merge functions.

  Used to implement `Sum`, `Average`, etc.

  Arguments:
    **kwargs: standard layer keyword arguments.
  """

  def __init__(self, **kwargs):
    super(_Merge, self).__init__(**kwargs)
    self.supports_masking = True

  def _merge_function(self, inputs):
    raise NotImplementedError

  def _compute_elemwise_op_output_shape(self, shape1, shape2):
    """Computes the shape of the resultant of an elementwise operation.

    Arguments:
        shape1: tuple or None. Shape of the first tensor.
        shape2: tuple or None. Shape of the second tensor.

    Returns:
        expected output shape when an element-wise operation is
        carried out on 2 tensors with shapes shape1 and shape2.
        tuple or None.

    Raises:
        ValueError: if shape1 and shape2 are not compatible for
            element-wise operations.
    """
    if None in [shape1, shape2]:
      return None
    elif len(shape1) < len(shape2):
      return self._compute_elemwise_op_output_shape(shape2, shape1)
    elif not shape2:
      return shape1
    output_shape = list(shape1[:-len(shape2)])
    for i, j in zip(shape1[-len(shape2):], shape2):
      if i is None or j is None:
        output_shape.append(None)
      elif i == 1:
        output_shape.append(j)
      elif j == 1:
        output_shape.append(i)
      else:
        if i != j:
          raise ValueError(
              'Operands could not be broadcast '
              'together with shapes ' + str(shape1) + ' ' + str(shape2))
        output_shape.append(i)
    return tuple(output_shape)

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    # Used purely for shape validation.
    if not isinstance(input_shape, list):
      raise ValueError('A merge layer should be called on a list of inputs.')
    if len(input_shape) < 2:
      raise ValueError('A merge layer should be called '
                       'on a list of at least 2 inputs. '
                       'Got ' + str(len(input_shape)) + ' inputs.')
    batch_sizes = [s[0] for s in input_shape if s is not None]
    batch_sizes = set(batch_sizes)
    batch_sizes -= set([None])
    if len(batch_sizes) > 1:
      raise ValueError(
          'Can not merge tensors with different '
          'batch sizes. Got tensors with shapes : ' + str(input_shape))
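    # Added note: the loop below folds the per-input shapes (batch dimension
    # dropped) through the numpy-style broadcasting rules implemented in
    # `_compute_elemwise_op_output_shape` above.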
    if input_shape[0] is None:
      output_shape = None
    else:
      output_shape = input_shape[0][1:]
    for i in range(1, len(input_shape)):
      if input_shape[i] is None:
        shape = None
      else:
        shape = input_shape[i][1:]
      output_shape = self._compute_elemwise_op_output_shape(output_shape,
                                                            shape)
    # If the inputs have different ranks, we have to reshape them
    # to make them broadcastable.
    if None not in input_shape and len(set(map(len, input_shape))) == 1:
      self._reshape_required = False
    else:
      self._reshape_required = True

  def call(self, inputs):
    if not isinstance(inputs, list):
      raise ValueError('A merge layer should be called on a list of inputs.')
    if self._reshape_required:
      reshaped_inputs = []
      input_ndims = list(map(K.ndim, inputs))
      if None not in input_ndims:
        # If ranks of all inputs are available,
        # we simply expand each of them at axis=1
        # until all of them have the same rank.
        max_ndim = max(input_ndims)
        for x in inputs:
          x_ndim = K.ndim(x)
          for _ in range(max_ndim - x_ndim):
            x = array_ops.expand_dims(x, axis=1)
          reshaped_inputs.append(x)
        return self._merge_function(reshaped_inputs)
      else:
        # Transpose all inputs so that batch size is the last dimension.
        # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
        transposed = False
        for x in inputs:
          x_ndim = K.ndim(x)
          if x_ndim is None:
            x_shape = array_ops.shape(x)
            batch_size = x_shape[0]
            new_shape = K.concatenate(
                [x_shape[1:], array_ops.expand_dims(batch_size, axis=-1)])
            x_transposed = array_ops.reshape(
                x,
                array_ops.stack(
                    [batch_size, math_ops.reduce_prod(x_shape[1:])], axis=0))
            x_transposed = array_ops.transpose(x_transposed, perm=(1, 0))
            x_transposed = array_ops.reshape(x_transposed, new_shape)
            reshaped_inputs.append(x_transposed)
            transposed = True
          elif x_ndim > 1:
            dims = list(range(1, x_ndim)) + [0]
            reshaped_inputs.append(array_ops.transpose(x, perm=dims))
            transposed = True
          else:
            # We don't transpose inputs if they are 1D vectors or scalars.
            reshaped_inputs.append(x)
        y = self._merge_function(reshaped_inputs)
        y_ndim = K.ndim(y)
        if transposed:
          # If inputs have been transposed, we have to transpose the output
          # too.
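          # Added note: when the merged result's rank is only known at run
          # time, the shape math below moves the trailing batch dimension
          # back to the front with a reshape/transpose/reshape round trip
          # instead of a static permutation.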
          if y_ndim is None:
            y_shape = array_ops.shape(y)
            y_ndim = array_ops.shape(y_shape)[0]
            batch_size = y_shape[y_ndim - 1]
            new_shape = K.concatenate([
                array_ops.expand_dims(batch_size, axis=-1),
                y_shape[:y_ndim - 1]
            ])
            y = array_ops.reshape(y, (-1, batch_size))
            y = array_ops.transpose(y, perm=(1, 0))
            y = array_ops.reshape(y, new_shape)
          elif y_ndim > 1:
            dims = [y_ndim - 1] + list(range(y_ndim - 1))
            y = array_ops.transpose(y, perm=dims)
        return y
    else:
      return self._merge_function(inputs)

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    if input_shape[0] is None:
      output_shape = None
    else:
      output_shape = input_shape[0][1:]
    for i in range(1, len(input_shape)):
      if input_shape[i] is None:
        shape = None
      else:
        shape = input_shape[i][1:]
      output_shape = self._compute_elemwise_op_output_shape(output_shape,
                                                            shape)
    batch_sizes = [s[0] for s in input_shape if s is not None]
    batch_sizes = set(batch_sizes)
    batch_sizes -= set([None])
    if len(batch_sizes) == 1:
      output_shape = (list(batch_sizes)[0],) + output_shape
    else:
      output_shape = (None,) + output_shape
    return output_shape

  def compute_mask(self, inputs, mask=None):
    if mask is None:
      return None
    if not isinstance(mask, list):
      raise ValueError('`mask` should be a list.')
    if not isinstance(inputs, list):
      raise ValueError('`inputs` should be a list.')
    if len(mask) != len(inputs):
      raise ValueError('The lists `inputs` and `mask` '
                       'should have the same length.')
    if all(m is None for m in mask):
      return None
    masks = [array_ops.expand_dims(m, axis=0) for m in mask if m is not None]
    return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)


@keras_export('keras.layers.Add')
class Add(_Merge):
  """Layer that adds a list of inputs.

  It takes as input a list of tensors,
  all of the same shape, and returns
  a single tensor (also of the same shape).

  Examples:

  ```python
      import keras

      input1 = keras.layers.Input(shape=(16,))
      x1 = keras.layers.Dense(8, activation='relu')(input1)
      input2 = keras.layers.Input(shape=(32,))
      x2 = keras.layers.Dense(8, activation='relu')(input2)
      # equivalent to `added = keras.layers.add([x1, x2])`
      added = keras.layers.Add()([x1, x2])

      out = keras.layers.Dense(4)(added)
      model = keras.models.Model(inputs=[input1, input2], outputs=out)
  ```
  """

  def _merge_function(self, inputs):
    output = inputs[0]
    for i in range(1, len(inputs)):
      output += inputs[i]
    return output


@keras_export('keras.layers.Subtract')
class Subtract(_Merge):
  """Layer that subtracts two inputs.

  It takes as input a list of tensors of size 2,
  both of the same shape, and returns a single tensor, (inputs[0] -
  inputs[1]), also of the same shape.

  Examples:

  ```python
      import keras

      input1 = keras.layers.Input(shape=(16,))
      x1 = keras.layers.Dense(8, activation='relu')(input1)
      input2 = keras.layers.Input(shape=(32,))
      x2 = keras.layers.Dense(8, activation='relu')(input2)
      # Equivalent to subtracted = keras.layers.subtract([x1, x2])
      subtracted = keras.layers.Subtract()([x1, x2])

      out = keras.layers.Dense(4)(subtracted)
      model = keras.models.Model(inputs=[input1, input2], outputs=out)
  ```
  """

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    super(Subtract, self).build(input_shape)
    if len(input_shape) != 2:
      raise ValueError('A `Subtract` layer should be called '
                       'on exactly 2 inputs')

  def _merge_function(self, inputs):
    if len(inputs) != 2:
      raise ValueError('A `Subtract` layer should be called '
                       'on exactly 2 inputs')
    return inputs[0] - inputs[1]


@keras_export('keras.layers.Multiply')
class Multiply(_Merge):
  """Layer that multiplies (element-wise) a list of inputs.
It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). """ def _merge_function(self, inputs): output = inputs[0] for i in range(1, len(inputs)): output *= inputs[i] return output @keras_export('keras.layers.Average') class Average(_Merge): """Layer that averages a list of inputs. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). """ def _merge_function(self, inputs): output = inputs[0] for i in range(1, len(inputs)): output += inputs[i] return output / len(inputs) @keras_export('keras.layers.Maximum') class Maximum(_Merge): """Layer that computes the element-wise maximum of a list of inputs. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). """ def _merge_function(self, inputs): output = inputs[0] for i in range(1, len(inputs)): output = math_ops.maximum(output, inputs[i]) return output @keras_export('keras.layers.Minimum') class Minimum(_Merge): """Layer that computes the element-wise minimum of a list of inputs. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). """ def _merge_function(self, inputs): output = inputs[0] for i in range(1, len(inputs)): output = math_ops.minimum(output, inputs[i]) return output @keras_export('keras.layers.Concatenate') class Concatenate(_Merge): """Layer that concatenates a list of inputs. It takes as input a list of tensors, all of the same shape except for the concatenation axis, and returns a single tensor, the concatenation of all inputs. Arguments: axis: Axis along which to concatenate. **kwargs: standard layer keyword arguments. """ def __init__(self, axis=-1, **kwargs): super(Concatenate, self).__init__(**kwargs) self.axis = axis self.supports_masking = True self._reshape_required = False @tf_utils.shape_type_conversion def build(self, input_shape): # Used purely for shape validation. if not isinstance(input_shape, list) or len(input_shape) < 2: raise ValueError('A `Concatenate` layer should be called ' 'on a list of at least 2 inputs') if all(shape is None for shape in input_shape): return reduced_inputs_shapes = [list(shape) for shape in input_shape] shape_set = set() for i in range(len(reduced_inputs_shapes)): del reduced_inputs_shapes[i][self.axis] shape_set.add(tuple(reduced_inputs_shapes[i])) if len(shape_set) > 1: raise ValueError('A `Concatenate` layer requires ' 'inputs with matching shapes ' 'except for the concat axis. 
' 'Got inputs shapes: %s' % (input_shape)) def _merge_function(self, inputs): return K.concatenate(inputs, axis=self.axis) @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): if not isinstance(input_shape, list): raise ValueError('A `Concatenate` layer should be called ' 'on a list of inputs.') input_shapes = input_shape output_shape = list(input_shapes[0]) for shape in input_shapes[1:]: if output_shape[self.axis] is None or shape[self.axis] is None: output_shape[self.axis] = None break output_shape[self.axis] += shape[self.axis] return tuple(output_shape) def compute_mask(self, inputs, mask=None): if mask is None: return None if not isinstance(mask, list): raise ValueError('`mask` should be a list.') if not isinstance(inputs, list): raise ValueError('`inputs` should be a list.') if len(mask) != len(inputs): raise ValueError('The lists `inputs` and `mask` ' 'should have the same length.') if all(m is None for m in mask): return None # Make a list of masks while making sure # the dimensionality of each mask # is the same as the corresponding input. masks = [] for input_i, mask_i in zip(inputs, mask): if mask_i is None: # Input is unmasked. Append all 1s to masks, masks.append(array_ops.ones_like(input_i, dtype='bool')) elif K.ndim(mask_i) < K.ndim(input_i): # Mask is smaller than the input, expand it masks.append(array_ops.expand_dims(mask_i, axis=-1)) else: masks.append(mask_i) concatenated = K.concatenate(masks, axis=self.axis) return K.all(concatenated, axis=-1, keepdims=False) def get_config(self): config = { 'axis': self.axis, } base_config = super(Concatenate, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.Dot') class Dot(_Merge): """Layer that computes a dot product between samples in two tensors. E.g. if applied to a list of two tensors `a` and `b` of shape `(batch_size, n)`, the output will be a tensor of shape `(batch_size, 1)` where each entry `i` will be the dot product between `a[i]` and `b[i]`. Arguments: axes: Integer or tuple of integers, axis or axes along which to take the dot product. normalize: Whether to L2-normalize samples along the dot product axis before taking the dot product. If set to True, then the output of the dot product is the cosine proximity between the two samples. **kwargs: Standard layer keyword arguments. """ def __init__(self, axes, normalize=False, **kwargs): super(Dot, self).__init__(**kwargs) if not isinstance(axes, int): if not isinstance(axes, (list, tuple)): raise TypeError('Invalid type for `axes` - ' 'should be a list or an int.') if len(axes) != 2: raise ValueError('Invalid format for `axes` - ' 'should contain two elements.') if not isinstance(axes[0], int) or not isinstance(axes[1], int): raise ValueError('Invalid format for `axes` - ' 'list elements should be "int".') self.axes = axes self.normalize = normalize self.supports_masking = True self._reshape_required = False @tf_utils.shape_type_conversion def build(self, input_shape): # Used purely for shape validation. if not isinstance(input_shape, list) or len(input_shape) != 2: raise ValueError('A `Dot` layer should be called ' 'on a list of 2 inputs.') shape1 = input_shape[0] shape2 = input_shape[1] if shape1 is None or shape2 is None: return if isinstance(self.axes, int): if self.axes < 0: axes = [self.axes % len(shape1), self.axes % len(shape2)] else: axes = [self.axes] * 2 else: axes = self.axes if shape1[axes[0]] != shape2[axes[1]]: raise ValueError('Dimension incompatibility ' '%s != %s. 
' % (shape1[axes[0]], shape2[axes[1]]) + 'Layer shapes: %s, %s' % (shape1, shape2)) def _merge_function(self, inputs): if len(inputs) != 2: raise ValueError('A `Dot` layer should be called on exactly 2 inputs') x1 = inputs[0] x2 = inputs[1] if isinstance(self.axes, int): if self.axes < 0: axes = [self.axes % K.ndim(x1), self.axes % K.ndim(x2)] else: axes = [self.axes] * 2 else: axes = [] for i in range(len(self.axes)): if self.axes[i] < 0: axes.append(self.axes[i] % K.ndim(inputs[i])) else: axes.append(self.axes[i]) if self.normalize: x1 = nn.l2_normalize(x1, axis=axes[0]) x2 = nn.l2_normalize(x2, axis=axes[1]) output = K.batch_dot(x1, x2, axes) return output @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): if not isinstance(input_shape, list) or len(input_shape) != 2: raise ValueError('A `Dot` layer should be called ' 'on a list of 2 inputs.') shape1 = list(input_shape[0]) shape2 = list(input_shape[1]) if isinstance(self.axes, int): if self.axes < 0: axes = [self.axes % len(shape1), self.axes % len(shape2)] else: axes = [self.axes] * 2 else: axes = self.axes shape1.pop(axes[0]) shape2.pop(axes[1]) shape2.pop(0) output_shape = shape1 + shape2 if len(output_shape) == 1: output_shape += [1] return tuple(output_shape) def compute_mask(self, inputs, mask=None): return None def get_config(self): config = { 'axes': self.axes, 'normalize': self.normalize, } base_config = super(Dot, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.add') def add(inputs, **kwargs): """Functional interface to the `Add` layer. Arguments: inputs: A list of input tensors (at least 2). **kwargs: Standard layer keyword arguments. Returns: A tensor, the sum of the inputs. Examples: ```python import keras input1 = keras.layers.Input(shape=(16,)) x1 = keras.layers.Dense(8, activation='relu')(input1) input2 = keras.layers.Input(shape=(32,)) x2 = keras.layers.Dense(8, activation='relu')(input2) added = keras.layers.add([x1, x2]) out = keras.layers.Dense(4)(added) model = keras.models.Model(inputs=[input1, input2], outputs=out) ``` """ return Add(**kwargs)(inputs) @keras_export('keras.layers.subtract') def subtract(inputs, **kwargs): """Functional interface to the `Subtract` layer. Arguments: inputs: A list of input tensors (exactly 2). **kwargs: Standard layer keyword arguments. Returns: A tensor, the difference of the inputs. Examples: ```python import keras input1 = keras.layers.Input(shape=(16,)) x1 = keras.layers.Dense(8, activation='relu')(input1) input2 = keras.layers.Input(shape=(32,)) x2 = keras.layers.Dense(8, activation='relu')(input2) subtracted = keras.layers.subtract([x1, x2]) out = keras.layers.Dense(4)(subtracted) model = keras.models.Model(inputs=[input1, input2], outputs=out) ``` """ return Subtract(**kwargs)(inputs) @keras_export('keras.layers.multiply') def multiply(inputs, **kwargs): """Functional interface to the `Multiply` layer. Arguments: inputs: A list of input tensors (at least 2). **kwargs: Standard layer keyword arguments. Returns: A tensor, the element-wise product of the inputs. """ return Multiply(**kwargs)(inputs) @keras_export('keras.layers.average') def average(inputs, **kwargs): """Functional interface to the `Average` layer. Arguments: inputs: A list of input tensors (at least 2). **kwargs: Standard layer keyword arguments. Returns: A tensor, the average of the inputs. 
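Examples:

```python
# An illustrative sketch mirroring the `add` example above; layer sizes
# are arbitrary.
import keras

input1 = keras.layers.Input(shape=(16,))
x1 = keras.layers.Dense(8, activation='relu')(input1)
input2 = keras.layers.Input(shape=(32,))
x2 = keras.layers.Dense(8, activation='relu')(input2)
avg = keras.layers.average([x1, x2])  # shape=(None, 8)
out = keras.layers.Dense(4)(avg)
model = keras.models.Model(inputs=[input1, input2], outputs=out)
```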
""" return Average(**kwargs)(inputs) @keras_export('keras.layers.maximum') def maximum(inputs, **kwargs): """Functional interface to the `Maximum` layer that computes the maximum (element-wise) list of `inputs`. For example: ```python input1 = tf.keras.layers.Input(shape=(16,)) x1 = tf.keras.layers.Dense(8, activation='relu')(input1) #shape=(None, 8) input2 = tf.keras.layers.Input(shape=(32,)) x2 = tf.keras.layers.Dense(8, activation='relu')(input2) #shape=(None, 8) max_inp=tf.keras.layers.maximum([x1,x2]) #shape=(None, 8) out = tf.keras.layers.Dense(4)(max_inp) model = tf.keras.models.Model(inputs=[input1, input2], outputs=out) ``` Arguments: inputs: A list of input tensors (at least 2) of same shape. **kwargs: Standard layer keyword arguments. Returns: A tensor (of same shape as input tensor) with the element-wise maximum of the inputs. Raises: ValueError: If input tensors are of different shape. """ return Maximum(**kwargs)(inputs) @keras_export('keras.layers.minimum') def minimum(inputs, **kwargs): """Functional interface to the `Minimum` layer. Arguments: inputs: A list of input tensors (at least 2). **kwargs: Standard layer keyword arguments. Returns: A tensor, the element-wise minimum of the inputs. """ return Minimum(**kwargs)(inputs) @keras_export('keras.layers.concatenate') def concatenate(inputs, axis=-1, **kwargs): """Functional interface to the `Concatenate` layer. Arguments: inputs: A list of input tensors (at least 2). axis: Concatenation axis. **kwargs: Standard layer keyword arguments. Returns: A tensor, the concatenation of the inputs alongside axis `axis`. """ return Concatenate(axis=axis, **kwargs)(inputs) @keras_export('keras.layers.dot') def dot(inputs, axes, normalize=False, **kwargs): """Functional interface to the `Dot` layer. Arguments: inputs: A list of input tensors (at least 2). axes: Integer or tuple of integers, axis or axes along which to take the dot product. normalize: Whether to L2-normalize samples along the dot product axis before taking the dot product. If set to True, then the output of the dot product is the cosine proximity between the two samples. **kwargs: Standard layer keyword arguments. Returns: A tensor, the dot product of the samples from the inputs. """ return Dot(axes=axes, normalize=normalize, **kwargs)(inputs)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/merge.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras convolution layers and image transformation layers. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import context from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import activations from tensorflow.python.keras import backend from tensorflow.python.keras import constraints from tensorflow.python.keras import initializers from tensorflow.python.keras import regularizers from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.engine.input_spec import InputSpec # imports for backwards namespace compatibility # pylint: disable=unused-import from tensorflow.python.keras.layers.pooling import AveragePooling1D from tensorflow.python.keras.layers.pooling import AveragePooling2D from tensorflow.python.keras.layers.pooling import AveragePooling3D from tensorflow.python.keras.layers.pooling import MaxPooling1D from tensorflow.python.keras.layers.pooling import MaxPooling2D from tensorflow.python.keras.layers.pooling import MaxPooling3D # pylint: enable=unused-import from tensorflow.python.keras.utils import conv_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import nn from tensorflow.python.ops import nn_ops from tensorflow.python.util.tf_export import keras_export class Conv(Layer): """Abstract N-D convolution layer (private, used as implementation base). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Arguments: rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of n integers, specifying the length of the convolution window. strides: An integer or tuple/list of n integers, specifying the stride length of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, ..., channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, ...)`. dilation_rate: An integer or tuple/list of n integers, specifying the dilation rate to use for dilated convolution. 
Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` the weights of this layer will be marked as trainable (and listed in `layer.trainable_weights`). name: A string, the name of the layer. fused: Use fused dilated convolution kernel. This is only supported on GPUs. """ def __init__(self, rank, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, fused=False, **kwargs): super(Conv, self).__init__( trainable=trainable, name=name, activity_regularizer=regularizers.get(activity_regularizer), **kwargs) self.rank = rank self.filters = filters self.kernel_size = conv_utils.normalize_tuple( kernel_size, rank, 'kernel_size') self.strides = conv_utils.normalize_tuple(strides, rank, 'strides') self.padding = conv_utils.normalize_padding(padding) if (self.padding == 'causal' and not isinstance(self, (Conv1D, SeparableConv1D))): raise ValueError('Causal padding is only supported for `Conv1D` ' 'and `SeparableConv1D`.') self.data_format = conv_utils.normalize_data_format(data_format) self.dilation_rate = conv_utils.normalize_tuple( dilation_rate, rank, 'dilation_rate') self.activation = activations.get(activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) self.input_spec = InputSpec(ndim=self.rank + 2) self.fused = fused def build(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape) if self.data_format == 'channels_first': channel_axis = 1 else: channel_axis = -1 if input_shape.dims[channel_axis].value is None: raise ValueError('The channel dimension of the inputs ' 'should be defined. 
Found `None`.') input_dim = int(input_shape[channel_axis]) kernel_shape = self.kernel_size + (input_dim, self.filters) self.kernel = self.add_weight( name='kernel', shape=kernel_shape, initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True, dtype=self.dtype) if self.use_bias: self.bias = self.add_weight( name='bias', shape=(self.filters,), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, trainable=True, dtype=self.dtype) else: self.bias = None self.input_spec = InputSpec(ndim=self.rank + 2, axes={channel_axis: input_dim}) if self.padding == 'causal': op_padding = 'valid' else: op_padding = self.padding if not isinstance(op_padding, (list, tuple)): op_padding = op_padding.upper() self._convolution_op = nn_ops.Convolution( input_shape, filter_shape=self.kernel.shape, dilation_rate=self.dilation_rate, strides=self.strides, padding=op_padding, data_format=conv_utils.convert_data_format(self.data_format, self.rank + 2), fused=self.fused) self.built = True def call(self, inputs): outputs = self._convolution_op(inputs, self.kernel) if self.use_bias: if self.data_format == 'channels_first': if self.rank == 1: # nn.bias_add does not accept a 1D input tensor. bias = array_ops.reshape(self.bias, (1, self.filters, 1)) outputs += bias else: outputs = nn.bias_add(outputs, self.bias, data_format='NCHW') else: outputs = nn.bias_add(outputs, self.bias, data_format='NHWC') if self.activation is not None: return self.activation(outputs) return outputs def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_last': space = input_shape[1:-1] new_space = [] for i in range(len(space)): new_dim = conv_utils.conv_output_length( space[i], self.kernel_size[i], padding=self.padding, stride=self.strides[i], dilation=self.dilation_rate[i]) new_space.append(new_dim) return tensor_shape.TensorShape([input_shape[0]] + new_space + [self.filters]) else: space = input_shape[2:] new_space = [] for i in range(len(space)): new_dim = conv_utils.conv_output_length( space[i], self.kernel_size[i], padding=self.padding, stride=self.strides[i], dilation=self.dilation_rate[i]) new_space.append(new_dim) return tensor_shape.TensorShape([input_shape[0], self.filters] + new_space) def get_config(self): config = { 'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint) } base_config = super(Conv, self).get_config() return dict(list(base_config.items()) + list(config.items())) def _compute_causal_padding(self): """Calculates padding for 'causal' option for 1-d conv layers.""" left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1) if self.data_format == 'channels_last': causal_padding = [[0, 0], [left_pad, 0], [0, 0]] else: causal_padding = [[0, 0], [0, 0], 
[left_pad, 0]] return causal_padding @keras_export('keras.layers.Conv1D', 'keras.layers.Convolution1D') class Conv1D(Conv): """1D convolution layer (e.g. temporal convolution). This layer creates a convolution kernel that is convolved with the layer input over a single spatial (or temporal) dimension to produce a tensor of outputs. If `use_bias` is True, a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. When using this layer as the first layer in a model, provide an `input_shape` argument (tuple of integers or `None`, e.g. `(10, 128)` for sequences of 10 128-dimensional vectors, or `(None, 128)` for variable-length sequences of 128-dimensional vectors). Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of a single integer, specifying the length of the 1D convolution window. strides: An integer or tuple/list of a single integer, specifying the stride length of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"`, `"causal"` or `"same"` (case-insensitive). `"causal"` results in causal (dilated) convolutions, e.g. output[t] does not depend on input[t+1:]. Useful when modeling temporal data where the model should not violate the temporal order. See [WaveNet: A Generative Model for Raw Audio, section 2.1](https://arxiv.org/abs/1609.03499). data_format: A string, one of `channels_last` (default) or `channels_first`. dilation_rate: an integer or tuple/list of a single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function to use. If you don't specify anything, no activation is applied (i.e. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). kernel_constraint: Constraint function applied to the kernel matrix. bias_constraint: Constraint function applied to the bias vector. fused: Use fused dilated convolution kernel. This is only supported on GPUs. Examples: ```python # Small convolutional model for sequences of 6 timesteps of # 128-dimensional vectors. # model.input_shape == (None, 6, 128) model = Sequential() model.add(Conv1D(32, 3, activation='relu', input_shape=(6, 128))) # now: model.output_shape == (None, 4, 32) ``` Input shape: 3D tensor with shape: `(batch_size, steps, input_dim)` Output shape: 3D tensor with shape: `(batch_size, new_steps, filters)` `steps` value might have changed due to padding or strides. 
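For causal (dilated) convolutions, an illustrative sketch (filter counts and dilation rates are arbitrary):

```python
# WaveNet-style causal stack: output[t] depends only on input[:t + 1].
model = Sequential()
model.add(Conv1D(32, 2, padding='causal', dilation_rate=1,
                 input_shape=(None, 16)))
model.add(Conv1D(32, 2, padding='causal', dilation_rate=2))
```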
""" def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, fused=False, **kwargs): super(Conv1D, self).__init__( rank=1, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activations.get(activation), use_bias=use_bias, kernel_initializer=initializers.get(kernel_initializer), bias_initializer=initializers.get(bias_initializer), kernel_regularizer=regularizers.get(kernel_regularizer), bias_regularizer=regularizers.get(bias_regularizer), activity_regularizer=regularizers.get(activity_regularizer), kernel_constraint=constraints.get(kernel_constraint), bias_constraint=constraints.get(bias_constraint), fused=fused, **kwargs) def call(self, inputs): if self.padding == 'causal': inputs = array_ops.pad(inputs, self._compute_causal_padding()) return super(Conv1D, self).call(inputs) @keras_export('keras.layers.Conv2D', 'keras.layers.Convolution2D') class Conv2D(Conv): """2D convolution layer (e.g. spatial convolution over images). This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs. If `use_bias` is True, a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. When using this layer as the first layer in a model, provide the keyword argument `input_shape` (tuple of integers, does not include the sample axis), e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures in `data_format="channels_last"`. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function to use. If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. 
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation").. kernel_constraint: Constraint function applied to the kernel matrix. bias_constraint: Constraint function applied to the bias vector. fused: Use fused dilated convolution kernel. This is only supported on GPUs. Input shape: 4D tensor with shape: `(samples, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, rows, cols, channels)` if data_format='channels_last'. Output shape: 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. """ def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, fused=False, **kwargs): super(Conv2D, self).__init__( rank=2, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activations.get(activation), use_bias=use_bias, kernel_initializer=initializers.get(kernel_initializer), bias_initializer=initializers.get(bias_initializer), kernel_regularizer=regularizers.get(kernel_regularizer), bias_regularizer=regularizers.get(bias_regularizer), activity_regularizer=regularizers.get(activity_regularizer), kernel_constraint=constraints.get(kernel_constraint), bias_constraint=constraints.get(bias_constraint), fused=fused, **kwargs) @keras_export('keras.layers.Conv3D', 'keras.layers.Convolution3D') class Conv3D(Conv): """3D convolution layer (e.g. spatial convolution over volumes). This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs. If `use_bias` is True, a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. When using this layer as the first layer in a model, provide the keyword argument `input_shape` (tuple of integers, does not include the sample axis), e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes with a single channel, in `data_format="channels_last"`. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along each spatial dimension. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. 
`channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". dilation_rate: an integer or tuple/list of 3 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function to use. If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation").. kernel_constraint: Constraint function applied to the kernel matrix. bias_constraint: Constraint function applied to the bias vector. Input shape: 5D tensor with shape: `(samples, channels, conv_dim1, conv_dim2, conv_dim3)` if data_format='channels_first' or 5D tensor with shape: `(samples, conv_dim1, conv_dim2, conv_dim3, channels)` if data_format='channels_last'. Output shape: 5D tensor with shape: `(samples, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if data_format='channels_first' or 5D tensor with shape: `(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if data_format='channels_last'. `new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have changed due to padding. """ def __init__(self, filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(Conv3D, self).__init__( rank=3, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activations.get(activation), use_bias=use_bias, kernel_initializer=initializers.get(kernel_initializer), bias_initializer=initializers.get(bias_initializer), kernel_regularizer=regularizers.get(kernel_regularizer), bias_regularizer=regularizers.get(bias_regularizer), activity_regularizer=regularizers.get(activity_regularizer), kernel_constraint=constraints.get(kernel_constraint), bias_constraint=constraints.get(bias_constraint), **kwargs) @keras_export('keras.layers.Conv2DTranspose', 'keras.layers.Convolution2DTranspose') class Conv2DTranspose(Conv2D): """Transposed convolution layer (sometimes called Deconvolution). The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution. 
When using this layer as the first layer in a model, provide the keyword argument `input_shape` (tuple of integers, does not include the sample axis), e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures in `data_format="channels_last"`. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `"valid"` or `"same"` (case-insensitive). output_padding: An integer or tuple/list of 2 integers, specifying the amount of padding along the height and width of the output tensor. Can be a single integer to specify the same value for all spatial dimensions. The amount of output padding along a given dimension must be lower than the stride along that same dimension. If set to `None` (default), the output shape is inferred. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function to use. If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation").. kernel_constraint: Constraint function applied to the kernel matrix. bias_constraint: Constraint function applied to the bias vector. Input shape: 4D tensor with shape: `(batch, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(batch, rows, cols, channels)` if data_format='channels_last'. Output shape: 4D tensor with shape: `(batch, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(batch, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. 
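Example (an illustrative sketch; filter counts and sizes are arbitrary):

```python
import tensorflow as tf

x = tf.keras.layers.Input(shape=(28, 28, 16))
# With `strides=2` and `padding='same'`, spatial dimensions are doubled.
y = tf.keras.layers.Conv2DTranspose(8, 3, strides=2, padding='same')(x)
# y.shape == (None, 56, 56, 8)
```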
References: - [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285v1) - [Deconvolutional Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf) """ def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', output_padding=None, data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(Conv2DTranspose, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activations.get(activation), use_bias=use_bias, kernel_initializer=initializers.get(kernel_initializer), bias_initializer=initializers.get(bias_initializer), kernel_regularizer=regularizers.get(kernel_regularizer), bias_regularizer=regularizers.get(bias_regularizer), activity_regularizer=regularizers.get(activity_regularizer), kernel_constraint=constraints.get(kernel_constraint), bias_constraint=constraints.get(bias_constraint), **kwargs) self.output_padding = output_padding if self.output_padding is not None: self.output_padding = conv_utils.normalize_tuple( self.output_padding, 2, 'output_padding') for stride, out_pad in zip(self.strides, self.output_padding): if out_pad >= stride: raise ValueError('Stride ' + str(self.strides) + ' must be ' 'greater than output padding ' + str(self.output_padding)) def build(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape) if len(input_shape) != 4: raise ValueError('Inputs should have rank 4. Received input shape: ' + str(input_shape)) if self.data_format == 'channels_first': channel_axis = 1 else: channel_axis = -1 if input_shape.dims[channel_axis].value is None: raise ValueError('The channel dimension of the inputs ' 'should be defined. 
Found `None`.') input_dim = int(input_shape[channel_axis]) self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim}) kernel_shape = self.kernel_size + (self.filters, input_dim) self.kernel = self.add_weight( name='kernel', shape=kernel_shape, initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True, dtype=self.dtype) if self.use_bias: self.bias = self.add_weight( name='bias', shape=(self.filters,), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, trainable=True, dtype=self.dtype) else: self.bias = None self.built = True def call(self, inputs): inputs_shape = array_ops.shape(inputs) batch_size = inputs_shape[0] if self.data_format == 'channels_first': h_axis, w_axis = 2, 3 else: h_axis, w_axis = 1, 2 height, width = inputs_shape[h_axis], inputs_shape[w_axis] kernel_h, kernel_w = self.kernel_size stride_h, stride_w = self.strides if self.output_padding is None: out_pad_h = out_pad_w = None else: out_pad_h, out_pad_w = self.output_padding # Infer the dynamic output shape: out_height = conv_utils.deconv_output_length(height, kernel_h, padding=self.padding, output_padding=out_pad_h, stride=stride_h, dilation=self.dilation_rate[0]) out_width = conv_utils.deconv_output_length(width, kernel_w, padding=self.padding, output_padding=out_pad_w, stride=stride_w, dilation=self.dilation_rate[1]) if self.data_format == 'channels_first': output_shape = (batch_size, self.filters, out_height, out_width) else: output_shape = (batch_size, out_height, out_width, self.filters) output_shape_tensor = array_ops.stack(output_shape) outputs = backend.conv2d_transpose( inputs, self.kernel, output_shape_tensor, strides=self.strides, padding=self.padding, data_format=self.data_format, dilation_rate=self.dilation_rate) if not context.executing_eagerly(): # Infer the static output shape: out_shape = self.compute_output_shape(inputs.shape) outputs.set_shape(out_shape) if self.use_bias: outputs = nn.bias_add( outputs, self.bias, data_format=conv_utils.convert_data_format(self.data_format, ndim=4)) if self.activation is not None: return self.activation(outputs) return outputs def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() output_shape = list(input_shape) if self.data_format == 'channels_first': c_axis, h_axis, w_axis = 1, 2, 3 else: c_axis, h_axis, w_axis = 3, 1, 2 kernel_h, kernel_w = self.kernel_size stride_h, stride_w = self.strides if self.output_padding is None: out_pad_h = out_pad_w = None else: out_pad_h, out_pad_w = self.output_padding output_shape[c_axis] = self.filters output_shape[h_axis] = conv_utils.deconv_output_length( output_shape[h_axis], kernel_h, padding=self.padding, output_padding=out_pad_h, stride=stride_h, dilation=self.dilation_rate[0]) output_shape[w_axis] = conv_utils.deconv_output_length( output_shape[w_axis], kernel_w, padding=self.padding, output_padding=out_pad_w, stride=stride_w, dilation=self.dilation_rate[1]) return tensor_shape.TensorShape(output_shape) def get_config(self): config = super(Conv2DTranspose, self).get_config() config['output_padding'] = self.output_padding return config @keras_export('keras.layers.Conv3DTranspose', 'keras.layers.Convolution3DTranspose') class Conv3DTranspose(Conv3D): """Transposed convolution layer (sometimes called Deconvolution). 
The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution. When using this layer as the first layer in a model, provide the keyword argument `input_shape` (tuple of integers, does not include the sample axis), e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels if `data_format="channels_last"`. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along the depth, height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `"valid"` or `"same"` (case-insensitive). output_padding: An integer or tuple/list of 3 integers, specifying the amount of padding along the depth, height, and width. Can be a single integer to specify the same value for all spatial dimensions. The amount of output padding along a given dimension must be lower than the stride along that same dimension. If set to `None` (default), the output shape is inferred. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". dilation_rate: an integer or tuple/list of 3 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function to use. If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). kernel_constraint: Constraint function applied to the kernel matrix. bias_constraint: Constraint function applied to the bias vector. Input shape: 5D tensor with shape: `(batch, channels, depth, rows, cols)` if data_format='channels_first' or 5D tensor with shape: `(batch, depth, rows, cols, channels)` if data_format='channels_last'. 
Output shape: 5D tensor with shape: `(batch, filters, new_depth, new_rows, new_cols)` if data_format='channels_first' or 5D tensor with shape: `(batch, new_depth, new_rows, new_cols, filters)` if data_format='channels_last'. `depth` and `rows` and `cols` values might have changed due to padding. References: - [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285v1) - [Deconvolutional Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf) """ def __init__(self, filters, kernel_size, strides=(1, 1, 1), padding='valid', output_padding=None, data_format=None, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(Conv3DTranspose, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, activation=activations.get(activation), use_bias=use_bias, kernel_initializer=initializers.get(kernel_initializer), bias_initializer=initializers.get(bias_initializer), kernel_regularizer=regularizers.get(kernel_regularizer), bias_regularizer=regularizers.get(bias_regularizer), activity_regularizer=regularizers.get(activity_regularizer), kernel_constraint=constraints.get(kernel_constraint), bias_constraint=constraints.get(bias_constraint), **kwargs) self.output_padding = output_padding if self.output_padding is not None: self.output_padding = conv_utils.normalize_tuple( self.output_padding, 3, 'output_padding') for stride, out_pad in zip(self.strides, self.output_padding): if out_pad >= stride: raise ValueError('Stride ' + str(self.strides) + ' must be ' 'greater than output padding ' + str(self.output_padding)) def build(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape) if len(input_shape) != 5: raise ValueError('Inputs should have rank 5, received input shape:', str(input_shape)) if self.data_format == 'channels_first': channel_axis = 1 else: channel_axis = -1 if input_shape.dims[channel_axis].value is None: raise ValueError('The channel dimension of the inputs ' 'should be defined, found None: ' + str(input_shape)) input_dim = int(input_shape[channel_axis]) kernel_shape = self.kernel_size + (self.filters, input_dim) self.input_spec = InputSpec(ndim=5, axes={channel_axis: input_dim}) self.kernel = self.add_weight( 'kernel', shape=kernel_shape, initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True, dtype=self.dtype) if self.use_bias: self.bias = self.add_weight( 'bias', shape=(self.filters,), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, trainable=True, dtype=self.dtype) else: self.bias = None self.built = True def call(self, inputs): inputs_shape = array_ops.shape(inputs) batch_size = inputs_shape[0] if self.data_format == 'channels_first': d_axis, h_axis, w_axis = 2, 3, 4 else: d_axis, h_axis, w_axis = 1, 2, 3 depth = inputs_shape[d_axis] height = inputs_shape[h_axis] width = inputs_shape[w_axis] kernel_d, kernel_h, kernel_w = self.kernel_size stride_d, stride_h, stride_w = self.strides if self.output_padding is None: out_pad_d = out_pad_h = out_pad_w = None else: out_pad_d, out_pad_h, out_pad_w = self.output_padding # Infer the dynamic output shape: out_depth = conv_utils.deconv_output_length(depth, kernel_d, padding=self.padding, output_padding=out_pad_d, 
stride=stride_d) out_height = conv_utils.deconv_output_length(height, kernel_h, padding=self.padding, output_padding=out_pad_h, stride=stride_h) out_width = conv_utils.deconv_output_length(width, kernel_w, padding=self.padding, output_padding=out_pad_w, stride=stride_w) if self.data_format == 'channels_first': output_shape = (batch_size, self.filters, out_depth, out_height, out_width) strides = (1, 1, stride_d, stride_h, stride_w) else: output_shape = (batch_size, out_depth, out_height, out_width, self.filters) strides = (1, stride_d, stride_h, stride_w, 1) output_shape_tensor = array_ops.stack(output_shape) outputs = nn.conv3d_transpose( inputs, self.kernel, output_shape_tensor, strides, data_format=conv_utils.convert_data_format(self.data_format, ndim=5), padding=self.padding.upper()) if not context.executing_eagerly(): # Infer the static output shape: out_shape = self.compute_output_shape(inputs.shape) outputs.set_shape(out_shape) if self.use_bias: outputs = nn.bias_add( outputs, self.bias, data_format=conv_utils.convert_data_format(self.data_format, ndim=4)) if self.activation is not None: return self.activation(outputs) return outputs def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() output_shape = list(input_shape) if self.data_format == 'channels_first': c_axis, d_axis, h_axis, w_axis = 1, 2, 3, 4 else: c_axis, d_axis, h_axis, w_axis = 4, 1, 2, 3 kernel_d, kernel_h, kernel_w = self.kernel_size stride_d, stride_h, stride_w = self.strides if self.output_padding is None: out_pad_d = out_pad_h = out_pad_w = None else: out_pad_d, out_pad_h, out_pad_w = self.output_padding output_shape[c_axis] = self.filters output_shape[d_axis] = conv_utils.deconv_output_length( output_shape[d_axis], kernel_d, padding=self.padding, output_padding=out_pad_d, stride=stride_d) output_shape[h_axis] = conv_utils.deconv_output_length( output_shape[h_axis], kernel_h, padding=self.padding, output_padding=out_pad_h, stride=stride_h) output_shape[w_axis] = conv_utils.deconv_output_length( output_shape[w_axis], kernel_w, padding=self.padding, output_padding=out_pad_w, stride=stride_w) return tensor_shape.TensorShape(output_shape) def get_config(self): config = super(Conv3DTranspose, self).get_config() config.pop('dilation_rate') config['output_padding'] = self.output_padding return config class SeparableConv(Conv): """Abstract base layer for separable nD convolution. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Arguments: rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. 
`channels_last` corresponds to inputs with shape `(batch, ..., channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, ...)`. dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel. pointwise_initializer: An initializer for the pointwise convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer`. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` the weights of this layer will be marked as trainable (and listed in `layer.trainable_weights`). name: A string, the name of the layer. 
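As an illustration of the factorization (using the public `SeparableConv2D` subclass; sizes are arbitrary):

```python
import tensorflow as tf

x = tf.keras.layers.Input(shape=(32, 32, 3))
y = tf.keras.layers.SeparableConv2D(16, 3, depth_multiplier=2)(x)
# Depthwise kernel: (3, 3, 3, 2) -> 54 weights; pointwise kernel:
# (1, 1, 6, 16) -> 96 weights, vs. 3*3*3*16 = 432 weights for the
# equivalent full `Conv2D` kernel.
```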
""" def __init__(self, rank, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): super(SeparableConv, self).__init__( rank=rank, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activations.get(activation), use_bias=use_bias, bias_initializer=initializers.get(bias_initializer), bias_regularizer=regularizers.get(bias_regularizer), activity_regularizer=regularizers.get(activity_regularizer), bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs) self.depth_multiplier = depth_multiplier self.depthwise_initializer = initializers.get(depthwise_initializer) self.pointwise_initializer = initializers.get(pointwise_initializer) self.depthwise_regularizer = regularizers.get(depthwise_regularizer) self.pointwise_regularizer = regularizers.get(pointwise_regularizer) self.depthwise_constraint = constraints.get(depthwise_constraint) self.pointwise_constraint = constraints.get(pointwise_constraint) def build(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape) if self.data_format == 'channels_first': channel_axis = 1 else: channel_axis = -1 if input_shape.dims[channel_axis].value is None: raise ValueError('The channel dimension of the inputs ' 'should be defined. Found `None`.') input_dim = int(input_shape[channel_axis]) self.input_spec = InputSpec(ndim=self.rank + 2, axes={channel_axis: input_dim}) depthwise_kernel_shape = self.kernel_size + (input_dim, self.depth_multiplier) pointwise_kernel_shape = ( 1,) * self.rank + (self.depth_multiplier * input_dim, self.filters) self.depthwise_kernel = self.add_weight( name='depthwise_kernel', shape=depthwise_kernel_shape, initializer=self.depthwise_initializer, regularizer=self.depthwise_regularizer, constraint=self.depthwise_constraint, trainable=True, dtype=self.dtype) self.pointwise_kernel = self.add_weight( name='pointwise_kernel', shape=pointwise_kernel_shape, initializer=self.pointwise_initializer, regularizer=self.pointwise_regularizer, constraint=self.pointwise_constraint, trainable=True, dtype=self.dtype) if self.use_bias: self.bias = self.add_weight( name='bias', shape=(self.filters,), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, trainable=True, dtype=self.dtype) else: self.bias = None self.built = True def call(self, inputs): raise NotImplementedError def get_config(self): config = { 'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'depth_multiplier': self.depth_multiplier, 'dilation_rate': self.dilation_rate, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'depthwise_initializer': initializers.serialize(self.depthwise_initializer), 'pointwise_initializer': initializers.serialize(self.pointwise_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'depthwise_regularizer': regularizers.serialize(self.depthwise_regularizer), 'pointwise_regularizer': regularizers.serialize(self.pointwise_regularizer), 
'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'depthwise_constraint': constraints.serialize(self.depthwise_constraint), 'pointwise_constraint': constraints.serialize(self.pointwise_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint) } base_config = super(SeparableConv, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.SeparableConv1D', 'keras.layers.SeparableConvolution1D') class SeparableConv1D(SeparableConv): """Depthwise separable 1D convolution. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A single integer specifying the spatial dimensions of the filters. strides: A single integer specifying the strides of the convolution. Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. dilation_rate: A single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel. pointwise_initializer: An initializer for the pointwise convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer`. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` the weights of this layer will be marked as trainable (and listed in `layer.trainable_weights`). 
name: A string, the name of the layer. """ def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, **kwargs): super(SeparableConv1D, self).__init__( rank=1, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, depth_multiplier=depth_multiplier, activation=activations.get(activation), use_bias=use_bias, depthwise_initializer=initializers.get(depthwise_initializer), pointwise_initializer=initializers.get(pointwise_initializer), bias_initializer=initializers.get(bias_initializer), depthwise_regularizer=regularizers.get(depthwise_regularizer), pointwise_regularizer=regularizers.get(pointwise_regularizer), bias_regularizer=regularizers.get(bias_regularizer), activity_regularizer=regularizers.get(activity_regularizer), depthwise_constraint=constraints.get(depthwise_constraint), pointwise_constraint=constraints.get(pointwise_constraint), bias_constraint=constraints.get(bias_constraint), **kwargs) def call(self, inputs): if self.padding == 'causal': inputs = array_ops.pad(inputs, self._compute_causal_padding()) if self.data_format == 'channels_last': strides = (1,) + self.strides * 2 + (1,) spatial_start_dim = 1 else: strides = (1, 1) + self.strides * 2 spatial_start_dim = 2 # Explicitly broadcast inputs and kernels to 4D. # TODO(fchollet): refactor when a native separable_conv1d op is available. inputs = array_ops.expand_dims(inputs, spatial_start_dim) depthwise_kernel = array_ops.expand_dims(self.depthwise_kernel, 0) pointwise_kernel = array_ops.expand_dims(self.pointwise_kernel, 0) dilation_rate = (1,) + self.dilation_rate if self.padding == 'causal': op_padding = 'valid' else: op_padding = self.padding outputs = nn.separable_conv2d( inputs, depthwise_kernel, pointwise_kernel, strides=strides, padding=op_padding.upper(), rate=dilation_rate, data_format=conv_utils.convert_data_format(self.data_format, ndim=4)) if self.use_bias: outputs = nn.bias_add( outputs, self.bias, data_format=conv_utils.convert_data_format(self.data_format, ndim=4)) outputs = array_ops.squeeze(outputs, [spatial_start_dim]) if self.activation is not None: return self.activation(outputs) return outputs @keras_export('keras.layers.SeparableConv2D', 'keras.layers.SeparableConvolution2D') class SeparableConv2D(SeparableConv): """Depthwise separable 2D convolution. Separable convolutions consist in first performing a depthwise spatial convolution (which acts on each input channel separately) followed by a pointwise convolution which mixes together the resulting output channels. The `depth_multiplier` argument controls how many output channels are generated per input channel in the depthwise step. Intuitively, separable convolutions can be understood as a way to factorize a convolution kernel into two smaller kernels, or as an extreme version of an Inception block. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. 
Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `filters_in * depth_multiplier`. activation: Activation function to use. If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. depthwise_initializer: Initializer for the depthwise kernel matrix. pointwise_initializer: Initializer for the pointwise kernel matrix. bias_initializer: Initializer for the bias vector. depthwise_regularizer: Regularizer function applied to the depthwise kernel matrix. pointwise_regularizer: Regularizer function applied to the pointwise kernel matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). depthwise_constraint: Constraint function applied to the depthwise kernel matrix. pointwise_constraint: Constraint function applied to the pointwise kernel matrix. bias_constraint: Constraint function applied to the bias vector. Input shape: 4D tensor with shape: `(batch, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(batch, rows, cols, channels)` if data_format='channels_last'. Output shape: 4D tensor with shape: `(batch, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(batch, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. 
""" def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, **kwargs): super(SeparableConv2D, self).__init__( rank=2, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, depth_multiplier=depth_multiplier, activation=activations.get(activation), use_bias=use_bias, depthwise_initializer=initializers.get(depthwise_initializer), pointwise_initializer=initializers.get(pointwise_initializer), bias_initializer=initializers.get(bias_initializer), depthwise_regularizer=regularizers.get(depthwise_regularizer), pointwise_regularizer=regularizers.get(pointwise_regularizer), bias_regularizer=regularizers.get(bias_regularizer), activity_regularizer=regularizers.get(activity_regularizer), depthwise_constraint=constraints.get(depthwise_constraint), pointwise_constraint=constraints.get(pointwise_constraint), bias_constraint=constraints.get(bias_constraint), **kwargs) def call(self, inputs): # Apply the actual ops. if self.data_format == 'channels_last': strides = (1,) + self.strides + (1,) else: strides = (1, 1) + self.strides outputs = nn.separable_conv2d( inputs, self.depthwise_kernel, self.pointwise_kernel, strides=strides, padding=self.padding.upper(), rate=self.dilation_rate, data_format=conv_utils.convert_data_format(self.data_format, ndim=4)) if self.use_bias: outputs = nn.bias_add( outputs, self.bias, data_format=conv_utils.convert_data_format(self.data_format, ndim=4)) if self.activation is not None: return self.activation(outputs) return outputs @keras_export('keras.layers.DepthwiseConv2D') class DepthwiseConv2D(Conv2D): """Depthwise separable 2D convolution. Depthwise Separable convolutions consists in performing just the first step in a depthwise spatial convolution (which acts on each input channel separately). The `depth_multiplier` argument controls how many output channels are generated per input channel in the depthwise step. Arguments: kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `'valid'` or `'same'` (case-insensitive). depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `filters_in * depth_multiplier`. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be 'channels_last'. 
activation: Activation function to use. If you don't specify anything, no activation is applied (ie. 'linear' activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. depthwise_initializer: Initializer for the depthwise kernel matrix. bias_initializer: Initializer for the bias vector. depthwise_regularizer: Regularizer function applied to the depthwise kernel matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its 'activation'). depthwise_constraint: Constraint function applied to the depthwise kernel matrix. bias_constraint: Constraint function applied to the bias vector. Input shape: 4D tensor with shape: `[batch, channels, rows, cols]` if data_format='channels_first' or 4D tensor with shape: `[batch, rows, cols, channels]` if data_format='channels_last'. Output shape: 4D tensor with shape: `[batch, filters, new_rows, new_cols]` if data_format='channels_first' or 4D tensor with shape: `[batch, new_rows, new_cols, filters]` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. """ def __init__(self, kernel_size, strides=(1, 1), padding='valid', depth_multiplier=1, data_format=None, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, bias_constraint=None, **kwargs): super(DepthwiseConv2D, self).__init__( filters=None, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, activation=activation, use_bias=use_bias, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, bias_constraint=bias_constraint, **kwargs) self.depth_multiplier = depth_multiplier self.depthwise_initializer = initializers.get(depthwise_initializer) self.depthwise_regularizer = regularizers.get(depthwise_regularizer) self.depthwise_constraint = constraints.get(depthwise_constraint) self.bias_initializer = initializers.get(bias_initializer) def build(self, input_shape): if len(input_shape) < 4: raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. ' 'Received input shape:', str(input_shape)) input_shape = tensor_shape.TensorShape(input_shape) if self.data_format == 'channels_first': channel_axis = 1 else: channel_axis = 3 if input_shape.dims[channel_axis].value is None: raise ValueError('The channel dimension of the inputs to ' '`DepthwiseConv2D` ' 'should be defined. Found `None`.') input_dim = int(input_shape[channel_axis]) depthwise_kernel_shape = (self.kernel_size[0], self.kernel_size[1], input_dim, self.depth_multiplier) self.depthwise_kernel = self.add_weight( shape=depthwise_kernel_shape, initializer=self.depthwise_initializer, name='depthwise_kernel', regularizer=self.depthwise_regularizer, constraint=self.depthwise_constraint) if self.use_bias: self.bias = self.add_weight(shape=(input_dim * self.depth_multiplier,), initializer=self.bias_initializer, name='bias', regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None # Set input spec. 
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim}) self.built = True def call(self, inputs): outputs = backend.depthwise_conv2d( inputs, self.depthwise_kernel, strides=self.strides, padding=self.padding, dilation_rate=self.dilation_rate, data_format=self.data_format) if self.use_bias: outputs = backend.bias_add( outputs, self.bias, data_format=self.data_format) if self.activation is not None: return self.activation(outputs) return outputs @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): if self.data_format == 'channels_first': rows = input_shape[2] cols = input_shape[3] out_filters = input_shape[1] * self.depth_multiplier elif self.data_format == 'channels_last': rows = input_shape[1] cols = input_shape[2] out_filters = input_shape[3] * self.depth_multiplier rows = conv_utils.conv_output_length(rows, self.kernel_size[0], self.padding, self.strides[0]) cols = conv_utils.conv_output_length(cols, self.kernel_size[1], self.padding, self.strides[1]) if self.data_format == 'channels_first': return (input_shape[0], out_filters, rows, cols) elif self.data_format == 'channels_last': return (input_shape[0], rows, cols, out_filters) def get_config(self): config = super(DepthwiseConv2D, self).get_config() config.pop('filters') config.pop('kernel_initializer') config.pop('kernel_regularizer') config.pop('kernel_constraint') config['depth_multiplier'] = self.depth_multiplier config['depthwise_initializer'] = initializers.serialize( self.depthwise_initializer) config['depthwise_regularizer'] = regularizers.serialize( self.depthwise_regularizer) config['depthwise_constraint'] = constraints.serialize( self.depthwise_constraint) return config @keras_export('keras.layers.UpSampling1D') class UpSampling1D(Layer): """Upsampling layer for 1D inputs. Repeats each temporal step `size` times along the time axis. Arguments: size: Integer. Upsampling factor. Input shape: 3D tensor with shape: `(batch, steps, features)`. Output shape: 3D tensor with shape: `(batch, upsampled_steps, features)`. """ def __init__(self, size=2, **kwargs): super(UpSampling1D, self).__init__(**kwargs) self.size = int(size) self.input_spec = InputSpec(ndim=3) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() size = self.size * input_shape[1] if input_shape[1] is not None else None return tensor_shape.TensorShape([input_shape[0], size, input_shape[2]]) def call(self, inputs): output = backend.repeat_elements(inputs, self.size, axis=1) return output def get_config(self): config = {'size': self.size} base_config = super(UpSampling1D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.UpSampling2D') class UpSampling2D(Layer): """Upsampling layer for 2D inputs. Repeats the rows and columns of the data by `size[0]` and `size[1]` respectively. Arguments: size: Int, or tuple of 2 integers. The upsampling factors for rows and columns. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". interpolation: A string, one of `nearest` or `bilinear`. 
Input shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, rows, cols, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, rows, cols)` Output shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, upsampled_rows, upsampled_cols, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, upsampled_rows, upsampled_cols)` """ def __init__(self, size=(2, 2), data_format=None, interpolation='nearest', **kwargs): super(UpSampling2D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) self.size = conv_utils.normalize_tuple(size, 2, 'size') if interpolation not in {'nearest', 'bilinear'}: raise ValueError('`interpolation` argument should be one of `"nearest"` ' 'or `"bilinear"`.') self.interpolation = interpolation self.input_spec = InputSpec(ndim=4) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_first': height = self.size[0] * input_shape[ 2] if input_shape[2] is not None else None width = self.size[1] * input_shape[ 3] if input_shape[3] is not None else None return tensor_shape.TensorShape( [input_shape[0], input_shape[1], height, width]) else: height = self.size[0] * input_shape[ 1] if input_shape[1] is not None else None width = self.size[1] * input_shape[ 2] if input_shape[2] is not None else None return tensor_shape.TensorShape( [input_shape[0], height, width, input_shape[3]]) def call(self, inputs): return backend.resize_images( inputs, self.size[0], self.size[1], self.data_format, interpolation=self.interpolation) def get_config(self): config = { 'size': self.size, 'data_format': self.data_format, 'interpolation': self.interpolation } base_config = super(UpSampling2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.UpSampling3D') class UpSampling3D(Layer): """Upsampling layer for 3D inputs. Repeats the 1st, 2nd and 3rd dimensions of the data by `size[0]`, `size[1]` and `size[2]` respectively. Arguments: size: Int, or tuple of 3 integers. The upsampling factors for dim1, dim2 and dim3. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". 
Input shape: 5D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, dim1, dim2, dim3, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, dim1, dim2, dim3)` Output shape: 5D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)` """ def __init__(self, size=(2, 2, 2), data_format=None, **kwargs): self.data_format = conv_utils.normalize_data_format(data_format) self.size = conv_utils.normalize_tuple(size, 3, 'size') self.input_spec = InputSpec(ndim=5) super(UpSampling3D, self).__init__(**kwargs) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_first': dim1 = self.size[0] * input_shape[ 2] if input_shape[2] is not None else None dim2 = self.size[1] * input_shape[ 3] if input_shape[3] is not None else None dim3 = self.size[2] * input_shape[ 4] if input_shape[4] is not None else None return tensor_shape.TensorShape( [input_shape[0], input_shape[1], dim1, dim2, dim3]) else: dim1 = self.size[0] * input_shape[ 1] if input_shape[1] is not None else None dim2 = self.size[1] * input_shape[ 2] if input_shape[2] is not None else None dim3 = self.size[2] * input_shape[ 3] if input_shape[3] is not None else None return tensor_shape.TensorShape( [input_shape[0], dim1, dim2, dim3, input_shape[4]]) def call(self, inputs): return backend.resize_volumes( inputs, self.size[0], self.size[1], self.size[2], self.data_format) def get_config(self): config = {'size': self.size, 'data_format': self.data_format} base_config = super(UpSampling3D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.ZeroPadding1D') class ZeroPadding1D(Layer): """Zero-padding layer for 1D input (e.g. temporal sequence). Arguments: padding: Int, or tuple of int (length 2), or dictionary. - If int: How many zeros to add at the beginning and end of the padding dimension (axis 1). - If tuple of int (length 2): How many zeros to add at the beginning and at the end of the padding dimension (`(left_pad, right_pad)`). Input shape: 3D tensor with shape `(batch, axis_to_pad, features)` Output shape: 3D tensor with shape `(batch, padded_axis, features)` """ def __init__(self, padding=1, **kwargs): super(ZeroPadding1D, self).__init__(**kwargs) self.padding = conv_utils.normalize_tuple(padding, 2, 'padding') self.input_spec = InputSpec(ndim=3) def compute_output_shape(self, input_shape): if input_shape[1] is not None: length = input_shape[1] + self.padding[0] + self.padding[1] else: length = None return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]]) def call(self, inputs): return backend.temporal_padding(inputs, padding=self.padding) def get_config(self): config = {'padding': self.padding} base_config = super(ZeroPadding1D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.ZeroPadding2D') class ZeroPadding2D(Layer): """Zero-padding layer for 2D input (e.g. picture). This layer can add rows and columns of zeros at the top, bottom, left and right side of an image tensor. Arguments: padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int: the same symmetric padding is applied to height and width. 
- If tuple of 2 ints: interpreted as two different symmetric padding values for height and width: `(symmetric_height_pad, symmetric_width_pad)`. - If tuple of 2 tuples of 2 ints: interpreted as `((top_pad, bottom_pad), (left_pad, right_pad))` data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, rows, cols, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, rows, cols)` Output shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, padded_rows, padded_cols, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, padded_rows, padded_cols)` """ def __init__(self, padding=(1, 1), data_format=None, **kwargs): super(ZeroPadding2D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) if isinstance(padding, int): self.padding = ((padding, padding), (padding, padding)) elif hasattr(padding, '__len__'): if len(padding) != 2: raise ValueError('`padding` should have two elements. ' 'Found: ' + str(padding)) height_padding = conv_utils.normalize_tuple(padding[0], 2, '1st entry of padding') width_padding = conv_utils.normalize_tuple(padding[1], 2, '2nd entry of padding') self.padding = (height_padding, width_padding) else: raise ValueError('`padding` should be either an int, ' 'a tuple of 2 ints ' '(symmetric_height_pad, symmetric_width_pad), ' 'or a tuple of 2 tuples of 2 ints ' '((top_pad, bottom_pad), (left_pad, right_pad)). ' 'Found: ' + str(padding)) self.input_spec = InputSpec(ndim=4) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_first': if input_shape[2] is not None: rows = input_shape[2] + self.padding[0][0] + self.padding[0][1] else: rows = None if input_shape[3] is not None: cols = input_shape[3] + self.padding[1][0] + self.padding[1][1] else: cols = None return tensor_shape.TensorShape( [input_shape[0], input_shape[1], rows, cols]) elif self.data_format == 'channels_last': if input_shape[1] is not None: rows = input_shape[1] + self.padding[0][0] + self.padding[0][1] else: rows = None if input_shape[2] is not None: cols = input_shape[2] + self.padding[1][0] + self.padding[1][1] else: cols = None return tensor_shape.TensorShape( [input_shape[0], rows, cols, input_shape[3]]) def call(self, inputs): return backend.spatial_2d_padding( inputs, padding=self.padding, data_format=self.data_format) def get_config(self): config = {'padding': self.padding, 'data_format': self.data_format} base_config = super(ZeroPadding2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.ZeroPadding3D') class ZeroPadding3D(Layer): """Zero-padding layer for 3D data (spatial or spatio-temporal). Arguments: padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints. - If int: the same symmetric padding is applied to depth, height, and width. - If tuple of 3 ints: interpreted as three different symmetric padding values for depth, height, and width: `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`. 
- If tuple of 3 tuples of 2 ints: interpreted as `((left_dim1_pad, right_dim1_pad), (left_dim2_pad, right_dim2_pad), (left_dim3_pad, right_dim3_pad))` data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: 5D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad, depth)` - If `data_format` is `"channels_first"`: `(batch, depth, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad)` Output shape: 5D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, first_padded_axis, second_padded_axis, third_padded_axis, depth)` - If `data_format` is `"channels_first"`: `(batch, depth, first_padded_axis, second_padded_axis, third_padded_axis)` """ def __init__(self, padding=(1, 1, 1), data_format=None, **kwargs): super(ZeroPadding3D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) if isinstance(padding, int): self.padding = ((padding, padding), (padding, padding), (padding, padding)) elif hasattr(padding, '__len__'): if len(padding) != 3: raise ValueError('`padding` should have 3 elements. ' 'Found: ' + str(padding)) dim1_padding = conv_utils.normalize_tuple(padding[0], 2, '1st entry of padding') dim2_padding = conv_utils.normalize_tuple(padding[1], 2, '2nd entry of padding') dim3_padding = conv_utils.normalize_tuple(padding[2], 2, '3rd entry of padding') self.padding = (dim1_padding, dim2_padding, dim3_padding) else: raise ValueError( '`padding` should be either an int, ' 'a tuple of 3 ints ' '(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), ' 'or a tuple of 3 tuples of 2 ints ' '((left_dim1_pad, right_dim1_pad),' ' (left_dim2_pad, right_dim2_pad),' ' (left_dim3_pad, right_dim3_pad)). 
' 'Found: ' + str(padding)) self.input_spec = InputSpec(ndim=5) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_first': if input_shape[2] is not None: dim1 = input_shape[2] + self.padding[0][0] + self.padding[0][1] else: dim1 = None if input_shape[3] is not None: dim2 = input_shape[3] + self.padding[1][0] + self.padding[1][1] else: dim2 = None if input_shape[4] is not None: dim3 = input_shape[4] + self.padding[2][0] + self.padding[2][1] else: dim3 = None return tensor_shape.TensorShape( [input_shape[0], input_shape[1], dim1, dim2, dim3]) elif self.data_format == 'channels_last': if input_shape[1] is not None: dim1 = input_shape[1] + self.padding[0][0] + self.padding[0][1] else: dim1 = None if input_shape[2] is not None: dim2 = input_shape[2] + self.padding[1][0] + self.padding[1][1] else: dim2 = None if input_shape[3] is not None: dim3 = input_shape[3] + self.padding[2][0] + self.padding[2][1] else: dim3 = None return tensor_shape.TensorShape( [input_shape[0], dim1, dim2, dim3, input_shape[4]]) def call(self, inputs): return backend.spatial_3d_padding( inputs, padding=self.padding, data_format=self.data_format) def get_config(self): config = {'padding': self.padding, 'data_format': self.data_format} base_config = super(ZeroPadding3D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.Cropping1D') class Cropping1D(Layer): """Cropping layer for 1D input (e.g. temporal sequence). It crops along the time dimension (axis 1). Arguments: cropping: Int or tuple of int (length 2). How many units should be trimmed off at the beginning and end of the cropping dimension (axis 1). If a single int is provided, the same value will be used for both. Input shape: 3D tensor with shape `(batch, axis_to_crop, features)` Output shape: 3D tensor with shape `(batch, cropped_axis, features)` """ def __init__(self, cropping=(1, 1), **kwargs): super(Cropping1D, self).__init__(**kwargs) self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping') self.input_spec = InputSpec(ndim=3) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if input_shape[1] is not None: length = input_shape[1] - self.cropping[0] - self.cropping[1] else: length = None return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]]) def call(self, inputs): if self.cropping[1] == 0: return inputs[:, self.cropping[0]:, :] else: return inputs[:, self.cropping[0]:-self.cropping[1], :] def get_config(self): config = {'cropping': self.cropping} base_config = super(Cropping1D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.Cropping2D') class Cropping2D(Layer): """Cropping layer for 2D input (e.g. picture). It crops along spatial dimensions, i.e. height and width. Arguments: cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int: the same symmetric cropping is applied to height and width. - If tuple of 2 ints: interpreted as two different symmetric cropping values for height and width: `(symmetric_height_crop, symmetric_width_crop)`. - If tuple of 2 tuples of 2 ints: interpreted as `((top_crop, bottom_crop), (left_crop, right_crop))` data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. 
It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, rows, cols, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, rows, cols)` Output shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, cropped_rows, cropped_cols, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, cropped_rows, cropped_cols)` Examples: ```python # Crop the input 2D images or feature maps model = Sequential() model.add(Cropping2D(cropping=((2, 2), (4, 4)), input_shape=(28, 28, 3))) # now model.output_shape == (None, 24, 20, 3) model.add(Conv2D(64, (3, 3), padding='same')) model.add(Cropping2D(cropping=((2, 2), (2, 2)))) # now model.output_shape == (None, 20, 16, 64) ``` """ def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs): super(Cropping2D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) if isinstance(cropping, int): self.cropping = ((cropping, cropping), (cropping, cropping)) elif hasattr(cropping, '__len__'): if len(cropping) != 2: raise ValueError('`cropping` should have two elements. ' 'Found: ' + str(cropping)) height_cropping = conv_utils.normalize_tuple(cropping[0], 2, '1st entry of cropping') width_cropping = conv_utils.normalize_tuple(cropping[1], 2, '2nd entry of cropping') self.cropping = (height_cropping, width_cropping) else: raise ValueError('`cropping` should be either an int, ' 'a tuple of 2 ints ' '(symmetric_height_crop, symmetric_width_crop), ' 'or a tuple of 2 tuples of 2 ints ' '((top_crop, bottom_crop), (left_crop, right_crop)). ' 'Found: ' + str(cropping)) self.input_spec = InputSpec(ndim=4) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() # pylint: disable=invalid-unary-operand-type if self.data_format == 'channels_first': return tensor_shape.TensorShape([ input_shape[0], input_shape[1], input_shape[2] - self.cropping[0][0] - self.cropping[0][1] if input_shape[2] else None, input_shape[3] - self.cropping[1][0] - self.cropping[1][1] if input_shape[3] else None ]) else: return tensor_shape.TensorShape([ input_shape[0], input_shape[1] - self.cropping[0][0] - self.cropping[0][1] if input_shape[1] else None, input_shape[2] - self.cropping[1][0] - self.cropping[1][1] if input_shape[2] else None, input_shape[3] ]) # pylint: enable=invalid-unary-operand-type def call(self, inputs): # pylint: disable=invalid-unary-operand-type if self.data_format == 'channels_first': if self.cropping[0][1] == self.cropping[1][1] == 0: return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:] elif self.cropping[0][1] == 0: return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]: -self.cropping[1][1]] elif self.cropping[1][1] == 0: return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:] return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:-self.cropping[1][1]] else: if self.cropping[0][1] == self.cropping[1][1] == 0: return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :] elif self.cropping[0][1] == 0: return inputs[:, self.cropping[0][0]:, self.cropping[1][0]: -self.cropping[1][1], :] elif self.cropping[1][1] == 0: return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:, :] return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[ 
1][0]:-self.cropping[1][1], :] # pylint: disable=invalid-unary-operand-type # pylint: enable=invalid-unary-operand-type def get_config(self): config = {'cropping': self.cropping, 'data_format': self.data_format} base_config = super(Cropping2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.Cropping3D') class Cropping3D(Layer): """Cropping layer for 3D data (e.g. spatial or spatio-temporal). Arguments: cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints. - If int: the same symmetric cropping is applied to depth, height, and width. - If tuple of 3 ints: interpreted as three different symmetric cropping values for depth, height, and width: `(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`. - If tuple of 3 tuples of 2 ints: interpreted as `((left_dim1_crop, right_dim1_crop), (left_dim2_crop, right_dim2_crop), (left_dim3_crop, right_dim3_crop))` data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: 5D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop, depth)` - If `data_format` is `"channels_first"`: `(batch, depth, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop)` Output shape: 5D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, first_cropped_axis, second_cropped_axis, third_cropped_axis, depth)` - If `data_format` is `"channels_first"`: `(batch, depth, first_cropped_axis, second_cropped_axis, third_cropped_axis)` """ def __init__(self, cropping=((1, 1), (1, 1), (1, 1)), data_format=None, **kwargs): super(Cropping3D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) if isinstance(cropping, int): self.cropping = ((cropping, cropping), (cropping, cropping), (cropping, cropping)) elif hasattr(cropping, '__len__'): if len(cropping) != 3: raise ValueError('`cropping` should have 3 elements. ' 'Found: ' + str(cropping)) dim1_cropping = conv_utils.normalize_tuple(cropping[0], 2, '1st entry of cropping') dim2_cropping = conv_utils.normalize_tuple(cropping[1], 2, '2nd entry of cropping') dim3_cropping = conv_utils.normalize_tuple(cropping[2], 2, '3rd entry of cropping') self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping) else: raise ValueError( '`cropping` should be either an int, ' 'a tuple of 3 ints ' '(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), ' 'or a tuple of 3 tuples of 2 ints ' '((left_dim1_crop, right_dim1_crop),' ' (left_dim2_crop, right_dim2_crop),' ' (left_dim3_crop, right_dim3_crop)). 
' 'Found: ' + str(cropping)) self.input_spec = InputSpec(ndim=5) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() # pylint: disable=invalid-unary-operand-type if self.data_format == 'channels_first': if input_shape[2] is not None: dim1 = input_shape[2] - self.cropping[0][0] - self.cropping[0][1] else: dim1 = None if input_shape[3] is not None: dim2 = input_shape[3] - self.cropping[1][0] - self.cropping[1][1] else: dim2 = None if input_shape[4] is not None: dim3 = input_shape[4] - self.cropping[2][0] - self.cropping[2][1] else: dim3 = None return tensor_shape.TensorShape( [input_shape[0], input_shape[1], dim1, dim2, dim3]) elif self.data_format == 'channels_last': if input_shape[1] is not None: dim1 = input_shape[1] - self.cropping[0][0] - self.cropping[0][1] else: dim1 = None if input_shape[2] is not None: dim2 = input_shape[2] - self.cropping[1][0] - self.cropping[1][1] else: dim2 = None if input_shape[3] is not None: dim3 = input_shape[3] - self.cropping[2][0] - self.cropping[2][1] else: dim3 = None return tensor_shape.TensorShape( [input_shape[0], dim1, dim2, dim3, input_shape[4]]) # pylint: enable=invalid-unary-operand-type def call(self, inputs): # pylint: disable=invalid-unary-operand-type if self.data_format == 'channels_first': if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0: return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:, self.cropping[2][0]:] elif self.cropping[0][1] == self.cropping[1][1] == 0: return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1]] elif self.cropping[1][1] == self.cropping[2][1] == 0: return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:] elif self.cropping[0][1] == self.cropping[2][1] == 0: return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]: -self.cropping[1][1], self.cropping[2][0]:] elif self.cropping[0][1] == 0: return inputs[:, :, self.cropping[0][0]:, self.cropping[1][ 0]:-self.cropping[1][1], self.cropping[2][0]:-self.cropping[2][1]] elif self.cropping[1][1] == 0: return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self. cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1]] elif self.cropping[2][1] == 0: return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self. 
cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:] return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][ 0]:-self.cropping[2][1]] else: if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0: return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, self.cropping[2][0]:, :] elif self.cropping[0][1] == self.cropping[1][1] == 0: return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1], :] elif self.cropping[1][1] == self.cropping[2][1] == 0: return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:, :] elif self.cropping[0][1] == self.cropping[2][1] == 0: return inputs[:, self.cropping[0][0]:, self.cropping[1][0]: -self.cropping[1][1], self.cropping[2][0]:, :] elif self.cropping[0][1] == 0: return inputs[:, self.cropping[0][0]:, self.cropping[1][ 0]:-self.cropping[1][1], self.cropping[2][0]: -self.cropping[2][1], :] elif self.cropping[1][1] == 0: return inputs[:, self.cropping[0][ 0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]: -self.cropping[2][1], :] elif self.cropping[2][1] == 0: return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:-self.cropping[1][1], self.cropping[ 2][0]:, :] return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[ 1][0]:-self.cropping[1][1], self.cropping[2][0]: # pylint: disable=invalid-unary-operand-type -self.cropping[2][1], :] # pylint: disable=invalid-unary-operand-type # pylint: enable=invalid-unary-operand-type def get_config(self): config = {'cropping': self.cropping, 'data_format': self.data_format} base_config = super(Cropping3D, self).get_config() return dict(list(base_config.items()) + list(config.items())) # Aliases Convolution1D = Conv1D Convolution2D = Conv2D Convolution3D = Conv3D SeparableConvolution1D = SeparableConv1D SeparableConvolution2D = SeparableConv2D Convolution2DTranspose = Conv2DTranspose Convolution3DTranspose = Conv3DTranspose Deconvolution2D = Deconv2D = Conv2DTranspose Deconvolution3D = Deconv3D = Conv3DTranspose
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/convolutional.py
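For reference, a minimal usage sketch (not part of the source file above) of how the depthwise/separable convolution and cropping layers defined in convolutional.py compose; it assumes a TF 1.15-era `tf.keras` build, and the layer names are the public exports listed above.

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([
    # Depthwise step only: one 3x3 kernel per input channel.
    tf.keras.layers.DepthwiseConv2D(kernel_size=3, depth_multiplier=1,
                                    input_shape=(28, 28, 3)),
    # Full separable conv: depthwise 3x3 followed by a 1x1 pointwise mix.
    tf.keras.layers.SeparableConv2D(filters=8, kernel_size=3),
    # Crop two rows/columns from each border.
    tf.keras.layers.Cropping2D(cropping=((2, 2), (2, 2))),
])
out = model.predict(np.zeros((1, 28, 28, 3), dtype=np.float32))
print(out.shape)  # (1, 20, 20, 8): two 'valid' 3x3 convs, then 4 cropped off.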
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for embedding layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.eager import backprop from tensorflow.python.framework import test_util as tf_test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test from tensorflow.python.training import adagrad class EmbeddingTest(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes def test_embedding(self): if tf_test_util.is_gpu_available(): self.skipTest('Only test embedding on CPU.') testing_utils.layer_test( keras.layers.Embedding, kwargs={'output_dim': 4, 'input_dim': 10, 'input_length': 2}, input_shape=(3, 2), input_dtype='int32', expected_output_dtype='float32') testing_utils.layer_test( keras.layers.Embedding, kwargs={'output_dim': 4, 'input_dim': 10, 'mask_zero': True}, input_shape=(3, 2), input_dtype='int32', expected_output_dtype='float32') testing_utils.layer_test( keras.layers.Embedding, kwargs={'output_dim': 4, 'input_dim': 10, 'mask_zero': True}, input_shape=(3, 4, 2), input_dtype='int32', expected_output_dtype='float32') testing_utils.layer_test( keras.layers.Embedding, kwargs={'output_dim': 4, 'input_dim': 10, 'mask_zero': True, 'input_length': (None, 2)}, input_shape=(3, 4, 2), input_dtype='int32', expected_output_dtype='float32') @keras_parameterized.run_all_keras_modes def test_embedding_correctness(self): layer = keras.layers.Embedding(output_dim=2, input_dim=2) model = keras.models.Sequential([layer]) layer.set_weights([np.array([[1, 1], [2, 2]])]) model.run_eagerly = testing_utils.should_run_eagerly() model._experimental_run_tf_function = testing_utils.should_run_tf_function() outputs = model.predict(np.array([[0, 1, 0]], dtype='int32')) self.assertAllClose(outputs, [[[1, 1], [2, 2], [1, 1]]]) @tf_test_util.run_in_graph_and_eager_modes def test_eager_gpu_cpu(self): l = keras.layers.Embedding(output_dim=2, input_dim=2) l.build((None, 2)) inputs = keras.backend.constant([[0, 1, 0]], dtype='int32') with backprop.GradientTape() as tape: output = l(inputs) gs = tape.gradient(output, l.weights) opt = adagrad.AdagradOptimizer(0.1) opt.apply_gradients(zip(gs, l.weights)) self.assertAllEqual(len(gs), 1) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/embeddings_test.py
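A minimal sketch of the lookup behavior exercised by `test_embedding_correctness` above, assuming a TF 1.15-era `tf.keras`: an `Embedding` layer is a trainable table whose row `i` is returned for integer input `i`.

import numpy as np
import tensorflow as tf

layer = tf.keras.layers.Embedding(input_dim=2, output_dim=2, input_length=3)
model = tf.keras.Sequential([layer])
layer.set_weights([np.array([[1., 1.], [2., 2.]])])  # one row per vocabulary id

# Indices [0, 1, 0] select rows 0, 1, 0 of the weight table.
print(model.predict(np.array([[0, 1, 0]], dtype='int32')))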
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for normalization layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import wrap_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import test_util as tf_test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.layers import normalization from tensorflow.python.keras.layers import normalization_v2 from tensorflow.python.keras.mixed_precision.experimental import policy from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2 from tensorflow.python.ops import array_ops from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent class BatchNormalizationTest(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes def test_basic_batchnorm(self): testing_utils.layer_test( keras.layers.BatchNormalization, kwargs={ 'momentum': 0.9, 'epsilon': 0.1, 'gamma_regularizer': keras.regularizers.l2(0.01), 'beta_regularizer': keras.regularizers.l2(0.01) }, input_shape=(3, 4, 2)) testing_utils.layer_test( keras.layers.BatchNormalization, kwargs={ 'gamma_initializer': 'ones', 'beta_initializer': 'ones', 'moving_mean_initializer': 'zeros', 'moving_variance_initializer': 'ones' }, input_shape=(3, 4, 2)) testing_utils.layer_test( keras.layers.BatchNormalization, kwargs={'scale': False, 'center': False}, input_shape=(3, 3)) testing_utils.layer_test( keras.layers.BatchNormalization, kwargs={ 'gamma_initializer': 'ones', 'beta_initializer': 'ones', 'moving_mean_initializer': 'zeros', 'moving_variance_initializer': 'ones' }, input_shape=(3, 2, 4, 2)) @tf_test_util.run_in_graph_and_eager_modes def test_batchnorm_weights(self): layer = keras.layers.BatchNormalization(scale=False, center=False) layer.build((None, 3, 4)) self.assertEqual(len(layer.trainable_weights), 0) self.assertEqual(len(layer.weights), 2) layer = keras.layers.BatchNormalization() layer.build((None, 3, 4)) self.assertEqual(len(layer.trainable_weights), 2) self.assertEqual(len(layer.weights), 4) @tf_test_util.run_in_graph_and_eager_modes def test_batchnorm_regularization(self): layer = keras.layers.BatchNormalization( gamma_regularizer='l1', beta_regularizer='l1') layer.build((None, 3, 4)) self.assertEqual(len(layer.losses), 2) max_norm = keras.constraints.max_norm layer = keras.layers.BatchNormalization( gamma_constraint=max_norm, beta_constraint=max_norm) layer.build((None, 3, 4)) self.assertEqual(layer.gamma.constraint, max_norm) 
self.assertEqual(layer.beta.constraint, max_norm) @keras_parameterized.run_all_keras_modes def test_batchnorm_convnet(self): if test.is_gpu_available(cuda_only=True): with self.session(use_gpu=True): model = keras.models.Sequential() norm = keras.layers.BatchNormalization( axis=1, input_shape=(3, 4, 4), momentum=0.8) model.add(norm) model.compile( loss='mse', optimizer=gradient_descent.GradientDescentOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # centered on 5.0, variance 10.0 x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4)) model.fit(x, x, epochs=4, verbose=0) out = model.predict(x) out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1)) out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1)) np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1) np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1) @keras_parameterized.run_all_keras_modes def test_batchnorm_convnet_channel_last(self): model = keras.models.Sequential() norm = keras.layers.BatchNormalization( axis=-1, input_shape=(4, 4, 3), momentum=0.8) model.add(norm) model.compile( loss='mse', optimizer=gradient_descent.GradientDescentOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # centered on 5.0, variance 10.0 x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3)) model.fit(x, x, epochs=4, verbose=0) out = model.predict(x) out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3)) out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3)) np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1) np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1) @keras_parameterized.run_all_keras_modes def test_batchnorm_convnet_channel_last_3d_fused(self): model = keras.models.Sequential() norm = keras.layers.BatchNormalization( axis=-1, input_shape=(4, 4, 4, 3), momentum=0.8, fused=True) model.add(norm) model.compile( loss='mse', optimizer=gradient_descent.GradientDescentOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly()) # centered on 5.0, variance 10.0 x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 4, 3)) model.fit(x, x, epochs=4, verbose=0) out = model.predict(x) out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 1, 3)) out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 1, 3)) np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2, 3)), 0.0, atol=1e-1) np.testing.assert_allclose(np.std(out, axis=(0, 1, 2, 3)), 1.0, atol=1e-1) @keras_parameterized.run_all_keras_modes def test_batchnorm_convnet_channel_first_3d_fused(self): model = keras.models.Sequential() norm = keras.layers.BatchNormalization( axis=1, input_shape=(3, 4, 4, 4), momentum=0.8, fused=True) model.add(norm) model.compile( loss='mse', optimizer=gradient_descent.GradientDescentOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly()) # centered on 5.0, variance 10.0 x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4, 4)) model.fit(x, x, epochs=4, verbose=0) out = model.predict(x) out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1, 1)) out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1, 1)) np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3, 4)), 0.0, atol=1e-1) np.testing.assert_allclose(np.std(out, axis=(0, 2, 3, 4)), 1.0, atol=1e-1) @keras_parameterized.run_all_keras_modes def 
test_batchnorm_convnet_channel_last_3d_fused_correctness(self): model = keras.models.Sequential() norm = keras.layers.BatchNormalization(axis=-1, momentum=0.8, fused=True) model.add(norm) model.compile( loss='mse', optimizer=gradient_descent.GradientDescentOptimizer(0.01)) # Sequential values ensure the result is axis-dependent. x = np.arange(5 * 5 * 5 * 5 * 3).reshape([5, 5, 5, 5, 3]) x = x.astype(np.float32) model.fit(x, x, epochs=1000, verbose=0) moving_mean = keras.backend.eval(norm.moving_mean) moving_variance = keras.backend.eval(norm.moving_variance) x_mean = x.mean(axis=(0, 1, 2, 3)) moving_mean_target = x_mean moving_variance_target = x.var(axis=(0, 1, 2, 3)) np.testing.assert_allclose( moving_mean, moving_mean_target, rtol=1e-5) np.testing.assert_allclose( moving_variance, moving_variance_target, rtol=1e-2) beta = np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 1, 3)) gamma = np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 1, 3)) out = (model.predict(x) - beta) / gamma np.testing.assert_allclose( np.mean(out, axis=(0, 1, 2, 3)), 0.0, atol=1e-2) np.testing.assert_allclose(np.std(out, axis=(0, 1, 2, 3)), 1.0, atol=1e-2) # Test with changed input shape. y = np.arange(7 * 7 * 7 * 7 * 3).reshape([7, 7, 7, 7, 3]) y = y.astype(np.float32) out = (model.predict(y) - beta) / gamma x_std = x.std(axis=(0, 1, 2, 3)) out_mean_target = (y.mean(axis=(0, 1, 2, 3)) - x_mean) / x_std out_std_target = y.std(axis=(0, 1, 2, 3)) / x_std np.testing.assert_allclose( np.mean(out, axis=(0, 1, 2, 3)), out_mean_target, atol=1e-2) np.testing.assert_allclose( np.std(out, axis=(0, 1, 2, 3)), out_std_target, atol=1e-2) @keras_parameterized.run_all_keras_modes def test_batchnorm_convnet_channel_first_3d_fused_correctness(self): model = keras.models.Sequential() norm = keras.layers.BatchNormalization(axis=1, momentum=0.8, fused=True) model.add(norm) model.compile( loss='mse', optimizer=gradient_descent.GradientDescentOptimizer(0.01)) # Sequential values ensure the result is axis-dependent. x = np.arange(5 * 3 * 5 * 5 * 5).reshape([5, 3, 5, 5, 5]) x = x.astype(np.float32) model.fit(x, x, epochs=1000, verbose=0) moving_mean = keras.backend.eval(norm.moving_mean) moving_variance = keras.backend.eval(norm.moving_variance) x_mean = x.mean(axis=(0, 2, 3, 4)) moving_mean_target = x_mean moving_variance_target = x.var(axis=(0, 2, 3, 4)) np.testing.assert_allclose( moving_mean, moving_mean_target, rtol=1e-5) np.testing.assert_allclose( moving_variance, moving_variance_target, rtol=1e-2) beta = np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1, 1)) gamma = np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1, 1)) out = (model.predict(x) - beta) / gamma np.testing.assert_allclose( np.mean(out, axis=(0, 2, 3, 4)), 0.0, atol=1e-2) np.testing.assert_allclose(np.std(out, axis=(0, 2, 3, 4)), 1.0, atol=1e-2) # Test with changed input shape. 
y = np.arange(7 * 3 * 7 * 7 * 7).reshape([7, 3, 7, 7, 7]) y = y.astype(np.float32) out = (model.predict(y) - beta) / gamma x_std = x.std(axis=(0, 2, 3, 4)) out_mean_target = (y.mean(axis=(0, 2, 3, 4)) - x_mean) / x_std out_std_target = y.std(axis=(0, 2, 3, 4)) / x_std np.testing.assert_allclose( np.mean(out, axis=(0, 2, 3, 4)), out_mean_target, atol=1e-2) np.testing.assert_allclose( np.std(out, axis=(0, 2, 3, 4)), out_std_target, atol=1e-2) @keras_parameterized.run_all_keras_modes def test_batchnorm_correctness(self): _run_batchnorm_correctness_test( normalization.BatchNormalization, dtype='float32') _run_batchnorm_correctness_test( normalization_v2.BatchNormalization, dtype='float32') @keras_parameterized.run_all_keras_modes def test_batchnorm_mixed_precision(self): _run_batchnorm_correctness_test( normalization.BatchNormalization, dtype='float16') _run_batchnorm_correctness_test( normalization_v2.BatchNormalization, dtype='float16') @tf_test_util.run_in_graph_and_eager_modes def test_batchnorm_policy(self): norm = keras.layers.BatchNormalization( axis=-1, input_shape=(4, 4, 3), momentum=0.8, dtype=policy.Policy('infer_float32_vars')) x = np.random.normal(size=(10, 4, 4, 3)).astype('float16') y = norm(x) self.assertEqual(y.dtype, 'float16') self.assertEqual(norm.beta.dtype.base_dtype, 'float32') self.assertEqual(norm.gamma.dtype.base_dtype, 'float32') @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_batchnorm_non_trainable_with_fit(self): inputs = keras.Input((3,)) bn = normalization_v2.BatchNormalization() outputs = bn(inputs) model = keras.Model(inputs, outputs) model.compile( 'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(np.random.random((100, 3)), np.random.random((100, 3))) test_data = np.random.random((10, 3)) test_targets = np.random.random((10, 3)) test_loss = model.evaluate(test_data, test_targets) bn.trainable = False model.compile( 'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) train_loss = model.train_on_batch(test_data, test_targets) self.assertAlmostEqual(test_loss, train_loss) @tf_test_util.run_in_graph_and_eager_modes def test_batchnorm_non_trainable_with_tf_function(self): inputs = keras.Input((3,)) bn = normalization_v2.BatchNormalization() outputs = bn(inputs) model = keras.Model(inputs, outputs) loss_fn = keras.losses.MeanSquaredError() optimizer = rmsprop_v2.RMSprop() @def_function.function() def train_step(x, y): with backprop.GradientTape() as tape: y_pred = model(x, training=True) loss = loss_fn(y, y_pred) grads = tape.gradient(loss, model.trainable_weights) optimizer.apply_gradients(zip(grads, model.trainable_weights)) return loss @def_function.function() def test_step(x, y): y_pred = model(x, training=False) loss = loss_fn(y, y_pred) return loss train_step(np.random.random((100, 3)).astype(np.float32), np.random.random((100, 3)).astype(np.float32)) test_data = np.random.random((10, 3)).astype(np.float32) test_targets = np.random.random((10, 3)).astype(np.float32) test_loss = test_step(test_data, test_targets) bn.trainable = False train_loss = train_step(test_data, test_targets) if context.executing_eagerly(): self.assertAlmostEqual(test_loss.numpy(), train_loss.numpy()) def test_eager_batchnorm_in_custom_model_call_with_tf_function(self): class MyModel(keras.Model): def __init__(self): super(MyModel, self).__init__() self.bn = 
keras.layers.BatchNormalization() @def_function.function() def call(self, x, training): return self.bn(x, training=training) with context.eager_mode(): model = MyModel() for _ in range(10): x = constant_op.constant(0.5, shape=[2, 1]) model(x, training=True) # Make sure the moving mean and variance have been updated self.assertAllClose(model.bn.moving_mean.numpy(), [0.047], atol=3e-3) self.assertAllClose(model.bn.moving_variance.numpy(), [0.9], atol=3e-2) class BatchNormalizationV1Test(test.TestCase): @tf_test_util.run_in_graph_and_eager_modes def test_v1_fused_attribute(self): norm = normalization.BatchNormalization() inp = keras.layers.Input((4, 4, 4)) norm(inp) self.assertEqual(norm.fused, True) norm = normalization.BatchNormalization(fused=False) self.assertEqual(norm.fused, False) inp = keras.layers.Input(shape=(4, 4, 4)) norm(inp) self.assertEqual(norm.fused, False) norm = normalization.BatchNormalization(virtual_batch_size=2) self.assertEqual(norm.fused, True) inp = keras.layers.Input(shape=(2, 2, 2)) norm(inp) self.assertEqual(norm.fused, False) class BatchNormalizationV2Test(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes def test_basic_batchnorm_v2(self): testing_utils.layer_test( normalization_v2.BatchNormalization, kwargs={'fused': True}, input_shape=(3, 3, 3, 3)) testing_utils.layer_test( normalization_v2.BatchNormalization, kwargs={'fused': None}, input_shape=(3, 3, 3)) testing_utils.layer_test( normalization_v2.BatchNormalization, kwargs={'fused': True}, input_shape=(3, 3, 3, 3, 3)) testing_utils.layer_test( normalization_v2.BatchNormalization, kwargs={'fused': True}, input_shape=(3, 3)) @tf_test_util.run_in_graph_and_eager_modes def test_v2_fused_attribute(self): norm = normalization_v2.BatchNormalization() self.assertEqual(norm.fused, True) inp = keras.layers.Input(shape=(4, 4, 4)) norm(inp) self.assertEqual(norm.fused, True) norm = normalization_v2.BatchNormalization() self.assertEqual(norm.fused, True) inp = keras.layers.Input(shape=(4, 4)) norm(inp) self.assertEqual(norm.fused, True) norm = normalization_v2.BatchNormalization(virtual_batch_size=2) self.assertEqual(norm.fused, False) inp = keras.layers.Input(shape=(4, 4, 4)) norm(inp) self.assertEqual(norm.fused, False) norm = normalization_v2.BatchNormalization(fused=False) self.assertEqual(norm.fused, False) inp = keras.layers.Input(shape=(4, 4, 4)) norm(inp) self.assertEqual(norm.fused, False) norm = normalization_v2.BatchNormalization(fused=True, axis=[3]) self.assertEqual(norm.fused, True) inp = keras.layers.Input(shape=(4, 4, 4)) norm(inp) self.assertEqual(norm.fused, True) with self.assertRaisesRegexp(ValueError, 'fused.*renorm'): normalization_v2.BatchNormalization(fused=True, renorm=True) with self.assertRaisesRegexp(ValueError, 'fused.*over a single axis'): normalization_v2.BatchNormalization(fused=True, axis=[1, 3]) with self.assertRaisesRegexp(ValueError, 'fused.*virtual_batch_size'): normalization_v2.BatchNormalization(fused=True, virtual_batch_size=2) with self.assertRaisesRegexp(ValueError, 'fused.*adjustment'): normalization_v2.BatchNormalization(fused=True, adjustment=lambda _: (1, 0)) def test_updates_in_wrap_function(self): with context.eager_mode(): layer = keras.layers.BatchNormalization() def my_func(): x = array_ops.ones((10, 1)) return layer(x, training=True) wrapped_fn = wrap_function.wrap_function(my_func, []) wrapped_fn() # Updates should be tracked in a `wrap_function`. 
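# The two expected update ops are the moving-average assignments for
# `moving_mean` and `moving_variance`.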
self.assertLen(layer.updates, 2) def _run_batchnorm_correctness_test(layer, dtype='float32', fused=False): model = keras.models.Sequential() model.add(keras.Input(shape=(2, 2, 2), dtype=dtype)) norm = layer(momentum=0.8, fused=fused) model.add(norm) if dtype == 'float16': # Keras models require float32 losses. model.add(keras.layers.Lambda(lambda x: keras.backend.cast(x, 'float32'))) model.compile( loss='mse', optimizer=gradient_descent.GradientDescentOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # centered on 5.0, variance 10.0 x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2)) .astype(dtype)) model.fit(x, x, epochs=4, verbose=0) out = model.predict(x) out -= keras.backend.eval(norm.beta) out /= keras.backend.eval(norm.gamma) np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1) np.testing.assert_allclose(out.std(), 1.0, atol=1e-1) @parameterized.parameters( [normalization.BatchNormalization, normalization_v2.BatchNormalization]) class NormalizationLayersGraphModeOnlyTest( test.TestCase, parameterized.TestCase): def test_unknown_shape_batchnorm(self, layer): """Test that a BN layer supports unknown input shapes in graph mode.""" with self.cached_session(): def run_model(input_shape): bn = layer() x = keras.layers.Input(shape=input_shape) y = bn(x) model = keras.models.Model(x, y) model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse') known_shape = [2] + [5 if dim is None else dim for dim in input_shape] val_a = np.random.random(known_shape) _ = model.predict(val_a) run_model((None, 10)) run_model((None, None, 10)) run_model((None, None, None, 10)) def test_shared_batchnorm(self, layer): """Test that a BN layer can be shared across different data streams.""" with self.cached_session(): # Test single layer reuse bn = layer() x1 = keras.layers.Input(shape=(10,)) _ = bn(x1) x2 = keras.layers.Input(shape=(10,)) y2 = bn(x2) x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10)) model = keras.models.Model(x2, y2) model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse') model.train_on_batch(x, x) self.assertLen(bn.updates, 4) self.assertLen(bn.get_updates_for(x1), 2) self.assertLen(model.get_updates_for(x2), 2) # Test model-level reuse x3 = keras.layers.Input(shape=(10,)) y3 = model(x3) new_model = keras.models.Model(x3, y3, name='new_model') self.assertLen(new_model.updates, 6) self.assertLen(model.updates, 6) self.assertLen(new_model.get_updates_for(x3), 2) new_model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse') new_model.train_on_batch(x, x) def test_that_trainable_disables_updates(self, layer): with self.cached_session(): val_a = np.random.random((10, 4)) val_out = np.random.random((10, 4)) a = keras.layers.Input(shape=(4,)) layer = layer(input_shape=(4,)) b = layer(a) model = keras.models.Model(a, b) model.trainable = False assert not model.updates model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse') assert not model.updates x1 = model.predict(val_a) model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) self.assertAllClose(x1, x2, atol=1e-7) model.trainable = True model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse') assert model.updates model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) assert np.abs(np.sum(x1 - x2)) > 1e-5 layer.trainable = False model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse') assert not model.updates x1 = model.predict(val_a) 
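# With the whole model frozen there are no update ops and no trainable
# weights, so a training step must leave predictions unchanged.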
model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) self.assertAllClose(x1, x2, atol=1e-7) @tf_test_util.run_deprecated_v1 def test_batchnorm_trainable(self, layer): """Tests that batchnorm layer is trainable when learning phase is enabled. Computes mean and std for current inputs then applies batch normalization using them. Args: layer: Either V1 or V2 of BatchNormalization layer. """ # TODO(fchollet): enable in all execution modes when issue with # learning phase setting is resolved. with self.cached_session(): bn_mean = 0.5 bn_std = 10. val_a = np.expand_dims(np.arange(10.), axis=1) def get_model(bn_mean, bn_std): inp = keras.layers.Input(shape=(1,)) x = layer()(inp) model1 = keras.models.Model(inp, x) model1.set_weights([ np.array([1.]), np.array([0.]), np.array([bn_mean]), np.array([bn_std**2]) ]) return model1 # Simulates training-mode with trainable layer. # Should use mini-batch statistics. with keras.backend.learning_phase_scope(1): model = get_model(bn_mean, bn_std) model.compile(loss='mse', optimizer='rmsprop') out = model.predict(val_a) self.assertAllClose( (val_a - np.mean(val_a)) / np.std(val_a), out, atol=1e-3) def _run_layernorm_correctness_test(layer, dtype='float32'): model = keras.models.Sequential() norm = layer(input_shape=(2, 2, 2)) model.add(norm) model.compile( loss='mse', optimizer=gradient_descent.GradientDescentOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # centered on 5.0, variance 10.0 x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2)) .astype(dtype)) model.fit(x, x, epochs=4, verbose=0) out = model.predict(x) out -= keras.backend.eval(norm.beta) out /= keras.backend.eval(norm.gamma) np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1) np.testing.assert_allclose(out.std(), 1.0, atol=1e-1) class LayerNormalizationTest(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes def test_basic_layernorm(self): testing_utils.layer_test( keras.layers.LayerNormalization, kwargs={ 'gamma_regularizer': keras.regularizers.l2(0.01), 'beta_regularizer': keras.regularizers.l2(0.01) }, input_shape=(3, 4, 2)) testing_utils.layer_test( keras.layers.LayerNormalization, kwargs={ 'gamma_initializer': 'ones', 'beta_initializer': 'ones', }, input_shape=(3, 4, 2)) testing_utils.layer_test( keras.layers.LayerNormalization, kwargs={'scale': False, 'center': False}, input_shape=(3, 3)) @tf_test_util.run_in_graph_and_eager_modes def test_layernorm_weights(self): layer = keras.layers.LayerNormalization(scale=False, center=False) layer.build((None, 3, 4)) self.assertEqual(len(layer.trainable_weights), 0) self.assertEqual(len(layer.weights), 0) layer = keras.layers.LayerNormalization() layer.build((None, 3, 4)) self.assertEqual(len(layer.trainable_weights), 2) self.assertEqual(len(layer.weights), 2) @tf_test_util.run_in_graph_and_eager_modes def test_layernorm_regularization(self): layer = keras.layers.LayerNormalization( gamma_regularizer='l1', beta_regularizer='l1') layer.build((None, 3, 4)) self.assertEqual(len(layer.losses), 2) max_norm = keras.constraints.max_norm layer = keras.layers.LayerNormalization( gamma_constraint=max_norm, beta_constraint=max_norm) layer.build((None, 3, 4)) self.assertEqual(layer.gamma.constraint, max_norm) self.assertEqual(layer.beta.constraint, max_norm) @keras_parameterized.run_all_keras_modes def test_layernorm_convnet_channel_last(self): model = keras.models.Sequential() norm = keras.layers.LayerNormalization(input_shape=(4, 4, 
3)) model.add(norm) model.compile( loss='mse', optimizer=gradient_descent.GradientDescentOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # centered on 5.0, variance 10.0 x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3)) model.fit(x, x, epochs=4, verbose=0) out = model.predict(x) out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3)) out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3)) np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1) np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1) @keras_parameterized.run_all_keras_modes def test_layernorm_correctness(self): _run_layernorm_correctness_test( normalization.LayerNormalization, dtype='float32') @keras_parameterized.run_all_keras_modes def test_layernorm_mixed_precision(self): _run_layernorm_correctness_test( normalization.LayerNormalization, dtype='float16') @tf_test_util.run_in_graph_and_eager_modes def testIncorrectAxisType(self): with self.assertRaisesRegexp( ValueError, r'Expected an int or a list/tuple of ints'): _ = normalization.LayerNormalization(axis={'axis': -1}) @tf_test_util.run_in_graph_and_eager_modes def testInvalidAxis(self): with self.assertRaisesRegexp(ValueError, r'Invalid axis: 3'): layer_norm = normalization.LayerNormalization(axis=3) layer_norm.build(input_shape=(2, 2, 2)) @tf_test_util.run_in_graph_and_eager_modes def testDuplicateAxis(self): with self.assertRaisesRegexp(ValueError, r'Duplicate axis:'): layer_norm = normalization.LayerNormalization(axis=[-1, -1]) layer_norm.build(input_shape=(2, 2, 2)) if __name__ == '__main__': test.main()
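# ----------------------------------------------------------------------------
# Editorial sketch (not part of the test suite above): the manual
# normalization check these tests repeat, in minimal form. `layer_cls` is a
# hypothetical parameter standing in for either BatchNormalization variant;
# the snippet assumes the same TF 1.x Keras API already imported in this file.
def _manual_normalization_check_sketch(layer_cls):
  model = keras.models.Sequential()
  norm = layer_cls(momentum=0.8, input_shape=(2,))
  model.add(norm)
  model.compile(
      loss='mse',
      optimizer=gradient_descent.GradientDescentOptimizer(0.01))
  # Train on data centered on 5.0 with stddev 10.0, as in the tests above.
  x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 2))
  model.fit(x, x, epochs=4, verbose=0)
  # Undo the learned affine transform; the result should be roughly N(0, 1).
  out = model.predict(x)
  out -= keras.backend.eval(norm.beta)
  out /= keras.backend.eval(norm.gamma)
  np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
  np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)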
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/normalization_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Recurrent layers and their base classes. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import activations from tensorflow.python.keras import backend as K from tensorflow.python.keras import constraints from tensorflow.python.keras import initializers from tensorflow.python.keras import regularizers from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.utils import generic_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_util from tensorflow.python.ops import state_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training.tracking import base as trackable from tensorflow.python.training.tracking import data_structures from tensorflow.python.util import nest from tensorflow.python.util.tf_export import keras_export @keras_export('keras.layers.StackedRNNCells') class StackedRNNCells(Layer): """Wrapper allowing a stack of RNN cells to behave as a single cell. Used to implement efficient stacked RNNs. Arguments: cells: List of RNN cell instances. Examples: ```python cells = [ keras.layers.LSTMCell(output_dim), keras.layers.LSTMCell(output_dim), keras.layers.LSTMCell(output_dim), ] inputs = keras.Input((timesteps, input_dim)) x = keras.layers.RNN(cells)(inputs) ``` """ def __init__(self, cells, **kwargs): for cell in cells: if not hasattr(cell, 'call'): raise ValueError('All cells must have a `call` method. ' 'received cells:', cells) if not hasattr(cell, 'state_size'): raise ValueError('All cells must have a ' '`state_size` attribute. ' 'received cells:', cells) self.cells = cells # reverse_state_order determines whether the state size will be in a reverse # order of the cells' state. User might want to set this to True to keep the # existing behavior. This is only useful when use RNN(return_state=True) # since the state will be returned as the same order of state_size. self.reverse_state_order = kwargs.pop('reverse_state_order', False) if self.reverse_state_order: logging.warning('reverse_state_order=True in StackedRNNCells will soon ' 'be deprecated. 
Please update the code to work with the ' 'natural order of states if you rely on the RNN states, ' 'eg RNN(return_state=True).') super(StackedRNNCells, self).__init__(**kwargs) @property def state_size(self): return tuple(c.state_size for c in (self.cells[::-1] if self.reverse_state_order else self.cells)) @property def output_size(self): if getattr(self.cells[-1], 'output_size', None) is not None: return self.cells[-1].output_size elif _is_multiple_state(self.cells[-1].state_size): return self.cells[-1].state_size[0] else: return self.cells[-1].state_size def get_initial_state(self, inputs=None, batch_size=None, dtype=None): initial_states = [] for cell in self.cells[::-1] if self.reverse_state_order else self.cells: get_initial_state_fn = getattr(cell, 'get_initial_state', None) if get_initial_state_fn: initial_states.append(get_initial_state_fn( inputs=inputs, batch_size=batch_size, dtype=dtype)) else: initial_states.append(_generate_zero_filled_state_for_cell( cell, inputs, batch_size, dtype)) return tuple(initial_states) def call(self, inputs, states, constants=None, **kwargs): # Recover per-cell states. state_size = (self.state_size[::-1] if self.reverse_state_order else self.state_size) nested_states = nest.pack_sequence_as(state_size, nest.flatten(states)) # Call the cells in order and store the returned states. new_nested_states = [] for cell, states in zip(self.cells, nested_states): states = states if nest.is_sequence(states) else [states] # TF cell does not wrap the state into list when there is only one state. is_tf_rnn_cell = getattr(cell, '_is_tf_rnn_cell', None) is not None states = states[0] if len(states) == 1 and is_tf_rnn_cell else states if generic_utils.has_arg(cell.call, 'constants'): inputs, states = cell.call(inputs, states, constants=constants, **kwargs) else: inputs, states = cell.call(inputs, states, **kwargs) new_nested_states.append(states) return inputs, nest.pack_sequence_as(state_size, nest.flatten(new_nested_states)) @tf_utils.shape_type_conversion def build(self, input_shape): if isinstance(input_shape, list): input_shape = input_shape[0] for cell in self.cells: if isinstance(cell, Layer): if not cell.built: cell.build(input_shape) if getattr(cell, 'output_size', None) is not None: output_dim = cell.output_size elif _is_multiple_state(cell.state_size): output_dim = cell.state_size[0] else: output_dim = cell.state_size input_shape = tuple([input_shape[0]] + tensor_shape.as_shape(output_dim).as_list()) self.built = True def get_config(self): cells = [] for cell in self.cells: cells.append({ 'class_name': cell.__class__.__name__, 'config': cell.get_config() }) config = {'cells': cells} base_config = super(StackedRNNCells, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top cells = [] for cell_config in config.pop('cells'): cells.append( deserialize_layer(cell_config, custom_objects=custom_objects)) return cls(cells, **config) @keras_export('keras.layers.RNN') class RNN(Layer): """Base class for recurrent layers. Arguments: cell: A RNN cell instance or a list of RNN cell instances. A RNN cell is a class that has: - A `call(input_at_t, states_at_t)` method, returning `(output_at_t, states_at_t_plus_1)`. The call method of the cell can also take the optional argument `constants`, see section "Note on passing external constants" below. 
- A `state_size` attribute. This can be a single integer (single state) in which case it is the size of the recurrent state. This can also be a list/tuple of integers (one size per state). The `state_size` can also be a TensorShape or a tuple/list of TensorShapes, to represent high-dimensional state. - An `output_size` attribute. This can be a single integer or a TensorShape, which represents the shape of the output. For backward compatibility, if this attribute is not available for the cell, the value will be inferred from the first element of the `state_size`. - A `get_initial_state(inputs=None, batch_size=None, dtype=None)` method that creates a tensor meant to be fed to `call()` as the initial state, if the user didn't specify any initial state via other means. The returned initial state should have a shape of [batch_size, cell.state_size]. The cell might choose to create a tensor full of zeros, or full of other values based on the cell's implementation. `inputs` is the input tensor to the RNN layer, which should contain the batch size as its shape[0], and also the dtype. Note that the shape[0] might be `None` during graph construction. Either the `inputs` or the pair of `batch_size` and `dtype` is provided. `batch_size` is a scalar tensor that represents the batch size of the inputs. `dtype` is a `tf.DType` that represents the dtype of the inputs. For backward compatibility, if this method is not implemented by the cell, the RNN layer will create a zero-filled tensor with the size of [batch_size, cell.state_size]. In the case that `cell` is a list of RNN cell instances, the cells will be stacked on top of each other in the RNN, resulting in an efficient stacked RNN. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. unroll: Boolean (default False). If True, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed up an RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences. time_major: The shape format of the `inputs` and `outputs` tensors. If True, the inputs and outputs will be in shape `(timesteps, batch, ...)`, whereas in the False case, it will be `(batch, timesteps, ...)`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. Call arguments: inputs: Input tensor. mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is for use with cells that use dropout. initial_state: List of initial state tensors to be passed to the first call of the cell. constants: List of constant tensors to be passed to the cell at each timestep. Input shape: N-D tensor with shape `(batch_size, timesteps, ...)` or `(timesteps, batch_size, ...)` when time_major is True.
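For example, a batch of 32 sequences, each with 10 timesteps of 8 features, has input shape `(32, 10, 8)`, or `(10, 32, 8)` when `time_major` is True.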
Output shape: - If `return_state`: a list of tensors. The first tensor is the output. The remaining tensors are the last states, each with shape `(batch_size, state_size)`, where `state_size` could be a high dimension tensor shape. - If `return_sequences`: N-D tensor with shape `(batch_size, timesteps, output_size)`, where `output_size` could be a high dimension tensor shape, or `(timesteps, batch_size, output_size)` when `time_major` is True. - Else, N-D tensor with shape `(batch_size, output_size)`, where `output_size` could be a high dimension tensor shape. Masking: This layer supports masking for input data with a variable number of timesteps. To introduce masks to your data, use an [Embedding](embeddings.md) layer with the `mask_zero` parameter set to `True`. Note on using statefulness in RNNs: You can set RNN layers to be 'stateful', which means that the states computed for the samples in one batch will be reused as initial states for the samples in the next batch. This assumes a one-to-one mapping between samples in different successive batches. To enable statefulness: - Specify `stateful=True` in the layer constructor. - Specify a fixed batch size for your model, by passing If sequential model: `batch_input_shape=(...)` to the first layer in your model. Else for functional model with 1 or more Input layers: `batch_shape=(...)` to all the first layers in your model. This is the expected shape of your inputs *including the batch size*. It should be a tuple of integers, e.g. `(32, 10, 100)`. - Specify `shuffle=False` when calling fit(). To reset the states of your model, call `.reset_states()` on either a specific layer, or on your entire model. Note on specifying the initial state of RNNs: You can specify the initial state of RNN layers symbolically by calling them with the keyword argument `initial_state`. The value of `initial_state` should be a tensor or list of tensors representing the initial state of the RNN layer. You can specify the initial state of RNN layers numerically by calling `reset_states` with the keyword argument `states`. The value of `states` should be a numpy array or list of numpy arrays representing the initial state of the RNN layer. Note on passing external constants to RNNs: You can pass "external" constants to the cell using the `constants` keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This requires that the `cell.call` method accepts the same keyword argument `constants`. Such constants can be used to condition the cell transformation on additional static inputs (not changing over time), a.k.a. an attention mechanism. Examples: ```python # First, let's define a RNN Cell, as a layer subclass. 
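# A cell only needs a `state_size` attribute and a `call(inputs, states)`
# method returning `(output, new_states)`; the RNN layer itself drives the
# loop over timesteps.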
class MinimalRNNCell(keras.layers.Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super(MinimalRNNCell, self).__init__(**kwargs) def build(self, input_shape): self.kernel = self.add_weight(shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.built = True def call(self, inputs, states): prev_output = states[0] h = K.dot(inputs, self.kernel) output = h + K.dot(prev_output, self.recurrent_kernel) return output, [output] # Let's use this cell in a RNN layer: cell = MinimalRNNCell(32) x = keras.Input((None, 5)) layer = RNN(cell) y = layer(x) # Here's how to use the cell to build a stacked RNN: cells = [MinimalRNNCell(32), MinimalRNNCell(64)] x = keras.Input((None, 5)) layer = RNN(cells) y = layer(x) ``` """ def __init__(self, cell, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False, time_major=False, **kwargs): if isinstance(cell, (list, tuple)): cell = StackedRNNCells(cell) if not hasattr(cell, 'call'): raise ValueError('`cell` should have a `call` method. ' 'The RNN was passed:', cell) if not hasattr(cell, 'state_size'): raise ValueError('The RNN cell should have ' 'an attribute `state_size` ' '(tuple of integers, ' 'one integer per RNN state).') # If True, the output for masked timestep will be zeros, whereas in the # False case, output from previous timestep is returned for masked timestep. self.zero_output_for_mask = kwargs.pop('zero_output_for_mask', False) if 'input_shape' not in kwargs and ( 'input_dim' in kwargs or 'input_length' in kwargs): input_shape = (kwargs.pop('input_length', None), kwargs.pop('input_dim', None)) kwargs['input_shape'] = input_shape super(RNN, self).__init__(**kwargs) self.cell = cell self.return_sequences = return_sequences self.return_state = return_state self.go_backwards = go_backwards self.stateful = stateful self.unroll = unroll self.time_major = time_major self.supports_masking = True # The input shape is unknown yet, it could have nested tensor inputs, and # the input spec will be the list of specs for nested inputs, the structure # of the input_spec will be the same as the input. self.input_spec = None self.state_spec = None self._states = None self.constants_spec = None self._num_constants = 0 @property def states(self): if self._states is None: state = nest.map_structure(lambda _: None, self.cell.state_size) return state if nest.is_sequence(self.cell.state_size) else [state] return self._states @states.setter # Automatic tracking catches "self._states" which adds an extra weight and # breaks HDF5 checkpoints. @trackable.no_automatic_dependency_tracking def states(self, states): self._states = states def compute_output_shape(self, input_shape): if isinstance(input_shape, list): input_shape = input_shape[0] # Check whether the input shape contains any nested shapes. It could be # (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy # inputs. 
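# `tensor_shape.as_shape` raises for nested structures, so fall back to the
# first flattened element to read the batch and time dimensions.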
try: input_shape = tensor_shape.as_shape(input_shape) except (ValueError, TypeError): # A nested tensor input input_shape = nest.flatten(input_shape)[0] batch = input_shape[0] time_step = input_shape[1] if self.time_major: batch, time_step = time_step, batch if _is_multiple_state(self.cell.state_size): state_size = self.cell.state_size else: state_size = [self.cell.state_size] def _get_output_shape(flat_output_size): output_dim = tensor_shape.as_shape(flat_output_size).as_list() if self.return_sequences: if self.time_major: output_shape = tensor_shape.as_shape([time_step, batch] + output_dim) else: output_shape = tensor_shape.as_shape([batch, time_step] + output_dim) else: output_shape = tensor_shape.as_shape([batch] + output_dim) return output_shape if getattr(self.cell, 'output_size', None) is not None: # cell.output_size could be nested structure. output_shape = nest.flatten(nest.map_structure( _get_output_shape, self.cell.output_size)) output_shape = output_shape[0] if len(output_shape) == 1 else output_shape else: # Note that state_size[0] could be a tensor_shape or int. output_shape = _get_output_shape(state_size[0]) if self.return_state: def _get_state_shape(flat_state): state_shape = [batch] + tensor_shape.as_shape(flat_state).as_list() return tensor_shape.as_shape(state_shape) state_shape = nest.map_structure(_get_state_shape, state_size) return generic_utils.to_list(output_shape) + nest.flatten(state_shape) else: return output_shape def compute_mask(self, inputs, mask): # Time step masks must be the same for each input. # This is because the mask for an RNN is of size [batch, time_steps, 1], # and specifies which time steps should be skipped, and a time step # must be skipped for all inputs. # TODO(scottzhu): Should we accept multiple different masks? mask = nest.flatten(mask)[0] output_mask = mask if self.return_sequences else None if self.return_state: state_mask = [None for _ in self.states] return [output_mask] + state_mask else: return output_mask def build(self, input_shape): if isinstance(input_shape, list): input_shape = input_shape[0] # The input_shape here could be a nest structure. # do the tensor_shape to shapes here. The input could be single tensor, or a # nested structure of tensors. def get_input_spec(shape): if isinstance(shape, tensor_shape.TensorShape): input_spec_shape = shape.as_list() else: input_spec_shape = list(shape) batch_index, time_step_index = (1, 0) if self.time_major else (0, 1) if not self.stateful: input_spec_shape[batch_index] = None input_spec_shape[time_step_index] = None return InputSpec(shape=tuple(input_spec_shape)) def get_step_input_shape(shape): if isinstance(shape, tensor_shape.TensorShape): shape = tuple(shape.as_list()) # remove the timestep from the input_shape return shape[1:] if self.time_major else (shape[0],) + shape[2:] # Check whether the input shape contains any nested shapes. It could be # (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy # inputs. try: input_shape = tensor_shape.as_shape(input_shape) except (ValueError, TypeError): # A nested tensor input pass if not nest.is_sequence(input_shape): # This indicates the there is only one input. 
if self.input_spec is not None: self.input_spec[0] = get_input_spec(input_shape) else: self.input_spec = [get_input_spec(input_shape)] step_input_shape = get_step_input_shape(input_shape) else: if self.input_spec is not None: self.input_spec[0] = nest.map_structure(get_input_spec, input_shape) else: self.input_spec = generic_utils.to_list( nest.map_structure(get_input_spec, input_shape)) step_input_shape = nest.map_structure(get_step_input_shape, input_shape) # allow cell (if layer) to build before we set or validate state_spec if isinstance(self.cell, Layer): if not self.cell.built: self.cell.build(step_input_shape) # set or validate state_spec if _is_multiple_state(self.cell.state_size): state_size = list(self.cell.state_size) else: state_size = [self.cell.state_size] if self.state_spec is not None: # initial_state was passed in call, check compatibility self._validate_state_spec(state_size, self.state_spec) else: self.state_spec = [ InputSpec(shape=[None] + tensor_shape.as_shape(dim).as_list()) for dim in state_size ] if self.stateful: self.reset_states() self.built = True @staticmethod def _validate_state_spec(cell_state_sizes, init_state_specs): """Validate the state spec between the initial_state and the state_size. Args: cell_state_sizes: list, the `state_size` attribute from the cell. init_state_specs: list, the `state_spec` from the initial_state that is passed in `call()`. Raises: ValueError: When initial state spec is not compatible with the state size. """ validation_error = ValueError( 'An `initial_state` was passed that is not compatible with ' '`cell.state_size`. Received `state_spec`={}; ' 'however `cell.state_size` is ' '{}'.format(init_state_specs, cell_state_sizes)) flat_cell_state_size = nest.flatten(cell_state_sizes) flat_state_spec = nest.flatten(init_state_specs) if len(flat_cell_state_size) != len(flat_state_spec): raise validation_error for i in range(len(flat_cell_state_size)): if not tensor_shape.TensorShape( # Ignore the first axis for init_state which is for batch flat_state_spec[i].shape[1:]).is_compatible_with( tensor_shape.TensorShape(flat_cell_state_size[i])): raise validation_error def get_initial_state(self, inputs): get_initial_state_fn = getattr(self.cell, 'get_initial_state', None) if nest.is_sequence(inputs): # The input are nested sequences. Use the first element in the seq to get # batch size and dtype. inputs = nest.flatten(inputs)[0] input_shape = array_ops.shape(inputs) batch_size = input_shape[1] if self.time_major else input_shape[0] dtype = inputs.dtype if get_initial_state_fn: init_state = get_initial_state_fn( inputs=None, batch_size=batch_size, dtype=dtype) else: init_state = _generate_zero_filled_state(batch_size, self.cell.state_size, dtype) # Keras RNN expect the states in a list, even if it's a single state tensor. if not nest.is_sequence(init_state): init_state = [init_state] # Force the state to be a list in case it is a namedtuple eg LSTMStateTuple. return list(init_state) def __call__(self, inputs, initial_state=None, constants=None, **kwargs): inputs, initial_state, constants = _standardize_args(inputs, initial_state, constants, self._num_constants) if initial_state is None and constants is None: return super(RNN, self).__call__(inputs, **kwargs) # If any of `initial_state` or `constants` are specified and are Keras # tensors, then add them to the inputs and temporarily modify the # input_spec to include them. 
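# Keras tensors have to flow through the functional graph, so they are
# appended to the layer's call inputs (with matching specs) for the duration
# of this call rather than captured implicitly.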
additional_inputs = [] additional_specs = [] if initial_state is not None: additional_inputs += initial_state self.state_spec = nest.map_structure( lambda s: InputSpec(shape=K.int_shape(s)), initial_state) additional_specs += self.state_spec if constants is not None: additional_inputs += constants self.constants_spec = [ InputSpec(shape=K.int_shape(constant)) for constant in constants ] self._num_constants = len(constants) additional_specs += self.constants_spec # at this point additional_inputs cannot be empty is_keras_tensor = K.is_keras_tensor(nest.flatten(additional_inputs)[0]) for tensor in nest.flatten(additional_inputs): if K.is_keras_tensor(tensor) != is_keras_tensor: raise ValueError('The initial state or constants of an RNN' ' layer cannot be specified with a mix of' ' Keras tensors and non-Keras tensors' ' (a "Keras tensor" is a tensor that was' ' returned by a Keras layer, or by `Input`)') if is_keras_tensor: # Compute the full input spec, including state and constants full_input = [inputs] + additional_inputs # The original input_spec is None since there could be a nested tensor # input. Update the input_spec to match the inputs. full_input_spec = generic_utils.to_list( nest.map_structure(lambda _: None, inputs)) + additional_specs # Perform the call with temporarily replaced input_spec self.input_spec = full_input_spec output = super(RNN, self).__call__(full_input, **kwargs) # Remove the additional_specs from input spec and keep the rest. It is # important to keep since the input spec was populated by build(), and # will be reused in the stateful=True. self.input_spec = self.input_spec[:-len(additional_specs)] return output else: if initial_state is not None: kwargs['initial_state'] = initial_state if constants is not None: kwargs['constants'] = constants return super(RNN, self).__call__(inputs, **kwargs) def call(self, inputs, mask=None, training=None, initial_state=None, constants=None): inputs, initial_state, constants = self._process_inputs( inputs, initial_state, constants) if mask is not None: # Time step masks must be the same for each input. # TODO(scottzhu): Should we accept multiple different masks? mask = nest.flatten(mask)[0] if nest.is_sequence(inputs): # In the case of nested input, use the first element for shape check. input_shape = K.int_shape(nest.flatten(inputs)[0]) else: input_shape = K.int_shape(inputs) timesteps = input_shape[0] if self.time_major else input_shape[1] if self.unroll and timesteps is None: raise ValueError('Cannot unroll a RNN if the ' 'time dimension is undefined. \n' '- If using a Sequential model, ' 'specify the time dimension by passing ' 'an `input_shape` or `batch_input_shape` ' 'argument to your first layer. If your ' 'first layer is an Embedding, you can ' 'also use the `input_length` argument.\n' '- If using the functional API, specify ' 'the time dimension by passing a `shape` ' 'or `batch_shape` argument to your Input layer.') kwargs = {} if generic_utils.has_arg(self.cell.call, 'training'): kwargs['training'] = training # TF RNN cells expect single tensor as state instead of list wrapped tensor. 
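# When the wrapped cell is such a cell and there is exactly one state, the
# single-element state list is unwrapped before calling it (see the `step`
# functions below).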
is_tf_rnn_cell = getattr(self.cell, '_is_tf_rnn_cell', None) is not None if constants: if not generic_utils.has_arg(self.cell.call, 'constants'): raise ValueError('RNN cell does not support constants') def step(inputs, states): constants = states[-self._num_constants:] # pylint: disable=invalid-unary-operand-type states = states[:-self._num_constants] # pylint: disable=invalid-unary-operand-type states = states[0] if len(states) == 1 and is_tf_rnn_cell else states output, new_states = self.cell.call( inputs, states, constants=constants, **kwargs) if not nest.is_sequence(new_states): new_states = [new_states] return output, new_states else: def step(inputs, states): states = states[0] if len(states) == 1 and is_tf_rnn_cell else states output, new_states = self.cell.call(inputs, states, **kwargs) if not nest.is_sequence(new_states): new_states = [new_states] return output, new_states # `input_length` is passed as the `maximum_iterations` arg to tf.while_loop. # We only specify that when building for XLA since that causes slowdowns # on GPU in TF. if (not context.executing_eagerly() and control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph())): input_length = timesteps else: input_length = None last_output, outputs, states = K.rnn( step, inputs, initial_state, constants=constants, go_backwards=self.go_backwards, mask=mask, unroll=self.unroll, input_length=input_length, time_major=self.time_major, zero_output_for_mask=self.zero_output_for_mask) if self.stateful: updates = [] for state_, state in zip(nest.flatten(self.states), nest.flatten(states)): updates.append(state_ops.assign(state_, state)) self.add_update(updates) if self.return_sequences: output = outputs else: output = last_output if self.return_state: if not isinstance(states, (list, tuple)): states = [states] else: states = list(states) return generic_utils.to_list(output) + states else: return output def _process_inputs(self, inputs, initial_state, constants): # input shape: `(samples, time (padded with zeros), input_dim)` # note that the .build() method of subclasses MUST define # self.input_spec and self.state_spec with complete input shapes. if (isinstance(inputs, collections.Sequence) and not isinstance(inputs, tuple)): # get initial_state from full input spec # as they could be copied to multiple GPU. if not self._num_constants: initial_state = inputs[1:] else: initial_state = inputs[1:-self._num_constants] constants = inputs[-self._num_constants:] if len(initial_state) == 0: initial_state = None inputs = inputs[0] if initial_state is not None: pass elif self.stateful: initial_state = self.states else: initial_state = self.get_initial_state(inputs) if len(initial_state) != len(self.states): raise ValueError('Layer has ' + str(len(self.states)) + ' states but was passed ' + str(len(initial_state)) + ' initial states.') return inputs, initial_state, constants def reset_states(self, states=None): if not self.stateful: raise AttributeError('Layer must be stateful.') spec_shape = None if self.input_spec is None else self.input_spec[0].shape if spec_shape is None: # It is possible to have spec shape to be None, eg when construct a RNN # with a custom cell, or standard RNN layers (LSTM/GRU) which we only know # it has 3 dim input, but not its full shape spec before build(). batch_size = None else: batch_size = spec_shape[1] if self.time_major else spec_shape[0] if not batch_size: raise ValueError('If a RNN is stateful, it needs to know ' 'its batch size. 
Specify the batch size ' 'of your input tensors: \n' '- If using a Sequential model, ' 'specify the batch size by passing ' 'a `batch_input_shape` ' 'argument to your first layer.\n' '- If using the functional API, specify ' 'the batch size by passing a ' '`batch_shape` argument to your Input layer.') # initialize state if None if nest.flatten(self.states)[0] is None: def create_state_variable(state): return K.zeros([batch_size] + tensor_shape.as_shape(state).as_list()) self.states = nest.map_structure( create_state_variable, self.cell.state_size) if not nest.is_sequence(self.states): self.states = [self.states] elif states is None: for state, size in zip(nest.flatten(self.states), nest.flatten(self.cell.state_size)): K.set_value(state, np.zeros([batch_size] + tensor_shape.as_shape(size).as_list())) else: flat_states = nest.flatten(self.states) flat_input_states = nest.flatten(states) if len(flat_input_states) != len(flat_states): raise ValueError('Layer ' + self.name + ' expects ' + str(len(flat_states)) + ' states, ' 'but it received ' + str(len(flat_input_states)) + ' state values. Input received: ' + str(states)) set_value_tuples = [] for i, (value, state) in enumerate(zip(flat_input_states, flat_states)): if value.shape != state.shape: raise ValueError( 'State ' + str(i) + ' is incompatible with layer ' + self.name + ': expected shape=' + str( (batch_size, state)) + ', found shape=' + str(value.shape)) set_value_tuples.append((state, value)) K.batch_set_value(set_value_tuples) def get_config(self): config = { 'return_sequences': self.return_sequences, 'return_state': self.return_state, 'go_backwards': self.go_backwards, 'stateful': self.stateful, 'unroll': self.unroll, 'time_major': self.time_major } if self._num_constants: config['num_constants'] = self._num_constants if self.zero_output_for_mask: config['zero_output_for_mask'] = self.zero_output_for_mask cell_config = self.cell.get_config() config['cell'] = { 'class_name': self.cell.__class__.__name__, 'config': cell_config } base_config = super(RNN, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top cell = deserialize_layer(config.pop('cell'), custom_objects=custom_objects) num_constants = config.pop('num_constants', 0) layer = cls(cell, **config) layer._num_constants = num_constants return layer @keras_export('keras.layers.AbstractRNNCell') class AbstractRNNCell(Layer): """Abstract object representing an RNN cell. This is the base class for implementing RNN cells with custom behavior. Every `RNNCell` must have the properties below and implement `call` with the signature `(output, next_state) = call(input, state)`. 
Examples: ```python class MinimalRNNCell(AbstractRNNCell): def __init__(self, units, **kwargs): self.units = units super(MinimalRNNCell, self).__init__(**kwargs) @property def state_size(self): return self.units def build(self, input_shape): self.kernel = self.add_weight(shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.built = True def call(self, inputs, states): prev_output = states[0] h = K.dot(inputs, self.kernel) output = h + K.dot(prev_output, self.recurrent_kernel) return output, output ``` This definition of cell differs from the definition used in the literature. In the literature, 'cell' refers to an object with a single scalar output. This definition refers to a horizontal array of such units. An RNN cell, in the most abstract setting, is anything that has a state and performs some operation that takes a matrix of inputs. This operation results in an output matrix with `self.output_size` columns. If `self.state_size` is an integer, this operation also results in a new state matrix with `self.state_size` columns. If `self.state_size` is a (possibly nested tuple of) TensorShape object(s), then it should return a matching structure of Tensors having shape `[batch_size].concatenate(s)` for each `s` in `self.state_size`. """ def call(self, inputs, states): """The function that contains the logic for one RNN step calculation. Args: inputs: the input tensor, which is a slice of the overall RNN input along the time dimension (usually the second dimension). states: the state tensor from the previous step, which has the same shape as `(batch, state_size)`. In the case of timestep 0, it will be the initial state user specified, or a zero-filled tensor otherwise. Returns: A tuple of two tensors: 1. output tensor for the current timestep, with size `output_size`. 2. state tensor for the next step, which has the shape of `state_size`. """ raise NotImplementedError('Abstract method') @property def state_size(self): """size(s) of state(s) used by this cell. It can be represented by an Integer, a TensorShape or a tuple of Integers or TensorShapes. """ raise NotImplementedError('Abstract method') @property def output_size(self): """Integer or TensorShape: size of outputs produced by this cell.""" raise NotImplementedError('Abstract method') def get_initial_state(self, inputs=None, batch_size=None, dtype=None): return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype) class DropoutRNNCellMixin(object): """Object that holds dropout-related fields for an RNN cell. This class is not a standalone RNN cell. It is supposed to be used with an RNN cell via multiple inheritance. Any cell that mixes in this class should have the following fields: dropout: a float number within range [0, 1). The fraction of the input units to drop. recurrent_dropout: a float number within range [0, 1). The fraction of the recurrent state units to drop. This object will create and cache dropout masks, and reuse them for the incoming data, so that the same mask is used for every timestep within the same batch. """ def __init__(self, *args, **kwargs): # Note that the following two masks will be used in "graph function" mode, # e.g. these masks are symbolic tensors. In eager mode, the `eager_*_mask` # tensors will be generated differently than in the "graph function" case, # and they will be cached.
# Also note that in graph mode, we still cache those masks only because the # RNN could be created with `unroll=True`. In that case, the `cell.call()` # function will be invoked multiple times, and we want to ensure the same mask # is used every time. self._dropout_mask = None self._recurrent_dropout_mask = None self._eager_dropout_mask = None self._eager_recurrent_dropout_mask = None super(DropoutRNNCellMixin, self).__init__(*args, **kwargs) def reset_dropout_mask(self): """Reset the cached dropout masks, if any. It is important for the RNN layer to invoke this in its call() method so that the cached mask is cleared before calling cell.call(). The mask should be cached across all timesteps within the same batch, but shouldn't be cached between batches. Otherwise it will introduce an unreasonable bias against certain indices of data within the batch. """ self._dropout_mask = None self._eager_dropout_mask = None def reset_recurrent_dropout_mask(self): """Reset the cached recurrent dropout masks, if any. It is important for the RNN layer to invoke this in its call() method so that the cached mask is cleared before calling cell.call(). The mask should be cached across all timesteps within the same batch, but shouldn't be cached between batches. Otherwise it will introduce an unreasonable bias against certain indices of data within the batch. """ self._recurrent_dropout_mask = None self._eager_recurrent_dropout_mask = None def get_dropout_mask_for_cell(self, inputs, training, count=1): """Get the dropout mask for the RNN cell's input. It will create a mask based on context if there isn't any existing cached mask. If a new mask is generated, it will update the cache in the cell. Args: inputs: the input tensor whose shape will be used to generate the dropout mask. training: boolean tensor, whether it is in training mode; dropout will be ignored in non-training mode. count: int, how many dropout masks will be generated. Useful for cells that have internal weights fused together. Returns: List of mask tensors, generated or cached based on context. """ if self.dropout == 0: return None if (not context.executing_eagerly() and self._dropout_mask is None or context.executing_eagerly() and self._eager_dropout_mask is None): # Generate a new mask and cache it based on context. dp_mask = _generate_dropout_mask( array_ops.ones_like(inputs), self.dropout, training=training, count=count) if context.executing_eagerly(): self._eager_dropout_mask = dp_mask else: self._dropout_mask = dp_mask else: # Reuse the existing mask. dp_mask = (self._eager_dropout_mask if context.executing_eagerly() else self._dropout_mask) return dp_mask def get_recurrent_dropout_mask_for_cell(self, inputs, training, count=1): """Get the recurrent dropout mask for the RNN cell. It will create a mask based on context if there isn't any existing cached mask. If a new mask is generated, it will update the cache in the cell. Args: inputs: the input tensor whose shape will be used to generate the dropout mask. training: boolean tensor, whether it is in training mode; dropout will be ignored in non-training mode. count: int, how many dropout masks will be generated. Useful for cells that have internal weights fused together. Returns: List of mask tensors, generated or cached based on context. """ if self.recurrent_dropout == 0: return None if (not context.executing_eagerly() and self._recurrent_dropout_mask is None or context.executing_eagerly() and self._eager_recurrent_dropout_mask is None): # Generate a new mask and cache it based on context.
rec_dp_mask = _generate_dropout_mask( array_ops.ones_like(inputs), self.recurrent_dropout, training=training, count=count) if context.executing_eagerly(): self._eager_recurrent_dropout_mask = rec_dp_mask else: self._recurrent_dropout_mask = rec_dp_mask else: # Reuse the existing mask. rec_dp_mask = (self._eager_recurrent_dropout_mask if context.executing_eagerly() else self._recurrent_dropout_mask) return rec_dp_mask @keras_export('keras.layers.SimpleRNNCell') class SimpleRNNCell(DropoutRNNCellMixin, Layer): """Cell class for SimpleRNN. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Call arguments: inputs: A 2D tensor. states: List of state tensors corresponding to the previous timestep. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used. 
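Example (a minimal illustrative sketch; shapes are arbitrary):

```python
cell = keras.layers.SimpleRNNCell(32)
x = keras.Input((None, 5))    # (batch, timesteps, features)
layer = keras.layers.RNN(cell)
y = layer(x)                  # shape (batch, 32)
```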
""" def __init__(self, units, activation='tanh', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., **kwargs): super(SimpleRNNCell, self).__init__(**kwargs) self.units = units self.activation = activations.get(activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.recurrent_initializer = initializers.get(recurrent_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.recurrent_regularizer = regularizers.get(recurrent_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.recurrent_constraint = constraints.get(recurrent_constraint) self.bias_constraint = constraints.get(bias_constraint) self.dropout = min(1., max(0., dropout)) self.recurrent_dropout = min(1., max(0., recurrent_dropout)) self.state_size = self.units self.output_size = self.units @tf_utils.shape_type_conversion def build(self, input_shape): self.kernel = self.add_weight( shape=(input_shape[-1], self.units), name='kernel', initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), name='recurrent_kernel', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) if self.use_bias: self.bias = self.add_weight( shape=(self.units,), name='bias', initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None self.built = True def call(self, inputs, states, training=None): prev_output = states[0] dp_mask = self.get_dropout_mask_for_cell(inputs, training) rec_dp_mask = self.get_recurrent_dropout_mask_for_cell( prev_output, training) if dp_mask is not None: h = K.dot(inputs * dp_mask, self.kernel) else: h = K.dot(inputs, self.kernel) if self.bias is not None: h = K.bias_add(h, self.bias) if rec_dp_mask is not None: prev_output = prev_output * rec_dp_mask output = h + K.dot(prev_output, self.recurrent_kernel) if self.activation is not None: output = self.activation(output) return output, [output] def get_initial_state(self, inputs=None, batch_size=None, dtype=None): return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype) def get_config(self): config = { 'units': self.units, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout } base_config = super(SimpleRNNCell, self).get_config() return 
dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.SimpleRNN') class SimpleRNN(RNN): """Fully-connected RNN where the output is to be fed back to input. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass None, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation").. kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. unroll: Boolean (default False). If True, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed-up a RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences. Call arguments: inputs: A 3D tensor. mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is only relevant if `dropout` or `recurrent_dropout` is used. initial_state: List of initial state tensors to be passed to the first call of the cell. """ def __init__(self, units, activation='tanh', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False, **kwargs): if 'implementation' in kwargs: kwargs.pop('implementation') logging.warning('The `implementation` argument ' 'in `SimpleRNN` has been deprecated. 
' 'Please remove it from your layer call.') cell = SimpleRNNCell( units, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, dtype=kwargs.get('dtype')) super(SimpleRNN, self).__init__( cell, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, unroll=unroll, **kwargs) self.activity_regularizer = regularizers.get(activity_regularizer) self.input_spec = [InputSpec(ndim=3)] def call(self, inputs, mask=None, training=None, initial_state=None): self.cell.reset_dropout_mask() self.cell.reset_recurrent_dropout_mask() return super(SimpleRNN, self).call( inputs, mask=mask, training=training, initial_state=initial_state) @property def units(self): return self.cell.units @property def activation(self): return self.cell.activation @property def use_bias(self): return self.cell.use_bias @property def kernel_initializer(self): return self.cell.kernel_initializer @property def recurrent_initializer(self): return self.cell.recurrent_initializer @property def bias_initializer(self): return self.cell.bias_initializer @property def kernel_regularizer(self): return self.cell.kernel_regularizer @property def recurrent_regularizer(self): return self.cell.recurrent_regularizer @property def bias_regularizer(self): return self.cell.bias_regularizer @property def kernel_constraint(self): return self.cell.kernel_constraint @property def recurrent_constraint(self): return self.cell.recurrent_constraint @property def bias_constraint(self): return self.cell.bias_constraint @property def dropout(self): return self.cell.dropout @property def recurrent_dropout(self): return self.cell.recurrent_dropout def get_config(self): config = { 'units': self.units, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout } base_config = super(SimpleRNN, self).get_config() del base_config['cell'] return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config): if 'implementation' in config: config.pop('implementation') return cls(**config) @keras_export(v1=['keras.layers.GRUCell']) class GRUCell(DropoutRNNCellMixin, Layer): """Cell class for the GRU layer. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass None, no activation is applied (ie. 
"linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: hard sigmoid (`hard_sigmoid`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. reset_after: GRU convention (whether to apply reset gate after or before matrix multiplication). False = "before" (default), True = "after" (CuDNN compatible). Call arguments: inputs: A 2D tensor. states: List of state tensors corresponding to the previous timestep. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used. 
""" def __init__(self, units, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., implementation=1, reset_after=False, **kwargs): super(GRUCell, self).__init__(**kwargs) self.units = units self.activation = activations.get(activation) self.recurrent_activation = activations.get(recurrent_activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.recurrent_initializer = initializers.get(recurrent_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.recurrent_regularizer = regularizers.get(recurrent_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.recurrent_constraint = constraints.get(recurrent_constraint) self.bias_constraint = constraints.get(bias_constraint) self.dropout = min(1., max(0., dropout)) self.recurrent_dropout = min(1., max(0., recurrent_dropout)) self.implementation = implementation self.reset_after = reset_after self.state_size = self.units self.output_size = self.units @tf_utils.shape_type_conversion def build(self, input_shape): input_dim = input_shape[-1] self.kernel = self.add_weight( shape=(input_dim, self.units * 3), name='kernel', initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) self.recurrent_kernel = self.add_weight( shape=(self.units, self.units * 3), name='recurrent_kernel', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) if self.use_bias: if not self.reset_after: bias_shape = (3 * self.units,) else: # separate biases for input and recurrent kernels # Note: the shape is intentionally different from CuDNNGRU biases # `(2 * 3 * self.units,)`, so that we can distinguish the classes # when loading and converting saved weights. bias_shape = (2, 3 * self.units) self.bias = self.add_weight(shape=bias_shape, name='bias', initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None self.built = True def call(self, inputs, states, training=None): h_tm1 = states[0] # previous memory dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=3) rec_dp_mask = self.get_recurrent_dropout_mask_for_cell( h_tm1, training, count=3) if self.use_bias: if not self.reset_after: input_bias, recurrent_bias = self.bias, None else: input_bias, recurrent_bias = array_ops.unstack(self.bias) if self.implementation == 1: if 0. < self.dropout < 1.: inputs_z = inputs * dp_mask[0] inputs_r = inputs * dp_mask[1] inputs_h = inputs * dp_mask[2] else: inputs_z = inputs inputs_r = inputs inputs_h = inputs x_z = K.dot(inputs_z, self.kernel[:, :self.units]) x_r = K.dot(inputs_r, self.kernel[:, self.units:self.units * 2]) x_h = K.dot(inputs_h, self.kernel[:, self.units * 2:]) if self.use_bias: x_z = K.bias_add(x_z, input_bias[:self.units]) x_r = K.bias_add(x_r, input_bias[self.units: self.units * 2]) x_h = K.bias_add(x_h, input_bias[self.units * 2:]) if 0. 
< self.recurrent_dropout < 1.: h_tm1_z = h_tm1 * rec_dp_mask[0] h_tm1_r = h_tm1 * rec_dp_mask[1] h_tm1_h = h_tm1 * rec_dp_mask[2] else: h_tm1_z = h_tm1 h_tm1_r = h_tm1 h_tm1_h = h_tm1 recurrent_z = K.dot(h_tm1_z, self.recurrent_kernel[:, :self.units]) recurrent_r = K.dot(h_tm1_r, self.recurrent_kernel[:, self.units:self.units * 2]) if self.reset_after and self.use_bias: recurrent_z = K.bias_add(recurrent_z, recurrent_bias[:self.units]) recurrent_r = K.bias_add(recurrent_r, recurrent_bias[self.units:self.units * 2]) z = self.recurrent_activation(x_z + recurrent_z) r = self.recurrent_activation(x_r + recurrent_r) # reset gate applied after/before matrix multiplication if self.reset_after: recurrent_h = K.dot(h_tm1_h, self.recurrent_kernel[:, self.units * 2:]) if self.use_bias: recurrent_h = K.bias_add(recurrent_h, recurrent_bias[self.units * 2:]) recurrent_h = r * recurrent_h else: recurrent_h = K.dot(r * h_tm1_h, self.recurrent_kernel[:, self.units * 2:]) hh = self.activation(x_h + recurrent_h) else: if 0. < self.dropout < 1.: inputs = inputs * dp_mask[0] # inputs projected by all gate matrices at once matrix_x = K.dot(inputs, self.kernel) if self.use_bias: # biases: bias_z_i, bias_r_i, bias_h_i matrix_x = K.bias_add(matrix_x, input_bias) x_z = matrix_x[:, :self.units] x_r = matrix_x[:, self.units: 2 * self.units] x_h = matrix_x[:, 2 * self.units:] if 0. < self.recurrent_dropout < 1.: h_tm1 = h_tm1 * rec_dp_mask[0] if self.reset_after: # hidden state projected by all gate matrices at once matrix_inner = K.dot(h_tm1, self.recurrent_kernel) if self.use_bias: matrix_inner = K.bias_add(matrix_inner, recurrent_bias) else: # hidden state projected separately for update/reset and new matrix_inner = K.dot(h_tm1, self.recurrent_kernel[:, :2 * self.units]) recurrent_z = matrix_inner[:, :self.units] recurrent_r = matrix_inner[:, self.units:2 * self.units] z = self.recurrent_activation(x_z + recurrent_z) r = self.recurrent_activation(x_r + recurrent_r) if self.reset_after: recurrent_h = r * matrix_inner[:, 2 * self.units:] else: recurrent_h = K.dot(r * h_tm1, self.recurrent_kernel[:, 2 * self.units:]) hh = self.activation(x_h + recurrent_h) # previous and candidate state mixed by update gate h = z * h_tm1 + (1 - z) * hh return h, [h] def get_config(self): config = { 'units': self.units, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout, 'implementation': self.implementation, 'reset_after': self.reset_after } base_config = super(GRUCell, self).get_config() return dict(list(base_config.items()) + list(config.items())) def get_initial_state(self, inputs=None, batch_size=None, dtype=None): return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype) @keras_export(v1=['keras.layers.GRU']) class 
GRU(RNN): """Gated Recurrent Unit - Cho et al. 2014. There are two variants. The default one is based on 1406.1078v3 and has reset gate applied to hidden state before matrix multiplication. The other one is based on original 1406.1078v1 and has the order reversed. The second variant is compatible with CuDNNGRU (GPU-only) and allows inference on CPU. Thus it has separate biases for `kernel` and `recurrent_kernel`. Use `'reset_after'=True` and `recurrent_activation='sigmoid'`. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: hard sigmoid (`hard_sigmoid`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation").. kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. unroll: Boolean (default False). If True, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed-up a RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences. time_major: The shape format of the `inputs` and `outputs` tensors. If True, the inputs and outputs will be in shape `(timesteps, batch, ...)`, whereas in the False case, it will be `(batch, timesteps, ...)`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. 
However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. reset_after: GRU convention (whether to apply reset gate after or before matrix multiplication). False = "before" (default), True = "after" (CuDNN compatible). Call arguments: inputs: A 3D tensor. mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is only relevant if `dropout` or `recurrent_dropout` is used. initial_state: List of initial state tensors to be passed to the first call of the cell. """ def __init__(self, units, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., implementation=1, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False, reset_after=False, **kwargs): if implementation == 0: logging.warning('`implementation=0` has been deprecated, ' 'and now defaults to `implementation=1`.' 'Please update your layer call.') cell = GRUCell( units, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, implementation=implementation, reset_after=reset_after, dtype=kwargs.get('dtype')) super(GRU, self).__init__( cell, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, unroll=unroll, **kwargs) self.activity_regularizer = regularizers.get(activity_regularizer) self.input_spec = [InputSpec(ndim=3)] def call(self, inputs, mask=None, training=None, initial_state=None): self.cell.reset_dropout_mask() self.cell.reset_recurrent_dropout_mask() return super(GRU, self).call( inputs, mask=mask, training=training, initial_state=initial_state) @property def units(self): return self.cell.units @property def activation(self): return self.cell.activation @property def recurrent_activation(self): return self.cell.recurrent_activation @property def use_bias(self): return self.cell.use_bias @property def kernel_initializer(self): return self.cell.kernel_initializer @property def recurrent_initializer(self): return self.cell.recurrent_initializer @property def bias_initializer(self): return self.cell.bias_initializer @property def kernel_regularizer(self): return self.cell.kernel_regularizer @property def recurrent_regularizer(self): return self.cell.recurrent_regularizer @property def bias_regularizer(self): return self.cell.bias_regularizer @property def kernel_constraint(self): return self.cell.kernel_constraint @property def recurrent_constraint(self): return self.cell.recurrent_constraint @property def bias_constraint(self): return self.cell.bias_constraint @property def dropout(self): return self.cell.dropout 
@property def recurrent_dropout(self): return self.cell.recurrent_dropout @property def implementation(self): return self.cell.implementation @property def reset_after(self): return self.cell.reset_after def get_config(self): config = { 'units': self.units, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout, 'implementation': self.implementation, 'reset_after': self.reset_after } base_config = super(GRU, self).get_config() del base_config['cell'] return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config): if 'implementation' in config and config['implementation'] == 0: config['implementation'] = 1 return cls(**config) @keras_export(v1=['keras.layers.LSTMCell']) class LSTMCell(DropoutRNNCellMixin, Layer): """Cell class for the LSTM layer. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: hard sigmoid (`hard_sigmoid`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at initialization. Setting it to true will also force `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf) kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. implementation: Implementation mode, either 1 or 2. 
Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. Call arguments: inputs: A 2D tensor. states: List of state tensors corresponding to the previous timestep. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used. """ def __init__(self, units, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., implementation=1, **kwargs): super(LSTMCell, self).__init__(**kwargs) self.units = units self.activation = activations.get(activation) self.recurrent_activation = activations.get(recurrent_activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.recurrent_initializer = initializers.get(recurrent_initializer) self.bias_initializer = initializers.get(bias_initializer) self.unit_forget_bias = unit_forget_bias self.kernel_regularizer = regularizers.get(kernel_regularizer) self.recurrent_regularizer = regularizers.get(recurrent_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.recurrent_constraint = constraints.get(recurrent_constraint) self.bias_constraint = constraints.get(bias_constraint) self.dropout = min(1., max(0., dropout)) self.recurrent_dropout = min(1., max(0., recurrent_dropout)) self.implementation = implementation # tuple(_ListWrapper) was silently dropping list content in at least 2.7.10, # and fixed after 2.7.16. Converting the state_size to wrapper around # NoDependency(), so that the base_layer.__setattr__ will not convert it to # ListWrapper. Down the stream, self.states will be a list since it is # generated from nest.map_structure with list, and tuple(list) will work # properly. 
self.state_size = data_structures.NoDependency([self.units, self.units]) self.output_size = self.units @tf_utils.shape_type_conversion def build(self, input_shape): input_dim = input_shape[-1] self.kernel = self.add_weight( shape=(input_dim, self.units * 4), name='kernel', initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) self.recurrent_kernel = self.add_weight( shape=(self.units, self.units * 4), name='recurrent_kernel', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) if self.use_bias: if self.unit_forget_bias: def bias_initializer(_, *args, **kwargs): return K.concatenate([ self.bias_initializer((self.units,), *args, **kwargs), initializers.Ones()((self.units,), *args, **kwargs), self.bias_initializer((self.units * 2,), *args, **kwargs), ]) else: bias_initializer = self.bias_initializer self.bias = self.add_weight( shape=(self.units * 4,), name='bias', initializer=bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None self.built = True def _compute_carry_and_output(self, x, h_tm1, c_tm1): """Computes carry and output using split kernels.""" x_i, x_f, x_c, x_o = x h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1 i = self.recurrent_activation( x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units])) f = self.recurrent_activation(x_f + K.dot( h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2])) c = f * c_tm1 + i * self.activation(x_c + K.dot( h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3])) o = self.recurrent_activation( x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:])) return c, o def _compute_carry_and_output_fused(self, z, c_tm1): """Computes carry and output using fused kernels.""" z0, z1, z2, z3 = z i = self.recurrent_activation(z0) f = self.recurrent_activation(z1) c = f * c_tm1 + i * self.activation(z2) o = self.recurrent_activation(z3) return c, o def call(self, inputs, states, training=None): h_tm1 = states[0] # previous memory state c_tm1 = states[1] # previous carry state dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4) rec_dp_mask = self.get_recurrent_dropout_mask_for_cell( h_tm1, training, count=4) if self.implementation == 1: if 0 < self.dropout < 1.: inputs_i = inputs * dp_mask[0] inputs_f = inputs * dp_mask[1] inputs_c = inputs * dp_mask[2] inputs_o = inputs * dp_mask[3] else: inputs_i = inputs inputs_f = inputs inputs_c = inputs inputs_o = inputs k_i, k_f, k_c, k_o = array_ops.split( self.kernel, num_or_size_splits=4, axis=1) x_i = K.dot(inputs_i, k_i) x_f = K.dot(inputs_f, k_f) x_c = K.dot(inputs_c, k_c) x_o = K.dot(inputs_o, k_o) if self.use_bias: b_i, b_f, b_c, b_o = array_ops.split( self.bias, num_or_size_splits=4, axis=0) x_i = K.bias_add(x_i, b_i) x_f = K.bias_add(x_f, b_f) x_c = K.bias_add(x_c, b_c) x_o = K.bias_add(x_o, b_o) if 0 < self.recurrent_dropout < 1.: h_tm1_i = h_tm1 * rec_dp_mask[0] h_tm1_f = h_tm1 * rec_dp_mask[1] h_tm1_c = h_tm1 * rec_dp_mask[2] h_tm1_o = h_tm1 * rec_dp_mask[3] else: h_tm1_i = h_tm1 h_tm1_f = h_tm1 h_tm1_c = h_tm1 h_tm1_o = h_tm1 x = (x_i, x_f, x_c, x_o) h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o) c, o = self._compute_carry_and_output(x, h_tm1, c_tm1) else: if 0. < self.dropout < 1.: inputs = inputs * dp_mask[0] z = K.dot(inputs, self.kernel) if 0. 
< self.recurrent_dropout < 1.: h_tm1 = h_tm1 * rec_dp_mask[0] z += K.dot(h_tm1, self.recurrent_kernel) if self.use_bias: z = K.bias_add(z, self.bias) z = array_ops.split(z, num_or_size_splits=4, axis=1) c, o = self._compute_carry_and_output_fused(z, c_tm1) h = o * self.activation(c) return h, [h, c] def get_config(self): config = { 'units': self.units, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'unit_forget_bias': self.unit_forget_bias, 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout, 'implementation': self.implementation } base_config = super(LSTMCell, self).get_config() return dict(list(base_config.items()) + list(config.items())) def get_initial_state(self, inputs=None, batch_size=None, dtype=None): return list(_generate_zero_filled_state_for_cell( self, inputs, batch_size, dtype)) @keras_export('keras.experimental.PeepholeLSTMCell') class PeepholeLSTMCell(LSTMCell): """Equivalent to LSTMCell class but adds peephole connections. Peephole connections allow the gates to utilize the previous internal state as well as the previous hidden state (which is what LSTMCell is limited to). This allows PeepholeLSTMCell to better learn precise timings over LSTMCell. From [Gers et al.](http://www.jmlr.org/papers/volume3/gers02a/gers02a.pdf): "We find that LSTM augmented by 'peephole connections' from its internal cells to its multiplicative gates can learn the fine distinction between sequences of spikes spaced either 50 or 49 time steps apart without the help of any short training exemplars." The peephole implementation is based on: [Long short-term memory recurrent neural network architectures for large scale acoustic modeling. ](https://research.google.com/pubs/archive/43905.pdf) Example: ```python # Create 2 PeepholeLSTMCells peephole_lstm_cells = [PeepholeLSTMCell(size) for size in [128, 256]] # Create a layer composed sequentially of the peephole LSTM cells. layer = RNN(peephole_lstm_cells) input = keras.Input((timesteps, input_dim)) output = layer(input) ``` """ def build(self, input_shape): super(PeepholeLSTMCell, self).build(input_shape) # The following are the weight matrices for the peephole connections. These # are multiplied with the previous internal state during the computation of # carry and output. 
self.input_gate_peephole_weights = self.add_weight( shape=(self.units,), name='input_gate_peephole_weights', initializer=self.kernel_initializer) self.forget_gate_peephole_weights = self.add_weight( shape=(self.units,), name='forget_gate_peephole_weights', initializer=self.kernel_initializer) self.output_gate_peephole_weights = self.add_weight( shape=(self.units,), name='output_gate_peephole_weights', initializer=self.kernel_initializer) def _compute_carry_and_output(self, x, h_tm1, c_tm1): x_i, x_f, x_c, x_o = x h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1 i = self.recurrent_activation( x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]) + self.input_gate_peephole_weights * c_tm1) f = self.recurrent_activation(x_f + K.dot( h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]) + self.forget_gate_peephole_weights * c_tm1) c = f * c_tm1 + i * self.activation(x_c + K.dot( h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3])) o = self.recurrent_activation( x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]) + self.output_gate_peephole_weights * c) return c, o def _compute_carry_and_output_fused(self, z, c_tm1): z0, z1, z2, z3 = z i = self.recurrent_activation(z0 + self.input_gate_peephole_weights * c_tm1) f = self.recurrent_activation(z1 + self.forget_gate_peephole_weights * c_tm1) c = f * c_tm1 + i * self.activation(z2) o = self.recurrent_activation(z3 + self.output_gate_peephole_weights * c) return c, o @keras_export(v1=['keras.layers.LSTM']) class LSTM(RNN): """Long Short-Term Memory layer - Hochreiter 1997. Note that this cell is not optimized for performance on GPU. Please use `tf.compat.v1.keras.layers.CuDNNLSTM` for better performance on GPU. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: hard sigmoid (`hard_sigmoid`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs.. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at initialization. Setting it to true will also force `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation").. kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. 
Fraction of the units to drop for the linear transformation of the recurrent state. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. return_sequences: Boolean. Whether to return the last output. in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. unroll: Boolean (default False). If True, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed-up a RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences. time_major: The shape format of the `inputs` and `outputs` tensors. If True, the inputs and outputs will be in shape `(timesteps, batch, ...)`, whereas in the False case, it will be `(batch, timesteps, ...)`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. Call arguments: inputs: A 3D tensor. mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is only relevant if `dropout` or `recurrent_dropout` is used. initial_state: List of initial state tensors to be passed to the first call of the cell. """ def __init__(self, units, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., implementation=1, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False, **kwargs): if implementation == 0: logging.warning('`implementation=0` has been deprecated, ' 'and now defaults to `implementation=1`.' 
'Please update your layer call.') cell = LSTMCell( units, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, unit_forget_bias=unit_forget_bias, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, implementation=implementation, dtype=kwargs.get('dtype')) super(LSTM, self).__init__( cell, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, unroll=unroll, **kwargs) self.activity_regularizer = regularizers.get(activity_regularizer) self.input_spec = [InputSpec(ndim=3)] def call(self, inputs, mask=None, training=None, initial_state=None): self.cell.reset_dropout_mask() self.cell.reset_recurrent_dropout_mask() return super(LSTM, self).call( inputs, mask=mask, training=training, initial_state=initial_state) @property def units(self): return self.cell.units @property def activation(self): return self.cell.activation @property def recurrent_activation(self): return self.cell.recurrent_activation @property def use_bias(self): return self.cell.use_bias @property def kernel_initializer(self): return self.cell.kernel_initializer @property def recurrent_initializer(self): return self.cell.recurrent_initializer @property def bias_initializer(self): return self.cell.bias_initializer @property def unit_forget_bias(self): return self.cell.unit_forget_bias @property def kernel_regularizer(self): return self.cell.kernel_regularizer @property def recurrent_regularizer(self): return self.cell.recurrent_regularizer @property def bias_regularizer(self): return self.cell.bias_regularizer @property def kernel_constraint(self): return self.cell.kernel_constraint @property def recurrent_constraint(self): return self.cell.recurrent_constraint @property def bias_constraint(self): return self.cell.bias_constraint @property def dropout(self): return self.cell.dropout @property def recurrent_dropout(self): return self.cell.recurrent_dropout @property def implementation(self): return self.cell.implementation def get_config(self): config = { 'units': self.units, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'unit_forget_bias': self.unit_forget_bias, 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout, 'implementation': self.implementation } base_config = super(LSTM, self).get_config() del base_config['cell'] return dict(list(base_config.items()) + list(config.items())) 
  @classmethod
  def from_config(cls, config):
    if 'implementation' in config and config['implementation'] == 0:
      config['implementation'] = 1
    return cls(**config)


def _generate_dropout_mask(ones, rate, training=None, count=1):
  def dropped_inputs():
    return K.dropout(ones, rate)

  if count > 1:
    return [
        K.in_train_phase(dropped_inputs, ones, training=training)
        for _ in range(count)
    ]
  return K.in_train_phase(dropped_inputs, ones, training=training)


def _standardize_args(inputs, initial_state, constants, num_constants):
  """Standardizes `__call__` to a single list of tensor inputs.

  When running a model loaded from a file, the input tensors `initial_state`
  and `constants` can be passed to `RNN.__call__()` as part of `inputs` instead
  of by the dedicated keyword arguments. This method makes sure the arguments
  are separated and that `initial_state` and `constants` are lists of tensors
  (or None).

  Arguments:
    inputs: Tensor or list/tuple of tensors, which may include constants and
      initial states. In that case `num_constants` must be specified.
    initial_state: Tensor or list of tensors or None, initial states.
    constants: Tensor or list of tensors or None, constant tensors.
    num_constants: Expected number of constants (if constants are passed as
      part of the `inputs` list).

  Returns:
    inputs: Single tensor or tuple of tensors.
    initial_state: List of tensors or None.
    constants: List of tensors or None.
  """
  if isinstance(inputs, list):
    # There are several situations here:
    # In graph mode, __call__ will be called only once. The initial_state
    # and constants could be in inputs (from file loading).
    # In eager mode, __call__ will be called twice: once during
    # rnn_layer(inputs=input_t, constants=c_t, ...), and a second time via
    # model.fit/train_on_batch/predict with real np data. In the second case,
    # the inputs will contain initial_state and constants as eager tensors.
    #
    # For either case, the real input is the first item in the list, which
    # could be a nested structure itself. It is followed by initial_states,
    # which could be a list of items, or a list of lists if the initial_state
    # is a complex structure, and finally by constants, which is a flat list.
assert initial_state is None and constants is None if num_constants: constants = inputs[-num_constants:] inputs = inputs[:-num_constants] if len(inputs) > 1: initial_state = inputs[1:] inputs = inputs[:1] if len(inputs) > 1: inputs = tuple(inputs) else: inputs = inputs[0] def to_list_or_none(x): if x is None or isinstance(x, list): return x if isinstance(x, tuple): return list(x) return [x] initial_state = to_list_or_none(initial_state) constants = to_list_or_none(constants) return inputs, initial_state, constants def _is_multiple_state(state_size): """Check whether the state_size contains multiple states.""" return (hasattr(state_size, '__len__') and not isinstance(state_size, tensor_shape.TensorShape)) def _generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype): if inputs is not None: batch_size = array_ops.shape(inputs)[0] dtype = inputs.dtype return _generate_zero_filled_state(batch_size, cell.state_size, dtype) def _generate_zero_filled_state(batch_size_tensor, state_size, dtype): """Generate a zero filled tensor with shape [batch_size, state_size].""" if batch_size_tensor is None or dtype is None: raise ValueError( 'batch_size and dtype cannot be None while constructing initial state: ' 'batch_size={}, dtype={}'.format(batch_size_tensor, dtype)) def create_zeros(unnested_state_size): flat_dims = tensor_shape.as_shape(unnested_state_size).as_list() init_state_size = [batch_size_tensor] + flat_dims return array_ops.zeros(init_state_size, dtype=dtype) if nest.is_sequence(state_size): return nest.map_structure(create_zeros, state_size) else: return create_zeros(state_size)
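# A minimal illustrative cell (hypothetical, not part of the TF API) showing
# how a custom cell typically combines with the helpers above:
# `_generate_zero_filled_state_for_cell` derives the zero initial state from
# either the `inputs` batch size or an explicit `batch_size`.
class _EchoCellSketch(Layer):
  """Sketch cell: emits its input and carries it as state.

  Assumes the last input dimension equals `units`.
  """

  def __init__(self, units, **kwargs):
    super(_EchoCellSketch, self).__init__(**kwargs)
    self.units = units
    self.state_size = units
    self.output_size = units

  def call(self, inputs, states):
    # Output the current input; the new state is the input itself.
    return inputs, [inputs]

  def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
    # Same delegation pattern as SimpleRNNCell and GRUCell above.
    return _generate_zero_filled_state_for_cell(
        self, inputs, batch_size, dtype)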
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/recurrent.py
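A short usage sketch of the dropout-mask caching that `DropoutRNNCellMixin` above implements; the shapes and rates are illustrative, and only public `tf.keras` symbols are used.

```python
import numpy as np
import tensorflow as tf

# dropout/recurrent_dropout route through DropoutRNNCellMixin: one mask is
# generated per batch, reused across all timesteps, and cleared by
# reset_dropout_mask()/reset_recurrent_dropout_mask() in RNN.call().
model = tf.keras.Sequential([
    tf.keras.layers.LSTM(32, dropout=0.2, recurrent_dropout=0.2,
                         input_shape=(10, 8)),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer='adam', loss='mse')

x = np.random.random((16, 10, 8)).astype('float32')
y = np.random.random((16, 1)).astype('float32')
model.fit(x, y, epochs=1, verbose=0)
```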
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The V2 implementation of Normalization layers. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras.layers import normalization from tensorflow.python.util.tf_export import keras_export @keras_export('keras.layers.BatchNormalization', v1=[]) # pylint: disable=missing-docstring class BatchNormalization(normalization.BatchNormalizationBase): __doc__ = normalization.replace_in_base_docstring([ ('{{TRAINABLE_ATTRIBUTE_NOTE}}', ''' **About setting `layer.trainable = False` on a `BatchNormalization layer:** The meaning of setting `layer.trainable = False` is to freeze the layer, i.e. its internal state will not change during training: its trainable weights will not be updated during `fit()` or `train_on_batch()`, and its state updates will not be run. Usually, this does not necessarily mean that the layer is run in inference mode (which is normally controlled by the `training` argument that can be passed when calling a layer). "Frozen state" and "inference mode" are two separate concepts. However, in the case of the `BatchNormalization` layer, **setting `trainable = False` on the layer means that the layer will be subsequently run in inference mode** (meaning that it will use the moving mean and the moving variance to normalize the current batch, rather than using the mean and variance of the current batch). This behavior has been introduced in TensorFlow 2.0, in order to enable `layer.trainable = False` to produce the most commonly expected behavior in the convnet fine-tuning use case. Note that: - This behavior only occurs as of TensorFlow 2.0. In 1.*, setting `layer.trainable = False` would freeze the layer but would not switch it to inference mode. - Setting `trainable` on an model containing other layers will recursively set the `trainable` value of all inner layers. - If the value of the `trainable` attribute is changed after calling `compile()` on a model, the new value doesn't take effect for this model until `compile()` is called again. ''')]) _USE_V2_BEHAVIOR = True
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/normalization_v2.py
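A sketch of the `trainable = False` behavior documented in the docstring above, as it plays out in a fine-tuning setup; the model architecture is illustrative.

```python
import tensorflow as tf

base = tf.keras.Sequential([
    tf.keras.layers.Dense(16, input_shape=(8,), activation='relu'),
    tf.keras.layers.BatchNormalization(),
])
# In the V2 layer, freezing also switches BatchNormalization to inference
# mode: it normalizes with the moving mean/variance, not batch statistics.
base.trainable = False

model = tf.keras.Sequential([base, tf.keras.layers.Dense(1)])
# Per the docstring: a changed `trainable` value only takes effect once
# compile() is called again.
model.compile(optimizer='adam', loss='mse')
```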
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Locally-connected layers. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.keras import activations from tensorflow.python.keras import backend as K from tensorflow.python.keras import constraints from tensorflow.python.keras import initializers from tensorflow.python.keras import regularizers from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.utils import conv_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.util.tf_export import keras_export @keras_export('keras.layers.LocallyConnected1D') class LocallyConnected1D(Layer): """Locally-connected layer for 1D inputs. The `LocallyConnected1D` layer works similarly to the `Conv1D` layer, except that weights are unshared, that is, a different set of filters is applied at each different patch of the input. Example: ```python # apply a unshared weight convolution 1d of length 3 to a sequence with # 10 timesteps, with 64 output filters model = Sequential() model.add(LocallyConnected1D(64, 3, input_shape=(10, 32))) # now model.output_shape == (None, 8, 64) # add a new conv1d on top model.add(LocallyConnected1D(32, 3)) # now model.output_shape == (None, 6, 32) ``` Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of a single integer, specifying the length of the 1D convolution window. strides: An integer or tuple/list of a single integer, specifying the stride length of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: Currently only supports `"valid"` (case-insensitive). `"same"` may be supported in the future. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". activation: Activation function to use. If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. 
activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). kernel_constraint: Constraint function applied to the kernel matrix. bias_constraint: Constraint function applied to the bias vector. implementation: implementation mode, either `1`, `2`, or `3`. `1` loops over input spatial locations to perform the forward pass. It is memory-efficient but performs a lot of (small) ops. `2` stores layer weights in a dense but sparsely-populated 2D matrix and implements the forward pass as a single matrix-multiply. It uses a lot of RAM but performs few (large) ops. `3` stores layer weights in a sparse tensor and implements the forward pass as a single sparse matrix-multiply. How to choose: `1`: large, dense models, `2`: small models, `3`: large, sparse models, where "large" stands for large input/output activations (i.e. many `filters`, `input_filters`, large `input_size`, `output_size`), and "sparse" stands for few connections between inputs and outputs, i.e. small ratio `filters * input_filters * kernel_size / (input_size * strides)`, where inputs to and outputs of the layer are assumed to have shapes `(input_size, input_filters)`, `(output_size, filters)` respectively. It is recommended to benchmark each in the setting of interest to pick the most efficient one (in terms of speed and memory usage). Correct choice of implementation can lead to dramatic speed improvements (e.g. 50X), potentially at the expense of RAM. Also, only `padding="valid"` is supported by `implementation=1`. Input shape: 3D tensor with shape: `(batch_size, steps, input_dim)` Output shape: 3D tensor with shape: `(batch_size, new_steps, filters)` `steps` value might have changed due to padding or strides. """ def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format=None, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, implementation=1, **kwargs): super(LocallyConnected1D, self).__init__(**kwargs) self.filters = filters self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size') self.strides = conv_utils.normalize_tuple(strides, 1, 'strides') self.padding = conv_utils.normalize_padding(padding) if self.padding != 'valid' and implementation == 1: raise ValueError('Invalid border mode for LocallyConnected1D ' '(only "valid" is supported if implementation is 1): ' + padding) self.data_format = conv_utils.normalize_data_format(data_format) self.activation = activations.get(activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.activity_regularizer = regularizers.get(activity_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) self.implementation = implementation self.input_spec = InputSpec(ndim=3) @tf_utils.shape_type_conversion def build(self, input_shape): if self.data_format == 'channels_first': input_dim, input_length = input_shape[1], input_shape[2] else: input_dim, input_length = input_shape[2], input_shape[1] if input_dim is None: raise ValueError('Axis 2 of input should be fully-defined. 
' 'Found shape:', input_shape) self.output_length = conv_utils.conv_output_length( input_length, self.kernel_size[0], self.padding, self.strides[0]) if self.implementation == 1: self.kernel_shape = (self.output_length, self.kernel_size[0] * input_dim, self.filters) self.kernel = self.add_weight( shape=self.kernel_shape, initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) elif self.implementation == 2: if self.data_format == 'channels_first': self.kernel_shape = (input_dim, input_length, self.filters, self.output_length) else: self.kernel_shape = (input_length, input_dim, self.output_length, self.filters) self.kernel = self.add_weight(shape=self.kernel_shape, initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) self.kernel_mask = get_locallyconnected_mask( input_shape=(input_length,), kernel_shape=self.kernel_size, strides=self.strides, padding=self.padding, data_format=self.data_format, ) elif self.implementation == 3: self.kernel_shape = (self.output_length * self.filters, input_length * input_dim) self.kernel_idxs = sorted( conv_utils.conv_kernel_idxs( input_shape=(input_length,), kernel_shape=self.kernel_size, strides=self.strides, padding=self.padding, filters_in=input_dim, filters_out=self.filters, data_format=self.data_format) ) self.kernel = self.add_weight( shape=(len(self.kernel_idxs),), initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) else: raise ValueError('Unrecognized implementation mode: %d.' % self.implementation) if self.use_bias: self.bias = self.add_weight( shape=(self.output_length, self.filters), initializer=self.bias_initializer, name='bias', regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None if self.data_format == 'channels_first': self.input_spec = InputSpec(ndim=3, axes={1: input_dim}) else: self.input_spec = InputSpec(ndim=3, axes={-1: input_dim}) self.built = True @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): if self.data_format == 'channels_first': input_length = input_shape[2] else: input_length = input_shape[1] length = conv_utils.conv_output_length(input_length, self.kernel_size[0], self.padding, self.strides[0]) if self.data_format == 'channels_first': return (input_shape[0], self.filters, length) elif self.data_format == 'channels_last': return (input_shape[0], length, self.filters) def call(self, inputs): if self.implementation == 1: output = K.local_conv(inputs, self.kernel, self.kernel_size, self.strides, (self.output_length,), self.data_format) elif self.implementation == 2: output = local_conv_matmul(inputs, self.kernel, self.kernel_mask, self.compute_output_shape(inputs.shape)) elif self.implementation == 3: output = local_conv_sparse_matmul(inputs, self.kernel, self.kernel_idxs, self.kernel_shape, self.compute_output_shape(inputs.shape)) else: raise ValueError('Unrecognized implementation mode: %d.' 
% self.implementation) if self.use_bias: output = K.bias_add(output, self.bias, data_format=self.data_format) output = self.activation(output) return output def get_config(self): config = { 'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'implementation': self.implementation } base_config = super(LocallyConnected1D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.LocallyConnected2D') class LocallyConnected2D(Layer): """Locally-connected layer for 2D inputs. The `LocallyConnected2D` layer works similarly to the `Conv2D` layer, except that weights are unshared, that is, a different set of filters is applied at each different patch of the input. Examples: ```python # apply a 3x3 unshared-weights convolution with 64 output filters on a 32x32 image # with `data_format="channels_last"`: model = Sequential() model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3))) # now model.output_shape == (None, 30, 30, 64) # notice that this layer will consume (30*30)*(3*3*3*64) + (30*30)*64 parameters # add a 3x3 unshared-weights convolution on top, with 32 output filters: model.add(LocallyConnected2D(32, (3, 3))) # now model.output_shape == (None, 28, 28, 32) ``` Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the width and height of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the width and height. Can be a single integer to specify the same value for all spatial dimensions. padding: Currently only supports `"valid"` (case-insensitive). `"same"` may be supported in the future. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". activation: Activation function to use. If you don't specify anything, no activation is applied (i.e. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). 
kernel_constraint: Constraint function applied to the kernel matrix. bias_constraint: Constraint function applied to the bias vector. implementation: implementation mode, either `1`, `2`, or `3`. `1` loops over input spatial locations to perform the forward pass. It is memory-efficient but performs a lot of (small) ops. `2` stores layer weights in a dense but sparsely-populated 2D matrix and implements the forward pass as a single matrix-multiply. It uses a lot of RAM but performs few (large) ops. `3` stores layer weights in a sparse tensor and implements the forward pass as a single sparse matrix-multiply. How to choose: `1`: large, dense models, `2`: small models, `3`: large, sparse models, where "large" stands for large input/output activations (i.e. many `filters`, `input_filters`, large `np.prod(input_size)`, `np.prod(output_size)`), and "sparse" stands for few connections between inputs and outputs, i.e. small ratio `filters * input_filters * np.prod(kernel_size) / (np.prod(input_size) * np.prod(strides))`, where inputs to and outputs of the layer are assumed to have shapes `input_size + (input_filters,)`, `output_size + (filters,)` respectively. It is recommended to benchmark each in the setting of interest to pick the most efficient one (in terms of speed and memory usage). Correct choice of implementation can lead to dramatic speed improvements (e.g. 50X), potentially at the expense of RAM. Also, only `padding="valid"` is supported by `implementation=1`. Input shape: 4D tensor with shape: `(samples, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, rows, cols, channels)` if data_format='channels_last'. Output shape: 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. 
""" def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, implementation=1, **kwargs): super(LocallyConnected2D, self).__init__(**kwargs) self.filters = filters self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size') self.strides = conv_utils.normalize_tuple(strides, 2, 'strides') self.padding = conv_utils.normalize_padding(padding) if self.padding != 'valid' and implementation == 1: raise ValueError('Invalid border mode for LocallyConnected2D ' '(only "valid" is supported if implementation is 1): ' + padding) self.data_format = conv_utils.normalize_data_format(data_format) self.activation = activations.get(activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.activity_regularizer = regularizers.get(activity_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) self.implementation = implementation self.input_spec = InputSpec(ndim=4) @tf_utils.shape_type_conversion def build(self, input_shape): if self.data_format == 'channels_last': input_row, input_col = input_shape[1:-1] input_filter = input_shape[3] else: input_row, input_col = input_shape[2:] input_filter = input_shape[1] if input_row is None or input_col is None: raise ValueError('The spatial dimensions of the inputs to ' ' a LocallyConnected2D layer ' 'should be fully-defined, but layer received ' 'the inputs shape ' + str(input_shape)) output_row = conv_utils.conv_output_length(input_row, self.kernel_size[0], self.padding, self.strides[0]) output_col = conv_utils.conv_output_length(input_col, self.kernel_size[1], self.padding, self.strides[1]) self.output_row = output_row self.output_col = output_col if self.implementation == 1: self.kernel_shape = ( output_row * output_col, self.kernel_size[0] * self.kernel_size[1] * input_filter, self.filters) self.kernel = self.add_weight( shape=self.kernel_shape, initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) elif self.implementation == 2: if self.data_format == 'channels_first': self.kernel_shape = (input_filter, input_row, input_col, self.filters, self.output_row, self.output_col) else: self.kernel_shape = (input_row, input_col, input_filter, self.output_row, self.output_col, self.filters) self.kernel = self.add_weight(shape=self.kernel_shape, initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) self.kernel_mask = get_locallyconnected_mask( input_shape=(input_row, input_col), kernel_shape=self.kernel_size, strides=self.strides, padding=self.padding, data_format=self.data_format, ) elif self.implementation == 3: self.kernel_shape = (self.output_row * self.output_col * self.filters, input_row * input_col * input_filter) self.kernel_idxs = sorted( conv_utils.conv_kernel_idxs( input_shape=(input_row, input_col), kernel_shape=self.kernel_size, strides=self.strides, padding=self.padding, filters_in=input_filter, filters_out=self.filters, data_format=self.data_format) ) self.kernel = 
self.add_weight( shape=(len(self.kernel_idxs),), initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) else: raise ValueError('Unrecognized implementation mode: %d.' % self.implementation) if self.use_bias: self.bias = self.add_weight( shape=(output_row, output_col, self.filters), initializer=self.bias_initializer, name='bias', regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None if self.data_format == 'channels_first': self.input_spec = InputSpec(ndim=4, axes={1: input_filter}) else: self.input_spec = InputSpec(ndim=4, axes={-1: input_filter}) self.built = True @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): if self.data_format == 'channels_first': rows = input_shape[2] cols = input_shape[3] elif self.data_format == 'channels_last': rows = input_shape[1] cols = input_shape[2] rows = conv_utils.conv_output_length(rows, self.kernel_size[0], self.padding, self.strides[0]) cols = conv_utils.conv_output_length(cols, self.kernel_size[1], self.padding, self.strides[1]) if self.data_format == 'channels_first': return (input_shape[0], self.filters, rows, cols) elif self.data_format == 'channels_last': return (input_shape[0], rows, cols, self.filters) def call(self, inputs): if self.implementation == 1: output = K.local_conv(inputs, self.kernel, self.kernel_size, self.strides, (self.output_row, self.output_col), self.data_format) elif self.implementation == 2: output = local_conv_matmul(inputs, self.kernel, self.kernel_mask, self.compute_output_shape(inputs.shape)) elif self.implementation == 3: output = local_conv_sparse_matmul(inputs, self.kernel, self.kernel_idxs, self.kernel_shape, self.compute_output_shape(inputs.shape)) else: raise ValueError('Unrecognized implementation mode: %d.' % self.implementation) if self.use_bias: output = K.bias_add(output, self.bias, data_format=self.data_format) output = self.activation(output) return output def get_config(self): config = { 'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'implementation': self.implementation } base_config = super(LocallyConnected2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) def get_locallyconnected_mask(input_shape, kernel_shape, strides, padding, data_format): """Return a mask representing connectivity of a locally-connected operation. This method returns a masking numpy array of 0s and 1s (of type `np.float32`) that, when element-wise multiplied with a fully-connected weight tensor, masks out the weights between disconnected input-output pairs and thus implements local connectivity through a sparse fully-connected weight tensor. 
Assume an unshared convolution with given parameters is applied to an input having N spatial dimensions with `input_shape = (d_in1, ..., d_inN)` to produce an output with spatial shape `(d_out1, ..., d_outN)` (determined by layer parameters such as `strides`). This method returns a mask which can be broadcast-multiplied (element-wise) with a 2*(N+1)-D weight matrix (equivalent to a fully-connected layer between (N+1)-D activations (N spatial + 1 channel dimensions for input and output)) to make it perform an unshared convolution with given `kernel_shape`, `strides`, `padding` and `data_format`. Arguments: input_shape: tuple of size N: `(d_in1, ..., d_inN)` spatial shape of the input. kernel_shape: tuple of size N, spatial shape of the convolutional kernel / receptive field. strides: tuple of size N, strides along each spatial dimension. padding: type of padding, string `"same"` or `"valid"`. data_format: a string, `"channels_first"` or `"channels_last"`. Returns: a `np.float32`-type `np.ndarray` of shape `(1, d_in1, ..., d_inN, 1, d_out1, ..., d_outN)` if `data_format == "channels_first"`, or `(d_in1, ..., d_inN, 1, d_out1, ..., d_outN, 1)` if `data_format == "channels_last"`. Raises: ValueError: if `data_format` is neither `"channels_first"` nor `"channels_last"`. """ mask = conv_utils.conv_kernel_mask( input_shape=input_shape, kernel_shape=kernel_shape, strides=strides, padding=padding ) ndims = int(mask.ndim / 2) if data_format == 'channels_first': mask = np.expand_dims(mask, 0) mask = np.expand_dims(mask, -ndims - 1) elif data_format == 'channels_last': mask = np.expand_dims(mask, ndims) mask = np.expand_dims(mask, -1) else: raise ValueError('Unrecognized data_format: ' + str(data_format)) return mask def local_conv_matmul(inputs, kernel, kernel_mask, output_shape): """Apply N-D convolution with un-shared weights using a single matmul call. This method outputs `inputs . (kernel * kernel_mask)` (with `.` standing for matrix-multiply and `*` for element-wise multiply) and requires a precomputed `kernel_mask` to zero-out weights in `kernel` and hence perform the same operation as a convolution with un-shared (the remaining entries in `kernel`) weights. It also does the necessary reshapes to make `inputs` and `kernel` 2-D and `output` (N+2)-D. Arguments: inputs: (N+2)-D tensor with shape `(batch_size, channels_in, d_in1, ..., d_inN)` or `(batch_size, d_in1, ..., d_inN, channels_in)`. kernel: the unshared weights for N-D convolution, a 2*(N+1)-D tensor of shape: `(d_in1, ..., d_inN, channels_in, d_out1, ..., d_outN, channels_out)` or `(channels_in, d_in1, ..., d_inN, channels_out, d_out1, ..., d_outN)`, with the ordering of channels and spatial dimensions matching that of the input. Each entry is the weight between a particular input and output location, similarly to a fully-connected weight matrix. kernel_mask: a float 0/1 mask tensor of shape: `(d_in1, ..., d_inN, 1, d_out1, ..., d_outN, 1)` or `(1, d_in1, ..., d_inN, 1, d_out1, ..., d_outN)`, with the ordering of singleton and spatial dimensions matching that of the input. Mask represents the connectivity pattern of the layer and is precomputed elsewhere based on layer parameters: stride, padding, and the receptive field shape. output_shape: a tuple of (N+2) elements representing the output shape: `(batch_size, channels_out, d_out1, ..., d_outN)` or `(batch_size, d_out1, ..., d_outN, channels_out)`, with the ordering of channels and spatial dimensions matching that of the input. Returns: Output (N+2)-D tensor with shape `output_shape`. 
""" inputs_flat = K.reshape(inputs, (K.shape(inputs)[0], -1)) kernel = kernel_mask * kernel kernel = make_2d(kernel, split_dim=K.ndim(kernel) // 2) output_flat = K.math_ops.sparse_matmul(inputs_flat, kernel, b_is_sparse=True) output = K.reshape(output_flat, [K.shape(output_flat)[0],] + output_shape.as_list()[1:]) return output def local_conv_sparse_matmul(inputs, kernel, kernel_idxs, kernel_shape, output_shape): """Apply N-D convolution with un-shared weights using a single sparse matmul. This method outputs `inputs . tf.SparseTensor(indices=kernel_idxs, values=kernel, dense_shape=kernel_shape)`, with `.` standing for matrix-multiply. It also reshapes `inputs` to 2-D and `output` to (N+2)-D. Arguments: inputs: (N+2)-D tensor with shape `(batch_size, channels_in, d_in1, ..., d_inN)` or `(batch_size, d_in1, ..., d_inN, channels_in)`. kernel: a 1-D tensor with shape `(len(kernel_idxs),)` containing all the weights of the layer. kernel_idxs: a list of integer tuples representing indices in a sparse matrix performing the un-shared convolution as a matrix-multiply. kernel_shape: a tuple `(input_size, output_size)`, where `input_size = channels_in * d_in1 * ... * d_inN` and `output_size = channels_out * d_out1 * ... * d_outN`. output_shape: a tuple of (N+2) elements representing the output shape: `(batch_size, channels_out, d_out1, ..., d_outN)` or `(batch_size, d_out1, ..., d_outN, channels_out)`, with the ordering of channels and spatial dimensions matching that of the input. Returns: Output (N+2)-D dense tensor with shape `output_shape`. """ inputs_flat = K.reshape(inputs, (K.shape(inputs)[0], -1)) output_flat = K.sparse_ops.sparse_tensor_dense_mat_mul( kernel_idxs, kernel, kernel_shape, inputs_flat, adjoint_b=True) output_flat_transpose = K.transpose(output_flat) output_reshaped = K.reshape( output_flat_transpose, [K.shape(output_flat_transpose)[0],] + output_shape.as_list()[1:] ) return output_reshaped def make_2d(tensor, split_dim): """Reshapes an N-dimensional tensor into a 2D tensor. Dimensions before (excluding) and after (including) `split_dim` are grouped together. Arguments: tensor: a tensor of shape `(d0, ..., d(N-1))`. split_dim: an integer from 1 to N-1, index of the dimension to group dimensions before (excluding) and after (including). Returns: Tensor of shape `(d0 * ... * d(split_dim-1), d(split_dim) * ... * d(N-1))`. """ shape = K.array_ops.shape(tensor) in_dims = shape[:split_dim] out_dims = shape[split_dim:] in_size = K.math_ops.reduce_prod(in_dims) out_size = K.math_ops.reduce_prod(out_dims) return K.array_ops.reshape(tensor, (in_size, out_size))
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/local.py
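# --- Editor's note: hedged illustration, not part of local.py above. ---
# Implementation mode `2` documented above realizes an unshared convolution as
# a single matmul against a dense weight matrix whose entries outside each
# output position's receptive field are masked to zero. A minimal 1D NumPy
# sketch of that equivalence (valid padding, stride 1, single channel; all
# names here are illustrative, not taken from the file):
import numpy as np

in_len, k, out_len = 5, 3, 3            # out_len = in_len - k + 1 for "valid"
rng = np.random.RandomState(0)
x = rng.randn(in_len)                   # single-channel input sequence
w = rng.randn(out_len, k)               # one unshared kernel per output step

# Loop form (implementation 1's view): each output uses its own weights.
loop_out = np.array([w[i] @ x[i:i + k] for i in range(out_len)])

# Matmul form (implementation 2's view): dense (out_len, in_len) matrix,
# zero outside each receptive field -- the role of `kernel_mask` above.
dense = np.zeros((out_len, in_len))
for i in range(out_len):
    dense[i, i:i + k] = w[i]
np.testing.assert_allclose(loop_out, dense @ x)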
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for kernelized.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import math from absl.testing import parameterized import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import random_seed from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.keras import backend as keras_backend from tensorflow.python.keras import initializers from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.keras.layers import kernelized as kernel_layers from tensorflow.python.keras.utils import kernelized_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.platform import test def _exact_gaussian(stddev): return functools.partial( kernelized_utils.exact_gaussian_kernel, stddev=stddev) def _exact_laplacian(stddev): return functools.partial( kernelized_utils.exact_laplacian_kernel, stddev=stddev) class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase): def _assert_all_close(self, expected, actual, atol=0.001): if not context.executing_eagerly(): with self.cached_session() as sess: keras_backend._initialize_variables(sess) self.assertAllClose(expected, actual, atol=atol) else: self.assertAllClose(expected, actual, atol=atol) @test_util.run_in_graph_and_eager_modes() def test_invalid_output_dim(self): with self.assertRaisesRegexp( ValueError, r'`output_dim` should be a positive integer. Given: -3.'): _ = kernel_layers.RandomFourierFeatures(output_dim=-3, scale=2.0) @test_util.run_in_graph_and_eager_modes() def test_unsupported_kernel_type(self): with self.assertRaisesRegexp( ValueError, r'Unsupported kernel type: \'unsupported_kernel\'.'): _ = kernel_layers.RandomFourierFeatures( 3, 'unsupported_kernel', stddev=2.0) @test_util.run_in_graph_and_eager_modes() def test_invalid_scale(self): with self.assertRaisesRegexp( ValueError, r'When provided, `scale` should be a positive float. Given: 0.0.'): _ = kernel_layers.RandomFourierFeatures(output_dim=10, scale=0.0) @test_util.run_in_graph_and_eager_modes() def test_invalid_input_shape(self): inputs = random_ops.random_uniform((3, 2, 4), seed=1) rff_layer = kernel_layers.RandomFourierFeatures(output_dim=10, scale=3.0) with self.assertRaisesRegexp( ValueError, r'The rank of the input tensor should be 2. 
Got 3 instead.'): _ = rff_layer(inputs) @parameterized.named_parameters( ('gaussian', 'gaussian', 10.0, False), ('random', init_ops.random_uniform_initializer, 1.0, True)) @test_util.run_in_graph_and_eager_modes() def test_random_features_properties(self, initializer, scale, trainable): rff_layer = kernel_layers.RandomFourierFeatures( output_dim=10, kernel_initializer=initializer, scale=scale, trainable=trainable) self.assertEqual(rff_layer.output_dim, 10) self.assertEqual(rff_layer.kernel_initializer, initializer) self.assertEqual(rff_layer.scale, scale) self.assertEqual(rff_layer.trainable, trainable) @parameterized.named_parameters(('gaussian', 'gaussian', False), ('laplacian', 'laplacian', True), ('other', init_ops.ones_initializer, True)) @test_util.run_in_graph_and_eager_modes() def test_call(self, initializer, trainable): rff_layer = kernel_layers.RandomFourierFeatures( output_dim=10, kernel_initializer=initializer, scale=1.0, trainable=trainable, name='random_fourier_features') inputs = random_ops.random_uniform((3, 2), seed=1) outputs = rff_layer(inputs) self.assertListEqual([3, 10], outputs.shape.as_list()) num_trainable_vars = 1 if trainable else 0 self.assertLen(rff_layer.non_trainable_variables, 3 - num_trainable_vars) @test_util.assert_no_new_pyobjects_executing_eagerly def test_no_eager_Leak(self): # Tests that repeatedly constructing and building a Layer does not leak # Python objects. inputs = random_ops.random_uniform((5, 4), seed=1) kernel_layers.RandomFourierFeatures(output_dim=4, name='rff')(inputs) kernel_layers.RandomFourierFeatures(output_dim=10, scale=2.0)(inputs) @test_util.run_in_graph_and_eager_modes() def test_output_shape(self): inputs = random_ops.random_uniform((3, 2), seed=1) rff_layer = kernel_layers.RandomFourierFeatures( output_dim=7, name='random_fourier_features', trainable=True) outputs = rff_layer(inputs) self.assertEqual([3, 7], outputs.shape.as_list()) @parameterized.named_parameters( ('gaussian', 'gaussian'), ('laplacian', 'laplacian'), ('other', init_ops.random_uniform_initializer)) @test_util.run_deprecated_v1 def test_call_on_placeholder(self, initializer): inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None]) rff_layer = kernel_layers.RandomFourierFeatures( output_dim=5, kernel_initializer=initializer, name='random_fourier_features') with self.assertRaisesRegexp( ValueError, r'The last dimension of the inputs to ' '`RandomFourierFeatures` should be defined. Found `None`.'): rff_layer(inputs) inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None]) rff_layer = kernel_layers.RandomFourierFeatures( output_dim=5, kernel_initializer=initializer, name='random_fourier_features') with self.assertRaisesRegexp( ValueError, r'The last dimension of the inputs to ' '`RandomFourierFeatures` should be defined. 
Found `None`.'): rff_layer(inputs) inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3]) rff_layer = kernel_layers.RandomFourierFeatures( output_dim=5, name='random_fourier_features') rff_layer(inputs) @parameterized.named_parameters(('gaussian', 10, 'gaussian', 2.0), ('laplacian', 5, 'laplacian', None), ('other', 10, init_ops.ones_initializer, 1.0)) @test_util.run_in_graph_and_eager_modes() def test_compute_output_shape(self, output_dim, initializer, scale): rff_layer = kernel_layers.RandomFourierFeatures( output_dim, initializer, scale=scale, name='rff') with self.assertRaises(ValueError): rff_layer.compute_output_shape(tensor_shape.TensorShape(None)) with self.assertRaises(ValueError): rff_layer.compute_output_shape(tensor_shape.TensorShape([])) with self.assertRaises(ValueError): rff_layer.compute_output_shape(tensor_shape.TensorShape([3])) with self.assertRaises(ValueError): rff_layer.compute_output_shape(tensor_shape.TensorShape([3, 2, 3])) with self.assertRaisesRegexp( ValueError, r'The innermost dimension of input shape must be defined.'): rff_layer.compute_output_shape(tensor_shape.TensorShape([3, None])) self.assertEqual([None, output_dim], rff_layer.compute_output_shape((None, 3)).as_list()) self.assertEqual([None, output_dim], rff_layer.compute_output_shape( tensor_shape.TensorShape([None, 2])).as_list()) self.assertEqual([4, output_dim], rff_layer.compute_output_shape((4, 1)).as_list()) @parameterized.named_parameters( ('gaussian', 10, 'gaussian', 3.0, False), ('laplacian', 5, 'laplacian', 5.5, True), ('other', 7, init_ops.random_uniform_initializer(), None, True)) @test_util.run_in_graph_and_eager_modes() def test_get_config(self, output_dim, initializer, scale, trainable): rff_layer = kernel_layers.RandomFourierFeatures( output_dim, initializer, scale=scale, trainable=trainable, name='random_fourier_features', ) expected_initializer = initializer if isinstance(initializer, init_ops.Initializer): expected_initializer = initializers.serialize(initializer) expected_dtype = ( 'float32' if base_layer_utils.v2_dtype_behavior_enabled() else None) expected_config = { 'output_dim': output_dim, 'kernel_initializer': expected_initializer, 'scale': scale, 'name': 'random_fourier_features', 'trainable': trainable, 'dtype': expected_dtype, } self.assertLen(expected_config, len(rff_layer.get_config())) self.assertSameElements( list(expected_config.items()), list(rff_layer.get_config().items())) @parameterized.named_parameters( ('gaussian', 5, 'gaussian', None, True), ('laplacian', 5, 'laplacian', 5.5, False), ('other', 7, init_ops.ones_initializer(), 2.0, True)) @test_util.run_in_graph_and_eager_modes() def test_from_config(self, output_dim, initializer, scale, trainable): model_config = { 'output_dim': output_dim, 'kernel_initializer': initializer, 'scale': scale, 'trainable': trainable, 'name': 'random_fourier_features', } rff_layer = kernel_layers.RandomFourierFeatures.from_config(model_config) self.assertEqual(rff_layer.output_dim, output_dim) self.assertEqual(rff_layer.kernel_initializer, initializer) self.assertEqual(rff_layer.scale, scale) self.assertEqual(rff_layer.trainable, trainable) inputs = random_ops.random_uniform((3, 2), seed=1) outputs = rff_layer(inputs) self.assertListEqual([3, output_dim], outputs.shape.as_list()) num_trainable_vars = 1 if trainable else 0 self.assertLen(rff_layer.trainable_variables, num_trainable_vars) if trainable: self.assertEqual('random_fourier_features/random_features_scale:0', rff_layer.trainable_variables[0].name) 
self.assertLen(rff_layer.non_trainable_variables, 3 - num_trainable_vars) @parameterized.named_parameters( ('gaussian', 10, 'gaussian', 3.0, True), ('laplacian', 5, 'laplacian', 5.5, False), ('other', 10, init_ops.random_uniform_initializer(), None, True)) @test_util.run_in_graph_and_eager_modes() def test_same_random_features_params_reused(self, output_dim, initializer, scale, trainable): """Applying the layer on the same input twice gives the same output.""" rff_layer = kernel_layers.RandomFourierFeatures( output_dim=output_dim, kernel_initializer=initializer, scale=scale, trainable=trainable, name='random_fourier_features') inputs = constant_op.constant( np.random.uniform(low=-1.0, high=1.0, size=(2, 4))) output1 = rff_layer(inputs) output2 = rff_layer(inputs) self._assert_all_close(output1, output2) @parameterized.named_parameters( ('gaussian', 'gaussian', 5.0), ('laplacian', 'laplacian', 3.0), ('other', init_ops.random_uniform_initializer(), 5.0)) @test_util.run_in_graph_and_eager_modes() def test_different_params_similar_approximation(self, initializer, scale): random_seed.set_random_seed(12345) rff_layer1 = kernel_layers.RandomFourierFeatures( output_dim=3000, kernel_initializer=initializer, scale=scale, name='rff1') rff_layer2 = kernel_layers.RandomFourierFeatures( output_dim=2000, kernel_initializer=initializer, scale=scale, name='rff2') # Two distinct inputs. x = constant_op.constant([[1.0, -1.0, 0.5]]) y = constant_op.constant([[-1.0, 1.0, 1.0]]) # Apply both layers to both inputs. output_x1 = math.sqrt(2.0 / 3000.0) * rff_layer1(x) output_y1 = math.sqrt(2.0 / 3000.0) * rff_layer1(y) output_x2 = math.sqrt(2.0 / 2000.0) * rff_layer2(x) output_y2 = math.sqrt(2.0 / 2000.0) * rff_layer2(y) # Compute the inner products of the outputs (on inputs x and y) for both # layers. For any fixed random features layer rff_layer, and inputs x, y, # rff_layer(x)^T * rff_layer(y) ~= K(x,y) up to a normalization factor. approx_kernel1 = kernelized_utils.inner_product(output_x1, output_y1) approx_kernel2 = kernelized_utils.inner_product(output_x2, output_y2) self._assert_all_close(approx_kernel1, approx_kernel2, atol=0.08) @parameterized.named_parameters( ('gaussian', 'gaussian', 5.0, _exact_gaussian(stddev=5.0)), ('laplacian', 'laplacian', 20.0, _exact_laplacian(stddev=20.0))) @test_util.run_in_graph_and_eager_modes() def test_bad_kernel_approximation(self, initializer, scale, exact_kernel_fn): """Approximation is bad when output dimension is small.""" # Two distinct inputs. x = constant_op.constant([[1.0, -1.0, 0.5]]) y = constant_op.constant([[-1.0, 1.0, 1.0]]) small_output_dim = 10 random_seed.set_random_seed(1234) # Initialize layer. rff_layer = kernel_layers.RandomFourierFeatures( output_dim=small_output_dim, kernel_initializer=initializer, scale=scale, name='random_fourier_features') # Apply layer to both inputs. output_x = math.sqrt(2.0 / small_output_dim) * rff_layer(x) output_y = math.sqrt(2.0 / small_output_dim) * rff_layer(y) # The inner products of the outputs (on inputs x and y) approximates the # real value of the RBF kernel but poorly since the output dimension of the # layer is small. 
exact_kernel_value = exact_kernel_fn(x, y) approx_kernel_value = kernelized_utils.inner_product(output_x, output_y) abs_error = math_ops.abs(exact_kernel_value - approx_kernel_value) if not context.executing_eagerly(): with self.cached_session() as sess: keras_backend._initialize_variables(sess) abs_error_eval = sess.run([abs_error]) self.assertGreater(abs_error_eval[0][0], 0.05) self.assertLess(abs_error_eval[0][0], 0.5) else: self.assertGreater(abs_error, 0.05) self.assertLess(abs_error, 0.5) @parameterized.named_parameters( ('gaussian', 'gaussian', 5.0, _exact_gaussian(stddev=5.0)), ('laplacian', 'laplacian', 10.0, _exact_laplacian(stddev=10.0))) @test_util.run_in_graph_and_eager_modes() def test_good_kernel_approximation_multiple_inputs(self, initializer, scale, exact_kernel_fn): # Parameters. input_dim = 5 output_dim = 2000 x_rows = 20 y_rows = 30 x = constant_op.constant( np.random.uniform(size=(x_rows, input_dim)), dtype=dtypes.float32) y = constant_op.constant( np.random.uniform(size=(y_rows, input_dim)), dtype=dtypes.float32) random_seed.set_random_seed(1234) rff_layer = kernel_layers.RandomFourierFeatures( output_dim=output_dim, kernel_initializer=initializer, scale=scale, name='random_fourier_features') # The shapes of output_x and output_y are (x_rows, output_dim) and # (y_rows, output_dim) respectively. output_x = math.sqrt(2.0 / output_dim) * rff_layer(x) output_y = math.sqrt(2.0 / output_dim) * rff_layer(y) approx_kernel_matrix = kernelized_utils.inner_product(output_x, output_y) exact_kernel_matrix = exact_kernel_fn(x, y) self._assert_all_close(approx_kernel_matrix, exact_kernel_matrix, atol=0.05) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/kernelized_test.py
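# --- Editor's note: hedged illustration, not part of kernelized_test.py above. ---
# The tests above rely on the random Fourier features identity
# z(x) . z(y) ~= K(x, y), with z(v) = sqrt(2/D) * cos(W v + b), where
# W ~ N(0, 1/stddev^2) and b ~ U[0, 2*pi] approximate the Gaussian (RBF)
# kernel exp(-||x - y||^2 / (2 * stddev^2)). A standalone NumPy check of that
# identity under those assumptions (names and tolerance are illustrative,
# not taken from kernelized.py):
import numpy as np

rng = np.random.RandomState(1234)
stddev, output_dim = 5.0, 20000
x = np.array([1.0, -1.0, 0.5])
y = np.array([-1.0, 1.0, 1.0])

w = rng.normal(scale=1.0 / stddev, size=(output_dim, x.size))
b = rng.uniform(0.0, 2.0 * np.pi, size=output_dim)

def features(v):
    # Random Fourier feature map; inner products approximate the RBF kernel.
    return np.sqrt(2.0 / output_dim) * np.cos(w @ v + b)

exact = np.exp(-np.sum((x - y) ** 2) / (2.0 * stddev ** 2))
approx = features(x) @ features(y)
assert abs(approx - exact) < 0.05  # approximation tightens as output_dim grows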
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for convolutional recurrent layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.framework import test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test @keras_parameterized.run_all_keras_modes class ConvLSTMTest(keras_parameterized.TestCase): @parameterized.named_parameters( *test_util.generate_combinations_with_testcase_name( data_format=['channels_first', 'channels_last'], return_sequences=[True, False])) def test_conv_lstm(self, data_format, return_sequences): num_row = 3 num_col = 3 filters = 2 num_samples = 1 input_channel = 2 input_num_row = 5 input_num_col = 5 sequence_len = 2 if data_format == 'channels_first': inputs = np.random.rand(num_samples, sequence_len, input_channel, input_num_row, input_num_col) else: inputs = np.random.rand(num_samples, sequence_len, input_num_row, input_num_col, input_channel) # test for return state: x = keras.Input(batch_shape=inputs.shape) kwargs = {'data_format': data_format, 'return_sequences': return_sequences, 'return_state': True, 'stateful': True, 'filters': filters, 'kernel_size': (num_row, num_col), 'padding': 'valid'} layer = keras.layers.ConvLSTM2D(**kwargs) layer.build(inputs.shape) outputs = layer(x) _, states = outputs[0], outputs[1:] self.assertEqual(len(states), 2) model = keras.models.Model(x, states[0]) state = model.predict(inputs) self.assertAllClose( keras.backend.eval(layer.states[0]), state, atol=1e-4) # test for output shape: testing_utils.layer_test( keras.layers.ConvLSTM2D, kwargs={'data_format': data_format, 'return_sequences': return_sequences, 'filters': filters, 'kernel_size': (num_row, num_col), 'padding': 'valid'}, input_shape=inputs.shape) def test_conv_lstm_statefulness(self): # Tests for statefulness num_row = 3 num_col = 3 filters = 2 num_samples = 1 input_channel = 2 input_num_row = 5 input_num_col = 5 sequence_len = 2 inputs = np.random.rand(num_samples, sequence_len, input_num_row, input_num_col, input_channel) with self.cached_session(): model = keras.models.Sequential() kwargs = {'data_format': 'channels_last', 'return_sequences': False, 'filters': filters, 'kernel_size': (num_row, num_col), 'stateful': True, 'batch_input_shape': inputs.shape, 'padding': 'same'} layer = keras.layers.ConvLSTM2D(**kwargs) model.add(layer) model.compile(optimizer='sgd', loss='mse') out1 = model.predict(np.ones_like(inputs)) # train once so that the states change model.train_on_batch(np.ones_like(inputs), np.random.random(out1.shape)) out2 = model.predict(np.ones_like(inputs)) # if the state is not reset, output should be different self.assertNotEqual(out1.max(), 
out2.max()) # check that output changes after states are reset # (even though the model itself didn't change) layer.reset_states() out3 = model.predict(np.ones_like(inputs)) self.assertNotEqual(out3.max(), out2.max()) # check that container-level reset_states() works model.reset_states() out4 = model.predict(np.ones_like(inputs)) self.assertAllClose(out3, out4, atol=1e-5) # check that the call to `predict` updated the states out5 = model.predict(np.ones_like(inputs)) self.assertNotEqual(out4.max(), out5.max()) def test_conv_lstm_regularizers(self): # check regularizers num_row = 3 num_col = 3 filters = 2 num_samples = 1 input_channel = 2 input_num_row = 5 input_num_col = 5 sequence_len = 2 inputs = np.random.rand(num_samples, sequence_len, input_num_row, input_num_col, input_channel) with self.cached_session(): kwargs = {'data_format': 'channels_last', 'return_sequences': False, 'kernel_size': (num_row, num_col), 'stateful': True, 'filters': filters, 'batch_input_shape': inputs.shape, 'kernel_regularizer': keras.regularizers.L1L2(l1=0.01), 'recurrent_regularizer': keras.regularizers.L1L2(l1=0.01), 'activity_regularizer': 'l2', 'bias_regularizer': 'l2', 'kernel_constraint': 'max_norm', 'recurrent_constraint': 'max_norm', 'bias_constraint': 'max_norm', 'padding': 'same'} layer = keras.layers.ConvLSTM2D(**kwargs) layer.build(inputs.shape) self.assertEqual(len(layer.losses), 3) layer(keras.backend.variable(np.ones(inputs.shape))) self.assertEqual(len(layer.losses), 4) def test_conv_lstm_dropout(self): # check dropout with self.cached_session(): testing_utils.layer_test( keras.layers.ConvLSTM2D, kwargs={'data_format': 'channels_last', 'return_sequences': False, 'filters': 2, 'kernel_size': (3, 3), 'padding': 'same', 'dropout': 0.1, 'recurrent_dropout': 0.1}, input_shape=(1, 2, 5, 5, 2)) def test_conv_lstm_cloning(self): with self.cached_session(): model = keras.models.Sequential() model.add(keras.layers.ConvLSTM2D(5, 3, input_shape=(None, 5, 5, 3))) test_inputs = np.random.random((2, 4, 5, 5, 3)) reference_outputs = model.predict(test_inputs) weights = model.get_weights() # Use a new graph to clone the model with self.cached_session(): clone = keras.models.clone_model(model) clone.set_weights(weights) outputs = clone.predict(test_inputs) self.assertAllClose(reference_outputs, outputs, atol=1e-5) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/convolutional_recurrent_test.py
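# --- Editor's note: hedged illustration, not part of the test file above. ---
# Minimal sketch of the statefulness behavior exercised above: with
# `stateful=True`, each `predict` call advances the ConvLSTM2D recurrent
# state, and `reset_states()` restores the zero state, so the first and
# post-reset outputs coincide. Shapes are illustrative; assumes a standard
# `tensorflow.keras` installation.
import numpy as np
import tensorflow as tf

inputs = np.ones((1, 2, 5, 5, 2), dtype=np.float32)  # (batch, time, rows, cols, channels)
layer = tf.keras.layers.ConvLSTM2D(
    filters=2, kernel_size=(3, 3), padding='same', stateful=True,
    batch_input_shape=inputs.shape)
model = tf.keras.Sequential([layer])

out1 = model.predict(inputs)   # advances the recurrent state as a side effect
out2 = model.predict(inputs)   # same input, carried-over state => different output
layer.reset_states()
out3 = model.predict(inputs)   # zeroed state again => matches the first call
np.testing.assert_allclose(out1, out3, atol=1e-5)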
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for layer wrappers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util as tf_test_util from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.keras.layers.rnn_cell_wrapper_v2 import ResidualWrapper from tensorflow.python.ops.array_ops import concat from tensorflow.python.platform import test from tensorflow.python.training.tracking import util as trackable_util from tensorflow.python.util import object_identity class _RNNCellWithConstants(keras.layers.Layer): def __init__(self, units, constant_size, **kwargs): self.units = units self.state_size = units self.constant_size = constant_size super(_RNNCellWithConstants, self).__init__(**kwargs) def build(self, input_shape): self.input_kernel = self.add_weight( shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.constant_kernel = self.add_weight( shape=(self.constant_size, self.units), initializer='uniform', name='constant_kernel') self.built = True def call(self, inputs, states, constants): [prev_output] = states [constant] = constants h_input = keras.backend.dot(inputs, self.input_kernel) h_state = keras.backend.dot(prev_output, self.recurrent_kernel) h_const = keras.backend.dot(constant, self.constant_kernel) output = h_input + h_state + h_const return output, [output] def get_config(self): config = {'units': self.units, 'constant_size': self.constant_size} base_config = super(_RNNCellWithConstants, self).get_config() return dict(list(base_config.items()) + list(config.items())) class TimeDistributedTest(test.TestCase): @tf_test_util.run_in_graph_and_eager_modes def test_timedistributed_dense(self): model = keras.models.Sequential() model.add( keras.layers.TimeDistributed( keras.layers.Dense(2), input_shape=(3, 4))) model.compile(optimizer='rmsprop', loss='mse') model.fit( np.random.random((10, 3, 4)), np.random.random((10, 3, 2)), epochs=1, batch_size=10) # test config model.get_config() # check whether the model variables are present in the # trackable list of objects checkpointed_objects = object_identity.ObjectIdentitySet( trackable_util.list_objects(model)) for v in model.variables: self.assertIn(v, checkpointed_objects) def test_timedistributed_static_batch_size(self): model = keras.models.Sequential() model.add( keras.layers.TimeDistributed( keras.layers.Dense(2), input_shape=(3, 4), batch_size=10)) 
model.compile(optimizer='rmsprop', loss='mse') model.fit( np.random.random((10, 3, 4)), np.random.random((10, 3, 2)), epochs=1, batch_size=10) def test_timedistributed_invalid_init(self): x = constant_op.constant(np.zeros((1, 1)).astype('float32')) with self.assertRaisesRegexp( ValueError, 'Please initialize `TimeDistributed` layer with a `Layer` instance.'): keras.layers.TimeDistributed(x) def test_timedistributed_conv2d(self): with self.cached_session(): model = keras.models.Sequential() model.add( keras.layers.TimeDistributed( keras.layers.Conv2D(5, (2, 2), padding='same'), input_shape=(2, 4, 4, 3))) model.add(keras.layers.Activation('relu')) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch( np.random.random((1, 2, 4, 4, 3)), np.random.random((1, 2, 4, 4, 5))) model = keras.models.model_from_json(model.to_json()) model.summary() def test_timedistributed_stacked(self): with self.cached_session(): model = keras.models.Sequential() model.add( keras.layers.TimeDistributed( keras.layers.Dense(2), input_shape=(3, 4))) model.add(keras.layers.TimeDistributed(keras.layers.Dense(3))) model.add(keras.layers.Activation('relu')) model.compile(optimizer='rmsprop', loss='mse') model.fit( np.random.random((10, 3, 4)), np.random.random((10, 3, 3)), epochs=1, batch_size=10) def test_regularizers(self): with self.cached_session(): model = keras.models.Sequential() model.add( keras.layers.TimeDistributed( keras.layers.Dense(2, kernel_regularizer='l1', activity_regularizer='l1'), input_shape=(3, 4))) model.add(keras.layers.Activation('relu')) model.compile(optimizer='rmsprop', loss='mse') self.assertEqual(len(model.losses), 2) def test_TimeDistributed_batchnorm(self): with self.cached_session(): # test that wrapped BN updates still work. model = keras.models.Sequential() model.add(keras.layers.TimeDistributed( keras.layers.BatchNormalization(center=True, scale=True), name='bn', input_shape=(10, 2))) model.compile(optimizer='rmsprop', loss='mse') # Assert that mean and variance are 0 and 1. td = model.layers[0] self.assertAllClose(td.get_weights()[2], np.array([0, 0])) assert np.array_equal(td.get_weights()[3], np.array([1, 1])) # Train model.train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)), np.broadcast_to(np.array([0, 1]), (1, 10, 2))) # Assert that mean and variance changed. assert not np.array_equal(td.get_weights()[2], np.array([0, 0])) assert not np.array_equal(td.get_weights()[3], np.array([1, 1])) # Verify input_map has one mapping from inputs to reshaped inputs. 
self.assertEqual(len(td._input_map.keys()), 1) def test_TimeDistributed_trainable(self): # test layers that need learning_phase to be set x = keras.layers.Input(shape=(3, 2)) layer = keras.layers.TimeDistributed(keras.layers.BatchNormalization()) _ = layer(x) self.assertEqual(len(layer.updates), 2) self.assertEqual(len(layer.trainable_weights), 2) layer.trainable = False assert not layer.updates assert not layer.trainable_weights layer.trainable = True assert len(layer.updates) == 2 assert len(layer.trainable_weights) == 2 def test_TimeDistributed_with_masked_embedding_and_unspecified_shape(self): with self.cached_session(): # test with unspecified shape and Embeddings with mask_zero model = keras.models.Sequential() model.add(keras.layers.TimeDistributed( keras.layers.Embedding(5, 6, mask_zero=True), input_shape=(None, None))) # N by t_1 by t_2 by 6 model.add(keras.layers.TimeDistributed( keras.layers.SimpleRNN(7, return_sequences=True))) model.add(keras.layers.TimeDistributed( keras.layers.SimpleRNN(8, return_sequences=False))) model.add(keras.layers.SimpleRNN(1, return_sequences=False)) model.compile(optimizer='rmsprop', loss='mse') model_input = np.random.randint(low=1, high=5, size=(10, 3, 4), dtype='int32') for i in range(4): model_input[i, i:, i:] = 0 model.fit(model_input, np.random.random((10, 1)), epochs=1, batch_size=10) mask_outputs = [model.layers[0].compute_mask(model.input)] for layer in model.layers[1:]: mask_outputs.append(layer.compute_mask(layer.input, mask_outputs[-1])) func = keras.backend.function([model.input], mask_outputs[:-1]) mask_outputs_val = func([model_input]) ref_mask_val_0 = model_input > 0 # embedding layer ref_mask_val_1 = ref_mask_val_0 # first RNN layer ref_mask_val_2 = np.any(ref_mask_val_1, axis=-1) # second RNN layer ref_mask_val = [ref_mask_val_0, ref_mask_val_1, ref_mask_val_2] for i in range(3): self.assertAllEqual(mask_outputs_val[i], ref_mask_val[i]) self.assertIs(mask_outputs[-1], None) # final layer def test_TimeDistributed_with_masking_layer(self): with self.cached_session(): # test with Masking layer model = keras.models.Sequential() model.add(keras.layers.TimeDistributed(keras.layers.Masking( mask_value=0.,), input_shape=(None, 4))) model.add(keras.layers.TimeDistributed(keras.layers.Dense(5))) model.compile(optimizer='rmsprop', loss='mse') model_input = np.random.randint(low=1, high=5, size=(10, 3, 4)) for i in range(4): model_input[i, i:, :] = 0. 
model.compile(optimizer='rmsprop', loss='mse') model.fit(model_input, np.random.random((10, 3, 5)), epochs=1, batch_size=6) mask_outputs = [model.layers[0].compute_mask(model.input)] mask_outputs += [model.layers[1].compute_mask(model.layers[1].input, mask_outputs[-1])] func = keras.backend.function([model.input], mask_outputs) mask_outputs_val = func([model_input]) self.assertEqual((mask_outputs_val[0]).all(), model_input.all()) self.assertEqual((mask_outputs_val[1]).all(), model_input.all()) def test_TimeDistributed_with_different_time_shapes(self): time_dist = keras.layers.TimeDistributed(keras.layers.Dense(5)) ph_1 = keras.backend.placeholder(shape=(None, 10, 13)) out_1 = time_dist(ph_1) self.assertEqual(out_1.shape.as_list(), [None, 10, 5]) ph_2 = keras.backend.placeholder(shape=(None, 1, 13)) out_2 = time_dist(ph_2) self.assertEqual(out_2.shape.as_list(), [None, 1, 5]) ph_3 = keras.backend.placeholder(shape=(None, 1, 18)) with self.assertRaisesRegexp(ValueError, 'is incompatible with layer'): time_dist(ph_3) def test_TimeDistributed_with_invalid_dimensions(self): time_dist = keras.layers.TimeDistributed(keras.layers.Dense(5)) ph = keras.backend.placeholder(shape=(None, 10)) with self.assertRaisesRegexp( ValueError, '`TimeDistributed` Layer should be passed an `input_shape `'): time_dist(ph) @tf_test_util.run_in_graph_and_eager_modes def test_TimeDistributed_reshape(self): class NoReshapeLayer(keras.layers.Layer): def call(self, inputs): return inputs # Built-in layers that aren't stateful use the reshape implementation. td1 = keras.layers.TimeDistributed(keras.layers.Dense(5)) self.assertTrue(td1._always_use_reshape) # Built-in layers that are stateful don't use the reshape implementation. td2 = keras.layers.TimeDistributed( keras.layers.RNN(keras.layers.SimpleRNNCell(10), stateful=True)) self.assertFalse(td2._always_use_reshape) # Custom layers are not whitelisted for the fast reshape implementation. 
td3 = keras.layers.TimeDistributed(NoReshapeLayer()) self.assertFalse(td3._always_use_reshape) @tf_test_util.run_in_graph_and_eager_modes def test_TimeDistributed_output_shape_return_types(self): class TestLayer(keras.layers.Layer): def call(self, inputs): return concat([inputs, inputs], axis=-1) def compute_output_shape(self, input_shape): output_shape = tensor_shape.TensorShape(input_shape).as_list() output_shape[-1] = output_shape[-1] * 2 output_shape = tensor_shape.TensorShape(output_shape) return output_shape class TestListLayer(TestLayer): def compute_output_shape(self, input_shape): shape = super(TestListLayer, self).compute_output_shape(input_shape) return shape.as_list() class TestTupleLayer(TestLayer): def compute_output_shape(self, input_shape): shape = super(TestTupleLayer, self).compute_output_shape(input_shape) return tuple(shape.as_list()) # Layers can specify output shape as list/tuple/TensorShape test_layers = [TestLayer, TestListLayer, TestTupleLayer] for layer in test_layers: input_layer = keras.layers.TimeDistributed(layer()) inputs = keras.backend.placeholder(shape=(None, 2, 4)) output = input_layer(inputs) self.assertEqual(output.shape.as_list(), [None, 2, 8]) self.assertEqual( input_layer.compute_output_shape([None, 2, 4]).as_list(), [None, 2, 8]) @tf_test_util.run_all_in_graph_and_eager_modes class BidirectionalTest(test.TestCase, parameterized.TestCase): def test_bidirectional(self): rnn = keras.layers.SimpleRNN samples = 2 dim = 2 timesteps = 2 output_dim = 2 with self.cached_session(): for mode in ['sum', 'concat', 'ave', 'mul']: x = np.random.random((samples, timesteps, dim)) target_dim = 2 * output_dim if mode == 'concat' else output_dim y = np.random.random((samples, target_dim)) # test with Sequential model model = keras.models.Sequential() model.add( keras.layers.Bidirectional( rnn(output_dim), merge_mode=mode, input_shape=(timesteps, dim))) model.compile(optimizer='rmsprop', loss='mse') model.fit(x, y, epochs=1, batch_size=1) # check whether the model variables are present in the # trackable list of objects checkpointed_objects = object_identity.ObjectIdentitySet( trackable_util.list_objects(model)) for v in model.variables: self.assertIn(v, checkpointed_objects) # test compute output shape ref_shape = model.layers[-1].output.shape shape = model.layers[-1].compute_output_shape( (None, timesteps, dim)) self.assertListEqual(shape.as_list(), ref_shape.as_list()) # test config model.get_config() model = keras.models.model_from_json(model.to_json()) model.summary() def test_bidirectional_invalid_init(self): x = constant_op.constant(np.zeros((1, 1)).astype('float32')) with self.assertRaisesRegexp( ValueError, 'Please initialize `Bidirectional` layer with a `Layer` instance.'): keras.layers.Bidirectional(x) def test_bidirectional_weight_loading(self): rnn = keras.layers.SimpleRNN samples = 2 dim = 2 timesteps = 2 output_dim = 2 with self.cached_session(): x = np.random.random((samples, timesteps, dim)) model = keras.models.Sequential() model.add( keras.layers.Bidirectional( rnn(output_dim), input_shape=(timesteps, dim))) y_ref = model.predict(x) weights = model.layers[-1].get_weights() model.layers[-1].set_weights(weights) y = model.predict(x) self.assertAllClose(y, y_ref) def test_bidirectional_stacked(self): # test stacked bidirectional layers rnn = keras.layers.SimpleRNN samples = 2 dim = 2 timesteps = 2 output_dim = 2 mode = 'sum' with self.cached_session(): x = np.random.random((samples, timesteps, dim)) target_dim = 2 * output_dim if mode == 'concat' else 
output_dim y = np.random.random((samples, target_dim)) model = keras.models.Sequential() model.add( keras.layers.Bidirectional( rnn(output_dim, return_sequences=True), merge_mode=mode, input_shape=(timesteps, dim))) model.add(keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode)) model.compile(loss='mse', optimizer='sgd') model.fit(x, y, epochs=1, batch_size=1) # test with functional API inputs = keras.layers.Input((timesteps, dim)) output = keras.layers.Bidirectional( rnn(output_dim), merge_mode=mode)(inputs) model = keras.models.Model(inputs, output) model.compile(loss='mse', optimizer='sgd') model.fit(x, y, epochs=1, batch_size=1) def test_bidirectional_statefulness(self): # Bidirectional and stateful rnn = keras.layers.SimpleRNN samples = 2 dim = 2 timesteps = 2 output_dim = 2 mode = 'sum' with self.cached_session(): x = np.random.random((samples, timesteps, dim)) target_dim = 2 * output_dim if mode == 'concat' else output_dim y = np.random.random((samples, target_dim)) inputs = keras.layers.Input(batch_shape=(1, timesteps, dim)) output = keras.layers.Bidirectional( rnn(output_dim, stateful=True), merge_mode=mode)(inputs) model = keras.models.Model(inputs, output) model.compile(loss='mse', optimizer='sgd') model.fit(x, y, epochs=1, batch_size=1) def test_Bidirectional_merged_value(self): rnn = keras.layers.LSTM samples = 2 dim = 5 timesteps = 3 units = 3 x = [np.random.rand(samples, timesteps, dim)] with self.cached_session(): for merge_mode in ['sum', 'mul', 'ave', 'concat', None]: if merge_mode == 'sum': merge_func = lambda y, y_rev: y + y_rev elif merge_mode == 'mul': merge_func = lambda y, y_rev: y * y_rev elif merge_mode == 'ave': merge_func = lambda y, y_rev: (y + y_rev) / 2 elif merge_mode == 'concat': merge_func = lambda y, y_rev: np.concatenate((y, y_rev), axis=-1) else: merge_func = lambda y, y_rev: [y, y_rev] # basic case inputs = keras.Input((timesteps, dim)) layer = keras.layers.Bidirectional( rnn(units, return_sequences=True), merge_mode=merge_mode) f_merged = keras.backend.function([inputs], _to_list(layer(inputs))) f_forward = keras.backend.function([inputs], [layer.forward_layer(inputs)]) f_backward = keras.backend.function( [inputs], [keras.backend.reverse(layer.backward_layer(inputs), 1)]) y_merged = f_merged(x) y_expected = _to_list(merge_func(f_forward(x)[0], f_backward(x)[0])) assert len(y_merged) == len(y_expected) for x1, x2 in zip(y_merged, y_expected): self.assertAllClose(x1, x2, atol=1e-5) # test return_state inputs = keras.Input((timesteps, dim)) layer = keras.layers.Bidirectional( rnn(units, return_state=True), merge_mode=merge_mode) f_merged = keras.backend.function([inputs], layer(inputs)) f_forward = keras.backend.function([inputs], layer.forward_layer(inputs)) f_backward = keras.backend.function([inputs], layer.backward_layer(inputs)) n_states = len(layer.layer.states) y_merged = f_merged(x) y_forward = f_forward(x) y_backward = f_backward(x) y_expected = _to_list(merge_func(y_forward[0], y_backward[0])) assert len(y_merged) == len(y_expected) + n_states * 2 for x1, x2 in zip(y_merged, y_expected): self.assertAllClose(x1, x2, atol=1e-5) y_merged = y_merged[-n_states * 2:] y_forward = y_forward[-n_states:] y_backward = y_backward[-n_states:] for state_birnn, state_inner in zip(y_merged, y_forward + y_backward): self.assertAllClose(state_birnn, state_inner, atol=1e-5) def test_Bidirectional_dropout(self): rnn = keras.layers.LSTM samples = 2 dim = 5 timesteps = 3 units = 3 merge_mode = 'sum' x = [np.random.rand(samples, timesteps, dim)] with 
self.cached_session(): inputs = keras.Input((timesteps, dim)) wrapped = keras.layers.Bidirectional( rnn(units, dropout=0.2, recurrent_dropout=0.2), merge_mode=merge_mode) outputs = _to_list(wrapped(inputs, training=True)) inputs = keras.Input((timesteps, dim)) wrapped = keras.layers.Bidirectional( rnn(units, dropout=0.2, return_state=True), merge_mode=merge_mode) outputs = _to_list(wrapped(inputs)) model = keras.Model(inputs, outputs) y1 = _to_list(model.predict(x)) y2 = _to_list(model.predict(x)) for x1, x2 in zip(y1, y2): self.assertAllClose(x1, x2, atol=1e-5) def test_Bidirectional_state_reuse(self): rnn = keras.layers.LSTM samples = 2 dim = 5 timesteps = 3 units = 3 with self.cached_session(): input1 = keras.layers.Input((timesteps, dim)) layer = keras.layers.Bidirectional( rnn(units, return_state=True, return_sequences=True)) state = layer(input1)[1:] # test passing invalid initial_state: passing a tensor input2 = keras.layers.Input((timesteps, dim)) with self.assertRaises(ValueError): keras.layers.Bidirectional(rnn(units))(input2, initial_state=state[0]) # test valid usage: passing a list output = keras.layers.Bidirectional(rnn(units))(input2, initial_state=state) model = keras.models.Model([input1, input2], output) assert len(model.layers) == 4 assert isinstance(model.layers[-1].input, list) inputs = [np.random.rand(samples, timesteps, dim), np.random.rand(samples, timesteps, dim)] model.predict(inputs) def test_Bidirectional_state_reuse_with_np_input(self): # See https://github.com/tensorflow/tensorflow/issues/28761 for more detail. rnn = keras.layers.LSTM samples = 2 dim = 5 timesteps = 3 units = 3 with self.cached_session(): input1 = np.random.rand(samples, timesteps, dim).astype(np.float32) layer = keras.layers.Bidirectional( rnn(units, return_state=True, return_sequences=True)) state = layer(input1)[1:] input2 = np.random.rand(samples, timesteps, dim).astype(np.float32) keras.layers.Bidirectional(rnn(units))(input2, initial_state=state) def test_Bidirectional_trainable(self): # test layers that need learning_phase to be set with self.cached_session(): x = keras.layers.Input(shape=(3, 2)) layer = keras.layers.Bidirectional(keras.layers.SimpleRNN(3)) _ = layer(x) assert len(layer.trainable_weights) == 6 layer.trainable = False assert not layer.trainable_weights layer.trainable = True assert len(layer.trainable_weights) == 6 def test_Bidirectional_updates(self): if context.executing_eagerly(): self.skipTest('layer.updates is only available in graph mode.') with self.cached_session(): x = keras.layers.Input(shape=(3, 2)) x_reachable_update = x * x layer = keras.layers.Bidirectional(keras.layers.SimpleRNN(3)) _ = layer(x) assert not layer.updates assert not layer.get_updates_for(None) assert not layer.get_updates_for(x) # TODO(b/128684069): Remove when Wrapper sublayers are __call__'d. 
with base_layer_utils.call_context().enter(layer, x, True, None): layer.forward_layer.add_update(x_reachable_update, inputs=x) layer.forward_layer.add_update(1, inputs=None) layer.backward_layer.add_update(x_reachable_update, inputs=x) layer.backward_layer.add_update(1, inputs=None) assert len(layer.updates) == 4 assert len(layer.get_updates_for(None)) == 2 assert len(layer.get_updates_for(x)) == 2 def test_Bidirectional_losses(self): with self.cached_session(): x = keras.layers.Input(shape=(3, 2)) x_reachable_loss = x * x layer = keras.layers.Bidirectional( keras.layers.SimpleRNN( 3, kernel_regularizer='l1', bias_regularizer='l1', activity_regularizer='l1')) _ = layer(x) assert len(layer.losses) == 6 assert len(layer.get_losses_for(None)) == 4 assert len(layer.get_losses_for(x)) == 2 # Create a random tensor that is not conditional on the inputs. with keras.backend.get_graph().as_default(): const_tensor = constant_op.constant(1) layer.forward_layer.add_loss(x_reachable_loss, inputs=x) layer.forward_layer.add_loss(const_tensor, inputs=None) layer.backward_layer.add_loss(x_reachable_loss, inputs=x) layer.backward_layer.add_loss(const_tensor, inputs=None) assert len(layer.losses) == 10 assert len(layer.get_losses_for(None)) == 6 assert len(layer.get_losses_for(x)) == 4 def test_Bidirectional_with_constants(self): with self.cached_session(): # Test basic case. x = keras.Input((5, 5)) c = keras.Input((3,)) cell = _RNNCellWithConstants(32, 3) custom_objects = {'_RNNCellWithConstants': _RNNCellWithConstants} with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.Bidirectional(keras.layers.RNN(cell)) y = layer(x, constants=c) model = keras.Model([x, c], y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 64)) ) # Test basic case serialization. x_np = np.random.random((6, 5, 5)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, c_np]) weights = model.get_weights() config = layer.get_config() with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config)) y = layer(x, constants=c) model = keras.Model([x, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, c_np]) self.assertAllClose(y_np, y_np_2, atol=1e-4) # Test flat list inputs with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config)) y = layer([x, c]) model = keras.Model([x, c], y) model.set_weights(weights) y_np_3 = model.predict([x_np, c_np]) self.assertAllClose(y_np, y_np_3, atol=1e-4) def test_Bidirectional_with_constants_layer_passing_initial_state(self): with self.cached_session(): # Test basic case. x = keras.Input((5, 5)) c = keras.Input((3,)) s_for = keras.Input((32,)) s_bac = keras.Input((32,)) cell = _RNNCellWithConstants(32, 3) custom_objects = {'_RNNCellWithConstants': _RNNCellWithConstants} with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.Bidirectional(keras.layers.RNN(cell)) y = layer(x, initial_state=[s_for, s_bac], constants=c) model = keras.Model([x, s_for, s_bac, c], y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 32)), np.zeros((6, 3))], np.zeros((6, 64)) ) # Test basic case serialization. 
x_np = np.random.random((6, 5, 5)) s_fw_np = np.random.random((6, 32)) s_bk_np = np.random.random((6, 32)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, s_fw_np, s_bk_np, c_np]) weights = model.get_weights() config = layer.get_config() with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config)) y = layer(x, initial_state=[s_for, s_bac], constants=c) model = keras.Model([x, s_for, s_bac, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, s_fw_np, s_bk_np, c_np]) self.assertAllClose(y_np, y_np_2, atol=1e-4) # Verify that state is used y_np_2_different_s = model.predict( [x_np, s_fw_np + 10., s_bk_np + 10., c_np]) assert np.mean(y_np - y_np_2_different_s) != 0 # Test flat list inputs with keras.utils.CustomObjectScope(custom_objects): layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config)) y = layer([x, s_for, s_bac, c]) model = keras.Model([x, s_for, s_bac, c], y) model.set_weights(weights) y_np_3 = model.predict([x_np, s_fw_np, s_bk_np, c_np]) self.assertAllClose(y_np, y_np_3, atol=1e-4) @tf_test_util.run_in_graph_and_eager_modes def test_Bidirectional_output_shape_return_types(self): class TestLayer(keras.layers.SimpleRNN): def call(self, inputs): return concat([inputs, inputs], axis=-1) def compute_output_shape(self, input_shape): output_shape = tensor_shape.TensorShape(input_shape).as_list() output_shape[-1] = output_shape[-1] * 2 return tensor_shape.TensorShape(output_shape) class TestListLayer(TestLayer): def compute_output_shape(self, input_shape): shape = super(TestListLayer, self).compute_output_shape(input_shape) return shape.as_list() class TestTupleLayer(TestLayer): def compute_output_shape(self, input_shape): shape = super(TestTupleLayer, self).compute_output_shape(input_shape) return tuple(shape.as_list()) # Layers can specify output shape as list/tuple/TensorShape test_layers = [TestLayer, TestListLayer, TestTupleLayer] for layer in test_layers: input_layer = keras.layers.Bidirectional(layer(1)) inputs = keras.backend.placeholder(shape=(None, 2, 4)) output = input_layer(inputs) self.assertEqual(output.shape.as_list(), [None, 2, 16]) self.assertEqual( input_layer.compute_output_shape([None, 2, 4]).as_list(), [None, 2, 16]) def test_Bidirectional_last_output_with_masking(self): rnn = keras.layers.LSTM samples = 2 dim = 5 timesteps = 3 units = 3 merge_mode = 'concat' x = np.random.rand(samples, timesteps, dim) # clear the first record's timestep 2. Last output should be same as state, # not zeroed. x[0, 2] = 0 with self.cached_session(): inputs = keras.Input((timesteps, dim)) masked_inputs = keras.layers.Masking()(inputs) wrapped = keras.layers.Bidirectional( rnn(units, return_state=True), merge_mode=merge_mode) outputs = _to_list(wrapped(masked_inputs, training=True)) self.assertLen(outputs, 5) self.assertEqual(outputs[0].shape.as_list(), [None, units * 2]) model = keras.Model(inputs, outputs) y = _to_list(model.predict(x)) self.assertLen(y, 5) self.assertAllClose(y[0], np.concatenate([y[1], y[3]], axis=1)) def test_Bidirectional_sequence_output_with_masking(self): rnn = keras.layers.LSTM samples = 2 dim = 5 timesteps = 3 units = 3 merge_mode = 'concat' x = np.random.rand(samples, timesteps, dim) # clear the first record's timestep 2, and expect the output of timestep 2 # is also 0s. 
x[0, 2] = 0 with self.cached_session(): inputs = keras.Input((timesteps, dim)) masked_inputs = keras.layers.Masking()(inputs) wrapped = keras.layers.Bidirectional( rnn(units, return_sequences=True), merge_mode=merge_mode) outputs = _to_list(wrapped(masked_inputs, training=True)) self.assertLen(outputs, 1) self.assertEqual(outputs[0].shape.as_list(), [None, timesteps, units * 2]) model = keras.Model(inputs, outputs) y = _to_list(model.predict(x)) self.assertLen(y, 1) self.assertAllClose(y[0][0, 2], np.zeros(units * 2)) @parameterized.parameters(['sum', 'concat']) @tf_test_util.run_in_graph_and_eager_modes def test_custom_backward_layer(self, mode): rnn = keras.layers.SimpleRNN samples = 2 dim = 2 timesteps = 2 output_dim = 2 x = np.random.random((samples, timesteps, dim)) target_dim = 2 * output_dim if mode == 'concat' else output_dim y = np.random.random((samples, target_dim)) forward_layer = rnn(output_dim) backward_layer = rnn(output_dim, go_backwards=True) # test with Sequential model model = keras.models.Sequential() model.add( keras.layers.Bidirectional( forward_layer, merge_mode=mode, backward_layer=backward_layer, input_shape=(timesteps, dim))) model.compile(optimizer='rmsprop', loss='mse') model.fit(x, y, epochs=1, batch_size=1) # check whether the model variables are present in the # trackable list of objects checkpointed_objects = object_identity.ObjectIdentitySet( trackable_util.list_objects(model)) for v in model.variables: self.assertIn(v, checkpointed_objects) # test compute output shape ref_shape = model.layers[-1].output.shape shape = model.layers[-1].compute_output_shape((None, timesteps, dim)) self.assertListEqual(shape.as_list(), ref_shape.as_list()) # test config model.get_config() model = keras.models.model_from_json(model.to_json()) model.summary() @tf_test_util.run_in_graph_and_eager_modes def test_custom_backward_layer_error_check(self): rnn = keras.layers.LSTM units = 2 forward_layer = rnn(units) backward_layer = rnn(units) with self.assertRaisesRegexp(ValueError, 'should have different `go_backwards` value.'): keras.layers.Bidirectional( forward_layer, merge_mode='concat', backward_layer=backward_layer) for attr in ('stateful', 'return_sequences', 'return_state'): kwargs = {attr: True} backward_layer = rnn(units, go_backwards=True, **kwargs) with self.assertRaisesRegexp( ValueError, 'expected to have the same value for attribute ' + attr): keras.layers.Bidirectional( forward_layer, merge_mode='concat', backward_layer=backward_layer) def test_custom_backward_layer_serialization(self): rnn = keras.layers.LSTM units = 2 forward_layer = rnn(units) backward_layer = rnn(units, go_backwards=True) layer = keras.layers.Bidirectional( forward_layer, merge_mode='concat', backward_layer=backward_layer) config = layer.get_config() layer_from_config = keras.layers.Bidirectional.from_config(config) new_config = layer_from_config.get_config() self.assertDictEqual(config, new_config) def test_rnn_layer_name(self): rnn = keras.layers.LSTM units = 2 layer = keras.layers.Bidirectional(rnn(units, name='rnn')) config = layer.get_config() self.assertEqual(config['layer']['config']['name'], 'rnn') layer_from_config = keras.layers.Bidirectional.from_config(config) self.assertEqual(layer_from_config.forward_layer.name, 'forward_rnn') self.assertEqual(layer_from_config.backward_layer.name, 'backward_rnn') def test_custom_backward_rnn_layer_name(self): rnn = keras.layers.LSTM units = 2 forward_layer = rnn(units) backward_layer = rnn(units, go_backwards=True) layer = 
keras.layers.Bidirectional( forward_layer, merge_mode='concat', backward_layer=backward_layer) config = layer.get_config() self.assertEqual(config['layer']['config']['name'], 'lstm') self.assertEqual(config['backward_layer']['config']['name'], 'lstm_1') layer_from_config = keras.layers.Bidirectional.from_config(config) self.assertEqual(layer_from_config.forward_layer.name, 'forward_lstm') self.assertEqual(layer_from_config.backward_layer.name, 'backward_lstm_1') def test_rnn_with_customized_cell(self): batch = 20 dim = 5 timesteps = 3 units = 5 merge_mode = 'sum' class ResidualLSTMCell(keras.layers.LSTMCell): def call(self, inputs, states, training=None): output, states = super(ResidualLSTMCell, self).call(inputs, states) return output + inputs, states cell = ResidualLSTMCell(units) forward_layer = keras.layers.RNN(cell) inputs = keras.Input((timesteps, dim)) bidirectional_rnn = keras.layers.Bidirectional( forward_layer, merge_mode=merge_mode) outputs = _to_list(bidirectional_rnn(inputs)) model = keras.Model(inputs, outputs) model.compile(optimizer='rmsprop', loss='mse') model.fit( np.random.random((batch, timesteps, dim)), np.random.random((batch, units)), epochs=1, batch_size=10) # Test stacking cell = [ResidualLSTMCell(units), ResidualLSTMCell(units)] forward_layer = keras.layers.RNN(cell) inputs = keras.Input((timesteps, dim)) bidirectional_rnn = keras.layers.Bidirectional( forward_layer, merge_mode=merge_mode) outputs = _to_list(bidirectional_rnn(inputs)) model = keras.Model(inputs, outputs) model.compile(optimizer='rmsprop', loss='mse') model.fit( np.random.random((batch, timesteps, dim)), np.random.random((batch, units)), epochs=1, batch_size=10) @tf_test_util.run_v2_only def test_wrapped_rnn_cell(self): # See https://github.com/tensorflow/tensorflow/issues/26581. batch = 20 dim = 5 timesteps = 3 units = 5 merge_mode = 'sum' cell = keras.layers.LSTMCell(units) cell = ResidualWrapper(cell) rnn = keras.layers.RNN(cell) inputs = keras.Input((timesteps, dim)) wrapped = keras.layers.Bidirectional(rnn, merge_mode=merge_mode) outputs = _to_list(wrapped(inputs)) model = keras.Model(inputs, outputs) model.compile(optimizer='rmsprop', loss='mse') model.fit( np.random.random((batch, timesteps, dim)), np.random.random((batch, units)), epochs=1, batch_size=10) def _to_list(ls): if isinstance(ls, list): return ls else: return [ls] if __name__ == '__main__': test.main()
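# Editor's sketch (not part of the original test file): the merge modes
# exercised in test_Bidirectional_merged_value combine the forward and
# backward RNN outputs elementwise. A minimal NumPy restatement of the same
# rules, assuming two equal-shaped arrays `y_fwd` and `y_bwd`:
def _merge_like_bidirectional(y_fwd, y_bwd, mode):
  """Mirrors Bidirectional's merge rules for 'sum', 'mul', 'ave', 'concat', None."""
  if mode == 'sum':
    return y_fwd + y_bwd
  if mode == 'mul':
    return y_fwd * y_bwd
  if mode == 'ave':
    return (y_fwd + y_bwd) / 2.
  if mode == 'concat':
    # Concatenation doubles the feature axis, which is why the tests above
    # use target_dim = 2 * output_dim when mode == 'concat'.
    return np.concatenate((y_fwd, y_bwd), axis=-1)
  return [y_fwd, y_bwd]  # merge_mode=None keeps both outputs unmerged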
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/wrappers_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for GRU layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.framework import test_util as tf_test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test @keras_parameterized.run_all_keras_modes class GRULayerTest(keras_parameterized.TestCase): def test_return_sequences_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( keras.layers.GRU, kwargs={'units': units, 'return_sequences': True}, input_shape=(num_samples, timesteps, embedding_dim)) def test_float64_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( keras.layers.GRU, kwargs={'units': units, 'return_sequences': True, 'dtype': 'float64'}, input_shape=(num_samples, timesteps, embedding_dim), input_dtype='float64') def test_dynamic_behavior_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 layer = keras.layers.GRU(units, input_shape=(None, embedding_dim)) model = keras.models.Sequential() model.add(layer) model.compile( 'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.random.random((num_samples, timesteps, embedding_dim)) y = np.random.random((num_samples, units)) model.train_on_batch(x, y) def test_dropout_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( keras.layers.GRU, kwargs={'units': units, 'dropout': 0.1, 'recurrent_dropout': 0.1}, input_shape=(num_samples, timesteps, embedding_dim)) @parameterized.parameters([0, 1, 2]) def test_implementation_mode_GRU(self, implementation_mode): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( keras.layers.GRU, kwargs={'units': units, 'implementation': implementation_mode}, input_shape=(num_samples, timesteps, embedding_dim)) def test_reset_after_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 (x_train, y_train), _ = testing_utils.get_test_data( train_samples=num_samples, test_samples=0, input_shape=(timesteps, embedding_dim), num_classes=units) y_train = keras.utils.to_categorical(y_train, units) inputs = keras.layers.Input(shape=[timesteps, embedding_dim]) gru_layer = keras.layers.GRU(units, reset_after=True) output = gru_layer(inputs) gru_model = keras.models.Model(inputs, output) gru_model.compile( 'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) gru_model.fit(x_train, y_train) gru_model.predict(x_train) def 
test_with_masking_layer_GRU(self): layer_class = keras.layers.GRU inputs = np.random.random((2, 3, 4)) targets = np.abs(np.random.random((2, 3, 5))) targets /= targets.sum(axis=-1, keepdims=True) model = keras.models.Sequential() model.add(keras.layers.Masking(input_shape=(3, 4))) model.add(layer_class(units=5, return_sequences=True, unroll=False)) model.compile( loss='categorical_crossentropy', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1) def test_statefulness_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 layer_class = keras.layers.GRU model = keras.models.Sequential() model.add( keras.layers.Embedding( 4, embedding_dim, mask_zero=True, input_length=timesteps, batch_input_shape=(num_samples, timesteps))) layer = layer_class( units, return_sequences=False, stateful=True, weights=None) model.add(layer) model.compile( optimizer='sgd', loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) out1 = model.predict(np.ones((num_samples, timesteps))) self.assertEqual(out1.shape, (num_samples, units)) # train once so that the states change model.train_on_batch( np.ones((num_samples, timesteps)), np.ones((num_samples, units))) out2 = model.predict(np.ones((num_samples, timesteps))) # if the state is not reset, output should be different self.assertNotEqual(out1.max(), out2.max()) # check that output changes after states are reset # (even though the model itself didn't change) layer.reset_states() out3 = model.predict(np.ones((num_samples, timesteps))) self.assertNotEqual(out2.max(), out3.max()) # check that container-level reset_states() works model.reset_states() out4 = model.predict(np.ones((num_samples, timesteps))) np.testing.assert_allclose(out3, out4, atol=1e-5) # check that the call to `predict` updated the states out5 = model.predict(np.ones((num_samples, timesteps))) self.assertNotEqual(out4.max(), out5.max()) # Check masking layer.reset_states() left_padded_input = np.ones((num_samples, timesteps)) left_padded_input[0, :1] = 0 left_padded_input[1, :2] = 0 out6 = model.predict(left_padded_input) layer.reset_states() right_padded_input = np.ones((num_samples, timesteps)) right_padded_input[0, -1:] = 0 right_padded_input[1, -2:] = 0 out7 = model.predict(right_padded_input) np.testing.assert_allclose(out7, out6, atol=1e-5) @tf_test_util.run_all_in_graph_and_eager_modes class GRULayerGenericTest(test.TestCase): def test_constraints_GRU(self): embedding_dim = 4 layer_class = keras.layers.GRU k_constraint = keras.constraints.max_norm(0.01) r_constraint = keras.constraints.max_norm(0.01) b_constraint = keras.constraints.max_norm(0.01) layer = layer_class( 5, return_sequences=False, weights=None, input_shape=(None, embedding_dim), kernel_constraint=k_constraint, recurrent_constraint=r_constraint, bias_constraint=b_constraint) layer.build((None, None, embedding_dim)) self.assertEqual(layer.cell.kernel.constraint, k_constraint) self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint) self.assertEqual(layer.cell.bias.constraint, b_constraint) def test_from_config_GRU(self): layer_class = keras.layers.GRU for stateful in (False, True): l1 = layer_class(units=1, stateful=stateful) l2 = layer_class.from_config(l1.get_config()) assert l1.get_config() == l2.get_config() def test_regularizers_GRU(self): embedding_dim = 4 layer_class = 
keras.layers.GRU layer = layer_class( 5, return_sequences=False, weights=None, input_shape=(None, embedding_dim), kernel_regularizer=keras.regularizers.l1(0.01), recurrent_regularizer=keras.regularizers.l1(0.01), bias_regularizer='l2', activity_regularizer='l1') layer.build((None, None, 2)) self.assertEqual(len(layer.losses), 3) x = keras.backend.variable(np.ones((2, 3, 2))) layer(x) if context.executing_eagerly(): self.assertEqual(len(layer.losses), 4) else: self.assertEqual(len(layer.get_losses_for(x)), 1) if __name__ == '__main__': test.main()
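# Editor's sketch (not part of the original test file): the stateful-GRU
# pattern exercised in test_statefulness_GRU above. A stateful layer needs a
# fixed batch size (batch_input_shape) and carries its state across predict
# calls until reset_states() is invoked. The helper name and shapes below
# are hypothetical.
def _stateful_gru_demo(num_samples=2, timesteps=3, units=2):
  model = keras.models.Sequential()
  model.add(keras.layers.GRU(
      units, stateful=True, batch_input_shape=(num_samples, timesteps, 1)))
  model.compile('sgd', 'mse')
  x = np.ones((num_samples, timesteps, 1))
  out1 = model.predict(x)  # first call starts from all-zero states
  out2 = model.predict(x)  # differs from out1: predict carried the state over
  model.reset_states()     # container-level reset clears every layer's state
  out3 = model.predict(x)  # matches out1 again, since the weights are unchanged
  return out1, out2, out3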
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/gru_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Pooling layers. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import backend from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.utils import conv_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.util.tf_export import keras_export class Pooling1D(Layer): """Pooling layer for arbitrary pooling functions, for 1D inputs. This class only exists for code reuse. It will never be an exposed API. Arguments: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. name: A string, the name of the layer. 
""" def __init__(self, pool_function, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): super(Pooling1D, self).__init__(name=name, **kwargs) if data_format is None: data_format = backend.image_data_format() if strides is None: strides = pool_size self.pool_function = pool_function self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size') self.strides = conv_utils.normalize_tuple(strides, 1, 'strides') self.padding = conv_utils.normalize_padding(padding) self.data_format = conv_utils.normalize_data_format(data_format) self.input_spec = InputSpec(ndim=3) def call(self, inputs): pad_axis = 2 if self.data_format == 'channels_last' else 3 inputs = array_ops.expand_dims(inputs, pad_axis) outputs = self.pool_function( inputs, self.pool_size + (1,), strides=self.strides + (1,), padding=self.padding, data_format=self.data_format) return array_ops.squeeze(outputs, pad_axis) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_first': steps = input_shape[2] features = input_shape[1] else: steps = input_shape[1] features = input_shape[2] length = conv_utils.conv_output_length(steps, self.pool_size[0], self.padding, self.strides[0]) if self.data_format == 'channels_first': return tensor_shape.TensorShape([input_shape[0], features, length]) else: return tensor_shape.TensorShape([input_shape[0], length, features]) def get_config(self): config = { 'strides': self.strides, 'pool_size': self.pool_size, 'padding': self.padding, 'data_format': self.data_format, } base_config = super(Pooling1D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.MaxPool1D', 'keras.layers.MaxPooling1D') class MaxPooling1D(Pooling1D): """Max pooling operation for temporal data. Arguments: pool_size: Integer, size of the max pooling windows. strides: Integer, or None. Factor by which to downscale. E.g. 2 will halve the input. If None, it will default to `pool_size`. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. Input shape: - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, steps, features)`. - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, features, steps)`. Output shape: - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, downsampled_steps, features)`. - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, features, downsampled_steps)`. """ def __init__(self, pool_size=2, strides=None, padding='valid', data_format='channels_last', **kwargs): super(MaxPooling1D, self).__init__( functools.partial(backend.pool2d, pool_mode='max'), pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs) @keras_export('keras.layers.AveragePooling1D', 'keras.layers.AvgPool1D') class AveragePooling1D(Pooling1D): """Average pooling for temporal data. Arguments: pool_size: Integer, size of the average pooling windows. strides: Integer, or None. Factor by which to downscale. E.g. 2 will halve the input. If None, it will default to `pool_size`. padding: One of `"valid"` or `"same"` (case-insensitive). 
data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. Input shape: - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, steps, features)`. - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, features, steps)`. Output shape: - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, downsampled_steps, features)`. - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, features, downsampled_steps)`. """ def __init__(self, pool_size=2, strides=None, padding='valid', data_format='channels_last', **kwargs): super(AveragePooling1D, self).__init__( functools.partial(backend.pool2d, pool_mode='avg'), pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs) class Pooling2D(Layer): """Pooling layer for arbitrary pooling functions, for 2D inputs (e.g. images). This class only exists for code reuse. It will never be an exposed API. Arguments: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. 
""" def __init__(self, pool_function, pool_size, strides, padding='valid', data_format=None, name=None, **kwargs): super(Pooling2D, self).__init__(name=name, **kwargs) if data_format is None: data_format = backend.image_data_format() if strides is None: strides = pool_size self.pool_function = pool_function self.pool_size = conv_utils.normalize_tuple(pool_size, 2, 'pool_size') self.strides = conv_utils.normalize_tuple(strides, 2, 'strides') self.padding = conv_utils.normalize_padding(padding) self.data_format = conv_utils.normalize_data_format(data_format) self.input_spec = InputSpec(ndim=4) def call(self, inputs): if self.data_format == 'channels_last': pool_shape = (1,) + self.pool_size + (1,) strides = (1,) + self.strides + (1,) else: pool_shape = (1, 1) + self.pool_size strides = (1, 1) + self.strides outputs = self.pool_function( inputs, ksize=pool_shape, strides=strides, padding=self.padding.upper(), data_format=conv_utils.convert_data_format(self.data_format, 4)) return outputs def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_first': rows = input_shape[2] cols = input_shape[3] else: rows = input_shape[1] cols = input_shape[2] rows = conv_utils.conv_output_length(rows, self.pool_size[0], self.padding, self.strides[0]) cols = conv_utils.conv_output_length(cols, self.pool_size[1], self.padding, self.strides[1]) if self.data_format == 'channels_first': return tensor_shape.TensorShape( [input_shape[0], input_shape[1], rows, cols]) else: return tensor_shape.TensorShape( [input_shape[0], rows, cols, input_shape[3]]) def get_config(self): config = { 'pool_size': self.pool_size, 'padding': self.padding, 'strides': self.strides, 'data_format': self.data_format } base_config = super(Pooling2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.MaxPool2D', 'keras.layers.MaxPooling2D') class MaxPooling2D(Pooling2D): """Max pooling operation for spatial data. Arguments: pool_size: integer or tuple of 2 integers, factors by which to downscale (vertical, horizontal). `(2, 2)` will halve the input in both spatial dimension. If only one integer is specified, the same window length will be used for both dimensions. strides: Integer, tuple of 2 integers, or None. Strides values. If None, it will default to `pool_size`. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`. Output shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`. 
""" def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format=None, **kwargs): super(MaxPooling2D, self).__init__( nn.max_pool, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs) @keras_export('keras.layers.AveragePooling2D', 'keras.layers.AvgPool2D') class AveragePooling2D(Pooling2D): """Average pooling operation for spatial data. Arguments: pool_size: integer or tuple of 2 integers, factors by which to downscale (vertical, horizontal). `(2, 2)` will halve the input in both spatial dimension. If only one integer is specified, the same window length will be used for both dimensions. strides: Integer, tuple of 2 integers, or None. Strides values. If None, it will default to `pool_size`. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`. Output shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`. """ def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format=None, **kwargs): super(AveragePooling2D, self).__init__( nn.avg_pool, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs) class Pooling3D(Layer): """Pooling layer for arbitrary pooling functions, for 3D inputs. This class only exists for code reuse. It will never be an exposed API. Arguments: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. 
""" def __init__(self, pool_function, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): super(Pooling3D, self).__init__(name=name, **kwargs) if data_format is None: data_format = backend.image_data_format() if strides is None: strides = pool_size self.pool_function = pool_function self.pool_size = conv_utils.normalize_tuple(pool_size, 3, 'pool_size') self.strides = conv_utils.normalize_tuple(strides, 3, 'strides') self.padding = conv_utils.normalize_padding(padding) self.data_format = conv_utils.normalize_data_format(data_format) self.input_spec = InputSpec(ndim=5) def call(self, inputs): pool_shape = (1,) + self.pool_size + (1,) strides = (1,) + self.strides + (1,) if self.data_format == 'channels_first': # TF does not support `channels_first` with 3D pooling operations, # so we must handle this case manually. # TODO(fchollet): remove this when TF pooling is feature-complete. inputs = array_ops.transpose(inputs, (0, 2, 3, 4, 1)) outputs = self.pool_function( inputs, ksize=pool_shape, strides=strides, padding=self.padding.upper()) if self.data_format == 'channels_first': outputs = array_ops.transpose(outputs, (0, 4, 1, 2, 3)) return outputs def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_first': len_dim1 = input_shape[2] len_dim2 = input_shape[3] len_dim3 = input_shape[4] else: len_dim1 = input_shape[1] len_dim2 = input_shape[2] len_dim3 = input_shape[3] len_dim1 = conv_utils.conv_output_length(len_dim1, self.pool_size[0], self.padding, self.strides[0]) len_dim2 = conv_utils.conv_output_length(len_dim2, self.pool_size[1], self.padding, self.strides[1]) len_dim3 = conv_utils.conv_output_length(len_dim3, self.pool_size[2], self.padding, self.strides[2]) if self.data_format == 'channels_first': return tensor_shape.TensorShape( [input_shape[0], input_shape[1], len_dim1, len_dim2, len_dim3]) else: return tensor_shape.TensorShape( [input_shape[0], len_dim1, len_dim2, len_dim3, input_shape[4]]) def get_config(self): config = { 'pool_size': self.pool_size, 'padding': self.padding, 'strides': self.strides, 'data_format': self.data_format } base_config = super(Pooling3D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.MaxPool3D', 'keras.layers.MaxPooling3D') class MaxPooling3D(Pooling3D): """Max pooling operation for 3D data (spatial or spatio-temporal). Arguments: pool_size: Tuple of 3 integers, factors by which to downscale (dim1, dim2, dim3). `(2, 2, 2)` will halve the size of the 3D input in each dimension. strides: tuple of 3 integers, or None. Strides values. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". 
Input shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` Output shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)` """ def __init__(self, pool_size=(2, 2, 2), strides=None, padding='valid', data_format=None, **kwargs): super(MaxPooling3D, self).__init__( nn.max_pool3d, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs) @keras_export('keras.layers.AveragePooling3D', 'keras.layers.AvgPool3D') class AveragePooling3D(Pooling3D): """Average pooling operation for 3D data (spatial or spatio-temporal). Arguments: pool_size: tuple of 3 integers, factors by which to downscale (dim1, dim2, dim3). `(2, 2, 2)` will halve the size of the 3D input in each dimension. strides: tuple of 3 integers, or None. Strides values. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` Output shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)` """ def __init__(self, pool_size=(2, 2, 2), strides=None, padding='valid', data_format=None, **kwargs): super(AveragePooling3D, self).__init__( nn.avg_pool3d, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs) class GlobalPooling1D(Layer): """Abstract class for different global pooling 1D layers.""" def __init__(self, data_format='channels_last', **kwargs): super(GlobalPooling1D, self).__init__(**kwargs) self.input_spec = InputSpec(ndim=3) self.data_format = conv_utils.normalize_data_format(data_format) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_first': return tensor_shape.TensorShape([input_shape[0], input_shape[1]]) else: return tensor_shape.TensorShape([input_shape[0], input_shape[2]]) def call(self, inputs): raise NotImplementedError def get_config(self): config = {'data_format': self.data_format} base_config = super(GlobalPooling1D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.GlobalAveragePooling1D', 'keras.layers.GlobalAvgPool1D') class GlobalAveragePooling1D(GlobalPooling1D): """Global average pooling operation for temporal data. 
Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. Call arguments: inputs: A 3D tensor. mask: Binary tensor of shape `(batch_size, steps)` indicating whether a given step should be masked (excluded from the average). Input shape: - If `data_format='channels_last'`: 3D tensor with shape: `(batch_size, steps, features)` - If `data_format='channels_first'`: 3D tensor with shape: `(batch_size, features, steps)` Output shape: 2D tensor with shape `(batch_size, features)`. """ def __init__(self, data_format='channels_last', **kwargs): super(GlobalAveragePooling1D, self).__init__(data_format=data_format, **kwargs) self.supports_masking = True def call(self, inputs, mask=None): steps_axis = 1 if self.data_format == 'channels_last' else 2 if mask is not None: mask = math_ops.cast(mask, backend.floatx()) input_shape = inputs.shape.as_list() broadcast_shape = [-1, input_shape[steps_axis], 1] mask = array_ops.reshape(mask, broadcast_shape) inputs *= mask return backend.sum(inputs, axis=steps_axis) / math_ops.reduce_sum( mask, axis=steps_axis) else: return backend.mean(inputs, axis=steps_axis) def compute_mask(self, inputs, mask=None): return None @keras_export('keras.layers.GlobalMaxPool1D', 'keras.layers.GlobalMaxPooling1D') class GlobalMaxPooling1D(GlobalPooling1D): """Global max pooling operation for temporal data. Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. Input shape: - If `data_format='channels_last'`: 3D tensor with shape: `(batch_size, steps, features)` - If `data_format='channels_first'`: 3D tensor with shape: `(batch_size, features, steps)` Output shape: 2D tensor with shape `(batch_size, features)`. """ def call(self, inputs): steps_axis = 1 if self.data_format == 'channels_last' else 2 return backend.max(inputs, axis=steps_axis) class GlobalPooling2D(Layer): """Abstract class for different global pooling 2D layers. """ def __init__(self, data_format=None, **kwargs): super(GlobalPooling2D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) self.input_spec = InputSpec(ndim=4) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_last': return tensor_shape.TensorShape([input_shape[0], input_shape[3]]) else: return tensor_shape.TensorShape([input_shape[0], input_shape[1]]) def call(self, inputs): raise NotImplementedError def get_config(self): config = {'data_format': self.data_format} base_config = super(GlobalPooling2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.GlobalAveragePooling2D', 'keras.layers.GlobalAvgPool2D') class GlobalAveragePooling2D(GlobalPooling2D): """Global average pooling operation for spatial data. Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. 
`channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`. Output shape: 2D tensor with shape `(batch_size, channels)`. """ def call(self, inputs): if self.data_format == 'channels_last': return backend.mean(inputs, axis=[1, 2]) else: return backend.mean(inputs, axis=[2, 3]) @keras_export('keras.layers.GlobalMaxPool2D', 'keras.layers.GlobalMaxPooling2D') class GlobalMaxPooling2D(GlobalPooling2D): """Global max pooling operation for spatial data. Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`. Output shape: 2D tensor with shape `(batch_size, channels)`. """ def call(self, inputs): if self.data_format == 'channels_last': return backend.max(inputs, axis=[1, 2]) else: return backend.max(inputs, axis=[2, 3]) class GlobalPooling3D(Layer): """Abstract class for different global pooling 3D layers.""" def __init__(self, data_format=None, **kwargs): super(GlobalPooling3D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) self.input_spec = InputSpec(ndim=5) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_last': return tensor_shape.TensorShape([input_shape[0], input_shape[4]]) else: return tensor_shape.TensorShape([input_shape[0], input_shape[1]]) def call(self, inputs): raise NotImplementedError def get_config(self): config = {'data_format': self.data_format} base_config = super(GlobalPooling3D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.GlobalAveragePooling3D', 'keras.layers.GlobalAvgPool3D') class GlobalAveragePooling3D(GlobalPooling3D): """Global Average pooling operation for 3D data. Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". 
Input shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` Output shape: 2D tensor with shape `(batch_size, channels)`. """ def call(self, inputs): if self.data_format == 'channels_last': return backend.mean(inputs, axis=[1, 2, 3]) else: return backend.mean(inputs, axis=[2, 3, 4]) @keras_export('keras.layers.GlobalMaxPool3D', 'keras.layers.GlobalMaxPooling3D') class GlobalMaxPooling3D(GlobalPooling3D): """Global Max pooling operation for 3D data. Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` Output shape: 2D tensor with shape `(batch_size, channels)`. """ def call(self, inputs): if self.data_format == 'channels_last': return backend.max(inputs, axis=[1, 2, 3]) else: return backend.max(inputs, axis=[2, 3, 4]) # Aliases AvgPool1D = AveragePooling1D MaxPool1D = MaxPooling1D AvgPool2D = AveragePooling2D MaxPool2D = MaxPooling2D AvgPool3D = AveragePooling3D MaxPool3D = MaxPooling3D GlobalMaxPool1D = GlobalMaxPooling1D GlobalMaxPool2D = GlobalMaxPooling2D GlobalMaxPool3D = GlobalMaxPooling3D GlobalAvgPool1D = GlobalAveragePooling1D GlobalAvgPool2D = GlobalAveragePooling2D GlobalAvgPool3D = GlobalAveragePooling3D
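# Editor's sketch (illustrative only, not part of the original module): the
# global pooling layers above collapse every spatial/temporal axis, so the
# output is always rank 2. Assuming the channels_last defaults:
#
#   gap = GlobalAveragePooling2D()
#   gap.compute_output_shape((8, 6, 6, 16)).as_list()   # -> [8, 16]
#
#   gmp = GlobalMaxPooling1D()
#   gmp.compute_output_shape((8, 10, 3)).as_list()      # -> [8, 3]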
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/pooling.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for V2 LSTM layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil import time from absl.testing import parameterized import numpy as np from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python import keras from tensorflow.python.client import session as session_lib from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import random_seed from tensorflow.python.framework import test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.layers import recurrent as rnn_v1 from tensorflow.python.keras.layers import recurrent_v2 as rnn from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import gradient_descent from tensorflow.python.util import nest # Global config for grappler setting that is used for graph mode test. 
_rewrites = rewriter_config_pb2.RewriterConfig() _rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON _rewrites.min_graph_nodes = -1 _graph_options = config_pb2.GraphOptions(rewrite_options=_rewrites) _config = config_pb2.ConfigProto(graph_options=_graph_options) @keras_parameterized.run_all_keras_modes(config=_config) class LSTMV2Test(keras_parameterized.TestCase): @parameterized.named_parameters( ('non_tan_activation', 'relu', 'sigmoid', 0, False, True), ('non_sigmoid_recur_activation', 'tanh', 'relu', 0, False, True), ('use_recurrent_dropout', 'tanh', 'sigmoid', 0.1, False, True), ('unroll', 'tanh', 'sigmoid', 0, True, True), ('not_use_bias', 'tanh', 'sigmoid', 0, False, False), ) def test_could_use_defun_backend(self, activation, recurrent_activation, recurrent_dropout, unroll, use_bias): layer = rnn.LSTM( 1, activation=activation, recurrent_activation=recurrent_activation, recurrent_dropout=recurrent_dropout, unroll=unroll, use_bias=use_bias) self.assertFalse(layer.could_use_cudnn) def test_static_shape_inference_LSTM(self): # Github issue: 15165 timesteps = 3 embedding_dim = 4 units = 2 model = keras.models.Sequential() inputs = keras.layers.Dense( embedding_dim, input_shape=(timesteps, embedding_dim)) model.add(inputs) layer = rnn.LSTM(units, return_sequences=True) model.add(layer) outputs = model.layers[-1].output self.assertEqual(outputs.shape.as_list(), [None, timesteps, units]) def test_dynamic_behavior_LSTM(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 layer = rnn.LSTM(units, input_shape=(None, embedding_dim)) model = keras.models.Sequential() model.add(layer) model.compile(gradient_descent.GradientDescentOptimizer(0.001), 'mse') x = np.random.random((num_samples, timesteps, embedding_dim)) y = np.random.random((num_samples, units)) model.train_on_batch(x, y) def test_stacking_LSTM(self): inputs = np.random.random((2, 3, 4)) targets = np.abs(np.random.random((2, 3, 5))) targets /= targets.sum(axis=-1, keepdims=True) model = keras.models.Sequential() model.add(rnn.LSTM(10, return_sequences=True, unroll=False)) model.add(rnn.LSTM(5, return_sequences=True, unroll=False)) model.compile( loss='categorical_crossentropy', optimizer=gradient_descent.GradientDescentOptimizer(0.01)) model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1) def test_from_config_LSTM(self): layer_class = rnn.LSTM for stateful in (False, True): l1 = layer_class(units=1, stateful=stateful) l2 = layer_class.from_config(l1.get_config()) assert l1.get_config() == l2.get_config() def test_specify_initial_state_keras_tensor(self): num_states = 2 timesteps = 3 embedding_dim = 4 units = 3 num_samples = 2 # Test with Keras tensor inputs = keras.Input((timesteps, embedding_dim)) initial_state = [keras.Input((units,)) for _ in range(num_states)] layer = rnn.LSTM(units) if len(initial_state) == 1: output = layer(inputs, initial_state=initial_state[0]) else: output = layer(inputs, initial_state=initial_state) self.assertTrue( any(initial_state[0] is t for t in layer._inbound_nodes[0].input_tensors)) model = keras.models.Model([inputs] + initial_state, output) model.compile( loss='categorical_crossentropy', optimizer=gradient_descent.GradientDescentOptimizer(0.01)) inputs = np.random.random((num_samples, timesteps, embedding_dim)) initial_state = [ np.random.random((num_samples, units)) for _ in range(num_states) ] targets = np.random.random((num_samples, units)) model.train_on_batch([inputs] + initial_state, targets) def test_specify_initial_state_non_keras_tensor(self): 
num_states = 2 timesteps = 3 embedding_dim = 4 units = 3 num_samples = 2 # Test with non-Keras tensor inputs = keras.Input((timesteps, embedding_dim)) initial_state = [ keras.backend.random_normal_variable((num_samples, units), 0, 1) for _ in range(num_states) ] layer = rnn.LSTM(units) output = layer(inputs, initial_state=initial_state) model = keras.models.Model(inputs, output) model.compile( loss='categorical_crossentropy', optimizer=gradient_descent.GradientDescentOptimizer(0.01)) inputs = np.random.random((num_samples, timesteps, embedding_dim)) targets = np.random.random((num_samples, units)) model.train_on_batch(inputs, targets) def test_reset_states_with_values(self): num_states = 2 timesteps = 3 embedding_dim = 4 units = 3 num_samples = 2 layer = rnn.LSTM(units, stateful=True) layer.build((num_samples, timesteps, embedding_dim)) initial_weight_count = len(layer.weights) layer.reset_states() assert len(layer.states) == num_states assert layer.states[0] is not None self.assertAllClose( keras.backend.eval(layer.states[0]), np.zeros(keras.backend.int_shape(layer.states[0])), atol=1e-4) state_shapes = [keras.backend.int_shape(state) for state in layer.states] values = [np.ones(shape) for shape in state_shapes] if len(values) == 1: values = values[0] layer.reset_states(values) self.assertAllClose( keras.backend.eval(layer.states[0]), np.ones(keras.backend.int_shape(layer.states[0])), atol=1e-4) # Test with invalid data with self.assertRaises(ValueError): layer.reset_states([1] * (len(layer.states) + 1)) self.assertEqual(initial_weight_count, len(layer.weights)) # Variables in "states" shouldn't show up in .weights layer.states = nest.map_structure(variables.Variable, values) layer.reset_states() self.assertEqual(initial_weight_count, len(layer.weights)) def test_specify_state_with_masking(self): num_states = 2 timesteps = 3 embedding_dim = 4 units = 3 num_samples = 2 inputs = keras.Input((timesteps, embedding_dim)) _ = keras.layers.Masking()(inputs) initial_state = [keras.Input((units,)) for _ in range(num_states)] output = rnn.LSTM(units)( inputs, initial_state=initial_state) model = keras.models.Model([inputs] + initial_state, output) model.compile( loss='categorical_crossentropy', optimizer=gradient_descent.GradientDescentOptimizer(0.01)) inputs = np.random.random((num_samples, timesteps, embedding_dim)) initial_state = [ np.random.random((num_samples, units)) for _ in range(num_states) ] targets = np.random.random((num_samples, units)) model.train_on_batch([inputs] + initial_state, targets) def test_return_state(self): num_states = 2 timesteps = 3 embedding_dim = 4 units = 3 num_samples = 2 inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim)) masked = keras.layers.Masking()(inputs) layer = rnn.LSTM(units, return_state=True, stateful=True) outputs = layer(masked) state = outputs[1:] assert len(state) == num_states model = keras.models.Model(inputs, state[0]) inputs = np.random.random((num_samples, timesteps, embedding_dim)) state = model.predict(inputs) self.assertAllClose(keras.backend.eval(layer.states[0]), state, atol=1e-4) def test_state_reuse(self): timesteps = 3 embedding_dim = 4 units = 3 num_samples = 2 inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim)) layer = rnn.LSTM( units, return_state=True, return_sequences=True) outputs = layer(inputs) output, state = outputs[0], outputs[1:] output = rnn.LSTM(units)(output, initial_state=state) model = keras.models.Model(inputs, output) inputs = np.random.random((num_samples, timesteps, 
embedding_dim)) model.predict(inputs) def test_initial_states_as_other_inputs(self): timesteps = 3 embedding_dim = 4 units = 3 num_samples = 2 num_states = 2 layer_class = rnn.LSTM # Test with Keras tensor main_inputs = keras.Input((timesteps, embedding_dim)) initial_state = [keras.Input((units,)) for _ in range(num_states)] inputs = [main_inputs] + initial_state layer = layer_class(units) output = layer(inputs) self.assertTrue( any(initial_state[0] is t for t in layer._inbound_nodes[0].input_tensors)) model = keras.models.Model(inputs, output) model.compile( loss='categorical_crossentropy', optimizer=gradient_descent.GradientDescentOptimizer(0.01)) main_inputs = np.random.random((num_samples, timesteps, embedding_dim)) initial_state = [ np.random.random((num_samples, units)) for _ in range(num_states) ] targets = np.random.random((num_samples, units)) model.train_on_batch([main_inputs] + initial_state, targets) @test_util.run_v2_only def test_lstm_v2_feature_parity_with_canonical_lstm(self): input_shape = 10 rnn_state_size = 8 timestep = 4 batch = 20 (x_train, y_train), _ = testing_utils.get_test_data( train_samples=batch, test_samples=0, input_shape=(timestep, input_shape), num_classes=rnn_state_size, random_seed=random_seed.DEFAULT_GRAPH_SEED) y_train = keras.utils.to_categorical(y_train, rnn_state_size) # For the last two batch items of the test data, we filter out the last # timestep to simulate the variable length sequence and masking test. x_train[-2:, -1, :] = 0.0 y_train[-2:] = 0 inputs = keras.layers.Input( shape=[timestep, input_shape], dtype=dtypes.float32) masked_input = keras.layers.Masking()(inputs) lstm_layer = rnn_v1.LSTM(rnn_state_size, recurrent_activation='sigmoid') output = lstm_layer(masked_input) lstm_model = keras.models.Model(inputs, output) weights = lstm_model.get_weights() y_1 = lstm_model.predict(x_train) lstm_model.compile('rmsprop', 'mse') lstm_model.fit(x_train, y_train) y_2 = lstm_model.predict(x_train) with test_util.device(use_gpu=True): cudnn_layer = rnn.LSTM(rnn_state_size) cudnn_model = keras.models.Model(inputs, cudnn_layer(masked_input)) cudnn_model.set_weights(weights) y_3 = cudnn_model.predict(x_train) cudnn_model.compile('rmsprop', 'mse') cudnn_model.fit(x_train, y_train) y_4 = cudnn_model.predict(x_train) self.assertAllClose(y_1, y_3, rtol=1e-5, atol=2e-5) self.assertAllClose(y_2, y_4, rtol=1e-5, atol=2e-5) @parameterized.named_parameters(('v0', 0), ('v1', 1), ('v2', 2)) def test_implementation_mode_LSTM(self, implementation_mode): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( rnn.LSTM, kwargs={ 'units': units, 'implementation': implementation_mode }, input_shape=(num_samples, timesteps, embedding_dim)) def test_constraints_LSTM(self): embedding_dim = 4 layer_class = rnn.LSTM k_constraint = keras.constraints.max_norm(0.01) r_constraint = keras.constraints.max_norm(0.01) b_constraint = keras.constraints.max_norm(0.01) layer = layer_class( 5, return_sequences=False, weights=None, input_shape=(None, embedding_dim), kernel_constraint=k_constraint, recurrent_constraint=r_constraint, bias_constraint=b_constraint) layer.build((None, None, embedding_dim)) self.assertEqual(layer.cell.kernel.constraint, k_constraint) self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint) self.assertEqual(layer.cell.bias.constraint, b_constraint) def test_masking_LSTM(self): layer_class = rnn.LSTM inputs = np.random.random((2, 3, 4)) targets = np.abs(np.random.random((2, 3, 5))) targets /= targets.sum(axis=-1, keepdims=True) model = keras.models.Sequential() model.add(keras.layers.Masking(input_shape=(3,
4))) model.add(layer_class(units=5, return_sequences=True, unroll=False)) model.compile( loss='categorical_crossentropy', optimizer=gradient_descent.GradientDescentOptimizer(0.01)) model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1) def test_masking_with_stacking_LSTM(self): inputs = np.random.random((2, 3, 4)) targets = np.abs(np.random.random((2, 3, 5))) targets /= targets.sum(axis=-1, keepdims=True) model = keras.models.Sequential() model.add(keras.layers.Masking(input_shape=(3, 4))) model.add(rnn.LSTM(10, return_sequences=True, unroll=False)) model.add(rnn.LSTM(5, return_sequences=True, unroll=False)) model.compile( loss='categorical_crossentropy', optimizer=gradient_descent.GradientDescentOptimizer(0.01)) model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1) @parameterized.named_parameters( # test_name, time_major, go_backwards ('normal', False, False), ('time_major', True, False), ('go_backwards', False, True), ('both', True, True), ) def test_time_major_and_go_backward(self, time_major, go_backwards): input_shape = 10 rnn_state_size = 8 timestep = 4 batch = 100 x_train = np.random.random((batch, timestep, input_shape)) def build_model(layer_cls): inputs = keras.layers.Input( shape=[timestep, input_shape], dtype=dtypes.float32) layer = layer_cls(rnn_state_size, recurrent_activation='sigmoid', time_major=time_major, return_sequences=True, go_backwards=go_backwards) if time_major: converted_input = keras.layers.Lambda( lambda t: array_ops.transpose(t, [1, 0, 2]))(inputs) outputs = layer(converted_input) outputs = keras.layers.Lambda( lambda t: array_ops.transpose(t, [1, 0, 2]))(outputs) else: outputs = layer(inputs) return keras.models.Model(inputs, outputs) lstm_model = build_model(rnn_v1.LSTM) y_ref = lstm_model.predict(x_train) weights = lstm_model.get_weights() lstm_v2_model = build_model(rnn.LSTM) lstm_v2_model.set_weights(weights) y = lstm_v2_model.predict(x_train) self.assertAllClose(y, y_ref) def test_keras_model_with_lstm(self): input_shape = 10 rnn_state_size = 8 output_shape = 8 timestep = 4 batch = 100 epoch = 10 (x_train, y_train), _ = testing_utils.get_test_data( train_samples=batch, test_samples=0, input_shape=(timestep, input_shape), num_classes=output_shape) y_train = keras.utils.to_categorical(y_train, output_shape) layer = rnn.LSTM(rnn_state_size) inputs = keras.layers.Input( shape=[timestep, input_shape], dtype=dtypes.float32) outputs = layer(inputs) model = keras.models.Model(inputs, outputs) model.compile('rmsprop', loss='mse') model.fit(x_train, y_train, epochs=epoch) model.evaluate(x_train, y_train) model.predict(x_train) @parameterized.named_parameters( # test_name, use_bias, bias_initializer ('normal', True, 'zeros'), ('no_bias', False, 'zeros'), ('random_bias', True, 'random_uniform'), ) def test_lstm_model_save_load(self, use_bias, bias_initializer): temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir) h5_path = os.path.join(temp_dir, 'test.h5') batch = 10 timestep = 3 input_dim = 5 units = 2 x = np.random.random((batch, timestep, input_dim)) def build_model(): inputs = keras.layers.Input( shape=[timestep, input_dim], dtype=dtypes.float32) layer = rnn.LSTM( units, use_bias=use_bias, bias_initializer=bias_initializer) output = layer(inputs) return keras.models.Model(inputs, output), layer model, layer = build_model() y_ref = model.predict(x) model.save_weights(h5_path) cloned_model, new_layer = build_model() cloned_model.load_weights(h5_path) y = cloned_model.predict(x) self.assertAllClose(y, y_ref) self.assertAllClose(layer.get_weights(),
new_layer.get_weights()) def test_lstm_output_on_multiple_kernel(self): input_shape = 10 rnn_state_size = 8 timestep = 4 batch = 100 x_train = np.random.random((batch, timestep, input_shape)) inputs = keras.layers.Input( shape=[timestep, input_shape], dtype=dtypes.float32) with test_util.device(use_gpu=False): layer = rnn.LSTM(rnn_state_size) output = layer(inputs) cpu_model = keras.models.Model(inputs, output) weights = cpu_model.get_weights() y_1 = cpu_model.predict(x_train) with test_util.device(use_gpu=True): layer = rnn.LSTM(rnn_state_size) output = layer(inputs) gpu_model = keras.models.Model(inputs, output) gpu_model.set_weights(weights) y_2 = gpu_model.predict(x_train) # Note that CuDNN uses 'sigmoid' as the recurrent activation, so LSTM V2 # defaults to 'sigmoid'. Construct the canonical LSTM with sigmoid to achieve # the same output. with test_util.device(use_gpu=True): layer = rnn_v1.LSTM(rnn_state_size, recurrent_activation='sigmoid') output = layer(inputs) canonical_model = keras.models.Model(inputs, output) # Remove the extra cudnn bias since canonical lstm will not use it. canonical_model.set_weights(weights[:3]) y_3 = canonical_model.predict(x_train) self.assertAllClose(y_1, y_2) self.assertAllClose(y_2, y_3) def test_return_sequences_LSTM(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( rnn.LSTM, kwargs={ 'units': units, 'return_sequences': True }, input_shape=(num_samples, timesteps, embedding_dim)) def test_float64_LSTM(self): if test.is_built_with_rocm(): self.skipTest("Double type is not yet supported in ROCm") num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( rnn.LSTM, kwargs={ 'units': units, 'return_sequences': True, 'dtype': 'float64' }, input_shape=(num_samples, timesteps, embedding_dim), input_dtype='float64') def test_regularizers_LSTM(self): embedding_dim = 4 layer_class = rnn.LSTM layer = layer_class( 5, return_sequences=False, weights=None, input_shape=(None, embedding_dim), kernel_regularizer=keras.regularizers.l1(0.01), recurrent_regularizer=keras.regularizers.l1(0.01), bias_regularizer='l2', activity_regularizer='l1') layer.build((None, None, 2)) self.assertEqual(len(layer.losses), 3) x = keras.backend.variable(np.ones((2, 3, 2))) layer(x) if context.executing_eagerly(): self.assertEqual(len(layer.losses), 4) else: self.assertEqual(len(layer.get_losses_for(x)), 1) def test_statefulness_LSTM(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 layer_class = rnn.LSTM model = keras.models.Sequential() model.add( keras.layers.Embedding( 4, embedding_dim, mask_zero=True, input_length=timesteps, batch_input_shape=(num_samples, timesteps))) layer = layer_class( units, return_sequences=False, stateful=True, weights=None) model.add(layer) model.compile( optimizer=gradient_descent.GradientDescentOptimizer(0.01), loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) out1 = model.predict(np.ones((num_samples, timesteps))) self.assertEqual(out1.shape, (num_samples, units)) # train once so that the states change model.train_on_batch( np.ones((num_samples, timesteps)), np.ones((num_samples, units))) out2 = model.predict(np.ones((num_samples, timesteps))) # if the state is not reset, output should be different self.assertNotEqual(out1.max(), out2.max()) # check that output changes after states are reset # (even though the model itself didn't change) layer.reset_states() out3 = model.predict(np.ones((num_samples,
timesteps))) self.assertNotEqual(out2.max(), out3.max()) # check that container-level reset_states() works model.reset_states() out4 = model.predict(np.ones((num_samples, timesteps))) self.assertAllClose(out3, out4, atol=1e-5) # check that the call to `predict` updated the states out5 = model.predict(np.ones((num_samples, timesteps))) self.assertNotEqual(out4.max(), out5.max()) # Check masking layer.reset_states() left_padded_input = np.ones((num_samples, timesteps)) left_padded_input[0, :1] = 0 left_padded_input[1, :2] = 0 out6 = model.predict(left_padded_input) layer.reset_states() right_padded_input = np.ones((num_samples, timesteps)) right_padded_input[0, -1:] = 0 right_padded_input[1, -2:] = 0 out7 = model.predict(right_padded_input) layer.reset_states() mix_padded_input = np.ones((num_samples, timesteps)) mix_padded_input[0, 1] = 0 mix_padded_input[1, 0] = 0 mix_padded_input[1, 2] = 0 out8 = model.predict(mix_padded_input) self.assertAllClose(out7, out6, atol=1e-5) self.assertAllClose(out8, out7, atol=1e-5) def test_stateful_LSTM_training(self): # See b/123587692 for more context. vocab_size = 20 embedding_dim = 10 batch_size = 8 timestep = 12 units = 5 x = np.random.randint(0, vocab_size, size=(batch_size, timestep)) y = np.random.randint(0, vocab_size, size=(batch_size, timestep)) model = keras.Sequential([ keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, timestep]), rnn.LSTM(units, return_sequences=True, stateful=True), keras.layers.Dense(vocab_size) ]) model.compile( optimizer='adam', loss='sparse_categorical_crossentropy', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, epochs=1, shuffle=False) def test_dropout_LSTM(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( rnn.LSTM, kwargs={ 'units': units, 'dropout': 0.1, 'recurrent_dropout': 0.1 }, input_shape=(num_samples, timesteps, embedding_dim)) def test_bidirectional(self): batch = 128 timestep = 20 vocab_size = 1000 model = keras.Sequential([ keras.layers.Embedding(vocab_size, 64), keras.layers.Bidirectional(rnn.LSTM( 64, return_sequences=True)), keras.layers.Bidirectional(rnn.LSTM(32)), keras.layers.Dense(64, activation='relu'), keras.layers.Dense(1, activation='sigmoid') ]) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) x = np.random.randint(0, vocab_size, size=(batch, timestep)) y = np.random.randint(0, 2, size=(batch,)) model.fit(x, y, epochs=1, shuffle=False) model.evaluate(x, y) model.predict(x) @test_util.run_v2_only def test_explicit_device_with_go_backward_and_mask(self): batch_size = 8 timestep = 7 masksteps = 5 units = 4 inputs = np.random.randn(batch_size, timestep, units).astype(np.float32) mask = np.ones((batch_size, timestep)).astype(np.bool) mask[:, masksteps:] = 0 # Test for V1 behavior. lstm_v1 = rnn_v1.LSTM(units, return_sequences=True, go_backwards=True) with test_util.device(use_gpu=True): outputs_masked_v1 = lstm_v1(inputs, mask=constant_op.constant(mask)) outputs_trimmed_v1 = lstm_v1(inputs[:, :masksteps]) self.assertAllClose(outputs_masked_v1[:, -masksteps:], outputs_trimmed_v1) # Test for V2 behavior.
lstm = rnn.LSTM(units, return_sequences=True, go_backwards=True) with test_util.device(use_gpu=True): outputs_masked = lstm(inputs, mask=constant_op.constant(mask)) outputs_trimmed = lstm(inputs[:, :masksteps]) self.assertAllClose(outputs_masked[:, -masksteps:], outputs_trimmed) @test_util.run_deprecated_v1 def test_v1_session_behavior(self): # See b/139132348 for more details. x = np.random.uniform(size=(100, 4, 8)) y = np.random.uniform(size=(100, 1)) dataset = dataset_ops.Dataset.from_tensor_slices( (x, y)).shuffle(100).batch(32) inp = keras.layers.Input(shape=(4, 8)) layer = rnn.LSTM(1)(inp) layer = keras.layers.Dense(1)(layer) model = keras.models.Model(inp, layer) model.compile(loss='mse', optimizer='sgd') model.fit(dataset) @keras_parameterized.run_all_keras_modes(config=_config) class LSTMGraphRewriteTest(keras_parameterized.TestCase): input_shape = 10 output_shape = 8 rnn_state_size = 8 timestep = 4 batch = 100 epoch = 1 def _test_runtime_with_model(self, model): (x_train, y_train), _ = testing_utils.get_test_data( train_samples=self.batch, test_samples=0, input_shape=(self.timestep, self.input_shape), num_classes=self.output_shape) y_train = keras.utils.to_categorical(y_train, self.output_shape) model.compile( optimizer='sgd', loss=['categorical_crossentropy', None], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) existing_loss = 0 for _ in range(self.epoch): history = model.fit(x_train, y_train) loss_value = history.history['loss'][0] self.assertNotEqual(existing_loss, loss_value) existing_loss = loss_value _, runtime_value = model.predict(x_train) if test.is_gpu_available(): self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU) else: self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU) @test_util.run_v2_only def test_LSTM_runtime(self): layer = rnn.LSTM(self.rnn_state_size, return_runtime=True) inputs = keras.layers.Input( shape=[self.timestep, self.input_shape], dtype=dtypes.float32) outputs, runtime = layer(inputs) # Expand the runtime so that it is a 1D tensor instead of scalar. # TF model does not work with scalar model output, especially during # aggregation. runtime = keras.layers.Lambda( lambda x: array_ops.expand_dims(x, axis=-1))(runtime) model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime]) self._test_runtime_with_model(model) @test_util.run_v2_only def test_LSTM_runtime_with_mask(self): # Masking will affect which backend is selected based on whether the mask # is strictly right padded. layer = rnn.LSTM(self.rnn_state_size, return_runtime=True) inputs = keras.layers.Input( shape=[self.timestep, self.input_shape], dtype=dtypes.float32) masked_inputs = keras.layers.Masking()(inputs) outputs, runtime = layer(masked_inputs) # Expand the runtime so that it is a 1D tensor instead of scalar. # TF model does not work with scalar model output, especially during # aggregation.
runtime = keras.layers.Lambda( lambda x: array_ops.expand_dims(x, axis=-1))(runtime) model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime]) (x_train, y_train), _ = testing_utils.get_test_data( train_samples=self.batch, test_samples=0, input_shape=(self.timestep, self.input_shape), num_classes=self.output_shape) y_train = keras.utils.to_categorical(y_train, self.output_shape) model.compile( optimizer='sgd', loss=['categorical_crossentropy', None], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x_train, y_train) # Verify unpadded data. _, runtime_value = model.predict(x_train) if test.is_gpu_available(): self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU) else: self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU) # Update x/y to be right padded by setting the last timestep to 0 x_train[:, -1, :] = 0 y_train[:, -1] = 0 _, runtime_value = model.predict(x_train) if test.is_gpu_available(): self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU) else: self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU) # Further update x/y to be mix padded (masks in the middle), and verify # only cpu kernel can be selected. x_train[:, -3, :] = 0 y_train[:, -3] = 0 _, runtime_value = model.predict(x_train) self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU) @test_util.run_v2_only def test_LSTM_runtime_with_cond(self): # This test is to demonstrate the graph rewrite of grappler plugin under # the condition that the function returns a different number of internal # states. layer = rnn.LSTM(self.rnn_state_size, return_runtime=True) inputs = keras.layers.Input( shape=[self.timestep, self.input_shape], dtype=dtypes.float32) zeros = array_ops.zeros([self.batch, self.output_shape]) dummy_runtime = rnn._runtime(rnn._RUNTIME_UNKNOWN) a = constant_op.constant(0) b = constant_op.constant(1) # Will always run the lstm layer. outputs, runtime = control_flow_ops.cond( gen_math_ops.less(a, b), lambda: layer(inputs), lambda: (zeros, dummy_runtime)) # Expand the runtime so that it is a 1D tensor instead of scalar. # TF model does not work with scalar model output, especially during # aggregation.
runtime = keras.layers.Lambda( lambda x: array_ops.expand_dims(x, axis=-1))(runtime) model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime]) self._test_runtime_with_model(model) class LSTMPerformanceTest(test.Benchmark): def _measure_performance(self, test_config, model, x_train, y_train): batch = test_config['batch'] epoch = test_config['epoch'] warmup_epoch = test_config['warmup_epoch'] # warm up the model model.fit(x_train, y_train, batch_size=batch, epochs=warmup_epoch) start_time = time.time() model.fit(x_train, y_train, batch_size=batch, epochs=epoch - warmup_epoch) end_time = time.time() return (end_time - start_time) / (epoch - warmup_epoch) def _time_performance_run_cudnn_lstm(self, test_config, x_train, y_train): # Get the performance number for standard Cudnn LSTM input_shape = test_config['input_shape'] rnn_state_size = test_config['rnn_state_size'] timestep = test_config['timestep'] cudnn_lstm_layer = keras.layers.CuDNNLSTM(rnn_state_size) inputs = keras.layers.Input( shape=[timestep, input_shape], dtype=dtypes.float32) outputs = cudnn_lstm_layer(inputs) model = keras.models.Model(inputs, outputs) model.compile('sgd', 'mse') sec_per_epoch = self._measure_performance( test_config, model, x_train, y_train) logging.info('Average performance for %s per epoch is: %s', 'CuDNN LSTM', sec_per_epoch) return sec_per_epoch def _time_performance_run_unified_lstm_gpu( self, test_config, x_train, y_train): # Get performance number for lstm_v2 with grappler swapping in the impl input_shape = test_config['input_shape'] rnn_state_size = test_config['rnn_state_size'] timestep = test_config['timestep'] layer = rnn.LSTM(rnn_state_size) inputs = keras.layers.Input( shape=[timestep, input_shape], dtype=dtypes.float32) outputs = layer(inputs) model = keras.models.Model(inputs, outputs) model.compile('sgd', 'mse') sec_per_epoch = self._measure_performance( test_config, model, x_train, y_train) logging.info('Average performance for %s per epoch is: %s', 'LSTM V2', sec_per_epoch) return sec_per_epoch def _time_performance_run_normal_lstm( self, test_config, x_train, y_train): # Get performance number for standard LSTM on GPU. input_shape = test_config['input_shape'] rnn_state_size = test_config['rnn_state_size'] timestep = test_config['timestep'] layer = rnn_v1.LSTM(rnn_state_size) inputs = keras.layers.Input( shape=[timestep, input_shape], dtype=dtypes.float32) outputs = layer(inputs) model = keras.models.Model(inputs, outputs) model.compile('sgd', 'mse') sec_per_epoch = self._measure_performance( test_config, model, x_train, y_train) logging.info('Average performance for %s per epoch is: %s', 'Normal LSTM', sec_per_epoch) return sec_per_epoch def _benchmark_performance_with_standard_cudnn_impl(self): if not test.is_gpu_available(): self.skipTest('performance test will only run on GPU') mode = 'eager' if context.executing_eagerly() else 'graph' batch = 64 num_batch = 10 test_config = { 'input_shape': 128, 'rnn_state_size': 64, 'output_shape': 64, 'timestep': 50, 'batch': batch, 'epoch': 20, # The performance for warmup epoch is ignored.
'warmup_epoch': 1, } (x_train, y_train), _ = testing_utils.get_test_data( train_samples=(batch * num_batch), test_samples=0, input_shape=(test_config['timestep'], test_config['input_shape']), num_classes=test_config['output_shape']) y_train = keras.utils.to_categorical(y_train, test_config['output_shape']) cudnn_sec_per_epoch = self._time_performance_run_cudnn_lstm( test_config, x_train, y_train) lstm_v2_sec_per_epoch = self._time_performance_run_unified_lstm_gpu( test_config, x_train, y_train) normal_lstm_sec_per_epoch = self._time_performance_run_normal_lstm( test_config, x_train, y_train) cudnn_vs_v2 = cudnn_sec_per_epoch / lstm_v2_sec_per_epoch v2_vs_normal = normal_lstm_sec_per_epoch / lstm_v2_sec_per_epoch self.report_benchmark(name='keras_cudnn_lstm_' + mode, wall_time=cudnn_sec_per_epoch, iters=test_config['epoch'], extras=test_config) self.report_benchmark(name='keras_lstm_v2_' + mode, wall_time=lstm_v2_sec_per_epoch, iters=test_config['epoch'], extras=test_config) self.report_benchmark(name='keras_canonical_lstm_' + mode, wall_time=normal_lstm_sec_per_epoch, iters=test_config['epoch'], extras=test_config) logging.info('Expect the performance of LSTM V2 to be within 80% of ' 'CuDNN LSTM, got {0:.2f}%'.format(cudnn_vs_v2 * 100)) logging.info('Expect the performance of LSTM V2 to be more than 5 times' ' that of the normal LSTM, got {0:.2f}'.format(v2_vs_normal)) def benchmark_performance_graph(self): with context.graph_mode(), session_lib.Session(config=_config): self._benchmark_performance_with_standard_cudnn_impl() def benchmark_performance_eager(self): with context.eager_mode(): self._benchmark_performance_with_standard_cudnn_impl() if __name__ == '__main__': test.main()
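# A simplified sketch of the cuDNN-eligibility rule exercised by
# `test_could_use_defun_backend` above: each parameterized case flips one
# knob away from the defaults, and any such deviation forces the generic
# kernel instead of the cuDNN-compatible one. This approximates the checks
# behind `layer.could_use_cudnn`; it is not the actual implementation in
# `recurrent_v2`, and the function name is illustrative only.
def _could_use_cudnn_sketch(activation='tanh', recurrent_activation='sigmoid',
                            recurrent_dropout=0., unroll=False, use_bias=True):
  return (activation == 'tanh' and recurrent_activation == 'sigmoid' and
          recurrent_dropout == 0. and not unroll and use_bias)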
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/lstm_v2_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras layers API.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Generic layers. # pylint: disable=g-bad-import-order from tensorflow.python.keras.engine.input_layer import Input from tensorflow.python.keras.engine.input_layer import InputLayer from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.engine.base_layer import Layer # Advanced activations. from tensorflow.python.keras.layers.advanced_activations import LeakyReLU from tensorflow.python.keras.layers.advanced_activations import PReLU from tensorflow.python.keras.layers.advanced_activations import ELU from tensorflow.python.keras.layers.advanced_activations import ReLU from tensorflow.python.keras.layers.advanced_activations import ThresholdedReLU from tensorflow.python.keras.layers.advanced_activations import Softmax # Convolution layers. from tensorflow.python.keras.layers.convolutional import Conv1D from tensorflow.python.keras.layers.convolutional import Conv2D from tensorflow.python.keras.layers.convolutional import Conv3D from tensorflow.python.keras.layers.convolutional import Conv2DTranspose from tensorflow.python.keras.layers.convolutional import Conv3DTranspose from tensorflow.python.keras.layers.convolutional import SeparableConv1D from tensorflow.python.keras.layers.convolutional import SeparableConv2D # Convolution layer aliases. from tensorflow.python.keras.layers.convolutional import Convolution1D from tensorflow.python.keras.layers.convolutional import Convolution2D from tensorflow.python.keras.layers.convolutional import Convolution3D from tensorflow.python.keras.layers.convolutional import Convolution2DTranspose from tensorflow.python.keras.layers.convolutional import Convolution3DTranspose from tensorflow.python.keras.layers.convolutional import SeparableConvolution1D from tensorflow.python.keras.layers.convolutional import SeparableConvolution2D from tensorflow.python.keras.layers.convolutional import DepthwiseConv2D # Image processing layers. from tensorflow.python.keras.layers.convolutional import UpSampling1D from tensorflow.python.keras.layers.convolutional import UpSampling2D from tensorflow.python.keras.layers.convolutional import UpSampling3D from tensorflow.python.keras.layers.convolutional import ZeroPadding1D from tensorflow.python.keras.layers.convolutional import ZeroPadding2D from tensorflow.python.keras.layers.convolutional import ZeroPadding3D from tensorflow.python.keras.layers.convolutional import Cropping1D from tensorflow.python.keras.layers.convolutional import Cropping2D from tensorflow.python.keras.layers.convolutional import Cropping3D # Core layers. 
from tensorflow.python.keras.layers.core import Masking from tensorflow.python.keras.layers.core import Dropout from tensorflow.python.keras.layers.core import SpatialDropout1D from tensorflow.python.keras.layers.core import SpatialDropout2D from tensorflow.python.keras.layers.core import SpatialDropout3D from tensorflow.python.keras.layers.core import Activation from tensorflow.python.keras.layers.core import Reshape from tensorflow.python.keras.layers.core import Permute from tensorflow.python.keras.layers.core import Flatten from tensorflow.python.keras.layers.core import RepeatVector from tensorflow.python.keras.layers.core import Lambda from tensorflow.python.keras.layers.core import Dense from tensorflow.python.keras.layers.core import ActivityRegularization # Dense Attention layers. from tensorflow.python.keras.layers.dense_attention import AdditiveAttention from tensorflow.python.keras.layers.dense_attention import Attention # Embedding layers. from tensorflow.python.keras.layers.embeddings import Embedding # Locally-connected layers. from tensorflow.python.keras.layers.local import LocallyConnected1D from tensorflow.python.keras.layers.local import LocallyConnected2D # Merge layers. from tensorflow.python.keras.layers.merge import Add from tensorflow.python.keras.layers.merge import Subtract from tensorflow.python.keras.layers.merge import Multiply from tensorflow.python.keras.layers.merge import Average from tensorflow.python.keras.layers.merge import Maximum from tensorflow.python.keras.layers.merge import Minimum from tensorflow.python.keras.layers.merge import Concatenate from tensorflow.python.keras.layers.merge import Dot from tensorflow.python.keras.layers.merge import add from tensorflow.python.keras.layers.merge import subtract from tensorflow.python.keras.layers.merge import multiply from tensorflow.python.keras.layers.merge import average from tensorflow.python.keras.layers.merge import maximum from tensorflow.python.keras.layers.merge import minimum from tensorflow.python.keras.layers.merge import concatenate from tensorflow.python.keras.layers.merge import dot # Noise layers. from tensorflow.python.keras.layers.noise import AlphaDropout from tensorflow.python.keras.layers.noise import GaussianNoise from tensorflow.python.keras.layers.noise import GaussianDropout # Normalization layers. from tensorflow.python.keras.layers.normalization import LayerNormalization from tensorflow.python.keras.layers.normalization import BatchNormalization from tensorflow.python.keras.layers.normalization_v2 import BatchNormalization as BatchNormalizationV2 # Kernelized layers. from tensorflow.python.keras.layers.kernelized import RandomFourierFeatures # Pooling layers. 
from tensorflow.python.keras.layers.pooling import MaxPooling1D from tensorflow.python.keras.layers.pooling import MaxPooling2D from tensorflow.python.keras.layers.pooling import MaxPooling3D from tensorflow.python.keras.layers.pooling import AveragePooling1D from tensorflow.python.keras.layers.pooling import AveragePooling2D from tensorflow.python.keras.layers.pooling import AveragePooling3D from tensorflow.python.keras.layers.pooling import GlobalAveragePooling1D from tensorflow.python.keras.layers.pooling import GlobalAveragePooling2D from tensorflow.python.keras.layers.pooling import GlobalAveragePooling3D from tensorflow.python.keras.layers.pooling import GlobalMaxPooling1D from tensorflow.python.keras.layers.pooling import GlobalMaxPooling2D from tensorflow.python.keras.layers.pooling import GlobalMaxPooling3D # Pooling layer aliases. from tensorflow.python.keras.layers.pooling import MaxPool1D from tensorflow.python.keras.layers.pooling import MaxPool2D from tensorflow.python.keras.layers.pooling import MaxPool3D from tensorflow.python.keras.layers.pooling import AvgPool1D from tensorflow.python.keras.layers.pooling import AvgPool2D from tensorflow.python.keras.layers.pooling import AvgPool3D from tensorflow.python.keras.layers.pooling import GlobalAvgPool1D from tensorflow.python.keras.layers.pooling import GlobalAvgPool2D from tensorflow.python.keras.layers.pooling import GlobalAvgPool3D from tensorflow.python.keras.layers.pooling import GlobalMaxPool1D from tensorflow.python.keras.layers.pooling import GlobalMaxPool2D from tensorflow.python.keras.layers.pooling import GlobalMaxPool3D # Recurrent layers. from tensorflow.python.keras.layers.recurrent import RNN from tensorflow.python.keras.layers.recurrent import AbstractRNNCell from tensorflow.python.keras.layers.recurrent import StackedRNNCells from tensorflow.python.keras.layers.recurrent import SimpleRNNCell from tensorflow.python.keras.layers.recurrent import PeepholeLSTMCell from tensorflow.python.keras.layers.recurrent import SimpleRNN from tensorflow.python.keras.layers.recurrent import GRU from tensorflow.python.keras.layers.recurrent import GRUCell from tensorflow.python.keras.layers.recurrent import LSTM from tensorflow.python.keras.layers.recurrent import LSTMCell from tensorflow.python.keras.layers.recurrent_v2 import GRU as GRU_v2 from tensorflow.python.keras.layers.recurrent_v2 import GRUCell as GRUCell_v2 from tensorflow.python.keras.layers.recurrent_v2 import LSTM as LSTM_v2 from tensorflow.python.keras.layers.recurrent_v2 import LSTMCell as LSTMCell_v2 # Convolutional-recurrent layers. from tensorflow.python.keras.layers.convolutional_recurrent import ConvLSTM2D # CuDNN recurrent layers. from tensorflow.python.keras.layers.cudnn_recurrent import CuDNNLSTM from tensorflow.python.keras.layers.cudnn_recurrent import CuDNNGRU # Wrapper functions from tensorflow.python.keras.layers.wrappers import Wrapper from tensorflow.python.keras.layers.wrappers import Bidirectional from tensorflow.python.keras.layers.wrappers import TimeDistributed # # RNN Cell wrappers. from tensorflow.python.keras.layers.rnn_cell_wrapper_v2 import DeviceWrapper from tensorflow.python.keras.layers.rnn_cell_wrapper_v2 import DropoutWrapper from tensorflow.python.keras.layers.rnn_cell_wrapper_v2 import ResidualWrapper # Serialization functions from tensorflow.python.keras.layers.serialization import deserialize from tensorflow.python.keras.layers.serialization import serialize del absolute_import del division del print_function
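# A minimal sketch of the `serialize`/`deserialize` pair imported above,
# assuming the standard Keras config round-trip: `serialize` reduces a layer
# to a `{'class_name': ..., 'config': ...}` dict, and `deserialize` rebuilds
# a fresh, unbuilt layer from that dict. Guarded so it only runs when the
# module is executed directly.
if __name__ == '__main__':
  dense = Dense(4, activation='relu')
  config = serialize(dense)    # e.g. {'class_name': 'Dense', 'config': {...}}
  restored = deserialize(config)
  assert restored.get_config() == dense.get_config()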
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/__init__.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Core Keras layers. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import sys import types as python_types import warnings import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import activations from tensorflow.python.keras import backend as K from tensorflow.python.keras import constraints from tensorflow.python.keras import initializers from tensorflow.python.keras import regularizers from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.utils import conv_utils from tensorflow.python.keras.utils import generic_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import standard_ops from tensorflow.python.ops import variable_scope from tensorflow.python.util import nest from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import keras_export @keras_export('keras.layers.Masking') class Masking(Layer): """Masks a sequence by using a mask value to skip timesteps. For each timestep in the input tensor (dimension #1 in the tensor), if all values in the input tensor at that timestep are equal to `mask_value`, then the timestep will be masked (skipped) in all downstream layers (as long as they support masking). If any downstream layer does not support masking yet receives such an input mask, an exception will be raised. Example: Consider a Numpy data array `x` of shape `(samples, timesteps, features)`, to be fed to an LSTM layer. You want to mask timestep #3 and #5 because you lack data for these timesteps. You can: - Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.` - Insert a `Masking` layer with `mask_value=0.` before the LSTM layer: ```python model = Sequential() model.add(Masking(mask_value=0., input_shape=(timesteps, features))) model.add(LSTM(32)) ``` """ def __init__(self, mask_value=0., **kwargs): super(Masking, self).__init__(**kwargs) self.supports_masking = True self.mask_value = mask_value self._compute_output_and_mask_jointly = True def compute_mask(self, inputs, mask=None): return K.any(math_ops.not_equal(inputs, self.mask_value), axis=-1) def call(self, inputs): boolean_mask = K.any( math_ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True) outputs = inputs * math_ops.cast(boolean_mask, inputs.dtype) # Compute the mask and outputs simultaneously. 
outputs._keras_mask = array_ops.squeeze(boolean_mask, axis=-1) # pylint: disable=protected-access return outputs def compute_output_shape(self, input_shape): return input_shape def get_config(self): config = {'mask_value': self.mask_value} base_config = super(Masking, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.Dropout') class Dropout(Layer): """Applies Dropout to the input. Dropout consists in randomly setting a fraction `rate` of input units to 0 at each update during training time, which helps prevent overfitting. Arguments: rate: Float between 0 and 1. Fraction of the input units to drop. noise_shape: 1D integer tensor representing the shape of the binary dropout mask that will be multiplied with the input. For instance, if your inputs have shape `(batch_size, timesteps, features)` and you want the dropout mask to be the same for all timesteps, you can use `noise_shape=(batch_size, 1, features)`. seed: A Python integer to use as random seed. Call arguments: inputs: Input tensor (of any rank). training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). """ def __init__(self, rate, noise_shape=None, seed=None, **kwargs): super(Dropout, self).__init__(**kwargs) self.rate = rate self.noise_shape = noise_shape self.seed = seed self.supports_masking = True def _get_noise_shape(self, inputs): # Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`, # which will override `self.noise_shape`, and allows for custom noise # shapes with dynamically sized inputs. if self.noise_shape is None: return None concrete_inputs_shape = array_ops.shape(inputs) noise_shape = [] for i, value in enumerate(self.noise_shape): noise_shape.append(concrete_inputs_shape[i] if value is None else value) return ops.convert_to_tensor(noise_shape) def call(self, inputs, training=None): if training is None: training = K.learning_phase() def dropped_inputs(): return nn.dropout( inputs, noise_shape=self._get_noise_shape(inputs), seed=self.seed, rate=self.rate) output = tf_utils.smart_cond(training, dropped_inputs, lambda: array_ops.identity(inputs)) return output def compute_output_shape(self, input_shape): return input_shape def get_config(self): config = { 'rate': self.rate, 'noise_shape': self.noise_shape, 'seed': self.seed } base_config = super(Dropout, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.SpatialDropout1D') class SpatialDropout1D(Dropout): """Spatial 1D version of Dropout. This version performs the same function as Dropout, however it drops entire 1D feature maps instead of individual elements. If adjacent frames within feature maps are strongly correlated (as is normally the case in early convolution layers) then regular dropout will not regularize the activations and will otherwise just result in an effective learning rate decrease. In this case, SpatialDropout1D will help promote independence between feature maps and should be used instead. Arguments: rate: Float between 0 and 1. Fraction of the input units to drop. Call arguments: inputs: A 3D tensor. training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). Input shape: 3D tensor with shape: `(samples, timesteps, channels)` Output shape: Same as input. 
References: - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280) """ def __init__(self, rate, **kwargs): super(SpatialDropout1D, self).__init__(rate, **kwargs) self.input_spec = InputSpec(ndim=3) def _get_noise_shape(self, inputs): input_shape = array_ops.shape(inputs) noise_shape = (input_shape[0], 1, input_shape[2]) return noise_shape @keras_export('keras.layers.SpatialDropout2D') class SpatialDropout2D(Dropout): """Spatial 2D version of Dropout. This version performs the same function as Dropout, however it drops entire 2D feature maps instead of individual elements. If adjacent pixels within feature maps are strongly correlated (as is normally the case in early convolution layers) then regular dropout will not regularize the activations and will otherwise just result in an effective learning rate decrease. In this case, SpatialDropout2D will help promote independence between feature maps and should be used instead. Arguments: rate: Float between 0 and 1. Fraction of the input units to drop. data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension (the depth) is at index 1, in 'channels_last' mode it is at index 3. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Call arguments: inputs: A 4D tensor. training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). Input shape: 4D tensor with shape: `(samples, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, rows, cols, channels)` if data_format='channels_last'. Output shape: Same as input. References: - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280) """ def __init__(self, rate, data_format=None, **kwargs): super(SpatialDropout2D, self).__init__(rate, **kwargs) if data_format is None: data_format = K.image_data_format() if data_format not in {'channels_last', 'channels_first'}: raise ValueError('data_format must be in ' '{"channels_last", "channels_first"}') self.data_format = data_format self.input_spec = InputSpec(ndim=4) def _get_noise_shape(self, inputs): input_shape = array_ops.shape(inputs) if self.data_format == 'channels_first': return (input_shape[0], input_shape[1], 1, 1) elif self.data_format == 'channels_last': return (input_shape[0], 1, 1, input_shape[3]) @keras_export('keras.layers.SpatialDropout3D') class SpatialDropout3D(Dropout): """Spatial 3D version of Dropout. This version performs the same function as Dropout, however it drops entire 3D feature maps instead of individual elements. If adjacent voxels within feature maps are strongly correlated (as is normally the case in early convolution layers) then regular dropout will not regularize the activations and will otherwise just result in an effective learning rate decrease. In this case, SpatialDropout3D will help promote independence between feature maps and should be used instead. Arguments: rate: Float between 0 and 1. Fraction of the input units to drop. data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension (the depth) is at index 1, in 'channels_last' mode it is at index 4. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Call arguments: inputs: A 5D tensor.
training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). Input shape: 5D tensor with shape: `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first' or 5D tensor with shape: `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'. Output shape: Same as input. References: - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280) """ def __init__(self, rate, data_format=None, **kwargs): super(SpatialDropout3D, self).__init__(rate, **kwargs) if data_format is None: data_format = K.image_data_format() if data_format not in {'channels_last', 'channels_first'}: raise ValueError('data_format must be in ' '{"channels_last", "channels_first"}') self.data_format = data_format self.input_spec = InputSpec(ndim=5) def _get_noise_shape(self, inputs): input_shape = array_ops.shape(inputs) if self.data_format == 'channels_first': return (input_shape[0], input_shape[1], 1, 1, 1) elif self.data_format == 'channels_last': return (input_shape[0], 1, 1, 1, input_shape[4]) @keras_export('keras.layers.Activation') class Activation(Layer): """Applies an activation function to an output. Arguments: activation: Activation function, such as `tf.nn.relu`, or string name of built-in activation function, such as "relu". Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. """ def __init__(self, activation, **kwargs): super(Activation, self).__init__(**kwargs) self.supports_masking = True self.activation = activations.get(activation) def call(self, inputs): return self.activation(inputs) def compute_output_shape(self, input_shape): return input_shape def get_config(self): config = {'activation': activations.serialize(self.activation)} base_config = super(Activation, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.Reshape') class Reshape(Layer): """Reshapes an output to a certain shape. Arguments: target_shape: Target shape. Tuple of integers, does not include the samples dimension (batch size). Input shape: Arbitrary, although all dimensions in the input shape must be fixed. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: `(batch_size,) + target_shape` Example: ```python # as first layer in a Sequential model model = Sequential() model.add(Reshape((3, 4), input_shape=(12,))) # now: model.output_shape == (None, 3, 4) # note: `None` is the batch dimension # as intermediate layer in a Sequential model model.add(Reshape((6, 2))) # now: model.output_shape == (None, 6, 2) # also supports shape inference using `-1` as dimension model.add(Reshape((-1, 2, 2))) # now: model.output_shape == (None, None, 2, 2) ``` """ def __init__(self, target_shape, **kwargs): super(Reshape, self).__init__(**kwargs) self.target_shape = tuple(target_shape) def _fix_unknown_dimension(self, input_shape, output_shape): """Find and replace a missing dimension in an output shape.
This is a near direct port of the internal Numpy function `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c` Arguments: input_shape: Shape of array being reshaped output_shape: Desired shape of the array with at most a single -1 which indicates a dimension that should be derived from the input shape. Returns: The new output shape with a -1 replaced with its computed value. Raises: ValueError: If the total array size of the output_shape is different than the input_shape, or more than one unknown dimension is specified. """ output_shape = list(output_shape) msg = 'total size of new array must be unchanged' known, unknown = 1, None for index, dim in enumerate(output_shape): if dim < 0: if unknown is None: unknown = index else: raise ValueError('Can only specify one unknown dimension.') else: known *= dim original = np.prod(input_shape, dtype=int) if unknown is not None: if known == 0 or original % known != 0: raise ValueError(msg) output_shape[unknown] = original // known elif original != known: raise ValueError(msg) return output_shape def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if None in input_shape[1:]: output_shape = [input_shape[0]] # input shape (partially) unknown? replace -1's with None's output_shape += tuple(s if s != -1 else None for s in self.target_shape) else: output_shape = [input_shape[0]] output_shape += self._fix_unknown_dimension(input_shape[1:], self.target_shape) return tensor_shape.TensorShape(output_shape) def call(self, inputs): return array_ops.reshape(inputs, (array_ops.shape(inputs)[0],) + self.target_shape) def get_config(self): config = {'target_shape': self.target_shape} base_config = super(Reshape, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.Permute') class Permute(Layer): """Permutes the dimensions of the input according to a given pattern. Useful for e.g. connecting RNNs and convnets together. Example: ```python model = Sequential() model.add(Permute((2, 1), input_shape=(10, 64))) # now: model.output_shape == (None, 64, 10) # note: `None` is the batch dimension ``` Arguments: dims: Tuple of integers. Permutation pattern, does not include the samples dimension. Indexing starts at 1. For instance, `(2, 1)` permutes the first and second dimensions of the input. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same as the input shape, but with the dimensions re-ordered according to the specified pattern. """ def __init__(self, dims, **kwargs): super(Permute, self).__init__(**kwargs) self.dims = tuple(dims) if sorted(dims) != list(range(1, len(dims) + 1)): raise ValueError( 'Invalid permutation `dims` for Permute Layer: %s. ' 'The set of indices in `dims` must be consecutive and start from 1.' 
% (dims,)) self.input_spec = InputSpec(ndim=len(self.dims) + 1) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() output_shape = copy.copy(input_shape) for i, dim in enumerate(self.dims): target_dim = input_shape[dim] output_shape[i + 1] = target_dim return tensor_shape.TensorShape(output_shape) def call(self, inputs): return array_ops.transpose(inputs, perm=(0,) + self.dims) def get_config(self): config = {'dims': self.dims} base_config = super(Permute, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.Flatten') class Flatten(Layer): """Flattens the input. Does not affect the batch size. If inputs are shaped `(batch,)` without a channel dimension, then flattening adds an extra channel dimension and output shapes are `(batch, 1)`. Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, ..., channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, ...)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Example: ```python model = Sequential() model.add(Conv2D(64, (3, 3), padding='same', data_format='channels_first', input_shape=(3, 32, 32))) # now: model.output_shape == (None, 64, 32, 32) model.add(Flatten()) # now: model.output_shape == (None, 65536) ``` """ def __init__(self, data_format=None, **kwargs): super(Flatten, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) self.input_spec = InputSpec(min_ndim=1) def call(self, inputs): if (self.data_format == 'channels_first' and K.ndim(inputs) is not None and K.ndim(inputs) > 1): permutation = [0] permutation.extend([i for i in range(2, K.ndim(inputs))]) permutation.append(1) inputs = array_ops.transpose(inputs, perm=permutation) input_shape = inputs.shape if input_shape[1:].is_fully_defined(): outputs = array_ops.reshape( inputs, (-1, tensor_shape.dimension_value(int(np.prod(input_shape[1:]))))) else: outputs = array_ops.reshape( inputs, (tensor_shape.dimension_value(inputs.shape[0]) or array_ops.shape(inputs)[0], -1)) if not context.executing_eagerly(): outputs.set_shape(self.compute_output_shape(inputs.shape)) return outputs def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if not input_shape: output_shape = tensor_shape.TensorShape([1]) output_shape = [input_shape[0]] if all(input_shape[1:]): output_shape += [np.prod(input_shape[1:])] else: output_shape += [None] return tensor_shape.TensorShape(output_shape) def get_config(self): config = {'data_format': self.data_format} base_config = super(Flatten, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.RepeatVector') class RepeatVector(Layer): """Repeats the input n times. Example: ```python model = Sequential() model.add(Dense(32, input_dim=32)) # now: model.output_shape == (None, 32) # note: `None` is the batch dimension model.add(RepeatVector(3)) # now: model.output_shape == (None, 3, 32) ``` Arguments: n: Integer, repetition factor. Input shape: 2D tensor of shape `(num_samples, features)`. Output shape: 3D tensor of shape `(num_samples, n, features)`.
""" def __init__(self, n, **kwargs): super(RepeatVector, self).__init__(**kwargs) self.n = n self.input_spec = InputSpec(ndim=2) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() return tensor_shape.TensorShape([input_shape[0], self.n, input_shape[1]]) def call(self, inputs): return K.repeat(inputs, self.n) def get_config(self): config = {'n': self.n} base_config = super(RepeatVector, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.Lambda') class Lambda(Layer): """Wraps arbitrary expressions as a `Layer` object. The `Lambda` layer exists so that arbitrary TensorFlow functions can be used when constructing `Sequential` and Functional API models. `Lambda` layers are best suited for simple operations or quick experimentation. For more advanced use cases, subclassing `keras.layers.Layer` is preferred. One reason for this is that when saving a Model, `Lambda` layers are saved by serializing the Python bytecode, whereas subclassed Layers are saved via overriding their `get_config` method and are thus more portable. Models that rely on subclassed Layers are also often easier to visualize and reason about. Examples: ```python # add a x -> x^2 layer model.add(Lambda(lambda x: x ** 2)) ``` ```python # add a layer that returns the concatenation # of the positive part of the input and # the opposite of the negative part def antirectifier(x): x -= K.mean(x, axis=1, keepdims=True) x = K.l2_normalize(x, axis=1) pos = K.relu(x) neg = K.relu(-x) return K.concatenate([pos, neg], axis=1) model.add(Lambda(antirectifier)) ``` Variables can be created within a `Lambda` layer. Like with other layers, these variables will be created only once and reused if the `Lambda` layer is called on new inputs. If creating more than one variable in a given `Lambda` instance, be sure to use a different name for each variable. Note that calling sublayers from within a `Lambda` is not supported. Example of variable creation: ```python def linear_transform(x): v1 = tf.Variable(1., name='multiplier') v2 = tf.Variable(0., name='bias') return x*v1 + v2 linear_layer = Lambda(linear_transform) model.add(linear_layer) model.add(keras.layers.Dense(10, activation='relu')) model.add(linear_layer) # Reuses existing Variables ``` Note that creating two instances of `Lambda` using the same function will *not* share Variables between the two instances. Each instance of `Lambda` will create and manage its own weights. Arguments: function: The function to be evaluated. Takes input tensor as first argument. output_shape: Expected output shape from function. This argument can be inferred if not explicitly provided. Can be a tuple or function. If a tuple, it only specifies the first dimension onward; sample dimension is assumed either the same as the input: `output_shape = (input_shape[0], ) + output_shape` or, the input is `None` and the sample dimension is also `None`: `output_shape = (None, ) + output_shape` If a function, it specifies the entire shape as a function of the input shape: `output_shape = f(input_shape)` mask: Either None (indicating no masking) or a callable with the same signature as the `compute_mask` layer method, or a tensor that will be returned as output mask regardless what the input is. arguments: Optional dictionary of keyword arguments to be passed to the function. Input shape: Arbitrary. 
Use the keyword argument input_shape (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Specified by `output_shape` argument """ def __init__(self, function, output_shape=None, mask=None, arguments=None, **kwargs): super(Lambda, self).__init__(**kwargs) self.function = function self.arguments = arguments if arguments else {} if mask is not None: self.supports_masking = True self.mask = mask self._output_shape = output_shape self._variable_dict = {} # These attributes are inherited from `Layer`. self._trainable_weights = [] self._non_trainable_weights = [] function_args = tf_inspect.getfullargspec(self.function).args self._fn_expects_training_arg = 'training' in function_args self._fn_expects_mask_arg = 'mask' in function_args @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): if self._output_shape is None: # Make use of existing autocomputation but provide Lambda-specific # error message. This is always safe to run even when the outer context # is Graph mode because Lambda layers don't have side effects such as # `add_loss`. with context.eager_mode(): try: return super(Lambda, self).compute_output_shape(input_shape) except NotImplementedError: raise NotImplementedError( 'We could not automatically infer the shape of the Lambda\'s ' 'output. Please specify `output_shape` for this Lambda.') if callable(self._output_shape): output_shapes = self._output_shape(input_shape) return tf_utils.convert_shapes(output_shapes, to_tuples=False) # Output shapes are passed directly and don't include batch dimension. input_tensor_shape = tf_utils.convert_shapes(input_shape, to_tuples=False) batch_size = nest.flatten(input_tensor_shape)[0][0] if input_shape else None def _add_batch(shape): return tensor_shape.TensorShape([batch_size] + shape.as_list()) output_shapes = tf_utils.convert_shapes(self._output_shape, to_tuples=False) return nest.map_structure(_add_batch, output_shapes) def call(self, inputs, mask=None, training=None): arguments = self.arguments if self._fn_expects_mask_arg: arguments['mask'] = mask if self._fn_expects_training_arg: arguments['training'] = training with variable_scope.variable_creator_scope(self._variable_creator): return self.function(inputs, **arguments) def _variable_creator(self, next_creator, **kwargs): name = kwargs['name'] if name in self._variable_dict: return self._variable_dict[name] var = next_creator(**kwargs) self._variable_dict[name] = var if var.trainable: self._trainable_weights.append(var) else: self._non_trainable_weights.append(var) K.track_variable(var) return var def compute_mask(self, inputs, mask=None): if callable(self.mask): return self.mask(inputs, mask) return self.mask def get_config(self): function_config = self._serialize_function_to_config(self.function) output_shape_config = self._serialize_function_to_config(self._output_shape, allow_raw=True) config = { 'function': function_config[0], 'function_type': function_config[1], 'module': function_config[2], 'output_shape': output_shape_config[0], 'output_shape_type': output_shape_config[1], 'output_shape_module': output_shape_config[2], } if self.mask is not None: mask_config = self._serialize_function_to_config(self.mask) config.update({ 'mask': mask_config[0], 'mask_type': mask_config[1], 'mask_module': mask_config[2] }) config['arguments'] = self.arguments base_config = super(Lambda, self).get_config() return dict(list(base_config.items()) + list(config.items())) def _serialize_function_to_config(self, 
inputs, allow_raw=False): if isinstance(inputs, python_types.LambdaType): output = generic_utils.func_dump(inputs) output_type = 'lambda' module = inputs.__module__ elif callable(inputs): output = inputs.__name__ output_type = 'function' module = inputs.__module__ elif allow_raw: output = inputs output_type = 'raw' module = None else: raise ValueError( 'Invalid input for serialization, type: %s ' % type(inputs)) return output, output_type, module @classmethod def from_config(cls, config, custom_objects=None): config = config.copy() function = cls._parse_function_from_config( config, custom_objects, 'function', 'module', 'function_type') output_shape = cls._parse_function_from_config( config, custom_objects, 'output_shape', 'output_shape_module', 'output_shape_type') if 'mask' in config: mask = cls._parse_function_from_config( config, custom_objects, 'mask', 'mask_module', 'mask_type') else: mask = None config['function'] = function config['output_shape'] = output_shape config['mask'] = mask # If arguments were numpy array, they have been saved as # list. We need to recover the ndarray if 'arguments' in config: for key in config['arguments']: if isinstance(config['arguments'][key], dict): arg_dict = config['arguments'][key] if 'type' in arg_dict and arg_dict['type'] == 'ndarray': # Overwrite the argument with its numpy translation config['arguments'][key] = np.array(arg_dict['value']) return cls(**config) @classmethod def _parse_function_from_config( cls, config, custom_objects, func_attr_name, module_attr_name, func_type_attr_name): globs = globals() module = config.pop(module_attr_name, None) if module in sys.modules: globs.update(sys.modules[module].__dict__) elif module is not None: # Note: we don't know the name of the function if it's a lambda. warnings.warn('{} is not loaded, but a Lambda layer uses it. ' 'It may cause errors.'.format(module) , UserWarning) if custom_objects: globs.update(custom_objects) function_type = config.pop(func_type_attr_name) if function_type == 'function': # Simple lookup in custom objects function = generic_utils.deserialize_keras_object( config[func_attr_name], custom_objects=custom_objects, printable_module_name='function in Lambda layer') elif function_type == 'lambda': # Unsafe deserialization from bytecode function = generic_utils.func_load( config[func_attr_name], globs=globs) elif function_type == 'raw': function = config[func_attr_name] else: raise TypeError('Unknown function type:', function_type) return function @keras_export('keras.layers.Dense') class Dense(Layer): """Just your regular densely-connected NN layer. `Dense` implements the operation: `output = activation(dot(input, kernel) + bias)` where `activation` is the element-wise activation function passed as the `activation` argument, `kernel` is a weights matrix created by the layer, and `bias` is a bias vector created by the layer (only applicable if `use_bias` is `True`). Note: If the input to the layer has a rank greater than 2, then it is flattened prior to the initial dot product with `kernel`. Example: ```python # as first layer in a sequential model: model = Sequential() model.add(Dense(32, input_shape=(16,))) # now the model will take as input arrays of shape (*, 16) # and output arrays of shape (*, 32) # after the first layer, you don't need to specify # the size of the input anymore: model.add(Dense(32)) ``` Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. If you don't specify anything, no activation is applied (ie. 
"linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation").. kernel_constraint: Constraint function applied to the `kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. Input shape: N-D tensor with shape: `(batch_size, ..., input_dim)`. The most common situation would be a 2D input with shape `(batch_size, input_dim)`. Output shape: N-D tensor with shape: `(batch_size, ..., units)`. For instance, for a 2D input with shape `(batch_size, input_dim)`, the output would have shape `(batch_size, units)`. """ def __init__(self, units, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): if 'input_shape' not in kwargs and 'input_dim' in kwargs: kwargs['input_shape'] = (kwargs.pop('input_dim'),) super(Dense, self).__init__( activity_regularizer=regularizers.get(activity_regularizer), **kwargs) self.units = int(units) self.activation = activations.get(activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) self.supports_masking = True self.input_spec = InputSpec(min_ndim=2) def build(self, input_shape): dtype = dtypes.as_dtype(self.dtype or K.floatx()) if not (dtype.is_floating or dtype.is_complex): raise TypeError('Unable to build `Dense` layer with non-floating point ' 'dtype %s' % (dtype,)) input_shape = tensor_shape.TensorShape(input_shape) if tensor_shape.dimension_value(input_shape[-1]) is None: raise ValueError('The last dimension of the inputs to `Dense` ' 'should be defined. Found `None`.') last_dim = tensor_shape.dimension_value(input_shape[-1]) self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim}) self.kernel = self.add_weight( 'kernel', shape=[last_dim, self.units], initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, dtype=self.dtype, trainable=True) if self.use_bias: self.bias = self.add_weight( 'bias', shape=[self.units,], initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, dtype=self.dtype, trainable=True) else: self.bias = None self.built = True def call(self, inputs): rank = len(inputs.shape) if rank > 2: # Broadcasting is required for the inputs. outputs = standard_ops.tensordot(inputs, self.kernel, [[rank - 1], [0]]) # Reshape the output back to the original ndim of the input. 
if not context.executing_eagerly(): shape = inputs.shape.as_list() output_shape = shape[:-1] + [self.units] outputs.set_shape(output_shape) else: inputs = math_ops.cast(inputs, self._compute_dtype) if K.is_sparse(inputs): outputs = sparse_ops.sparse_tensor_dense_matmul(inputs, self.kernel) else: outputs = gen_math_ops.mat_mul(inputs, self.kernel) if self.use_bias: outputs = nn.bias_add(outputs, self.bias) if self.activation is not None: return self.activation(outputs) # pylint: disable=not-callable return outputs def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape) input_shape = input_shape.with_rank_at_least(2) if tensor_shape.dimension_value(input_shape[-1]) is None: raise ValueError( 'The innermost dimension of input_shape must be defined, but saw: %s' % input_shape) return input_shape[:-1].concatenate(self.units) def get_config(self): config = { 'units': self.units, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint) } base_config = super(Dense, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.ActivityRegularization') class ActivityRegularization(Layer): """Layer that applies an update to the cost function based on input activity. Arguments: l1: L1 regularization factor (positive float). l2: L2 regularization factor (positive float). Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. """ def __init__(self, l1=0., l2=0., **kwargs): super(ActivityRegularization, self).__init__( activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs) self.supports_masking = True self.l1 = l1 self.l2 = l2 def compute_output_shape(self, input_shape): return input_shape def get_config(self): config = {'l1': self.l1, 'l2': self.l2} base_config = super(ActivityRegularization, self).get_config() return dict(list(base_config.items()) + list(config.items()))
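# --- Hedged usage sketch (not part of the original file above) --------------
# Ties together the core layers just defined: a rank-3 input exercises the
# tensordot branch of `Dense.call`, and `ActivityRegularization` contributes
# an activity loss. All layer sizes and input shapes below are illustrative
# assumptions, not values taken from the source.
import numpy as np
from tensorflow.python import keras

model = keras.Sequential([
    # (batch, 4, 16) -> (batch, 4, 8): rank > 2, so Dense uses tensordot
    keras.layers.Dense(8, activation='relu', input_shape=(4, 16)),
    # Same shape out; adds an L1 penalty on activations to `model.losses`
    keras.layers.ActivityRegularization(l1=0.01),
    keras.layers.Flatten(),  # (batch, 4, 8) -> (batch, 32)
    keras.layers.Dense(1),
])
model.compile('sgd', 'mse')
assert model.predict(np.ones((2, 4, 16), dtype='float32')).shape == (2, 1)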
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/core.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for convolutional transpose layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test @keras_parameterized.run_all_keras_modes class Conv2DTransposeTest(keras_parameterized.TestCase): def _run_test(self, kwargs): num_samples = 2 stack_size = 3 num_row = 7 num_col = 6 with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.Conv2DTranspose, kwargs=kwargs, input_shape=(num_samples, num_row, num_col, stack_size)) @parameterized.named_parameters( ('padding_valid', {'padding': 'valid'}), ('padding_same', {'padding': 'same'}), ('strides', {'strides': (2, 2)}), # Only runs on GPU with CUDA, channels_first is not supported on CPU. # TODO(b/62340061): Support channels_first on CPU. ('data_format', {'data_format': 'channels_first'}), ('strides_output_padding', {'strides': (2, 2), 'output_padding': (1, 1)}), ) def test_conv2d_transpose(self, kwargs): kwargs['filters'] = 2 kwargs['kernel_size'] = (3, 3) if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True): self._run_test(kwargs) def test_conv2d_transpose_regularizers(self): kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2', 'activity_regularizer': 'l2', 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.Conv2DTranspose(**kwargs) layer.build((None, 5, 5, 2)) self.assertEqual(len(layer.losses), 2) layer(keras.backend.variable(np.ones((1, 5, 5, 2)))) self.assertEqual(len(layer.losses), 3) def test_conv2d_transpose_constraints(self): k_constraint = lambda x: x b_constraint = lambda x: x kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'kernel_constraint': k_constraint, 'bias_constraint': b_constraint, 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.Conv2DTranspose(**kwargs) layer.build((None, 5, 5, 2)) self.assertEqual(layer.kernel.constraint, k_constraint) self.assertEqual(layer.bias.constraint, b_constraint) def test_conv2d_transpose_dilation(self): testing_utils.layer_test(keras.layers.Conv2DTranspose, kwargs={'filters': 2, 'kernel_size': 3, 'padding': 'same', 'data_format': 'channels_last', 'dilation_rate': (2, 2)}, input_shape=(2, 5, 6, 3)) input_data = np.arange(48).reshape((1, 4, 4, 3)).astype(np.float32) expected_output = np.float32([[192, 228, 192, 228], [336, 372, 336, 372], [192, 228, 192, 228], [336, 372, 336, 372]]).reshape((1, 4, 4, 1)) testing_utils.layer_test(keras.layers.Conv2DTranspose, input_data=input_data, kwargs={'filters': 1, 'kernel_size': 3, 'padding': 'same', 'data_format': 
'channels_last', 'dilation_rate': (2, 2), 'kernel_initializer': 'ones'}, expected_output=expected_output) @keras_parameterized.run_all_keras_modes class Conv3DTransposeTest(keras_parameterized.TestCase): def _run_test(self, kwargs): num_samples = 2 stack_size = 3 num_row = 7 num_col = 6 depth = 5 with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.Conv3DTranspose, kwargs=kwargs, input_shape=(num_samples, depth, num_row, num_col, stack_size)) @parameterized.named_parameters( ('padding_valid', {'padding': 'valid'}), ('padding_same', {'padding': 'same'}), ('strides', {'strides': (2, 2, 2)}), # Only runs on GPU with CUDA, channels_first is not supported on CPU. # TODO(b/62340061): Support channels_first on CPU. ('data_format', {'data_format': 'channels_first'}), ('strides_output_padding', {'strides': (2, 2, 2), 'output_padding': (1, 1, 1)}), ) def test_conv3d_transpose(self, kwargs): kwargs['filters'] = 2 kwargs['kernel_size'] = (3, 3, 3) if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True): self._run_test(kwargs) def test_conv3d_transpose_regularizers(self): kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2', 'activity_regularizer': 'l2', 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.Conv3DTranspose(**kwargs) layer.build((None, 5, 5, 5, 2)) self.assertEqual(len(layer.losses), 2) layer(keras.backend.variable(np.ones((1, 5, 5, 5, 2)))) self.assertEqual(len(layer.losses), 3) def test_conv3d_transpose_constraints(self): k_constraint = lambda x: x b_constraint = lambda x: x kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'kernel_constraint': k_constraint, 'bias_constraint': b_constraint, 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.Conv3DTranspose(**kwargs) layer.build((None, 5, 5, 5, 2)) self.assertEqual(layer.kernel.constraint, k_constraint) self.assertEqual(layer.bias.constraint, b_constraint) def test_conv3d_transpose_dynamic_shape(self): input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32) with self.cached_session(use_gpu=True): # Won't raise error here. testing_utils.layer_test( keras.layers.Conv3DTranspose, kwargs={ 'data_format': 'channels_last', 'filters': 3, 'kernel_size': 3 }, input_shape=(None, None, None, None, 3), input_data=input_data) if test.is_gpu_available(cuda_only=True): testing_utils.layer_test( keras.layers.Conv3DTranspose, kwargs={ 'data_format': 'channels_first', 'filters': 3, 'kernel_size': 3 }, input_shape=(None, 3, None, None, None), input_data=input_data)
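# --- Hedged illustration (not part of the test file above) ------------------
# The 'strides' and 'strides_output_padding' cases above implicitly rely on
# the standard transposed-convolution length formula. This standalone helper
# mirrors that arithmetic for one spatial axis; it is a sketch for intuition,
# not the utility function the layers themselves call.
def conv_transpose_output_length(input_len, kernel_size, padding, stride,
                                 output_padding=None):
  """Output size of a transposed convolution along one axis."""
  if output_padding is None:
    if padding == 'valid':
      return input_len * stride + max(kernel_size - stride, 0)
    return input_len * stride  # 'same'
  pad = 0 if padding == 'valid' else kernel_size // 2
  return (input_len - 1) * stride + kernel_size - 2 * pad + output_padding

# E.g. 5 input steps, 3-wide kernel, stride 2, output_padding 1, 'same'
# padding: (5 - 1) * 2 + 3 - 2 * 1 + 1 = 10 output steps.
assert conv_transpose_output_length(5, 3, 'same', 2, output_padding=1) == 10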
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/convolutional_transpose_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras core layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_spec from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.mixed_precision.experimental import policy from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test @keras_parameterized.run_all_keras_modes class DropoutLayersTest(keras_parameterized.TestCase): def test_dropout(self): testing_utils.layer_test( keras.layers.Dropout, kwargs={'rate': 0.5}, input_shape=(3, 2)) testing_utils.layer_test( keras.layers.Dropout, kwargs={'rate': 0.5, 'noise_shape': [3, 1]}, input_shape=(3, 2)) def test_dropout_supports_masking(self): dropout = keras.layers.Dropout(0.5) self.assertEqual(True, dropout.supports_masking) def test_spatial_dropout_1d(self): testing_utils.layer_test( keras.layers.SpatialDropout1D, kwargs={'rate': 0.5}, input_shape=(2, 3, 4)) def test_spatial_dropout_2d(self): testing_utils.layer_test( keras.layers.SpatialDropout2D, kwargs={'rate': 0.5}, input_shape=(2, 3, 4, 5)) testing_utils.layer_test( keras.layers.SpatialDropout2D, kwargs={'rate': 0.5, 'data_format': 'channels_first'}, input_shape=(2, 3, 4, 5)) def test_spatial_dropout_3d(self): testing_utils.layer_test( keras.layers.SpatialDropout3D, kwargs={'rate': 0.5}, input_shape=(2, 3, 4, 4, 5)) testing_utils.layer_test( keras.layers.SpatialDropout3D, kwargs={'rate': 0.5, 'data_format': 'channels_first'}, input_shape=(2, 3, 4, 4, 5)) def test_dropout_partial_noise_shape(self): inputs = keras.Input(shape=(5, 10)) layer = keras.layers.Dropout(0.5, noise_shape=(None, 1, None)) outputs = layer(inputs) model = keras.Model(inputs, outputs) out = model(np.ones((20, 5, 10)), training=True) out_np = keras.backend.get_value(out) # Test that dropout mask is shared across second dim. 
self.assertAllClose(out_np[:, 0, :], out_np[:, 1, :]) @keras_parameterized.run_all_keras_modes class LambdaLayerTest(keras_parameterized.TestCase): def test_lambda(self): testing_utils.layer_test( keras.layers.Lambda, kwargs={'function': lambda x: x + 1}, input_shape=(3, 2)) testing_utils.layer_test( keras.layers.Lambda, kwargs={ 'function': lambda x, a, b: x * a + b, 'arguments': { 'a': 0.6, 'b': 0.4 } }, input_shape=(3, 2)) # test serialization with function def f(x): return x + 1 ld = keras.layers.Lambda(f) config = ld.get_config() ld = keras.layers.deserialize({ 'class_name': 'Lambda', 'config': config }) self.assertEqual(ld.function(3), 4) # test with lambda ld = keras.layers.Lambda( lambda x: keras.backend.concatenate([math_ops.square(x), x])) config = ld.get_config() ld = keras.layers.Lambda.from_config(config) self.assertAllEqual(self.evaluate(ld.function([3])), [9, 3]) def test_lambda_multiple_inputs(self): ld = keras.layers.Lambda(lambda x: x[0], output_shape=lambda x: x[0]) x1 = np.ones([3, 2], np.float32) x2 = np.ones([3, 5], np.float32) out = ld([x1, x2]) self.assertAllEqual(out.shape, [3, 2]) def test_lambda_output_shape(self): l = keras.layers.Lambda(lambda x: x + 1, output_shape=(1, 1)) l(keras.backend.variable(np.ones((1, 1)))) self.assertEqual((1, 1), l.get_config()['output_shape']) def test_lambda_output_shape_function(self): def get_output_shape(input_shape): return 1 * input_shape l = keras.layers.Lambda(lambda x: x + 1, output_shape=get_output_shape) l(keras.backend.variable(np.ones((1, 1)))) self.assertEqual('lambda', l.get_config()['output_shape_type']) def test_lambda_output_shape_autocalculate_multiple_inputs(self): def lambda_fn(x): return math_ops.matmul(x[0], x[1]) l = keras.layers.Lambda(lambda_fn, dtype=dtypes.float64) output_shape = l.compute_output_shape([(10, 10), (10, 20)]) self.assertAllEqual((10, 20), output_shape) output_signature = l.compute_output_signature([ tensor_spec.TensorSpec(dtype=dtypes.float64, shape=(10, 10)), tensor_spec.TensorSpec(dtype=dtypes.float64, shape=(10, 20))]) self.assertAllEqual((10, 20), output_signature.shape) self.assertAllEqual(dtypes.float64, output_signature.dtype) def test_lambda_output_shape_list_multiple_outputs(self): def lambda_fn(x): return x l = keras.layers.Lambda(lambda_fn, output_shape=[(10,), (20,)]) output_shape = l.compute_output_shape([(10, 10), (10, 20)]) self.assertAllEqual([(10, 10), (10, 20)], output_shape) def test_lambda_output_shape_tuple_with_none(self): def lambda_fn(x): return x l = keras.layers.Lambda(lambda_fn, output_shape=(None, 10)) output_shape = l.compute_output_shape((5, 10, 20)) self.assertAllEqual([5, None, 10], output_shape.as_list()) def test_lambda_output_shape_function_multiple_outputs(self): def lambda_fn(x): return x def output_shape_fn(input_shape): return input_shape l = keras.layers.Lambda(lambda_fn, output_shape=output_shape_fn) output_shape = l.compute_output_shape([(10, 10), (10, 20)]) self.assertAllEqual([(10, 10), (10, 20)], output_shape) def test_lambda_output_shape_nested(self): def lambda_fn(inputs): return (inputs[1]['a'], {'b': inputs[0]}) l = keras.layers.Lambda(lambda_fn) output_shape = l.compute_output_shape(((10, 20), {'a': (10, 5)})) self.assertAllEqual(((10, 5), {'b': (10, 20)}), output_shape) def test_lambda_config_serialization(self): # Test serialization with output_shape and output_shape_type layer = keras.layers.Lambda( lambda x: x + 1, output_shape=(1, 1), mask=lambda i, m: m) layer(keras.backend.variable(np.ones((1, 1)))) config = layer.get_config() layer = 
keras.layers.deserialize({ 'class_name': 'Lambda', 'config': config }) self.assertAllEqual(layer.function(1), 2) self.assertAllEqual(layer._output_shape, (1, 1)) self.assertAllEqual(layer.mask(1, True), True) layer = keras.layers.Lambda.from_config(config) self.assertAllEqual(layer.function(1), 2) self.assertAllEqual(layer._output_shape, (1, 1)) self.assertAllEqual(layer.mask(1, True), True) def test_lambda_with_variable(self): def fn(x): return x * variables.Variable(2., name='multiplier') layer = keras.layers.Lambda(fn) for _ in range(10): layer(np.ones((10, 10), 'float32')) self.assertLen(layer.trainable_weights, 1) self.assertEqual(layer.trainable_weights[0].name, 'lambda/multiplier:0') def test_lambda_with_training_arg(self): def fn(x, training=True): return keras.backend.in_train_phase(x, 2 * x, training=training) layer = keras.layers.Lambda(fn) x = keras.backend.ones(()) train_out = layer(x, training=True) eval_out = layer(x, training=False) self.assertEqual(keras.backend.get_value(train_out), 1.) self.assertEqual(keras.backend.get_value(eval_out), 2.) def test_lambda_with_mask(self): def add_one(inputs): return inputs + 1.0 def mask(unused_inputs, previous_mask): return previous_mask layer = keras.layers.Lambda(add_one, mask=mask) x = np.ones([5, 4, 3]) x[:, -1, :] = 0 masking = keras.layers.Masking() out = layer(masking(x)) expected_out = np.full([5, 4, 3], 2.0) expected_out[:, -1, :] = 1.0 expected_mask = np.ones([5, 4]) expected_mask[:, -1] = 0.0 self.assertAllClose(self.evaluate(out), expected_out) self.assertIsNotNone(out._keras_mask) self.assertAllClose(self.evaluate(out._keras_mask), expected_mask) class TestStatefulLambda(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes @keras_parameterized.run_with_all_model_types def test_lambda_with_variable_in_model(self): def lambda_fn(x): # Variable will only get created once. v = variables.Variable(1., trainable=True) return x * v model = testing_utils.get_model_from_layers( [keras.layers.Lambda(lambda_fn)], input_shape=(10,)) model.compile( keras.optimizer_v2.gradient_descent.SGD(0.1), 'mae', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x, y = np.ones((10, 10), 'float32'), 2 * np.ones((10, 10), 'float32') model.fit(x, y, batch_size=2, epochs=2, validation_data=(x, y)) self.assertLen(model.trainable_weights, 1) self.assertAllClose(keras.backend.get_value(model.trainable_weights[0]), 2.) @keras_parameterized.run_all_keras_modes class CoreLayersTest(keras_parameterized.TestCase): def test_masking(self): testing_utils.layer_test( keras.layers.Masking, kwargs={}, input_shape=(3, 2, 3)) def test_keras_mask(self): x = np.ones((10, 10)) y = keras.layers.Masking(1.)(x) self.assertTrue(hasattr(y, '_keras_mask')) self.assertTrue(y._keras_mask is not None) self.assertAllClose(self.evaluate(y._keras_mask), np.zeros((10,))) def test_compute_mask_with_positional_mask_arg(self): class MyLayer(keras.layers.Layer): def call(self, inputs, mask=None): return inputs def compute_mask(self, inputs, mask=None): if mask is not None: return array_ops.ones(()) else: return array_ops.zeros(()) x, mask = array_ops.ones((1, 1)), array_ops.ones((1, 1)) layer = MyLayer() y = layer(x, mask) # Check that `mask` was correctly sent to `compute_mask`. 
self.assertEqual(keras.backend.get_value(y._keras_mask), 1) def test_activation(self): # with string argument testing_utils.layer_test( keras.layers.Activation, kwargs={'activation': 'relu'}, input_shape=(3, 2)) # with function argument testing_utils.layer_test( keras.layers.Activation, kwargs={'activation': keras.backend.relu}, input_shape=(3, 2)) def test_reshape(self): testing_utils.layer_test( keras.layers.Reshape, kwargs={'target_shape': (8, 1)}, input_shape=(3, 2, 4)) testing_utils.layer_test( keras.layers.Reshape, kwargs={'target_shape': (-1, 1)}, input_shape=(3, 2, 4)) testing_utils.layer_test( keras.layers.Reshape, kwargs={'target_shape': (1, -1)}, input_shape=(3, 2, 4)) testing_utils.layer_test( keras.layers.Reshape, kwargs={'target_shape': (-1, 1)}, input_shape=(None, None, 2)) def test_permute(self): testing_utils.layer_test( keras.layers.Permute, kwargs={'dims': (2, 1)}, input_shape=(3, 2, 4)) def test_permute_errors_on_invalid_starting_dims_index(self): with self.assertRaisesRegexp(ValueError, r'Invalid permutation .*dims.*'): testing_utils.layer_test( keras.layers.Permute, kwargs={'dims': (0, 1, 2)}, input_shape=(3, 2, 4)) def test_permute_errors_on_invalid_set_of_dims_indices(self): with self.assertRaisesRegexp(ValueError, r'Invalid permutation .*dims.*'): testing_utils.layer_test( keras.layers.Permute, kwargs={'dims': (1, 4, 2)}, input_shape=(3, 2, 4)) def test_flatten(self): testing_utils.layer_test( keras.layers.Flatten, kwargs={}, input_shape=(3, 2, 4)) # Test channels_first inputs = np.random.random((10, 3, 5, 5)).astype('float32') outputs = testing_utils.layer_test( keras.layers.Flatten, kwargs={'data_format': 'channels_first'}, input_data=inputs) target_outputs = np.reshape( np.transpose(inputs, (0, 2, 3, 1)), (-1, 5 * 5 * 3)) self.assertAllClose(outputs, target_outputs) def test_flatten_scalar_channels(self): testing_utils.layer_test( keras.layers.Flatten, kwargs={}, input_shape=(3,)) # Test channels_first inputs = np.random.random((10,)).astype('float32') outputs = testing_utils.layer_test( keras.layers.Flatten, kwargs={'data_format': 'channels_first'}, input_data=inputs) target_outputs = np.expand_dims(inputs, -1) self.assertAllClose(outputs, target_outputs) def test_repeat_vector(self): testing_utils.layer_test( keras.layers.RepeatVector, kwargs={'n': 3}, input_shape=(3, 2)) def test_dense(self): testing_utils.layer_test( keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 2)) testing_utils.layer_test( keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 2)) testing_utils.layer_test( keras.layers.Dense, kwargs={'units': 3}, input_shape=(None, None, 2)) testing_utils.layer_test( keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 5, 2)) def test_dense_dtype(self): inputs = ops.convert_to_tensor( np.random.randint(low=0, high=7, size=(2, 2))) layer = keras.layers.Dense(5, dtype='float32') outputs = layer(inputs) self.assertEqual(outputs.dtype, 'float32') def test_dense_with_policy(self): inputs = ops.convert_to_tensor( np.random.randint(low=0, high=7, size=(2, 2)), dtype='float16') layer = keras.layers.Dense(5, dtype=policy.Policy('infer_float32_vars')) outputs = layer(inputs) output_signature = layer.compute_output_signature( tensor_spec.TensorSpec(dtype='float16', shape=(2, 2))) self.assertEqual(output_signature.dtype, dtypes.float16) self.assertEqual(output_signature.shape, (2, 5)) self.assertEqual(outputs.dtype, 'float16') self.assertEqual(layer.kernel.dtype, 'float32') def test_dense_regularization(self): layer = keras.layers.Dense( 3, 
kernel_regularizer=keras.regularizers.l1(0.01), bias_regularizer='l1', activity_regularizer='l2', name='dense_reg') layer(keras.backend.variable(np.ones((2, 4)))) self.assertEqual(3, len(layer.losses)) def test_dense_constraints(self): k_constraint = keras.constraints.max_norm(0.01) b_constraint = keras.constraints.max_norm(0.01) layer = keras.layers.Dense( 3, kernel_constraint=k_constraint, bias_constraint=b_constraint) layer(keras.backend.variable(np.ones((2, 4)))) self.assertEqual(layer.kernel.constraint, k_constraint) self.assertEqual(layer.bias.constraint, b_constraint) def test_activity_regularization(self): layer = keras.layers.ActivityRegularization(l1=0.1) layer(keras.backend.variable(np.ones((2, 4)))) self.assertEqual(1, len(layer.losses)) config = layer.get_config() self.assertEqual(config.pop('l1'), 0.1) def test_numpy_inputs(self): if context.executing_eagerly(): layer = keras.layers.RepeatVector(2) x = np.ones((10, 10)) self.assertAllEqual(np.ones((10, 2, 10)), layer(x)) layer = keras.layers.Concatenate() x, y = np.ones((10, 10)), np.ones((10, 10)) self.assertAllEqual(np.ones((10, 20)), layer([x, y])) if __name__ == '__main__': test.main()
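# --- Hedged illustration (not part of the test file above) ------------------
# `test_flatten` builds its channels_first reference output by moving the
# channel axis last and then reshaping; this small numpy snippet shows that
# reference computation in isolation. Shapes are illustrative assumptions.
import numpy as np

inputs = np.arange(2 * 3 * 2 * 2, dtype='float32').reshape((2, 3, 2, 2))  # (batch, C, H, W)
reference = np.transpose(inputs, (0, 2, 3, 1)).reshape((2, -1))  # channels moved last, then flattened
assert reference.shape == (2, 2 * 2 * 3)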
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/core_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for pooling layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.framework import test_util as tf_test_util from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test class GlobalPoolingTest(test.TestCase): @tf_test_util.run_in_graph_and_eager_modes def test_globalpooling_1d(self): testing_utils.layer_test(keras.layers.pooling.GlobalMaxPooling1D, input_shape=(3, 4, 5)) testing_utils.layer_test(keras.layers.pooling.GlobalMaxPooling1D, kwargs={'data_format': 'channels_first'}, input_shape=(3, 4, 5)) testing_utils.layer_test( keras.layers.pooling.GlobalAveragePooling1D, input_shape=(3, 4, 5)) testing_utils.layer_test(keras.layers.pooling.GlobalAveragePooling1D, kwargs={'data_format': 'channels_first'}, input_shape=(3, 4, 5)) @tf_test_util.run_in_graph_and_eager_modes def test_globalpooling_1d_masking_support(self): model = keras.Sequential() model.add(keras.layers.Masking(mask_value=0., input_shape=(3, 4))) model.add(keras.layers.GlobalAveragePooling1D()) model.compile(loss='mae', optimizer='rmsprop') model_input = np.random.random((2, 3, 4)) model_input[0, 1:, :] = 0 output = model.predict(model_input) self.assertAllClose(output[0], model_input[0, 0, :]) @tf_test_util.run_in_graph_and_eager_modes def test_globalpooling_2d(self): testing_utils.layer_test( keras.layers.pooling.GlobalMaxPooling2D, kwargs={'data_format': 'channels_first'}, input_shape=(3, 4, 5, 6)) testing_utils.layer_test( keras.layers.pooling.GlobalMaxPooling2D, kwargs={'data_format': 'channels_last'}, input_shape=(3, 5, 6, 4)) testing_utils.layer_test( keras.layers.pooling.GlobalAveragePooling2D, kwargs={'data_format': 'channels_first'}, input_shape=(3, 4, 5, 6)) testing_utils.layer_test( keras.layers.pooling.GlobalAveragePooling2D, kwargs={'data_format': 'channels_last'}, input_shape=(3, 5, 6, 4)) @tf_test_util.run_in_graph_and_eager_modes def test_globalpooling_3d(self): testing_utils.layer_test( keras.layers.pooling.GlobalMaxPooling3D, kwargs={'data_format': 'channels_first'}, input_shape=(3, 4, 3, 4, 3)) testing_utils.layer_test( keras.layers.pooling.GlobalMaxPooling3D, kwargs={'data_format': 'channels_last'}, input_shape=(3, 4, 3, 4, 3)) testing_utils.layer_test( keras.layers.pooling.GlobalAveragePooling3D, kwargs={'data_format': 'channels_first'}, input_shape=(3, 4, 3, 4, 3)) testing_utils.layer_test( keras.layers.pooling.GlobalAveragePooling3D, kwargs={'data_format': 'channels_last'}, input_shape=(3, 4, 3, 4, 3)) class Pooling2DTest(test.TestCase): @tf_test_util.run_in_graph_and_eager_modes def test_maxpooling_2d(self): pool_size = (3, 3) for strides in [(1, 1), (2, 2)]: testing_utils.layer_test( 
keras.layers.MaxPooling2D, kwargs={ 'strides': strides, 'padding': 'valid', 'pool_size': pool_size }, input_shape=(3, 5, 6, 4)) @tf_test_util.run_in_graph_and_eager_modes def test_averagepooling_2d(self): testing_utils.layer_test( keras.layers.AveragePooling2D, kwargs={'strides': (2, 2), 'padding': 'same', 'pool_size': (2, 2)}, input_shape=(3, 5, 6, 4)) testing_utils.layer_test( keras.layers.AveragePooling2D, kwargs={'strides': (2, 2), 'padding': 'valid', 'pool_size': (3, 3)}, input_shape=(3, 5, 6, 4)) # This part of the test can only run on GPU but doesn't appear # to be properly assigned to a GPU when running in eager mode. if not context.executing_eagerly(): # Only runs on GPU with CUDA, channels_first is not supported on CPU. # TODO(b/62340061): Support channels_first on CPU. if test.is_gpu_available(cuda_only=True): testing_utils.layer_test( keras.layers.AveragePooling2D, kwargs={ 'strides': (1, 1), 'padding': 'valid', 'pool_size': (2, 2), 'data_format': 'channels_first' }, input_shape=(3, 4, 5, 6)) class Pooling3DTest(test.TestCase): @tf_test_util.run_in_graph_and_eager_modes def test_maxpooling_3d(self): if test.is_built_with_rocm(): self.skipTest('Pooling with 3D tensors is not supported in ROCm') pool_size = (3, 3, 3) testing_utils.layer_test( keras.layers.MaxPooling3D, kwargs={'strides': 2, 'padding': 'valid', 'pool_size': pool_size}, input_shape=(3, 11, 12, 10, 4)) testing_utils.layer_test( keras.layers.MaxPooling3D, kwargs={ 'strides': 3, 'padding': 'valid', 'data_format': 'channels_first', 'pool_size': pool_size }, input_shape=(3, 4, 11, 12, 10)) @tf_test_util.run_in_graph_and_eager_modes def test_averagepooling_3d(self): if test.is_built_with_rocm(): self.skipTest('Pooling with 3D tensors is not supported in ROCm') pool_size = (3, 3, 3) testing_utils.layer_test( keras.layers.AveragePooling3D, kwargs={'strides': 2, 'padding': 'valid', 'pool_size': pool_size}, input_shape=(3, 11, 12, 10, 4)) testing_utils.layer_test( keras.layers.AveragePooling3D, kwargs={ 'strides': 3, 'padding': 'valid', 'data_format': 'channels_first', 'pool_size': pool_size }, input_shape=(3, 4, 11, 12, 10)) class Pooling1DTest(test.TestCase): @tf_test_util.run_in_graph_and_eager_modes def test_maxpooling_1d(self): for padding in ['valid', 'same']: for stride in [1, 2]: testing_utils.layer_test( keras.layers.MaxPooling1D, kwargs={'strides': stride, 'padding': padding}, input_shape=(3, 5, 4)) testing_utils.layer_test( keras.layers.MaxPooling1D, kwargs={'data_format': 'channels_first'}, input_shape=(3, 2, 6)) @tf_test_util.run_in_graph_and_eager_modes def test_averagepooling_1d(self): for padding in ['valid', 'same']: for stride in [1, 2]: testing_utils.layer_test( keras.layers.AveragePooling1D, kwargs={'strides': stride, 'padding': padding}, input_shape=(3, 5, 4)) testing_utils.layer_test( keras.layers.AveragePooling1D, kwargs={'data_format': 'channels_first'}, input_shape=(3, 2, 6)) if __name__ == '__main__': test.main()
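# --- Hedged illustration (not part of the test file above) ------------------
# What `test_globalpooling_1d_masking_support` asserts, redone in numpy: with
# a Masking layer in front, GlobalAveragePooling1D averages only over the
# unmasked timesteps. Values and shapes are illustrative; the zeroed rows
# stand in for timesteps that `Masking(mask_value=0.)` would mask out.
import numpy as np

x = np.random.random((2, 3, 4)).astype('float32')
x[0, 1:, :] = 0.  # mask out timesteps 1..2 of sample 0
mask = np.any(x != 0., axis=-1)  # (2, 3), True where a timestep is kept
masked_mean = (x * mask[..., None]).sum(axis=1) / mask.sum(axis=1, keepdims=True)
# Sample 0 reduces to its single unmasked timestep:
np.testing.assert_allclose(masked_mean[0], x[0, 0, :], rtol=1e-6)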
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/pooling_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Module implementing RNN wrappers for TF v2.""" # Note that all the APIs under this module are exported as tf.nn.*. This is due # to the fact that those APIs were from tf.nn.rnn_cell_impl. They are ported # here to avoid the cyclic dependency issue for serialization. These APIs will # probably be deprecated and removed in the future, since a similar API is # available in the existing Keras RNN API. from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras.layers import AbstractRNNCell from tensorflow.python.ops import rnn_cell_wrapper_impl from tensorflow.python.util.tf_export import tf_export class _RNNCellWrapperV2(AbstractRNNCell): """Base class for cell wrappers with V2 compatibility. This class, along with `rnn_cell_impl._RNNCellWrapperV1`, makes it possible to define wrappers that are compatible with V1 and V2, and defines helper methods for this purpose. """ def __init__(self, cell, *args, **kwargs): super(_RNNCellWrapperV2, self).__init__(*args, **kwargs) self.cell = cell def call(self, inputs, state, **kwargs): """Runs the RNN cell step computation. When `call` is being used, we assume that the wrapper object has been built, and therefore the wrapped cell has been built via its `build` method and its `call` method can be used directly. This allows the wrapped and non-wrapped cells to be used equivalently when using `call` and `build`. Args: inputs: A tensor with the wrapped cell's input. state: A tensor or tuple of tensors with the wrapped cell's state. **kwargs: Additional arguments passed to the wrapped cell's `call`. Returns: A pair containing: - Output: A tensor with the cell's output. - New state: A tensor or tuple of tensors with the new wrapped cell's state.
""" return self._call_wrapped_cell( inputs, state, cell_call_fn=self.cell.call, **kwargs) def build(self, inputs_shape): """Builds the wrapped cell.""" self.cell.build(inputs_shape) self.built = True def get_config(self): config = { "cell": { "class_name": self.cell.__class__.__name__, "config": self.cell.get_config() }, } base_config = super(_RNNCellWrapperV2, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): config = config.copy() from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top cell = deserialize_layer(config.pop("cell"), custom_objects=custom_objects) return cls(cell, **config) @tf_export("nn.RNNCellDropoutWrapper", v1=[]) class DropoutWrapper(rnn_cell_wrapper_impl.DropoutWrapperBase, _RNNCellWrapperV2): """Operator adding dropout to inputs and outputs of the given cell.""" def __init__(self, *args, **kwargs): # pylint: disable=useless-super-delegation super(DropoutWrapper, self).__init__(*args, **kwargs) __init__.__doc__ = rnn_cell_wrapper_impl.DropoutWrapperBase.__init__.__doc__ @tf_export("nn.RNNCellResidualWrapper", v1=[]) class ResidualWrapper(rnn_cell_wrapper_impl.ResidualWrapperBase, _RNNCellWrapperV2): """RNNCell wrapper that ensures cell inputs are added to the outputs.""" def __init__(self, *args, **kwargs): # pylint: disable=useless-super-delegation super(ResidualWrapper, self).__init__(*args, **kwargs) __init__.__doc__ = rnn_cell_wrapper_impl.ResidualWrapperBase.__init__.__doc__ @tf_export("nn.RNNCellDeviceWrapper", v1=[]) class DeviceWrapper(rnn_cell_wrapper_impl.DeviceWrapperBase, _RNNCellWrapperV2): """Operator that ensures an RNNCell runs on a particular device.""" def __init__(self, *args, **kwargs): # pylint: disable=useless-super-delegation super(DeviceWrapper, self).__init__(*args, **kwargs) __init__.__doc__ = rnn_cell_wrapper_impl.DeviceWrapperBase.__init__.__doc__
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/rnn_cell_wrapper_v2.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Embedding layer. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.keras import backend as K from tensorflow.python.keras import constraints from tensorflow.python.keras import initializers from tensorflow.python.keras import regularizers from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import math_ops from tensorflow.python.util.tf_export import keras_export @keras_export('keras.layers.Embedding') class Embedding(Layer): """Turns positive integers (indexes) into dense vectors of fixed size. e.g. `[[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]` This layer can only be used as the first layer in a model. Example: ```python model = Sequential() model.add(Embedding(1000, 64, input_length=10)) # the model will take as input an integer matrix of size (batch, # input_length). # the largest integer (i.e. word index) in the input should be no larger # than 999 (vocabulary size). # now model.output_shape == (None, 10, 64), where None is the batch # dimension. input_array = np.random.randint(1000, size=(32, 10)) model.compile('rmsprop', 'mse') output_array = model.predict(input_array) assert output_array.shape == (32, 10, 64) ``` Arguments: input_dim: int > 0. Size of the vocabulary, i.e. maximum integer index + 1. output_dim: int >= 0. Dimension of the dense embedding. embeddings_initializer: Initializer for the `embeddings` matrix. embeddings_regularizer: Regularizer function applied to the `embeddings` matrix. embeddings_constraint: Constraint function applied to the `embeddings` matrix. mask_zero: Whether or not the input value 0 is a special "padding" value that should be masked out. This is useful when using recurrent layers which may take variable length input. If this is `True` then all subsequent layers in the model need to support masking or an exception will be raised. If mask_zero is set to True, as a consequence, index 0 cannot be used in the vocabulary (input_dim should equal size of vocabulary + 1). input_length: Length of input sequences, when it is constant. This argument is required if you are going to connect `Flatten` then `Dense` layers upstream (without it, the shape of the dense outputs cannot be computed). Input shape: 2D tensor with shape: `(batch_size, input_length)`. Output shape: 3D tensor with shape: `(batch_size, input_length, output_dim)`. 
""" def __init__(self, input_dim, output_dim, embeddings_initializer='uniform', embeddings_regularizer=None, activity_regularizer=None, embeddings_constraint=None, mask_zero=False, input_length=None, **kwargs): if 'input_shape' not in kwargs: if input_length: kwargs['input_shape'] = (input_length,) else: kwargs['input_shape'] = (None,) dtype = kwargs.pop('dtype', K.floatx()) # We set autocast to False, as we do not want to cast floating- point inputs # to self.dtype. In call(), we cast to int32, and casting to self.dtype # before casting to int32 might cause the int32 values to be different due # to a loss of precision. kwargs['autocast'] = False super(Embedding, self).__init__(dtype=dtype, **kwargs) self.input_dim = input_dim self.output_dim = output_dim self.embeddings_initializer = initializers.get(embeddings_initializer) self.embeddings_regularizer = regularizers.get(embeddings_regularizer) self.activity_regularizer = regularizers.get(activity_regularizer) self.embeddings_constraint = constraints.get(embeddings_constraint) self.mask_zero = mask_zero self.supports_masking = mask_zero self.input_length = input_length @tf_utils.shape_type_conversion def build(self, input_shape): # Note: most sparse optimizers do not have GPU kernels defined. When # building graphs, the placement algorithm is able to place variables on CPU # since it knows all kernels using the variable only exist on CPU. # When eager execution is enabled, the placement decision has to be made # right now. Checking for the presence of GPUs to avoid complicating the # TPU codepaths which can handle sparse optimizers. if context.executing_eagerly() and context.context().num_gpus(): with ops.device('cpu:0'): self.embeddings = self.add_weight( shape=(self.input_dim, self.output_dim), initializer=self.embeddings_initializer, name='embeddings', regularizer=self.embeddings_regularizer, constraint=self.embeddings_constraint) else: self.embeddings = self.add_weight( shape=(self.input_dim, self.output_dim), initializer=self.embeddings_initializer, name='embeddings', regularizer=self.embeddings_regularizer, constraint=self.embeddings_constraint) self.built = True def compute_mask(self, inputs, mask=None): if not self.mask_zero: return None return math_ops.not_equal(inputs, 0) @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): if self.input_length is None: return input_shape + (self.output_dim,) else: # input_length can be tuple if input is 3D or higher if isinstance(self.input_length, (list, tuple)): in_lens = list(self.input_length) else: in_lens = [self.input_length] if len(in_lens) != len(input_shape) - 1: raise ValueError('"input_length" is %s, ' 'but received input has shape %s' % (str( self.input_length), str(input_shape))) else: for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])): if s1 is not None and s2 is not None and s1 != s2: raise ValueError('"input_length" is %s, ' 'but received input has shape %s' % (str( self.input_length), str(input_shape))) elif s1 is None: in_lens[i] = s2 return (input_shape[0],) + tuple(in_lens) + (self.output_dim,) def call(self, inputs): dtype = K.dtype(inputs) if dtype != 'int32' and dtype != 'int64': inputs = math_ops.cast(inputs, 'int32') out = embedding_ops.embedding_lookup(self.embeddings, inputs) return out def get_config(self): config = { 'input_dim': self.input_dim, 'output_dim': self.output_dim, 'embeddings_initializer': initializers.serialize(self.embeddings_initializer), 'embeddings_regularizer': regularizers.serialize(self.embeddings_regularizer), 
'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'embeddings_constraint': constraints.serialize(self.embeddings_constraint), 'mask_zero': self.mask_zero, 'input_length': self.input_length } base_config = super(Embedding, self).get_config() return dict(list(base_config.items()) + list(config.items()))
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/embeddings.py
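A minimal usage sketch for the layer above (the vocabulary and shape values are illustrative, not from the source): with `mask_zero=True`, index 0 is reserved for padding, so the real vocabulary must fit within `input_dim - 1` indices, and `compute_mask` marks padded positions `False` for downstream masking-aware layers.

```python
import numpy as np
from tensorflow.python import keras

layer = keras.layers.Embedding(input_dim=1000, output_dim=64, mask_zero=True)
ids = np.array([[4, 20, 0]])      # the trailing 0 is padding, not a token id
vectors = layer(ids)              # dense output, shape (1, 3, 64)
mask = layer.compute_mask(ids)    # [[True, True, False]]
```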
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for GRU V2 layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil from absl.testing import parameterized import numpy as np from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import random_seed from tensorflow.python.framework import test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.layers import recurrent as rnn_v1 from tensorflow.python.keras.layers import recurrent_v2 as rnn from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent # Global config for grappler setting that is used for graph mode test. 
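# `implementation_selector` is the grappler pass that swaps a defun body for a
# device-specialized equivalent (e.g. the cuDNN GRU kernel) at graph
# optimization time; the runtime-selection tests below rely on this rewrite.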
_rewrites = rewriter_config_pb2.RewriterConfig() _rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON _rewrites.min_graph_nodes = -1 _graph_options = config_pb2.GraphOptions(rewrite_options=_rewrites) _config = config_pb2.ConfigProto(graph_options=_graph_options) @keras_parameterized.run_all_keras_modes(config=_config) class GRUV2Test(keras_parameterized.TestCase): @parameterized.named_parameters( ('non_tan_activation', 'relu', 'sigmoid', 0, False, True, True), ('non_sigmoid_recur_activation', 'tanh', 'relu', 0, False, True, True), ('use_recurrent_dropout', 'tanh', 'sigmoid', 0.1, False, True, True), ('unroll', 'tanh', 'sigmoid', 0, True, True, True), ('not_use_bias', 'tanh', 'sigmoid', 0, False, False, True), ('not_reset_after', 'tanh', 'sigmoid', 0, False, True, False) ) def test_could_use_defun_backend(self, activation, recurrent_activation, recurrent_dropout, unroll, use_bias, reset_after): layer = rnn.GRU(1, activation=activation, recurrent_activation=recurrent_activation, recurrent_dropout=recurrent_dropout, unroll=unroll, use_bias=use_bias, reset_after=reset_after) self.assertFalse(layer.could_use_cudnn) def test_keras_model_with_gru(self): input_shape = 10 rnn_state_size = 8 output_shape = 8 timestep = 4 batch = 100 epoch = 10 (x_train, y_train), _ = testing_utils.get_test_data( train_samples=batch, test_samples=0, input_shape=(timestep, input_shape), num_classes=output_shape) y_train = keras.utils.to_categorical(y_train, output_shape) layer = rnn.GRU(rnn_state_size) inputs = keras.layers.Input( shape=[timestep, input_shape], dtype=dtypes.float32) outputs = layer(inputs) model = keras.models.Model(inputs, outputs) model.compile('rmsprop', loss='mse') model.fit(x_train, y_train, epochs=epoch) model.evaluate(x_train, y_train) model.predict(x_train) def test_dynamic_behavior_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 layer = rnn.GRU(units, input_shape=(None, embedding_dim)) model = keras.models.Sequential() model.add(layer) model.compile(gradient_descent.GradientDescentOptimizer(0.001), 'mse') x = np.random.random((num_samples, timesteps, embedding_dim)) y = np.random.random((num_samples, units)) model.train_on_batch(x, y) def test_stacking_GRU(self): inputs = np.random.random((2, 3, 4)) targets = np.abs(np.random.random((2, 3, 5))) targets /= targets.sum(axis=-1, keepdims=True) model = keras.models.Sequential() model.add(rnn.GRU(10, return_sequences=True, unroll=False)) model.add(rnn.GRU(5, return_sequences=True, unroll=False)) model.compile( loss='categorical_crossentropy', optimizer=gradient_descent.GradientDescentOptimizer(0.01)) model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1) def test_from_config_GRU(self): layer_class = rnn.GRU for stateful in (False, True): l1 = layer_class(units=1, stateful=stateful) l2 = layer_class.from_config(l1.get_config()) assert l1.get_config() == l2.get_config() @test_util.run_v2_only def test_gru_v2_feature_parity_with_canonical_gru(self): input_shape = 10 rnn_state_size = 8 timestep = 4 batch = 20 (x_train, y_train), _ = testing_utils.get_test_data( train_samples=batch, test_samples=0, input_shape=(timestep, input_shape), num_classes=rnn_state_size, random_seed=random_seed.DEFAULT_GRAPH_SEED) y_train = keras.utils.to_categorical(y_train, rnn_state_size) # For the last batch item of the test data, we filter out the last # timestep to simulate the variable length sequence and masking test. 
x_train[-2:, -1, :] = 0.0 y_train[-2:] = 0 inputs = keras.layers.Input( shape=[timestep, input_shape], dtype=dtypes.float32) masked_input = keras.layers.Masking()(inputs) gru_layer = rnn_v1.GRU(rnn_state_size, recurrent_activation='sigmoid', reset_after=True) output = gru_layer(masked_input) gru_model = keras.models.Model(inputs, output) weights = gru_model.get_weights() y_1 = gru_model.predict(x_train) gru_model.compile('rmsprop', 'mse') gru_model.fit(x_train, y_train) y_2 = gru_model.predict(x_train) with test_util.device(use_gpu=True): cudnn_layer = rnn.GRU(rnn_state_size, recurrent_activation='sigmoid', reset_after=True) cudnn_model = keras.models.Model(inputs, cudnn_layer(masked_input)) cudnn_model.set_weights(weights) y_3 = cudnn_model.predict(x_train) cudnn_model.compile('rmsprop', 'mse') cudnn_model.fit(x_train, y_train) y_4 = cudnn_model.predict(x_train) self.assertAllClose(y_1, y_3, rtol=2e-5, atol=2e-5) self.assertAllClose(y_2, y_4, rtol=2e-5, atol=2e-5) @parameterized.named_parameters( # test_name, use_bias, bias_initializer, activation ('normal', True, 'zeros'), ('no_bias', False, 'zeros'), ('random_bias', True, 'random_uniform'), ) def test_gru_v2_model_save_load(self, use_bias, bias_initializer): temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir) h5_path = os.path.join(temp_dir, 'test.h5') batch = 10 timestep = 3 input_dim = 5 units = 2 x = np.random.random((batch, timestep, input_dim)) def build_model(): inputs = keras.layers.Input( shape=[timestep, input_dim], dtype=dtypes.float32) layer = rnn.GRU( units, use_bias=use_bias, bias_initializer=bias_initializer) output = layer(inputs) return keras.models.Model(inputs, output), layer model, layer = build_model() y_ref = model.predict(x) model.save_weights(h5_path) cloned_model, new_layer = build_model() cloned_model.load_weights(h5_path) y = cloned_model.predict(x) self.assertAllClose(y, y_ref) self.assertAllClose(layer.get_weights(), new_layer.get_weights()) def test_gru_v2_output_on_multiple_kernel(self): input_shape = 10 rnn_state_size = 8 timestep = 4 batch = 100 x_train = np.random.random((batch, timestep, input_shape)) inputs = keras.layers.Input( shape=[timestep, input_shape], dtype=dtypes.float32) with test_util.device(use_gpu=False): layer = rnn.GRU(rnn_state_size) output = layer(inputs) cpu_model = keras.models.Model(inputs, output) weights = cpu_model.get_weights() y_1 = cpu_model.predict(x_train) with test_util.device(use_gpu=True): layer = rnn.GRU(rnn_state_size) output = layer(inputs) gpu_model = keras.models.Model(inputs, output) gpu_model.set_weights(weights) y_2 = gpu_model.predict(x_train) # Note that CuDNN uses 'sigmoid' as activation, so the GRU V2 uses # 'sigmoid' as default. Construct the canonical GRU with sigmoid to achieve # the same output. 
with test_util.device(use_gpu=True): layer = rnn_v1.GRU(rnn_state_size, recurrent_activation='sigmoid', reset_after=True) output = layer(inputs) canonical_model = keras.models.Model(inputs, output) canonical_model.set_weights(weights) y_3 = canonical_model.predict(x_train) self.assertAllClose(y_1, y_2, rtol=1e-5, atol=1e-5) self.assertAllClose(y_2, y_3, rtol=1e-5, atol=1e-5) @parameterized.named_parameters( # test_name, time_major, go_backwards ('normal', False, False), ('time_major', True, False), ('go_backwards', False, True), ('both', True, True), ) def test_time_major_and_go_backward(self, time_major, go_backwards): input_shape = 10 rnn_state_size = 8 timestep = 4 batch = 100 x_train = np.random.random((batch, timestep, input_shape)) def build_model(layer_cls): inputs = keras.layers.Input( shape=[timestep, input_shape], dtype=dtypes.float32) layer = layer_cls(rnn_state_size, recurrent_activation='sigmoid', time_major=time_major, return_sequences=True, go_backwards=go_backwards, reset_after=True) if time_major: converted_input = keras.layers.Lambda( lambda t: array_ops.transpose(t, [1, 0, 2]))(inputs) outputs = layer(converted_input) outputs = keras.layers.Lambda( lambda t: array_ops.transpose(t, [1, 0, 2]))(outputs) else: outputs = layer(inputs) return keras.models.Model(inputs, outputs) gru_model = build_model(rnn_v1.GRU) y_ref = gru_model.predict(x_train) weights = gru_model.get_weights() gru_v2_model = build_model(rnn.GRU) gru_v2_model.set_weights(weights) y = gru_v2_model.predict(x_train) self.assertAllClose(y, y_ref) def test_with_masking_layer_GRU(self): layer_class = rnn.GRU inputs = np.random.random((2, 3, 4)) targets = np.abs(np.random.random((2, 3, 5))) targets /= targets.sum(axis=-1, keepdims=True) model = keras.models.Sequential() model.add(keras.layers.Masking(input_shape=(3, 4))) model.add(layer_class(units=5, return_sequences=True, unroll=False)) model.compile(loss='categorical_crossentropy', optimizer=gradient_descent.GradientDescentOptimizer(0.001)) model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1) def test_masking_with_stacking_GRU(self): inputs = np.random.random((2, 3, 4)) targets = np.abs(np.random.random((2, 3, 5))) targets /= targets.sum(axis=-1, keepdims=True) model = keras.models.Sequential() model.add(keras.layers.Masking(input_shape=(3, 4))) model.add(rnn.GRU(10, return_sequences=True, unroll=False)) model.add(rnn.GRU(5, return_sequences=True, unroll=False)) model.compile( loss='categorical_crossentropy', optimizer=gradient_descent.GradientDescentOptimizer(0.01)) model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1) def test_return_sequences_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( rnn.GRU, kwargs={'units': units, 'return_sequences': True}, input_shape=(num_samples, timesteps, embedding_dim)) def test_float64_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( rnn.GRU, kwargs={'units': units, 'return_sequences': True, 'dtype': 'float64'}, input_shape=(num_samples, timesteps, embedding_dim), input_dtype='float64') def test_return_states_GRU(self): layer_class = rnn.GRU x = np.random.random((2, 3, 4)) y = np.abs(np.random.random((2, 5))) s = np.abs(np.random.random((2, 5))) inputs = keras.layers.Input( shape=[3, 4], dtype=dtypes.float32) masked = keras.layers.Masking()(inputs) outputs, states = layer_class(units=5, return_state=True)(masked) model = keras.models.Model(inputs, [outputs, states]) model.compile(loss='categorical_crossentropy', 
optimizer=gradient_descent.GradientDescentOptimizer(0.001)) model.fit(x, [y, s], epochs=1, batch_size=2, verbose=1) def test_dropout_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( rnn.GRU, kwargs={'units': units, 'dropout': 0.1, 'recurrent_dropout': 0.1}, input_shape=(num_samples, timesteps, embedding_dim)) def test_constraints_GRU(self): embedding_dim = 4 layer_class = rnn.GRU k_constraint = keras.constraints.max_norm(0.01) r_constraint = keras.constraints.max_norm(0.01) b_constraint = keras.constraints.max_norm(0.01) layer = layer_class( 5, return_sequences=False, weights=None, input_shape=(None, embedding_dim), kernel_constraint=k_constraint, recurrent_constraint=r_constraint, bias_constraint=b_constraint) layer.build((None, None, embedding_dim)) self.assertEqual(layer.cell.kernel.constraint, k_constraint) self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint) self.assertEqual(layer.cell.bias.constraint, b_constraint) @parameterized.parameters([0, 1, 2]) def test_implementation_mode_GRU(self, implementation_mode): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( rnn.GRU, kwargs={'units': units, 'implementation': implementation_mode}, input_shape=(num_samples, timesteps, embedding_dim)) def test_regularizers_GRU(self): embedding_dim = 4 layer_class = rnn.GRU layer = layer_class( 5, return_sequences=False, weights=None, input_shape=(None, embedding_dim), kernel_regularizer=keras.regularizers.l1(0.01), recurrent_regularizer=keras.regularizers.l1(0.01), bias_regularizer='l2', activity_regularizer='l1') layer.build((None, None, 2)) self.assertEqual(len(layer.losses), 3) x = keras.backend.variable(np.ones((2, 3, 2))) layer(x) if context.executing_eagerly(): self.assertEqual(len(layer.losses), 4) else: self.assertEqual(len(layer.get_losses_for(x)), 1) def test_statefulness_GRU(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 layer_class = rnn.GRU model = keras.models.Sequential() model.add( keras.layers.Embedding( 4, embedding_dim, mask_zero=True, input_length=timesteps, batch_input_shape=(num_samples, timesteps))) layer = layer_class( units, return_sequences=False, stateful=True, weights=None) model.add(layer) model.compile( optimizer=gradient_descent.GradientDescentOptimizer(0.01), loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) out1 = model.predict(np.ones((num_samples, timesteps))) self.assertEqual(out1.shape, (num_samples, units)) # train once so that the states change model.train_on_batch( np.ones((num_samples, timesteps)), np.ones((num_samples, units))) out2 = model.predict(np.ones((num_samples, timesteps))) # if the state is not reset, output should be different self.assertNotEqual(out1.max(), out2.max()) # check that output changes after states are reset # (even though the model itself didn't change) layer.reset_states() out3 = model.predict(np.ones((num_samples, timesteps))) self.assertNotEqual(out2.max(), out3.max()) # check that container-level reset_states() works model.reset_states() out4 = model.predict(np.ones((num_samples, timesteps))) np.testing.assert_allclose(out3, out4, atol=1e-5) # check that the call to `predict` updated the states out5 = model.predict(np.ones((num_samples, timesteps))) self.assertNotEqual(out4.max(), out5.max()) # Check masking layer.reset_states() left_padded_input = np.ones((num_samples, timesteps)) left_padded_input[0, :1] = 0 left_padded_input[1, :2] = 0 
out6 = model.predict(left_padded_input) layer.reset_states() right_padded_input = np.ones((num_samples, timesteps)) right_padded_input[0, -1:] = 0 right_padded_input[1, -2:] = 0 out7 = model.predict(right_padded_input) layer.reset_states() mix_padded_input = np.ones((num_samples, timesteps)) mix_padded_input[0, 1] = 0 mix_padded_input[1, 0] = 0 mix_padded_input[1, 2] = 0 out8 = model.predict(mix_padded_input) self.assertAllClose(out7, out6, atol=1e-5) self.assertAllClose(out8, out7, atol=1e-5) def test_stateful_GRU_training(self): # See b/123587692 for more context. vocab_size = 20 embedding_dim = 10 batch_size = 8 timestep = 12 units = 5 x = np.random.randint(0, vocab_size, size=(batch_size, timestep)) y = np.random.randint(0, vocab_size, size=(batch_size, timestep)) model = keras.Sequential([ keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, timestep]), rnn.GRU(units, return_sequences=True, stateful=True), keras.layers.Dense(vocab_size) ]) model.compile( optimizer='adam', loss='sparse_categorical_crossentropy', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, epochs=1, shuffle=False) @test_util.run_v2_only def test_explicit_device_with_go_backward_and_mask(self): batch_size = 8 timestep = 7 masksteps = 5 units = 4 inputs = np.random.randn(batch_size, timestep, units).astype(np.float32) mask = np.ones((batch_size, timestep)).astype(np.bool) mask[:, masksteps:] = 0 # Test for V1 behavior. lstm_v1 = rnn_v1.GRU(units, return_sequences=True, go_backwards=True) with test_util.device(use_gpu=True): outputs_masked_v1 = lstm_v1(inputs, mask=constant_op.constant(mask)) outputs_trimmed_v1 = lstm_v1(inputs[:, :masksteps]) self.assertAllClose(outputs_masked_v1[:, -masksteps:], outputs_trimmed_v1) # Test for V2 behavior. lstm = rnn.GRU(units, return_sequences=True, go_backwards=True) with test_util.device(use_gpu=True): outputs_masked = lstm(inputs, mask=constant_op.constant(mask)) outputs_trimmed = lstm(inputs[:, :masksteps]) self.assertAllClose(outputs_masked[:, -masksteps:], outputs_trimmed) @test_util.run_deprecated_v1 def test_v1_session_behavior(self): # See b/139132348 for more details. 
x = np.random.uniform(size=(100, 4, 8)) y = np.random.uniform(size=(100, 1)) dataset = dataset_ops.Dataset.from_tensor_slices( (x, y)).shuffle(100).batch(32) inp = keras.layers.Input(shape=(4, 8)) layer = rnn.GRU(1)(inp) layer = keras.layers.Dense(1)(layer) model = keras.models.Model(inp, layer) model.compile(loss='mse', optimizer='sgd') model.fit(dataset) class GRULayerGradientTapeTest(test.TestCase): @test_util.run_in_graph_and_eager_modes(config=_config) def test_in_tape(self): if not context.executing_eagerly(): self.skipTest('GradientTape test requires eager execution.') time_steps = 10 embedding_size = 11 gru_unit_size = 12 gru = rnn.GRU(gru_unit_size, return_sequences=True, return_state=True, recurrent_activation='sigmoid', recurrent_initializer='glorot_uniform') x = random_ops.random_uniform([1, time_steps, embedding_size]) y = random_ops.random_uniform([1, gru_unit_size]) with backprop.GradientTape() as tape: hidden_state = array_ops.zeros([1, gru_unit_size], dtype=dtypes.float32) _, state = gru(x, initial_state=hidden_state) loss = math_ops.reduce_mean(math_ops.square(state - y)) tape.gradient(loss, gru.variables) @keras_parameterized.run_all_keras_modes(config=_config) class GRUGraphRewriteTest(keras_parameterized.TestCase): input_shape = 10 output_shape = 8 rnn_state_size = 8 timestep = 4 batch = 100 epoch = 1 def _test_runtime_with_model(self, model): (x_train, y_train), _ = testing_utils.get_test_data( train_samples=self.batch, test_samples=0, input_shape=(self.timestep, self.input_shape), num_classes=self.output_shape) y_train = keras.utils.to_categorical(y_train, self.output_shape) model.compile( optimizer='sgd', loss=['categorical_crossentropy', None], experimental_run_tf_function=testing_utils.should_run_tf_function()) existing_loss = 0 for _ in range(self.epoch): history = model.fit(x_train, y_train) loss_value = history.history['loss'][0] self.assertNotEqual(existing_loss, loss_value) existing_loss = loss_value _, runtime_value = model.predict(x_train) if test.is_gpu_available(): self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU) else: self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU) @test_util.run_v2_only def test_GRU_runtime(self): layer = rnn.GRU(self.rnn_state_size, return_runtime=True) inputs = keras.layers.Input( shape=[self.timestep, self.input_shape], dtype=dtypes.float32) outputs, runtime = layer(inputs) # Expand the runtime so that it is a 1D tensor instead of scalar. # TF model does not work with scalar model output, specially during # aggregation. runtime = keras.layers.Lambda( lambda x: array_ops.expand_dims(x, axis=-1))(runtime) model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime]) self._test_runtime_with_model(model) @test_util.run_v2_only def test_GRU_runtime_with_mask(self): # Masking will affect which backend is selected based on whether the mask # is strictly right padded. layer = rnn.GRU(self.rnn_state_size, return_runtime=True) inputs = keras.layers.Input( shape=[self.timestep, self.input_shape], dtype=dtypes.float32) masked_inputs = keras.layers.Masking()(inputs) outputs, runtime = layer(masked_inputs) # Expand the runtime so that it is a 1D tensor instead of scalar. # TF model does not work with scalar model output, specially during # aggregation.
runtime = keras.layers.Lambda( lambda x: array_ops.expand_dims(x, axis=-1))(runtime) model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime]) (x_train, y_train), _ = testing_utils.get_test_data( train_samples=self.batch, test_samples=0, input_shape=(self.timestep, self.input_shape), num_classes=self.output_shape) y_train = keras.utils.to_categorical(y_train, self.output_shape) model.compile( optimizer='sgd', loss=['categorical_crossentropy', None], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x_train, y_train) # Verify unpadded data. _, runtime_value = model.predict(x_train) if test.is_gpu_available(): self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU) else: self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU) # Update x/y to be right padded by setting the last timestep to 0 x_train[:, -1, :] = 0 y_train[:, -1] = 0 _, runtime_value = model.predict(x_train) if test.is_gpu_available(): self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU) else: self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU) # Further update x/y to be mix padded (masks in the middle), and verify # only cpu kernel can be selected. x_train[:, -3, :] = 0 y_train[:, -3] = 0 _, runtime_value = model.predict(x_train) self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU) @test_util.run_v2_only def test_GRU_runtime_with_cond(self): # This test is to demonstrate the graph rewrite of grappler plugin under # the condition that the function returns different number of internal # states. layer = rnn.GRU(self.rnn_state_size, return_runtime=True) inputs = keras.layers.Input( shape=[self.timestep, self.input_shape], dtype=dtypes.float32) zeros = array_ops.zeros([self.batch, self.output_shape]) dummy_runtime = rnn._runtime(rnn._RUNTIME_UNKNOWN) a = constant_op.constant(0) b = constant_op.constant(1) # Will always run the GRU layer. outputs, runtime = control_flow_ops.cond( gen_math_ops.less(a, b), lambda: layer(inputs), lambda: (zeros, dummy_runtime)) # Expand the runtime so that it is a 1D tensor instead of scalar. # TF model does not work with scalar model output, specially during # aggregation. runtime = keras.layers.Lambda( lambda x: array_ops.expand_dims(x, axis=-1))(runtime) model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime]) self._test_runtime_with_model(model) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/gru_v2_test.py
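A condensed sketch of the cuDNN-eligibility rule that the first test above exercises (unit count illustrative): the layer records at construction time whether its arguments are compatible with the cuDNN kernel, and any argument from the parameterized list forces the pure-TensorFlow backend.

```python
from tensorflow.python.keras.layers import recurrent_v2 as rnn

# Defaults (tanh/sigmoid activations, recurrent_dropout=0, unroll=False,
# use_bias=True, reset_after=True) keep the layer cuDNN-eligible.
eligible = rnn.GRU(8)
# Changing any one requirement, e.g. the recurrent activation, disables it.
ineligible = rnn.GRU(8, recurrent_activation='relu')

assert eligible.could_use_cudnn
assert not ineligible.could_use_cudnn
```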
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Wrapper layers: layers that augment the functionality of another layer. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.layers.recurrent import _standardize_args from tensorflow.python.keras.utils import generic_utils from tensorflow.python.keras.utils import layer_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import array_ops from tensorflow.python.util import nest from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import keras_export @keras_export('keras.layers.Wrapper') class Wrapper(Layer): """Abstract wrapper base class. Wrappers take another layer and augment it in various ways. Do not use this class as a layer; it is only an abstract base class. Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers. Arguments: layer: The layer to be wrapped. """ def __init__(self, layer, **kwargs): assert isinstance(layer, Layer) self.layer = layer # Tracks mapping of Wrapper inputs to inner layer inputs. Useful when # the inner layer has update ops that depend on its inputs (as opposed # to the inputs to the Wrapper layer). self._input_map = {} super(Wrapper, self).__init__(**kwargs) def build(self, input_shape=None): if not self.layer.built: self.layer.build(input_shape) self.built = True @property def activity_regularizer(self): if hasattr(self.layer, 'activity_regularizer'): return self.layer.activity_regularizer else: return None def get_config(self): config = { 'layer': { 'class_name': self.layer.__class__.__name__, 'config': self.layer.get_config() } } base_config = super(Wrapper, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top layer = deserialize_layer( config.pop('layer'), custom_objects=custom_objects) return cls(layer, **config) @keras_export('keras.layers.TimeDistributed') class TimeDistributed(Wrapper): """This wrapper allows you to apply a layer to every temporal slice of an input. The input should be at least 3D, and the dimension of index one will be considered to be the temporal dimension. Consider a batch of 32 samples, where each sample is a sequence of 10 vectors of 16 dimensions. The batch input shape of the layer is then `(32, 10, 16)`, and the `input_shape`, not including the samples dimension, is `(10, 16)`.
You can then use `TimeDistributed` to apply a `Dense` layer to each of the 10 timesteps, independently: ```python # as the first layer in a model model = Sequential() model.add(TimeDistributed(Dense(8), input_shape=(10, 16))) # now model.output_shape == (None, 10, 8) ``` The output will then have shape `(32, 10, 8)`. In subsequent layers, there is no need for the `input_shape`: ```python model.add(TimeDistributed(Dense(32))) # now model.output_shape == (None, 10, 32) ``` The output will then have shape `(32, 10, 32)`. `TimeDistributed` can be used with arbitrary layers, not just `Dense`, for instance with a `Conv2D` layer: ```python model = Sequential() model.add(TimeDistributed(Conv2D(64, (3, 3)), input_shape=(10, 299, 299, 3))) ``` Arguments: layer: a layer instance. Call arguments: inputs: Input tensor. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the wrapped layer (only if the layer supports this argument). mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. This argument is passed to the wrapped layer (only if the layer supports this argument). Raises: ValueError: If not initialized with a `Layer` instance. """ def __init__(self, layer, **kwargs): if not isinstance(layer, Layer): raise ValueError( 'Please initialize `TimeDistributed` layer with a ' '`Layer` instance. You passed: {input}'.format(input=layer)) super(TimeDistributed, self).__init__(layer, **kwargs) self.supports_masking = True # It is safe to use the fast, reshape-based approach with all of our # built-in Layers. self._always_use_reshape = ( layer_utils.is_builtin_layer(layer) and not getattr(layer, 'stateful', False)) def _get_shape_tuple(self, init_tuple, tensor, start_idx, int_shape=None): """Finds non-specific dimensions in the static shapes. The static shapes are replaced with the corresponding dynamic shapes of the tensor. Arguments: init_tuple: a tuple, the first part of the output shape tensor: the tensor from which to get the (static and dynamic) shapes as the last part of the output shape start_idx: int, which indicate the first dimension to take from the static shape of the tensor int_shape: an alternative static shape to take as the last part of the output shape Returns: The new int_shape with the first part from init_tuple and the last part from either `int_shape` (if provided) or `tensor.shape`, where every `None` is replaced by the corresponding dimension from `tf.shape(tensor)`. """ # replace all None in int_shape by K.shape if int_shape is None: int_shape = K.int_shape(tensor)[start_idx:] if not any(not s for s in int_shape): return init_tuple + tuple(int_shape) shape = K.shape(tensor) int_shape = list(int_shape) for i, s in enumerate(int_shape): if not s: int_shape[i] = shape[start_idx + i] return init_tuple + tuple(int_shape) def build(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if len(input_shape) < 3: raise ValueError( '`TimeDistributed` Layer should be passed an `input_shape ` ' 'with at least 3 dimensions, received: ' + str(input_shape)) # Don't enforce the batch or time dimension. 
self.input_spec = InputSpec(shape=[None, None] + input_shape[2:]) child_input_shape = [input_shape[0]] + input_shape[2:] super(TimeDistributed, self).build(tuple(child_input_shape)) self.built = True def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() child_input_shape = tensor_shape.TensorShape([input_shape[0]] + input_shape[2:]) child_output_shape = self.layer.compute_output_shape(child_input_shape) if not isinstance(child_output_shape, tensor_shape.TensorShape): child_output_shape = tensor_shape.TensorShape(child_output_shape) child_output_shape = child_output_shape.as_list() timesteps = input_shape[1] return tensor_shape.TensorShape([child_output_shape[0], timesteps] + child_output_shape[1:]) def call(self, inputs, training=None, mask=None): kwargs = {} if generic_utils.has_arg(self.layer.call, 'training'): kwargs['training'] = training input_shape = K.int_shape(inputs) if input_shape[0] and not self._always_use_reshape: # batch size matters, use rnn-based implementation def step(x, _): output = self.layer(x, **kwargs) return output, [] _, outputs, _ = K.rnn( step, inputs, initial_states=[], input_length=input_shape[1], unroll=False) y = outputs else: # No batch size specified, therefore the layer will be able # to process batches of any size. # We can go with reshape-based implementation for performance. input_length = input_shape[1] if not input_length: input_length = array_ops.shape(inputs)[1] inner_input_shape = self._get_shape_tuple((-1,), inputs, 2) # Shape: (num_samples * timesteps, ...). And track the # transformation in self._input_map. input_uid = generic_utils.object_list_uid(inputs) inputs = array_ops.reshape(inputs, inner_input_shape) self._input_map[input_uid] = inputs # (num_samples * timesteps, ...) if generic_utils.has_arg(self.layer.call, 'mask') and mask is not None: inner_mask_shape = self._get_shape_tuple((-1,), mask, 2) kwargs['mask'] = K.reshape(mask, inner_mask_shape) y = self.layer(inputs, **kwargs) # Shape: (num_samples, timesteps, ...) output_shape = self.compute_output_shape(input_shape).as_list() output_shape = self._get_shape_tuple( (-1, input_length), y, 1, output_shape[2:]) y = array_ops.reshape(y, output_shape) return y def compute_mask(self, inputs, mask=None): """Computes an output mask tensor for the `TimeDistributed` layer. This is based on the inputs, mask, and the inner layer. If batch size is specified: Simply return the input `mask`. (An rnn-based implementation with more than one RNN input is required but not supported in tf.keras yet.) Otherwise we call `compute_mask` of the inner layer at each time step. If the output mask at each time step is not `None`: (E.g., inner layer is Masking or RNN) Concatenate all of them and return the concatenation. If the output mask at each time step is `None` and the input mask is not `None`: (E.g., inner layer is Dense) Reduce the input_mask to 2 dimensions and return it. Otherwise (both the output mask and the input mask are `None`): (E.g., `mask` is not used at all) Return `None`. Arguments: inputs: Tensor with shape [batch size, timesteps, ...] indicating the input to TimeDistributed. If static shape information is available for "batch size", `mask` is returned unmodified. mask: Either None (indicating no masking) or a Tensor indicating the input mask for TimeDistributed. The shape can be static or dynamic. Returns: Either None (no masking), or a [batch size, timesteps, ...]
Tensor with an output mask for the `TimeDistributed` layer. If the inner layer's computed output mask is `None` but the input mask is not, the input mask is reduced to two dimensions and returned; otherwise the inner layer's computed output mask is reshaped back to `(batch size, timesteps, ...)` and returned. """ # Cases that need to call layer.compute_mask even when input_mask is None: # a Masking layer, or an Embedding layer with mask_zero=True. input_shape = K.int_shape(inputs) if input_shape[0]: # batch size matters, we currently do not handle mask explicitly return mask inner_mask = mask if inner_mask is not None: inner_mask_shape = self._get_shape_tuple((-1,), mask, 2) inner_mask = K.reshape(inner_mask, inner_mask_shape) input_uid = generic_utils.object_list_uid(inputs) inner_inputs = self._input_map.get(input_uid, inputs) output_mask = self.layer.compute_mask(inner_inputs, inner_mask) if output_mask is None: if mask is None: return None # input_mask is not None, and output_mask is None: # we should return a not-None mask output_mask = mask for _ in range(2, len(K.int_shape(mask))): output_mask = K.any(output_mask, axis=-1) else: # output_mask is not None. We need to reshape it input_length = input_shape[1] if not input_length: input_length = K.shape(inputs)[1] output_mask_int_shape = K.int_shape(output_mask) if output_mask_int_shape is None: # if the output_mask does not have a static shape, # its shape must be the same as mask's if mask is not None: output_mask_int_shape = K.int_shape(mask) else: output_mask_int_shape = K.compute_output_shape(input_shape)[:-1] output_mask_shape = self._get_shape_tuple( (-1, input_length), output_mask, 1, output_mask_int_shape[1:]) output_mask = K.reshape(output_mask, output_mask_shape) return output_mask @keras_export('keras.layers.Bidirectional') class Bidirectional(Wrapper): """Bidirectional wrapper for RNNs. Arguments: layer: `Recurrent` instance. merge_mode: Mode by which outputs of the forward and backward RNNs will be combined. One of {'sum', 'mul', 'concat', 'ave', None}. If None, the outputs will not be combined; they will be returned as a list. backward_layer: Optional `Recurrent` instance to be used to handle backwards input processing. If `backward_layer` is not provided, the layer instance passed as the `layer` argument will be used to generate the backward layer automatically. Note that the provided `backward_layer` layer should have properties matching those of the `layer` argument, in particular it should have the same values for `stateful`, `return_state`, `return_sequences`, etc. In addition, `backward_layer` and `layer` should have different `go_backwards` argument values. A `ValueError` will be raised if these requirements are not met. Call arguments: The call arguments for this layer are the same as those of the wrapped RNN layer. Raises: ValueError: 1. If `layer` or `backward_layer` is not a `Layer` instance. 2. In case of invalid `merge_mode` argument. 3. If `backward_layer` has mismatched properties compared to `layer`.
Examples: ```python model = Sequential() model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5, 10))) model.add(Bidirectional(LSTM(10))) model.add(Dense(5)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop') # With custom backward layer model = Sequential() forward_layer = LSTM(10, return_sequences=True) backward_layer = LSTM(10, activation='relu', return_sequences=True, go_backwards=True) model.add(Bidirectional(forward_layer, backward_layer=backward_layer, input_shape=(5, 10))) model.add(Dense(5)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop') ``` """ def __init__(self, layer, merge_mode='concat', weights=None, backward_layer=None, **kwargs): if not isinstance(layer, Layer): raise ValueError( 'Please initialize `Bidirectional` layer with a ' '`Layer` instance. You passed: {input}'.format(input=layer)) if backward_layer is not None and not isinstance(backward_layer, Layer): raise ValueError('`backward_layer` needs to be a `Layer` instance. ' 'You passed: {input}'.format(input=backward_layer)) if merge_mode not in ['sum', 'mul', 'ave', 'concat', None]: raise ValueError('Invalid merge mode. ' 'Merge mode should be one of ' '{"sum", "mul", "ave", "concat", None}') # Recreate the forward layer from the original layer config, so that it will # not carry over any state from the layer. self.forward_layer = self._recreate_layer_from_config(layer) if backward_layer is None: self.backward_layer = self._recreate_layer_from_config( layer, go_backwards=True) else: self.backward_layer = backward_layer # Keep the custom backward layer config, so that we can save it later. The # layer's name might be updated below with prefix 'backward_', and we want # to preserve the original config. self._backward_layer_config = backward_layer.get_config() self.forward_layer._name = 'forward_' + self.forward_layer.name self.backward_layer._name = 'backward_' + self.backward_layer.name self._verify_layer_config() def force_zero_output_for_mask(layer): # Force the zero_output_for_mask to be True if returning sequences. if getattr(layer, 'zero_output_for_mask', None) is not None: layer.zero_output_for_mask = layer.return_sequences force_zero_output_for_mask(self.forward_layer) force_zero_output_for_mask(self.backward_layer) self.merge_mode = merge_mode if weights: nw = len(weights) self.forward_layer.initial_weights = weights[:nw // 2] self.backward_layer.initial_weights = weights[nw // 2:] self.stateful = layer.stateful self.return_sequences = layer.return_sequences self.return_state = layer.return_state self.supports_masking = True self._trainable = True self._num_constants = 0 # We don't want to track `layer` since we're already tracking the two copies # of it we actually run.
self._setattr_tracking = False super(Bidirectional, self).__init__(layer, **kwargs) self._setattr_tracking = True self.input_spec = layer.input_spec def _verify_layer_config(self): """Ensure the forward and backward layers have valid common property.""" if self.forward_layer.go_backwards == self.backward_layer.go_backwards: raise ValueError('Forward layer and backward layer should have different ' '`go_backwards` value.') common_attributes = ('stateful', 'return_sequences', 'return_state') for a in common_attributes: forward_value = getattr(self.forward_layer, a) backward_value = getattr(self.backward_layer, a) if forward_value != backward_value: raise ValueError( 'Forward layer and backward layer are expected to have the same ' 'value for attribute {attr}, got {forward} and {backward}'.format( attr=a, forward=forward_value, backward=backward_value)) def _recreate_layer_from_config(self, layer, go_backwards=False): # When recreating the layer from its config, it is possible that the layer # is a RNN layer that contains custom cells. In this case we inspect the # layer and pass the custom cell class as part of the `custom_objects` # argument when calling `from_config`. # See https://github.com/tensorflow/tensorflow/issues/26581 for more detail. config = layer.get_config() if go_backwards: config['go_backwards'] = not config['go_backwards'] if 'custom_objects' in tf_inspect.getfullargspec( layer.__class__.from_config).args: custom_objects = {} cell = getattr(layer, 'cell', None) if cell is not None: custom_objects[cell.__class__.__name__] = cell.__class__ # For StackedRNNCells stacked_cells = getattr(cell, 'cells', []) for c in stacked_cells: custom_objects[c.__class__.__name__] = c.__class__ return layer.__class__.from_config(config, custom_objects=custom_objects) else: return layer.__class__.from_config(config) @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): output_shape = self.forward_layer.compute_output_shape(input_shape) if not isinstance(output_shape, tensor_shape.TensorShape): output_shape = tensor_shape.TensorShape(output_shape) output_shape = tuple(output_shape.as_list()) if self.return_state: state_shape = output_shape[1:] output_shape = output_shape[0] if self.merge_mode == 'concat': output_shape = list(output_shape) output_shape[-1] *= 2 output_shape = tuple(output_shape) elif self.merge_mode is None: output_shape = [output_shape, copy.copy(output_shape)] if self.return_state: if self.merge_mode is None: return output_shape + state_shape + copy.copy(state_shape) return [output_shape] + state_shape + copy.copy(state_shape) return output_shape def __call__(self, inputs, initial_state=None, constants=None, **kwargs): """`Bidirectional.__call__` implements the same API as the wrapped `RNN`.""" inputs, initial_state, constants = _standardize_args( inputs, initial_state, constants, self._num_constants) if isinstance(inputs, list): if len(inputs) > 1: initial_state = inputs[1:] inputs = inputs[0] if initial_state is None and constants is None: return super(Bidirectional, self).__call__(inputs, **kwargs) # Applies the same workaround as in `RNN.__call__` additional_inputs = [] additional_specs = [] if initial_state is not None: # Check if `initial_state` can be splitted into half num_states = len(initial_state) if num_states % 2 > 0: raise ValueError( 'When passing `initial_state` to a Bidirectional RNN, ' 'the state should be a list containing the states of ' 'the underlying RNNs. 
' 'Found: ' + str(initial_state)) kwargs['initial_state'] = initial_state additional_inputs += initial_state state_specs = [InputSpec(shape=K.int_shape(state)) for state in initial_state] self.forward_layer.state_spec = state_specs[:num_states // 2] self.backward_layer.state_spec = state_specs[num_states // 2:] additional_specs += state_specs if constants is not None: kwargs['constants'] = constants additional_inputs += constants constants_spec = [InputSpec(shape=K.int_shape(constant)) for constant in constants] self.forward_layer.constants_spec = constants_spec self.backward_layer.constants_spec = constants_spec additional_specs += constants_spec self._num_constants = len(constants) self.forward_layer._num_constants = self._num_constants self.backward_layer._num_constants = self._num_constants is_keras_tensor = K.is_keras_tensor(additional_inputs[0]) for tensor in additional_inputs: if K.is_keras_tensor(tensor) != is_keras_tensor: raise ValueError('The initial state of a Bidirectional' ' layer cannot be specified with a mix of' ' Keras tensors and non-Keras tensors' ' (a "Keras tensor" is a tensor that was' ' returned by a Keras layer, or by `Input`)') if is_keras_tensor: # Compute the full input spec, including state full_input = [inputs] + additional_inputs # The original input_spec is None since there could be a nested tensor # input. Update the input_spec to match the inputs. full_input_spec = [None for _ in range(len(nest.flatten(inputs))) ] + additional_specs # Removing kwargs since the value are passed with input list. kwargs['initial_state'] = None kwargs['constants'] = None # Perform the call with temporarily replaced input_spec original_input_spec = self.input_spec self.input_spec = full_input_spec output = super(Bidirectional, self).__call__(full_input, **kwargs) self.input_spec = original_input_spec return output else: return super(Bidirectional, self).__call__(inputs, **kwargs) def call(self, inputs, training=None, mask=None, initial_state=None, constants=None): """`Bidirectional.call` implements the same API as the wrapped `RNN`.""" kwargs = {} if generic_utils.has_arg(self.layer.call, 'training'): kwargs['training'] = training if generic_utils.has_arg(self.layer.call, 'mask'): kwargs['mask'] = mask if generic_utils.has_arg(self.layer.call, 'constants'): kwargs['constants'] = constants if generic_utils.has_arg(self.layer.call, 'initial_state'): if isinstance(inputs, list) and len(inputs) > 1: # initial_states are keras tensors, which means they are passed in # together with inputs as list. The initial_states need to be split into # forward and backward section, and be feed to layers accordingly. forward_inputs = [inputs[0]] backward_inputs = [inputs[0]] pivot = (len(inputs) - self._num_constants) // 2 + 1 # add forward initial state forward_inputs += inputs[1:pivot] if not self._num_constants: # add backward initial state backward_inputs += inputs[pivot:] else: # add backward initial state backward_inputs += inputs[pivot:-self._num_constants] # add constants for forward and backward layers forward_inputs += inputs[-self._num_constants:] backward_inputs += inputs[-self._num_constants:] forward_state, backward_state = None, None if 'constants' in kwargs: kwargs['constants'] = None elif initial_state is not None: # initial_states are not keras tensors, eg eager tensor from np array. # They are only passed in from kwarg initial_state, and should be passed # to forward/backward layer via kwarg initial_state as well. 
forward_inputs, backward_inputs = inputs, inputs half = len(initial_state) // 2 forward_state = initial_state[:half] backward_state = initial_state[half:] else: forward_inputs, backward_inputs = inputs, inputs forward_state, backward_state = None, None y = self.forward_layer(forward_inputs, initial_state=forward_state, **kwargs) y_rev = self.backward_layer(backward_inputs, initial_state=backward_state, **kwargs) else: y = self.forward_layer(inputs, **kwargs) y_rev = self.backward_layer(inputs, **kwargs) if self.return_state: states = y[1:] + y_rev[1:] y = y[0] y_rev = y_rev[0] if self.return_sequences: y_rev = K.reverse(y_rev, 1) if self.merge_mode == 'concat': output = K.concatenate([y, y_rev]) elif self.merge_mode == 'sum': output = y + y_rev elif self.merge_mode == 'ave': output = (y + y_rev) / 2 elif self.merge_mode == 'mul': output = y * y_rev elif self.merge_mode is None: output = [y, y_rev] else: raise ValueError( 'Unrecognized value for `merge_mode`: %s' % (self.merge_mode)) if self.return_state: if self.merge_mode is None: return output + states return [output] + states return output def reset_states(self): self.forward_layer.reset_states() self.backward_layer.reset_states() def build(self, input_shape): with K.name_scope(self.forward_layer.name): self.forward_layer.build(input_shape) with K.name_scope(self.backward_layer.name): self.backward_layer.build(input_shape) self.built = True def compute_mask(self, inputs, mask): if isinstance(mask, list): mask = mask[0] if self.return_sequences: if not self.merge_mode: output_mask = [mask, mask] else: output_mask = mask else: output_mask = [None, None] if not self.merge_mode else None if self.return_state: states = self.forward_layer.states state_mask = [None for _ in states] if isinstance(output_mask, list): return output_mask + state_mask * 2 return [output_mask] + state_mask * 2 return output_mask @property def constraints(self): constraints = {} if hasattr(self.forward_layer, 'constraints'): constraints.update(self.forward_layer.constraints) constraints.update(self.backward_layer.constraints) return constraints def get_config(self): config = {'merge_mode': self.merge_mode} if self._num_constants: config['num_constants'] = self._num_constants if hasattr(self, '_backward_layer_config'): config['backward_layer'] = { 'class_name': self.backward_layer.__class__.__name__, 'config': self._backward_layer_config, } base_config = super(Bidirectional, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): # Instead of updating the input, create a copy and use that. config = config.copy() num_constants = config.pop('num_constants', 0) backward_layer_config = config.pop('backward_layer', None) if backward_layer_config is not None: from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top backward_layer = deserialize_layer( backward_layer_config, custom_objects=custom_objects) config['backward_layer'] = backward_layer layer = super(Bidirectional, cls).from_config(config, custom_objects=custom_objects) layer._num_constants = num_constants return layer
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/wrappers.py
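A short sketch tying the two wrappers above together (the shape values are hypothetical): `Bidirectional` with the default `merge_mode='concat'` doubles the feature axis, and `TimeDistributed` then applies the same `Dense` weights to every timestep of the merged sequence.

```python
from tensorflow.python import keras

model = keras.Sequential([
    keras.layers.Bidirectional(
        keras.layers.LSTM(10, return_sequences=True), input_shape=(5, 16)),
    # (None, 5, 20): forward and backward outputs concatenated on the last axis
    keras.layers.TimeDistributed(keras.layers.Dense(8)),
    # (None, 5, 8): one Dense layer applied independently to each timestep
])
assert model.output_shape == (None, 5, 8)
```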
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for separable convolutional layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test @keras_parameterized.run_all_keras_modes class SeparableConv1DTest(keras_parameterized.TestCase): def _run_test(self, kwargs): num_samples = 2 stack_size = 3 length = 7 with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.SeparableConv1D, kwargs=kwargs, input_shape=(num_samples, length, stack_size)) @parameterized.named_parameters( ('padding_valid', {'padding': 'valid'}), ('padding_same', {'padding': 'same'}), ('padding_same_dilation_2', {'padding': 'same', 'dilation_rate': 2}), ('padding_causal', {'padding': 'causal'}), ('strides', {'strides': 2}), ('dilation_rate', {'dilation_rate': 2}), ('depth_multiplier', {'depth_multiplier': 2}), ) def test_separable_conv1d(self, kwargs): kwargs['filters'] = 2 kwargs['kernel_size'] = 3 self._run_test(kwargs) def test_separable_conv1d_regularizers(self): kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'depthwise_regularizer': 'l2', 'pointwise_regularizer': 'l2', 'bias_regularizer': 'l2', 'activity_regularizer': 'l2', 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.SeparableConv1D(**kwargs) layer.build((None, 5, 2)) self.assertEqual(len(layer.losses), 3) layer(keras.backend.variable(np.ones((1, 5, 2)))) self.assertEqual(len(layer.losses), 4) def test_separable_conv1d_constraints(self): d_constraint = lambda x: x p_constraint = lambda x: x b_constraint = lambda x: x kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'pointwise_constraint': p_constraint, 'depthwise_constraint': d_constraint, 'bias_constraint': b_constraint, 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.SeparableConv1D(**kwargs) layer.build((None, 5, 2)) self.assertEqual(layer.depthwise_kernel.constraint, d_constraint) self.assertEqual(layer.pointwise_kernel.constraint, p_constraint) self.assertEqual(layer.bias.constraint, b_constraint) @keras_parameterized.run_all_keras_modes class SeparableConv2DTest(keras_parameterized.TestCase): def _run_test(self, kwargs): num_samples = 2 stack_size = 3 num_row = 7 num_col = 6 with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.SeparableConv2D, kwargs=kwargs, input_shape=(num_samples, num_row, num_col, stack_size)) @parameterized.named_parameters( ('padding_valid', {'padding': 'valid'}), ('padding_same', {'padding': 'same'}), ('padding_same_dilation_2', {'padding': 'same', 'dilation_rate': 2}), ('strides', {'strides': 2}), # Only runs on GPU with 
CUDA, channels_first is not supported on CPU. # TODO(b/62340061): Support channels_first on CPU. ('data_format', {'data_format': 'channels_first'}), ('dilation_rate', {'dilation_rate': 2}), ('depth_multiplier', {'depth_multiplier': 2}), ) def test_separable_conv2d(self, kwargs): kwargs['filters'] = 2 kwargs['kernel_size'] = 3 if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True): self._run_test(kwargs) def test_separable_conv2d_regularizers(self): kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'depthwise_regularizer': 'l2', 'pointwise_regularizer': 'l2', 'bias_regularizer': 'l2', 'activity_regularizer': 'l2', 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.SeparableConv2D(**kwargs) layer.build((None, 5, 5, 2)) self.assertEqual(len(layer.losses), 3) layer(keras.backend.variable(np.ones((1, 5, 5, 2)))) self.assertEqual(len(layer.losses), 4) def test_separable_conv2d_constraints(self): d_constraint = lambda x: x p_constraint = lambda x: x b_constraint = lambda x: x kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'pointwise_constraint': p_constraint, 'depthwise_constraint': d_constraint, 'bias_constraint': b_constraint, 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.SeparableConv2D(**kwargs) layer.build((None, 5, 5, 2)) self.assertEqual(layer.depthwise_kernel.constraint, d_constraint) self.assertEqual(layer.pointwise_kernel.constraint, p_constraint) self.assertEqual(layer.bias.constraint, b_constraint)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/separable_convolutional_test.py
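A sketch of the factorization those tests cover (shape values illustrative): a separable convolution runs a per-channel depthwise convolution, widened by `depth_multiplier`, followed by a 1x1 pointwise convolution down to `filters` output channels.

```python
import numpy as np
from tensorflow.python import keras

layer = keras.layers.SeparableConv2D(
    filters=4, kernel_size=3, padding='same', depth_multiplier=2)
y = layer(np.ones((2, 7, 6, 3), dtype='float32'))
# Depthwise step: 3 input channels * depth_multiplier 2 = 6 intermediate
# channels; the pointwise 1x1 conv then maps those 6 channels to 4 filters.
assert y.shape.as_list() == [2, 7, 6, 4]
```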
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Recurrent layers for TF 2.0. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import uuid from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.framework import constant_op from tensorflow.python.framework import device from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.layers import recurrent from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_cudnn_rnn_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.util.tf_export import keras_export # The following string constants are used by the Defun approach for the unified # backend of LSTM and GRU. _DEFUN_API_NAME_ATTRIBUTE = 'api_implements' _DEFUN_DEVICE_ATTRIBUTE = 'api_preferred_device' _CPU_DEVICE_NAME = 'CPU' _GPU_DEVICE_NAME = 'GPU' # The following number constants are used to represent the runtime of the defun # backend function. Since the CPU and GPU implementations are mathematically the # same, we need some signal for the function to indicate which one was executed. # This is for testing purposes, to verify the correctness of swapping the # backend function. _RUNTIME_UNKNOWN = 0 _RUNTIME_CPU = 1 _RUNTIME_GPU = 2 @keras_export('keras.layers.GRUCell', v1=[]) class GRUCell(recurrent.GRUCell): """Cell class for the GRU layer. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 (default) will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. reset_after: GRU convention (whether to apply reset gate after or before matrix multiplication). False = "before", True = "after" (default and CuDNN compatible). Call arguments: inputs: A 2D tensor. states: List of state tensors corresponding to the previous timestep. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used. """ def __init__(self, units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., implementation=2, reset_after=True, **kwargs): super(GRUCell, self).__init__( units, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, implementation=implementation, reset_after=reset_after, **kwargs) @keras_export('keras.layers.GRU', v1=[]) class GRU(recurrent.DropoutRNNCellMixin, recurrent.GRU): """Gated Recurrent Unit - Cho et al. 2014. Based on available runtime hardware and constraints, this layer will choose different implementations (cuDNN-based or pure-TensorFlow) to maximize the performance. If a GPU is available and all the arguments to the layer meet the requirement of the CuDNN kernel (see below for details), the layer will use a fast cuDNN implementation. The requirements to use the cuDNN implementation are: 1. `activation` == 'tanh' 2. `recurrent_activation` == 'sigmoid' 3. `recurrent_dropout` == 0 4. `unroll` is False 5. `use_bias` is True 6. `reset_after` is True 7. Inputs are either not masked or strictly right padded. There are two variants of the GRU implementation. The default one is based on [v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to hidden state before matrix multiplication. The other one is based on [original](https://arxiv.org/abs/1406.1078v1) and has the order reversed. The second variant is compatible with CuDNNGRU (GPU-only) and allows inference on CPU. Thus it has separate biases for `kernel` and `recurrent_kernel`. To use this variant, set `reset_after=True` and `recurrent_activation='sigmoid'`. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie.
"linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation").. kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. unroll: Boolean (default False). If True, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed-up a RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences. reset_after: GRU convention (whether to apply reset gate after or before matrix multiplication). False = "before", True = "after" (default and CuDNN compatible). Call arguments: inputs: A 3D tensor. mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked (optional, defaults to `None`). training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is only relevant if `dropout` or `recurrent_dropout` is used (optional, defaults to `None`). initial_state: List of initial state tensors to be passed to the first call of the cell (optional, defaults to `None` which causes creation of zero-filled initial state tensors). 
""" def __init__(self, units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., implementation=2, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False, time_major=False, reset_after=True, **kwargs): # return_runtime is a flag for testing, which shows the real backend # implementation chosen by grappler in graph mode. self._return_runtime = kwargs.pop('return_runtime', False) super(GRU, self).__init__( units, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, implementation=implementation, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, unroll=unroll, time_major=time_major, reset_after=reset_after, **kwargs) # CuDNN uses following setting by default and not configurable. self.could_use_cudnn = ( activation == 'tanh' and recurrent_activation == 'sigmoid' and recurrent_dropout == 0 and not unroll and use_bias and reset_after and ops.executing_eagerly_outside_functions()) def call(self, inputs, mask=None, training=None, initial_state=None): # GRU does not support constants. Ignore it during process. inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None) if isinstance(mask, list): mask = mask[0] input_shape = K.int_shape(inputs) timesteps = input_shape[0] if self.time_major else input_shape[1] if not self.could_use_cudnn: kwargs = {'training': training} self.cell.reset_dropout_mask() self.cell.reset_recurrent_dropout_mask() def step(cell_inputs, cell_states): return self.cell.call(cell_inputs, cell_states, **kwargs) last_output, outputs, states = K.rnn( step, inputs, initial_state, constants=None, go_backwards=self.go_backwards, mask=mask, unroll=self.unroll, input_length=timesteps, time_major=self.time_major, zero_output_for_mask=self.zero_output_for_mask) # This is a dummy tensor for testing purpose. runtime = _runtime(_RUNTIME_UNKNOWN) else: last_output, outputs, runtime, states = self._defun_gru_call( inputs, initial_state, training, mask) if self.stateful: updates = [state_ops.assign(self.states[0], states[0])] self.add_update(updates) if self.return_sequences: output = outputs else: output = last_output if self.return_state: return [output] + list(states) elif self._return_runtime: return output, runtime else: return output def _defun_gru_call(self, inputs, initial_state, training, mask): # Use the new defun approach for backend implementation swap. # Note that different implementations need to have same function # signature, eg, the tensor parameters need to have same shape and dtypes. 
self.reset_dropout_mask() dropout_mask = self.get_dropout_mask_for_cell(inputs, training, count=3) if dropout_mask is not None: inputs = inputs * dropout_mask[0] cudnn_gru_kwargs = { 'inputs': inputs, 'init_h': initial_state[0], 'kernel': self.cell.kernel, 'recurrent_kernel': self.cell.recurrent_kernel, 'bias': self.cell.bias, 'mask': mask, 'time_major': self.time_major, 'go_backwards': self.go_backwards } normal_gru_kwargs = cudnn_gru_kwargs.copy() normal_gru_kwargs.update({ 'activation': self.activation, 'recurrent_activation': self.recurrent_activation }) if context.executing_eagerly(): device_type = _get_context_device_type() can_use_gpu = ( # Either user specified GPU or unspecified but GPU is available. (device_type == _GPU_DEVICE_NAME or (device_type is None and context.num_gpus() > 0)) and (mask is None or is_sequence_right_padded(mask, self.time_major))) # Under eager context, check the device placement and prefer the # GPU implementation when GPU is available. if can_use_gpu: last_output, outputs, new_h, runtime = cudnn_gru(**cudnn_gru_kwargs) else: last_output, outputs, new_h, runtime = standard_gru(**normal_gru_kwargs) else: last_output, outputs, new_h, runtime = gru_with_backend_selection( **normal_gru_kwargs) states = [new_h] return last_output, outputs, runtime, states def standard_gru(inputs, init_h, kernel, recurrent_kernel, bias, activation, recurrent_activation, mask, time_major, go_backwards): """GRU with standard kernel implementation. This implementation can be run on all types of hardware. This implementation lifts out all the layer weights and makes them function parameters. It has the same number of tensor input params as the CuDNN counterpart. The RNN step logic has been simplified, e.g., dropout and masking are removed since the CuDNN implementation does not support them. Arguments: inputs: Input tensor of GRU layer. init_h: Initial state tensor for the cell output. kernel: Weights for cell kernel. recurrent_kernel: Weights for cell recurrent kernel. bias: Weights for cell kernel bias and recurrent bias. The bias contains the combined input_bias and recurrent_bias. activation: Activation function to use for output. recurrent_activation: Activation function to use for hidden recurrent state. mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. time_major: Boolean, whether the inputs are in the format of [time, batch, feature] or [batch, time, feature]. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. Returns: last_output: output tensor for the last timestep, which has shape [batch, units]. outputs: output tensor for all timesteps, which has shape [batch, time, units]. state_0: the cell output, which has the same shape as init_h. runtime: constant string tensor which indicates real runtime hardware. This value is for testing purposes and should not be used by user.
""" input_shape = K.int_shape(inputs) timesteps = input_shape[0] if time_major else input_shape[1] input_bias, recurrent_bias = array_ops.unstack(bias) def step(cell_inputs, cell_states): """Step function that will be used by Keras RNN backend.""" h_tm1 = cell_states[0] # inputs projected by all gate matrices at once matrix_x = K.dot(cell_inputs, kernel) matrix_x = K.bias_add(matrix_x, input_bias) x_z, x_r, x_h = array_ops.split(matrix_x, 3, axis=1) # hidden state projected by all gate matrices at once matrix_inner = K.dot(h_tm1, recurrent_kernel) matrix_inner = K.bias_add(matrix_inner, recurrent_bias) recurrent_z, recurrent_r, recurrent_h = array_ops.split(matrix_inner, 3, axis=1) z = recurrent_activation(x_z + recurrent_z) r = recurrent_activation(x_r + recurrent_r) hh = activation(x_h + r * recurrent_h) # previous and candidate state mixed by update gate h = z * h_tm1 + (1 - z) * hh return h, [h] last_output, outputs, new_states = K.rnn( step, inputs, [init_h], constants=None, unroll=False, time_major=time_major, mask=mask, go_backwards=go_backwards, input_length=timesteps) return last_output, outputs, new_states[0], _runtime(_RUNTIME_CPU) def cudnn_gru(inputs, init_h, kernel, recurrent_kernel, bias, mask, time_major, go_backwards): """GRU with CuDNN implementation which is only available for GPU.""" if not time_major and mask is None: inputs = array_ops.transpose(inputs, perm=(1, 0, 2)) seq_axis, batch_axis = (0, 1) else: seq_axis, batch_axis = (0, 1) if time_major else (1, 0) # For init_h, cuDNN expects one more dim of num_layers before or after batch # dim for time major or batch major inputs respectively init_h = array_ops.expand_dims(init_h, axis=seq_axis) weights = array_ops.split(kernel, 3, axis=1) weights += array_ops.split(recurrent_kernel, 3, axis=1) # Note that the bias was initialized as shape (2, 3 * units), flat it into # (6 * units) bias = array_ops.split(K.flatten(bias), 6) # Note that the gate order for CuDNN is different from the canonical format. # canonical format is [z, r, h], whereas CuDNN is [r, z, h]. The swap need to # be done for kernel, recurrent_kernel, input_bias, recurrent_bias. # z is update gate weights. # r is reset gate weights. # h is output gate weights. weights[0], weights[1] = weights[1], weights[0] weights[3], weights[4] = weights[4], weights[3] bias[0], bias[1] = bias[1], bias[0] bias[3], bias[4] = bias[4], bias[3] params = _canonical_to_params( weights=weights, biases=bias, shape=constant_op.constant([-1]), transpose_weights=True) if mask is not None: sequence_length = calculate_sequence_by_mask(mask, time_major) if go_backwards: # Three reversals are required. E.g., # normal input = [1, 2, 3, 0, 0] # where 0 need to be masked # reversed_input_to_cudnn = [3, 2, 1, 0, 0] # output_from_cudnn = [6, 5, 4, 0, 0] # expected_output = [0, 0, 6, 5 ,4] inputs = array_ops.reverse_sequence_v2( inputs, sequence_length, seq_axis=seq_axis, batch_axis=batch_axis) outputs, h, _, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv3( inputs, input_h=init_h, input_c=0, params=params, is_training=True, rnn_mode='gru', sequence_lengths=sequence_length, time_major=time_major) if go_backwards: outputs = array_ops.reverse_sequence_v2( outputs, sequence_length, seq_axis=seq_axis, batch_axis=batch_axis) outputs = array_ops.reverse(outputs, axis=[seq_axis]) else: if go_backwards: # Reverse axis 0 since the input is already convert to time major. 
inputs = array_ops.reverse(inputs, axis=[0]) outputs, h, _, _ = gen_cudnn_rnn_ops.cudnn_rnn( inputs, input_h=init_h, input_c=0, params=params, is_training=True, rnn_mode='gru') last_output = outputs[-1] if not time_major and mask is None: outputs = array_ops.transpose(outputs, perm=[1, 0, 2]) h = array_ops.squeeze(h, axis=seq_axis) # In the case of variable length input, the cudnn kernel will fill zeros for # the output, whereas the default keras behavior is to carry over the previous # output from t-1, so that in the return_sequences=False case, the user can # quickly get the final effective output instead of just 0s at the last # timestep. In order to mimic the default keras behavior, we copy the final h # state as the last_output, since it is numerically the same as the output. if mask is not None: last_output = h return last_output, outputs, h, _runtime(_RUNTIME_GPU) def gru_with_backend_selection( inputs, init_h, kernel, recurrent_kernel, bias, mask, time_major, go_backwards, activation, recurrent_activation): """Call the GRU with optimized backend kernel selection. Under the hood, this function will create two TF functions: one with the most generic kernel, which can run under all device conditions, and a second one with the CuDNN-specific kernel, which can only run on GPU. The first function will be called with the generic params, while the second function is not called, but only registered in the graph. The Grappler will do the proper graph rewrite and swap in the optimized TF function based on the device placement. Args: inputs: Input tensor of GRU layer. init_h: Initial state tensor for the cell output. kernel: Weights for cell kernel. recurrent_kernel: Weights for cell recurrent kernel. bias: Weights for cell kernel bias and recurrent bias. The bias contains the combined input_bias and recurrent_bias. mask: Boolean tensor for masking out the steps within the sequence. time_major: Boolean, whether the inputs are in the format of [time, batch, feature] or [batch, time, feature]. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. activation: Activation function to use for output. recurrent_activation: Activation function to use for hidden recurrent state. Returns: List of output tensors, same as standard_gru. """ params = { 'inputs': inputs, 'init_h': init_h, 'kernel': kernel, 'recurrent_kernel': recurrent_kernel, 'bias': bias, 'mask': mask, 'time_major': time_major, 'go_backwards': go_backwards, 'activation': activation, 'recurrent_activation': recurrent_activation } def cudnn_gru_with_fallback(inputs, init_h, kernel, recurrent_kernel, bias, mask, time_major, go_backwards, activation, recurrent_activation): """Use the CuDNN kernel when mask is None or the input is strictly right padded.""" if mask is None: return cudnn_gru(inputs=inputs, init_h=init_h, kernel=kernel, recurrent_kernel=recurrent_kernel, bias=bias, mask=mask, time_major=time_major, go_backwards=go_backwards) # Note that mask is a boolean tensor, which does not need gradient # calculation; when using tf.cond, a default gradient is added for it, # which then causes the backward function to have a signature mismatch. # Force the mask to not generate a gradient to allow implementation_selector # to work properly. # TODO(b/80444525): Remove the stop_gradient().
mask = array_ops.stop_gradient(mask) def input_right_padded(): return cudnn_gru(inputs=inputs, init_h=init_h, kernel=kernel, recurrent_kernel=recurrent_kernel, bias=bias, mask=mask, time_major=time_major, go_backwards=go_backwards) def input_not_right_padded(): return standard_gru(inputs=inputs, init_h=init_h, kernel=kernel, recurrent_kernel=recurrent_kernel, bias=bias, mask=mask, time_major=time_major, go_backwards=go_backwards, activation=activation, recurrent_activation=recurrent_activation) return control_flow_ops.cond( is_sequence_right_padded(mask, time_major), true_fn=input_right_padded, false_fn=input_not_right_padded) # Each time a `tf.function` is called, we will give it a unique # identifiable API name, so that Grappler won't get confused when it # sees multiple GRU layers added into the same graph, and it will be able # to pair up the different implementations across them. api_name = 'gru_' + str(uuid.uuid4()) defun_standard_gru = _generate_defun_backend( api_name, _CPU_DEVICE_NAME, standard_gru) defun_cudnn_gru = _generate_defun_backend( api_name, _GPU_DEVICE_NAME, cudnn_gru_with_fallback) # Call the normal GRU impl and register the CuDNN impl function. The # grappler will kick in during session execution to optimize the graph. last_output, outputs, new_h, runtime = defun_standard_gru(**params) function.register(defun_cudnn_gru, **params) return last_output, outputs, new_h, runtime @keras_export('keras.layers.LSTMCell', v1=[]) class LSTMCell(recurrent.LSTMCell): """Cell class for the LSTM layer. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at initialization. Setting it to True will also force `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf) kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 (default) will batch them into fewer, larger operations.
These modes will have different performance profiles on different hardware and for different applications. Call arguments: inputs: A 2D tensor. states: List of state tensors corresponding to the previous timestep. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used. """ def __init__(self, units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., implementation=2, **kwargs): super(LSTMCell, self).__init__( units, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, unit_forget_bias=unit_forget_bias, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, implementation=implementation, **kwargs) @keras_export('keras.layers.LSTM', v1=[]) class LSTM(recurrent.DropoutRNNCellMixin, recurrent.LSTM): """Long Short-Term Memory layer - Hochreiter 1997. Based on available runtime hardware and constraints, this layer will choose different implementations (cuDNN-based or pure-TensorFlow) to maximize the performance. If a GPU is available and all the arguments to the layer meet the requirement of the CuDNN kernel (see below for details), the layer will use a fast cuDNN implementation. The requirements to use the cuDNN implementation are: 1. `activation` == 'tanh' 2. `recurrent_activation` == 'sigmoid' 3. `recurrent_dropout` == 0 4. `unroll` is False 5. `use_bias` is True 6. Inputs are either not masked or strictly right padded. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at initialization. Setting it to True will also force `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. unroll: Boolean (default False). If True, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed up an RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences. Call arguments: inputs: A 3D tensor. mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked (optional, defaults to `None`). training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is only relevant if `dropout` or `recurrent_dropout` is used (optional, defaults to `None`). initial_state: List of initial state tensors to be passed to the first call of the cell (optional, defaults to `None` which causes creation of zero-filled initial state tensors). """ def __init__(self, units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., implementation=2, return_sequences=False, return_state=False, go_backwards=False, stateful=False, time_major=False, unroll=False, **kwargs): # return_runtime is a flag for testing, which shows the real backend # implementation chosen by grappler in graph mode.
self.return_runtime = kwargs.pop('return_runtime', False) super(LSTM, self).__init__( units, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, unit_forget_bias=unit_forget_bias, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, implementation=implementation, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, time_major=time_major, unroll=unroll, **kwargs) self.state_spec = [ InputSpec(shape=(None, dim)) for dim in (self.units, self.units) ] self.could_use_cudnn = ( activation == 'tanh' and recurrent_activation == 'sigmoid' and recurrent_dropout == 0 and not unroll and use_bias and ops.executing_eagerly_outside_functions()) def call(self, inputs, mask=None, training=None, initial_state=None): # LSTM does not support constants. Ignore them during processing. inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None) if isinstance(mask, list): mask = mask[0] input_shape = K.int_shape(inputs) timesteps = input_shape[0] if self.time_major else input_shape[1] if not self.could_use_cudnn: # Fall back to using the normal LSTM. kwargs = {'training': training} self.cell.reset_dropout_mask() self.cell.reset_recurrent_dropout_mask() def step(inputs, states): return self.cell.call(inputs, states, **kwargs) last_output, outputs, states = K.rnn( step, inputs, initial_state, constants=None, go_backwards=self.go_backwards, mask=mask, unroll=self.unroll, input_length=timesteps, time_major=self.time_major, zero_output_for_mask=self.zero_output_for_mask) runtime = _runtime(_RUNTIME_UNKNOWN) else: # Use the new defun approach for backend implementation swap. # Note that different implementations need to have the same function # signature, e.g., the tensor parameters need to have the same shape and # dtypes. Since CuDNN has an extra set of biases, those biases will be # passed to both the normal and CuDNN implementations. self.reset_dropout_mask() dropout_mask = self.get_dropout_mask_for_cell(inputs, training, count=4) if dropout_mask is not None: inputs = inputs * dropout_mask[0] cudnn_lstm_kwargs = { 'inputs': inputs, 'init_h': initial_state[0], 'init_c': initial_state[1], 'kernel': self.cell.kernel, 'recurrent_kernel': self.cell.recurrent_kernel, 'bias': self.cell.bias, 'mask': mask, 'time_major': self.time_major, 'go_backwards': self.go_backwards } normal_lstm_kwargs = cudnn_lstm_kwargs.copy() normal_lstm_kwargs.update({ 'activation': self.activation, 'recurrent_activation': self.recurrent_activation }) if context.executing_eagerly(): device_type = _get_context_device_type() can_use_gpu = ( # Either user specified GPU or unspecified but GPU is available. (device_type == _GPU_DEVICE_NAME or (device_type is None and context.num_gpus() > 0)) and (mask is None or is_sequence_right_padded(mask, self.time_major))) # Under eager context, check the device placement and prefer the # GPU implementation when GPU is available.
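# In graph mode (the `else` branch below), both implementations are instead
# registered under the same `api_implements` attribute, and grappler swaps in
# the appropriate one based on the final device placement; see
# `lstm_with_backend_selection`.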
if can_use_gpu: last_output, outputs, new_h, new_c, runtime = cudnn_lstm( **cudnn_lstm_kwargs) else: last_output, outputs, new_h, new_c, runtime = standard_lstm( **normal_lstm_kwargs) else: (last_output, outputs, new_h, new_c, runtime) = lstm_with_backend_selection(**normal_lstm_kwargs) states = [new_h, new_c] if self.stateful: updates = [] for i in range(len(states)): updates.append(state_ops.assign(self.states[i], states[i])) self.add_update(updates) if self.return_sequences: output = outputs else: output = last_output if self.return_state: return [output] + list(states) elif self.return_runtime: return output, runtime else: return output def _canonical_to_params(weights, biases, shape, transpose_weights=False): """Utility function to convert variables to CuDNN-compatible parameters. Note that Keras weights for kernels are different from the CuDNN format. E.g.: ``` Keras CuDNN [[0, 1, 2], <---> [[0, 2, 4], [3, 4, 5]] [1, 3, 5]] ``` If the input weights need to be in a unified format, then set `transpose_weights=True` to convert the weights. Args: weights: list of weights for the individual kernels and recurrent kernels. biases: list of biases for the individual gates. shape: the shape for the converted variables that will be fed to CuDNN. transpose_weights: boolean, whether to transpose the weights. Returns: The converted weights that can be fed to CuDNN ops as params. """ def convert(w): return array_ops.transpose(w) if transpose_weights else w weights = [array_ops.reshape(convert(x), shape) for x in weights] biases = [array_ops.reshape(x, shape) for x in biases] return array_ops.concat(weights + biases, axis=0) def standard_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias, activation, recurrent_activation, mask, time_major, go_backwards): """LSTM with standard kernel implementation. This implementation can be run on all types of hardware. This implementation lifts out all the layer weights and makes them function parameters. It has the same number of tensor input params as the CuDNN counterpart. The RNN step logic has been simplified, e.g., dropout and masking are removed since the CuDNN implementation does not support them. Note that the first half of the bias tensor should be ignored by this impl. The CuDNN impl needs an extra set of input gate biases. In order to make both functions take the same shape of parameters, that extra set of biases is also fed here. Args: inputs: input tensor of LSTM layer. init_h: initial state tensor for the cell output. init_c: initial state tensor for the cell hidden state. kernel: weights for cell kernel. recurrent_kernel: weights for cell recurrent kernel. bias: weights for cell kernel bias and recurrent bias. Only recurrent bias is used in this case. activation: Activation function to use for output. recurrent_activation: Activation function to use for hidden recurrent state. mask: Boolean tensor for masking out the steps within the sequence. time_major: boolean, whether the inputs are in the format of [time, batch, feature] or [batch, time, feature]. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. Returns: last_output: output tensor for the last timestep, which has shape [batch, units]. outputs: output tensor for all timesteps, which has shape [batch, time, units]. state_0: the cell output, which has the same shape as init_h. state_1: the cell hidden state, which has the same shape as init_c. runtime: constant string tensor which indicates real runtime hardware.
This value is for testing purposes and should not be used by user. """ input_shape = K.int_shape(inputs) timesteps = input_shape[0] if time_major else input_shape[1] def step(cell_inputs, cell_states): """Step function that will be used by Keras RNN backend.""" h_tm1 = cell_states[0] # previous memory state c_tm1 = cell_states[1] # previous carry state z = K.dot(cell_inputs, kernel) z += K.dot(h_tm1, recurrent_kernel) z = K.bias_add(z, bias) z0, z1, z2, z3 = array_ops.split(z, 4, axis=1) i = recurrent_activation(z0) f = recurrent_activation(z1) c = f * c_tm1 + i * activation(z2) o = recurrent_activation(z3) h = o * activation(c) return h, [h, c] last_output, outputs, new_states = K.rnn( step, inputs, [init_h, init_c], constants=None, unroll=False, time_major=time_major, mask=mask, go_backwards=go_backwards, input_length=timesteps) return (last_output, outputs, new_states[0], new_states[1], _runtime(_RUNTIME_CPU)) def cudnn_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias, mask, time_major, go_backwards): """LSTM with CuDNN implementation which is only available for GPU. Note that currently only right padded data is supported, or the result will be polluted by the unmasked data which should have been filtered. Args: inputs: Input tensor of LSTM layer. init_h: Initial state tensor for the cell output. init_c: Initial state tensor for the cell hidden state. kernel: Weights for cell kernel. recurrent_kernel: Weights for cell recurrent kernel. bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias is used in this case. mask: Boolean tensor for masking out the steps within the sequence. time_major: Boolean, whether the inputs are in the format of [time, batch, feature] or [batch, time, feature]. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. Returns: last_output: Output tensor for the last timestep, which has shape [batch, units]. outputs: Output tensor for all timesteps, which has shape [batch, time, units]. state_0: The cell output, which has the same shape as init_h. state_1: The cell hidden state, which has the same shape as init_c. runtime: Constant string tensor which indicates real runtime hardware. This value is for testing purposes and should not be used by user. """ if not time_major and mask is None: inputs = array_ops.transpose(inputs, perm=(1, 0, 2)) seq_axis, batch_axis = (0, 1) else: seq_axis, batch_axis = (0, 1) if time_major else (1, 0) # For init_h and init_c, cuDNN expects one more dim of num_layers before or # after batch dim for time major or batch major inputs respectively. init_h = array_ops.expand_dims(init_h, axis=seq_axis) init_c = array_ops.expand_dims(init_c, axis=seq_axis) weights = array_ops.split(kernel, 4, axis=1) weights += array_ops.split(recurrent_kernel, 4, axis=1) # CuDNN has an extra set of biases for inputs; we disable them (setting them # to 0) so that mathematically it is the same as the canonical LSTM # implementation. full_bias = array_ops.concat((array_ops.zeros_like(bias), bias), 0) params = _canonical_to_params( weights=weights, biases=array_ops.split(full_bias, 8), shape=constant_op.constant([-1]), transpose_weights=True) if mask is not None: sequence_length = calculate_sequence_by_mask(mask, time_major) if go_backwards: # Three reversals are required.
E.g., # normal input = [1, 2, 3, 0, 0] # where 0s need to be masked # reversed_input_to_cudnn = [3, 2, 1, 0, 0] # output_from_cudnn = [6, 5, 4, 0, 0] # expected_output = [0, 0, 6, 5, 4] inputs = array_ops.reverse_sequence_v2( inputs, sequence_length, seq_axis=seq_axis, batch_axis=batch_axis) outputs, h, c, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv3( inputs, input_h=init_h, input_c=init_c, params=params, is_training=True, rnn_mode='lstm', sequence_lengths=sequence_length, time_major=time_major) if go_backwards: outputs = array_ops.reverse_sequence_v2( outputs, sequence_length, seq_axis=seq_axis, batch_axis=batch_axis) outputs = array_ops.reverse(outputs, axis=[seq_axis]) else: # # Fill the array with shape [batch] with value of max timesteps. # sequence_length = array_ops.fill([array_ops.shape(inputs)[1]], # array_ops.shape(inputs)[0]) if go_backwards: # Reverse axis 0 since the input is already converted to time major. inputs = array_ops.reverse(inputs, axis=[0]) outputs, h, c, _ = gen_cudnn_rnn_ops.cudnn_rnn( inputs, input_h=init_h, input_c=init_c, params=params, is_training=True, rnn_mode='lstm') last_output = outputs[-1] if not time_major and mask is None: outputs = array_ops.transpose(outputs, perm=[1, 0, 2]) h = array_ops.squeeze(h, axis=seq_axis) c = array_ops.squeeze(c, axis=seq_axis) # In the case of variable length input, the cudnn kernel will fill zeros for # the output, whereas the default keras behavior is to carry over the previous # output from t-1, so that in the return_sequences=False case, the user can # quickly get the final effective output instead of just 0s at the last # timestep. In order to mimic the default keras behavior, we copy the final h # state as the last_output, since it is numerically the same as the output. if mask is not None: last_output = h return last_output, outputs, h, c, _runtime(_RUNTIME_GPU) def lstm_with_backend_selection( inputs, init_h, init_c, kernel, recurrent_kernel, bias, mask, time_major, go_backwards, activation, recurrent_activation): """Call the LSTM with optimized backend kernel selection. Under the hood, this function will create two TF functions: one with the most generic kernel, which can run under all device conditions, and a second one with the CuDNN-specific kernel, which can only run on GPU. The first function will be called with the generic params, while the second function is not called, but only registered in the graph. The Grappler will do the proper graph rewrite and swap in the optimized TF function based on the device placement. Args: inputs: Input tensor of LSTM layer. init_h: Initial state tensor for the cell output. init_c: Initial state tensor for the cell hidden state. kernel: Weights for cell kernel. recurrent_kernel: Weights for cell recurrent kernel. bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias is used in this case. mask: Boolean tensor for masking out the steps within the sequence. time_major: Boolean, whether the inputs are in the format of [time, batch, feature] or [batch, time, feature]. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. activation: Activation function to use for output. recurrent_activation: Activation function to use for hidden recurrent state. Returns: List of output tensors, same as standard_lstm.
""" params = { 'inputs': inputs, 'init_h': init_h, 'init_c': init_c, 'kernel': kernel, 'recurrent_kernel': recurrent_kernel, 'bias': bias, 'mask': mask, 'time_major': time_major, 'go_backwards': go_backwards, 'activation': activation, 'recurrent_activation': recurrent_activation } def cudnn_lstm_with_fallback(inputs, init_h, init_c, kernel, recurrent_kernel, bias, mask, time_major, go_backwards, activation, recurrent_activation): """Use CuDNN kernel when mask is none or strictly right padded.""" if mask is None: return cudnn_lstm(inputs=inputs, init_h=init_h, init_c=init_c, kernel=kernel, recurrent_kernel=recurrent_kernel, bias=bias, mask=mask, time_major=time_major, go_backwards=go_backwards) # Note that mask is a boolean tensor, which doesn't need to do gradient # calculation, when using tf.cond, a default gradient is added for it, # which then cause the backward function to have a signature mismatch. # Force the mask to not generate gradient to allow implementation_selector # to work properly. # TODO(b/80444525): Remove the stop_gradient(). mask = array_ops.stop_gradient(mask) def input_right_padded(): return cudnn_lstm(inputs=inputs, init_h=init_h, init_c=init_c, kernel=kernel, recurrent_kernel=recurrent_kernel, bias=bias, mask=mask, time_major=time_major, go_backwards=go_backwards) def input_not_right_padded(): return standard_lstm(inputs=inputs, init_h=init_h, init_c=init_c, kernel=kernel, recurrent_kernel=recurrent_kernel, bias=bias, mask=mask, time_major=time_major, go_backwards=go_backwards, activation=activation, recurrent_activation=recurrent_activation) return control_flow_ops.cond( is_sequence_right_padded(mask, time_major), true_fn=input_right_padded, false_fn=input_not_right_padded) # Each time a `tf.function` is called, we will give it a unique # identifiable API name, so that Grappler won't get confused when it # sees multiple LSTM layers added into same graph, and it will be able # to pair up the different implementations across them. api_name = 'lstm_' + str(uuid.uuid4()) defun_standard_lstm = _generate_defun_backend( api_name, _CPU_DEVICE_NAME, standard_lstm) defun_cudnn_lstm = _generate_defun_backend( api_name, _GPU_DEVICE_NAME, cudnn_lstm_with_fallback) # Call the normal LSTM impl and register the CuDNN impl function. The # grappler will kick in during session execution to optimize the graph. last_output, outputs, new_h, new_c, runtime = defun_standard_lstm( **params) function.register(defun_cudnn_lstm, **params) return last_output, outputs, new_h, new_c, runtime def is_sequence_right_padded(mask, time_major): """Check the mask tensor and see if it right padded. For CuDNN kernel, it uses the sequence length param to skip the tailing timestep. If the data is left padded, or not a strict right padding (has masked value in the middle of the sequence), then CuDNN kernel won't be work properly in those cases. Left padded data: [[False, False, True, True, True]]. Right padded data: [[True, True, True, False, False]]. Mixture of mask/unmasked data: [[True, False, True, False, False]]. Note that for the mixed data example above, the actually data RNN should see are those 2 Trues (index 0 and 2), the index 1 False should be ignored and not pollute the internal states. Args: mask: the Boolean tensor with shape [batch, timestep] or [timestep, batch] when time_major is True. time_major: Boolean, whether the input mask is time major or batch major. Returns: boolean scalar tensor, whether the mask is strictly right padded. 
""" if time_major: mask = array_ops.transpose(mask) max_seq_length = array_ops.shape(mask)[1] count_of_true = math_ops.reduce_sum(math_ops.cast(mask, dtypes.int32), axis=1) right_padded_mask = array_ops.sequence_mask( count_of_true, maxlen=max_seq_length) return math_ops.reduce_all(math_ops.equal(mask, right_padded_mask)) def calculate_sequence_by_mask(mask, time_major): """Calculate the sequence length tensor (1-D) based on the masking tensor. The masking tensor is a 2D boolean tensor with shape [batch, timestep]. For any timestep that should be masked, the corresponding field will be False. Consider the following example: a = [[True, True, False, False], [True, True, True, False]] It is a (2, 4) tensor, and the corresponding sequence length result should be 1D tensor with value [2, 3]. Note that the masking tensor must be right padded that could be checked by, e.g., `is_sequence_right_padded()`. Args: mask: Boolean tensor with shape [batch, timestep] or [timestep, batch] if time_major=True. time_major: Boolean, which indicates whether the mask is time major or batch major. Returns: sequence_length: 1D int32 tensor. """ timestep_index = 0 if time_major else 1 return math_ops.reduce_sum(math_ops.cast(mask, dtypes.int32), axis=timestep_index) def _generate_defun_backend(unique_api_name, preferred_device, func): function_attributes = { _DEFUN_API_NAME_ATTRIBUTE: unique_api_name, _DEFUN_DEVICE_ATTRIBUTE: preferred_device, } return function.defun_with_attributes(func=func, attributes=function_attributes, autograph=False) def _get_context_device_type(): """Parse the current context and return the device type, eg CPU/GPU.""" current_device = context.context().device_name if current_device is None: return None return device.DeviceSpec.from_string(current_device).device_type def _runtime(runtime_name): with ops.device('/cpu:0'): return constant_op.constant( runtime_name, dtype=dtypes.float32, name='runtime')
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/recurrent_v2.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test for allowing TF ops to work with Keras Functional API.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import saving from tensorflow.python.keras import testing_utils from tensorflow.python.keras.optimizer_v2 import adam from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_nn_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test from tensorflow.python.util import nest def _single_op_at_end(): inputs = keras.Input(shape=(10,)) x = keras.layers.Dense(10)(inputs) outputs = gen_nn_ops.relu(x) return keras.Model(inputs, outputs) def _single_identity_op_at_end(): inputs = keras.Input(shape=(10,)) x = keras.layers.Dense(10)(inputs) outputs = array_ops.identity(x) assert 'Identity' in outputs.name return keras.Model(inputs, outputs) def _multiple_ops_at_end(): inputs = keras.Input(shape=(10,)) x = keras.layers.Dense(10)(inputs) x = gen_nn_ops.relu(x) outputs = gen_nn_ops.relu(x) return keras.Model(inputs, outputs) def _single_op_in_middle(): inputs = keras.Input(shape=(10,)) x = keras.layers.Dense(10)(inputs) x = gen_nn_ops.relu(x) outputs = keras.layers.Dense(10)(x) return keras.Model(inputs, outputs) def _multiple_ops_in_middle(): inputs = keras.Input(shape=(10,)) x = keras.layers.Dense(10)(inputs) x = gen_nn_ops.relu(x) x = gen_nn_ops.relu(x) outputs = keras.layers.Dense(10)(x) return keras.Model(inputs, outputs) def _single_standalone_branch(): inputs = keras.Input(shape=(10,)) x = keras.layers.Dense(10)(inputs) outputs = x * 2 return keras.Model(inputs, outputs) def _single_op_with_attrs(): inputs = keras.Input(shape=(10,)) x = math_ops.reduce_mean(inputs, axis=1, keepdims=True) outputs = keras.layers.Dense(10)(x) return keras.Model(inputs, outputs) def _multiple_uses(): inputs = keras.Input(shape=(10,)) x = math_ops.reduce_mean(inputs, axis=1, keepdims=True) x1 = keras.layers.Dense(10)(x) x2 = keras.layers.Dense(10)(x) outputs = x1 + x2 return keras.Model(inputs, outputs) def _op_with_tensor_list(): inputs = keras.Input(shape=(10,)) x = array_ops.concat([inputs, inputs], axis=1) outputs = keras.layers.Dense(10)(x) return keras.Model(inputs, outputs) def _add_n(): inputs = keras.Input(shape=(10,)) outputs = math_ops.add_n([inputs, inputs, inputs]) return keras.Model(inputs, outputs) def _reuse_op(): inputs = keras.Input(shape=(10,)) # This op needs to be checked 
multiple times. x = gen_nn_ops.relu(inputs) y = keras.layers.Dense(10)(x) x2 = x * 2 y2 = keras.layers.Dense(10)(x2) outputs = y + y2 return keras.Model(inputs, outputs) def _float64_op(): inputs = keras.Input(shape=(10,)) x = keras.layers.Dense(10, dtype='float64')(inputs) x = gen_nn_ops.relu(x) assert x.dtype == 'float64', 'x has dtype: %s' % x.dtype outputs = keras.layers.Dense(10)(x) return keras.Model(inputs, outputs) class MyAdd(keras.layers.Layer): def call(self, x, y): return x + y def _layer_with_tensor_arg(): inputs = keras.Input(shape=(10,)) x = inputs * 2 outputs = MyAdd()(inputs, x) return keras.Model(inputs, outputs) class LayerWithLayer(keras.layers.Layer): def build(self, input_shape): self.bias = self.add_weight(name='bias', dtype='float32') self.layer = keras.layers.Dense(10) def call(self, inputs): inputs = inputs * self.bias # Would throw an error if Keras History was created here. return self.layer(inputs) def _inner_layer(): inputs = keras.Input(shape=(10,)) outputs = LayerWithLayer()(inputs) return keras.Model(inputs, outputs) def _reuse_ancillary_layer(): inputs = (keras.Input(shape=(5,)), keras.Input(shape=(5,))) base_model = keras.Sequential([ keras.layers.Dense(3, input_shape=(5,)), ]) outputs = base_model(inputs[0]) model = keras.Model(inputs, outputs) # The second input is only involved in ancillary layers. outputs_delta = outputs - base_model(0.5 * inputs[1]) l2_loss = math_ops.reduce_mean( math_ops.reduce_sum(math_ops.square(outputs_delta), -1)) model.add_loss(l2_loss) model.add_metric(l2_loss, aggregation='mean', name='l2_loss') l1_loss = 0.01 * math_ops.reduce_mean( math_ops.reduce_sum(math_ops.abs(outputs_delta), -1)) model.add_loss(l1_loss) model.add_metric(l1_loss, aggregation='mean', name='l1_loss') return model @keras_parameterized.run_all_keras_modes class AutoLambdaTest(keras_parameterized.TestCase): @parameterized.named_parameters( ('single_op_at_end', _single_op_at_end), ('single_identity_op_at_end', _single_identity_op_at_end), ('multiple_ops_at_end', _multiple_ops_at_end), ('single_op_in_middle', _single_op_in_middle), ('multiple_ops_in_middle', _multiple_ops_in_middle), ('single_standalone_branch', _single_standalone_branch), ('single_op_with_attrs', _single_op_with_attrs), ('multiple_uses', _multiple_uses), ('op_with_tensor_list', _op_with_tensor_list), ('add_n', _add_n), ('_reuse_op', _reuse_op), ('_float64_op', _float64_op), ('_inner_layer', _inner_layer), ('_reuse_ancillary_layer', _reuse_ancillary_layer), ('_layer_with_tensor_arg', _layer_with_tensor_arg), ) def test_autolambda(self, model_fn): model = model_fn() model.compile( adam.Adam(0.001), 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) np_inputs = nest.map_structure( lambda x: np.ones((10,) + tuple(x.shape[1:]), 'float32'), model.inputs) np_outputs = nest.map_structure( lambda x: np.ones((10,) + tuple(x.shape[1:]), 'float32'), model.outputs) model.fit(np_inputs, np_outputs, batch_size=2) model(np_inputs) # Test calling the model directly on inputs. new_model = keras.Model.from_config( model.get_config(), custom_objects={ 'LayerWithLayer': LayerWithLayer, 'MyAdd': MyAdd }) new_model.compile( adam.Adam(0.001), 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) new_model.fit(np_inputs, np_outputs, batch_size=2) new_model(np_inputs) # Test calling the new model directly on inputs. # Assert that metrics are preserved and in the right order. 
self.assertAllEqual(model.metrics_names, new_model.metrics_names) # Assert that layer names don't change. self.assertAllEqual([layer.name for layer in model.layers], [layer.name for layer in new_model.layers]) def test_numerical_correctness_simple(self): x = ops.convert_to_tensor([[-1., 0., -2., 1.]]) inputs = keras.Input(shape=(4,)) outputs = gen_nn_ops.relu(inputs) model = keras.Model(inputs, outputs) y = self.evaluate(model(x)) self.assertAllClose(y, [[0., 0., 0., 1.]]) def test_numerical_correctness_with_attrs(self): x = ops.convert_to_tensor([[1.5, 1.5], [2.5, 3.5]]) inputs = keras.Input(shape=(10,)) outputs = math_ops.reduce_mean(inputs, axis=1) model = keras.Model(inputs, outputs) y = self.evaluate(model(x)) self.assertAllClose(y, [1.5, 3.]) def test_numerical_correctness_serialization(self): x = ops.convert_to_tensor([-1., 0., -2., 1.]) inputs = keras.Input(shape=(4,)) outputs = gen_nn_ops.relu(inputs) model1 = keras.Model(inputs, outputs) y1 = self.evaluate(model1(x)) model2 = keras.Model.from_config(model1.get_config()) y2 = self.evaluate(model2(x)) self.assertAllClose(y1, y2) def test_gradient_tape_in_function(self): z = keras.Input((1,)) x = math_ops.matmul(z, constant_op.constant(2.0, shape=(1, 1))) x = math_ops.reduce_mean(x, axis=0, keepdims=True) h = gen_nn_ops.relu(x) m = keras.Model(z, h) @def_function.function() def f(x): with backprop.GradientTape() as t: t.watch(x) z = m(x ** 2) grads = t.gradient(z, x) return grads self.assertAllEqual(f(constant_op.constant(10.0, shape=(1, 1))), constant_op.constant(40.0, shape=(1, 1))) f = def_function.function(f) self.assertAllEqual(f(constant_op.constant(10.0, shape=(1, 1))), constant_op.constant(40.0, shape=(1, 1))) def test_no_tracking(self): x = keras.backend.placeholder((10, 10)) keras.layers.Dense(1)(x) self.assertTrue(x._keras_history_checked) def test_timing_scales_linearly(self): def _construct_graph_of_size(size): start = time.time() x = keras.backend.placeholder(shape=(10, 4)) for _ in range(size): x = keras.layers.Dense(4)(x) x = gen_nn_ops.relu(x) end = time.time() return end - start size_50 = _construct_graph_of_size(50) size_500 = _construct_graph_of_size(500) # Check construction time grows approx. linearly with size. e = 3 # Fudge factor to prevent flakiness. self.assertLess(size_500, (10 * e) * size_50) def test_no_mask_tracking(self): x = keras.backend.placeholder((10, 10)) y = keras.layers.Masking(0.)(x) self.assertTrue(y._keras_mask._keras_history_checked) def test_built(self): inputs = keras.Input(shape=(10,)) outputs = gen_nn_ops.relu(inputs) model = keras.Model(inputs, outputs) model.compile('sgd', 'mse') for layer in model.layers: self.assertTrue(layer.built) # Test something that requires Layers to be built. model.summary() def test_json_serialization(self): inputs = keras.Input(shape=(4,), dtype='uint8') outputs = math_ops.cast(inputs, 'float32') / 4. model = saving.model_from_json(keras.Model(inputs, outputs).to_json()) self.assertAllEqual( self.evaluate(model(np.array([0, 64, 128, 192], np.uint8))), [0., 16., 32., 48.]) model.summary() class InputInEagerTest(test.TestCase): """Tests ops on graph tensors in Eager runtime. Input returns graph/symbolic tensors in the Eager runtime (this happens, for example, with tensors returned from Keras layers). 
These should be routed to the graph-style branch of these ops (b/134715641) """ def test_identity(self): with context.eager_mode(): x = keras.Input(shape=(1,)) self.assertTrue(hasattr(x, 'graph')) ident = array_ops.identity(x) # This is now a graph tensor, and should be able to continue in graphland self.assertIn('Identity', ident.name) def test_size(self): with context.eager_mode(): x = keras.Input(shape=(3,)) self.assertTrue(hasattr(x, 'graph')) self.assertAllEqual(x.get_shape().as_list(), [None, 3]) sz = array_ops.size(x) # This is now a graph tensor, and should be able to continue in graphland self.assertIn('Size', sz.name) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/tensorflow_op_layer_test.py
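The tests above exercise Keras's automatic wrapping of raw TensorFlow ops into layers inside a functional model. A minimal sketch of the behavior under test (assuming a TensorFlow build with this feature, as in this tree):

```python
import tensorflow as tf

# Mixing a raw TF op with Keras layers in a functional model: Keras
# wraps tf.nn.relu in an auto-generated op layer, so the model can
# still be serialized with get_config()/from_config().
inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
x = tf.nn.relu(x)  # raw op applied to a symbolic Keras tensor
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
rebuilt = tf.keras.Model.from_config(model.get_config())
```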
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Layers that operate regularization via the addition of noise. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.util.tf_export import keras_export @keras_export('keras.layers.GaussianNoise') class GaussianNoise(Layer): """Apply additive zero-centered Gaussian noise. This is useful to mitigate overfitting (you could see it as a form of random data augmentation). Gaussian Noise (GS) is a natural choice as corruption process for real valued inputs. As it is a regularization layer, it is only active at training time. Arguments: stddev: Float, standard deviation of the noise distribution. Call arguments: inputs: Input tensor (of any rank). training: Python boolean indicating whether the layer should behave in training mode (adding noise) or in inference mode (doing nothing). Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. """ def __init__(self, stddev, **kwargs): super(GaussianNoise, self).__init__(**kwargs) self.supports_masking = True self.stddev = stddev def call(self, inputs, training=None): def noised(): return inputs + K.random_normal( shape=array_ops.shape(inputs), mean=0., stddev=self.stddev) return K.in_train_phase(noised, inputs, training=training) def get_config(self): config = {'stddev': self.stddev} base_config = super(GaussianNoise, self).get_config() return dict(list(base_config.items()) + list(config.items())) @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): return input_shape @keras_export('keras.layers.GaussianDropout') class GaussianDropout(Layer): """Apply multiplicative 1-centered Gaussian noise. As it is a regularization layer, it is only active at training time. Arguments: rate: Float, drop probability (as with `Dropout`). The multiplicative noise will have standard deviation `sqrt(rate / (1 - rate))`. Call arguments: inputs: Input tensor (of any rank). training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. 
""" def __init__(self, rate, **kwargs): super(GaussianDropout, self).__init__(**kwargs) self.supports_masking = True self.rate = rate def call(self, inputs, training=None): if 0 < self.rate < 1: def noised(): stddev = np.sqrt(self.rate / (1.0 - self.rate)) return inputs * K.random_normal( shape=array_ops.shape(inputs), mean=1.0, stddev=stddev) return K.in_train_phase(noised, inputs, training=training) return inputs def get_config(self): config = {'rate': self.rate} base_config = super(GaussianDropout, self).get_config() return dict(list(base_config.items()) + list(config.items())) @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): return input_shape @keras_export('keras.layers.AlphaDropout') class AlphaDropout(Layer): """Applies Alpha Dropout to the input. Alpha Dropout is a `Dropout` that keeps mean and variance of inputs to their original values, in order to ensure the self-normalizing property even after this dropout. Alpha Dropout fits well to Scaled Exponential Linear Units by randomly setting activations to the negative saturation value. Arguments: rate: float, drop probability (as with `Dropout`). The multiplicative noise will have standard deviation `sqrt(rate / (1 - rate))`. seed: A Python integer to use as random seed. Call arguments: inputs: Input tensor (of any rank). training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. """ def __init__(self, rate, noise_shape=None, seed=None, **kwargs): super(AlphaDropout, self).__init__(**kwargs) self.rate = rate self.noise_shape = noise_shape self.seed = seed self.supports_masking = True def _get_noise_shape(self, inputs): return self.noise_shape if self.noise_shape else array_ops.shape(inputs) def call(self, inputs, training=None): if 0. < self.rate < 1.: noise_shape = self._get_noise_shape(inputs) def dropped_inputs(inputs=inputs, rate=self.rate, seed=self.seed): # pylint: disable=missing-docstring alpha = 1.6732632423543772848170429916717 scale = 1.0507009873554804934193349852946 alpha_p = -alpha * scale kept_idx = math_ops.greater_equal( K.random_uniform(noise_shape, seed=seed), rate) kept_idx = math_ops.cast(kept_idx, K.floatx()) # Get affine transformation params a = ((1 - rate) * (1 + rate * alpha_p**2))**-0.5 b = -a * alpha_p * rate # Apply mask x = inputs * kept_idx + alpha_p * (1 - kept_idx) # Do affine transformation return a * x + b return K.in_train_phase(dropped_inputs, inputs, training=training) return inputs def get_config(self): config = {'rate': self.rate} base_config = super(AlphaDropout, self).get_config() return dict(list(base_config.items()) + list(config.items())) @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): return input_shape
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/noise.py
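As the docstrings above note, these noise layers are identity functions at inference time and only inject noise when the learning phase is on. A short usage sketch (assuming eager execution, e.g. via `tf.compat.v1.enable_eager_execution()` on a 1.x build):

```python
import numpy as np
import tensorflow as tf

tf.compat.v1.enable_eager_execution()  # no-op on TF 2.x

x = np.ones((2, 4), dtype='float32')

# Additive noise: y = x + N(0, stddev^2), only when training=True.
noise = tf.keras.layers.GaussianNoise(stddev=0.1)
print(noise(x, training=False))  # identical to x
print(noise(x, training=True))   # x plus Gaussian noise

# Multiplicative 1-centered noise with stddev sqrt(rate / (1 - rate)),
# again only when training=True.
drop = tf.keras.layers.GaussianDropout(rate=0.2)
print(drop(x, training=True))
```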
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras layers that implement explicit (approximate) kernel feature maps.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import six from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import initializers from tensorflow.python.keras.engine import base_layer from tensorflow.python.keras.engine import input_spec from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import nn _SUPPORTED_RBF_KERNEL_TYPES = ['gaussian', 'laplacian'] class RandomFourierFeatures(base_layer.Layer): r"""Layer that maps its inputs using random Fourier features. This layer implements a feature map \\(\phi: \mathbb{R}^d \rightarrow \mathbb{R}^D\\) which approximates shift-invariant kernels. A kernel function K(x, y) defined over \\(\mathbb{R}^d x \mathbb{R}^d\\) is shift-invariant if K(x, y) = k(x-y) for some function defined over \\(\mathbb{R}^d\\). Many popular Radial Basis Functions (in short RBF), including gaussian and laplacian kernels are shift-invariant. The layer approximates a (shift invariant) kernel K in the following sense: up to a scaling factor, for all inputs \\(x, y \in \mathbb{R}^d\\) \\(\phi(x)^T \cdot \phi(y) \approx K(x, y)\\) The implementation of this layer is based on the following paper: "Random Features for Large-Scale Kernel Machines" by Ali Rahimi and Ben Recht. (link: https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf) The distribution from which the parameters of the random features map (layer) are sampled, determines which shift-invariant kernel the layer approximates (see paper for more details). The users can use the distribution of their choice. Due to their popularity, the layer supports the out-of-the-box approximation of the following RBF kernels: - Gaussian: \\(K(x, y) = e^{-\frac{\|x-y\|_2^2}{2 \cdot scale^2}}\\) - Laplacian: \\(K(x, y) = e^{-\frac{\|x-y\|_1}{scale}}\\) NOTE: Unlike the map described in the paper and the scikit-learn implementation, the output of this layer does not apply the sqrt(2/D) normalization factor. Usage for ML: Typically, this layer is used to "kernelize" linear models by applying a non-linear transformation (this layer) to the input features and then training a linear model on top of the transformed features. Depending on the loss function of the linear model, the composition of this layer and the linear model results to models that are equivalent (up to approximation) to kernel SVMs (for hinge loss), kernel logistic regression (for logistic loss), kernel linear regression (for squared loss) etc. 
Example of building a kernel multinomial logistic regression model with Gaussian kernel in keras: ```python random_features_layer = RandomFourierFeatures( output_dim=500, kernel_initializer='gaussian', scale=5.0, ...) model = tf.keras.models.Sequential() model.add(random_features_layer) model.add(tf.keras.layers.Dense(units=num_classes, activation='softmax')) model.compile( loss=tf.keras.losses.categorical_crossentropy, optimizer=..., metrics=...) ``` To use another kernel, replace the layer creation command with: ```python random_features_layer = RandomFourierFeatures( output_dim=500, kernel_initializer=<my_initializer>, scale=..., ...) ``` Arguments: output_dim: Positive integer, the dimension of the layer's output, i.e., the number of random features used to approximate the kernel. kernel_initializer: Determines the distribution of the parameters of the random features map (and therefore the kernel approximated by the layer). It can be either a string or an instance of TensorFlow's Initializer class. Currently only 'gaussian' and 'laplacian' are supported as string initializers (case insensitive). Note that these parameters are not trainable. scale: For gaussian and laplacian kernels, this corresponds to a scaling factor of the corresponding kernel approximated by the layer (see concrete definitions above). When provided, it should be a positive float. If None, the implementation chooses a default value (1.0 typically). Both the approximation error of the kernel and the classification quality are sensitive to this parameter. If trainable is set to True, this parameter is learned end-to-end during training and the provided value serves as an initialization value. NOTE: When this layer is used to map the initial features and then the transformed features are fed to a linear model, by making `scale` trainable, the resulting optimization problem is no longer convex (even if the loss function used by the linear model is convex). trainable: Whether the scaling parameter of the layer is trainable. Defaults to False. name: name for the RandomFourierFeatures layer. Raises: ValueError: if output_dim or scale is not positive or if the provided kernel_initializer is not supported. """ def __init__(self, output_dim, kernel_initializer='gaussian', scale=None, trainable=False, name=None, **kwargs): if output_dim <= 0: raise ValueError( '`output_dim` should be a positive integer. Given: {}.'.format( output_dim)) if isinstance(kernel_initializer, six.string_types): if kernel_initializer.lower() not in _SUPPORTED_RBF_KERNEL_TYPES: raise ValueError( 'Unsupported kernel type: \'{}\'. Supported kernel types: {}.' .format(kernel_initializer, _SUPPORTED_RBF_KERNEL_TYPES)) if scale is not None and scale <= 0.0: raise ValueError('When provided, `scale` should be a positive float. ' 'Given: {}.'.format(scale)) super(RandomFourierFeatures, self).__init__( trainable=trainable, name=name, **kwargs) self.output_dim = output_dim self.kernel_initializer = kernel_initializer self.scale = scale def build(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape) # TODO(sibyl-vie3Poto): Allow higher dimension inputs. Currently the input is expected # to have shape [batch_size, dimension]. if input_shape.rank != 2: raise ValueError( 'The rank of the input tensor should be 2. Got {} instead.'.format( input_shape.ndims)) if input_shape.dims[1].value is None: raise ValueError( 'The last dimension of the inputs to `RandomFourierFeatures` ' 'should be defined. 
Found `None`.') self.input_spec = input_spec.InputSpec( ndim=2, axes={1: input_shape.dims[1].value}) input_dim = input_shape.dims[1].value kernel_initializer = _get_random_features_initializer( self.kernel_initializer, shape=(input_dim, self.output_dim)) unscaled_kernel = self.add_weight( name='unscaled_random_features', shape=(input_dim, self.output_dim), dtype=dtypes.float32, initializer=kernel_initializer, trainable=False) self.bias = self.add_weight( name='random_features_bias', shape=(self.output_dim,), dtype=dtypes.float32, initializer=init_ops.random_uniform_initializer( minval=0.0, maxval=2 * np.pi, dtype=dtypes.float32), trainable=False) if self.scale is None: self.scale = _get_default_scale(self.kernel_initializer, input_dim) scale = self.add_weight( name='random_features_scale', shape=(1,), dtype=dtypes.float32, initializer=init_ops.constant_initializer(self.scale), trainable=True, constraint='NonNeg') self.kernel = (1.0 / scale) * unscaled_kernel super(RandomFourierFeatures, self).build(input_shape) def call(self, inputs): inputs = ops.convert_to_tensor(inputs, dtype=self.dtype) inputs = gen_math_ops.cast(inputs, dtypes.float32) outputs = gen_math_ops.mat_mul(inputs, self.kernel) outputs = nn.bias_add(outputs, self.bias) return gen_math_ops.cos(outputs) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape) input_shape = input_shape.with_rank(2) if input_shape.dims[-1].value is None: raise ValueError( 'The innermost dimension of input shape must be defined. Given: %s' % input_shape) return input_shape[:-1].concatenate(self.output_dim) def get_config(self): kernel_initializer = self.kernel_initializer if isinstance(self.kernel_initializer, init_ops.Initializer): kernel_initializer = initializers.serialize(self.kernel_initializer) config = { 'output_dim': self.output_dim, 'kernel_initializer': kernel_initializer, 'scale': self.scale, } base_config = super(RandomFourierFeatures, self).get_config() return dict(list(base_config.items()) + list(config.items())) def _get_random_features_initializer(initializer, shape): """Returns Initializer object for random features.""" def _get_cauchy_samples(loc, scale, shape): probs = np.random.uniform(low=0., high=1., size=shape) return loc + scale * np.tan(np.pi * (probs - 0.5)) random_features_initializer = initializer if isinstance(initializer, six.string_types): if initializer.lower() == 'gaussian': random_features_initializer = init_ops.random_normal_initializer( stddev=1.0) elif initializer.lower() == 'laplacian': random_features_initializer = init_ops.constant_initializer( _get_cauchy_samples(loc=0.0, scale=1.0, shape=shape)) else: raise ValueError( 'Unsupported kernel type: \'{}\'. Supported kernel types: {}.'.format( random_features_initializer, _SUPPORTED_RBF_KERNEL_TYPES)) return random_features_initializer def _get_default_scale(initializer, input_dim): if (isinstance(initializer, six.string_types) and initializer.lower() == 'gaussian'): return np.sqrt(input_dim / 2.0) return 1.0
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/kernelized.py
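The approximation property described in the RandomFourierFeatures docstring can be checked numerically. A rough sketch (the import path is internal to this tree; the 2/D factor compensates for the sqrt(2/D) normalization the layer deliberately omits, per the NOTE in the docstring; eager execution assumed):

```python
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.layers.kernelized import RandomFourierFeatures

tf.compat.v1.enable_eager_execution()

output_dim = 5000
rff = RandomFourierFeatures(output_dim=output_dim,
                            kernel_initializer='gaussian', scale=1.0)

x = np.random.normal(size=(1, 4)).astype('float32')
y = np.random.normal(size=(1, 4)).astype('float32')
phi_x = rff(x).numpy()
phi_y = rff(y).numpy()

# phi(x) . phi(y), rescaled by 2/D, should approximate the Gaussian
# kernel K(x, y) = exp(-||x - y||^2 / (2 * scale^2)).
approx = (2.0 / output_dim) * phi_x.dot(phi_y.T)[0, 0]
exact = np.exp(-np.sum((x - y) ** 2) / 2.0)
print(approx, exact)  # close for large output_dim
```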
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SimpleRNN layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent @keras_parameterized.run_all_keras_modes class SimpleRNNLayerTest(keras_parameterized.TestCase): def test_return_sequences_SimpleRNN(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( keras.layers.SimpleRNN, kwargs={'units': units, 'return_sequences': True}, input_shape=(num_samples, timesteps, embedding_dim)) def test_float64_SimpleRNN(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( keras.layers.SimpleRNN, kwargs={'units': units, 'return_sequences': True, 'dtype': 'float64'}, input_shape=(num_samples, timesteps, embedding_dim), input_dtype='float64') def test_dynamic_behavior_SimpleRNN(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 layer = keras.layers.SimpleRNN(units, input_shape=(None, embedding_dim)) model = keras.models.Sequential() model.add(layer) model.compile('rmsprop', 'mse') x = np.random.random((num_samples, timesteps, embedding_dim)) y = np.random.random((num_samples, units)) model.train_on_batch(x, y) def test_dropout_SimpleRNN(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( keras.layers.SimpleRNN, kwargs={'units': units, 'dropout': 0.1, 'recurrent_dropout': 0.1}, input_shape=(num_samples, timesteps, embedding_dim)) def test_implementation_mode_SimpleRNN(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 for mode in [0, 1, 2]: testing_utils.layer_test( keras.layers.SimpleRNN, kwargs={'units': units, 'implementation': mode}, input_shape=(num_samples, timesteps, embedding_dim)) def test_constraints_SimpleRNN(self): embedding_dim = 4 layer_class = keras.layers.SimpleRNN k_constraint = keras.constraints.max_norm(0.01) r_constraint = keras.constraints.max_norm(0.01) b_constraint = keras.constraints.max_norm(0.01) layer = layer_class( 5, return_sequences=False, weights=None, input_shape=(None, embedding_dim), kernel_constraint=k_constraint, recurrent_constraint=r_constraint, bias_constraint=b_constraint) layer.build((None, None, embedding_dim)) self.assertEqual(layer.cell.kernel.constraint, k_constraint) self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint) self.assertEqual(layer.cell.bias.constraint, b_constraint) def test_with_masking_layer_SimpleRNN(self): layer_class = keras.layers.SimpleRNN inputs = np.random.random((2, 3, 4)) targets = np.abs(np.random.random((2, 3, 5))) targets /= 
targets.sum(axis=-1, keepdims=True) model = keras.models.Sequential() model.add(keras.layers.Masking(input_shape=(3, 4))) model.add(layer_class(units=5, return_sequences=True, unroll=False)) model.compile(loss='categorical_crossentropy', optimizer='rmsprop') model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1) def test_from_config_SimpleRNN(self): layer_class = keras.layers.SimpleRNN for stateful in (False, True): l1 = layer_class(units=1, stateful=stateful) l2 = layer_class.from_config(l1.get_config()) assert l1.get_config() == l2.get_config() def test_regularizers_SimpleRNN(self): embedding_dim = 4 layer_class = keras.layers.SimpleRNN layer = layer_class( 5, return_sequences=False, weights=None, input_shape=(None, embedding_dim), kernel_regularizer=keras.regularizers.l1(0.01), recurrent_regularizer=keras.regularizers.l1(0.01), bias_regularizer='l2', activity_regularizer='l1') layer.build((None, None, 2)) self.assertEqual(len(layer.losses), 3) x = keras.backend.variable(np.ones((2, 3, 2))) layer(x) if context.executing_eagerly(): self.assertEqual(len(layer.losses), 4) else: self.assertEqual(len(layer.get_losses_for(x)), 1) def test_statefulness_SimpleRNN(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 layer_class = keras.layers.SimpleRNN model = keras.models.Sequential() model.add( keras.layers.Embedding( 4, embedding_dim, mask_zero=True, input_length=timesteps, batch_input_shape=(num_samples, timesteps))) layer = layer_class( units, return_sequences=False, stateful=True, weights=None) model.add(layer) model.compile( optimizer=gradient_descent.GradientDescentOptimizer(0.01), loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) out1 = model.predict(np.ones((num_samples, timesteps))) self.assertEqual(out1.shape, (num_samples, units)) # train once so that the states change model.train_on_batch( np.ones((num_samples, timesteps)), np.ones((num_samples, units))) out2 = model.predict(np.ones((num_samples, timesteps))) # if the state is not reset, output should be different self.assertNotEqual(out1.max(), out2.max()) # check that output changes after states are reset # (even though the model itself didn't change) layer.reset_states() out3 = model.predict(np.ones((num_samples, timesteps))) self.assertNotEqual(out2.max(), out3.max()) # check that container-level reset_states() works model.reset_states() out4 = model.predict(np.ones((num_samples, timesteps))) np.testing.assert_allclose(out3, out4, atol=1e-5) # check that the call to `predict` updated the states out5 = model.predict(np.ones((num_samples, timesteps))) self.assertNotEqual(out4.max(), out5.max()) # Check masking layer.reset_states() left_padded_input = np.ones((num_samples, timesteps)) left_padded_input[0, :1] = 0 left_padded_input[1, :2] = 0 out6 = model.predict(left_padded_input) layer.reset_states() right_padded_input = np.ones((num_samples, timesteps)) right_padded_input[0, -1:] = 0 right_padded_input[1, -2:] = 0 out7 = model.predict(right_padded_input) np.testing.assert_allclose(out7, out6, atol=1e-5) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/simplernn_test.py
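A condensed sketch of the stateful behavior that test_statefulness_SimpleRNN checks above: repeated predict() calls carry the hidden state forward until reset_states() is called (shapes are illustrative):

```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.SimpleRNN(2, stateful=True, batch_input_shape=(2, 3, 4)),
])
x = np.ones((2, 3, 4), dtype='float32')

out1 = model.predict(x, batch_size=2)  # also updates the layer's state
out2 = model.predict(x, batch_size=2)  # differs from out1: state carried over
model.reset_states()
out3 = model.predict(x, batch_size=2)  # state cleared, matches out1 again
np.testing.assert_allclose(out1, out3, atol=1e-5)
```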
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for RNN cell wrapper v2 implementation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras import layers from tensorflow.python.keras.layers import rnn_cell_wrapper_v2 from tensorflow.python.layers import base as base_layer from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import rnn_cell_impl from tensorflow.python.ops import variables as variables_lib from tensorflow.python.platform import test class RNNCellWrapperTest(test.TestCase, parameterized.TestCase): @test_util.run_in_graph_and_eager_modes def testResidualWrapper(self): wrapper_type = rnn_cell_wrapper_v2.ResidualWrapper x = ops.convert_to_tensor(np.array([[1., 1., 1.]]), dtype="float32") m = ops.convert_to_tensor(np.array([[0.1, 0.1, 0.1]]), dtype="float32") base_cell = rnn_cell_impl.GRUCell( 3, kernel_initializer=init_ops.constant_initializer(0.5), bias_initializer=init_ops.constant_initializer(0.5)) g, m_new = base_cell(x, m) wrapper_object = wrapper_type(base_cell) (name, dep), = wrapper_object._checkpoint_dependencies wrapper_object.get_config() # Should not throw an error self.assertIs(dep, base_cell) self.assertEqual("cell", name) g_res, m_new_res = wrapper_object(x, m) self.evaluate([variables_lib.global_variables_initializer()]) res = self.evaluate([g, g_res, m_new, m_new_res]) # Residual connections self.assertAllClose(res[1], res[0] + [1., 1., 1.]) # States are left untouched self.assertAllClose(res[2], res[3]) @test_util.run_in_graph_and_eager_modes def testResidualWrapperWithSlice(self): wrapper_type = rnn_cell_wrapper_v2.ResidualWrapper x = ops.convert_to_tensor(np.array([[1., 1., 1., 1., 1.]]), dtype="float32") m = ops.convert_to_tensor(np.array([[0.1, 0.1, 0.1]]), dtype="float32") base_cell = rnn_cell_impl.GRUCell( 3, kernel_initializer=init_ops.constant_initializer(0.5), bias_initializer=init_ops.constant_initializer(0.5)) g, m_new = base_cell(x, m) def residual_with_slice_fn(inp, out): inp_sliced = array_ops.slice(inp, [0, 0], [-1, 3]) return inp_sliced + out g_res, m_new_res = wrapper_type( base_cell, residual_with_slice_fn)(x, m) self.evaluate([variables_lib.global_variables_initializer()]) res_g, res_g_res, res_m_new, res_m_new_res = self.evaluate( [g, g_res, m_new, m_new_res]) # Residual connections self.assertAllClose(res_g_res, res_g + [1., 1., 1.]) # States are left untouched self.assertAllClose(res_m_new, res_m_new_res) def testDeviceWrapper(self): wrapper_type = rnn_cell_wrapper_v2.DeviceWrapper x = array_ops.zeros([1, 3]) m = array_ops.zeros([1, 3]) cell = 
rnn_cell_impl.GRUCell(3) wrapped_cell = wrapper_type(cell, "/cpu:0") (name, dep), = wrapped_cell._checkpoint_dependencies wrapped_cell.get_config() # Should not throw an error self.assertIs(dep, cell) self.assertEqual("cell", name) outputs, _ = wrapped_cell(x, m) self.assertIn("cpu:0", outputs.device.lower()) @parameterized.parameters( [[rnn_cell_impl.DropoutWrapper, rnn_cell_wrapper_v2.DropoutWrapper], [rnn_cell_impl.ResidualWrapper, rnn_cell_wrapper_v2.ResidualWrapper]]) @test_util.run_in_graph_and_eager_modes def testWrapperKerasStyle(self, wrapper, wrapper_v2): """Tests if wrapper cell is instantiated in keras style scope.""" wrapped_cell_v2 = wrapper_v2(rnn_cell_impl.BasicRNNCell(1)) self.assertIsNone(getattr(wrapped_cell_v2, "_keras_style", None)) wrapped_cell = wrapper(rnn_cell_impl.BasicRNNCell(1)) self.assertFalse(wrapped_cell._keras_style) @parameterized.parameters( [rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper]) @test_util.run_in_graph_and_eager_modes def testWrapperV2VariableNames(self, wrapper): """Tests that variables names do not depend on wrapper in RNN layer.""" def _rnn_input(apply_wrapper, name): """Creates a RNN layer with/without wrapper and returns built rnn cell.""" with base_layer.keras_style_scope(): base_cell = rnn_cell_impl.MultiRNNCell( [rnn_cell_impl.BasicRNNCell(1, name="basic_rnn_cell") for _ in range(2)]) if apply_wrapper: rnn_cell = wrapper(base_cell) else: rnn_cell = base_cell rnn_layer = layers.RNN(rnn_cell, name=name) inputs = ops.convert_to_tensor([[[1]]], dtype=dtypes.float32) _ = rnn_layer(inputs) return base_cell._cells[0] rnn_1 = _rnn_input(True, name="rnn_0") rnn_2 = _rnn_input(False, name="rnn_1") for i, cell in enumerate([rnn_1, rnn_2]): var_prefix = "rnn_{}/cell_0/basic_rnn_cell/".format(i) self.assertCountEqual([v.name for v in cell.weights], (var_prefix + "kernel:0", var_prefix + "bias:0")) @parameterized.parameters( [rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper]) @test_util.run_in_graph_and_eager_modes def testWrapperWeights(self, wrapper): """Tests that wrapper weights contain wrapped cells weights.""" base_cell = layers.SimpleRNNCell(1, name="basic_rnn_cell") rnn_cell = wrapper(base_cell) rnn_layer = layers.RNN(rnn_cell) inputs = ops.convert_to_tensor([[[1]]], dtype=dtypes.float32) rnn_layer(inputs) expected_weights = ["rnn/" + var for var in ("kernel:0", "recurrent_kernel:0", "bias:0")] self.assertLen(rnn_cell.weights, 3) self.assertCountEqual([v.name for v in rnn_cell.weights], expected_weights) self.assertCountEqual([v.name for v in rnn_cell.trainable_variables], expected_weights) self.assertCountEqual([v.name for v in rnn_cell.non_trainable_variables], []) self.assertCountEqual([v.name for v in rnn_cell.cell.weights], expected_weights) @parameterized.parameters( [rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper]) @test_util.run_in_graph_and_eager_modes def testWrapperV2Caller(self, wrapper): """Tests that wrapper V2 is using the LayerRNNCell's caller.""" with base_layer.keras_style_scope(): base_cell = rnn_cell_impl.MultiRNNCell( [rnn_cell_impl.BasicRNNCell(1) for _ in range(2)]) rnn_cell = wrapper(base_cell) inputs = ops.convert_to_tensor([[1]], dtype=dtypes.float32) state = ops.convert_to_tensor([[1]], dtype=dtypes.float32) _ = rnn_cell(inputs, [state, state]) weights = base_cell._cells[0].weights self.assertLen(weights, expected_len=2) self.assertTrue(all(["_wrapper" in v.name for v in weights])) @parameterized.parameters( 
[rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper]) @test_util.run_in_graph_and_eager_modes def testWrapperV2Build(self, wrapper): cell = rnn_cell_impl.LSTMCell(10) wrapper = wrapper(cell) wrapper.build((1,)) self.assertTrue(cell.built) def testDeviceWrapperSerialization(self): wrapper_cls = rnn_cell_wrapper_v2.DeviceWrapper cell = layers.LSTMCell(10) wrapper = wrapper_cls(cell, "/cpu:0") config = wrapper.get_config() reconstructed_wrapper = wrapper_cls.from_config(config) self.assertDictEqual(config, reconstructed_wrapper.get_config()) self.assertIsInstance(reconstructed_wrapper, wrapper_cls) def testResidualWrapperSerialization(self): wrapper_cls = rnn_cell_wrapper_v2.ResidualWrapper cell = layers.LSTMCell(10) wrapper = wrapper_cls(cell) config = wrapper.get_config() reconstructed_wrapper = wrapper_cls.from_config(config) self.assertDictEqual(config, reconstructed_wrapper.get_config()) self.assertIsInstance(reconstructed_wrapper, wrapper_cls) wrapper = wrapper_cls(cell, residual_fn=lambda i, o: i + i + o) config = wrapper.get_config() reconstructed_wrapper = wrapper_cls.from_config(config) # Assert the reconstructed function will perform the math correctly. self.assertEqual(reconstructed_wrapper._residual_fn(1, 2), 4) def residual_fn(inputs, outputs): return inputs * 3 + outputs wrapper = wrapper_cls(cell, residual_fn=residual_fn) config = wrapper.get_config() reconstructed_wrapper = wrapper_cls.from_config(config) # Assert the reconstructed function will perform the math correctly. self.assertEqual(reconstructed_wrapper._residual_fn(1, 2), 5) def testDropoutWrapperSerialization(self): wrapper_cls = rnn_cell_wrapper_v2.DropoutWrapper cell = layers.LSTMCell(10) wrapper = wrapper_cls(cell) config = wrapper.get_config() reconstructed_wrapper = wrapper_cls.from_config(config) self.assertDictEqual(config, reconstructed_wrapper.get_config()) self.assertIsInstance(reconstructed_wrapper, wrapper_cls) wrapper = wrapper_cls(cell, dropout_state_filter_visitor=lambda s: True) config = wrapper.get_config() reconstructed_wrapper = wrapper_cls.from_config(config) self.assertTrue(reconstructed_wrapper._dropout_state_filter(None)) def dropout_state_filter_visitor(unused_state): return False wrapper = wrapper_cls( cell, dropout_state_filter_visitor=dropout_state_filter_visitor) config = wrapper.get_config() reconstructed_wrapper = wrapper_cls.from_config(config) self.assertFalse(reconstructed_wrapper._dropout_state_filter(None)) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/rnn_cell_wrapper_v2_test.py
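The residual semantics these wrapper tests rely on can be shown compactly: the wrapper adds the cell's input to its output and leaves the state untouched. A sketch using the internal module path from this tree (eager execution assumed):

```python
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.layers import rnn_cell_wrapper_v2

tf.compat.v1.enable_eager_execution()

base = tf.keras.layers.SimpleRNNCell(3)
cell = rnn_cell_wrapper_v2.ResidualWrapper(base)

x = tf.ones((1, 3))
state = [tf.zeros((1, 3))]
out, new_state = cell(x, state)
base_out, _ = base(x, state)  # same weights, so same inner output

# Residual connection: wrapper output == input + wrapped cell output.
np.testing.assert_allclose(out.numpy(), (x + base_out).numpy(), atol=1e-6)
```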
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for recurrent v2 layers functionality other than GRU, LSTM. See also: lstm_v2_test.py, gru_v2_test.py. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.framework import test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.layers import recurrent_v2 as rnn_v2 from tensorflow.python.platform import test @keras_parameterized.run_all_keras_modes class RNNV2Test(keras_parameterized.TestCase): @parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU]) def test_device_placement(self, layer): if not test.is_gpu_available(): self.skipTest('Need GPU for testing.') vocab_size = 20 embedding_dim = 10 batch_size = 8 timestep = 12 units = 5 x = np.random.randint(0, vocab_size, size=(batch_size, timestep)) y = np.random.randint(0, vocab_size, size=(batch_size, timestep)) # Test when GPU is available but not used, the graph should be properly # created with CPU ops. with test_util.device(use_gpu=False): model = keras.Sequential([ keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, timestep]), layer(units, return_sequences=True, stateful=True), keras.layers.Dense(vocab_size) ]) model.compile( optimizer='adam', loss='sparse_categorical_crossentropy', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, epochs=1, shuffle=False) @parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU]) def test_reset_dropout_mask_between_batch(self, layer): # See https://github.com/tensorflow/tensorflow/issues/29187 for more details batch_size = 8 timestep = 12 embedding_dim = 10 units = 5 layer = layer(units, dropout=0.5, recurrent_dropout=0.5) inputs = np.random.random((batch_size, timestep, embedding_dim)).astype( np.float32) previous_dropout, previous_recurrent_dropout = None, None for _ in range(5): layer(inputs, training=True) dropout = layer.cell.get_dropout_mask_for_cell(inputs, training=True) recurrent_dropout = layer.cell.get_recurrent_dropout_mask_for_cell( inputs, training=True) if previous_dropout is not None: self.assertNotAllClose(self.evaluate(previous_dropout), self.evaluate(dropout)) previous_dropout = dropout if previous_recurrent_dropout is not None: self.assertNotAllClose(self.evaluate(previous_recurrent_dropout), self.evaluate(recurrent_dropout)) previous_recurrent_dropout = recurrent_dropout @parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU]) def test_recurrent_dropout_with_stateful_RNN(self, layer): # See https://github.com/tensorflow/tensorflow/issues/27829 for details. 
# The issue was caused by using inplace mul for a variable, which was a # warning for RefVariable, but an error for ResourceVariable in 2.0 keras.models.Sequential([ layer(128, stateful=True, return_sequences=True, dropout=0.2, batch_input_shape=[32, None, 5], recurrent_dropout=0.2) ]) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/recurrent_v2_test.py
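For context, the v2 LSTM/GRU layers tested here select between a cuDNN kernel and a generic implementation at runtime while exposing a single public API. A sketch (assuming a TF 2.x-style `tf.keras` where `LSTM` resolves to the recurrent_v2 implementation):

```python
import numpy as np
import tensorflow as tf

# The same layer code runs on CPU or GPU; when a compatible GPU is
# present and the configuration allows it, the cuDNN path is used
# transparently, which is what test_device_placement exercises above.
layer = tf.keras.layers.LSTM(5, return_sequences=True)
x = np.random.random((8, 12, 10)).astype('float32')
y = layer(x)
print(y.shape)  # (8, 12, 5)
```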
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Layers that act as activation functions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras import backend as K from tensorflow.python.keras import constraints from tensorflow.python.keras import initializers from tensorflow.python.keras import regularizers from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import math_ops from tensorflow.python.util.tf_export import keras_export @keras_export('keras.layers.LeakyReLU') class LeakyReLU(Layer): """Leaky version of a Rectified Linear Unit. It allows a small gradient when the unit is not active: `f(x) = alpha * x for x < 0`, `f(x) = x for x >= 0`. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. Arguments: alpha: Float >= 0. Negative slope coefficient. """ def __init__(self, alpha=0.3, **kwargs): super(LeakyReLU, self).__init__(**kwargs) self.supports_masking = True self.alpha = K.cast_to_floatx(alpha) def call(self, inputs): return K.relu(inputs, alpha=self.alpha) def get_config(self): config = {'alpha': float(self.alpha)} base_config = super(LeakyReLU, self).get_config() return dict(list(base_config.items()) + list(config.items())) @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): return input_shape @keras_export('keras.layers.PReLU') class PReLU(Layer): """Parametric Rectified Linear Unit. It follows: `f(x) = alpha * x for x < 0`, `f(x) = x for x >= 0`, where `alpha` is a learned array with the same shape as x. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. Arguments: alpha_initializer: Initializer function for the weights. alpha_regularizer: Regularizer for the weights. alpha_constraint: Constraint for the weights. shared_axes: The axes along which to share learnable parameters for the activation function. For example, if the incoming feature maps are from a 2D convolution with output shape `(batch, height, width, channels)`, and you wish to share parameters across space so that each filter only has one set of parameters, set `shared_axes=[1, 2]`. 
""" def __init__(self, alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None, **kwargs): super(PReLU, self).__init__(**kwargs) self.supports_masking = True self.alpha_initializer = initializers.get(alpha_initializer) self.alpha_regularizer = regularizers.get(alpha_regularizer) self.alpha_constraint = constraints.get(alpha_constraint) if shared_axes is None: self.shared_axes = None elif not isinstance(shared_axes, (list, tuple)): self.shared_axes = [shared_axes] else: self.shared_axes = list(shared_axes) @tf_utils.shape_type_conversion def build(self, input_shape): param_shape = list(input_shape[1:]) if self.shared_axes is not None: for i in self.shared_axes: param_shape[i - 1] = 1 self.alpha = self.add_weight( shape=param_shape, name='alpha', initializer=self.alpha_initializer, regularizer=self.alpha_regularizer, constraint=self.alpha_constraint) # Set input spec axes = {} if self.shared_axes: for i in range(1, len(input_shape)): if i not in self.shared_axes: axes[i] = input_shape[i] self.input_spec = InputSpec(ndim=len(input_shape), axes=axes) self.built = True def call(self, inputs): pos = K.relu(inputs) neg = -self.alpha * K.relu(-inputs) return pos + neg def get_config(self): config = { 'alpha_initializer': initializers.serialize(self.alpha_initializer), 'alpha_regularizer': regularizers.serialize(self.alpha_regularizer), 'alpha_constraint': constraints.serialize(self.alpha_constraint), 'shared_axes': self.shared_axes } base_config = super(PReLU, self).get_config() return dict(list(base_config.items()) + list(config.items())) @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): return input_shape @keras_export('keras.layers.ELU') class ELU(Layer): """Exponential Linear Unit. It follows: `f(x) = alpha * (exp(x) - 1.) for x < 0`, `f(x) = x for x >= 0`. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. Arguments: alpha: Scale for the negative factor. """ def __init__(self, alpha=1.0, **kwargs): super(ELU, self).__init__(**kwargs) self.supports_masking = True self.alpha = K.cast_to_floatx(alpha) def call(self, inputs): return K.elu(inputs, self.alpha) def get_config(self): config = {'alpha': float(self.alpha)} base_config = super(ELU, self).get_config() return dict(list(base_config.items()) + list(config.items())) @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): return input_shape @keras_export('keras.layers.ThresholdedReLU') class ThresholdedReLU(Layer): """Thresholded Rectified Linear Unit. It follows: `f(x) = x for x > theta`, `f(x) = 0 otherwise`. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. Arguments: theta: Float >= 0. Threshold location of activation. 
""" def __init__(self, theta=1.0, **kwargs): super(ThresholdedReLU, self).__init__(**kwargs) self.supports_masking = True self.theta = K.cast_to_floatx(theta) def call(self, inputs): return inputs * math_ops.cast( math_ops.greater(inputs, self.theta), K.floatx()) def get_config(self): config = {'theta': float(self.theta)} base_config = super(ThresholdedReLU, self).get_config() return dict(list(base_config.items()) + list(config.items())) @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): return input_shape @keras_export('keras.layers.Softmax') class Softmax(Layer): """Softmax activation function. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. Arguments: axis: Integer, axis along which the softmax normalization is applied. """ def __init__(self, axis=-1, **kwargs): super(Softmax, self).__init__(**kwargs) self.supports_masking = True self.axis = axis def call(self, inputs): return K.softmax(inputs, axis=self.axis) def get_config(self): config = {'axis': self.axis} base_config = super(Softmax, self).get_config() return dict(list(base_config.items()) + list(config.items())) @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): return input_shape @keras_export('keras.layers.ReLU') class ReLU(Layer): """Rectified Linear Unit activation function. With default values, it returns element-wise `max(x, 0)`. Otherwise, it follows: `f(x) = max_value` for `x >= max_value`, `f(x) = x` for `threshold <= x < max_value`, `f(x) = negative_slope * (x - threshold)` otherwise. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. Arguments: max_value: Float >= 0. Maximum activation value. negative_slope: Float >= 0. Negative slope coefficient. threshold: Float. Threshold value for thresholded activation. """ def __init__(self, max_value=None, negative_slope=0, threshold=0, **kwargs): super(ReLU, self).__init__(**kwargs) if max_value is not None and max_value < 0.: raise ValueError('max_value of Relu layer ' 'cannot be negative value: ' + str(max_value)) if negative_slope < 0.: raise ValueError('negative_slope of Relu layer ' 'cannot be negative value: ' + str(negative_slope)) self.support_masking = True if max_value is not None: max_value = K.cast_to_floatx(max_value) self.max_value = max_value self.negative_slope = K.cast_to_floatx(negative_slope) self.threshold = K.cast_to_floatx(threshold) def call(self, inputs): # alpha is used for leaky relu slope in activations instead of # negative_slope. return K.relu(inputs, alpha=self.negative_slope, max_value=self.max_value, threshold=self.threshold) def get_config(self): config = { 'max_value': self.max_value, 'negative_slope': self.negative_slope, 'threshold': self.threshold } base_config = super(ReLU, self).get_config() return dict(list(base_config.items()) + list(config.items())) @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): return input_shape
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/advanced_activations.py
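The piecewise definition in the ReLU docstring above can be verified directly. A small numeric check (eager execution assumed; the inputs are chosen to hit all three branches):

```python
import numpy as np
import tensorflow as tf

tf.compat.v1.enable_eager_execution()

relu = tf.keras.layers.ReLU(max_value=6.0, negative_slope=0.1, threshold=1.0)
x = np.array([[-2.0, 0.5, 2.0, 10.0]], dtype='float32')

# f(x) = max_value                        for x >= max_value
# f(x) = x                                for threshold <= x < max_value
# f(x) = negative_slope * (x - threshold) otherwise
expected = np.array([[0.1 * (-2.0 - 1.0),   # -0.3
                      0.1 * (0.5 - 1.0),    # -0.05
                      2.0, 6.0]], dtype='float32')
np.testing.assert_allclose(relu(x).numpy(), expected, atol=1e-6)
```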
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for layer wrappers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.platform import test # TODO(b/125513261): Move this back into wrappers_test.py. class TimeDistributedLearningPhaseTest(test.TestCase): def test_TimeDistributed_learning_phase(self): with self.cached_session(): # test layers that need learning_phase to be set np.random.seed(1234) x = keras.layers.Input(shape=(3, 2)) y = keras.layers.TimeDistributed(keras.layers.Dropout(.999))( x, training=True) model = keras.models.Model(x, y) y = model.predict(np.random.random((10, 3, 2))) self.assertAllClose(np.mean(y), 0., atol=1e-1, rtol=1e-1) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/time_distributed_learning_phase_test.py
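The test above forces the learning phase per call. The underlying mechanism is `backend.in_train_phase`, which the dropout and noise layers in this directory route through; a minimal sketch:

```python
import tensorflow as tf
from tensorflow.python.keras import backend as K

tf.compat.v1.enable_eager_execution()

x = tf.constant([1.0, 2.0])

# First branch when training=1, second when training=0; Dropout,
# GaussianNoise, etc. wrap their noisy branch in a callable like this.
print(K.in_train_phase(lambda: x * 0.0, x, training=1).numpy())  # [0. 0.]
print(K.in_train_phase(lambda: x * 0.0, x, training=0).numpy())  # [1. 2.]
```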
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras subclassed layers utilizing desired user syntax.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import variables from tensorflow.python.platform import test @keras_parameterized.run_all_keras_modes @keras_parameterized.run_with_all_model_types class SubclassedLayersTest(keras_parameterized.TestCase): def test_simple_build_with_constant(self): class BuildConstantLayer(keras.layers.Layer): def build(self, input_shape): self.b = ops.convert_to_tensor(2.0) def call(self, inputs): return self.b * inputs layer = BuildConstantLayer() model = testing_utils.get_model_from_layers( [layer, keras.layers.Dense(1)], input_shape=(1,)) x = ops.convert_to_tensor([[3.0]]) self.assertEqual( tf_utils.is_symbolic_tensor(model(x)), not context.executing_eagerly()) self.assertEqual( tf_utils.is_symbolic_tensor(layer(x)), not context.executing_eagerly()) self.assertAllClose(keras.backend.get_value(layer(x)), [[6.0]]) def test_build_with_derived_constant(self): class BuildDerivedConstantLayer(keras.layers.Layer): def build(self, input_shape): a = ops.convert_to_tensor(1.0) b = 2.0 * a self.variable = variables.Variable(b) self.constant = ops.convert_to_tensor(self.variable) def call(self, inputs): return self.variable * self.constant * inputs layer = BuildDerivedConstantLayer() model = testing_utils.get_model_from_layers( [layer, keras.layers.Dense(1)], input_shape=(1,)) x = ops.convert_to_tensor([[3.0]]) self.assertEqual( tf_utils.is_symbolic_tensor(model(x)), not context.executing_eagerly()) self.assertEqual( tf_utils.is_symbolic_tensor(layer(x)), not context.executing_eagerly()) self.assertAllClose(keras.backend.get_value(layer(x)), [[12.0]]) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/subclassed_layers_test.py
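The pattern these subclassed-layer tests exercise, condensed: constants (not just variables) created in build() are legal and are used directly in call(). A sketch:

```python
import tensorflow as tf

tf.compat.v1.enable_eager_execution()

class ScaleByConstant(tf.keras.layers.Layer):
  """Creates a non-variable constant in build() and uses it in call()."""

  def build(self, input_shape):
    self.b = tf.constant(2.0)

  def call(self, inputs):
    return self.b * inputs

layer = ScaleByConstant()
print(layer(tf.constant([[3.0]])))  # [[6.0]]
```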
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for cudnn recurrent layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tempfile from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.framework import test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.optimizer_v2.rmsprop import RMSprop from tensorflow.python.ops import array_ops from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent @keras_parameterized.run_all_keras_modes class CuDNNTest(keras_parameterized.TestCase): @parameterized.named_parameters( *test_util.generate_combinations_with_testcase_name( layer_class=[keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM], return_sequences=[True, False])) @test_util.run_gpu_only def test_cudnn_rnn_return_sequence(self, layer_class, return_sequences): input_size = 10 timesteps = 6 units = 2 num_samples = 32 testing_utils.layer_test( layer_class, kwargs={'units': units, 'return_sequences': return_sequences}, input_shape=(num_samples, timesteps, input_size)) @parameterized.named_parameters( *test_util.generate_combinations_with_testcase_name( layer_class=[keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM], go_backwards=[True, False])) @test_util.run_gpu_only def test_cudnn_rnn_go_backward(self, layer_class, go_backwards): input_size = 10 timesteps = 6 units = 2 num_samples = 32 testing_utils.layer_test( layer_class, kwargs={'units': units, 'go_backwards': go_backwards}, input_shape=(num_samples, timesteps, input_size)) @parameterized.named_parameters( ('cudnngru', keras.layers.CuDNNGRU), ('cudnnlstm', keras.layers.CuDNNLSTM), ) @test_util.run_gpu_only def test_return_state(self, layer_class): input_size = 10 timesteps = 6 units = 2 num_samples = 32 num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1 inputs = keras.Input(batch_shape=(num_samples, timesteps, input_size)) layer = layer_class(units, return_state=True, stateful=True) outputs = layer(inputs) _, state = outputs[0], outputs[1:] self.assertEqual(len(state), num_states) model = keras.models.Model(inputs, state[0]) model.run_eagerly = testing_utils.should_run_eagerly() model._experimental_run_tf_function = testing_utils.should_run_tf_function() inputs = np.random.random((num_samples, timesteps, input_size)) state = model.predict(inputs) np.testing.assert_allclose( keras.backend.eval(layer.states[0]), state, atol=1e-4) @parameterized.named_parameters( ('cudnngru', keras.layers.CuDNNGRU), ('cudnnlstm', keras.layers.CuDNNLSTM), ) @test_util.run_gpu_only def test_time_major_input(self, layer_class): input_size = 10 timesteps = 6 units = 2 num_samples = 32 model = keras.models.Sequential() model.add( keras.layers.Lambda(lambda t: 
array_ops.transpose(t, [1, 0, 2]))) layer = layer_class(units, time_major=True, return_sequences=True) model.add(layer) model.add( keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2]))) model.compile(loss='categorical_crossentropy', optimizer=RMSprop(learning_rate=0.001)) model.fit( np.ones((num_samples, timesteps, input_size)), np.ones((num_samples, timesteps, units))) out = model.predict(np.ones((num_samples, timesteps, input_size))) self.assertEqual(out.shape, (num_samples, timesteps, units)) @parameterized.named_parameters( ('cudnngru', keras.layers.CuDNNGRU), ('cudnnlstm', keras.layers.CuDNNLSTM), ) @test_util.run_gpu_only def test_specify_initial_state_keras_tensor(self, layer_class): input_size = 10 timesteps = 6 units = 2 num_samples = 32 num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1 inputs = keras.Input((timesteps, input_size)) initial_state = [keras.Input((units,)) for _ in range(num_states)] layer = layer_class(units) if len(initial_state) == 1: output = layer(inputs, initial_state=initial_state[0]) else: output = layer(inputs, initial_state=initial_state) self.assertTrue( any(initial_state[0] is t for t in layer._inbound_nodes[0].input_tensors)) model = keras.models.Model([inputs] + initial_state, output) model.compile( loss='categorical_crossentropy', optimizer=RMSprop(learning_rate=0.001), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) inputs = np.random.random((num_samples, timesteps, input_size)) initial_state = [ np.random.random((num_samples, units)) for _ in range(num_states) ] targets = np.random.random((num_samples, units)) model.fit([inputs] + initial_state, targets) class CuDNNGraphOnlyTest(keras_parameterized.TestCase): @parameterized.named_parameters( ('cudnngru', keras.layers.CuDNNGRU), ('cudnnlstm', keras.layers.CuDNNLSTM), ) @test_util.run_deprecated_v1 @test_util.run_gpu_only def test_regularizer(self, layer_class): input_size = 10 timesteps = 6 units = 2 num_samples = 32 layer = layer_class( units, return_sequences=False, input_shape=(timesteps, input_size), kernel_regularizer=keras.regularizers.l1(0.01), recurrent_regularizer=keras.regularizers.l1(0.01), bias_regularizer='l2') layer.build((None, None, input_size)) self.assertEqual(len(layer.losses), 3) layer = layer_class( units, return_sequences=False, input_shape=(timesteps, input_size), activity_regularizer='l2') self.assertTrue(layer.activity_regularizer) x = keras.backend.variable( np.ones((num_samples, timesteps, input_size))) layer(x) self.assertEqual(len(layer.get_losses_for(x)), 1) @parameterized.named_parameters( ('cudnngru', keras.layers.CuDNNGRU), ('cudnnlstm', keras.layers.CuDNNLSTM), ) @test_util.run_gpu_only @test_util.run_v1_only('b/120941292') def test_statefulness(self, layer_class): input_size = 10 timesteps = 6 units = 2 num_samples = 32 with self.cached_session(use_gpu=True): model = keras.models.Sequential() model.add( keras.layers.Embedding( 10, input_size, input_length=timesteps, batch_input_shape=(num_samples, timesteps))) layer = layer_class( units, return_sequences=False, stateful=True, weights=None) model.add(layer) model.compile(optimizer=gradient_descent.GradientDescentOptimizer(0.01), loss='mse') out1 = model.predict(np.ones((num_samples, timesteps))) self.assertEqual(out1.shape, (num_samples, units)) # train once so that the states change model.train_on_batch( np.ones((num_samples, timesteps)), np.ones((num_samples, units))) out2 = model.predict(np.ones((num_samples, timesteps))) # 
if the state is not reset, output should be different self.assertNotEqual(out1.max(), out2.max()) # check that output changes after states are reset # (even though the model itself didn't change) layer.reset_states() out3 = model.predict(np.ones((num_samples, timesteps))) self.assertNotEqual(out2.max(), out3.max()) # check that container-level reset_states() works model.reset_states() out4 = model.predict(np.ones((num_samples, timesteps))) self.assertAllClose(out3, out4, atol=1e-5) # check that the call to `predict` updated the states out5 = model.predict(np.ones((num_samples, timesteps))) self.assertNotEqual(out4.max(), out5.max()) @test_util.run_all_in_graph_and_eager_modes class CuDNNV1OnlyTest(keras_parameterized.TestCase): @test_util.run_gpu_only def test_trainability(self): input_size = 10 units = 2 for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]: layer = layer_class(units) layer.build((None, None, input_size)) self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.trainable_weights), 3) self.assertEqual(len(layer.non_trainable_weights), 0) layer.trainable = False self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.non_trainable_weights), 3) self.assertEqual(len(layer.trainable_weights), 0) layer.trainable = True self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.trainable_weights), 3) self.assertEqual(len(layer.non_trainable_weights), 0) @parameterized.named_parameters( *test_util.generate_combinations_with_testcase_name( rnn_type=['LSTM', 'GRU'], to_cudnn=[True, False], bidirectional=[True, False], implementation=[1, 2], model_nest_level=[1, 2], model_type=['seq', 'func'])) @test_util.run_v1_only('b/120911602, b/112083752') @test_util.run_gpu_only def test_load_weights_between_noncudnn_rnn(self, rnn_type, to_cudnn, bidirectional, implementation, model_nest_level, model_type): input_size = 10 timesteps = 6 input_shape = (timesteps, input_size) units = 2 num_samples = 32 inputs = np.random.random((num_samples, timesteps, input_size)) rnn_layer_kwargs = { 'recurrent_activation': 'sigmoid', # ensure biases are non-zero and properly converted 'bias_initializer': 'random_uniform', 'implementation': implementation } if rnn_type == 'LSTM': rnn_layer_class = keras.layers.LSTM cudnn_rnn_layer_class = keras.layers.CuDNNLSTM else: rnn_layer_class = keras.layers.GRU cudnn_rnn_layer_class = keras.layers.CuDNNGRU rnn_layer_kwargs['reset_after'] = True layer = rnn_layer_class(units, **rnn_layer_kwargs) if bidirectional: layer = keras.layers.Bidirectional(layer) cudnn_layer = cudnn_rnn_layer_class(units) if bidirectional: cudnn_layer = keras.layers.Bidirectional(cudnn_layer) model = self._make_nested_model(input_shape, layer, model_nest_level, model_type) cudnn_model = self._make_nested_model(input_shape, cudnn_layer, model_nest_level, model_type) if to_cudnn: self._convert_model_weights(model, cudnn_model) else: self._convert_model_weights(cudnn_model, model) self.assertAllClose(model.predict(inputs), cudnn_model.predict(inputs), atol=1e-4) def _make_nested_model(self, input_shape, layer, level=1, model_type='func'): # example: make_nested_seq_model((1,), Dense(10), level=2).summary() def make_nested_seq_model(input_shape, layer, level=1): model = layer for i in range(1, level + 1): layers = [keras.layers.InputLayer(input_shape), model] if (i == 1) else [model] model = keras.models.Sequential(layers) if i > 1: model.build((None,) + input_shape) return model # example: make_nested_func_model((1,), Dense(10), level=2).summary() def 
make_nested_func_model(input_shape, layer, level=1): model_input = keras.layers.Input(input_shape) model = layer for _ in range(level): model = keras.models.Model(model_input, model(model_input)) return model if model_type == 'func': return make_nested_func_model(input_shape, layer, level) elif model_type == 'seq': return make_nested_seq_model(input_shape, layer, level) def _convert_model_weights(self, source_model, target_model): _, fname = tempfile.mkstemp('.h5') source_model.save_weights(fname) target_model.load_weights(fname) os.remove(fname) @parameterized.named_parameters( *test_util.generate_combinations_with_testcase_name( rnn_type=['LSTM', 'GRU'], to_cudnn=[True, False])) @test_util.run_v1_only('b/120911602') @test_util.run_gpu_only def test_load_weights_between_noncudnn_rnn_time_distributed(self, rnn_type, to_cudnn): # Similar to test_load_weights_between_noncudnn_rnn(), but with a different # input rank due to the use of TimeDistributed. Issue: #10356. input_size = 10 steps = 6 timesteps = 6 input_shape = (timesteps, steps, input_size) units = 2 num_samples = 32 inputs = np.random.random((num_samples, timesteps, steps, input_size)) rnn_layer_kwargs = { 'recurrent_activation': 'sigmoid', # ensure biases are non-zero and properly converted 'bias_initializer': 'random_uniform', } if rnn_type == 'LSTM': rnn_layer_class = keras.layers.LSTM cudnn_rnn_layer_class = keras.layers.CuDNNLSTM else: rnn_layer_class = keras.layers.GRU cudnn_rnn_layer_class = keras.layers.CuDNNGRU rnn_layer_kwargs['reset_after'] = True layer = rnn_layer_class(units, **rnn_layer_kwargs) layer = keras.layers.TimeDistributed(layer) cudnn_layer = cudnn_rnn_layer_class(units) cudnn_layer = keras.layers.TimeDistributed(cudnn_layer) model = self._make_nested_model(input_shape, layer) cudnn_model = self._make_nested_model(input_shape, cudnn_layer) if to_cudnn: self._convert_model_weights(model, cudnn_model) else: self._convert_model_weights(cudnn_model, model) self.assertAllClose(model.predict(inputs), cudnn_model.predict(inputs), atol=1e-4) @test_util.run_gpu_only def test_cudnnrnn_bidirectional(self): rnn = keras.layers.CuDNNGRU samples = 2 dim = 2 timesteps = 2 output_dim = 2 mode = 'concat' x = np.random.random((samples, timesteps, dim)) target_dim = 2 * output_dim if mode == 'concat' else output_dim y = np.random.random((samples, target_dim)) # test with Sequential model model = keras.Sequential() model.add( keras.layers.Bidirectional( rnn(output_dim), merge_mode=mode, input_shape=(None, dim))) model.compile(loss='mse', optimizer='rmsprop') model.fit(x, y, epochs=1, batch_size=1) # test config model.get_config() model = keras.models.model_from_json(model.to_json()) model.summary() # test stacked bidirectional layers model = keras.Sequential() model.add( keras.layers.Bidirectional( rnn(output_dim, return_sequences=True), merge_mode=mode, input_shape=(None, dim))) model.add(keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode)) model.compile(loss='mse', optimizer='rmsprop') model.fit(x, y, epochs=1, batch_size=1) # test with functional API inputs = keras.Input((timesteps, dim)) outputs = keras.layers.Bidirectional( rnn(output_dim), merge_mode=mode)( inputs) model = keras.Model(inputs, outputs) model.compile(loss='mse', optimizer='rmsprop') model.fit(x, y, epochs=1, batch_size=1) # Bidirectional and stateful inputs = keras.Input(batch_shape=(1, timesteps, dim)) outputs = keras.layers.Bidirectional( rnn(output_dim, stateful=True), merge_mode=mode)( inputs) model = keras.Model(inputs, outputs)
model.compile(loss='mse', optimizer='rmsprop') model.fit(x, y, epochs=1, batch_size=1) @test_util.run_gpu_only def test_preprocess_weights_for_loading_gru_incompatible(self): """Test loading weights between incompatible layers. Should fail fast with an exception. """ input_shape = (3, 5) def gru(cudnn=False, **kwargs): layer_class = keras.layers.CuDNNGRU if cudnn else keras.layers.GRU return layer_class(2, input_shape=input_shape, **kwargs) def get_layer_weights(layer): layer.build(input_shape=input_shape) return layer.get_weights() def assert_not_compatible(src, dest, message): with self.assertRaises(ValueError) as ex: keras.saving.preprocess_weights_for_loading( dest, get_layer_weights(src)) self.assertIn(message, str(ex.exception)) assert_not_compatible( gru(), gru(cudnn=True), 'GRU(reset_after=False) is not compatible with CuDNNGRU') assert_not_compatible( gru(cudnn=True), gru(), 'CuDNNGRU is not compatible with GRU(reset_after=False)') assert_not_compatible( gru(), gru(reset_after=True), 'GRU(reset_after=False) is not compatible with ' 'GRU(reset_after=True)') assert_not_compatible( gru(reset_after=True), gru(), 'GRU(reset_after=True) is not compatible with ' 'GRU(reset_after=False)') if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/cudnn_recurrent_test.py
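The weight-conversion tests above exercise a pattern worth seeing in isolation: a CuDNN layer and a compatibly configured plain layer can exchange weights through an HDF5 checkpoint, with the layout conversion happening inside `load_weights`. A minimal sketch of that round trip follows, assuming a CUDA-enabled build (as the `run_gpu_only` decorators require); the shapes and layer settings here are illustrative, not taken from the tests:

import os
import tempfile

import numpy as np

from tensorflow.python import keras


def build_model(layer, timesteps=6, input_size=10):
  # Wrap a single RNN layer in a Sequential model so weights can be
  # saved/loaded at the model level, as the tests above do.
  model = keras.models.Sequential([layer])
  model.build((None, timesteps, input_size))
  return model

# CuDNNLSTM only matches LSTM when the plain layer uses the activations
# hard-coded into cuDNN; compare the rnn_layer_kwargs in the tests above.
cudnn_model = build_model(keras.layers.CuDNNLSTM(2))
plain_model = build_model(
    keras.layers.LSTM(2, recurrent_activation='sigmoid'))

# Round-trip the weights through HDF5; the cuDNN weight layout is
# converted on load.
_, fname = tempfile.mkstemp('.h5')
cudnn_model.save_weights(fname)
plain_model.load_weights(fname)
os.remove(fname)

x = np.random.random((4, 6, 10)).astype(np.float32)
# Both models should now agree to within numerical tolerance, the same
# atol=1e-4 the tests assert.
np.testing.assert_allclose(
    cudnn_model.predict(x), plain_model.predict(x), atol=1e-4)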
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for LSTM layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test from tensorflow.python.training import adam from tensorflow.python.training import gradient_descent @keras_parameterized.run_all_keras_modes class LSTMLayerTest(keras_parameterized.TestCase): def test_return_sequences_LSTM(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( keras.layers.LSTM, kwargs={'units': units, 'return_sequences': True}, input_shape=(num_samples, timesteps, embedding_dim)) def test_float64_LSTM(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( keras.layers.LSTM, kwargs={'units': units, 'return_sequences': True, 'dtype': 'float64'}, input_shape=(num_samples, timesteps, embedding_dim), input_dtype='float64') def test_static_shape_inference_LSTM(self): # Github issue: 15165 timesteps = 3 embedding_dim = 4 units = 2 model = keras.models.Sequential() inputs = keras.layers.Dense(embedding_dim, input_shape=(timesteps, embedding_dim)) model.add(inputs) layer = keras.layers.LSTM(units, return_sequences=True) model.add(layer) outputs = model.layers[-1].output self.assertEqual(outputs.shape.as_list(), [None, timesteps, units]) def test_dynamic_behavior_LSTM(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 layer = keras.layers.LSTM(units, input_shape=(None, embedding_dim)) model = keras.models.Sequential() model.add(layer) model.compile( 'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.random.random((num_samples, timesteps, embedding_dim)) y = np.random.random((num_samples, units)) model.train_on_batch(x, y) def test_dropout_LSTM(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( keras.layers.LSTM, kwargs={'units': units, 'dropout': 0.1, 'recurrent_dropout': 0.1}, input_shape=(num_samples, timesteps, embedding_dim)) @parameterized.parameters([0, 1, 2]) def test_implementation_mode_LSTM(self, implementation_mode): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 testing_utils.layer_test( keras.layers.LSTM, kwargs={'units': units, 'implementation': implementation_mode}, input_shape=(num_samples, timesteps, embedding_dim)) def test_constraints_LSTM(self): embedding_dim = 4 layer_class = keras.layers.LSTM k_constraint = keras.constraints.max_norm(0.01) r_constraint = keras.constraints.max_norm(0.01) b_constraint = keras.constraints.max_norm(0.01) layer = 
layer_class( 5, return_sequences=False, weights=None, input_shape=(None, embedding_dim), kernel_constraint=k_constraint, recurrent_constraint=r_constraint, bias_constraint=b_constraint) layer.build((None, None, embedding_dim)) self.assertEqual(layer.cell.kernel.constraint, k_constraint) self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint) self.assertEqual(layer.cell.bias.constraint, b_constraint) def test_with_masking_layer_LSTM(self): layer_class = keras.layers.LSTM inputs = np.random.random((2, 3, 4)) targets = np.abs(np.random.random((2, 3, 5))) targets /= targets.sum(axis=-1, keepdims=True) model = keras.models.Sequential() model.add(keras.layers.Masking(input_shape=(3, 4))) model.add(layer_class(units=5, return_sequences=True, unroll=False)) model.compile( loss='categorical_crossentropy', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1) def test_masking_with_stacking_LSTM(self): inputs = np.random.random((2, 3, 4)) targets = np.abs(np.random.random((2, 3, 5))) targets /= targets.sum(axis=-1, keepdims=True) model = keras.models.Sequential() model.add(keras.layers.Masking(input_shape=(3, 4))) lstm_cells = [keras.layers.LSTMCell(10), keras.layers.LSTMCell(5)] model.add(keras.layers.RNN(lstm_cells, return_sequences=True, unroll=False)) model.compile( loss='categorical_crossentropy', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1) def test_from_config_LSTM(self): layer_class = keras.layers.LSTM for stateful in (False, True): l1 = layer_class(units=1, stateful=stateful) l2 = layer_class.from_config(l1.get_config()) assert l1.get_config() == l2.get_config() def test_specify_initial_state_keras_tensor(self): num_states = 2 timesteps = 3 embedding_dim = 4 units = 3 num_samples = 2 # Test with Keras tensor inputs = keras.Input((timesteps, embedding_dim)) initial_state = [keras.Input((units,)) for _ in range(num_states)] layer = keras.layers.LSTM(units) if len(initial_state) == 1: output = layer(inputs, initial_state=initial_state[0]) else: output = layer(inputs, initial_state=initial_state) self.assertTrue( any(initial_state[0] is t for t in layer._inbound_nodes[0].input_tensors)) model = keras.models.Model([inputs] + initial_state, output) model.compile( loss='categorical_crossentropy', optimizer=adam.AdamOptimizer(), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) inputs = np.random.random((num_samples, timesteps, embedding_dim)) initial_state = [np.random.random((num_samples, units)) for _ in range(num_states)] targets = np.random.random((num_samples, units)) model.train_on_batch([inputs] + initial_state, targets) def test_specify_initial_state_non_keras_tensor(self): num_states = 2 timesteps = 3 embedding_dim = 4 units = 3 num_samples = 2 # Test with non-Keras tensor inputs = keras.Input((timesteps, embedding_dim)) initial_state = [keras.backend.random_normal_variable( (num_samples, units), 0, 1) for _ in range(num_states)] layer = keras.layers.LSTM(units) output = layer(inputs, initial_state=initial_state) model = keras.models.Model(inputs, output) model.compile( loss='categorical_crossentropy', optimizer=adam.AdamOptimizer(), run_eagerly=testing_utils.should_run_eagerly(), 
experimental_run_tf_function=testing_utils.should_run_tf_function()) inputs = np.random.random((num_samples, timesteps, embedding_dim)) targets = np.random.random((num_samples, units)) model.train_on_batch(inputs, targets) def test_reset_states_with_values(self): num_states = 2 timesteps = 3 embedding_dim = 4 units = 3 num_samples = 2 layer = keras.layers.LSTM(units, stateful=True) layer.build((num_samples, timesteps, embedding_dim)) layer.reset_states() assert len(layer.states) == num_states assert layer.states[0] is not None self.assertAllClose( keras.backend.eval(layer.states[0]), np.zeros(keras.backend.int_shape(layer.states[0])), atol=1e-4) state_shapes = [keras.backend.int_shape(state) for state in layer.states] values = [np.ones(shape) for shape in state_shapes] if len(values) == 1: values = values[0] layer.reset_states(values) self.assertAllClose( keras.backend.eval(layer.states[0]), np.ones(keras.backend.int_shape(layer.states[0])), atol=1e-4) # Test with invalid data with self.assertRaises(ValueError): layer.reset_states([1] * (len(layer.states) + 1)) def test_specify_state_with_masking(self): num_states = 2 timesteps = 3 embedding_dim = 4 units = 3 num_samples = 2 inputs = keras.Input((timesteps, embedding_dim)) _ = keras.layers.Masking()(inputs) initial_state = [keras.Input((units,)) for _ in range(num_states)] output = keras.layers.LSTM(units)(inputs, initial_state=initial_state) model = keras.models.Model([inputs] + initial_state, output) model.compile( loss='categorical_crossentropy', optimizer='rmsprop', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) inputs = np.random.random((num_samples, timesteps, embedding_dim)) initial_state = [np.random.random((num_samples, units)) for _ in range(num_states)] targets = np.random.random((num_samples, units)) model.train_on_batch([inputs] + initial_state, targets) def test_return_state(self): num_states = 2 timesteps = 3 embedding_dim = 4 units = 3 num_samples = 2 inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim)) layer = keras.layers.LSTM(units, return_state=True, stateful=True) outputs = layer(inputs) state = outputs[1:] assert len(state) == num_states model = keras.models.Model(inputs, state[0]) inputs = np.random.random((num_samples, timesteps, embedding_dim)) state = model.predict(inputs) self.assertAllClose(keras.backend.eval(layer.states[0]), state, atol=1e-4) def test_state_reuse(self): timesteps = 3 embedding_dim = 4 units = 3 num_samples = 2 inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim)) layer = keras.layers.LSTM(units, return_state=True, return_sequences=True) outputs = layer(inputs) output, state = outputs[0], outputs[1:] output = keras.layers.LSTM(units)(output, initial_state=state) model = keras.models.Model(inputs, output) inputs = np.random.random((num_samples, timesteps, embedding_dim)) outputs = model.predict(inputs) def test_initial_states_as_other_inputs(self): timesteps = 3 embedding_dim = 4 units = 3 num_samples = 2 num_states = 2 layer_class = keras.layers.LSTM # Test with Keras tensor main_inputs = keras.Input((timesteps, embedding_dim)) initial_state = [keras.Input((units,)) for _ in range(num_states)] inputs = [main_inputs] + initial_state layer = layer_class(units) output = layer(inputs) self.assertTrue( any(initial_state[0] is t for t in layer._inbound_nodes[0].input_tensors)) model = keras.models.Model(inputs, output) model.compile( loss='categorical_crossentropy', 
optimizer=adam.AdamOptimizer(), run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) main_inputs = np.random.random((num_samples, timesteps, embedding_dim)) initial_state = [np.random.random((num_samples, units)) for _ in range(num_states)] targets = np.random.random((num_samples, units)) model.train_on_batch([main_inputs] + initial_state, targets) def test_regularizers_LSTM(self): embedding_dim = 4 layer_class = keras.layers.LSTM layer = layer_class( 5, return_sequences=False, weights=None, input_shape=(None, embedding_dim), kernel_regularizer=keras.regularizers.l1(0.01), recurrent_regularizer=keras.regularizers.l1(0.01), bias_regularizer='l2', activity_regularizer='l1') layer.build((None, None, 2)) self.assertEqual(len(layer.losses), 3) x = keras.backend.variable(np.ones((2, 3, 2))) layer(x) if context.executing_eagerly(): self.assertEqual(len(layer.losses), 4) else: self.assertEqual(len(layer.get_losses_for(x)), 1) def test_statefulness_LSTM(self): num_samples = 2 timesteps = 3 embedding_dim = 4 units = 2 layer_class = keras.layers.LSTM model = keras.models.Sequential() model.add( keras.layers.Embedding( 4, embedding_dim, mask_zero=True, input_length=timesteps, batch_input_shape=(num_samples, timesteps))) layer = layer_class( units, return_sequences=False, stateful=True, weights=None) model.add(layer) model.compile( optimizer=gradient_descent.GradientDescentOptimizer(0.01), loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) out1 = model.predict(np.ones((num_samples, timesteps))) self.assertEqual(out1.shape, (num_samples, units)) # train once so that the states change model.train_on_batch( np.ones((num_samples, timesteps)), np.ones((num_samples, units))) out2 = model.predict(np.ones((num_samples, timesteps))) # if the state is not reset, output should be different self.assertNotEqual(out1.max(), out2.max()) # check that output changes after states are reset # (even though the model itself didn't change) layer.reset_states() out3 = model.predict(np.ones((num_samples, timesteps))) self.assertNotEqual(out2.max(), out3.max()) # check that container-level reset_states() works model.reset_states() out4 = model.predict(np.ones((num_samples, timesteps))) self.assertAllClose(out3, out4, atol=1e-5) # check that the call to `predict` updated the states out5 = model.predict(np.ones((num_samples, timesteps))) self.assertNotEqual(out4.max(), out5.max()) # Check masking layer.reset_states() left_padded_input = np.ones((num_samples, timesteps)) left_padded_input[0, :1] = 0 left_padded_input[1, :2] = 0 out6 = model.predict(left_padded_input) layer.reset_states() right_padded_input = np.ones((num_samples, timesteps)) right_padded_input[0, -1:] = 0 right_padded_input[1, -2:] = 0 out7 = model.predict(right_padded_input) self.assertAllClose(out7, out6, atol=1e-5) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/lstm_test.py
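test_statefulness_LSTM above pins down the stateful-RNN contract: every predict or train call advances the layer's state, and `reset_states()` (on the layer or on the whole model) returns it to zeros. A minimal sketch of that contract, with assumed shapes rather than the test's values:

import numpy as np

from tensorflow.python import keras

num_samples, timesteps, dim, units = 2, 3, 4, 2

# Stateful RNNs need a fixed batch size, hence batch_input_shape.
layer = keras.layers.LSTM(
    units, stateful=True, batch_input_shape=(num_samples, timesteps, dim))
model = keras.models.Sequential([layer])
model.compile(optimizer='rmsprop', loss='mse')

x = np.ones((num_samples, timesteps, dim), dtype=np.float32)
out1 = model.predict(x)  # starts from the zero state, then updates it
out2 = model.predict(x)  # same input, carried-over state -> different output
assert out1.max() != out2.max()

layer.reset_states()     # or model.reset_states()
out3 = model.predict(x)  # back to the zero state: reproduces out1
np.testing.assert_allclose(out1, out3, atol=1e-5)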
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Normalization layers. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import backend as K from tensorflow.python.keras import constraints from tensorflow.python.keras import initializers from tensorflow.python.keras import regularizers from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables as tf_variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import keras_export class BatchNormalizationBase(Layer): """Base class of Batch normalization layer (Ioffe and Szegedy, 2014). Normalize the activations of the previous layer at each batch, i.e. applies a transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1. Arguments: axis: Integer, the axis that should be normalized (typically the features axis). For instance, after a `Conv2D` layer with `data_format="channels_first"`, set `axis=1` in `BatchNormalization`. momentum: Momentum for the moving average. epsilon: Small float added to variance to avoid dividing by zero. center: If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored. scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the next layer is linear (also e.g. `nn.relu`), this can be disabled since the scaling will be done by the next layer. beta_initializer: Initializer for the beta weight. gamma_initializer: Initializer for the gamma weight. moving_mean_initializer: Initializer for the moving mean. moving_variance_initializer: Initializer for the moving variance. beta_regularizer: Optional regularizer for the beta weight. gamma_regularizer: Optional regularizer for the gamma weight. beta_constraint: Optional constraint for the beta weight. gamma_constraint: Optional constraint for the gamma weight. renorm: Whether to use Batch Renormalization (https://arxiv.org/abs/1702.03275). This adds extra variables during training. The inference is the same for either value of this parameter. 
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar `Tensors` used to clip the renorm correction. The correction `(r, d)` is used as `corrected_value = normalized_value * r + d`, with `r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin, dmax are set to inf, 0, inf, respectively. renorm_momentum: Momentum used to update the moving means and standard deviations with renorm. Unlike `momentum`, this affects training and should be neither too small (which would add noise) nor too large (which would give stale estimates). Note that `momentum` is still applied to get the means and variances for inference. fused: if `True`, use a faster, fused implementation, or raise a ValueError if the fused implementation cannot be used. If `None`, use the faster implementation if possible. If False, do not use the fused implementation. trainable: Boolean, if `True` the variables will be marked as trainable. virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`, which means batch normalization is performed across the whole batch. When `virtual_batch_size` is not `None`, instead perform "Ghost Batch Normalization", which creates virtual sub-batches which are each normalized separately (with shared gamma, beta, and moving statistics). Must divide the actual batch size during execution. adjustment: A function taking the `Tensor` containing the (dynamic) shape of the input tensor and returning a pair (scale, bias) to apply to the normalized values (before gamma and beta), only during training. For example, if axis==-1, `adjustment = lambda shape: ( tf.random.uniform(shape[-1:], 0.93, 1.07), tf.random.uniform(shape[-1:], -0.1, 0.1))` will scale the normalized value by up to 7% up or down, then shift the result by up to 0.1 (with independent scaling and bias for each feature but shared across all examples), and finally apply gamma and/or beta. If `None`, no adjustment is applied. Cannot be specified if virtual_batch_size is specified. Call arguments: inputs: Input tensor (of any rank). training: Python boolean indicating whether the layer should behave in training mode or in inference mode. - `training=True`: The layer will normalize its inputs using the mean and variance of the current batch of inputs. - `training=False`: The layer will normalize its inputs using the mean and variance of its moving statistics, learned during training. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. References: - [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167) {{TRAINABLE_ATTRIBUTE_NOTE}} """ # By default, the base class uses V2 behavior. The BatchNormalization V1 # subclass sets this to False to use the V1 behavior. 
_USE_V2_BEHAVIOR = True def __init__(self, axis=-1, momentum=0.99, epsilon=1e-3, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None, renorm=False, renorm_clipping=None, renorm_momentum=0.99, fused=None, trainable=True, virtual_batch_size=None, adjustment=None, name=None, **kwargs): super(BatchNormalizationBase, self).__init__( name=name, **kwargs) if isinstance(axis, list): self.axis = axis[:] elif isinstance(axis, int): self.axis = axis else: raise TypeError('axis must be int or list, type given: %s' % type(axis)) self.momentum = momentum self.epsilon = epsilon self.center = center self.scale = scale self.beta_initializer = initializers.get(beta_initializer) self.gamma_initializer = initializers.get(gamma_initializer) self.moving_mean_initializer = initializers.get(moving_mean_initializer) self.moving_variance_initializer = initializers.get( moving_variance_initializer) self.beta_regularizer = regularizers.get(beta_regularizer) self.gamma_regularizer = regularizers.get(gamma_regularizer) self.beta_constraint = constraints.get(beta_constraint) self.gamma_constraint = constraints.get(gamma_constraint) self.renorm = renorm self.virtual_batch_size = virtual_batch_size self.adjustment = adjustment if self._USE_V2_BEHAVIOR: if fused: self._raise_if_fused_cannot_be_used() elif fused is None: fused = self._fused_can_be_used() elif fused is None: fused = True self.supports_masking = True self.fused = fused self._bessels_correction_test_only = True self._trainable_var = None self.trainable = trainable if renorm: renorm_clipping = renorm_clipping or {} keys = ['rmax', 'rmin', 'dmax'] if set(renorm_clipping) - set(keys): raise ValueError('renorm_clipping %s contains keys not in %s' % (renorm_clipping, keys)) self.renorm_clipping = renorm_clipping self.renorm_momentum = renorm_momentum def _raise_if_fused_cannot_be_used(self): """Raises a ValueError if fused implementation cannot be used. """ # Currently fused batch norm doesn't support renorm. It also only supports a # single axis, when no virtual batch size or adjustment is used. 
if self.renorm: raise ValueError('Passing both fused=True and renorm=True is ' 'unsupported') axis = [self.axis] if isinstance(self.axis, int) else self.axis if len(axis) > 1: raise ValueError('Passing fused=True is only supported when operating ' 'over a single axis.') if self.virtual_batch_size is not None: raise ValueError('Passing fused=True is unsupported when ' 'virtual_batch_size is specified.') if self.adjustment is not None: raise ValueError('Passing fused=True is unsupported when ' 'adjustment is specified.') def _fused_can_be_used(self): try: self._raise_if_fused_cannot_be_used() return True except ValueError: return False @property def trainable(self): return self._trainable @trainable.setter def trainable(self, value): self._trainable = value if self._trainable_var is not None: self._trainable_var.update_value(value) def _get_trainable_var(self): if self._trainable_var is None: self._trainable_var = K.freezable_variable( self._trainable, name=self.name + '_trainable') return self._trainable_var @property def _param_dtype(self): # Raise parameters of fp16 batch norm to fp32 if self.dtype == dtypes.float16 or self.dtype == dtypes.bfloat16: return dtypes.float32 else: return self.dtype or dtypes.float32 def _support_zero_size_input(self): return distribution_strategy_context.has_strategy() and getattr( distribution_strategy_context.get_strategy().extended, 'experimental_enable_get_next_as_optional', False) def _get_shape_and_axis_for_fused(self, nd_shape, nd_axis): """Compute an equivalent shape and axis that are compatible with the fused implementation. The input/output of the layer can be reshaped to/from the shape returned by this function without affecting the correctness of the computation. Arguments: nd_shape: Tensor. The original shape of the operation. nd_axis: Integer. The original axis of the operation. Returns: shape: Tensor. A 4D shape. axis: Integer. An axis (always 1 or 3). """ assert(isinstance(nd_axis, int)) ndims = nd_shape.shape[0] shape = nd_shape[:] axis = ndims + nd_axis if nd_axis < 0 else nd_axis # First check if the axis needs to be moved. if axis not in (1, ndims - 1): # Move axis to dim 1. if axis == 0: # Transform [C, ...] to [1, C, ...]. shape = array_ops.concat([constant_op.constant([1]), shape], axis=0) ndims += 1 else: # Merge excess pre-axis dims into first dim. # Transform [N, ..., C, ...] to [product(N, ...), C, ...]. product = math_ops.reduce_prod(shape[:axis], keepdims=True) shape = array_ops.concat([product, shape[axis:]], axis=0) ndims -= (axis - 1) axis = 1 # Now change shape to 4D. is_channels_last = axis == ndims - 1 if ndims < 4: # Insert new dims after existing spatial dim or before channel dim. new_dims = constant_op.constant([1] * (4 - ndims)) if is_channels_last: # Transform [..., C] to [..., 1..., C] (ndims=4). shape = array_ops.concat([shape[:-1], new_dims, shape[-1:]], axis=0) else: # Transform [N, C, ...] to [N, C, ..., 1...] (ndims=4). shape = array_ops.concat([shape, new_dims], axis=0) elif ndims > 4: # Merge excess spatial dims into the second spatial dim. # Transform [N, C, H, W, ...] to [N, C, H, product(W, ...)]. # Or [N, H, W, ..., C] to [N, H, product(W, ...), C]. 
merge_dim = 2 if is_channels_last else 3 product = math_ops.reduce_prod( shape[merge_dim:merge_dim + 1 + (ndims - 4)], keepdims=True) shape = array_ops.concat([shape[:merge_dim], product, shape[merge_dim + 1 + (ndims - 4):]], axis=0) axis = 3 if is_channels_last else 1 return shape, axis def build(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape) if not input_shape.ndims: raise ValueError('Input has undefined rank:', input_shape) ndims = len(input_shape) # Convert axis to list and resolve negatives if isinstance(self.axis, int): self.axis = [self.axis] for idx, x in enumerate(self.axis): if x < 0: self.axis[idx] = ndims + x # Validate axes for x in self.axis: if x < 0 or x >= ndims: raise ValueError('Invalid axis: %d' % x) if len(self.axis) != len(set(self.axis)): raise ValueError('Duplicate axis: %s' % self.axis) if self.virtual_batch_size is not None: if self.virtual_batch_size <= 0: raise ValueError('virtual_batch_size must be a positive integer that ' 'divides the true batch size of the input Tensor') # If using virtual batches, the first dimension must be the batch # dimension and cannot be the batch norm axis if 0 in self.axis: raise ValueError('When using virtual_batch_size, the batch dimension ' 'must be 0 and thus axis cannot include 0') if self.adjustment is not None: raise ValueError('When using virtual_batch_size, adjustment cannot ' 'be specified') if self.fused and not self._USE_V2_BEHAVIOR: self.fused = self._fused_can_be_used() axis_to_dim = {x: input_shape.dims[x].value for x in self.axis} for x in axis_to_dim: if axis_to_dim[x] is None: raise ValueError('Input has undefined `axis` dimension. Input shape: ', input_shape) self.input_spec = InputSpec(ndim=ndims, axes=axis_to_dim) if len(axis_to_dim) == 1 and self.virtual_batch_size is None: # Single axis batch norm (most common/default use-case) param_shape = (list(axis_to_dim.values())[0],) else: # Parameter shape is the original shape but with 1 in all non-axis dims param_shape = [axis_to_dim[i] if i in axis_to_dim else 1 for i in range(ndims)] if self.virtual_batch_size is not None: # When using virtual batches, add an extra dim at index 1 param_shape.insert(1, 1) for idx, x in enumerate(self.axis): self.axis[idx] = x + 1 # Account for added dimension if self.scale: self.gamma = self.add_weight( name='gamma', shape=param_shape, dtype=self._param_dtype, initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint, trainable=True, experimental_autocast=False) else: self.gamma = None if self.fused: self._gamma_const = K.constant( 1.0, dtype=self._param_dtype, shape=param_shape) if self.center: self.beta = self.add_weight( name='beta', shape=param_shape, dtype=self._param_dtype, initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint, trainable=True, experimental_autocast=False) else: self.beta = None if self.fused: self._beta_const = K.constant( 0.0, dtype=self._param_dtype, shape=param_shape) try: # Disable variable partitioning when creating the moving mean and variance if hasattr(self, '_scope') and self._scope: partitioner = self._scope.partitioner self._scope.set_partitioner(None) else: partitioner = None self.moving_mean = self.add_weight( name='moving_mean', shape=param_shape, dtype=self._param_dtype, initializer=self.moving_mean_initializer, synchronization=tf_variables.VariableSynchronization.ON_READ, trainable=False, aggregation=tf_variables.VariableAggregation.MEAN, experimental_autocast=False) 
self.moving_variance = self.add_weight( name='moving_variance', shape=param_shape, dtype=self._param_dtype, initializer=self.moving_variance_initializer, synchronization=tf_variables.VariableSynchronization.ON_READ, trainable=False, aggregation=tf_variables.VariableAggregation.MEAN, experimental_autocast=False) if self.renorm: # In batch renormalization we track the inference moving stddev instead # of the moving variance to more closely align with the paper. def moving_stddev_initializer(*args, **kwargs): return math_ops.sqrt( self.moving_variance_initializer(*args, **kwargs)) with distribution_strategy_context.get_strategy( ).extended.colocate_vars_with(self.moving_variance): self.moving_stddev = self.add_weight( name='moving_stddev', shape=param_shape, dtype=self._param_dtype, initializer=moving_stddev_initializer, synchronization=tf_variables.VariableSynchronization.ON_READ, trainable=False, aggregation=tf_variables.VariableAggregation.MEAN, experimental_autocast=False) # Create variables to maintain the moving mean and standard deviation. # These are used in training and thus are different from the moving # averages above. The renorm variables are colocated with moving_mean # and moving_stddev. # NOTE: below, the outer `with device` block causes the current device # stack to be cleared. The nested ones use a `lambda` to set the desired # device and ignore any devices that may be set by the custom getter. def _renorm_variable(name, shape, initializer=init_ops.zeros_initializer()): """Create a renorm variable.""" var = self.add_weight( name=name, shape=shape, dtype=self._param_dtype, initializer=initializer, synchronization=tf_variables.VariableSynchronization.ON_READ, trainable=False, aggregation=tf_variables.VariableAggregation.MEAN, experimental_autocast=False) return var with distribution_strategy_context.get_strategy( ).extended.colocate_vars_with(self.moving_mean): self.renorm_mean = _renorm_variable('renorm_mean', param_shape, self.moving_mean_initializer) with distribution_strategy_context.get_strategy( ).extended.colocate_vars_with(self.moving_stddev): self.renorm_stddev = _renorm_variable('renorm_stddev', param_shape, moving_stddev_initializer) finally: if partitioner: self._scope.set_partitioner(partitioner) self.built = True def _assign_moving_average(self, variable, value, momentum, inputs_size): with K.name_scope('AssignMovingAvg') as scope: with ops.colocate_with(variable): decay = ops.convert_to_tensor(1.0 - momentum, name='decay') if decay.dtype != variable.dtype.base_dtype: decay = math_ops.cast(decay, variable.dtype.base_dtype) update_delta = ( variable - math_ops.cast(value, variable.dtype)) * decay if inputs_size is not None: update_delta = array_ops.where(inputs_size > 0, update_delta, K.zeros_like(update_delta)) return state_ops.assign_sub(variable, update_delta, name=scope) def _assign_new_value(self, variable, value): with K.name_scope('AssignNewValue') as scope: with ops.colocate_with(variable): return state_ops.assign(variable, value, name=scope) def _fused_batch_norm(self, inputs, training): """Returns the output of fused batch norm.""" beta = self.beta if self.center else self._beta_const gamma = self.gamma if self.scale else self._gamma_const original_shape = None fused_axis = self.axis[0] input_shape = array_ops.shape(inputs) ndims = len(inputs.shape) if self.axis[0] not in (1, ndims - 1) or ndims not in (4, 5): # The fused implementation supports NCHW or NHWC, so we reshape the # input/output tensor to/from an equivalent 4D shape. 
fused_shape, fused_axis = self._get_shape_and_axis_for_fused(input_shape, self.axis[0]) original_shape = input_shape inputs = array_ops.reshape(inputs, fused_shape) # TODO(chrisying): fused batch norm is currently not supported for # multi-axis batch norm and by extension virtual batches. In some cases, # it might be possible to use fused batch norm but would require reshaping # the Tensor to 4D with the axis in 1 or 3 (preferred 1) which is # particularly tricky. A compromise might be to just support the most # common use case (turning 5D w/ virtual batch to NCHW) if ndims == 5: data_format = 'NCDHW' if fused_axis == 1 else 'NDHWC' else: data_format = 'NCHW' if fused_axis == 1 else 'NHWC' # TODO(b/129279393): Support zero batch input in non DistributionStrategy # code as well. if self._support_zero_size_input(): inputs_size = array_ops.size(inputs) else: inputs_size = None def _fused_batch_norm_training(): return nn.fused_batch_norm( inputs, gamma, beta, epsilon=self.epsilon, data_format=data_format) def _fused_batch_norm_inference(): return nn.fused_batch_norm( inputs, gamma, beta, mean=self.moving_mean, variance=self.moving_variance, epsilon=self.epsilon, is_training=False, data_format=data_format) output, mean, variance = tf_utils.smart_cond( training, _fused_batch_norm_training, _fused_batch_norm_inference) if not self._bessels_correction_test_only: # Remove Bessel's correction to be consistent with non-fused batch norm. # Note that the variance computed by fused batch norm is # with Bessel's correction. sample_size = math_ops.cast( array_ops.size(inputs) / array_ops.size(variance), variance.dtype) factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size variance *= factor if original_shape is not None: output = array_ops.reshape(output, original_shape) training_value = tf_utils.constant_value(training) if training_value is None: momentum = tf_utils.smart_cond(training, lambda: self.momentum, lambda: 1.0) else: momentum = ops.convert_to_tensor(self.momentum) if training_value or training_value is None: def mean_update(): return self._assign_moving_average(self.moving_mean, mean, momentum, inputs_size) def variance_update(): """Update self.moving_variance with the most recent data point.""" if self.renorm: # We apply epsilon as part of the moving_stddev to mirror the training # code path. moving_stddev = self._assign_moving_average( self.moving_stddev, math_ops.sqrt(variance + self.epsilon), momentum, inputs_size) return self._assign_new_value( self.moving_variance, # Apply relu in case floating point rounding causes it to go # negative. K.relu(moving_stddev * moving_stddev - self.epsilon)) else: return self._assign_moving_average(self.moving_variance, variance, momentum, inputs_size) self.add_update(mean_update) self.add_update(variance_update) return output def _renorm_correction_and_moments(self, mean, variance, training, inputs_size): """Returns the correction and update values for renorm.""" stddev = math_ops.sqrt(variance + self.epsilon) # Compute the average mean and standard deviation, as if they were # initialized with this batch's moments. renorm_mean = self.renorm_mean # Avoid divide by zero early on in training. renorm_stddev = math_ops.maximum(self.renorm_stddev, math_ops.sqrt(self.epsilon)) # Compute the corrections for batch renorm. r = stddev / renorm_stddev d = (mean - renorm_mean) / renorm_stddev # Ensure the corrections use pre-update moving averages. 
with ops.control_dependencies([r, d]): mean = array_ops.identity(mean) stddev = array_ops.identity(stddev) rmin, rmax, dmax = [self.renorm_clipping.get(key) for key in ['rmin', 'rmax', 'dmax']] if rmin is not None: r = math_ops.maximum(r, rmin) if rmax is not None: r = math_ops.minimum(r, rmax) if dmax is not None: d = math_ops.maximum(d, -dmax) d = math_ops.minimum(d, dmax) # When not training, use r=1, d=0. r = tf_utils.smart_cond(training, lambda: r, lambda: array_ops.ones_like(r)) d = tf_utils.smart_cond(training, lambda: d, lambda: array_ops.zeros_like(d)) def _update_renorm_variable(var, value, inputs_size): """Updates a moving average and weight, returns the unbiased value.""" value = array_ops.identity(value) def _do_update(): """Updates the var, returns the updated value.""" new_var = self._assign_moving_average(var, value, self.renorm_momentum, inputs_size) return new_var def _fake_update(): return array_ops.identity(var) return tf_utils.smart_cond(training, _do_update, _fake_update) # TODO(yuefengz): colocate the operations update_new_mean = _update_renorm_variable(self.renorm_mean, mean, inputs_size) update_new_stddev = _update_renorm_variable(self.renorm_stddev, stddev, inputs_size) # Update the inference mode moving averages with the batch value. with ops.control_dependencies([update_new_mean, update_new_stddev]): out_mean = array_ops.identity(mean) out_variance = array_ops.identity(variance) return (r, d, out_mean, out_variance) def _moments(self, inputs, reduction_axes, keep_dims): mean, variance = nn.moments(inputs, reduction_axes, keep_dims=keep_dims) # TODO(b/129279393): Support zero batch input in non DistributionStrategy # code as well. if self._support_zero_size_input(): inputs_size = array_ops.size(inputs) mean = array_ops.where(inputs_size > 0, mean, K.zeros_like(mean)) variance = array_ops.where(inputs_size > 0, variance, K.zeros_like(variance)) return mean, variance def _get_training_value(self, training=None): if training is None: training = K.learning_phase() if self._USE_V2_BEHAVIOR: if isinstance(training, int): training = bool(training) if base_layer_utils.is_in_keras_graph(): training = math_ops.logical_and(training, self._get_trainable_var()) else: training = math_ops.logical_and(training, self.trainable) return training def call(self, inputs, training=None): training = self._get_training_value(training) if self.virtual_batch_size is not None: # Virtual batches (aka ghost batches) can be simulated by reshaping the # Tensor and reusing the existing batch norm implementation original_shape = [-1] + inputs.shape.as_list()[1:] expanded_shape = [self.virtual_batch_size, -1] + original_shape[1:] # Will cause errors if virtual_batch_size does not divide the batch size inputs = array_ops.reshape(inputs, expanded_shape) def undo_virtual_batching(outputs): outputs = array_ops.reshape(outputs, original_shape) return outputs if self.fused: outputs = self._fused_batch_norm(inputs, training=training) if self.virtual_batch_size is not None: # Currently never reaches here since fused_batch_norm does not support # virtual batching outputs = undo_virtual_batching(outputs) return outputs # Compute the axes along which to reduce the mean / variance input_shape = inputs.shape ndims = len(input_shape) reduction_axes = [i for i in range(ndims) if i not in self.axis] if self.virtual_batch_size is not None: del reduction_axes[1] # Do not reduce along virtual batch dim # Broadcasting only necessary for single-axis batch norm where the axis is # not the last dimension broadcast_shape 
= [1] * ndims broadcast_shape[self.axis[0]] = input_shape.dims[self.axis[0]].value def _broadcast(v): if (v is not None and len(v.shape) != ndims and reduction_axes != list(range(ndims - 1))): return array_ops.reshape(v, broadcast_shape) return v scale, offset = _broadcast(self.gamma), _broadcast(self.beta) def _compose_transforms(scale, offset, then_scale, then_offset): if then_scale is not None: scale *= then_scale offset *= then_scale if then_offset is not None: offset += then_offset return (scale, offset) # Determine a boolean value for `training`: could be True, False, or None. training_value = tf_utils.constant_value(training) if training_value == False: # pylint: disable=singleton-comparison,g-explicit-bool-comparison mean, variance = self.moving_mean, self.moving_variance else: if self.adjustment: adj_scale, adj_bias = self.adjustment(array_ops.shape(inputs)) # Adjust only during training. adj_scale = tf_utils.smart_cond(training, lambda: adj_scale, lambda: array_ops.ones_like(adj_scale)) adj_bias = tf_utils.smart_cond(training, lambda: adj_bias, lambda: array_ops.zeros_like(adj_bias)) scale, offset = _compose_transforms(adj_scale, adj_bias, scale, offset) # Some of the computations here are not necessary when training==False # but not a constant. However, this makes the code simpler. keep_dims = self.virtual_batch_size is not None or len(self.axis) > 1 mean, variance = self._moments( math_ops.cast(inputs, self._param_dtype), reduction_axes, keep_dims=keep_dims) moving_mean = self.moving_mean moving_variance = self.moving_variance mean = tf_utils.smart_cond(training, lambda: mean, lambda: ops.convert_to_tensor(moving_mean)) variance = tf_utils.smart_cond( training, lambda: variance, lambda: ops.convert_to_tensor(moving_variance)) if self.virtual_batch_size is not None: # This isn't strictly correct since in ghost batch norm, you are # supposed to sequentially update the moving_mean and moving_variance # with each sub-batch. However, since the moving statistics are only # used during evaluation, it is more efficient to just update in one # step and should not make a significant difference in the result. new_mean = math_ops.reduce_mean(mean, axis=1, keepdims=True) new_variance = math_ops.reduce_mean(variance, axis=1, keepdims=True) else: new_mean, new_variance = mean, variance if self._support_zero_size_input(): inputs_size = array_ops.size(inputs) else: inputs_size = None if self.renorm: r, d, new_mean, new_variance = self._renorm_correction_and_moments( new_mean, new_variance, training, inputs_size) # When training, the normalized values (say, x) will be transformed as # x * gamma + beta without renorm, and (x * r + d) * gamma + beta # = x * (r * gamma) + (d * gamma + beta) with renorm. r = _broadcast(array_ops.stop_gradient(r, name='renorm_r')) d = _broadcast(array_ops.stop_gradient(d, name='renorm_d')) scale, offset = _compose_transforms(r, d, scale, offset) def _do_update(var, value): """Compute the updates for mean and variance.""" return self._assign_moving_average(var, value, self.momentum, inputs_size) def mean_update(): true_branch = lambda: _do_update(self.moving_mean, new_mean) false_branch = lambda: self.moving_mean return tf_utils.smart_cond(training, true_branch, false_branch) def variance_update(): """Update the moving variance.""" def true_branch_renorm(): # We apply epsilon as part of the moving_stddev to mirror the training # code path. 
moving_stddev = _do_update(self.moving_stddev, math_ops.sqrt(new_variance + self.epsilon)) return self._assign_new_value( self.moving_variance, # Apply relu in case floating point rounding causes it to go # negative. K.relu(moving_stddev * moving_stddev - self.epsilon)) if self.renorm: true_branch = true_branch_renorm else: true_branch = lambda: _do_update(self.moving_variance, new_variance) false_branch = lambda: self.moving_variance return tf_utils.smart_cond(training, true_branch, false_branch) self.add_update(mean_update) self.add_update(variance_update) mean = math_ops.cast(mean, inputs.dtype) variance = math_ops.cast(variance, inputs.dtype) if offset is not None: offset = math_ops.cast(offset, inputs.dtype) if scale is not None: scale = math_ops.cast(scale, inputs.dtype) # TODO(reedwm): Maybe do math in float32 if given float16 inputs, if doing # math in float16 hurts validation accuracy of popular models like resnet. outputs = nn.batch_normalization(inputs, _broadcast(mean), _broadcast(variance), offset, scale, self.epsilon) # If some components of the shape got lost due to adjustments, fix that. outputs.set_shape(input_shape) if self.virtual_batch_size is not None: outputs = undo_virtual_batching(outputs) return outputs def compute_output_shape(self, input_shape): return input_shape def get_config(self): config = { 'axis': self.axis, 'momentum': self.momentum, 'epsilon': self.epsilon, 'center': self.center, 'scale': self.scale, 'beta_initializer': initializers.serialize(self.beta_initializer), 'gamma_initializer': initializers.serialize(self.gamma_initializer), 'moving_mean_initializer': initializers.serialize(self.moving_mean_initializer), 'moving_variance_initializer': initializers.serialize(self.moving_variance_initializer), 'beta_regularizer': regularizers.serialize(self.beta_regularizer), 'gamma_regularizer': regularizers.serialize(self.gamma_regularizer), 'beta_constraint': constraints.serialize(self.beta_constraint), 'gamma_constraint': constraints.serialize(self.gamma_constraint) } # Only add TensorFlow-specific parameters if they are set, so as to preserve # model compatibility with external Keras. if self.renorm: config['renorm'] = True config['renorm_clipping'] = self.renorm_clipping config['renorm_momentum'] = self.renorm_momentum if self.virtual_batch_size is not None: config['virtual_batch_size'] = self.virtual_batch_size # Note: adjustment is not serializable. if self.adjustment is not None: logging.warning('The `adjustment` function of this `BatchNormalization` ' 'layer cannot be serialized and has been omitted from ' 'the layer config. It will not be included when ' 're-creating the layer from the saved config.') base_config = super(BatchNormalizationBase, self).get_config() return dict(list(base_config.items()) + list(config.items())) def replace_in_base_docstring(replacements): string = BatchNormalizationBase.__doc__ for old, new in replacements: assert old in string string = string.replace(old, new) return string @keras_export(v1=['keras.layers.BatchNormalization']) # pylint: disable=missing-docstring class BatchNormalization(BatchNormalizationBase): __doc__ = replace_in_base_docstring( [(''' fused: if `True`, use a faster, fused implementation, or raise a ValueError if the fused implementation cannot be used. If `None`, use the faster implementation if possible. If False, do not use the fused implementation.''', ''' fused: if `None` or `True`, use a faster, fused implementation if possible. 
If `False`, use the system recommended implementation.'''), ('{{TRAINABLE_ATTRIBUTE_NOTE}}', '')]) _USE_V2_BEHAVIOR = False @keras_export('keras.layers.LayerNormalization') class LayerNormalization(Layer): """Layer normalization layer (Ba et al., 2016). Normalize the activations of the previous layer for each given example in a batch independently, rather than across a batch like Batch Normalization. i.e. applies a transformation that maintains the mean activation within each example close to 0 and the activation standard deviation close to 1. Arguments: axis: Integer or List/Tuple. The axis that should be normalized (typically the features axis). epsilon: Small float added to variance to avoid dividing by zero. center: If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored. scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the next layer is linear (also e.g. `nn.relu`), this can be disabled since the scaling will be done by the next layer. beta_initializer: Initializer for the beta weight. gamma_initializer: Initializer for the gamma weight. beta_regularizer: Optional regularizer for the beta weight. gamma_regularizer: Optional regularizer for the gamma weight. beta_constraint: Optional constraint for the beta weight. gamma_constraint: Optional constraint for the gamma weight. trainable: Boolean, if `True` the variables will be marked as trainable. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. References: - [Layer Normalization](https://arxiv.org/abs/1607.06450) """ def __init__(self, axis=-1, epsilon=1e-3, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None, trainable=True, name=None, **kwargs): super(LayerNormalization, self).__init__( name=name, trainable=trainable, **kwargs) if isinstance(axis, (list, tuple)): self.axis = axis[:] elif isinstance(axis, int): self.axis = axis else: raise ValueError('Expected an int or a list/tuple of ints for the ' 'argument \'axis\', but received instead: %s' % axis) self.epsilon = epsilon self.center = center self.scale = scale self.beta_initializer = initializers.get(beta_initializer) self.gamma_initializer = initializers.get(gamma_initializer) self.beta_regularizer = regularizers.get(beta_regularizer) self.gamma_regularizer = regularizers.get(gamma_regularizer) self.beta_constraint = constraints.get(beta_constraint) self.gamma_constraint = constraints.get(gamma_constraint) self.supports_masking = True def build(self, input_shape): ndims = len(input_shape) if ndims is None: raise ValueError('Input shape %s has undefined rank.' 
% input_shape) # Convert axis to list and resolve negatives if isinstance(self.axis, int): self.axis = [self.axis] for idx, x in enumerate(self.axis): if x < 0: self.axis[idx] = ndims + x # Validate axes for x in self.axis: if x < 0 or x >= ndims: raise ValueError('Invalid axis: %d' % x) if len(self.axis) != len(set(self.axis)): raise ValueError('Duplicate axis: {}'.format(tuple(self.axis))) param_shape = [input_shape[dim] for dim in self.axis] if self.scale: self.gamma = self.add_weight( name='gamma', shape=param_shape, initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint, trainable=True, experimental_autocast=False) else: self.gamma = None if self.center: self.beta = self.add_weight( name='beta', shape=param_shape, initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint, trainable=True, experimental_autocast=False) else: self.beta = None self.built = True def call(self, inputs): # Compute the axes along which to reduce the mean / variance input_shape = inputs.shape ndims = len(input_shape) # Calculate the moments on the last axis (layer activations). mean, variance = nn.moments(inputs, self.axis, keep_dims=True) # Broadcasting only necessary for norm where the axis is not just # the last dimension broadcast_shape = [1] * ndims for dim in self.axis: broadcast_shape[dim] = input_shape.dims[dim].value def _broadcast(v): if (v is not None and len(v.shape) != ndims and self.axis != [ndims - 1]): return array_ops.reshape(v, broadcast_shape) return v scale, offset = _broadcast(self.gamma), _broadcast(self.beta) # Compute layer normalization using the batch_normalization function. outputs = nn.batch_normalization( inputs, mean, variance, offset=offset, scale=scale, variance_epsilon=self.epsilon) # If some components of the shape got lost due to adjustments, fix that. outputs.set_shape(input_shape) return outputs def compute_output_shape(self, input_shape): return input_shape def get_config(self): config = { 'axis': self.axis, 'epsilon': self.epsilon, 'center': self.center, 'scale': self.scale, 'beta_initializer': initializers.serialize(self.beta_initializer), 'gamma_initializer': initializers.serialize(self.gamma_initializer), 'beta_regularizer': regularizers.serialize(self.beta_regularizer), 'gamma_regularizer': regularizers.serialize(self.gamma_regularizer), 'beta_constraint': constraints.serialize(self.beta_constraint), 'gamma_constraint': constraints.serialize(self.gamma_constraint) } base_config = super(LayerNormalization, self).get_config() return dict(list(base_config.items()) + list(config.items()))
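# A minimal numpy sketch of what the LayerNormalization layer above computes
# for axis=-1, assuming gamma/beta are left at their identity values. The
# helper name and sample input below are illustrative, not part of the API.
import numpy as np

def layer_norm_reference(x, epsilon=1e-3):
  # Normalize each example over its feature axis, independently of the batch.
  mean = x.mean(axis=-1, keepdims=True)
  variance = x.var(axis=-1, keepdims=True)
  return (x - mean) / np.sqrt(variance + epsilon)

x = np.random.rand(2, 4).astype(np.float32)
y = layer_norm_reference(x)
assert np.allclose(y.mean(axis=-1), 0., atol=1e-6)  # per-example mean ~ 0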
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/normalization.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Recurrent layers backed by cuDNN. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from tensorflow.python.framework import constant_op from tensorflow.python.keras import backend as K from tensorflow.python.keras import constraints from tensorflow.python.keras import initializers from tensorflow.python.keras import regularizers from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.layers import recurrent_v2 from tensorflow.python.keras.layers.recurrent import RNN from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_cudnn_rnn_ops from tensorflow.python.ops import state_ops from tensorflow.python.platform import build_info from tensorflow.python.util.tf_export import keras_export class _CuDNNRNN(RNN): """Private base class for CuDNNGRU and CuDNNLSTM layers. Arguments: return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. time_major: Boolean (default False). If true, the inputs and outputs will be in shape `(timesteps, batch, ...)`, whereas in the False case, it will be `(batch, timesteps, ...)`. """ def __init__(self, return_sequences=False, return_state=False, go_backwards=False, stateful=False, time_major=False, **kwargs): # We invoke the base layer's initializer directly here because we do not # want to create RNN cell instance. super(RNN, self).__init__(**kwargs) # pylint: disable=bad-super-call self.return_sequences = return_sequences self.return_state = return_state self.go_backwards = go_backwards self.stateful = stateful self.time_major = time_major self.supports_masking = False self.input_spec = [InputSpec(ndim=3)] if hasattr(self.cell.state_size, '__len__'): state_size = self.cell.state_size else: state_size = [self.cell.state_size] self.state_spec = [InputSpec(shape=(None, dim)) for dim in state_size] self.constants_spec = None self._states = None self._num_constants = 0 self._vector_shape = constant_op.constant([-1]) def call(self, inputs, mask=None, training=None, initial_state=None): if isinstance(mask, list): mask = mask[0] if mask is not None: raise ValueError('Masking is not supported for CuDNN RNNs.') # input shape: `(samples, time (padded with zeros), input_dim)` # note that the .build() method of subclasses MUST define # self.input_spec and self.state_spec with complete input shapes. 
if isinstance(inputs, list): initial_state = inputs[1:] inputs = inputs[0] elif initial_state is not None: pass elif self.stateful: initial_state = self.states else: initial_state = self.get_initial_state(inputs) if len(initial_state) != len(self.states): raise ValueError('Layer has ' + str(len(self.states)) + ' states but was passed ' + str(len(initial_state)) + ' initial states.') if self.go_backwards: # Reverse time axis. inputs = K.reverse(inputs, 1) output, states = self._process_batch(inputs, initial_state) if self.stateful: updates = [] for i in range(len(states)): updates.append(state_ops.assign(self.states[i], states[i])) self.add_update(updates) if self.return_state: return [output] + states else: return output def get_config(self): config = { 'return_sequences': self.return_sequences, 'return_state': self.return_state, 'go_backwards': self.go_backwards, 'stateful': self.stateful, 'time_major': self.time_major, } base_config = super( # pylint: disable=bad-super-call RNN, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config): return cls(**config) @property def trainable_weights(self): if self.trainable and self.built: return [self.kernel, self.recurrent_kernel, self.bias] return [] @property def non_trainable_weights(self): if not self.trainable and self.built: return [self.kernel, self.recurrent_kernel, self.bias] return [] @property def losses(self): return super(RNN, self).losses def get_losses_for(self, inputs=None): return super( # pylint: disable=bad-super-call RNN, self).get_losses_for(inputs=inputs) @keras_export(v1=['keras.layers.CuDNNGRU']) class CuDNNGRU(_CuDNNRNN): """Fast GRU implementation backed by cuDNN. More information about cuDNN can be found on the [NVIDIA developer website](https://developer.nvidia.com/cudnn). Can only be run on GPU. Arguments: units: Positive integer, dimensionality of the output space. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. 
""" def __init__(self, units, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, return_sequences=False, return_state=False, go_backwards=False, stateful=False, **kwargs): self.units = units cell_spec = collections.namedtuple('cell', 'state_size') self._cell = cell_spec(state_size=self.units) super(CuDNNGRU, self).__init__( return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, **kwargs) self.kernel_initializer = initializers.get(kernel_initializer) self.recurrent_initializer = initializers.get(recurrent_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.recurrent_regularizer = regularizers.get(recurrent_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.activity_regularizer = regularizers.get(activity_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.recurrent_constraint = constraints.get(recurrent_constraint) self.bias_constraint = constraints.get(bias_constraint) @property def cell(self): return self._cell def build(self, input_shape): super(CuDNNGRU, self).build(input_shape) if isinstance(input_shape, list): input_shape = input_shape[0] input_dim = int(input_shape[-1]) self.kernel = self.add_weight( shape=(input_dim, self.units * 3), name='kernel', initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) self.recurrent_kernel = self.add_weight( shape=(self.units, self.units * 3), name='recurrent_kernel', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) self.bias = self.add_weight( shape=(self.units * 6,), name='bias', initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) self.built = True def _process_batch(self, inputs, initial_state): if not self.time_major: inputs = array_ops.transpose(inputs, perm=(1, 0, 2)) input_h = initial_state[0] input_h = array_ops.expand_dims(input_h, axis=0) params = [] if build_info.is_rocm_build: # ROCm MIOpen's weight sequence for GRU is same with the canonical format # but different from that of Cudnn # MIOpen/Canonical: [z, r, h] Cudnn: [r, z, h] # z is update gate weights. # r is reset gate weights. # h is output gate weights. 
params = recurrent_v2._canonical_to_params( # pylint: disable=protected-access weights=[ self.kernel[:, :self.units], self.kernel[:, self.units:self.units * 2], self.kernel[:, self.units * 2:], self.recurrent_kernel[:, :self.units], self.recurrent_kernel[:, self.units:self.units * 2], self.recurrent_kernel[:, self.units * 2:], ], biases=[ self.bias[:self.units], self.bias[self.units:self.units * 2], self.bias[self.units * 2:self.units * 3], self.bias[self.units * 3:self.units * 4], self.bias[self.units * 4:self.units * 5], self.bias[self.units * 5:], ], shape=self._vector_shape) else: params = recurrent_v2._canonical_to_params( # pylint: disable=protected-access weights=[ self.kernel[:, self.units:self.units * 2], self.kernel[:, :self.units], self.kernel[:, self.units * 2:], self.recurrent_kernel[:, self.units:self.units * 2], self.recurrent_kernel[:, :self.units], self.recurrent_kernel[:, self.units * 2:], ], biases=[ self.bias[self.units:self.units * 2], self.bias[:self.units], self.bias[self.units * 2:self.units * 3], self.bias[self.units * 4:self.units * 5], self.bias[self.units * 3:self.units * 4], self.bias[self.units * 5:], ], shape=self._vector_shape) args = { 'input': inputs, 'input_h': input_h, 'input_c': 0, 'params': params, 'is_training': True, 'rnn_mode': 'gru', } outputs, h, _, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv2(**args) if self.stateful or self.return_state: h = h[0] if self.return_sequences: if self.time_major: output = outputs else: output = array_ops.transpose(outputs, perm=(1, 0, 2)) else: output = outputs[-1] return output, [h] def get_config(self): config = { 'units': self.units, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint) } base_config = super(CuDNNGRU, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export(v1=['keras.layers.CuDNNLSTM']) class CuDNNLSTM(_CuDNNRNN): """Fast LSTM implementation backed by cuDNN. More information about cuDNN can be found on the [NVIDIA developer website](https://developer.nvidia.com/cudnn). Can only be run on GPU. Arguments: units: Positive integer, dimensionality of the output space. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at initialization. Setting it to true will also force `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf) recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. 
bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. """ def __init__(self, units, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, return_sequences=False, return_state=False, go_backwards=False, stateful=False, **kwargs): self.units = units cell_spec = collections.namedtuple('cell', 'state_size') self._cell = cell_spec(state_size=(self.units, self.units)) super(CuDNNLSTM, self).__init__( return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, **kwargs) self.kernel_initializer = initializers.get(kernel_initializer) self.recurrent_initializer = initializers.get(recurrent_initializer) self.bias_initializer = initializers.get(bias_initializer) self.unit_forget_bias = unit_forget_bias self.kernel_regularizer = regularizers.get(kernel_regularizer) self.recurrent_regularizer = regularizers.get(recurrent_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.activity_regularizer = regularizers.get(activity_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.recurrent_constraint = constraints.get(recurrent_constraint) self.bias_constraint = constraints.get(bias_constraint) @property def cell(self): return self._cell def build(self, input_shape): super(CuDNNLSTM, self).build(input_shape) if isinstance(input_shape, list): input_shape = input_shape[0] input_dim = int(input_shape[-1]) self.kernel = self.add_weight( shape=(input_dim, self.units * 4), name='kernel', initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) self.recurrent_kernel = self.add_weight( shape=(self.units, self.units * 4), name='recurrent_kernel', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) if self.unit_forget_bias: def bias_initializer(_, *args, **kwargs): return array_ops.concat([ self.bias_initializer((self.units * 5,), *args, **kwargs), initializers.Ones()((self.units,), *args, **kwargs), self.bias_initializer((self.units * 2,), *args, **kwargs), ], axis=0) else: bias_initializer = self.bias_initializer self.bias = self.add_weight( shape=(self.units * 8,), name='bias', initializer=bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) self.built = True def _process_batch(self, inputs, initial_state): if not self.time_major: inputs = array_ops.transpose(inputs, perm=(1, 0, 2)) input_h = initial_state[0] input_c 
= initial_state[1] input_h = array_ops.expand_dims(input_h, axis=0) input_c = array_ops.expand_dims(input_c, axis=0) params = [] if build_info.is_rocm_build: # ROCm MIOpen's weight sequence for LSTM is different from both canonical # and Cudnn format # MIOpen: [i, f, o, c] Cudnn/Canonical: [i, f, c, o] # i is input gate weights. # f is forget gate weights. # o is output gate weights. # c is cell gate weights. params = recurrent_v2._canonical_to_params( # pylint: disable=protected-access weights=[ self.kernel[:, :self.units], self.kernel[:, self.units:self.units * 2], self.kernel[:, self.units * 3:], self.kernel[:, self.units * 2:self.units * 3], self.recurrent_kernel[:, :self.units], self.recurrent_kernel[:, self.units:self.units * 2], self.recurrent_kernel[:, self.units * 3:], self.recurrent_kernel[:, self.units * 2:self.units * 3], ], biases=[ self.bias[:self.units], self.bias[self.units:self.units * 2], self.bias[self.units * 3:self.units * 4], self.bias[self.units * 2:self.units * 3], self.bias[self.units * 4:self.units * 5], self.bias[self.units * 5:self.units * 6], self.bias[self.units * 7:], self.bias[self.units * 6:self.units * 7], ], shape=self._vector_shape) else: params = recurrent_v2._canonical_to_params( # pylint: disable=protected-access weights=[ self.kernel[:, :self.units], self.kernel[:, self.units:self.units * 2], self.kernel[:, self.units * 2:self.units * 3], self.kernel[:, self.units * 3:], self.recurrent_kernel[:, :self.units], self.recurrent_kernel[:, self.units:self.units * 2], self.recurrent_kernel[:, self.units * 2:self.units * 3], self.recurrent_kernel[:, self.units * 3:], ], biases=[ self.bias[:self.units], self.bias[self.units:self.units * 2], self.bias[self.units * 2:self.units * 3], self.bias[self.units * 3:self.units * 4], self.bias[self.units * 4:self.units * 5], self.bias[self.units * 5:self.units * 6], self.bias[self.units * 6:self.units * 7], self.bias[self.units * 7:], ], shape=self._vector_shape) args = { 'input': inputs, 'input_h': input_h, 'input_c': input_c, 'params': params, 'is_training': True, } outputs, h, c, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv2(**args) if self.stateful or self.return_state: h = h[0] c = c[0] if self.return_sequences: if self.time_major: output = outputs else: output = array_ops.transpose(outputs, perm=(1, 0, 2)) else: output = outputs[-1] return output, [h, c] def get_config(self): config = { 'units': self.units, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'unit_forget_bias': self.unit_forget_bias, 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint) } base_config = super(CuDNNLSTM, self).get_config() return dict(list(base_config.items()) + list(config.items()))
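# A small numpy sketch of the LSTM gate reordering performed in
# _process_batch above, assuming `units` columns per gate: the canonical and
# Cudnn layout is [i | f | c | o], while ROCm's MIOpen expects [i | f | o | c].
# All names below are illustrative.
import numpy as np

units = 2
kernel = np.arange(4 * units).reshape(1, 4 * units)   # columns [i | f | c | o]
i = kernel[:, :units]                                 # input gate
f = kernel[:, units:units * 2]                        # forget gate
c = kernel[:, units * 2:units * 3]                    # cell gate
o = kernel[:, units * 3:]                             # output gate
miopen_kernel = np.concatenate([i, f, o, c], axis=1)  # MIOpen order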
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/cudnn_recurrent.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for convolutional layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.framework import test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test @keras_parameterized.run_all_keras_modes class Conv1DTest(keras_parameterized.TestCase): def _run_test(self, kwargs, expected_output_shape): num_samples = 2 stack_size = 3 length = 7 with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.Conv1D, kwargs=kwargs, input_shape=(num_samples, length, stack_size), expected_output_shape=expected_output_shape) @parameterized.named_parameters( ('padding_valid', {'padding': 'valid'}, (None, 5, 2)), ('padding_same', {'padding': 'same'}, (None, 7, 2)), ('padding_same_dilation_2', {'padding': 'same', 'dilation_rate': 2}, (None, 7, 2)), ('padding_same_dilation_3', {'padding': 'same', 'dilation_rate': 3}, (None, 7, 2)), ('padding_causal', {'padding': 'causal'}, (None, 7, 2)), ('strides', {'strides': 2}, (None, 3, 2)), ('dilation_rate', {'dilation_rate': 2}, (None, 3, 2)), ) def test_conv1d(self, kwargs, expected_output_shape): kwargs['filters'] = 2 kwargs['kernel_size'] = 3 if test.is_gpu_available(): kwargs['fused'] = True self._run_test(kwargs, expected_output_shape) def test_conv1d_regularizers(self): kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2', 'activity_regularizer': 'l2', 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.Conv1D(**kwargs) layer.build((None, 5, 2)) self.assertEqual(len(layer.losses), 2) layer(keras.backend.variable(np.ones((1, 5, 2)))) self.assertEqual(len(layer.losses), 3) def test_conv1d_constraints(self): k_constraint = lambda x: x b_constraint = lambda x: x kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'kernel_constraint': k_constraint, 'bias_constraint': b_constraint, 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.Conv1D(**kwargs) layer.build((None, 5, 2)) self.assertEqual(layer.kernel.constraint, k_constraint) self.assertEqual(layer.bias.constraint, b_constraint) @keras_parameterized.run_all_keras_modes class Conv2DTest(keras_parameterized.TestCase): def _run_test(self, kwargs, expected_output_shape): num_samples = 2 stack_size = 3 num_row = 7 num_col = 6 with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.Conv2D, kwargs=kwargs, input_shape=(num_samples, num_row, num_col, stack_size), expected_output_shape=expected_output_shape) @parameterized.named_parameters( ('padding_valid', {'padding': 
'valid'}, (None, 5, 4, 2)), ('padding_same', {'padding': 'same'}, (None, 7, 6, 2)), ('padding_same_dilation_2', {'padding': 'same', 'dilation_rate': 2}, (None, 7, 6, 2)), ('strides', {'strides': (2, 2)}, (None, 3, 2, 2)), ('dilation_rate', {'dilation_rate': (2, 2)}, (None, 3, 2, 2)), # Only runs on GPU with CUDA, channels_first is not supported on CPU. # TODO(b/62340061): Support channels_first on CPU. ('data_format', {'data_format': 'channels_first'}), ) def test_conv2d(self, kwargs, expected_output_shape=None): kwargs['filters'] = 2 kwargs['kernel_size'] = (3, 3) if test.is_gpu_available(): kwargs['fused'] = True if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True): self._run_test(kwargs, expected_output_shape) def test_conv2d_regularizers(self): kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2', 'activity_regularizer': 'l2', 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.Conv2D(**kwargs) layer.build((None, 5, 5, 2)) self.assertEqual(len(layer.losses), 2) layer(keras.backend.variable(np.ones((1, 5, 5, 2)))) self.assertEqual(len(layer.losses), 3) def test_conv2d_constraints(self): k_constraint = lambda x: x b_constraint = lambda x: x kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'kernel_constraint': k_constraint, 'bias_constraint': b_constraint, 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.Conv2D(**kwargs) layer.build((None, 5, 5, 2)) self.assertEqual(layer.kernel.constraint, k_constraint) self.assertEqual(layer.bias.constraint, b_constraint) @keras_parameterized.run_all_keras_modes class Conv3DTest(keras_parameterized.TestCase): def _run_test(self, kwargs, expected_output_shape): num_samples = 2 stack_size = 3 num_row = 7 num_col = 6 depth = 5 with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.Conv3D, kwargs=kwargs, input_shape=(num_samples, depth, num_row, num_col, stack_size), expected_output_shape=expected_output_shape) @parameterized.named_parameters( ('padding_valid', {'padding': 'valid'}, (None, 3, 5, 4, 2)), ('padding_same', {'padding': 'same'}, (None, 5, 7, 6, 2)), ('strides', {'strides': (2, 2, 2)}, (None, 2, 3, 2, 2)), ('dilation_rate', {'dilation_rate': (2, 2, 2)}, (None, 1, 3, 2, 2)), # Only runs on GPU with CUDA, channels_first is not supported on CPU. # TODO(b/62340061): Support channels_first on CPU. 
('data_format', {'data_format': 'channels_first'}), ) def test_conv3d(self, kwargs, expected_output_shape=None): kwargs['filters'] = 2 kwargs['kernel_size'] = (3, 3, 3) if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True): self._run_test(kwargs, expected_output_shape) def test_conv3d_regularizers(self): kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2', 'activity_regularizer': 'l2', 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.Conv3D(**kwargs) layer.build((None, 5, 5, 5, 2)) self.assertEqual(len(layer.losses), 2) self.assertEqual(len(layer.losses), 2) layer(keras.backend.variable(np.ones((1, 5, 5, 5, 2)))) self.assertEqual(len(layer.losses), 3) def test_conv3d_constraints(self): k_constraint = lambda x: x b_constraint = lambda x: x kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'kernel_constraint': k_constraint, 'bias_constraint': b_constraint, 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.Conv3D(**kwargs) layer.build((None, 5, 5, 5, 2)) self.assertEqual(layer.kernel.constraint, k_constraint) self.assertEqual(layer.bias.constraint, b_constraint) def test_conv3d_dynamic_shape(self): input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32) with self.cached_session(use_gpu=True): # Won't raise error here. testing_utils.layer_test( keras.layers.Conv3D, kwargs={ 'data_format': 'channels_last', 'filters': 3, 'kernel_size': 3 }, input_shape=(None, None, None, None, 3), input_data=input_data) if test.is_gpu_available(cuda_only=True): testing_utils.layer_test( keras.layers.Conv3D, kwargs={ 'data_format': 'channels_first', 'filters': 3, 'kernel_size': 3 }, input_shape=(None, 3, None, None, None), input_data=input_data) @keras_parameterized.run_all_keras_modes class ZeroPaddingTest(keras_parameterized.TestCase): def test_zero_padding_1d(self): num_samples = 2 input_dim = 2 num_steps = 5 shape = (num_samples, num_steps, input_dim) inputs = np.ones(shape) with self.cached_session(use_gpu=True): # basic test testing_utils.layer_test( keras.layers.ZeroPadding1D, kwargs={'padding': 2}, input_shape=inputs.shape) testing_utils.layer_test( keras.layers.ZeroPadding1D, kwargs={'padding': (1, 2)}, input_shape=inputs.shape) # correctness test layer = keras.layers.ZeroPadding1D(padding=2) layer.build(shape) output = layer(keras.backend.variable(inputs)) if context.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) for offset in [0, 1, -1, -2]: np.testing.assert_allclose(np_output[:, offset, :], 0.) np.testing.assert_allclose(np_output[:, 2:-2, :], 1.) layer = keras.layers.ZeroPadding1D(padding=(1, 2)) layer.build(shape) output = layer(keras.backend.variable(inputs)) if context.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) for left_offset in [0]: np.testing.assert_allclose(np_output[:, left_offset, :], 0.) for right_offset in [-1, -2]: np.testing.assert_allclose(np_output[:, right_offset, :], 0.) np.testing.assert_allclose(np_output[:, 1:-2, :], 1.) 
layer.get_config() # test incorrect use with self.assertRaises(ValueError): keras.layers.ZeroPadding1D(padding=(1, 1, 1)) with self.assertRaises(ValueError): keras.layers.ZeroPadding1D(padding=None) def test_zero_padding_2d(self): num_samples = 2 stack_size = 2 input_num_row = 4 input_num_col = 5 for data_format in ['channels_first', 'channels_last']: inputs = np.ones((num_samples, input_num_row, input_num_col, stack_size)) inputs = np.ones((num_samples, stack_size, input_num_row, input_num_col)) # basic test with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.ZeroPadding2D, kwargs={'padding': (2, 2), 'data_format': data_format}, input_shape=inputs.shape) testing_utils.layer_test( keras.layers.ZeroPadding2D, kwargs={'padding': ((1, 2), (3, 4)), 'data_format': data_format}, input_shape=inputs.shape) # correctness test with self.cached_session(use_gpu=True): layer = keras.layers.ZeroPadding2D( padding=(2, 2), data_format=data_format) layer.build(inputs.shape) output = layer(keras.backend.variable(inputs)) if context.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) if data_format == 'channels_last': for offset in [0, 1, -1, -2]: np.testing.assert_allclose(np_output[:, offset, :, :], 0.) np.testing.assert_allclose(np_output[:, :, offset, :], 0.) np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.) elif data_format == 'channels_first': for offset in [0, 1, -1, -2]: np.testing.assert_allclose(np_output[:, :, offset, :], 0.) np.testing.assert_allclose(np_output[:, :, :, offset], 0.) np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.) layer = keras.layers.ZeroPadding2D( padding=((1, 2), (3, 4)), data_format=data_format) layer.build(inputs.shape) output = layer(keras.backend.variable(inputs)) if context.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) if data_format == 'channels_last': for top_offset in [0]: np.testing.assert_allclose(np_output[:, top_offset, :, :], 0.) for bottom_offset in [-1, -2]: np.testing.assert_allclose(np_output[:, bottom_offset, :, :], 0.) for left_offset in [0, 1, 2]: np.testing.assert_allclose(np_output[:, :, left_offset, :], 0.) for right_offset in [-1, -2, -3, -4]: np.testing.assert_allclose(np_output[:, :, right_offset, :], 0.) np.testing.assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.) elif data_format == 'channels_first': for top_offset in [0]: np.testing.assert_allclose(np_output[:, :, top_offset, :], 0.) for bottom_offset in [-1, -2]: np.testing.assert_allclose(np_output[:, :, bottom_offset, :], 0.) for left_offset in [0, 1, 2]: np.testing.assert_allclose(np_output[:, :, :, left_offset], 0.) for right_offset in [-1, -2, -3, -4]: np.testing.assert_allclose(np_output[:, :, :, right_offset], 0.) np.testing.assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.) 
# test incorrect use with self.assertRaises(ValueError): keras.layers.ZeroPadding2D(padding=(1, 1, 1)) with self.assertRaises(ValueError): keras.layers.ZeroPadding2D(padding=None) def test_zero_padding_3d(self): num_samples = 2 stack_size = 2 input_len_dim1 = 4 input_len_dim2 = 5 input_len_dim3 = 3 inputs = np.ones((num_samples, input_len_dim1, input_len_dim2, input_len_dim3, stack_size)) with self.cached_session(use_gpu=True): # basic test testing_utils.layer_test( keras.layers.ZeroPadding3D, kwargs={'padding': (2, 2, 2)}, input_shape=inputs.shape) # correctness test layer = keras.layers.ZeroPadding3D(padding=(2, 2, 2)) layer.build(inputs.shape) output = layer(keras.backend.variable(inputs)) if context.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) for offset in [0, 1, -1, -2]: np.testing.assert_allclose(np_output[:, offset, :, :, :], 0.) np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.) np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.) np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, 2:-2, :], 1.) # test incorrect use with self.assertRaises(ValueError): keras.layers.ZeroPadding3D(padding=(1, 1)) with self.assertRaises(ValueError): keras.layers.ZeroPadding3D(padding=None) @test_util.for_all_test_methods(test_util.disable_xla, 'align_corners=False not supported by XLA') @keras_parameterized.run_all_keras_modes class UpSamplingTest(keras_parameterized.TestCase): def test_upsampling_1d(self): with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.UpSampling1D, kwargs={'size': 2}, input_shape=(3, 5, 4)) def test_upsampling_2d(self): num_samples = 2 stack_size = 2 input_num_row = 11 input_num_col = 12 for data_format in ['channels_first', 'channels_last']: if data_format == 'channels_first': inputs = np.random.rand(num_samples, stack_size, input_num_row, input_num_col) else: inputs = np.random.rand(num_samples, input_num_row, input_num_col, stack_size) # basic test with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.UpSampling2D, kwargs={'size': (2, 2), 'data_format': data_format}, input_shape=inputs.shape) for length_row in [2]: for length_col in [2, 3]: layer = keras.layers.UpSampling2D( size=(length_row, length_col), data_format=data_format) layer.build(inputs.shape) output = layer(keras.backend.variable(inputs)) if context.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) if data_format == 'channels_first': assert np_output.shape[2] == length_row * input_num_row assert np_output.shape[3] == length_col * input_num_col else: # tf assert np_output.shape[1] == length_row * input_num_row assert np_output.shape[2] == length_col * input_num_col # compare with numpy if data_format == 'channels_first': expected_out = np.repeat(inputs, length_row, axis=2) expected_out = np.repeat(expected_out, length_col, axis=3) else: # tf expected_out = np.repeat(inputs, length_row, axis=1) expected_out = np.repeat(expected_out, length_col, axis=2) np.testing.assert_allclose(np_output, expected_out) def test_upsampling_2d_bilinear(self): num_samples = 2 stack_size = 2 input_num_row = 11 input_num_col = 12 for data_format in ['channels_first', 'channels_last']: if data_format == 'channels_first': inputs = np.random.rand(num_samples, stack_size, input_num_row, input_num_col) else: inputs = np.random.rand(num_samples, input_num_row, input_num_col, stack_size) testing_utils.layer_test(keras.layers.UpSampling2D, kwargs={'size': (2, 2), 'data_format': 
data_format, 'interpolation': 'bilinear'}, input_shape=inputs.shape) if not context.executing_eagerly(): for length_row in [2]: for length_col in [2, 3]: layer = keras.layers.UpSampling2D( size=(length_row, length_col), data_format=data_format) layer.build(inputs.shape) outputs = layer(keras.backend.variable(inputs)) np_output = keras.backend.eval(outputs) if data_format == 'channels_first': self.assertEqual(np_output.shape[2], length_row * input_num_row) self.assertEqual(np_output.shape[3], length_col * input_num_col) else: self.assertEqual(np_output.shape[1], length_row * input_num_row) self.assertEqual(np_output.shape[2], length_col * input_num_col) def test_upsampling_3d(self): num_samples = 2 stack_size = 2 input_len_dim1 = 10 input_len_dim2 = 11 input_len_dim3 = 12 for data_format in ['channels_first', 'channels_last']: if data_format == 'channels_first': inputs = np.random.rand(num_samples, stack_size, input_len_dim1, input_len_dim2, input_len_dim3) else: inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2, input_len_dim3, stack_size) # basic test with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.UpSampling3D, kwargs={'size': (2, 2, 2), 'data_format': data_format}, input_shape=inputs.shape) for length_dim1 in [2, 3]: for length_dim2 in [2]: for length_dim3 in [3]: layer = keras.layers.UpSampling3D( size=(length_dim1, length_dim2, length_dim3), data_format=data_format) layer.build(inputs.shape) output = layer(keras.backend.variable(inputs)) if context.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) if data_format == 'channels_first': assert np_output.shape[2] == length_dim1 * input_len_dim1 assert np_output.shape[3] == length_dim2 * input_len_dim2 assert np_output.shape[4] == length_dim3 * input_len_dim3 else: # tf assert np_output.shape[1] == length_dim1 * input_len_dim1 assert np_output.shape[2] == length_dim2 * input_len_dim2 assert np_output.shape[3] == length_dim3 * input_len_dim3 # compare with numpy if data_format == 'channels_first': expected_out = np.repeat(inputs, length_dim1, axis=2) expected_out = np.repeat(expected_out, length_dim2, axis=3) expected_out = np.repeat(expected_out, length_dim3, axis=4) else: # tf expected_out = np.repeat(inputs, length_dim1, axis=1) expected_out = np.repeat(expected_out, length_dim2, axis=2) expected_out = np.repeat(expected_out, length_dim3, axis=3) np.testing.assert_allclose(np_output, expected_out) @keras_parameterized.run_all_keras_modes class CroppingTest(keras_parameterized.TestCase): def test_cropping_1d(self): num_samples = 2 time_length = 4 input_len_dim1 = 2 inputs = np.random.rand(num_samples, time_length, input_len_dim1) with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.Cropping1D, kwargs={'cropping': (2, 2)}, input_shape=inputs.shape) # test incorrect use with self.assertRaises(ValueError): keras.layers.Cropping1D(cropping=(1, 1, 1)) with self.assertRaises(ValueError): keras.layers.Cropping1D(cropping=None) def test_cropping_2d(self): num_samples = 2 stack_size = 2 input_len_dim1 = 9 input_len_dim2 = 9 cropping = ((2, 2), (3, 3)) for data_format in ['channels_first', 'channels_last']: if data_format == 'channels_first': inputs = np.random.rand(num_samples, stack_size, input_len_dim1, input_len_dim2) else: inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2, stack_size) with self.cached_session(use_gpu=True): # basic test testing_utils.layer_test( keras.layers.Cropping2D, kwargs={'cropping': 
cropping, 'data_format': data_format}, input_shape=inputs.shape) # correctness test layer = keras.layers.Cropping2D( cropping=cropping, data_format=data_format) layer.build(inputs.shape) output = layer(keras.backend.variable(inputs)) if context.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) # compare with numpy if data_format == 'channels_first': expected_out = inputs[:, :, cropping[0][0]:-cropping[0][1], cropping[ 1][0]:-cropping[1][1]] else: expected_out = inputs[:, cropping[0][0]:-cropping[0][1], cropping[1][ 0]:-cropping[1][1], :] np.testing.assert_allclose(np_output, expected_out) for data_format in ['channels_first', 'channels_last']: if data_format == 'channels_first': inputs = np.random.rand(num_samples, stack_size, input_len_dim1, input_len_dim2) else: inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2, stack_size) # another correctness test (no cropping) with self.cached_session(use_gpu=True): cropping = ((0, 0), (0, 0)) layer = keras.layers.Cropping2D( cropping=cropping, data_format=data_format) layer.build(inputs.shape) output = layer(keras.backend.variable(inputs)) if context.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) # compare with input np.testing.assert_allclose(np_output, inputs) # test incorrect use with self.assertRaises(ValueError): keras.layers.Cropping2D(cropping=(1, 1, 1)) with self.assertRaises(ValueError): keras.layers.Cropping2D(cropping=None) def test_cropping_3d(self): num_samples = 2 stack_size = 2 input_len_dim1 = 8 input_len_dim2 = 8 input_len_dim3 = 8 croppings = [((2, 2), (1, 1), (2, 3)), 3, (0, 1, 1)] for cropping in croppings: for data_format in ['channels_last', 'channels_first']: if data_format == 'channels_first': inputs = np.random.rand(num_samples, stack_size, input_len_dim1, input_len_dim2, input_len_dim3) else: inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2, input_len_dim3, stack_size) # basic test with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.Cropping3D, kwargs={'cropping': cropping, 'data_format': data_format}, input_shape=inputs.shape) if len(croppings) == 3 and len(croppings[0]) == 2: # correctness test with self.cached_session(use_gpu=True): layer = keras.layers.Cropping3D( cropping=cropping, data_format=data_format) layer.build(inputs.shape) output = layer(keras.backend.variable(inputs)) if context.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) # compare with numpy if data_format == 'channels_first': expected_out = inputs[:, :, cropping[0][0]:-cropping[0][1], cropping[1][0]:-cropping[1][1], cropping[2][0]:-cropping[2][1]] else: expected_out = inputs[:, cropping[0][0]:-cropping[0][1], cropping[1][0]:-cropping[1][1], cropping[2][0]:-cropping[2][1], :] np.testing.assert_allclose(np_output, expected_out) # test incorrect use with self.assertRaises(ValueError): keras.layers.Cropping3D(cropping=(1, 1)) with self.assertRaises(ValueError): keras.layers.Cropping3D(cropping=None) @keras_parameterized.run_all_keras_modes class DepthwiseConv2DTest(keras_parameterized.TestCase): def _run_test(self, kwargs): num_samples = 2 stack_size = 3 num_row = 7 num_col = 6 with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.DepthwiseConv2D, kwargs=kwargs, input_shape=(num_samples, num_row, num_col, stack_size)) @parameterized.named_parameters( ('padding_valid', {'padding': 'valid'}), ('padding_same', {'padding': 'same'}), 
('strides', {'strides': (2, 2)}), # Only runs on GPU with CUDA, channels_first is not supported on CPU. # TODO(b/62340061): Support channels_first on CPU. ('data_format', {'data_format': 'channels_first'}), ('depth_multiplier_1', {'depth_multiplier': 1}), ('depth_multiplier_2', {'depth_multiplier': 2}), ) def test_depthwise_conv2d(self, kwargs): kwargs['kernel_size'] = (3, 3) if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True): self._run_test(kwargs) def test_depthwise_conv2d_full(self): kwargs = { 'kernel_size': 3, 'padding': 'valid', 'data_format': 'channels_last', 'activation': None, 'depthwise_regularizer': 'l2', 'bias_regularizer': 'l2', 'activity_regularizer': 'l2', 'depthwise_constraint': 'unit_norm', 'use_bias': True, 'strides': (2, 2), 'depth_multiplier': 1, } self._run_test(kwargs) if __name__ == '__main__': test.main()
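# A helper sketch reproducing the output-length arithmetic behind the expected
# shapes in the parameterized Conv1D cases above; it mirrors the standard
# Keras convolution shape rules, and the function itself is illustrative.
def conv_output_length(length, kernel_size, padding='valid', stride=1,
                       dilation=1):
  # Dilation spreads the kernel taps out to an effective size.
  effective_kernel = kernel_size + (kernel_size - 1) * (dilation - 1)
  if padding in ('same', 'causal'):
    out = length
  else:  # 'valid'
    out = length - effective_kernel + 1
  return (out + stride - 1) // stride  # ceil division accounts for strides

# For length=7, kernel_size=3, as in Conv1DTest:
assert conv_output_length(7, 3) == 5                   # padding_valid
assert conv_output_length(7, 3, padding='same') == 7   # padding_same
assert conv_output_length(7, 3, stride=2) == 3         # strides
assert conv_output_length(7, 3, dilation=2) == 3       # dilation_rate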
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/convolutional_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for layer serialization utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python import keras from tensorflow.python import tf2 from tensorflow.python.framework import test_util as tf_test_util from tensorflow.python.keras.layers import normalization as batchnorm_v1 from tensorflow.python.keras.layers import normalization_v2 as batchnorm_v2 from tensorflow.python.keras.layers import recurrent as rnn_v1 from tensorflow.python.keras.layers import recurrent_v2 as rnn_v2 from tensorflow.python.platform import test @tf_test_util.run_all_in_graph_and_eager_modes class LayerSerializationTest(parameterized.TestCase, test.TestCase): def test_serialize_deserialize(self): layer = keras.layers.Dense( 3, activation='relu', kernel_initializer='ones', bias_regularizer='l2') config = keras.layers.serialize(layer) new_layer = keras.layers.deserialize(config) self.assertEqual(new_layer.activation, keras.activations.relu) self.assertEqual(new_layer.bias_regularizer.__class__, keras.regularizers.L1L2) if tf2.enabled(): self.assertEqual(new_layer.kernel_initializer.__class__, keras.initializers.OnesV2) else: self.assertEqual(new_layer.kernel_initializer.__class__, keras.initializers.Ones) self.assertEqual(new_layer.units, 3) @parameterized.parameters( [batchnorm_v1.BatchNormalization, batchnorm_v2.BatchNormalization]) def test_serialize_deserialize_batchnorm(self, batchnorm_layer): layer = batchnorm_layer( momentum=0.9, beta_initializer='zeros', gamma_regularizer='l2') config = keras.layers.serialize(layer) self.assertEqual(config['class_name'], 'BatchNormalization') new_layer = keras.layers.deserialize(config) self.assertEqual(new_layer.momentum, 0.9) if tf2.enabled(): self.assertIsInstance(new_layer, batchnorm_v2.BatchNormalization) self.assertEqual(new_layer.beta_initializer.__class__, keras.initializers.ZerosV2) else: self.assertIsInstance(new_layer, batchnorm_v1.BatchNormalization) self.assertEqual(new_layer.beta_initializer.__class__, keras.initializers.Zeros) self.assertEqual(new_layer.gamma_regularizer.__class__, keras.regularizers.L1L2) @parameterized.parameters( [batchnorm_v1.BatchNormalization, batchnorm_v2.BatchNormalization]) def test_deserialize_batchnorm_backwards_compatiblity(self, batchnorm_layer): layer = batchnorm_layer( momentum=0.9, beta_initializer='zeros', gamma_regularizer='l2') config = keras.layers.serialize(layer) # To simulate if BatchNormalizationV1 or BatchNormalizationV2 appears in the # saved model. 
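# (The remapping itself lives in the _DESERIALIZATION_TABLE defined in
# serialization.py, which rewrites both legacy names back to
# 'BatchNormalization' before class lookup.)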
if batchnorm_layer is batchnorm_v1.BatchNormalization: config['class_name'] = 'BatchNormalizationV1' else: config['class_name'] = 'BatchNormalizationV2' new_layer = keras.layers.deserialize(config) self.assertEqual(new_layer.momentum, 0.9) if tf2.enabled(): self.assertIsInstance(new_layer, batchnorm_v2.BatchNormalization) self.assertEqual(new_layer.beta_initializer.__class__, keras.initializers.ZerosV2) else: self.assertIsInstance(new_layer, batchnorm_v1.BatchNormalization) self.assertEqual(new_layer.beta_initializer.__class__, keras.initializers.Zeros) self.assertEqual(new_layer.gamma_regularizer.__class__, keras.regularizers.L1L2) @parameterized.parameters([rnn_v1.LSTM, rnn_v2.LSTM]) def test_serialize_deserialize_lstm(self, layer): lstm = layer(5, return_sequences=True) config = keras.layers.serialize(lstm) self.assertEqual(config['class_name'], 'LSTM') new_layer = keras.layers.deserialize(config) self.assertEqual(new_layer.units, 5) self.assertEqual(new_layer.return_sequences, True) if tf2.enabled(): self.assertIsInstance(new_layer, rnn_v2.LSTM) else: self.assertIsInstance(new_layer, rnn_v1.LSTM) self.assertNotIsInstance(new_layer, rnn_v2.LSTM) @parameterized.parameters([rnn_v1.GRU, rnn_v2.GRU]) def test_serialize_deserialize_gru(self, layer): gru = layer(5, return_sequences=True) config = keras.layers.serialize(gru) self.assertEqual(config['class_name'], 'GRU') new_layer = keras.layers.deserialize(config) self.assertEqual(new_layer.units, 5) self.assertEqual(new_layer.return_sequences, True) if tf2.enabled(): self.assertIsInstance(new_layer, rnn_v2.GRU) else: self.assertIsInstance(new_layer, rnn_v1.GRU) self.assertNotIsInstance(new_layer, rnn_v2.GRU) if __name__ == '__main__': test.main()
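# A minimal usage sketch of the serialize/deserialize round trip the tests
# above exercise; `Dense` is used purely as an example layer.
from tensorflow.python import keras

layer = keras.layers.Dense(3, activation='relu')
config = keras.layers.serialize(layer)    # {'class_name': 'Dense', 'config': {...}}
clone = keras.layers.deserialize(config)  # a fresh, unbuilt layer
assert clone.units == 3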
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/serialization_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Layer serialization/deserialization functions. """ # pylint: disable=wildcard-import # pylint: disable=unused-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import tf2 from tensorflow.python.keras.engine.base_layer import AddLoss from tensorflow.python.keras.engine.base_layer import AddMetric from tensorflow.python.keras.engine.base_layer import TensorFlowOpLayer from tensorflow.python.keras.engine.input_layer import Input from tensorflow.python.keras.engine.input_layer import InputLayer from tensorflow.python.keras.layers.advanced_activations import * from tensorflow.python.keras.layers.convolutional import * from tensorflow.python.keras.layers.convolutional_recurrent import * from tensorflow.python.keras.layers.core import * from tensorflow.python.keras.layers.cudnn_recurrent import * from tensorflow.python.keras.layers.dense_attention import * from tensorflow.python.keras.layers.embeddings import * from tensorflow.python.keras.layers.local import * from tensorflow.python.keras.layers.merge import * from tensorflow.python.keras.layers.noise import * from tensorflow.python.keras.layers.normalization import * from tensorflow.python.keras.layers.pooling import * from tensorflow.python.keras.layers.recurrent import * from tensorflow.python.keras.layers.rnn_cell_wrapper_v2 import * from tensorflow.python.keras.layers.wrappers import * from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object from tensorflow.python.util.tf_export import keras_export if tf2.enabled(): from tensorflow.python.keras.layers.normalization_v2 import * # pylint: disable=g-import-not-at-top from tensorflow.python.keras.layers.recurrent_v2 import * # pylint: disable=g-import-not-at-top # This deserialization table is added for backward compatibility, as in TF 1.13, # BatchNormalizationV1 and BatchNormalizationV2 are used as class name for v1 # and v2 version of BatchNormalization, respectively. Here we explicitly convert # them to the canonical name in the config of deserialization. _DESERIALIZATION_TABLE = { 'BatchNormalizationV1': 'BatchNormalization', 'BatchNormalizationV2': 'BatchNormalization', } @keras_export('keras.layers.serialize') def serialize(layer): return {'class_name': layer.__class__.__name__, 'config': layer.get_config()} @keras_export('keras.layers.deserialize') def deserialize(config, custom_objects=None): """Instantiates a layer from a config dictionary. Arguments: config: dict of the form {'class_name': str, 'config': dict} custom_objects: dict mapping class names (or function names) of custom (non-Keras) objects to class/functions Returns: Layer instance (may be Model, Sequential, Network, Layer...) """ # Prevent circular dependencies. 
from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top from tensorflow.python.keras.premade.linear import LinearModel # pylint: disable=g-import-not-at-top from tensorflow.python.keras.premade.wide_deep import WideDeepModel # pylint: disable=g-import-not-at-top from tensorflow.python.feature_column import dense_features # pylint: disable=g-import-not-at-top from tensorflow.python.feature_column import sequence_feature_column as sfc # pylint: disable=g-import-not-at-top globs = globals() # All layers. globs['Network'] = models.Network globs['Model'] = models.Model globs['Sequential'] = models.Sequential globs['LinearModel'] = LinearModel globs['WideDeepModel'] = WideDeepModel # Prevent circular dependencies with FeatureColumn serialization. globs['DenseFeatures'] = dense_features.DenseFeatures globs['SequenceFeatures'] = sfc.SequenceFeatures layer_class_name = config['class_name'] if layer_class_name in _DESERIALIZATION_TABLE: config['class_name'] = _DESERIALIZATION_TABLE[layer_class_name] return deserialize_keras_object( config, module_objects=globs, custom_objects=custom_objects, printable_module_name='layer')
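# A sketch of the `custom_objects` path in `deserialize` above: classes that
# are not present in this module's globals can be supplied by the caller.
# `MyScale` is a hypothetical user-defined layer, not part of this module.
from tensorflow.python import keras

class MyScale(keras.layers.Layer):
  def call(self, inputs):
    return inputs * 2.

config = {'class_name': 'MyScale', 'config': {'name': 'my_scale'}}
layer = keras.layers.deserialize(config, custom_objects={'MyScale': MyScale})
assert isinstance(layer, MyScale)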
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/serialization.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests dense attention layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.framework import test_util from tensorflow.python.keras.layers import core from tensorflow.python.keras.layers import dense_attention from tensorflow.python.ops import array_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class BaseDenseAttentionTest(test.TestCase): def test_one_dim_with_mask(self): # Scores tensor of shape [1, 1, 1] scores = np.array([[[1.1]]], dtype=np.float32) # Value tensor of shape [1, 1, 1] v = np.array([[[1.6]]], dtype=np.float32) # Scores mask tensor of shape [1, 1, 1] scores_mask = np.array([[[True]]], dtype=np.bool_) actual = dense_attention.BaseDenseAttention()._apply_scores( scores=scores, value=v, scores_mask=scores_mask) # Expected tensor of shape [1, 1, 1]. # expected000 = softmax(scores)[0, 0] * 1.6 = 1.6 expected = np.array([[[1.6]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_one_dim_no_mask(self): # Scores tensor of shape [1, 1, 1] scores = np.array([[[1.1]]], dtype=np.float32) # Value tensor of shape [1, 1, 1] v = np.array([[[1.6]]], dtype=np.float32) actual = dense_attention.BaseDenseAttention()._apply_scores( scores=scores, value=v) # Expected tensor of shape [1, 1, 1]. # expected000 = softmax(scores)[0, 0] * 1.6 = 1.6 expected = np.array([[[1.6]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_multi_dim_with_mask(self): # Scores tensor of shape [1, 1, 3] scores = np.array([[[1., 0., 1.]]], dtype=np.float32) # Value tensor of shape [1, 3, 1] v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32) # Scores mask tensor of shape [1, 1, 3] scores_mask = np.array([[[True, True, False]]], dtype=np.bool_) actual = dense_attention.BaseDenseAttention()._apply_scores( scores=scores, value=v, scores_mask=scores_mask) # Expected attention distribution = softmax(scores) with zeros in # positions where v_mask == False. # => attention_distribution000 = exp(1)/(exp(1) + exp(0)) = 0.73105857863 # attention_distribution001 = exp(0)/(exp(1) + exp(0)) = 0.26894142137 # attention_distribution002 = 0 # # Expected tensor of shape [1, 1, 1]. 
    # expected000 = 0.73105857863 * 1.6 + 0.26894142137 * 0.7 - 0 * 0.8
    #             = 1.35795272077
    expected = np.array([[[1.35795272077]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_multi_dim_no_mask(self):
    # Scores tensor of shape [1, 1, 3]
    scores = np.array([[[1., 0., 1.]]], dtype=np.float32)
    # Value tensor of shape [1, 3, 1]
    v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
    actual = dense_attention.BaseDenseAttention()._apply_scores(
        scores=scores, value=v)

    # Expected attention distribution = softmax(scores).
    # => attention_distribution000 = exp(1)/(exp(1) + exp(0) + exp(1))
    #                              = 0.42231879825
    #    attention_distribution001 = exp(0)/(exp(1) + exp(0) + exp(1))
    #                              = 0.15536240349
    #    attention_distribution002 = exp(1)/(exp(1) + exp(0) + exp(1))
    #                              = 0.42231879825
    #
    # Expected tensor of shape [1, 1, 1].
    # expected000 = 0.42231879825 * 1.6 + 0.15536240349 * 0.7
    #               - 0.42231879825 * 0.8
    #             = 0.44660872104
    expected = np.array([[[0.44660872104]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_one_dim_batch_size_two(self):
    # Scores tensor of shape [2, 1, 1]
    scores = np.array([[[1.1]], [[2.1]]], dtype=np.float32)
    # Value tensor of shape [2, 1, 1]
    v = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
    # Scores mask tensor of shape [2, 1, 1]
    scores_mask = np.array([[[True]], [[True]]], dtype=np.bool_)
    actual = dense_attention.BaseDenseAttention()._apply_scores(
        scores=scores, value=v, scores_mask=scores_mask)

    # Expected tensor of shape [2, 1, 1].
    # expected000 = softmax(scores)[0, 0] * 1.6 = 1.6
    # expected100 = softmax(scores)[1, 0] * 2.6 = 2.6
    expected = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_serialization(self):
    # Test serialization with causal
    layer = dense_attention.BaseDenseAttention(causal=True)

    config = keras.layers.serialize(layer)
    new_layer = keras.layers.deserialize(config)
    self.assertEqual(new_layer.causal, True)

    config = layer.get_config()
    new_layer = dense_attention.BaseDenseAttention.from_config(config)
    self.assertEqual(new_layer.causal, True)


@test_util.run_all_in_graph_and_eager_modes
class AttentionTest(test.TestCase):

  def test_calculate_scores_one_dim(self):
    # Query tensor of shape [1, 1, 1]
    q = np.array([[[1.1]]], dtype=np.float32)
    # Key tensor of shape [1, 1, 1]
    k = np.array([[[1.6]]], dtype=np.float32)
    attention_layer = dense_attention.Attention()
    attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
    actual = attention_layer._calculate_scores(query=q, key=k)

    # Expected tensor of shape [1, 1, 1].
    # expected000 = 1.1*1.6 = 1.76
    expected = np.array([[[1.76]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_calculate_scores_multi_dim(self):
    # Query tensor of shape [1, 2, 4]
    q = np.array(
        [[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
    # Key tensor of shape [1, 3, 4]
    k = np.array(
        [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
        dtype=np.float32)
    attention_layer = dense_attention.Attention()
    attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4]))
    actual = attention_layer._calculate_scores(query=q, key=k)

    # Expected tensor of shape [1, 2, 3].
# expected000 = 1.*1.5+1.1*1.6+1.2*1.7+1.3*1.8 = 7.64 # expected001 = 1.*2.5+1.1*2.6+1.2*2.7+1.3*2.8 = 12.24 # expected002 = 1.*3.5+1.1*3.6+1.2*3.7+1.3*3.8 = 16.84 # expected010 = 2.*1.5+2.1*1.6+2.2*1.7+2.3*1.8 = 14.24 # expected011 = 2.*2.5+2.1*2.6+2.2*2.7+2.3*2.8 = 22.84 # expected012 = 2.*3.5+2.1*3.6+2.2*3.7+2.3*3.8 = 31.44 expected = np.array( [[[7.64, 12.24, 16.84], [14.24, 22.84, 31.44]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_calculate_scores_one_dim_batch_size_two(self): # Query tensor of shape [2, 1, 1] q = np.array([[[1.1]], [[2.1]]], dtype=np.float32) # Key tensor of shape [2, 1, 1] k = np.array([[[1.6]], [[2.6]]], dtype=np.float32) attention_layer = dense_attention.Attention() attention_layer.build(input_shape=([2, 1, 1], [2, 1, 1])) actual = attention_layer._calculate_scores(query=q, key=k) # Expected tensor of shape [2, 1, 1]. # expected000 = 1.1*1.6 = 1.76 # expected100 = 2.1*2.6 = 5.46 expected = np.array([[[1.76]], [[5.46]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_calculate_scores_one_dim_with_scale(self): """Tests that scores are multiplied by scale.""" # Query tensor of shape [1, 1, 1] q = np.array([[[1.1]]], dtype=np.float32) # Key tensor of shape [1, 1, 1] k = np.array([[[1.6]]], dtype=np.float32) attention_layer = dense_attention.Attention(use_scale=True) attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1])) attention_layer.scale = -2. actual = attention_layer._calculate_scores(query=q, key=k) # Expected tensor of shape [1, 1, 1]. # expected000 = -2*1.1*1.6 = -3.52 expected = np.array([[[-3.52]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_shape(self): # Query tensor of shape [1, 2, 4] q = np.array( [[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32) # Value tensor of shape [1, 3, 4] v = np.array( [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]], dtype=np.float32) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = dense_attention.Attention() actual = attention_layer([q, v], mask=[None, v_mask]) expected_shape = [1, 2, 4] self.assertAllEqual(expected_shape, array_ops.shape(actual)) def test_shape_with_key(self): # Query tensor of shape [1, 2, 4] q = np.array( [[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32) # Value tensor of shape [1, 3, 4] v = np.array( [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]], dtype=np.float32) # Key tensor of shape [1, 3, 4] k = np.array( [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]], dtype=np.float32) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = dense_attention.Attention() actual = attention_layer([q, v, k], mask=[None, v_mask]) expected_shape = [1, 2, 4] self.assertAllEqual(expected_shape, array_ops.shape(actual)) def test_multi_dim(self): # Query tensor of shape [1, 1, 1] q = np.array([[[1.1]]], dtype=np.float32) # Value tensor of shape [1, 3, 1] v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = dense_attention.Attention() actual = attention_layer([q, v], mask=[None, v_mask]) # Expected scores of shape [1, 1, 3] # scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8]]] = [[[1.76, 0.77, -0.88]]] # Expected attention distribution = softmax(scores) with zeros in # positions where v_mask == False. 
# => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77)) # = 0.72908792234 # attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77)) # = 0.27091207765 # attention_distribution002 = 0 # # Expected tensor of shape [1, 1, 1]. # expected000 = 0.72908792234 * 1.6 + 0.27091207765 * 0.7 - 0 * 0.8 # = 1.3561791301 expected = np.array([[[1.3561791301]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_multi_dim_with_key(self): # Query tensor of shape [1, 1, 1] q = np.array([[[1.1]]], dtype=np.float32) # Value tensor of shape [1, 3, 1] v = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32) # Key tensor of shape [1, 3, 1] k = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = dense_attention.Attention() actual = attention_layer([q, v, k], mask=[None, v_mask]) # Expected scores of shape [1, 1, 3] # scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8]]] = [[[1.76, 0.77, -0.88]]] # Expected attention distribution = softmax(scores) with zeros in # positions where v_mask == False. # => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77)) # = 0.72908792234 # attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77)) # = 0.27091207765 # attention_distribution002 = 0 # # Expected tensor of shape [1, 1, 1]. # expected000 = 0.72908792234 * 0.5 + 0.27091207765 * 0.8 - 0 * 0.3 # = 0.58127362329 expected = np.array([[[0.58127362329]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_multi_dim_with_query_mask(self): # Query tensor of shape [1, 2, 1] q = np.array([[[1.1], [-0.5]]], dtype=np.float32) # Value tensor of shape [1, 3, 1] v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32) # Query mask tensor of shape [1, 2] q_mask = np.array([[True, False]], dtype=np.bool_) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = dense_attention.Attention() actual = attention_layer([q, v], mask=[q_mask, v_mask]) # Expected scores of shape [1, 2, 3] # scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8], [-0.5*1.6, -0.5*0.7, 0.5*0.8]]] # = [[[1.76, 0.77, -0.88], [-0.8, -0.35, 0.4]]] # Expected attention distribution = softmax(scores) with zeros in # positions where v_mask == False. # => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77)) # = 0.72908792234 # attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77)) # = 0.27091207765 # attention_distribution002 = 0 # => attention_distribution010 = exp(-0.8)/(exp(-0.8) + exp(-0.35)) # = 0.38936076605 # attention_distribution011 = exp(-0.35)/(exp(-0.8) + exp(-0.35)) # = 0.61063923394 # attention_distribution012 = 0 # # Expected tensor of shape [1, 2, 1] with zeros where q_mask == False. 
    # expected000 = 0.72908792234 * 1.6 + 0.27091207765 * 0.7 - 0 * 0.8
    #             = 1.3561791301
    # expected010 = 0
    expected = np.array([[[1.3561791301], [0.]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_scale_None(self):
    """Tests that scale is None by default."""
    attention_layer = dense_attention.Attention()
    attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
    self.assertIsNone(attention_layer.scale)

  def test_scale_init_eager(self):
    """Tests that scale initializes to 1 when use_scale=True."""
    with context.eager_mode():
      attention_layer = dense_attention.Attention(use_scale=True)
      attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
      self.assertAllClose(1., attention_layer.scale.value())

  @test_util.deprecated_graph_mode_only
  def test_scale_init_graph(self):
    """Tests that scale initializes to 1 when use_scale=True."""
    with self.cached_session() as sess:
      attention_layer = dense_attention.Attention(use_scale=True)
      attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
      sess.run(attention_layer.scale.initializer)
      self.assertAllClose(1., attention_layer.scale.value())

  def test_self_attention_causal(self):
    # Query-value tensor of shape [1, 3, 1]
    q = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)
    attention_layer = dense_attention.Attention(causal=True)
    actual = attention_layer([q, q])

    # Expected scores of shape [1, 3, 3]
    # scores = [[0.25, 0.4, -0.15], [0.4, 0.64, -0.24], [-0.15, -0.24, 0.09]]
    # Expected attention distribution = softmax(scores) lower triangular
    # => attention_distribution00 = [1., 0., 0.]
    #    attention_distribution01
    #      = [exp(0.4), exp(0.64), 0.] / (exp(0.4) + exp(0.64))
    #      = [0.44028635073, 0.55971364926, 0.]
    #    attention_distribution02
    #      = [exp(-0.15), exp(-0.24), exp(0.09)]
    #        / (exp(-0.15) + exp(-0.24) + exp(0.09))
    #      = [0.31395396638, 0.28693232061, 0.399113713]
    #
    # Expected tensor of shape [1, 3, 1].
# expected000 = 0.5 # expected010 = 0.44028635073 * 0.5 + 0.55971364926 * 0.8 # = 0.66791409477 # expected020 = 0.31395396638 * 0.5 +0.28693232061 * 0.8 -0.399113713 * 0.3 # = 0.26678872577 expected = np.array( [[[0.5], [0.66791409477], [0.26678872577]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_inputs_not_list(self): attention_layer = dense_attention.Attention() q = np.array([[[1.1]]], dtype=np.float32) with self.assertRaisesRegexp( ValueError, 'Attention layer must be called on a list of inputs'): attention_layer(q) def test_inputs_too_short(self): attention_layer = dense_attention.Attention() q = np.array([[[1.1]]], dtype=np.float32) with self.assertRaisesRegexp( ValueError, 'Attention layer accepts inputs list of length 2 or 3'): attention_layer([q]) def test_inputs_too_long(self): attention_layer = dense_attention.Attention() q = np.array([[[1.1]]], dtype=np.float32) with self.assertRaisesRegexp( ValueError, 'Attention layer accepts inputs list of length 2 or 3'): attention_layer([q, q, q, q]) def test_mask_not_list(self): attention_layer = dense_attention.Attention() q = np.array([[[1.1]]], dtype=np.float32) mask = np.array([[True]], dtype=np.bool_) with self.assertRaisesRegexp( ValueError, 'Attention layer mask must be a list'): attention_layer([q, q], mask=mask) def test_mask_too_short(self): attention_layer = dense_attention.Attention() q = np.array([[[1.1]]], dtype=np.float32) mask = np.array([[True]], dtype=np.bool_) with self.assertRaisesRegexp( ValueError, 'Attention layer mask must be a list of length 2'): attention_layer([q, q], mask=[mask]) def test_mask_too_long(self): attention_layer = dense_attention.Attention() q = np.array([[[1.1]]], dtype=np.float32) mask = np.array([[True]], dtype=np.bool_) with self.assertRaisesRegexp( ValueError, 'Attention layer mask must be a list of length 2'): attention_layer([q, q], mask=[mask, mask, mask]) def test_override_mask(self): attention_layer = dense_attention.Attention() q = core.Masking()(np.array([[[1.1]]], dtype=np.float32)) mask = np.array([[False]], dtype=np.bool_) actual = attention_layer([q, q], mask=[mask, mask]) self.assertAllClose([[[0]]], actual) def test_implicit_mask(self): attention_layer = dense_attention.Attention() q = core.Masking(1.1)(np.array([[[1.1], [1]]], dtype=np.float32)) v = core.Masking(1.2)(np.array([[[1.2], [1]]], dtype=np.float32)) actual = attention_layer([q, v]) self.assertAllClose([[[0], [1]]], actual) def test_serialization(self): # Test serialization with use_scale layer = dense_attention.Attention(use_scale=True) config = keras.layers.serialize(layer) new_layer = keras.layers.deserialize(config) self.assertEqual(new_layer.use_scale, True) config = layer.get_config() new_layer = dense_attention.Attention.from_config(config) self.assertEqual(new_layer.use_scale, True) @test_util.run_all_in_graph_and_eager_modes class AdditiveAttentionTest(test.TestCase): def test_calculate_scores_one_dim(self): # Query tensor of shape [1, 1, 1] q = np.array([[[1.1]]], dtype=np.float32) # Key tensor of shape [1, 1, 1] k = np.array([[[1.6]]], dtype=np.float32) attention_layer = dense_attention.AdditiveAttention() attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1])) # Scale tensor of shape [1] attention_layer.scale = np.array([[[0.5]]], dtype=np.float32) actual = attention_layer._calculate_scores(query=q, key=k) # Expected tensor of shape [1, 1, 1]. 
# expected000 = 0.5 * tanh(1.1 + 1.6) = 0.49550372683 expected = np.array([[[0.49550372683]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_calculate_scores_multi_dim(self): # Query tensor of shape [1, 2, 4] q = np.array( [[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32) # Key tensor of shape [1, 3, 4] k = np.array( [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]], dtype=np.float32) attention_layer = dense_attention.AdditiveAttention() attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4])) # Scale tensor of shape [4] attention_layer.scale = np.array([[[0.5, 0.6, 0.7, 0.8]]], dtype=np.float32) actual = attention_layer._calculate_scores(query=q, key=k) # pylint:disable=line-too-long # expected000 = 0.5*tanh(1.+1.5) + 0.6*tanh(1.1+1.6) + 0.7*tanh(1.2+1.7) + 0.8*tanh(1.3+1.8) = 2.58044532581 # expected001 = 0.5*tanh(1.+2.5) + 0.6*tanh(1.1+2.6) + 0.7*tanh(1.2+2.7) + 0.8*tanh(1.3+2.8) = 2.59734317449 # expected002 = 0.5*tanh(1.+3.5) + 0.6*tanh(1.1+3.6) + 0.7*tanh(1.2+3.7) + 0.8*tanh(1.3+3.8) = 2.59964024652 # expected010 = 0.5*tanh(2.+1.5) + 0.6*tanh(2.1+1.6) + 0.7*tanh(2.2+1.7) + 0.8*tanh(2.3+1.8) = 2.59734317449 # expected011 = 0.5*tanh(2.+2.5) + 0.6*tanh(2.1+2.6) + 0.7*tanh(2.2+2.7) + 0.8*tanh(2.3+2.8) = 2.59964024652 # expected012 = 0.5*tanh(2.+3.5) + 0.6*tanh(2.1+3.6) + 0.7*tanh(2.2+3.7) + 0.8*tanh(2.3+3.8) = 2.59995130916 # pylint:enable=line-too-long expected = np.array( [[[2.58044532581, 2.59734317449, 2.59964024652], [2.59734317449, 2.59964024652, 2.59995130916]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_calculate_scores_one_dim_batch_size_two(self): # Query tensor of shape [2, 1, 1] q = np.array([[[1.1]], [[2.1]]], dtype=np.float32) # Key tensor of shape [2, 1, 1] k = np.array([[[1.6]], [[2.6]]], dtype=np.float32) attention_layer = dense_attention.AdditiveAttention() attention_layer.build(input_shape=([2, 1, 1], [2, 1, 1])) # Scale tensor of shape [1] attention_layer.scale = np.array([[[0.5]]], dtype=np.float32) actual = attention_layer._calculate_scores(query=q, key=k) # Expected tensor of shape [2, 1, 1]. 
# expected000 = 0.5 * tanh(1.1 + 1.6) = 0.49550372683 # expected100 = 0.5 * tanh(2.1 + 2.6) = 0.49991728277 expected = np.array( [[[0.49550372683]], [[0.49991728277]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_shape(self): # Query tensor of shape [1, 2, 4] q = np.array( [[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32) # Value tensor of shape [1, 3, 4] v = np.array( [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]], dtype=np.float32) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = dense_attention.AdditiveAttention() actual = attention_layer([q, v], mask=[None, v_mask]) expected_shape = [1, 2, 4] self.assertAllEqual(expected_shape, array_ops.shape(actual)) def test_shape_no_scale(self): # Query tensor of shape [1, 2, 4] q = np.array( [[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32) # Value tensor of shape [1, 3, 4] v = np.array( [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]], dtype=np.float32) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = dense_attention.AdditiveAttention(use_scale=False) actual = attention_layer([q, v], mask=[None, v_mask]) expected_shape = [1, 2, 4] self.assertAllEqual(expected_shape, array_ops.shape(actual)) def test_shape_with_key(self): # Query tensor of shape [1, 2, 4] q = np.array( [[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32) # Value tensor of shape [1, 3, 4] v = np.array( [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]], dtype=np.float32) # Key tensor of shape [1, 3, 4] k = np.array( [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]], dtype=np.float32) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = dense_attention.AdditiveAttention() actual = attention_layer([q, v, k], mask=[None, v_mask]) expected_shape = [1, 2, 4] self.assertAllEqual(expected_shape, array_ops.shape(actual)) def test_multi_dim(self): # Query tensor of shape [1, 1, 1] q = np.array([[[1.1]]], dtype=np.float32) # Value tensor of shape [1, 3, 1] v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = dense_attention.AdditiveAttention() attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1])) # Scale tensor of shape [1] attention_layer.scale = np.array([[[0.5]]], dtype=np.float32) actual = attention_layer([q, v], mask=[None, v_mask]) # pylint:disable=line-too-long # Expected scores of shape [1, 1, 3] # scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)]]] # = [[[0.49550372683, 0.47340300642, 0.14565630622]]] # Expected attention distribution = softmax(scores) with zeros in # positions where v_mask == False. # => attention_distribution000 # = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642)) # = 0.50552495521 # attention_distribution001 # = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642)) # = 0.49447504478 # attention_distribution002 = 0 # # Expected tensor of shape [1, 1, 1]. 
# expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8 # = 1.15497245968 # pylint:enable=line-too-long expected = np.array([[[1.15497245968]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_multi_dim_with_key(self): # Query tensor of shape [1, 1, 1] q = np.array([[[1.1]]], dtype=np.float32) # Value tensor of shape [1, 3, 1] v = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32) # Key tensor of shape [1, 3, 1] k = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = dense_attention.AdditiveAttention() attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1])) # Scale tensor of shape [1] attention_layer.scale = np.array([[[0.5]]], dtype=np.float32) actual = attention_layer([q, v, k], mask=[None, v_mask]) # pylint:disable=line-too-long # Expected scores of shape [1, 1, 3] # scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)]]] # = [[[0.49550372683, 0.47340300642, 0.14565630622]]] # Expected attention distribution = softmax(scores) with zeros in # positions where v_mask == False. # => attention_distribution000 # = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642)) # = 0.50552495521 # attention_distribution001 # = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642)) # = 0.49447504478 # attention_distribution002 = 0 # # Expected tensor of shape [1, 1, 1]. # expected000 = 0.50552495521 * 0.5 + 0.49447504478 * 0.8 - 0 * 0.3 # = 0.64834251342 # pylint:enable=line-too-long expected = np.array([[[0.64834251342]]], dtype=np.float32) self.assertAllClose(expected, actual) def test_multi_dim_with_query_mask(self): # Query tensor of shape [1, 2, 1] q = np.array([[[1.1], [-0.5]]], dtype=np.float32) # Value tensor of shape [1, 3, 1] v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32) # Query mask tensor of shape [1, 2] q_mask = np.array([[True, False]], dtype=np.bool_) # Value mask tensor of shape [1, 3] v_mask = np.array([[True, True, False]], dtype=np.bool_) attention_layer = dense_attention.AdditiveAttention() attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1])) # Scale tensor of shape [1] attention_layer.scale = np.array([[[0.5]]], dtype=np.float32) actual = attention_layer([q, v], mask=[q_mask, v_mask]) # pylint:disable=line-too-long # Expected scores of shape [1, 2, 3] # scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)], # [0.5 * tanh(-0.5 + 1.6), 0.5 * tanh(-0.5 + 0.7), 0.5 * tanh(-0.5 - 0.8)]]] # = [[[0.49550372683, 0.47340300642, 0.14565630622], # [0.40024951088, 0.09868766011, -0.43086157965]]] # Expected attention distribution = softmax(scores) with zeros in # positions where v_mask == False. # => attention_distribution000 # = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642)) # = 0.50552495521 # attention_distribution001 # = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642)) # = 0.49447504478 # attention_distribution002 = 0 # => attention_distribution010 # = exp(0.40024951088)/(exp(0.40024951088) + exp(0.09868766011)) # = 0.57482427975 # attention_distribution011 # = exp(0.09868766011)/(exp(0.40024951088) + exp(0.09868766011)) # = 0.42517572025 # attention_distribution012 = 0 # # Expected tensor of shape [1, 2, 1] with zeros where q_mask == False. 
    # expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8
    #             = 1.15497245968
    # expected010 = 0
    # pylint:enable=line-too-long
    expected = np.array([[[1.15497245968], [0.]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_serialization(self):
    # Test serialization with use_scale
    layer = dense_attention.AdditiveAttention(use_scale=True)

    config = keras.layers.serialize(layer)
    new_layer = keras.layers.deserialize(config)
    self.assertEqual(new_layer.use_scale, True)

    config = layer.get_config()
    new_layer = dense_attention.AdditiveAttention.from_config(config)
    self.assertEqual(new_layer.use_scale, True)


@test_util.run_all_in_graph_and_eager_modes
class LowerTriangularMaskTest(test.TestCase):

  def test_square_shape(self):
    actual = dense_attention._lower_triangular_mask([3, 3])
    expected = np.array(
        [[True, False, False], [True, True, False], [True, True, True]],
        dtype=np.bool_)
    self.assertAllEqual(expected, actual)

  def test_orthogonal_shape(self):
    actual = dense_attention._lower_triangular_mask([3, 2])
    expected = np.array(
        [[True, False], [True, True], [True, True]], dtype=np.bool_)
    self.assertAllEqual(expected, actual)

  def test_three_dim(self):
    actual = dense_attention._lower_triangular_mask([1, 3, 3])
    expected = np.array(
        [[[True, False, False], [True, True, False], [True, True, True]]],
        dtype=np.bool_)
    self.assertAllEqual(expected, actual)


if __name__ == '__main__':
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/dense_attention_test.py
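Every hand-computed expectation in the tests above follows the same recipe: suppress masked score positions, softmax over the last axis, then take the attention-weighted sum of `value`. A NumPy reference sketch of that computation (the function name is mine, not part of the layer's API) reproduces, for example, the `test_multi_dim_with_mask` constant:

import numpy as np

def reference_apply_scores(scores, value, scores_mask=None):
  """Masked softmax over the last axis, then weighted sum of value."""
  if scores_mask is not None:
    # Push masked positions toward -inf so they receive ~0 weight.
    scores = np.where(scores_mask, scores, -1e9)
  weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
  weights /= weights.sum(axis=-1, keepdims=True)
  return np.matmul(weights, value)

scores = np.array([[[1., 0., 1.]]], dtype=np.float32)
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
mask = np.array([[[True, True, False]]])
print(reference_apply_scores(scores, v, mask))  # ~[[[1.35795272]]]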
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Convolutional-recurrent layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.layers.recurrent import _standardize_args
from tensorflow.python.keras.layers.recurrent import DropoutRNNCellMixin
from tensorflow.python.keras.layers.recurrent import RNN
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.util.tf_export import keras_export


class ConvRNN2D(RNN):
  """Base class for convolutional-recurrent layers.

  Arguments:
    cell: A RNN cell instance. A RNN cell is a class that has:
      - a `call(input_at_t, states_at_t)` method, returning
        `(output_at_t, states_at_t_plus_1)`. The call method of the
        cell can also take the optional argument `constants`, see
        section "Note on passing external constants" below.
      - a `state_size` attribute. This can be a single integer
        (single state) in which case it is
        the number of channels of the recurrent state
        (which should be the same as the number of channels of the cell
        output). This can also be a list/tuple of integers
        (one size per state). In this case, the first entry
        (`state_size[0]`) should be the same as
        the size of the cell output.
    return_sequences: Boolean. Whether to return the last output
      in the output sequence, or the full sequence.
    return_state: Boolean. Whether to return the last state
      in addition to the output.
    go_backwards: Boolean (default False).
      If True, process the input sequence backwards and return the
      reversed sequence.
    stateful: Boolean (default False). If True, the last state
      for each sample at index i in a batch will be used as initial
      state for the sample of index i in the following batch.
    input_shape: Use this argument to specify the shape of the
      input when this layer is the first one in a model.

  Call arguments:
    inputs: A 5D tensor.
    mask: Binary tensor of shape `(samples, timesteps)` indicating whether
      a given timestep should be masked.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. This argument is passed to the cell
      when calling it. This is for use with cells that use dropout.
    initial_state: List of initial state tensors to be passed to the first
      call of the cell.
constants: List of constant tensors to be passed to the cell at each timestep. Input shape: 5D tensor with shape: `(samples, timesteps, channels, rows, cols)` if data_format='channels_first' or 5D tensor with shape: `(samples, timesteps, rows, cols, channels)` if data_format='channels_last'. Output shape: - If `return_state`: a list of tensors. The first tensor is the output. The remaining tensors are the last states, each 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. - If `return_sequences`: 5D tensor with shape: `(samples, timesteps, filters, new_rows, new_cols)` if data_format='channels_first' or 5D tensor with shape: `(samples, timesteps, new_rows, new_cols, filters)` if data_format='channels_last'. - Else, 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if data_format='channels_last'. Masking: This layer supports masking for input data with a variable number of timesteps. Note on using statefulness in RNNs: You can set RNN layers to be 'stateful', which means that the states computed for the samples in one batch will be reused as initial states for the samples in the next batch. This assumes a one-to-one mapping between samples in different successive batches. To enable statefulness: - Specify `stateful=True` in the layer constructor. - Specify a fixed batch size for your model, by passing - If sequential model: `batch_input_shape=(...)` to the first layer in your model. - If functional model with 1 or more Input layers: `batch_shape=(...)` to all the first layers in your model. This is the expected shape of your inputs *including the batch size*. It should be a tuple of integers, e.g. `(32, 10, 100, 100, 32)`. Note that the number of rows and columns should be specified too. - Specify `shuffle=False` when calling fit(). To reset the states of your model, call `.reset_states()` on either a specific layer, or on your entire model. Note on specifying the initial state of RNNs: You can specify the initial state of RNN layers symbolically by calling them with the keyword argument `initial_state`. The value of `initial_state` should be a tensor or list of tensors representing the initial state of the RNN layer. You can specify the initial state of RNN layers numerically by calling `reset_states` with the keyword argument `states`. The value of `states` should be a numpy array or list of numpy arrays representing the initial state of the RNN layer. Note on passing external constants to RNNs: You can pass "external" constants to the cell using the `constants` keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This requires that the `cell.call` method accepts the same keyword argument `constants`. Such constants can be used to condition the cell transformation on additional static inputs (not changing over time), a.k.a. an attention mechanism. """ def __init__(self, cell, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False, **kwargs): if unroll: raise TypeError('Unrolling isn\'t possible with ' 'convolutional RNNs.') if isinstance(cell, (list, tuple)): # The StackedConvRNN2DCells isn't implemented yet. 
      raise TypeError('It is not possible at the moment to '
                      'stack convolutional cells.')
    super(ConvRNN2D, self).__init__(cell,
                                    return_sequences,
                                    return_state,
                                    go_backwards,
                                    stateful,
                                    unroll,
                                    **kwargs)
    self.input_spec = [InputSpec(ndim=5)]
    self.states = None
    self._num_constants = None

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    if isinstance(input_shape, list):
      input_shape = input_shape[0]

    cell = self.cell
    if cell.data_format == 'channels_first':
      rows = input_shape[3]
      cols = input_shape[4]
    elif cell.data_format == 'channels_last':
      rows = input_shape[2]
      cols = input_shape[3]
    rows = conv_utils.conv_output_length(rows,
                                         cell.kernel_size[0],
                                         padding=cell.padding,
                                         stride=cell.strides[0],
                                         dilation=cell.dilation_rate[0])
    cols = conv_utils.conv_output_length(cols,
                                         cell.kernel_size[1],
                                         padding=cell.padding,
                                         stride=cell.strides[1],
                                         dilation=cell.dilation_rate[1])

    if cell.data_format == 'channels_first':
      output_shape = input_shape[:2] + (cell.filters, rows, cols)
    elif cell.data_format == 'channels_last':
      output_shape = input_shape[:2] + (rows, cols, cell.filters)

    if not self.return_sequences:
      output_shape = output_shape[:1] + output_shape[2:]

    if self.return_state:
      output_shape = [output_shape]
      if cell.data_format == 'channels_first':
        output_shape += [(input_shape[0], cell.filters, rows, cols)
                         for _ in range(2)]
      elif cell.data_format == 'channels_last':
        output_shape += [(input_shape[0], rows, cols, cell.filters)
                         for _ in range(2)]
    return output_shape

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    # Note input_shape will be list of shapes of initial states and
    # constants if these are passed in __call__.
    if self._num_constants is not None:
      constants_shape = input_shape[-self._num_constants:]  # pylint: disable=E1130
    else:
      constants_shape = None

    if isinstance(input_shape, list):
      input_shape = input_shape[0]

    batch_size = input_shape[0] if self.stateful else None
    self.input_spec[0] = InputSpec(shape=(batch_size, None) + input_shape[2:5])

    # allow cell (if layer) to build before we set or validate state_spec
    if isinstance(self.cell, Layer):
      step_input_shape = (input_shape[0],) + input_shape[2:]
      if constants_shape is not None:
        self.cell.build([step_input_shape] + constants_shape)
      else:
        self.cell.build(step_input_shape)

    # set or validate state_spec
    if hasattr(self.cell.state_size, '__len__'):
      state_size = list(self.cell.state_size)
    else:
      state_size = [self.cell.state_size]

    if self.state_spec is not None:
      # initial_state was passed in call, check compatibility
      if self.cell.data_format == 'channels_first':
        ch_dim = 1
      elif self.cell.data_format == 'channels_last':
        ch_dim = 3
      if [spec.shape[ch_dim] for spec in self.state_spec] != state_size:
        raise ValueError(
            'An initial_state was passed that is not compatible with '
            '`cell.state_size`. 
Received `state_spec`={}; ' 'However `cell.state_size` is ' '{}'.format([spec.shape for spec in self.state_spec], self.cell.state_size)) else: if self.cell.data_format == 'channels_first': self.state_spec = [InputSpec(shape=(None, dim, None, None)) for dim in state_size] elif self.cell.data_format == 'channels_last': self.state_spec = [InputSpec(shape=(None, None, None, dim)) for dim in state_size] if self.stateful: self.reset_states() self.built = True def get_initial_state(self, inputs): # (samples, timesteps, rows, cols, filters) initial_state = K.zeros_like(inputs) # (samples, rows, cols, filters) initial_state = K.sum(initial_state, axis=1) shape = list(self.cell.kernel_shape) shape[-1] = self.cell.filters initial_state = self.cell.input_conv(initial_state, array_ops.zeros(tuple(shape)), padding=self.cell.padding) if hasattr(self.cell.state_size, '__len__'): return [initial_state for _ in self.cell.state_size] else: return [initial_state] def __call__(self, inputs, initial_state=None, constants=None, **kwargs): inputs, initial_state, constants = _standardize_args( inputs, initial_state, constants, self._num_constants) if initial_state is None and constants is None: return super(ConvRNN2D, self).__call__(inputs, **kwargs) # If any of `initial_state` or `constants` are specified and are Keras # tensors, then add them to the inputs and temporarily modify the # input_spec to include them. additional_inputs = [] additional_specs = [] if initial_state is not None: kwargs['initial_state'] = initial_state additional_inputs += initial_state self.state_spec = [] for state in initial_state: shape = K.int_shape(state) self.state_spec.append(InputSpec(shape=shape)) additional_specs += self.state_spec if constants is not None: kwargs['constants'] = constants additional_inputs += constants self.constants_spec = [InputSpec(shape=K.int_shape(constant)) for constant in constants] self._num_constants = len(constants) additional_specs += self.constants_spec # at this point additional_inputs cannot be empty for tensor in additional_inputs: if K.is_keras_tensor(tensor) != K.is_keras_tensor(additional_inputs[0]): raise ValueError('The initial state or constants of an RNN' ' layer cannot be specified with a mix of' ' Keras tensors and non-Keras tensors') if K.is_keras_tensor(additional_inputs[0]): # Compute the full input spec, including state and constants full_input = [inputs] + additional_inputs full_input_spec = self.input_spec + additional_specs # Perform the call with temporarily replaced input_spec original_input_spec = self.input_spec self.input_spec = full_input_spec output = super(ConvRNN2D, self).__call__(full_input, **kwargs) self.input_spec = original_input_spec return output else: return super(ConvRNN2D, self).__call__(inputs, **kwargs) def call(self, inputs, mask=None, training=None, initial_state=None, constants=None): # note that the .build() method of subclasses MUST define # self.input_spec and self.state_spec with complete input shapes. 
if isinstance(inputs, list): inputs = inputs[0] if initial_state is not None: pass elif self.stateful: initial_state = self.states else: initial_state = self.get_initial_state(inputs) if isinstance(mask, list): mask = mask[0] if len(initial_state) != len(self.states): raise ValueError('Layer has ' + str(len(self.states)) + ' states but was passed ' + str(len(initial_state)) + ' initial states.') timesteps = K.int_shape(inputs)[1] kwargs = {} if generic_utils.has_arg(self.cell.call, 'training'): kwargs['training'] = training if constants: if not generic_utils.has_arg(self.cell.call, 'constants'): raise ValueError('RNN cell does not support constants') def step(inputs, states): constants = states[-self._num_constants:] states = states[:-self._num_constants] return self.cell.call(inputs, states, constants=constants, **kwargs) else: def step(inputs, states): return self.cell.call(inputs, states, **kwargs) last_output, outputs, states = K.rnn(step, inputs, initial_state, constants=constants, go_backwards=self.go_backwards, mask=mask, input_length=timesteps) if self.stateful: updates = [] for i in range(len(states)): updates.append(K.update(self.states[i], states[i])) self.add_update(updates) if self.return_sequences: output = outputs else: output = last_output if self.return_state: if not isinstance(states, (list, tuple)): states = [states] else: states = list(states) return [output] + states else: return output def reset_states(self, states=None): if not self.stateful: raise AttributeError('Layer must be stateful.') input_shape = self.input_spec[0].shape state_shape = self.compute_output_shape(input_shape) if self.return_state: state_shape = state_shape[0] if self.return_sequences: state_shape = state_shape[:1].concatenate(state_shape[2:]) if None in state_shape: raise ValueError('If a RNN is stateful, it needs to know ' 'its batch size. Specify the batch size ' 'of your input tensors: \n' '- If using a Sequential model, ' 'specify the batch size by passing ' 'a `batch_input_shape` ' 'argument to your first layer.\n' '- If using the functional API, specify ' 'the time dimension by passing a ' '`batch_shape` argument to your Input layer.\n' 'The same thing goes for the number of rows and ' 'columns.') # helper function def get_tuple_shape(nb_channels): result = list(state_shape) if self.cell.data_format == 'channels_first': result[1] = nb_channels elif self.cell.data_format == 'channels_last': result[3] = nb_channels else: raise KeyError return tuple(result) # initialize state if None if self.states[0] is None: if hasattr(self.cell.state_size, '__len__'): self.states = [K.zeros(get_tuple_shape(dim)) for dim in self.cell.state_size] else: self.states = [K.zeros(get_tuple_shape(self.cell.state_size))] elif states is None: if hasattr(self.cell.state_size, '__len__'): for state, dim in zip(self.states, self.cell.state_size): K.set_value(state, np.zeros(get_tuple_shape(dim))) else: K.set_value(self.states[0], np.zeros(get_tuple_shape(self.cell.state_size))) else: if not isinstance(states, (list, tuple)): states = [states] if len(states) != len(self.states): raise ValueError('Layer ' + self.name + ' expects ' + str(len(self.states)) + ' states, ' + 'but it received ' + str(len(states)) + ' state values. 
Input received: ' + str(states)) for index, (value, state) in enumerate(zip(states, self.states)): if hasattr(self.cell.state_size, '__len__'): dim = self.cell.state_size[index] else: dim = self.cell.state_size if value.shape != get_tuple_shape(dim): raise ValueError('State ' + str(index) + ' is incompatible with layer ' + self.name + ': expected shape=' + str(get_tuple_shape(dim)) + ', found shape=' + str(value.shape)) # TODO(anjalisridhar): consider batch calls to `set_value`. K.set_value(state, value) class ConvLSTM2DCell(DropoutRNNCellMixin, Layer): """Cell class for the ConvLSTM2D layer. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of n integers, specifying the dimensions of the convolution window. strides: An integer or tuple/list of n integers, specifying the strides of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". dilation_rate: An integer or tuple/list of n integers, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function to use. If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at initialization. Use in combination with `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al.] (http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf) kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Call arguments: inputs: A 4D tensor. states: List of state tensors corresponding to the previous timestep. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used. 
""" def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., **kwargs): super(ConvLSTM2DCell, self).__init__(**kwargs) self.filters = filters self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size') self.strides = conv_utils.normalize_tuple(strides, 2, 'strides') self.padding = conv_utils.normalize_padding(padding) self.data_format = conv_utils.normalize_data_format(data_format) self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate') self.activation = activations.get(activation) self.recurrent_activation = activations.get(recurrent_activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.recurrent_initializer = initializers.get(recurrent_initializer) self.bias_initializer = initializers.get(bias_initializer) self.unit_forget_bias = unit_forget_bias self.kernel_regularizer = regularizers.get(kernel_regularizer) self.recurrent_regularizer = regularizers.get(recurrent_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.recurrent_constraint = constraints.get(recurrent_constraint) self.bias_constraint = constraints.get(bias_constraint) self.dropout = min(1., max(0., dropout)) self.recurrent_dropout = min(1., max(0., recurrent_dropout)) self.state_size = (self.filters, self.filters) def build(self, input_shape): if self.data_format == 'channels_first': channel_axis = 1 else: channel_axis = -1 if input_shape[channel_axis] is None: raise ValueError('The channel dimension of the inputs ' 'should be defined. 
Found `None`.') input_dim = input_shape[channel_axis] kernel_shape = self.kernel_size + (input_dim, self.filters * 4) self.kernel_shape = kernel_shape recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters * 4) self.kernel = self.add_weight(shape=kernel_shape, initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) self.recurrent_kernel = self.add_weight( shape=recurrent_kernel_shape, initializer=self.recurrent_initializer, name='recurrent_kernel', regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) if self.use_bias: if self.unit_forget_bias: def bias_initializer(_, *args, **kwargs): return K.concatenate([ self.bias_initializer((self.filters,), *args, **kwargs), initializers.Ones()((self.filters,), *args, **kwargs), self.bias_initializer((self.filters * 2,), *args, **kwargs), ]) else: bias_initializer = self.bias_initializer self.bias = self.add_weight( shape=(self.filters * 4,), name='bias', initializer=bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None self.built = True def call(self, inputs, states, training=None): h_tm1 = states[0] # previous memory state c_tm1 = states[1] # previous carry state # dropout matrices for input units dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4) # dropout matrices for recurrent units rec_dp_mask = self.get_recurrent_dropout_mask_for_cell( h_tm1, training, count=4) if 0 < self.dropout < 1.: inputs_i = inputs * dp_mask[0] inputs_f = inputs * dp_mask[1] inputs_c = inputs * dp_mask[2] inputs_o = inputs * dp_mask[3] else: inputs_i = inputs inputs_f = inputs inputs_c = inputs inputs_o = inputs if 0 < self.recurrent_dropout < 1.: h_tm1_i = h_tm1 * rec_dp_mask[0] h_tm1_f = h_tm1 * rec_dp_mask[1] h_tm1_c = h_tm1 * rec_dp_mask[2] h_tm1_o = h_tm1 * rec_dp_mask[3] else: h_tm1_i = h_tm1 h_tm1_f = h_tm1 h_tm1_c = h_tm1 h_tm1_o = h_tm1 (kernel_i, kernel_f, kernel_c, kernel_o) = array_ops.split(self.kernel, 4, axis=3) (recurrent_kernel_i, recurrent_kernel_f, recurrent_kernel_c, recurrent_kernel_o) = array_ops.split(self.recurrent_kernel, 4, axis=3) if self.use_bias: bias_i, bias_f, bias_c, bias_o = array_ops.split(self.bias, 4) else: bias_i, bias_f, bias_c, bias_o = None, None, None, None x_i = self.input_conv(inputs_i, kernel_i, bias_i, padding=self.padding) x_f = self.input_conv(inputs_f, kernel_f, bias_f, padding=self.padding) x_c = self.input_conv(inputs_c, kernel_c, bias_c, padding=self.padding) x_o = self.input_conv(inputs_o, kernel_o, bias_o, padding=self.padding) h_i = self.recurrent_conv(h_tm1_i, recurrent_kernel_i) h_f = self.recurrent_conv(h_tm1_f, recurrent_kernel_f) h_c = self.recurrent_conv(h_tm1_c, recurrent_kernel_c) h_o = self.recurrent_conv(h_tm1_o, recurrent_kernel_o) i = self.recurrent_activation(x_i + h_i) f = self.recurrent_activation(x_f + h_f) c = f * c_tm1 + i * self.activation(x_c + h_c) o = self.recurrent_activation(x_o + h_o) h = o * self.activation(c) return h, [h, c] def input_conv(self, x, w, b=None, padding='valid'): conv_out = K.conv2d(x, w, strides=self.strides, padding=padding, data_format=self.data_format, dilation_rate=self.dilation_rate) if b is not None: conv_out = K.bias_add(conv_out, b, data_format=self.data_format) return conv_out def recurrent_conv(self, x, w): conv_out = K.conv2d(x, w, strides=(1, 1), padding='same', data_format=self.data_format) return conv_out def get_config(self): config = {'filters': self.filters, 'kernel_size': 
self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize( self.recurrent_activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize( self.kernel_initializer), 'recurrent_initializer': initializers.serialize( self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'unit_forget_bias': self.unit_forget_bias, 'kernel_regularizer': regularizers.serialize( self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize( self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'kernel_constraint': constraints.serialize( self.kernel_constraint), 'recurrent_constraint': constraints.serialize( self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout} base_config = super(ConvLSTM2DCell, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.ConvLSTM2D') class ConvLSTM2D(ConvRNN2D): """Convolutional LSTM. It is similar to an LSTM layer, but the input transformations and recurrent transformations are both convolutional. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of n integers, specifying the dimensions of the convolution window. strides: An integer or tuple/list of n integers, specifying the strides of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, time, ..., channels)` while `channels_first` corresponds to inputs with shape `(batch, time, channels, ...)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". dilation_rate: An integer or tuple/list of n integers, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function to use. By default hyperbolic tangent activation function is applied (`tanh(x)`). recurrent_activation: Activation function to use for the recurrent step. use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at initialization. Use in combination with `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al.] (http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf) kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. 
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to the output of the
      layer (its "activation").
    kernel_constraint: Constraint function applied to
      the `kernel` weights matrix.
    recurrent_constraint: Constraint function applied to
      the `recurrent_kernel` weights matrix.
    bias_constraint: Constraint function applied to the bias vector.
    return_sequences: Boolean. Whether to return the last output
      in the output sequence, or the full sequence.
    go_backwards: Boolean (default False).
      If True, process the input sequence backwards.
    stateful: Boolean (default False). If True, the last state
      for each sample at index i in a batch will be used as initial
      state for the sample of index i in the following batch.
    dropout: Float between 0 and 1.
      Fraction of the units to drop for
      the linear transformation of the inputs.
    recurrent_dropout: Float between 0 and 1.
      Fraction of the units to drop for
      the linear transformation of the recurrent state.

  Call arguments:
    inputs: A 5D tensor.
    mask: Binary tensor of shape `(samples, timesteps)` indicating whether
      a given timestep should be masked.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. This argument is passed to the cell
      when calling it. This is only relevant if `dropout` or
      `recurrent_dropout` are set.
    initial_state: List of initial state tensors to be passed to the first
      call of the cell.

  Input shape:
    - If data_format='channels_first'
        5D tensor with shape:
        `(samples, time, channels, rows, cols)`
    - If data_format='channels_last'
        5D tensor with shape:
        `(samples, time, rows, cols, channels)`

  Output shape:
    - If `return_sequences`
      - If data_format='channels_first'
          5D tensor with shape:
          `(samples, time, filters, output_row, output_col)`
      - If data_format='channels_last'
          5D tensor with shape:
          `(samples, time, output_row, output_col, filters)`
    - Else
      - If data_format='channels_first'
          4D tensor with shape:
          `(samples, filters, output_row, output_col)`
      - If data_format='channels_last'
          4D tensor with shape:
          `(samples, output_row, output_col, filters)`
      where `output_row` and `output_col` depend on the shape of the filter
      and the padding.

  Raises:
    ValueError: in case of invalid constructor arguments.

  References:
    - [Convolutional LSTM Network: A Machine Learning Approach for
      Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1)
      The current implementation does not include the feedback loop on the
      cell's output.
""" def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, return_sequences=False, go_backwards=False, stateful=False, dropout=0., recurrent_dropout=0., **kwargs): cell = ConvLSTM2DCell(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, unit_forget_bias=unit_forget_bias, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, dtype=kwargs.get('dtype')) super(ConvLSTM2D, self).__init__(cell, return_sequences=return_sequences, go_backwards=go_backwards, stateful=stateful, **kwargs) self.activity_regularizer = regularizers.get(activity_regularizer) def call(self, inputs, mask=None, training=None, initial_state=None): self.cell.reset_dropout_mask() self.cell.reset_recurrent_dropout_mask() return super(ConvLSTM2D, self).call(inputs, mask=mask, training=training, initial_state=initial_state) @property def filters(self): return self.cell.filters @property def kernel_size(self): return self.cell.kernel_size @property def strides(self): return self.cell.strides @property def padding(self): return self.cell.padding @property def data_format(self): return self.cell.data_format @property def dilation_rate(self): return self.cell.dilation_rate @property def activation(self): return self.cell.activation @property def recurrent_activation(self): return self.cell.recurrent_activation @property def use_bias(self): return self.cell.use_bias @property def kernel_initializer(self): return self.cell.kernel_initializer @property def recurrent_initializer(self): return self.cell.recurrent_initializer @property def bias_initializer(self): return self.cell.bias_initializer @property def unit_forget_bias(self): return self.cell.unit_forget_bias @property def kernel_regularizer(self): return self.cell.kernel_regularizer @property def recurrent_regularizer(self): return self.cell.recurrent_regularizer @property def bias_regularizer(self): return self.cell.bias_regularizer @property def kernel_constraint(self): return self.cell.kernel_constraint @property def recurrent_constraint(self): return self.cell.recurrent_constraint @property def bias_constraint(self): return self.cell.bias_constraint @property def dropout(self): return self.cell.dropout @property def recurrent_dropout(self): return self.cell.recurrent_dropout def get_config(self): config = {'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize( self.recurrent_activation), 'use_bias': self.use_bias, 
'kernel_initializer': initializers.serialize( self.kernel_initializer), 'recurrent_initializer': initializers.serialize( self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'unit_forget_bias': self.unit_forget_bias, 'kernel_regularizer': regularizers.serialize( self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize( self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize( self.activity_regularizer), 'kernel_constraint': constraints.serialize( self.kernel_constraint), 'recurrent_constraint': constraints.serialize( self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout} base_config = super(ConvLSTM2D, self).get_config() del base_config['cell'] return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config): return cls(**config)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/convolutional_recurrent.py
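As a quick illustration of the shape contract documented above (5D input; 4D or 5D output depending on `return_sequences`), here is a minimal usage sketch. It assumes the public `tf.keras` API of this release; the filter counts and frame dimensions are arbitrary illustrative choices, not values taken from the source.

# Illustrative ConvLSTM2D shape check (channels_last; sizes are arbitrary).
import numpy as np
import tensorflow as tf

# 4 samples, 10 timesteps, 40x40 single-channel frames.
frames = np.random.random((4, 10, 40, 40, 1)).astype('float32')

model = tf.keras.Sequential([
    tf.keras.layers.ConvLSTM2D(filters=8, kernel_size=(3, 3), padding='same',
                               return_sequences=True,
                               input_shape=(None, 40, 40, 1)),
    tf.keras.layers.ConvLSTM2D(filters=1, kernel_size=(3, 3), padding='same',
                               return_sequences=False),
])

# The first layer returns the full 5D sequence; the second collapses it to
# the last timestep's output, so the prediction is 4D.
print(model.predict(frames).shape)  # -> (4, 40, 40, 1)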
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for noise layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import keras from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test @keras_parameterized.run_all_keras_modes class NoiseLayersTest(keras_parameterized.TestCase): def test_GaussianNoise(self): testing_utils.layer_test( keras.layers.GaussianNoise, kwargs={'stddev': 1.}, input_shape=(3, 2, 3)) def test_GaussianDropout(self): testing_utils.layer_test( keras.layers.GaussianDropout, kwargs={'rate': 0.5}, input_shape=(3, 2, 3)) def test_AlphaDropout(self): testing_utils.layer_test( keras.layers.AlphaDropout, kwargs={'rate': 0.2}, input_shape=(3, 2, 3)) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/noise_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras' base preprocessing layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np from tensorflow.python.platform import test class PreprocessingLayerTest(test.TestCase): """Base test class for preprocessing layer API validation.""" # TODO(b/137303934): Consider incorporating something like this Close vs All # behavior into core tf.test.TestCase. def assertAllCloseOrEqual(self, a, b, msg=None): """Asserts that elements are close (if numeric) or equal (if string).""" if isinstance(a, collections.Mapping): self.assertEqual(len(a), len(b)) for key, a_value in a.items(): b_value = b[key] error_message = "{} ({})".format(msg, key) if msg else None self.assertAllCloseOrEqual(a_value, b_value, error_message) elif isinstance(a, (list, tuple)): self.assertEqual(len(a), len(b)) for a_value, b_value in zip(a, b): self.assertAllCloseOrEqual(a_value, b_value, msg) else: comparison_fn = ( self.assertAllClose if np.issubdtype(a.dtype, np.number) else self.assertAllEqual) comparison_fn(a, b, msg=msg) def assert_extracted_output_equal(self, combiner, acc1, acc2, msg=None): data_1 = combiner.extract(acc1) data_2 = combiner.extract(acc2) self.assertAllCloseOrEqual(data_1, data_2, msg=msg) def validate_accumulator_computation(self, combiner, data, expected): """Validate that various combinations of compute and merge are identical.""" if len(data) < 4: raise AssertionError("Data must have at least 4 elements.") data_0 = np.array([data[0]]) data_1 = np.array([data[1]]) data_2 = np.array(data[2:]) single_compute = combiner.compute(data) all_merge = combiner.merge([ combiner.compute(data_0), combiner.compute(data_1), combiner.compute(data_2) ]) self.assertAllCloseOrEqual( single_compute, all_merge, msg="Sharding data should not change the data output.") unordered_all_merge = combiner.merge([ combiner.compute(data_1), combiner.compute(data_2), combiner.compute(data_0) ]) self.assertAllCloseOrEqual( all_merge, unordered_all_merge, msg="The order of merge arguments should not change the data " "output.") hierarchical_merge = combiner.merge([ combiner.compute(data_1), combiner.merge([combiner.compute(data_2), combiner.compute(data_0)]) ]) self.assertAllCloseOrEqual( all_merge, hierarchical_merge, msg="Nesting merge arguments should not change the data output.") nested_compute = combiner.compute( data_0, combiner.compute(data_1, combiner.compute(data_2))) self.assertAllCloseOrEqual( all_merge, nested_compute, msg="Nesting compute arguments should not change the data output.") mixed_compute = combiner.merge([ combiner.compute(data_0), combiner.compute(data_1, combiner.compute(data_2)) ]) self.assertAllCloseOrEqual( all_merge, mixed_compute, msg="Mixing merge and compute calls should not change the data " "output.") 
self.assertAllCloseOrEqual(expected, all_merge) def validate_accumulator_extract(self, combiner, data, expected): """Validate the expected results of computing and extracting.""" acc = combiner.compute(data) extracted_data = combiner.extract(acc) self.assertAllCloseOrEqual(expected, extracted_data) def validate_accumulator_extract_and_restore(self, combiner, data, expected): """Validate that the extract<->restore loop loses no data.""" acc = combiner.compute(data) extracted_data = combiner.extract(acc) restored_acc = combiner.restore(extracted_data) self.assert_extracted_output_equal(combiner, acc, restored_acc) self.assertAllCloseOrEqual(expected, combiner.extract(restored_acc)) def validate_accumulator_serialize_and_deserialize(self, combiner, data, expected): """Validate that the serialize<->deserialize loop loses no data.""" acc = combiner.compute(data) serialized_data = combiner.serialize(acc) deserialized_data = combiner.deserialize(serialized_data) self.assertAllCloseOrEqual(acc, deserialized_data) def validate_accumulator_uniqueness(self, combiner, data): """Validate that every call to compute creates a unique accumulator.""" acc = combiner.compute(data) acc2 = combiner.compute(data) self.assertIsNot(acc, acc2) self.assertAllCloseOrEqual(acc, acc2)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/preprocessing/preprocessing_test_utils.py
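The validators above all enforce one invariant: computing over shards and merging the partial accumulators must match a single compute pass, regardless of shard order or nesting. The following standalone sketch illustrates that invariant with a hypothetical count/sum combiner; it is a toy, not the real Combiner base class.

# Toy count/sum "combiner" illustrating the shard-then-merge invariant
# that validate_accumulator_computation checks. Purely illustrative.
import numpy as np

def compute(values, accumulator=None):
  acc = {'count': int(values.size), 'sum': float(np.sum(values))}
  return acc if accumulator is None else merge([accumulator, acc])

def merge(accumulators):
  return {'count': sum(a['count'] for a in accumulators),
          'sum': sum(a['sum'] for a in accumulators)}

data = np.arange(10.0)
single = compute(data)
sharded = merge([compute(data[:1]), compute(data[1:2]), compute(data[2:])])
nested = compute(data[:1], compute(data[1:2], compute(data[2:])))
# Sharding and nesting of merges must not change the result.
assert single == sharded == nested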
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for keras.layers.preprocessing.normalization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.layers.preprocessing import normalization from tensorflow.python.keras.layers.preprocessing import normalization_v1 from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils from tensorflow.python.keras.utils.generic_utils import CustomObjectScope from tensorflow.python.platform import test def get_layer_class(): if context.executing_eagerly(): return normalization.Normalization else: return normalization_v1.Normalization @keras_parameterized.run_all_keras_modes class NormalizationTest(keras_parameterized.TestCase, preprocessing_test_utils.PreprocessingLayerTest): def test_layer_api_compatibility(self): cls = get_layer_class() with CustomObjectScope({"Normalization": cls}): output_data = testing_utils.layer_test( cls, kwargs={"axis": -1}, input_shape=(None, 3), input_data=np.array([[3, 1, 2], [6, 5, 4]], dtype=np.float32), validate_training=False, adapt_data=np.array([[1, 2, 1], [2, 3, 4], [1, 2, 1], [2, 3, 4]])) expected = np.array([[6., -6., -0.22222222], [18., 10., 0.66666667]]) self.assertAllClose(expected, output_data) def test_combiner_api_compatibility(self): data = np.array([[1], [2], [3], [4], [5]]) combiner = normalization.Normalization._NormalizingCombiner(axis=-1) expected = { "count": np.array(5.0), "variance": np.array([2.]), "mean": np.array([3.]) } self.validate_accumulator_extract_and_restore(combiner, data, expected) self.validate_accumulator_serialize_and_deserialize(combiner, data, expected) self.validate_accumulator_uniqueness(combiner, data) self.validate_accumulator_extract(combiner, data, expected) @parameterized.named_parameters( { "data": np.array([[1], [2], [3], [4], [5]]), "axis": -1, "expected": { "count": np.array(5.0), "variance": np.array([2.]), "mean": np.array([3.]) }, "testcase_name": "2d_single_element" }, { "data": np.array([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]), "axis": -1, "expected": { "count": np.array(5.0), "mean": np.array([3., 4.]), "variance": np.array([2., 2.]) }, "testcase_name": "2d_multi_element" }, { "data": np.array([[[1, 2]], [[2, 3]], [[3, 4]], [[4, 5]], [[5, 6]]]), "axis": 2, "expected": { "count": np.array(5.0), "mean": np.array([3., 4.]), "variance": np.array([2., 2.]) }, "testcase_name": "3d_multi_element" }, { "data": np.array([[[1, 2]], [[2, 3]], [[3, 4]], [[4, 5]], [[5, 6]]]), "axis": (1, 2), "expected": { "count": np.array(5.0), "mean": np.array([[3., 4.]]), "variance": np.array([[2., 2.]]) }, 
"testcase_name": "3d_multi_element_multi_axis" }, { "data": np.array([[[1, 2], [2, 3]], [[3, 4], [4, 5]], [[1, 2], [2, 3]], [[3, 4], [4, 5]]]), "axis": 1, "expected": { "count": np.array(8.0), "mean": np.array([2.5, 3.5]), "variance": np.array([1.25, 1.25]) }, "testcase_name": "3d_multi_element_internal_axis" }) def test_combiner_computation_multi_value_axis(self, data, axis, expected): combiner = normalization.Normalization._NormalizingCombiner(axis=axis) expected_accumulator = combiner._create_accumulator(**expected) self.validate_accumulator_computation(combiner, data, expected_accumulator) @parameterized.named_parameters( { "adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]]), "axis": -1, "test_data": np.array([[1.], [2.], [3.]]), "expected": np.array([[-1], [-.5], [0]]), "testcase_name": "2d_single_element" }, { "adapt_data": np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]]), "axis": 1, "test_data": np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]]), "expected": np.array([[[-1.2, -0.6, 0.], [-1.2, -0.6, 0.]], [[0., 0.6, 1.2], [0., 0.6, 1.2]]]), "testcase_name": "3d_internal_axis" }, { "adapt_data": np.array([[[1., 0., 3.], [2., 3., 4.]], [[3., -1., 5.], [4., 5., 8.]]]), "axis": (1, 2), "test_data": np.array([[[3., 1., -1.], [2., 5., 4.]], [[3., 0., 5.], [2., 5., 8.]]]), "expected": np.array([[[1., 6., -5.], [-1., 1., -0.5]], [[1., 2., 1.], [-1., 1., 0.5]]]), "testcase_name": "3d_multiple_axis" }) def test_layer_computation(self, adapt_data, axis, test_data, expected): cls = get_layer_class() layer = cls(axis=axis) layer.adapt(adapt_data) input_shape = tuple([None for _ in range(test_data.ndim - 1)]) input_data = keras.Input(shape=input_shape) output = layer(input_data) model = keras.Model(input_data, output) model._run_eagerly = testing_utils.should_run_eagerly() model._experimental_run_tf_function = testing_utils.should_run_tf_function() output_data = model.predict(test_data) self.assertAllClose(expected, output_data) def test_mean_setting_continued_adapt_failure(self): if not context.executing_eagerly(): self.skipTest("'assign' doesn't work in V1, so don't test in V1.") cls = get_layer_class() layer = cls() layer.build((2,)) layer.mean.assign([1.3, 2.0]) with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"): layer.adapt(np.array([[1, 2]]), reset_state=False) def test_var_setting_continued_adapt_failure(self): if not context.executing_eagerly(): self.skipTest("'assign' doesn't work in V1, so don't test in V1.") cls = get_layer_class() layer = cls() layer.build((2,)) layer.variance.assign([1.3, 2.0]) with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"): layer.adapt(np.array([[1, 2]]), reset_state=False) def test_weight_setting_continued_adapt_failure(self): cls = get_layer_class() layer = cls() layer.build((2,)) layer.set_weights([np.array([1.3, 2.0]), np.array([0.0, 1.0]), np.array(0)]) with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"): layer.adapt(np.array([[1, 2]]), reset_state=False) def test_weight_setting_no_count_continued_adapt_failure(self): cls = get_layer_class() layer = cls() layer.build((2,)) layer.set_weights([np.array([1.3, 2.0]), np.array([0.0, 1.0])]) with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"): layer.adapt(np.array([[1, 2]]), reset_state=False) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/preprocessing/normalization_test.py
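The expected accumulator values in the parameterized cases above are easy to verify by hand; for instance, the '2d_single_element' case is just the population mean and variance of [1, 2, 3, 4, 5]:

# Hand check of the '2d_single_element' expectations: count=5, mean=3, var=2.
import numpy as np

data = np.array([[1], [2], [3], [4], [5]], dtype=np.float64)
assert data.size == 5
assert np.mean(data) == 3.0
assert np.var(data) == 2.0  # np.var computes the population variance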
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tensorflow V1 version of the Normalization preprocessing layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras.engine.base_preprocessing_layer_v1 import CombinerPreprocessingLayer from tensorflow.python.keras.layers.preprocessing import normalization class Normalization(normalization.Normalization, CombinerPreprocessingLayer): pass
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/preprocessing/normalization_v1.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras preprocessing layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import json import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine.base_preprocessing_layer import Combiner from tensorflow.python.keras.engine.base_preprocessing_layer import CombinerPreprocessingLayer from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.util import compat _COUNT_NAME = 'count' _MEAN_NAME = 'mean' _VARIANCE_NAME = 'variance' # TODO(momernick): Find a good example of normalization? class Normalization(CombinerPreprocessingLayer): """Feature-wise normalization of the data. This layer will coerce its inputs into a distribution centered around 0. It accomplishes this by precomputing the mean and variance of the data, and calling (input-mean)/var at runtime. What happens in `adapt`: Compute mean and variance of the data and store them as the layer's weights. `adapt` should be called before `fit`, `evaluate`, or `predict`. Attributes: axis: Integer or tuple of integers, the axis or axes that should be normalized (typically the features axis). We will normalize each element in the specified axis. The default is '-1' (the innermost axis); 0 (the batch axis) is not allowed. """ def __init__(self, axis=-1, dtype=None, **kwargs): # This ensures that if the value of K.floatx() changes after file-loading # time, the dtype value will change to reflect it. dtype = dtype or K.floatx() super(Normalization, self).__init__( combiner=Normalization._NormalizingCombiner(axis), dtype=dtype, **kwargs) if axis == 0: raise ValueError('The argument \'axis\' may not be 0.') self.axis = axis def build(self, input_shape): self._broadcast_shape = [1 for _ in range(len(input_shape))] if isinstance(self.axis, (tuple, list)): mean_and_var_shape = [] for i in self.axis: mean_and_var_shape.append(input_shape[i]) self._broadcast_shape[i] = input_shape[i] else: mean_and_var_shape = input_shape[self.axis] self._broadcast_shape[self.axis] = input_shape[self.axis] # count is not used in this class's call() method, but is used to re-create # the accumulator during multiple calls to 'adapt'. # TODO(omalleyt): should mean and variance be set to self.dtype? 
self.mean = self._add_state_variable( name=_MEAN_NAME, shape=mean_and_var_shape, dtype=K.floatx(), initializer=init_ops.zeros_initializer) self.variance = self._add_state_variable( name=_VARIANCE_NAME, shape=mean_and_var_shape, dtype=K.floatx(), initializer=init_ops.zeros_initializer) self.count = self._add_state_variable( name=_COUNT_NAME, shape=(), dtype=dtypes.int32, initializer=init_ops.zeros_initializer) super(Normalization, self).build(input_shape) def call(self, inputs): # We need to reshape the mean and variance data to ensure that Tensorflow # broadcasts the data correctly. mean = array_ops.reshape(self.mean, self._broadcast_shape) variance = array_ops.reshape(self.variance, self._broadcast_shape) return (inputs - mean) / variance def compute_output_shape(self, input_shape): return input_shape def compute_output_signature(self, input_spec): return input_spec def get_config(self): config = {'axis': self.axis} base_config = super(Normalization, self).get_config() return dict(list(base_config.items()) + list(config.items())) def set_weights(self, weights): """Override for set_weights to ensure we can set just mean/var weights.""" if len(weights) == 2: weights.append(np.array(0)) super(Normalization, self).set_weights(weights) class _NormalizingCombiner(Combiner): """Combiner for the Normalization preprocessing layer. This class encapsulates the computations for finding the mean and variance of a set of data in a stable and numerically correct way. Its associated accumulator is a namedtuple('count', 'mean', 'variance'). Attributes: axis: The axis to compute mean and var over. """ def __init__(self, axis): self.axis = axis def compute(self, values, accumulator=None): """Compute a step in this computation, returning a new accumulator.""" # This is the shape of all reduced axes (not specified in 'axis'). reduction_counts = np.delete(values.shape, self.axis) # We get the number of elements that will be reduced by multiplying all # values of 'shape' corresponding to the reduced axes. count = np.prod(reduction_counts, dtype=np.int32) # We want to reduce across dimensions except those specified in 'axis' # when using np.mean or np.variance; create the tuple of axes to reduce # over here. reduction_axes = tuple(np.delete(range(values.ndim), self.axis)) mean = np.mean(values, axis=reduction_axes, dtype=np.float64) variance = np.var(values, axis=reduction_axes, dtype=np.float64) # Create an accumulator with our new data and either return it or combine # it with the passed accumulator. sanitized_accumulator = self._create_accumulator(count, mean, variance) if accumulator is None: return sanitized_accumulator else: return self.merge([accumulator, sanitized_accumulator]) def merge(self, accumulators): """Merge several accumulators to a single accumulator.""" # Combine accumulators and return the result. combined_count = np.sum( [accumulator.count for accumulator in accumulators]) # To combine accumulator means, we weight each accumulator's mean by the # number of elements that were accumulated, and then divide by the # total number of elements. combined_mean = np.add.reduce([ accumulator.mean * accumulator.count for accumulator in accumulators ]) / combined_count # The variance is computed using the lack-of-fit sum of squares # formula (see https://en.wikipedia.org/wiki/Lack-of-fit_sum_of_squares). 
def variance_contribution(accumulator): return accumulator.count * ( accumulator.variance + np.square(accumulator.mean - combined_mean)) combined_variance = np.add.reduce([ variance_contribution(accumulator) for accumulator in accumulators ]) / combined_count return self._create_accumulator(combined_count, combined_mean, combined_variance) def extract(self, accumulator): """Convert an accumulator into a dict of output values.""" return { _COUNT_NAME: accumulator.count, _MEAN_NAME: accumulator.mean, _VARIANCE_NAME: accumulator.variance } def restore(self, output): """Create an accumulator based on 'output'.""" # There is no special internal state here, so we just return the relevant # internal value. count = output[_COUNT_NAME] mean = output[_MEAN_NAME] var = output[_VARIANCE_NAME] if (count == 0 and (mean.any() != 0.0 or var.any() != 0.0)): raise RuntimeError( 'The mean and/or variance of a Normalization preprocessing layer ' "were set without also setting 'count'. If 'count' is not also set," " 'adapt' cannot be called unless the 'reset_state' arg is True.") return self._create_accumulator(output[_COUNT_NAME], output[_MEAN_NAME], output[_VARIANCE_NAME]) def serialize(self, accumulator): """Serialize an accumulator for a remote call.""" output_dict = { _COUNT_NAME: accumulator.count.tolist(), _MEAN_NAME: accumulator.mean.tolist(), _VARIANCE_NAME: accumulator.variance.tolist() } return compat.as_bytes(json.dumps(output_dict)) def deserialize(self, encoded_accumulator): """Deserialize an accumulator received from 'serialize()'.""" value_dict = json.loads(compat.as_text(encoded_accumulator)) return self._create_accumulator( np.array(value_dict[_COUNT_NAME]), np.array(value_dict[_MEAN_NAME]), np.array(value_dict[_VARIANCE_NAME])) def _create_accumulator(self, count, mean, variance): """Convert any 'nan' values in the given accumulator to numeric values.""" return collections.namedtuple( 'Accumulator', ['count', 'mean', 'variance'])(np.array(count), np.nan_to_num(mean), np.nan_to_num(variance))
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/layers/preprocessing/normalization.py
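The merge() method above combines per-shard statistics with the lack-of-fit sum-of-squares identity: the pooled variance is the count-weighted average of each shard's variance plus the squared offset of its mean from the pooled mean. A small numpy sketch (synthetic data, illustrative only) verifying that identity against a single-pass computation:

# Numeric check of the pooled mean/variance identity used in merge().
import numpy as np

a = np.random.randn(100)
b = np.random.randn(50) + 3.0  # a shard with a different mean

n1, n2 = a.size, b.size
m1, m2 = a.mean(), b.mean()
v1, v2 = a.var(), b.var()

n = n1 + n2
m = (n1 * m1 + n2 * m2) / n  # count-weighted mean
v = (n1 * (v1 + (m1 - m) ** 2) + n2 * (v2 + (m2 - m) ** 2)) / n

full = np.concatenate([a, b])
assert np.isclose(m, full.mean()) and np.isclose(v, full.var())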
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Built-in WideNDeep model classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras import activations from tensorflow.python.keras import backend as K from tensorflow.python.keras import layers as layer_module from tensorflow.python.keras.engine import base_layer from tensorflow.python.keras.engine import training from tensorflow.python.keras.utils import generic_utils from tensorflow.python.util.tf_export import keras_export @keras_export('keras.experimental.WideDeepModel') class WideDeepModel(training.Model): r"""Wide & Deep Model for regression and classification problems. This model jointly trains a linear model and a DNN model. Example: ```python linear_model = LinearModel() dnn_model = keras.Sequential([keras.layers.Dense(units=64), keras.layers.Dense(units=1)]) combined_model = WideDeepModel(linear_model, dnn_model) combined_model.compile(optimizer=['sgd', 'adam'], loss='mse', metrics=['mse']) # define dnn_inputs and linear_inputs as separate numpy arrays or # a single numpy array if dnn_inputs is same as linear_inputs. combined_model.fit([linear_inputs, dnn_inputs], y, epochs=epochs) # or define a single `tf.data.Dataset` that contains a single tensor or # separate tensors for dnn_inputs and linear_inputs. dataset = tf.data.Dataset.from_tensors(([linear_inputs, dnn_inputs], y)) combined_model.fit(dataset, epochs=epochs) ``` Both the linear and DNN models can be pre-compiled and trained separately before joint training: Example: ```python linear_model = LinearModel() linear_model.compile('adagrad', 'mse') linear_model.fit(linear_inputs, y, epochs=epochs) dnn_model = keras.Sequential([keras.layers.Dense(units=1)]) dnn_model.compile('rmsprop', 'mse') dnn_model.fit(dnn_inputs, y, epochs=epochs) combined_model = WideDeepModel(linear_model, dnn_model) combined_model.compile(optimizer=['sgd', 'adam'], loss='mse', metrics=['mse']) combined_model.fit([linear_inputs, dnn_inputs], y, epochs=epochs) ``` """ def __init__(self, linear_model, dnn_model, activation=None, **kwargs): """Create a Wide & Deep Model. Args: linear_model: a premade LinearModel, its output must match the output of the dnn model. dnn_model: a `tf.keras.Model`, its output must match the output of the linear model. activation: Activation function. Set it to None to maintain a linear activation. **kwargs: The keyword arguments that are passed on to BaseLayer.__init__. Allowed keyword arguments include `name`. 
""" super(WideDeepModel, self).__init__(**kwargs) self.linear_model = linear_model self.dnn_model = dnn_model self.activation = activations.get(activation) def call(self, inputs): if not isinstance(inputs, (tuple, list)) or len(inputs) != 2: linear_inputs = dnn_inputs = inputs else: linear_inputs, dnn_inputs = inputs linear_output = self.linear_model(linear_inputs) dnn_output = self.dnn_model(dnn_inputs) output = .5 * (linear_output + dnn_output) if self.activation: return self.activation(output) return output def _get_optimizers(self): if isinstance(self.optimizer, (tuple, list)): return (self.optimizer[0], self.optimizer[1]) else: return (self.optimizer, self.optimizer) # This does not support gradient scaling and LossScaleOptimizer. def _backwards(self, tape, loss): linear_vars = self.linear_model._unique_trainable_weights # pylint: disable=protected-access dnn_vars = self.dnn_model._unique_trainable_weights # pylint: disable=protected-access linear_grads, dnn_grads = tape.gradient(loss, (linear_vars, dnn_vars)) linear_optimizer, dnn_optimizer = self._get_optimizers() linear_optimizer.apply_gradients(zip(linear_grads, linear_vars)) dnn_optimizer.apply_gradients(zip(dnn_grads, dnn_vars)) return def _make_train_function(self): # TODO(tanzheny): This is a direct copy from super to make it work # refactor it so that common logic can be shared. has_recompiled = self._recompile_weights_loss_and_weighted_metrics() self._check_trainable_weights_consistency() # If we have re-compiled the loss/weighted metric sub-graphs then create # train function even if one exists already. This is because # `_feed_sample_weights` list has been updated on re-copmpile. if getattr(self, 'train_function', None) is None or has_recompiled: # Restore the compiled trainable state. current_trainable_state = self._get_trainable_state() self._set_trainable_state(self._compiled_trainable_state) inputs = ( self._feed_inputs + self._feed_targets + self._feed_sample_weights) if not isinstance(K.symbolic_learning_phase(), int): inputs += [K.symbolic_learning_phase()] linear_optimizer, dnn_optimizer = self._get_optimizers() with K.get_graph().as_default(): with K.name_scope('training'): # Training updates updates = [] linear_updates = linear_optimizer.get_updates( params=self.linear_model._unique_trainable_weights, # pylint: disable=protected-access loss=self.total_loss) updates += linear_updates dnn_updates = dnn_optimizer.get_updates( params=self.dnn_model._unique_trainable_weights, # pylint: disable=protected-access loss=self.total_loss) updates += dnn_updates # Unconditional updates updates += self.get_updates_for(None) # Conditional updates relevant to this model updates += self.get_updates_for(self.inputs) metrics = self._get_training_eval_metrics() metrics_tensors = [ m._call_result for m in metrics if hasattr(m, '_call_result') # pylint: disable=protected-access ] with K.name_scope('training'): # Gets loss and metrics. Updates weights at each call. 
fn = K.function( inputs, [self.total_loss] + metrics_tensors, updates=updates, name='train_function', **self._function_kwargs) setattr(self, 'train_function', fn) # Restore the current trainable state self._set_trainable_state(current_trainable_state) def get_config(self): linear_config = generic_utils.serialize_keras_object(self.linear_model) dnn_config = generic_utils.serialize_keras_object(self.dnn_model) config = { 'linear_model': linear_config, 'dnn_model': dnn_config, 'activation': activations.serialize(self.activation), } base_config = base_layer.Layer.get_config(self) return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): linear_config = config.pop('linear_model') linear_model = layer_module.deserialize(linear_config, custom_objects) dnn_config = config.pop('dnn_model') dnn_model = layer_module.deserialize(dnn_config, custom_objects) activation = activations.deserialize( config.pop('activation', None), custom_objects=custom_objects) return cls( linear_model=linear_model, dnn_model=dnn_model, activation=activation, **config)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/premade/wide_deep.py
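Putting the docstring examples above into a self-contained form, the following sketch jointly trains the wide and deep parts on synthetic data with one optimizer each. It assumes the `tf.keras.experimental` aliases exported by this file and by linear.py; the data shapes and the target function are arbitrary illustrative choices.

# Joint wide & deep training sketch on synthetic data (illustrative only).
import numpy as np
import tensorflow as tf

linear_model = tf.keras.experimental.LinearModel(units=1)
dnn_model = tf.keras.Sequential([tf.keras.layers.Dense(8, activation='relu'),
                                 tf.keras.layers.Dense(1)])
combined = tf.keras.experimental.WideDeepModel(linear_model, dnn_model)

linear_inputs = np.random.uniform(-1, 1, size=(256, 2)).astype('float32')
dnn_inputs = np.random.uniform(-1, 1, size=(256, 3)).astype('float32')
y = 0.3 * linear_inputs[:, 0] + 0.2 * dnn_inputs[:, 1]

# One optimizer per sub-model: SGD for the wide part, Adam for the deep part.
combined.compile(optimizer=['sgd', 'adam'], loss='mse', metrics=['mse'])
combined.fit([linear_inputs, dnn_inputs], y, epochs=3)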
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Built-in linear model classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras import activations from tensorflow.python.keras import initializers from tensorflow.python.keras import regularizers from tensorflow.python.keras.engine import base_layer from tensorflow.python.keras.engine import training from tensorflow.python.keras.layers import core from tensorflow.python.ops import nn from tensorflow.python.util.tf_export import keras_export @keras_export('keras.experimental.LinearModel') class LinearModel(training.Model): r"""Linear Model for regression and classification problems. This model approximates the following function: $$y = \beta + \sum_{i=1}^{N} w_{i} * x_{i}$$ where $$\beta$$ is the bias and $$w_{i}$$ is the weight for each feature. Example: ```python model = LinearModel() model.compile(optimizer='sgd', loss='mse') model.fit(x, y, epochs=epochs) ``` This model accepts sparse float inputs as well: Example: ```python model = LinearModel() opt = tf.keras.optimizers.Adam() loss_fn = tf.keras.losses.MeanSquaredError() with tf.GradientTape() as tape: output = model(sparse_input) loss = tf.reduce_mean(loss_fn(target, output)) grads = tape.gradient(loss, model.weights) opt.apply_gradients(zip(grads, model.weights)) ``` """ def __init__(self, units=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, **kwargs): """Create a Linear Model. Args: units: Positive integer, output dimension without the batch size. activation: Activation function to use. If you don't specify anything, no activation is applied. use_bias: whether to calculate the bias/intercept for this model. If set to False, no bias/intercept will be used in calculations, e.g., the data is already centered. kernel_initializer: Initializer for the `kernel` weights matrices. bias_initializer: Initializer for the bias vector. kernel_regularizer: regularizer for kernel vectors. bias_regularizer: regularizer for bias vector. **kwargs: The keyword arguments that are passed on to BaseLayer.__init__. 
""" self.units = units self.activation = activations.get(activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) super(LinearModel, self).__init__(**kwargs) def build(self, input_shape): self.dense_layers = [] if isinstance(input_shape, list): for shape in input_shape: layer = core.Dense( units=self.units, use_bias=False, kernel_initializer=self.kernel_initializer, kernel_regularizer=self.kernel_regularizer, input_shape=shape) self.dense_layers.append(layer) else: layer = core.Dense( units=self.units, use_bias=False, kernel_initializer=self.kernel_initializer, kernel_regularizer=self.kernel_regularizer, input_shape=input_shape) self.dense_layers.append(layer) if self.use_bias: self.bias = self.add_weight( 'bias', shape=self.units, initializer=self.bias_initializer, regularizer=self.bias_regularizer, dtype=self.dtype, trainable=True) else: self.bias = None def call(self, inputs): if not isinstance(inputs, (tuple, list)): inputs = [inputs] if len(inputs) != len(self.dense_layers): raise ValueError('Expected {} inputs, but got {} inputs'.format( len(self.dense_layers), len(inputs))) result = None for inp, layer in zip(inputs, self.dense_layers): output = layer(inp) if result is None: result = output else: result += output if self.use_bias: result = nn.bias_add(result, self.bias) if self.activation is not None: return self.activation(result) # pylint: disable=not-callable return result def get_config(self): config = { 'units': self.units, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), } base_config = base_layer.Layer.get_config(self) return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): del custom_objects return cls(**config)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/premade/linear.py
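For a concrete dense-input use of the class above, the sketch below fits y = 0.3*x0 + 0.2*x1 on synthetic data and reads the learned weights back out. It mirrors the tests that follow and assumes the `tf.keras.experimental.LinearModel` alias exported by this file; the data and epoch count are illustrative.

# Dense-input LinearModel sketch: recover the weights of a linear target.
import numpy as np
import tensorflow as tf

model = tf.keras.experimental.LinearModel(units=1)
x = np.random.uniform(-5, 5, size=(256, 2)).astype('float32')
y = 0.3 * x[:, 0] + 0.2 * x[:, 1]

model.compile(optimizer='sgd', loss='mse')
model.fit(x, y, epochs=10, verbose=0)

# get_weights() returns [kernel, bias]; the kernel approaches [[0.3], [0.2]].
print(model.get_weights())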
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras Premade Linear models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.feature_column import dense_features_v2 from tensorflow.python.feature_column import feature_column_v2 as fc from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import sparse_tensor from tensorflow.python.keras import backend from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import losses from tensorflow.python.keras.engine import input_layer from tensorflow.python.keras.engine import sequential from tensorflow.python.keras.engine import training from tensorflow.python.keras.layers import core from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.keras.premade import linear from tensorflow.python.ops import variables from tensorflow.python.platform import test @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class LinearModelTest(keras_parameterized.TestCase): def test_linear_model_with_single_input(self): model = linear.LinearModel() inp = np.random.uniform(low=-5, high=5, size=(64, 2)) output = .3 * inp[:, 0] + .2 * inp[:, 1] model.compile('sgd', 'mse', []) model.fit(inp, output, epochs=5) self.assertTrue(model.built) def test_linear_model_with_multi_input(self): model = linear.LinearModel() input_a = np.random.uniform(low=-5, high=5, size=(64, 1)) input_b = np.random.uniform(low=-5, high=5, size=(64, 1)) output = .3 * input_a + .2 * input_b model.compile('sgd', 'mse', []) model.fit([input_a, input_b], output, epochs=5) def test_linear_model_as_layer(self): input_a = input_layer.Input(shape=(1,), name='a') output_a = linear.LinearModel()(input_a) input_b = input_layer.Input(shape=(1,), name='b') output_b = core.Dense(units=1)(input_b) output = output_a + output_b model = training.Model(inputs=[input_a, input_b], outputs=[output]) input_a_np = np.random.uniform(low=-5, high=5, size=(64, 1)) input_b_np = np.random.uniform(low=-5, high=5, size=(64, 1)) output_np = .3 * input_a_np + .2 * input_b_np model.compile('sgd', 'mse', []) model.fit([input_a_np, input_b_np], output_np, epochs=5) def test_linear_model_with_sparse_input(self): indices = constant_op.constant([[0, 0], [0, 2], [1, 0], [1, 1]], dtype=dtypes.int64) values = constant_op.constant([.4, .6, .8, .5]) shape = constant_op.constant([2, 3], dtype=dtypes.int64) model = linear.LinearModel() inp = sparse_tensor.SparseTensor(indices, values, shape) output = model(inp) self.evaluate(variables.global_variables_initializer()) if context.executing_eagerly(): weights = model.get_weights() weights[0] = np.ones((3, 1)) 
model.set_weights(weights) output = model(inp) self.assertAllClose([[1.], [1.3]], self.evaluate(output)) def test_linear_model_with_sparse_input_and_custom_training(self): batch_size = 64 indices = [] values = [] target = np.zeros((batch_size, 1)) with context.eager_mode(): for i in range(64): rand_int = np.random.randint(3) if rand_int == 0: indices.append((i, 0)) val = np.random.uniform(low=-5, high=5) values.append(val) target[i] = 0.3 * val elif rand_int == 1: indices.append((i, 1)) val = np.random.uniform(low=-5, high=5) values.append(val) target[i] = 0.2 * val else: indices.append((i, 0)) indices.append((i, 1)) val_1 = np.random.uniform(low=-5, high=5) val_2 = np.random.uniform(low=-5, high=5) values.append(val_1) values.append(val_2) target[i] = 0.3 * val_1 + 0.2 * val_2 indices = np.asarray(indices) values = np.asarray(values) shape = constant_op.constant([batch_size, 2], dtype=dtypes.int64) inp = sparse_tensor.SparseTensor(indices, values, shape) model = linear.LinearModel(use_bias=False) opt = gradient_descent.SGD() for _ in range(20): with backprop.GradientTape() as t: output = model(inp) loss = backend.mean(losses.mean_squared_error(target, output)) grads = t.gradient(loss, model.trainable_variables) grads_and_vars = zip(grads, model.trainable_variables) opt.apply_gradients(grads_and_vars) # This test is an example for a regression on categorical inputs, i.e., # the output is 0.4, 0.6, 0.9 when input is 'alpha', 'beta', 'gamma' # separately. def test_linear_model_with_feature_column(self): with context.eager_mode(): vocab_list = ['alpha', 'beta', 'gamma'] vocab_val = [0.4, 0.6, 0.9] data = np.random.choice(vocab_list, size=256) y = np.zeros_like(data, dtype=np.float32) for vocab, val in zip(vocab_list, vocab_val): indices = np.where(data == vocab) y[indices] = val + np.random.uniform( low=-0.01, high=0.01, size=indices[0].shape) cat_column = fc.categorical_column_with_vocabulary_list( key='symbol', vocabulary_list=vocab_list) ind_column = fc.indicator_column(cat_column) dense_feature_layer = dense_features_v2.DenseFeatures([ind_column]) linear_model = linear.LinearModel( use_bias=False, kernel_initializer='zeros') combined = sequential.Sequential([dense_feature_layer, linear_model]) opt = gradient_descent.SGD(learning_rate=0.1) combined.compile(opt, 'mse', []) combined.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10) self.assertAllClose([[0.4], [0.6], [0.9]], combined.layers[1].dense_layers[0].kernel.numpy(), atol=0.01) def test_config(self): linear_model = linear.LinearModel(units=3, use_bias=True) config = linear_model.get_config() cloned_linear_model = linear.LinearModel.from_config(config) self.assertEqual(linear_model.units, cloned_linear_model.units) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/premade/linear_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Premade Model API.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras.premade import linear from tensorflow.python.keras.premade import wide_deep
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/premade/__init__.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras Premade WideNDeep models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.feature_column import dense_features_v2 from tensorflow.python.feature_column import feature_column_v2 as fc from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import input_layer from tensorflow.python.keras.engine import sequential from tensorflow.python.keras.engine import training from tensorflow.python.keras.layers import core from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.keras.premade import linear from tensorflow.python.keras.premade import wide_deep from tensorflow.python.ops import variables from tensorflow.python.platform import test @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class WideDeepModelTest(keras_parameterized.TestCase): def test_wide_deep_model(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) linear_inp = np.random.uniform(low=-5, high=5, size=(64, 2)) dnn_inp = np.random.uniform(low=-5, high=5, size=(64, 3)) inputs = [linear_inp, dnn_inp] output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1] wide_deep_model.compile( optimizer=['sgd', 'adam'], loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) wide_deep_model.fit(inputs, output, epochs=5) self.assertTrue(wide_deep_model.built) def test_wide_deep_model_backprop(self): with self.cached_session(): linear_model = linear.LinearModel(units=1, kernel_initializer='zeros') dnn_model = sequential.Sequential( [core.Dense(units=1, kernel_initializer='zeros')]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) linear_inp = np.array([1.]) dnn_inp = np.array([1.]) inputs = [linear_inp, dnn_inp] output = linear_inp + 2 * dnn_inp linear_opt = gradient_descent.SGD(learning_rate=.1) dnn_opt = gradient_descent.SGD(learning_rate=.3) wide_deep_model.compile( optimizer=[linear_opt, dnn_opt], loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.evaluate(variables.global_variables_initializer()) wide_deep_model.fit(inputs, output, epochs=1) self.assertAllClose( [[0.3]], self.evaluate(wide_deep_model.linear_model.dense_layers[0].kernel)) self.assertAllClose([[0.9]], self.evaluate( wide_deep_model.dnn_model.layers[0].kernel)) def test_wide_deep_model_with_single_input(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model 
= wide_deep.WideDeepModel(linear_model, dnn_model) inputs = np.random.uniform(low=-5, high=5, size=(64, 3)) output = .3 * inputs[:, 0] wide_deep_model.compile( optimizer=['sgd', 'adam'], loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) wide_deep_model.fit(inputs, output, epochs=5) def test_wide_deep_model_with_single_optimizer(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) linear_inp = np.random.uniform(low=-5, high=5, size=(64, 2)) dnn_inp = np.random.uniform(low=-5, high=5, size=(64, 3)) inputs = [linear_inp, dnn_inp] output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1] wide_deep_model.compile( optimizer='sgd', loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) wide_deep_model.fit(inputs, output, epochs=5) self.assertTrue(wide_deep_model.built) def test_wide_deep_model_as_layer(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1)]) linear_input = input_layer.Input(shape=(3,), name='linear') dnn_input = input_layer.Input(shape=(5,), name='dnn') wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) wide_deep_output = wide_deep_model((linear_input, dnn_input)) input_b = input_layer.Input(shape=(1,), name='b') output_b = core.Dense(units=1)(input_b) model = training.Model( inputs=[linear_input, dnn_input, input_b], outputs=[wide_deep_output + output_b]) linear_input_np = np.random.uniform(low=-5, high=5, size=(64, 3)) dnn_input_np = np.random.uniform(low=-5, high=5, size=(64, 5)) input_b_np = np.random.uniform(low=-5, high=5, size=(64,)) output_np = linear_input_np[:, 0] + .2 * dnn_input_np[:, 1] + input_b_np model.compile( optimizer='sgd', loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit([linear_input_np, dnn_input_np, input_b_np], output_np, epochs=5) def test_wide_deep_model_with_sub_model_trained(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel( linear.LinearModel(units=1), sequential.Sequential([core.Dense(units=1, input_dim=3)])) linear_inp = np.random.uniform(low=-5, high=5, size=(64, 2)) dnn_inp = np.random.uniform(low=-5, high=5, size=(64, 3)) inputs = [linear_inp, dnn_inp] output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1] linear_model.compile( optimizer='sgd', loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) dnn_model.compile( optimizer='adam', loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) linear_model.fit(linear_inp, output, epochs=50) dnn_model.fit(dnn_inp, output, epochs=50) wide_deep_model.compile( optimizer=['sgd', 'adam'], loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) wide_deep_model.fit(inputs, output, epochs=50) # This test is an example for cases where the linear and dnn models accept # the same raw input and the same transformed inputs, i.e., the raw input is # categorical, and both the linear and dnn models accept one 
hot encoding. def test_wide_deep_model_with_single_feature_column(self): vocab_list = ['alpha', 'beta', 'gamma'] vocab_val = [0.4, 0.6, 0.9] data = np.random.choice(vocab_list, size=256) y = np.zeros_like(data, dtype=np.float32) for vocab, val in zip(vocab_list, vocab_val): indices = np.where(data == vocab) y[indices] = val + np.random.uniform( low=-0.01, high=0.01, size=indices[0].shape) cat_column = fc.categorical_column_with_vocabulary_list( key='symbol', vocabulary_list=vocab_list) ind_column = fc.indicator_column(cat_column) dense_feature_layer = dense_features_v2.DenseFeatures([ind_column]) linear_model = linear.LinearModel( use_bias=False, kernel_initializer='zeros') dnn_model = sequential.Sequential([core.Dense(units=1)]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) combined = sequential.Sequential([dense_feature_layer, wide_deep_model]) opt = gradient_descent.SGD(learning_rate=0.1) combined.compile( opt, 'mse', [], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) combined.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10) # This test is an example for cases where the linear and dnn models accept # the same raw input but different transformed inputs, i.e., the raw input is # categorical, and the linear model accepts a one-hot encoding, while the dnn # model accepts an embedding encoding. def test_wide_deep_model_with_two_feature_columns(self): vocab_list = ['alpha', 'beta', 'gamma'] vocab_val = [0.4, 0.6, 0.9] data = np.random.choice(vocab_list, size=256) y = np.zeros_like(data, dtype=np.float32) for vocab, val in zip(vocab_list, vocab_val): indices = np.where(data == vocab) y[indices] = val + np.random.uniform( low=-0.01, high=0.01, size=indices[0].shape) cat_column = fc.categorical_column_with_vocabulary_list( key='symbol', vocabulary_list=vocab_list) ind_column = fc.indicator_column(cat_column) emb_column = fc.embedding_column(cat_column, dimension=5) linear_feature_layer = dense_features_v2.DenseFeatures([ind_column]) linear_model = linear.LinearModel( use_bias=False, kernel_initializer='zeros') combined_linear = sequential.Sequential( [linear_feature_layer, linear_model]) dnn_model = sequential.Sequential([core.Dense(units=1)]) dnn_feature_layer = dense_features_v2.DenseFeatures([emb_column]) combined_dnn = sequential.Sequential([dnn_feature_layer, dnn_model]) wide_deep_model = wide_deep.WideDeepModel(combined_linear, combined_dnn) opt = gradient_descent.SGD(learning_rate=0.1) wide_deep_model.compile( opt, 'mse', [], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) wide_deep_model.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10) self.assertEqual(3, linear_model.inputs[0].shape[1]) self.assertEqual(5, dnn_model.inputs[0].shape[1]) def test_config(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) config = wide_deep_model.get_config() cloned_wide_deep_model = wide_deep.WideDeepModel.from_config(config) self.assertEqual(linear_model.units, cloned_wide_deep_model.linear_model.units) self.assertEqual(dnn_model.layers[0].units, cloned_wide_deep_model.dnn_model.layers[0].units) def test_config_with_custom_objects(self): def my_activation(x): return x linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel( 
linear_model, dnn_model, activation=my_activation) config = wide_deep_model.get_config() cloned_wide_deep_model = wide_deep.WideDeepModel.from_config( config, custom_objects={'my_activation': my_activation}) self.assertEqual(cloned_wide_deep_model.activation, my_activation) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/premade/wide_deep_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras estimator API.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.util.tf_export import keras_export # Keras has an undeclared dependency on tensorflow/estimator:estimator_py. # As long as you depend on the //third_party/py/tensorflow:tensorflow target, # everything will work as normal. # LINT.IfChange @keras_export(v1=['keras.estimator.model_to_estimator']) def model_to_estimator( keras_model=None, keras_model_path=None, custom_objects=None, model_dir=None, config=None, checkpoint_format='saver'): """Constructs an `Estimator` instance from a given Keras model. For usage example, please see: [Creating estimators from Keras Models](https://tensorflow.org/guide/estimators#model_to_estimator). __Sample Weights__ Estimators returned by `model_to_estimator` are configured to handle sample weights (similar to `keras_model.fit(x, y, sample_weights)`). To pass sample weights when training or evaluating the Estimator, the first item returned by the input function should be a dictionary with keys `features` and `sample_weights`. Example below: ``` keras_model = tf.keras.Model(...) keras_model.compile(...) estimator = tf.keras.estimator.model_to_estimator(keras_model) def input_fn(): return dataset_ops.Dataset.from_tensors( ({'features': features, 'sample_weights': sample_weights}, targets)) estimator.train(input_fn, steps=1) ``` Args: keras_model: A compiled Keras model object. This argument is mutually exclusive with `keras_model_path`. keras_model_path: Path to a compiled Keras model saved on disk, in HDF5 format, which can be generated with the `save()` method of a Keras model. This argument is mutually exclusive with `keras_model`. custom_objects: Dictionary for custom objects. model_dir: Directory to save `Estimator` model parameters, graph, summary files for TensorBoard, etc. config: `RunConfig` to config `Estimator`. checkpoint_format: Sets the format of the checkpoint saved by the estimator when training. May be `saver` or `checkpoint`, depending on whether to save checkpoints from `tf.train.Saver` or `tf.train.Checkpoint`. This argument currently defaults to `saver`. When 2.0 is released, the default will be `checkpoint`. Estimators use name-based `tf.train.Saver` checkpoints, while Keras models use object-based checkpoints from `tf.train.Checkpoint`. Currently, saving object-based checkpoints from `model_to_estimator` is only supported by Functional and Sequential models. Returns: An Estimator from the given Keras model. Raises: ValueError: if neither keras_model nor keras_model_path was given. ValueError: if both keras_model and keras_model_path were given. ValueError: if the keras_model_path is a GCS URI. ValueError: if keras_model has not been compiled. ValueError: if an invalid checkpoint_format was given. 
""" try: from tensorflow_estimator.python.estimator import keras as keras_lib # pylint: disable=g-import-not-at-top except ImportError: raise NotImplementedError( 'tf.keras.estimator.model_to_estimator function not available in your ' 'installation.') return keras_lib.model_to_estimator( # pylint:disable=unexpected-keyword-arg keras_model=keras_model, keras_model_path=keras_model_path, custom_objects=custom_objects, model_dir=model_dir, config=config, checkpoint_format=checkpoint_format, use_v2_estimator=False) @keras_export('keras.estimator.model_to_estimator', v1=[]) def model_to_estimator_v2( keras_model=None, keras_model_path=None, custom_objects=None, model_dir=None, config=None, checkpoint_format='checkpoint'): """Constructs an `Estimator` instance from given keras model. For usage example, please see: [Creating estimators from Keras Models](https://tensorflow.org/guide/estimators#model_to_estimator). __Sample Weights__ Estimators returned by `model_to_estimator` are configured to handle sample weights (similar to `keras_model.fit(x, y, sample_weights)`). To pass sample weights when training or evaluating the Estimator, the first item returned by the input function should be a dictionary with keys `features` and `sample_weights`. Example below: ``` keras_model = tf.keras.Model(...) keras_model.compile(...) estimator = tf.keras.estimator.model_to_estimator(keras_model) def input_fn(): return dataset_ops.Dataset.from_tensors( ({'features': features, 'sample_weights': sample_weights}, targets)) estimator.train(input_fn, steps=1) ``` Args: keras_model: A compiled Keras model object. This argument is mutually exclusive with `keras_model_path`. keras_model_path: Path to a compiled Keras model saved on disk, in HDF5 format, which can be generated with the `save()` method of a Keras model. This argument is mutually exclusive with `keras_model`. custom_objects: Dictionary for custom objects. model_dir: Directory to save `Estimator` model parameters, graph, summary files for TensorBoard, etc. config: `RunConfig` to config `Estimator`. checkpoint_format: Sets the format of the checkpoint saved by the estimator when training. May be `saver` or `checkpoint`, depending on whether to save checkpoints from `tf.compat.v1.train.Saver` or `tf.train.Checkpoint`. The default is `checkpoint`. Estimators use name-based `tf.train.Saver` checkpoints, while Keras models use object-based checkpoints from `tf.train.Checkpoint`. Currently, saving object-based checkpoints from `model_to_estimator` is only supported by Functional and Sequential models. Returns: An Estimator from given keras model. Raises: ValueError: if neither keras_model nor keras_model_path was given. ValueError: if both keras_model and keras_model_path was given. ValueError: if the keras_model_path is a GCS URI. ValueError: if keras_model has not been compiled. ValueError: if an invalid checkpoint_format was given. """ try: from tensorflow_estimator.python.estimator import keras as keras_lib # pylint: disable=g-import-not-at-top except ImportError: raise NotImplementedError( 'tf.keras.estimator.model_to_estimator function not available in your ' 'installation.') return keras_lib.model_to_estimator( # pylint:disable=unexpected-keyword-arg keras_model=keras_model, keras_model_path=keras_model_path, custom_objects=custom_objects, model_dir=model_dir, config=config, checkpoint_format=checkpoint_format, use_v2_estimator=True) # LINT.ThenChange(//tensorflow_estimator/python/estimator/keras.py)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/estimator/__init__.py
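# Usage sketch for tensorflow/python/keras/estimator/__init__.py above; not
# part of the corpus file. The model_dir path and the train_input_fn helper
# are hypothetical; the feature key must match the model's input name.
import tensorflow as tf

model = tf.keras.models.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')
estimator = tf.keras.estimator.model_to_estimator(
    keras_model=model, model_dir='/tmp/keras_estimator')

def train_input_fn():
  # Random features/labels stand in for real data in this sketch.
  features = {model.input_names[0]: tf.random.normal([8, 4])}
  labels = tf.random.normal([8, 1])
  return tf.data.Dataset.from_tensors((features, labels)).repeat()

estimator.train(train_input_fn, steps=5)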
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """CIFAR100 small images classification dataset. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np from tensorflow.python.keras import backend as K from tensorflow.python.keras.datasets.cifar import load_batch from tensorflow.python.keras.utils.data_utils import get_file from tensorflow.python.util.tf_export import keras_export @keras_export('keras.datasets.cifar100.load_data') def load_data(label_mode='fine'): """Loads CIFAR100 dataset. Arguments: label_mode: one of "fine", "coarse". Returns: Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. Raises: ValueError: in case of invalid `label_mode`. """ if label_mode not in ['fine', 'coarse']: raise ValueError('`label_mode` must be one of `"fine"`, `"coarse"`.') dirname = 'cifar-100-python' origin = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz' path = get_file( dirname, origin=origin, untar=True, file_hash= '85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7') fpath = os.path.join(path, 'train') x_train, y_train = load_batch(fpath, label_key=label_mode + '_labels') fpath = os.path.join(path, 'test') x_test, y_test = load_batch(fpath, label_key=label_mode + '_labels') y_train = np.reshape(y_train, (len(y_train), 1)) y_test = np.reshape(y_test, (len(y_test), 1)) if K.image_data_format() == 'channels_last': x_train = x_train.transpose(0, 2, 3, 1) x_test = x_test.transpose(0, 2, 3, 1) return (x_train, y_train), (x_test, y_test)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/datasets/cifar100.py
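# Usage sketch for tensorflow/python/keras/datasets/cifar100.py above; not
# part of the corpus file. 'fine' yields the 100 class labels, 'coarse' the
# 20 superclass labels; shapes assume the default channels_last layout.
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar100.load_data(
    label_mode='coarse')
assert x_train.shape == (50000, 32, 32, 3)
assert y_train.shape == (50000, 1) and int(y_train.max()) == 19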
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Fashion-MNIST dataset. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import os import numpy as np from tensorflow.python.keras.utils.data_utils import get_file from tensorflow.python.util.tf_export import keras_export @keras_export('keras.datasets.fashion_mnist.load_data') def load_data(): """Loads the Fashion-MNIST dataset. Returns: Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. License: The copyright for Fashion-MNIST is held by Zalando SE. Fashion-MNIST is licensed under the [MIT license]( https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE). """ dirname = os.path.join('datasets', 'fashion-mnist') base = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' files = [ 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz' ] paths = [] for fname in files: paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname)) with gzip.open(paths[0], 'rb') as lbpath: y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8) with gzip.open(paths[1], 'rb') as imgpath: x_train = np.frombuffer( imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28) with gzip.open(paths[2], 'rb') as lbpath: y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8) with gzip.open(paths[3], 'rb') as imgpath: x_test = np.frombuffer( imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28) return (x_train, y_train), (x_test, y_test)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/datasets/fashion_mnist.py
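# Usage sketch for tensorflow/python/keras/datasets/fashion_mnist.py above;
# not part of the corpus file. Same array layout as classic MNIST, with 10
# clothing classes (0 = T-shirt/top ... 9 = ankle boot).
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
assert x_train.shape == (60000, 28, 28) and x_test.shape == (10000, 28, 28)
assert x_train.dtype == 'uint8'  # scale to floats before training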
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras built-in datasets.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras.datasets import boston_housing from tensorflow.python.keras.datasets import cifar10 from tensorflow.python.keras.datasets import cifar100 from tensorflow.python.keras.datasets import fashion_mnist from tensorflow.python.keras.datasets import imdb from tensorflow.python.keras.datasets import mnist from tensorflow.python.keras.datasets import reuters del absolute_import del division del print_function
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/datasets/__init__.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """IMDB sentiment classification dataset. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import numpy as np from tensorflow.python.keras.preprocessing.sequence import _remove_long_seq from tensorflow.python.keras.utils.data_utils import get_file from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import keras_export @keras_export('keras.datasets.imdb.load_data') def load_data(path='imdb.npz', num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3, **kwargs): """Loads the IMDB dataset. Arguments: path: where to cache the data (relative to `~/.keras/dataset`). num_words: max number of words to include. Words are ranked by how often they occur (in the training set) and only the most frequent words are kept skip_top: skip the top N most frequently occurring words (which may not be informative). maxlen: sequences longer than this will be filtered out. seed: random seed for sample shuffling. start_char: The start of a sequence will be marked with this character. Set to 1 because 0 is usually the padding character. oov_char: words that were cut out because of the `num_words` or `skip_top` limit will be replaced with this character. index_from: index actual words with this index and higher. **kwargs: Used for backwards compatibility. Returns: Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. Raises: ValueError: in case `maxlen` is so low that no input sequence could be kept. Note that the 'out of vocabulary' character is only used for words that were present in the training set but are not included because they're not making the `num_words` cut here. Words that were not seen in the training set but are in the test set have simply been skipped. 
""" # Legacy support if 'nb_words' in kwargs: logging.warning('The `nb_words` argument in `load_data` ' 'has been renamed `num_words`.') num_words = kwargs.pop('nb_words') if kwargs: raise TypeError('Unrecognized keyword arguments: ' + str(kwargs)) origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' path = get_file( path, origin=origin_folder + 'imdb.npz', file_hash= '69664113be75683a8fe16e3ed0ab59fda8886cb3cd7ada244f7d9544e4676b9f') with np.load(path, allow_pickle=True) as f: x_train, labels_train = f['x_train'], f['y_train'] x_test, labels_test = f['x_test'], f['y_test'] np.random.seed(seed) indices = np.arange(len(x_train)) np.random.shuffle(indices) x_train = x_train[indices] labels_train = labels_train[indices] indices = np.arange(len(x_test)) np.random.shuffle(indices) x_test = x_test[indices] labels_test = labels_test[indices] xs = np.concatenate([x_train, x_test]) labels = np.concatenate([labels_train, labels_test]) if start_char is not None: xs = [[start_char] + [w + index_from for w in x] for x in xs] elif index_from: xs = [[w + index_from for w in x] for x in xs] if maxlen: xs, labels = _remove_long_seq(maxlen, xs, labels) if not xs: raise ValueError('After filtering for sequences shorter than maxlen=' + str(maxlen) + ', no sequence was kept. ' 'Increase maxlen.') if not num_words: num_words = max([max(x) for x in xs]) # by convention, use 2 as OOV word # reserve 'index_from' (=3 by default) characters: # 0 (padding), 1 (start), 2 (OOV) if oov_char is not None: xs = [ [w if (skip_top <= w < num_words) else oov_char for w in x] for x in xs ] else: xs = [[w for w in x if skip_top <= w < num_words] for x in xs] idx = len(x_train) x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx]) x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:]) return (x_train, y_train), (x_test, y_test) @keras_export('keras.datasets.imdb.get_word_index') def get_word_index(path='imdb_word_index.json'): """Retrieves the dictionary mapping word indices back to words. Arguments: path: where to cache the data (relative to `~/.keras/dataset`). Returns: The word index dictionary. """ origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' path = get_file( path, origin=origin_folder + 'imdb_word_index.json', file_hash='bfafd718b763782e994055a2d397834f') with open(path) as f: return json.load(f)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/datasets/imdb.py
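# Usage sketch for tensorflow/python/keras/datasets/imdb.py above; not part
# of the corpus file. It decodes one encoded review, illustrating the
# reserved indices created by the default start_char=1, oov_char=2,
# index_from=3 arguments: stored indices are word ranks shifted by 3.
import tensorflow as tf

(x_train, y_train), _ = tf.keras.datasets.imdb.load_data(num_words=10000)
word_index = tf.keras.datasets.imdb.get_word_index()  # word -> rank (from 1)
inverted = {rank + 3: word for word, rank in word_index.items()}
inverted.update({0: '<pad>', 1: '<start>', 2: '<oov>'})
print(' '.join(inverted.get(i, '<oov>') for i in x_train[0][:20]))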
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Boston housing price regression dataset. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.keras.utils.data_utils import get_file from tensorflow.python.util.tf_export import keras_export @keras_export('keras.datasets.boston_housing.load_data') def load_data(path='boston_housing.npz', test_split=0.2, seed=113): """Loads the Boston Housing dataset. Arguments: path: path where to cache the dataset locally (relative to ~/.keras/datasets). test_split: fraction of the data to reserve as test set. seed: Random seed for shuffling the data before computing the test split. Returns: Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. """ assert 0 <= test_split < 1 origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' path = get_file( path, origin=origin_folder + 'boston_housing.npz', file_hash= 'f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5') with np.load(path) as f: x = f['x'] y = f['y'] np.random.seed(seed) indices = np.arange(len(x)) np.random.shuffle(indices) x = x[indices] y = y[indices] x_train = np.array(x[:int(len(x) * (1 - test_split))]) y_train = np.array(y[:int(len(x) * (1 - test_split))]) x_test = np.array(x[int(len(x) * (1 - test_split)):]) y_test = np.array(y[int(len(x) * (1 - test_split)):]) return (x_train, y_train), (x_test, y_test)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/datasets/boston_housing.py
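# Usage sketch for tensorflow/python/keras/datasets/boston_housing.py above;
# not part of the corpus file. The split index is int(len(x) * (1 -
# test_split)), so test_split=0.2 on the canonical 506 samples gives 404
# training and 102 test rows of 13 features each.
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data(
    test_split=0.2, seed=113)
assert x_train.shape == (404, 13) and x_test.shape == (102, 13)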
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Reuters topic classification dataset. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import numpy as np from tensorflow.python.keras.preprocessing.sequence import _remove_long_seq from tensorflow.python.keras.utils.data_utils import get_file from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import keras_export @keras_export('keras.datasets.reuters.load_data') def load_data(path='reuters.npz', num_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, start_char=1, oov_char=2, index_from=3, **kwargs): """Loads the Reuters newswire classification dataset. Arguments: path: where to cache the data (relative to `~/.keras/dataset`). num_words: max number of words to include. Words are ranked by how often they occur (in the training set) and only the most frequent words are kept skip_top: skip the top N most frequently occurring words (which may not be informative). maxlen: truncate sequences after this length. test_split: Fraction of the dataset to be used as test data. seed: random seed for sample shuffling. start_char: The start of a sequence will be marked with this character. Set to 1 because 0 is usually the padding character. oov_char: words that were cut out because of the `num_words` or `skip_top` limit will be replaced with this character. index_from: index actual words with this index and higher. **kwargs: Used for backwards compatibility. Returns: Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. Note that the 'out of vocabulary' character is only used for words that were present in the training set but are not included because they're not making the `num_words` cut here. Words that were not seen in the training set but are in the test set have simply been skipped. 
""" # Legacy support if 'nb_words' in kwargs: logging.warning('The `nb_words` argument in `load_data` ' 'has been renamed `num_words`.') num_words = kwargs.pop('nb_words') if kwargs: raise TypeError('Unrecognized keyword arguments: ' + str(kwargs)) origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' path = get_file( path, origin=origin_folder + 'reuters.npz', file_hash= 'd6586e694ee56d7a4e65172e12b3e987c03096cb01eab99753921ef915959916') with np.load(path, allow_pickle=True) as f: xs, labels = f['x'], f['y'] np.random.seed(seed) indices = np.arange(len(xs)) np.random.shuffle(indices) xs = xs[indices] labels = labels[indices] if start_char is not None: xs = [[start_char] + [w + index_from for w in x] for x in xs] elif index_from: xs = [[w + index_from for w in x] for x in xs] if maxlen: xs, labels = _remove_long_seq(maxlen, xs, labels) if not num_words: num_words = max([max(x) for x in xs]) # by convention, use 2 as OOV word # reserve 'index_from' (=3 by default) characters: # 0 (padding), 1 (start), 2 (OOV) if oov_char is not None: xs = [[w if skip_top <= w < num_words else oov_char for w in x] for x in xs] else: xs = [[w for w in x if skip_top <= w < num_words] for x in xs] idx = int(len(xs) * (1 - test_split)) x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx]) x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:]) return (x_train, y_train), (x_test, y_test) @keras_export('keras.datasets.reuters.get_word_index') def get_word_index(path='reuters_word_index.json'): """Retrieves the dictionary mapping word indices back to words. Arguments: path: where to cache the data (relative to `~/.keras/dataset`). Returns: The word index dictionary. """ origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' path = get_file( path, origin=origin_folder + 'reuters_word_index.json', file_hash='4d44cc38712099c9e383dc6e5f11a921') with open(path) as f: return json.load(f)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/datasets/reuters.py
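# Usage sketch for tensorflow/python/keras/datasets/reuters.py above; not
# part of the corpus file. Vocabulary handling matches the IMDB loader, with
# an extra test_split argument; the newswires span 46 topic classes.
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.reuters.load_data(
    num_words=10000, test_split=0.2)
assert int(max(y_train.max(), y_test.max())) == 45  # classes are 0..45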
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities common to CIFAR10 and CIFAR100 datasets. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys from six.moves import cPickle def load_batch(fpath, label_key='labels'): """Internal utility for parsing CIFAR data. Arguments: fpath: path of the file to parse. label_key: key for label data in the retrieved dictionary. Returns: A tuple `(data, labels)`. """ with open(fpath, 'rb') as f: if sys.version_info < (3,): d = cPickle.load(f) else: d = cPickle.load(f, encoding='bytes') # decode utf8 d_decoded = {} for k, v in d.items(): d_decoded[k.decode('utf8')] = v d = d_decoded data = d['data'] labels = d[label_key] data = data.reshape(data.shape[0], 3, 32, 32) return data, labels
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/datasets/cifar.py
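# Illustration for tensorflow/python/keras/datasets/cifar.py above; not part
# of the corpus file. load_batch returns channels-first uint8 arrays shaped
# (num_samples, 3, 32, 32); the public CIFAR loaders transpose them to
# channels_last. A zero-filled stand-in shows the same transpose offline.
import numpy as np

batch = np.zeros((10000, 3, 32, 32), dtype='uint8')  # shape of one CIFAR batch
channels_last = batch.transpose(0, 2, 3, 1)
assert channels_last.shape == (10000, 32, 32, 3)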
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """CIFAR10 small images classification dataset. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np from tensorflow.python.keras import backend as K from tensorflow.python.keras.datasets.cifar import load_batch from tensorflow.python.keras.utils.data_utils import get_file from tensorflow.python.util.tf_export import keras_export @keras_export('keras.datasets.cifar10.load_data') def load_data(): """Loads CIFAR10 dataset. Returns: Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. """ dirname = 'cifar-10-batches-py' origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz' path = get_file( dirname, origin=origin, untar=True, file_hash= '6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce') num_train_samples = 50000 x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8') y_train = np.empty((num_train_samples,), dtype='uint8') for i in range(1, 6): fpath = os.path.join(path, 'data_batch_' + str(i)) (x_train[(i - 1) * 10000:i * 10000, :, :, :], y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath) fpath = os.path.join(path, 'test_batch') x_test, y_test = load_batch(fpath) y_train = np.reshape(y_train, (len(y_train), 1)) y_test = np.reshape(y_test, (len(y_test), 1)) if K.image_data_format() == 'channels_last': x_train = x_train.transpose(0, 2, 3, 1) x_test = x_test.transpose(0, 2, 3, 1) x_test = x_test.astype(x_train.dtype) y_test = y_test.astype(y_train.dtype) return (x_train, y_train), (x_test, y_test)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/datasets/cifar10.py
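# Usage sketch for tensorflow/python/keras/datasets/cifar10.py above; not
# part of the corpus file. Note the labels keep a trailing unit dimension.
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
assert y_train.shape == (50000, 1) and y_test.shape == (10000, 1)
y_train, y_test = y_train.flatten(), y_test.flatten()  # (N,) is often handier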
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """MNIST handwritten digits dataset. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.keras.utils.data_utils import get_file from tensorflow.python.util.tf_export import keras_export @keras_export('keras.datasets.mnist.load_data') def load_data(path='mnist.npz'): """Loads the MNIST dataset. Arguments: path: path where to cache the dataset locally (relative to ~/.keras/datasets). Returns: Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. License: Yann LeCun and Corinna Cortes hold the copyright of MNIST dataset, which is a derivative work from original NIST datasets. MNIST dataset is made available under the terms of the [Creative Commons Attribution-Share Alike 3.0 license.]( https://creativecommons.org/licenses/by-sa/3.0/) """ origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' path = get_file( path, origin=origin_folder + 'mnist.npz', file_hash= '731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1') with np.load(path) as f: x_train, y_train = f['x_train'], f['y_train'] x_test, y_test = f['x_test'], f['y_test'] return (x_train, y_train), (x_test, y_test)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/datasets/mnist.py
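# Usage sketch for tensorflow/python/keras/datasets/mnist.py above; not part
# of the corpus file. The whole dataset fits in memory as uint8 arrays; a
# common first step is scaling to [0, 1] floats.
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
assert x_train.shape == (60000, 28, 28) and y_train.shape == (60000,)
x_train, x_test = x_train / 255.0, x_test / 255.0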
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests Policies.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.keras.mixed_precision.experimental import policy as mp_policy from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging from tensorflow.python.training.experimental import loss_scale as loss_scale_module from tensorflow.python.training.experimental import mixed_precision @test_util.run_all_in_graph_and_eager_modes class PolicyTest(test.TestCase): """Tests Policies.""" @testing_utils.enable_v2_dtype_behavior def test_dtype_attributes(self): policy = mp_policy.Policy('infer') self.assertEqual(policy.compute_dtype, None) self.assertEqual(policy.variable_dtype, None) policy = mp_policy.Policy('infer_float32_vars') self.assertEqual(policy.compute_dtype, None) self.assertEqual(policy.variable_dtype, 'float32') for dtype in 'int32', 'bool', 'float16', 'float32': policy = mp_policy.Policy(dtype) self.assertEqual(policy.name, dtype) self.assertEqual(policy.compute_dtype, dtype) self.assertEqual(policy.variable_dtype, dtype) policy = mp_policy.Policy(dtype + '_with_float32_vars') expected_name = ( dtype if dtype == 'float32' else dtype + '_with_float32_vars') self.assertEqual(policy.name, expected_name) self.assertEqual(policy.compute_dtype, dtype) self.assertEqual(policy.variable_dtype, 'float32') for dtype in 'float16', 'bfloat16': policy = mp_policy.Policy('mixed_' + dtype) self.assertEqual(policy.name, 'mixed_' + dtype) self.assertEqual(policy.compute_dtype, dtype) self.assertEqual(policy.variable_dtype, 'float32') @testing_utils.enable_v2_dtype_behavior def test_repr(self): for policy in ('infer', 'infer_with_float32_vars', 'float32', 'float16_with_float32_vars'): self.assertEqual(repr(mp_policy.Policy(policy)), '<Policy "%s", loss_scale=None>' % policy) self.assertEqual(repr(mp_policy.Policy('float32_with_float32_vars')), '<Policy "float32", loss_scale=None>') self.assertEqual(repr(mp_policy.Policy('float16', loss_scale=2)), '<Policy "float16", loss_scale=FixedLossScale(2.0)>') @testing_utils.enable_v2_dtype_behavior def test_policy_errors(self): # Test passing invalid strings expected_error = 'Cannot convert value %s to a mixed precision Policy.' 
for invalid_policy in ('abc', 'abc_with_float32_vars', 'float32_with_float16_vars'): with self.assertRaisesRegexp(ValueError, expected_error % invalid_policy): mp_policy.Policy(invalid_policy) # Test passing a DType with self.assertRaisesRegexp(TypeError, "'name' must be a string, not a DType. " "Instead, pass DType.name. Got: float16"): mp_policy.Policy(dtypes.float16) # Test passing a non-DType invalid type with self.assertRaisesRegexp(TypeError, "'name' must be a string, but got: 5"): mp_policy.Policy(5) @testing_utils.enable_v2_dtype_behavior def test_with_input_dtype(self): policy = mp_policy.with_input_dtype(mp_policy.Policy('infer'), 'float16') self.assertEqual(policy.compute_dtype, 'float16') self.assertEqual(policy.variable_dtype, 'float16') policy = mp_policy.with_input_dtype( mp_policy.Policy('infer_with_float32_vars'), 'float16') self.assertEqual(policy.compute_dtype, 'float16') self.assertEqual(policy.variable_dtype, 'float32') policy = mp_policy.with_input_dtype( mp_policy.Policy('infer_with_float32_vars'), 'float32') self.assertEqual(policy.compute_dtype, 'float32') self.assertEqual(policy.variable_dtype, 'float32') @testing_utils.enable_v2_dtype_behavior def test_loss_scale(self): policy = mp_policy.Policy('float32') self.assertEqual(policy.loss_scale, None) policy = mp_policy.Policy('float32', loss_scale=None) self.assertEqual(policy.loss_scale, None) ls = loss_scale_module.DynamicLossScale() policy = mp_policy.Policy('float32', loss_scale=ls) self.assertIs(policy.loss_scale, ls) policy = mp_policy.Policy('float32', loss_scale='dynamic') self.assertIsInstance(policy.loss_scale, loss_scale_module.DynamicLossScale) policy = mp_policy.Policy('mixed_float16') self.assertIsInstance(policy.loss_scale, loss_scale_module.DynamicLossScale) policy = mp_policy.Policy('mixed_float16', loss_scale=None) self.assertEqual(policy.loss_scale, None) policy = mp_policy.Policy('mixed_bfloat16') self.assertEqual(policy.loss_scale, None) @testing_utils.enable_v2_dtype_behavior def test_global_policy(self): if base_layer_utils.v2_dtype_behavior_enabled(): default_policy = 'float32' else: default_policy = 'infer' self.assertEqual(mp_policy.global_policy().name, default_policy) try: mp_policy.set_policy('infer_with_float32_vars') self.assertEqual(mp_policy.global_policy().name, 'infer_with_float32_vars') with ops.Graph().as_default(): # Policies are not associated with a graph self.assertEqual(mp_policy.global_policy().name, 'infer_with_float32_vars') mp_policy.set_policy('infer') self.assertEqual(mp_policy.global_policy().name, 'infer') policy = mp_policy.Policy('infer_with_float32_vars') mp_policy.set_policy(policy) self.assertIs(mp_policy.global_policy(), policy) finally: mp_policy.set_policy(None) @testing_utils.enable_v2_dtype_behavior def test_loss_scale_warning(self): with test.mock.patch.object(tf_logging, 'warn') as mock_warn: mp_policy.Policy('float32', loss_scale=2.) self.assertEqual( mock_warn.call_args[0][0], 'Creating a Policy with a loss scale is only useful for float16 ' 'policies. You passed loss_scale=2.0 for policy float32. Consider ' 'not passing any loss_scale instead.') for policy_name in 'float16', 'float16_with_float32_vars', 'mixed_float16': with test.mock.patch.object(tf_logging, 'warn') as mock_warn: mp_policy.Policy(policy_name, loss_scale=2.) 
mock_warn.assert_not_called() @testing_utils.enable_v2_dtype_behavior def test_policy_scope(self): if base_layer_utils.v2_dtype_behavior_enabled(): default_policy = 'float32' else: default_policy = 'infer' with mp_policy.policy_scope('infer_with_float32_vars'): self.assertEqual(mp_policy.global_policy().name, 'infer_with_float32_vars') with mp_policy.policy_scope('infer'): self.assertEqual(mp_policy.global_policy().name, 'infer') self.assertEqual(mp_policy.global_policy().name, 'infer_with_float32_vars') self.assertEqual(mp_policy.global_policy().name, default_policy) @testing_utils.enable_v2_dtype_behavior def test_error_if_graph_rewrite_enabled(self): try: mixed_precision.enable_mixed_precision_graph_rewrite( gradient_descent.SGD(1.)) with self.assertRaisesRegexp( ValueError, 'the mixed precision graph rewrite has already been ' 'enabled'): mp_policy.set_policy('infer_float32_vars') finally: mixed_precision.disable_mixed_precision_graph_rewrite() @testing_utils.disable_v2_dtype_behavior def test_v1_dtype_behavior(self): # These policies are allowed with V1 dtype behavior with mp_policy.policy_scope(mp_policy.Policy('infer')): pass with mp_policy.policy_scope(mp_policy.Policy('infer_float32_vars')): pass # These policies are not allowed with V1 dtype behavior with self.assertRaisesRegexp( ValueError, 'global policy can only be set to a non-infer policy in TensorFlow 2'): with mp_policy.policy_scope(mp_policy.Policy('float32')): pass with self.assertRaisesRegexp( ValueError, 'global policy can only be set to a non-infer policy in TensorFlow 2'): with mp_policy.policy_scope( mp_policy.Policy('float16_with_float32_vars')): pass if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/mixed_precision/experimental/policy_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains the Policy class for mixed precision training.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import six from tensorflow.python.framework import dtypes from tensorflow.python.keras import backend from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.platform import tf_logging from tensorflow.python.training.experimental import loss_scale as loss_scale_module from tensorflow.python.training.experimental import mixed_precision_global_state from tensorflow.python.util.tf_export import keras_export # Default value of certain arguments, indicating the default behavior for # that argument should be used. USE_DEFAULT = 'USE_DEFAULT' @keras_export('keras.mixed_precision.experimental.Policy') class Policy(object): """A dtype policy for a Keras layer. A dtype policy determines dtype-related aspects of a layer, such as its computation and variable dtypes. Each layer has a policy. Policies can be passed to the 'dtype' argument of layer constructors, or a global policy can be set with 'tf.keras.mixed_precision.experimental.set_policy'. A layer will default to the global policy if no policy is passed to its constructor. For most models, each layer will have the same computation dtype and variable dtype, which will typically be float32. However, when mixed precision training is used, most layers will instead have a float16 computation dtype and a float32 variable dtype. See [this link](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) for more information on mixed precision training. When the variable dtype does not match the computation dtype, variables will be automatically cast to the computation dtype to avoid type errors. Policies also have a `tf.train.experimental.LossScale` instance, which is used by Models to perform loss scaling. Layers which are not Models ignore the loss scale. Policies are constructed by passing a string to the constructor, e.g. `tf.keras.mixed_precision.experimental.Policy('float32')`. The string determines the compute and variable dtypes. Currently, it can be in one of the following forms: * Any dtype name, such as 'float32' or 'float64'. Both the variable and compute dtypes will be that dtype. * '<dtype>_with_float32_vars', where <dtype> is any dtype. The compute dtype will be <dtype>, while the variable dtype is float32. This can be used for mixed precision, which uses float16 or bfloat16 for most computations, and float32 for variables, but it is recommended to use the 'mixed_float16' or 'mixed_bfloat16' policies instead. * 'mixed_float16' or 'mixed_bfloat16': Similar to 'float16_with_float32_vars' or 'bfloat16_with_float32_vars' respectively.
'mixed_float16' is identical to 'float16_with_float32_vars' except the loss_scale is dynamic by default. 'mixed_bfloat16' is currently identical to 'bfloat16_with_float32_vars'. More changes may be added to these mixed policies in the future, to further differentiate them from [b]float16_with_float32_vars. ### How to use mixed precision in layers with Policies To use mixed precision in a model, the 'mixed_float16' policy can be used. `tf.keras.mixed_precision.experimental.set_policy` can be used to set the default policy for layers if no policy is passed to them. For example: ```python tf.keras.mixed_precision.experimental.set_policy('mixed_float16') model = tf.keras.models.Sequential([ tf.keras.layers.InputLayer(input_shape=(100,)), # Dense layers use global policy of 'mixed_float16', which does # computations in float16 while keeping variables in float32. tf.keras.layers.Dense(10), tf.keras.layers.Dense(10), # Softmax should be done in float32 for numeric stability. We pass # dtype='float32' to use float32 instead of the global policy. tf.keras.layers.Activation('softmax', dtype='float32') ]) model.fit(...) # Train `model` ``` Alternatively, the policy can be passed to individual layers instead of setting the global policy with `set_policy`: ```python policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16') model = tf.keras.models.Sequential([ tf.keras.layers.InputLayer(input_shape=(100,)), tf.keras.layers.Dense(10, dtype=policy), tf.keras.layers.Dense(10, dtype=policy), # Softmax should be done in float32 for numeric stability. tf.keras.layers.Activation('softmax', dtype='float32') ]) model.fit(...) # Train `model` ``` As the above example shows, strings can be directly passed to layer constructors in the `dtype` argument instead of policies, but only if the string is convertible to a dtype. ### The deprecated "infer" policy In addition to a dtype or "<dtype>_with_float32_vars", a policy can also be "infer". This policy is deprecated and not recommended. When a layer has an infer policy, it will infer the computation and variable dtype from the first input the first time the layer is called. Once the layer is called for the first time, the layer's policy will change to the dtype of the first input. Similarly to "infer", there is a deprecated "infer_with_float32_vars" policy that infers the compute dtype, but not the variable dtype. In TensorFlow 1, only the "infer" and "infer_with_float32_vars" policies are available. """ # TODO(reedwm): Replace link in above docstring with a version that is more # TensorFlow-specific, and that also mentions bfloat16. def __init__(self, name, loss_scale=USE_DEFAULT): """Constructs the policy. The `name` argument determines the compute and variable dtype, and has no additional effect on the Policy. The compute and variable dtypes can only be specified through `name`, and cannot be specified directly. Args: name: A string. Can be one of the following values: * Any dtype name, such as 'float32' or 'float64'. Both the variable and compute dtypes will be that dtype. * '<dtype>_with_float32_vars', where <dtype> is any dtype. The compute dtype will be <dtype>, while the variable dtype is float32. This can be used for mixed precision, which uses float16 or bfloat16 for most computations, and float32 for variables, but it is recommended to use the 'mixed_float16' or 'mixed_bfloat16' policies instead. * 'mixed_float16' or 'mixed_bfloat16': Similar to 'float16_with_float32_vars' or 'bfloat16_with_float32_vars' respectively.
'mixed_float16' is identical to 'float16_with_float32_vars' except the loss_scale is dynamic by default. 'mixed_bfloat16' is currently identical to 'bfloat16_with_float32_vars'. More changes may be added to these mixed policies in the future, to further differentiate them from [b]float16_with_float32_vars. * 'infer' or 'infer_with_float32_vars' (deprecated): Infer the computation dtype from the input dtype. loss_scale: A `tf.train.experimental.LossScale`, or a value convertible to one such as "dynamic". Defaults to using no loss scaling unless `name` is "mixed_float16", in which case this defaults to "dynamic". Only `tf.keras.Model`s, not layers, use the loss scale, and it is only used during `Model.fit` or `Model.train_on_batch`. """ if isinstance(name, dtypes.DType): raise TypeError("'name' must be a string, not a DType. " "Instead, pass DType.name. Got: %s" % (name.name,)) elif not isinstance(name, six.string_types): raise TypeError("'name' must be a string, but got: %s" % (name,)) if name == 'infer_float32_vars': # For backwards compatibility. TODO(reedwm): Remove this. name = 'infer_with_float32_vars' if name == 'float32_with_float32_vars': # Doesn't affect correctness, but causes "float32" instead of # "float32_with_float32_vars" to be printed in __repr__. name = 'float32' self._name = name self._compute_dtype, self._variable_dtype = self._parse_name(name) if loss_scale == USE_DEFAULT: loss_scale = 'dynamic' if name == 'mixed_float16' else None if loss_scale and self._compute_dtype not in (None, 'float16'): tf_logging.warn('Creating a Policy with a loss scale is only useful for ' 'float16 policies. You passed loss_scale=%r for policy ' '%s. Consider not passing any loss_scale instead.' % (loss_scale, name)) self._loss_scale = loss_scale_module.get(loss_scale) def _parse_name(self, name): """Parses a Policy name into a compute and variable dtype. Args: name: The name of the policy. Returns: The (compute_dtype, variable_dtype) pair. """ if name == 'mixed_float16': return 'float16', 'float32' elif name == 'mixed_bfloat16': return 'bfloat16', 'float32' if name.endswith('_with_float32_vars'): base_name = name[:-len('_with_float32_vars')] float32_vars = True else: base_name = name float32_vars = False if base_name == 'infer': base_dtype = None else: try: base_dtype = dtypes.as_dtype(base_name).name except TypeError: error = ('Cannot convert value %s to a mixed precision Policy. ' 'Valid policies include those in the form "<dtype>" ' 'and "<dtype>_with_float32_vars", where <dtype> is the name ' 'of a dtype.' % (name,)) if float32_vars: error += (' The value %s ends with _with_float32_vars, but %s cannot ' 'be converted to a DType' % (name, base_name)) raise ValueError(error) if float32_vars: return base_dtype, 'float32' else: return base_dtype, base_dtype @property def variable_dtype(self): """The variable dtype of this policy. This is the dtype layers will create their variables in, unless a layer explicitly chooses a different dtype. If this is different from `Policy.compute_dtype` and both are non-None, layers will cast variables to the compute dtype to avoid type errors. If this is None, the policy is "infer" and the `compute_dtype` is also None. If `compute_dtype` is None, this is either None or float32. Returns: The variable dtype of this policy, or None if the variable dtype should be inferred from the inputs. """ return self._variable_dtype @property def compute_dtype(self): """The compute dtype of this policy. This is the dtype layers will do their computations in.
If this is None, the policy is "infer" or "infer_with_float32_vars" and `variable_dtype` is either None or float32 respectively. Note that even if the compute dtype is float16 or bfloat16, hardware devices may not do individual adds, multiplies, and other fundamental operations in [b]float16, but instead may do some of them in float32 for numeric stability. The compute dtype is the dtype of the inputs and outputs of the TensorFlow ops that the layer executes. Internally, many TensorFlow ops will do certain internal calculations in float32, or some other device-internal intermediate format with higher precision than [b]float16, to increase numeric stability. For example, a `tf.keras.layers.Dense` layer, when run on a GPU with a float16 compute dtype, will pass float16 inputs to tf.matmul. But tf.matmul will use float32 intermediate math. The performance benefit of float16 is still apparent, due to increased memory bandwidth and the fact GPUs have specialized hardware for computing matmuls on float16 while still keeping intermediate computations in float32. Returns: The compute dtype of this policy, or None if the compute dtype should be inferred from the inputs. """ return self._compute_dtype @property def should_cast_variables(self): """Returns True if variables should be cast. This is true if the variable dtype is not the same as the compute dtype. Returns: True, if variables should be cast. """ return self.variable_dtype != self.compute_dtype @property def loss_scale(self): """Returns the loss scale of this Policy. Returns: A `tf.train.experimental.LossScale`, or None. """ return self._loss_scale @property def name(self): """Returns the name of this policy.""" return self._name def __repr__(self): return '<Policy "%s", loss_scale=%s>' % (self._name, self.loss_scale) def with_input_dtype(policy, dtype): """Copies "infer" `policy`, adding `dtype` to it. Policy must be "infer" or "infer_with_float32_vars" (i.e., has no compute dtype). Returns a new policy with compute dtype `dtype`. The returned policy's variable dtype is also `dtype` if `policy` is "infer", and is `float32` if `policy` is "infer_with_float32_vars". Args: policy: An "infer" or "infer_with_float32_vars" policy. dtype: The dtype of an input to a layer. Returns: A new policy copied from `policy`, but with compute dtype and maybe variable_dtype set to `dtype`. """ assert not policy.compute_dtype dtype = dtypes.as_dtype(dtype).name if policy.variable_dtype is None: return Policy(dtype) else: # Policies without a compute dtype are either "infer" or # "infer_with_float32_vars", so the variable_dtype must be float32 here. assert policy.variable_dtype == 'float32' return Policy(dtype + '_with_float32_vars') # The current global policy in effect. If None, it means the current value of # floatx should be used as the policy if the V2 dtype behavior is enabled, # or "infer" otherwise. # TODO(reedwm): Make this thread local? _global_policy = None @keras_export('keras.mixed_precision.experimental.global_policy') def global_policy(): """Returns the global Policy. The global policy is the default policy used for layers, if no policy is passed to the layer constructor. If no policy has been set with `keras.mixed_precision.experimental.set_policy`, this will return a policy constructed from `tf.keras.backend.floatx()` in TensorFlow 2, or an "infer" policy in TensorFlow 1. See `keras.mixed_precision.experimental.Policy` for more information. Returns: The global Policy.
""" if _global_policy is None: if base_layer_utils.v2_dtype_behavior_enabled(): return Policy(backend.floatx()) else: return Policy('infer') return _global_policy def policy_defaults_to_floatx(): """Returns True if `global_policy()` will use the current value of floatx.""" return _global_policy is None and base_layer_utils.v2_dtype_behavior_enabled() def _check_if_mixed_precision_graph_rewrite_is_enabled(): # TODO(reedwm): Update this comment once the Keras API is complete. if mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled: raise ValueError( 'The mixed precision policy cannot be set, because the mixed ' 'precision graph rewrite has already been enabled.\n' 'At most, one of the following functions can be called:\n\n' ' 1. tf.train.experimental.enable_mixed_precision_graph_rewrite() ' '(You called this first)\n' ' 2. tf.keras.mixed_precision.experimental.set_policy() (You called ' 'this second)\n\n' 'You called both functions, which is an error, because both functions ' 'enable you to use mixed precision. The first function enables mixed ' 'precision in the graph with a graph rewrite. However it is currently ' 'not very customizable, and does not support eager. The second ' 'function is for Keras layers, but is not yet fully complete.') @keras_export('keras.mixed_precision.experimental.set_policy') def set_policy(policy): """Sets the global Policy. The global policy is the default policy used for layers, if no policy is passed to the layer constructor. If no global policy is set, layers will instead default to a Policy constructed from `tf.keras.backend.floatx()` in TensorFlow 2. In TensorFlow 1, layers default to an "infer" policy. See `keras.mixed_precision.experimental.Policy` for more information. Args: policy: A Policy, or a string that will be converted to a Policy.. """ global _global_policy _check_if_mixed_precision_graph_rewrite_is_enabled() if policy is not None and not isinstance(policy, Policy): policy = Policy(policy) if (policy and not base_layer_utils.v2_dtype_behavior_enabled() and policy.compute_dtype): raise ValueError( 'The global policy can only be set to a non-infer policy in TensorFlow ' '2') _global_policy = policy mixed_precision_global_state.using_default_mixed_precision_policy = ( _global_policy is None) # TODO(reedwm): Make this thread local @contextlib.contextmanager def policy_scope(policy): """A context manager that sets the global Policy under it. Args: policy: A Policy, or a string that will be converted to a Policy.. Yields: Nothing. """ old_policy = _global_policy try: set_policy(policy) yield finally: set_policy(old_policy)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/mixed_precision/experimental/policy.py
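# Usage sketch for
# tensorflow/python/keras/mixed_precision/experimental/policy.py above; not
# part of the corpus file. It shows how policy names parse into
# (compute_dtype, variable_dtype) and the loss-scale defaults documented in
# the docstrings.
import tensorflow as tf

mp = tf.keras.mixed_precision.experimental
assert mp.Policy('float32').compute_dtype == 'float32'
mixed = mp.Policy('mixed_float16')
assert (mixed.compute_dtype, mixed.variable_dtype) == ('float16', 'float32')
assert mixed.should_cast_variables
print(mixed.loss_scale)  # DynamicLossScale by default for 'mixed_float16'
assert mp.Policy('float32').loss_scale is None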
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Mixed precision API.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras.mixed_precision.experimental.loss_scale_optimizer import LossScaleOptimizer from tensorflow.python.keras.mixed_precision.experimental.policy import global_policy from tensorflow.python.keras.mixed_precision.experimental.policy import Policy from tensorflow.python.keras.mixed_precision.experimental.policy import set_policy
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/mixed_precision/experimental/__init__.py
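# Usage sketch for
# tensorflow/python/keras/mixed_precision/experimental/__init__.py above; not
# part of the corpus file. Setting a non-infer global policy requires TF2
# dtype behavior in this 1.x branch (e.g. tf.compat.v1.enable_v2_behavior()),
# per the check in set_policy.
import tensorflow as tf

tf.compat.v1.enable_v2_behavior()
mp = tf.keras.mixed_precision.experimental
mp.set_policy('mixed_float16')
try:
  assert mp.global_policy().name == 'mixed_float16'
finally:
  mp.set_policy(None)  # restore the floatx-based default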
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests that mixed precision works correctly with Keras layers and models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from absl.testing import parameterized import numpy as np from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.keras import backend from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import layers from tensorflow.python.keras import models from tensorflow.python.keras import optimizers from tensorflow.python.keras import regularizers from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import base_layer from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.keras.layers import core from tensorflow.python.keras.layers import recurrent from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer from tensorflow.python.keras.mixed_precision.experimental import policy from tensorflow.python.keras.mixed_precision.experimental import test_util as mp_test_util from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training.experimental import loss_scale as loss_scale_module from tensorflow.python.training.tracking import util as trackable_utils from tensorflow.python.util import nest class AssertTypeLayer(base_layer.Layer): """A layer which asserts its inputs are a certain type.""" def __init__(self, assert_type=None, **kwargs): self._assert_type = assert_type super(AssertTypeLayer, self).__init__(**kwargs) def assert_input_types(self, inputs): """Asserts `inputs` are of the correct type. Should be called in call().""" if self._assert_type: inputs_flattened = nest.flatten(inputs) for inp in inputs_flattened: assert inp.dtype.base_dtype == self._assert_type, ( 'Input tensor has type %s which does not match assert type %s' % (inp.dtype.name, self._assert_type.name)) class AddLayer(AssertTypeLayer): """A layer which adds its input to a scalar variable.""" def __init__(self, regularizer=None, use_operator=False, var_name='v', **kwargs): """Initializes the AddLayer. Args: regularizer: The regularizer on the scalar variable. use_operator: If True, add using the + operator.
If False, add using tf.add. var_name: The name of the variable. It can be useful to pass a name other than 'v', to test the case where the attribute name (self.v) differs from the variable name. **kwargs: Passed to AssertTypeLayer constructor. """ self._regularizer = regularizer self._use_operator = use_operator self._var_name = var_name super(AddLayer, self).__init__(**kwargs) def build(self, _): self.v = self.add_weight( self._var_name, (), initializer='ones', regularizer=self._regularizer) self.built = True def call(self, inputs): self.assert_input_types(inputs) assert inputs.dtype == self.v.dtype return self._add(inputs, self.v) def _add(self, x, y): if self._use_operator: return x + y else: return math_ops.add(x, y) class AddLayerWithoutAutoCast(AddLayer): """Same as AddLayer, but does not use AutoCastVariables.""" def build(self, _): dtype = self.dtype if dtype in ('float16', 'bfloat16'): dtype = 'float32' self.v = self.add_weight( 'v', (), initializer='ones', dtype=dtype, experimental_autocast=False, regularizer=self._regularizer) self.built = True def call(self, inputs): self.assert_input_types(inputs) assert self.v.dtype in (dtypes.float32, dtypes.float64) return self._add(inputs, math_ops.cast(self.v, inputs.dtype)) class AddLayerWithFunction(AddLayer): """Same as AddLayer, but _add is decorated with a tf.function.""" @def_function.function def _add(self, x, y): return super(AddLayerWithFunction, self)._add(x, y) class IdentityRegularizer(regularizers.Regularizer): def __call__(self, x): assert x.dtype == dtypes.float32 return array_ops.identity(x) # If called outside any strategy.scope() calls, this will return the default # strategy. default_strategy_fn = distribution_strategy_context.get_strategy def create_mirrored_strategy(): if context.num_gpus() >= 1: return mirrored_strategy.MirroredStrategy(['cpu:0', 'gpu:0']) else: return mirrored_strategy.MirroredStrategy(['cpu:0']) TESTCASES = ({ 'testcase_name': 'base', 'strategy_fn': default_strategy_fn }, { 'testcase_name': 'distribute', 'strategy_fn': create_mirrored_strategy }) class KerasLayerTest(keras_parameterized.TestCase): """Test mixed precision with Keras layers.""" @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def test_infer_with_float32_vars(self, strategy_fn): x = constant_op.constant([1.], dtype=dtypes.float16) with strategy_fn().scope(), policy.policy_scope('infer_float32_vars'): layer = AddLayer(assert_type=dtypes.float16) self.assertEqual(layer.dtype, dtypes.float32) y = layer(x) self.assertEqual(layer.v.dtype, dtypes.float32) self.assertEqual(y.dtype, dtypes.float16) self.assertEqual(layer.dtype, dtypes.float32) self.assertEqual(layer._dtype_policy._name, 'float16_with_float32_vars') self.evaluate(variables.global_variables_initializer()) self.assertEqual(self.evaluate(y), 2.)
if base_layer_utils.v2_dtype_behavior_enabled(): # Layer should now cast inputs to float16 x = constant_op.constant([1.], dtype=dtypes.float32) y = layer(x) self.assertEqual(y.dtype, dtypes.float16) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes @testing_utils.enable_v2_dtype_behavior def test_floating_point_policies_with_float32_vars(self, strategy_fn): for dtype in 'bfloat16', 'float16', 'float64': x = constant_op.constant([1.]) policy_name = dtype + '_with_float32_vars' with strategy_fn().scope(), policy.policy_scope(policy_name): layer = AddLayer(assert_type=dtype) self.assertEqual(layer.dtype, dtypes.float32) self.assertEqual(layer._dtype_policy._name, policy_name) y = layer(x) self.assertEqual(layer.v.dtype, dtypes.float32) self.assertEqual(y.dtype, dtype) self.assertEqual(layer.dtype, dtypes.float32) self.assertEqual(layer._dtype_policy._name, policy_name) self.evaluate(variables.global_variables_initializer()) self.assertEqual(self.evaluate(y), 2.) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes @testing_utils.enable_v2_dtype_behavior def test_int32_with_float32_vars(self, strategy_fn): # The policy int32_with_float32_vars is not useful at all (nor is any other # non-float policy with float32 variables), but we have it for consistency, # and so we test it. class IdentityLayerWithVar(base_layer.Layer): def build(self, _): self.v = self.add_weight('v', ()) def call(self, inputs): # Variables are only cast to other floats, not ints assert array_ops.identity(self.v).dtype == 'float32' return array_ops.identity(inputs) x = constant_op.constant([1]) with strategy_fn().scope(), policy.policy_scope('int32_with_float32_vars'): layer = IdentityLayerWithVar() self.assertEqual(layer.dtype, dtypes.float32) self.assertEqual(layer._dtype_policy._name, 'int32_with_float32_vars') y = layer(x) self.assertEqual(layer.v.dtype, dtypes.float32) self.assertEqual(y.dtype, dtypes.int32) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def test_layer_with_non_autocast_variable(self, strategy_fn): x = constant_op.constant([1.], dtype=dtypes.float16) with strategy_fn().scope(): with policy.policy_scope('infer_float32_vars'): layer = AddLayerWithoutAutoCast(assert_type=dtypes.float16) y = layer(x) self.assertEqual(layer.v.dtype, dtypes.float32) self.assertEqual(y.dtype, dtypes.float16) self.evaluate(variables.global_variables_initializer()) self.assertEqual(self.evaluate(y), 2.) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def test_layer_calling_tf_function(self, strategy_fn): x = constant_op.constant([1.], dtype=dtypes.float16) with strategy_fn().scope(): with policy.policy_scope('infer_float32_vars'): layer = AddLayerWithFunction(assert_type=dtypes.float16) y = layer(x) self.assertEqual(layer.v.dtype, dtypes.float32) self.assertEqual(y.dtype, dtypes.float16) self.evaluate(variables.global_variables_initializer()) self.assertEqual(self.evaluate(y), 2.)
@parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def test_layer_regularizer_runs_in_var_dtype(self, strategy_fn): x = constant_op.constant([1.], dtype=dtypes.float16) with strategy_fn().scope(): with policy.policy_scope('infer_float32_vars'): # Test on AddLayer layer = AddLayer( assert_type=dtypes.float16, regularizer=IdentityRegularizer()) layer(x) (regularizer_loss,) = layer.losses self.assertEqual(regularizer_loss.dtype, dtypes.float32) self.evaluate(variables.global_variables_initializer()) self.assertEqual(self.evaluate(regularizer_loss), 1.) # Test on AddLayerWithoutAutoCast layer = AddLayerWithoutAutoCast( assert_type=dtypes.float16, regularizer=IdentityRegularizer()) layer(x) (regularizer_loss,) = layer.losses self.assertEqual(regularizer_loss.dtype, dtypes.float32) self.evaluate(variables.global_variables_initializer()) self.assertEqual(self.evaluate(regularizer_loss), 1.) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def test_passing_policy_to_layer(self, strategy_fn): x = constant_op.constant([1.], dtype=dtypes.float16) with strategy_fn().scope(): # Passing a Policy to 'dtype' sets the policy for that layer. layer = AddLayer( assert_type=dtypes.float16, dtype=policy.Policy('infer_float32_vars')) # layer.dtype refers to the variable dtype self.assertEqual(layer.dtype, dtypes.float32) layer(x) self.assertEqual(layer.v.dtype, dtypes.float32) with policy.policy_scope('infer_float32_vars'): # Passing a Policy to dtype overrides the global Policy layer = AddLayer( assert_type=dtypes.float16, dtype=policy.Policy('infer')) # layer dtype is not yet known self.assertEqual(layer.dtype, None) layer(x) self.assertEqual(layer.v.dtype, dtypes.float16) self.assertEqual(layer.dtype, dtypes.float16) @test_util.run_in_graph_and_eager_modes def test_error_passing_policy_string_to_layer(self): with self.assertRaisesRegexp( TypeError, "Cannot convert value 'float16_with_float32_vars' to a " "TensorFlow DType"): # This is not allowed, as otherwise a "float16_with_float32_vars" policy # could be created without an API call that has the name "experimental" in # it. AddLayer(dtype='float16_with_float32_vars') @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def test_gradient(self, strategy_fn): x = constant_op.constant([1.], dtype=dtypes.float16) with strategy_fn().scope() as strategy: with policy.policy_scope('infer_float32_vars'): layer = AddLayer(assert_type=dtypes.float16) def run_fn(): with backprop.GradientTape() as tape: y = layer(x) # Divide by num_replicas_in_sync, as the effective total loss is the # sum of each of the replica's losses. y /= strategy.num_replicas_in_sync # Learning rate is small enough that if applied to a float16 variable, # the variable will not change. So this tests that the learning rate is not # applied to a float16 value, but instead to the float32 variable. opt = gradient_descent.SGD(2**-14) grad = tape.gradient(y, layer.v) return opt.apply_gradients([(grad, layer.v)]) op = strategy.experimental_run(run_fn) if not context.executing_eagerly(): self.evaluate(variables.global_variables_initializer()) self.evaluate(op) # The gradient with respect to the variable is 1.
Since the # variable is initialized with 1 and the learning rate is 2**-14, the # new variable value should be: init_val - gradient * learning_rate, # which is 1 - 1 * 2**-14 self.assertEqual(self.evaluate(layer.v), 1 - 2**-14) def _test_checkpointing_layer_weights(self, strategy_fn, mixed_prec_when_saving, mixed_prec_when_loading): # In this test, we potentially save with mixed precision enabled and load # with mixed precision disabled, or vice versa. This is possible because # variables are float32 regardless of whether mixed precision is enabled. save_policy = 'infer_float32_vars' if mixed_prec_when_saving else 'infer' load_policy = 'infer_float32_vars' if mixed_prec_when_loading else 'infer' save_input_dtype = 'float16' if mixed_prec_when_saving else 'float32' load_input_dtype = 'float16' if mixed_prec_when_loading else 'float32' # Create a layer and save a checkpoint. x = constant_op.constant([1.], dtype=save_input_dtype) with strategy_fn().scope(): with policy.policy_scope(save_policy): layer = AddLayer(assert_type=save_input_dtype) layer(x) # Build layer layer.set_weights([np.array(100.)]) self.assertEqual(self.evaluate(layer(x)), 101.) checkpoint = trackable_utils.Checkpoint(layer=layer) prefix = os.path.join(self.get_temp_dir(), 'ckpt') save_path = checkpoint.save(prefix) # Create a new layer and restore the checkpoint. x = constant_op.constant([1.], dtype=load_input_dtype) with strategy_fn().scope(): with policy.policy_scope(load_policy): layer = AddLayer(assert_type=load_input_dtype) layer(x) # Build layer layer.set_weights([np.array(200.)]) self.assertEqual(self.evaluate(layer(x)), 201.) checkpoint = trackable_utils.Checkpoint(layer=layer) checkpoint.restore(save_path).assert_consumed().run_restore_ops() self.assertEqual(layer.get_weights(), [100.]) self.assertEqual(self.evaluate(layer(x)), 101.) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def test_checkpointing_layer_weights(self, strategy_fn): self._test_checkpointing_layer_weights( strategy_fn, mixed_prec_when_saving=True, mixed_prec_when_loading=True) self._test_checkpointing_layer_weights( strategy_fn, mixed_prec_when_saving=True, mixed_prec_when_loading=False) self._test_checkpointing_layer_weights( strategy_fn, mixed_prec_when_saving=False, mixed_prec_when_loading=True) class KerasModelTest(keras_parameterized.TestCase): """Test mixed precision with Keras models.""" def _is_strategy_supported(self, strategy_fn, check_model_type=False): if (strategy_fn != default_strategy_fn and (testing_utils.should_run_eagerly() or (check_model_type and testing_utils.get_model_type() == 'subclass'))): # Distribution strategies do not support subclassed models or running with # `run_eagerly=True`. 
return False else: return True @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes @parameterized.named_parameters( { 'testcase_name': 'base', 'strategy_fn': default_strategy_fn }, { 'testcase_name': 'distribute', 'strategy_fn': create_mirrored_strategy, }, { 'testcase_name': 'operator', 'strategy_fn': create_mirrored_strategy, 'use_operator': True }, { 'testcase_name': 'regularizer', 'strategy_fn': create_mirrored_strategy, 'use_regularizer': True }, { 'testcase_name': 'infer', 'strategy_fn': create_mirrored_strategy, 'policy_name': 'mixed_float16' }, { 'testcase_name': 'norun_distributed', 'strategy_fn': create_mirrored_strategy, 'experimental_run_tf_function': False }) @testing_utils.enable_v2_dtype_behavior def test_model(self, strategy_fn, use_operator=False, use_regularizer=False, policy_name='mixed_float16', experimental_run_tf_function=True): if not self._is_strategy_supported(strategy_fn, check_model_type=True): return regularizer = IdentityRegularizer() if use_regularizer else None with strategy_fn().scope(): # Pass loss_scale=None, as this test will fail if the DynamicLossScale # skips applying gradients for a step with policy.policy_scope(policy.Policy(policy_name, loss_scale=None)): layer_list = [] if testing_utils.get_model_type() == 'subclass': # Subclassed models do not have an Input layer, so the model does not # cast inputs to the Input layer's dtype. Therefore, we need to # manually insert a float16 cast. cast_f16_layer = layers.Lambda( lambda x: math_ops.cast(x, 'float16'), input_shape=(1,)) layer_list.append(cast_f16_layer) layer = AddLayer( assert_type=dtypes.float16, use_operator=use_operator, regularizer=regularizer, input_shape=(1,)) cast_f32_layer = layers.Lambda(lambda x: math_ops.cast(x, 'float32')) layer_list += [layer, cast_f32_layer] model = testing_utils.get_model_from_layers( layer_list, input_shape=(1,), input_dtype=dtypes.float16) def loss_fn(y_true, y_pred): del y_true return math_ops.reduce_mean(y_pred) # Learning rate is small enough that if applied to a float16 variable, # the variable will not change. So this tests that the learning rate is not # applied to a float16 value, but instead to the float32 variable. opt = gradient_descent.SGD(2**-14) model.compile( opt, loss=loss_fn, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones((2, 1)) y = np.ones((2, 1)) dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(2) model.fit(dataset) # Variable starts at 1, and should have gradient of 2 ** -14 subtracted # from it. expected = 1 - 2**-14 if use_regularizer: # Regularizer adds another 2 ** -14 to the gradient. expected -= 2**-14 self.assertEqual(backend.eval(layer.v), expected) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters( { 'testcase_name': 'base', 'strategy_fn': default_strategy_fn }, { 'testcase_name': 'distribute', 'strategy_fn': create_mirrored_strategy, }, { 'testcase_name': 'norun_distributed', 'strategy_fn': create_mirrored_strategy, 'experimental_run_tf_function': False, }) def test_fixed_loss_scaling(self, strategy_fn, experimental_run_tf_function=True): # Note: We do not test mixed precision in this method, only loss scaling. if not self._is_strategy_supported(strategy_fn): return loss_scale = 8. batch_size = 4 with strategy_fn().scope(): x = layers.Input(shape=(1,), batch_size=batch_size) layer = AddLayer() y = layer(x) # The gradient of 'y' at this point is 1.
With loss scaling, the gradient # is 'loss_scale'. We divide by the batch size since the loss is averaged # across batch elements. expected_gradient = loss_scale / batch_size identity_with_grad_check_fn = ( mp_test_util.create_identity_with_grad_check_fn([expected_gradient])) y = core.Lambda(identity_with_grad_check_fn)(y) model = models.Model(inputs=x, outputs=y) def loss_fn(y_true, y_pred): del y_true return math_ops.reduce_mean(y_pred) opt = gradient_descent.SGD(1.) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) model.compile( opt, loss=loss_fn, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertEqual(backend.eval(layer.v), 1) x = np.ones((batch_size, 1)) y = np.ones((batch_size, 1)) dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(batch_size) model.fit(dataset) # Variable starts at 1, and should have gradient of 1 subtracted from it. expected = 0 self.assertEqual(backend.eval(layer.v), expected) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters( { 'testcase_name': 'base', 'strategy_fn': default_strategy_fn }, { 'testcase_name': 'distribute', 'strategy_fn': create_mirrored_strategy, }, { 'testcase_name': 'loss_scaling', 'strategy_fn': create_mirrored_strategy, 'use_loss_scaling': True }) @testing_utils.enable_v2_dtype_behavior def test_advanced_model(self, strategy_fn, use_loss_scaling=False): # The advanced model tests mixed-precision-related features that would occur # in a resnet50 model. It tests a model that has: # * Multiple layers, some which use auto-cast variables and some which do # not # * Regularization on some variables and not others. # * A fixed loss scale (if use_loss_scaling is True) if not self._is_strategy_supported(strategy_fn): return strategy = strategy_fn() if use_loss_scaling: loss_scale = 8. else: loss_scale = None learning_rate = 2**-14 with strategy.scope(): with policy.policy_scope(policy.Policy('mixed_float16', loss_scale=loss_scale)): x = layers.Input(shape=(1,), batch_size=2) layer1 = AddLayer( assert_type=dtypes.float16, regularizer=IdentityRegularizer(), use_operator=True) layer2 = AddLayerWithoutAutoCast( assert_type=dtypes.float16, use_operator=True) layer3 = AddLayer(assert_type=dtypes.float16, use_operator=False) layer4 = AddLayerWithoutAutoCast( assert_type=dtypes.float16, regularizer=IdentityRegularizer(), use_operator=False) y = layer1(x) y = layer2(y) y = layer3(y) y = layer4(y) if use_loss_scaling: # The gradient of 'y' at this point is 1. With loss scaling, the # gradient is 'loss_scale'. We divide by the batch size of 2 since the # loss is averaged across batch elements. 
expected_gradient = loss_scale / 2 identity_with_grad_check_fn = ( mp_test_util.create_identity_with_grad_check_fn( expected_dtype=dtypes.float16, expected_gradient=[expected_gradient])) y = core.Lambda(identity_with_grad_check_fn)(y) y = math_ops.cast(y, dtypes.float32) model = models.Model(inputs=x, outputs=y) def loss_fn(y_true, y_pred): self.assertEqual(y_true.dtype, dtypes.float32) self.assertEqual(y_pred.dtype, dtypes.float32) return math_ops.reduce_mean(y_pred) opt = gradient_descent.SGD(learning_rate) model.compile( opt, loss=loss_fn, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) x = np.ones((2, 1)) y = np.ones((2, 1)) dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(2) model.fit(dataset) for layer in (layer1, layer2, layer3, layer4): if layer.losses: # Layer has weight regularizer self.assertEqual(backend.eval(layer.v), 1 - 2 * learning_rate) else: # Layer does not have weight regularizer self.assertEqual(backend.eval(layer.v), 1 - learning_rate) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters( { 'testcase_name': 'base', 'strategy_fn': default_strategy_fn }, { 'testcase_name': 'distribute', 'strategy_fn': create_mirrored_strategy, }, { 'testcase_name': 'pass_loss_scale_to_policy', 'strategy_fn': create_mirrored_strategy, 'pass_loss_scale_to_policy': True, }, { 'testcase_name': 'norun_distributed', 'strategy_fn': create_mirrored_strategy, 'experimental_run_tf_function': False, }) def test_dynamic_loss_scaling(self, strategy_fn, pass_loss_scale_to_policy=False, experimental_run_tf_function=True): if not self._is_strategy_supported(strategy_fn): return strategy = strategy_fn() initial_loss_scale = 2. batch_size = 4 loss_scale = loss_scale_module.DynamicLossScale( initial_loss_scale=initial_loss_scale, increment_period=2) expected_gradient = backend.variable([initial_loss_scale / batch_size], dtype=dtypes.float16) # If this variable is set to True, the model below will have NaN gradients have_nan_gradients = backend.variable(False, dtype=dtypes.bool) with strategy.scope(): opt = gradient_descent.SGD(1.) if pass_loss_scale_to_policy: p = policy.Policy('infer_float32_vars', loss_scale=loss_scale) else: p = policy.Policy('infer_float32_vars') opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) with policy.policy_scope(p): x = layers.Input( shape=(1,), batch_size=batch_size, dtype=dtypes.float16) layer = AddLayer(assert_type=dtypes.float16) y = layer(x) identity_with_nan_grads = ( mp_test_util.create_identity_with_nan_gradients_fn( have_nan_gradients)) y = core.Lambda(identity_with_nan_grads)(y) identity_with_grad_check_fn = ( mp_test_util.create_identity_with_grad_check_fn( expected_dtype=dtypes.float16, expected_gradient=expected_gradient)) y = core.Lambda(identity_with_grad_check_fn)(y) y = math_ops.cast(y, dtypes.float32) model = models.Model(inputs=x, outputs=y) def loss_fn(y_true, y_pred): del y_true return math_ops.reduce_mean(y_pred) model.compile( opt, loss=loss_fn, run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertEqual(backend.eval(layer.v), 1) x = np.ones((batch_size, 1)) y = np.ones((batch_size, 1)) dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(batch_size) model.fit(dataset) # The variable starts at 1 and has a gradient of 1, so it will go down by 1 # each step.
self.assertEqual(backend.eval(layer.v), 0) model.fit(dataset) self.assertEqual(backend.eval(layer.v), -1) # There have been two steps without NaNs, so the loss scale will double backend.set_value(expected_gradient, backend.get_value(expected_gradient * 2)) model.fit(dataset) self.assertEqual(backend.eval(layer.v), -2) # Next test with NaN gradients. backend.set_value(have_nan_gradients, True) model.fit(dataset) # Variable should not be updated self.assertEqual(backend.eval(layer.v), -2) # Test with finite gradients again backend.set_value(have_nan_gradients, False) # The loss scale will be halved due to the NaNs, so the gradient will also # be halved backend.set_value(expected_gradient, backend.get_value(expected_gradient / 2)) model.fit(dataset) self.assertEqual(backend.eval(layer.v), -3) @test_util.run_in_graph_and_eager_modes @testing_utils.enable_v2_dtype_behavior def test_loss_scale_optimizer_overrides_policy_loss_scale(self): with policy.policy_scope(policy.Policy('float32', loss_scale=10.)): opt = gradient_descent.SGD(1.) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=5.) x = layers.Input(shape=(1,)) y = AddLayer()(x) model = models.Model(x, y) model.compile(opt, loss='mse') self.assertEqual(self.evaluate(model.optimizer.loss_scale()), 5.) @test_util.run_in_graph_and_eager_modes @testing_utils.enable_v2_dtype_behavior def test_pass_invalid_optimizer_with_loss_scaling(self): with policy.policy_scope(policy.Policy('float32', loss_scale=10.)): x = layers.Input(shape=(1,)) y = AddLayer()(x) model = models.Model(x, y) with self.assertRaisesRegexp(ValueError, '"optimizer" must be an instance of '): model.compile(optimizers.SGD(1.), 'mse') @test_util.run_in_graph_and_eager_modes @testing_utils.enable_v2_dtype_behavior def test_functional_model_loss_dtype(self): with policy.policy_scope('float16'): x = layers.Input(shape=(1,)) y = AddLayer()(x) model = models.Model(x, y) model.add_loss(math_ops.cast(y, 'float32')) # The loss should not be cast to the policy's dtype. self.assertEqual(model.losses[0].dtype, 'float32') @parameterized.named_parameters( { 'testcase_name': 'base', 'strategy_fn': default_strategy_fn, }, { 'testcase_name': 'distribute', 'strategy_fn': create_mirrored_strategy, }, { 'testcase_name': 'base_h5', 'strategy_fn': default_strategy_fn, 'h5': True, }, { 'testcase_name': 'distribute_h5', 'strategy_fn': create_mirrored_strategy, 'h5': True, }) @test_util.run_in_graph_and_eager_modes def test_save_weights_with_autocast_vars(self, strategy_fn, h5=False): with strategy_fn().scope(): with policy.policy_scope('infer_float32_vars'): x = layers.Input(shape=(1,), batch_size=2, dtype=dtypes.float16) layer = AddLayer(assert_type=dtypes.float16) y = layer(x) y = math_ops.cast(y, dtypes.float32) model = models.Model(inputs=x, outputs=y) model.set_weights([np.array(100.)]) x = np.ones((2, 1), dtype=np.float16) self.assertAllClose(backend.get_value(model(x)), x + 100.) suffix = '.h5' if h5 else '' weights_file = os.path.join(self.get_temp_dir(), 'weights' + suffix) model.save_weights(weights_file) model.set_weights([np.array(200.)]) self.assertAllClose(backend.get_value(model(x)), x + 200.) model.load_weights(weights_file) self.assertAllClose(backend.get_value(model(x)), x + 100.)
self.assertEqual(model.get_weights(), [np.array(100.)]) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters( { 'testcase_name': 'base', 'strategy_fn': default_strategy_fn, }, { 'testcase_name': 'distribute', 'strategy_fn': create_mirrored_strategy, }, { 'testcase_name': 'different_var_name', 'strategy_fn': default_strategy_fn, 'var_name': 'w' }, { 'testcase_name': 'different_var_name_distribute', 'strategy_fn': create_mirrored_strategy, 'var_name': 'w' }) def test_save_slot_variables_with_autocast_vars(self, strategy_fn, var_name='v'): if not self._is_strategy_supported(strategy_fn): return with strategy_fn().scope(), policy.policy_scope('infer_float32_vars'): x = layers.Input(shape=(2,), batch_size=2, dtype=dtypes.float16) # Having a var_name other than 'v' tests that a fixed bug (b/134713714) # does not reoccur. The bug was that a crash would occur when saving a # checkpoint where an AutoCastVariable with a slot variable would have a # different name than the layer attribute's name (layer.v in this case). layer = AddLayer(assert_type=dtypes.float16, var_name=var_name) y = layer(x) y = math_ops.cast(y, dtypes.float32) model = models.Model(inputs=x, outputs=y) opt = gradient_descent.SGD(1., 1.) model.compile( optimizer=opt, loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2) weights_file = os.path.join(self.get_temp_dir(), 'weights') model.save_weights(weights_file) saved_slot = backend.get_value(opt.get_slot(layer.v, 'momentum')) model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2) new_slot = backend.get_value(opt.get_slot(layer.v, 'momentum')) self.assertNotEqual(new_slot, saved_slot) model.load_weights(weights_file) restored_slot = backend.get_value(opt.get_slot(layer.v, 'momentum')) self.assertEqual(restored_slot, saved_slot) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters(*TESTCASES) def test_save_weights_with_dynamic_loss_scaling(self, strategy_fn): if not self._is_strategy_supported(strategy_fn): return strategy = strategy_fn() if (isinstance(strategy, mirrored_strategy.MirroredStrategy) and not context.executing_eagerly()): # TODO(b/121381184): Enable running the test in this case. return # Create and run model. with strategy.scope(): x = layers.Input(shape=(2,), batch_size=2, dtype=dtypes.float32) y = AddLayer(assert_type=dtypes.float32)(x) model = models.Model(inputs=x, outputs=y) loss_scale = loss_scale_module.DynamicLossScale( initial_loss_scale=1., increment_period=2., multiplier=2.) opt = gradient_descent.SGD(1.) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) model.compile( optimizer=opt, loss='mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # Run for 3 steps (6 examples with a batch size of 2) model.fit(np.zeros((6, 2)), np.zeros((6, 2)), batch_size=2) self.assertEqual(backend.get_value(loss_scale()), 2) self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1) # Save model weights. save_prefix = os.path.join(self.get_temp_dir(), 'ckpt') model.save_weights(save_prefix) # Run model again for 1 step (2 examples with a batch size of 2) model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2) self.assertEqual(backend.get_value(loss_scale()), 4) self.assertEqual(backend.get_value(loss_scale._num_good_steps), 0) # Load model weights and ensure loss scale weights are restored. 
model.load_weights(save_prefix) self.assertEqual(backend.get_value(loss_scale()), 2) self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1) class RnnTest(keras_parameterized.TestCase): """Test mixed precision with RNNs.""" # TODO(b/136512020): Support and test recurrent_v2.GRU. @parameterized.named_parameters( { 'testcase_name': 'base_simple', 'strategy_fn': default_strategy_fn, 'rnn_class': recurrent.SimpleRNN, }, { 'testcase_name': 'distribute_simple', 'strategy_fn': create_mirrored_strategy, 'rnn_class': recurrent.SimpleRNN, }, { 'testcase_name': 'base_gru', 'strategy_fn': default_strategy_fn, 'rnn_class': recurrent.GRU, }, { 'testcase_name': 'distribute_gru', 'strategy_fn': create_mirrored_strategy, 'rnn_class': recurrent.GRU, }) @test_util.run_in_graph_and_eager_modes # RNNs do not work properly with GradientTape in graph mode when V1 control # flow is used. @test_util.enable_control_flow_v2 def test_rnn(self, strategy_fn, rnn_class): x = array_ops.ones((2, 3, 4), dtype=dtypes.float16) strategy = strategy_fn() with strategy.scope(), policy.policy_scope('infer_float32_vars'): layer = rnn_class(units=4) def run_fn(): with backprop.GradientTape() as tape: y = layer(x) self.assertEqual(y.dtype, dtypes.float16) opt = gradient_descent.SGD(1.) grads = tape.gradient(y, layer.trainable_weights) return opt.apply_gradients(zip(grads, layer.trainable_weights)) op = strategy.experimental_run(run_fn) if not context.executing_eagerly(): self.evaluate(variables.global_variables_initializer()) self.evaluate(op) for v in layer.weights: self.assertEqual(v.dtype, dtypes.float32) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/mixed_precision/experimental/keras_test.py
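The recurring pattern in keras_test.py above can be condensed into a short sketch. The following is illustrative only, not part of the original file: it reuses keras_test.py's internal imports (policy, dtypes, constant_op) and its AddLayer test helper, and assumes eager execution under the default strategy.
# Minimal sketch of the 'infer_float32_vars' behavior the tests above verify:
# the computation runs in the float16 input dtype, the variable stays float32.
with policy.policy_scope('infer_float32_vars'):
  layer = AddLayer(assert_type=dtypes.float16)
  x = constant_op.constant([1.], dtype=dtypes.float16)
  y = layer(x)
  assert y.dtype == dtypes.float16        # the add ran in float16
  assert layer.v.dtype == dtypes.float32  # the variable was created in float32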
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains testing utilities related to mixed precision.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import custom_gradient def create_identity_with_grad_check_fn(expected_gradient, expected_dtype=None): """Returns a function that asserts its gradient has a certain value. This serves as a hook to assert intermediate gradients have a certain value. This returns an identity function. The identity's gradient function is also the identity function, except it asserts that the gradient equals `expected_gradient` and has dtype `expected_dtype`. Args: expected_gradient: The gradient function asserts that the gradient is this value. expected_dtype: The gradient function asserts the gradient has this dtype. Returns: An identity function whose gradient function asserts the gradient has a certain value. """ @custom_gradient.custom_gradient def identity_with_grad_check(x): """Function that asserts its gradient has a certain value.""" x = array_ops.identity(x) def grad(dx): if expected_dtype: assert dx.dtype == expected_dtype, ( 'dx.dtype should be %s but is: %s' % (expected_dtype, dx.dtype)) expected_tensor = ops.convert_to_tensor(expected_gradient, dtype=dx.dtype, name='expected_gradient') assert_op = check_ops.assert_equal(dx, expected_tensor) with ops.control_dependencies([assert_op]): dx = array_ops.identity(dx) return dx return x, grad return identity_with_grad_check def create_identity_with_nan_gradients_fn(have_nan_gradients): """Returns a function that optionally has NaN gradients. This serves as a hook to introduce NaN gradients to a model. This returns an identity function. The identity's gradient function will check if the boolean tensor `have_nan_gradients` is True. If so, the gradient will be NaN. Otherwise, the gradient will also be the identity. Args: have_nan_gradients: A scalar boolean tensor. If True, gradients will be NaN. Otherwise, the gradient function is the identity function. Returns: An identity function whose gradient function will return NaNs, if `have_nan_gradients` is True. """ @custom_gradient.custom_gradient def identity_with_nan_gradients(x): """Function whose gradient is NaN iff `have_nan_gradients` is True.""" x = array_ops.identity(x) def grad(dx): # We need this control dependency, because otherwise the NaN could be # produced before `dx`. This in turn could cause the final gradient to be # produced before `dx`, causing the loss scale to be updated before `dx`, # which can cause `tf.assert_equal`s to fail.
with ops.control_dependencies([dx]): nan_scalar = constant_op.constant(float('NaN'), dtype=dx.dtype) return control_flow_ops.cond( have_nan_gradients, lambda: array_ops.fill(array_ops.shape(dx), nan_scalar), lambda: dx ) return x, grad return identity_with_nan_gradients
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/mixed_precision/experimental/test_util.py
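To make the gradient hook above concrete, here is an illustrative fragment (not part of the original file) showing how the returned identity's custom gradient fires. It assumes eager execution and the backprop/constant_op imports used by keras_test.py earlier, with this module imported as mp_test_util as in that file.
# Scaling the identity's output by 2 means the incoming gradient at the hook
# is 2, so the assert_equal inside grad() passes for expected_gradient=2.
fn = mp_test_util.create_identity_with_grad_check_fn(expected_gradient=2.)
x = constant_op.constant(3.)
with backprop.GradientTape() as tape:
  tape.watch(x)
  y = 2. * fn(x)
dx = tape.gradient(y, x)  # runs the assertion; dx evaluates to 2.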
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for LossScaleOptimizer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from absl.testing import parameterized from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer from tensorflow.python.keras.mixed_precision.experimental import test_util as mp_test_util from tensorflow.python.keras.optimizer_v2 import adam from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.ops import control_flow_v2_toggles from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training.experimental import loss_scale as loss_scale_module from tensorflow.python.training.tracking import util as trackable_utils # If called outside any strategy.scope() calls, this will return the default # strategy. default_strategy_fn = distribution_strategy_context.get_strategy def create_mirrored_strategy(): if context.num_gpus() >= 1: return mirrored_strategy.MirroredStrategy(['cpu:0', 'gpu:0']) else: return mirrored_strategy.MirroredStrategy(['cpu:0']) TESTCASES = ({ 'testcase_name': 'Base', 'strategy_fn': default_strategy_fn }, { 'testcase_name': 'Distribute', 'strategy_fn': create_mirrored_strategy }) @test_util.with_control_flow_v2 class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase): def _run_if_in_graph_mode(self, val): # Running only in graph mode is useful, because optimizers sometimes return # a value that, in Graph mode, is runnable with self.evaluate. But in Eager # mode, the optimizer already does the computations and the return value # cannot be run. if not context.executing_eagerly(): self.evaluate(val) def _run_fn_with_grad_check(self, strategy, var, opt, expected_grad): grad_check_fn = mp_test_util.create_identity_with_grad_check_fn( expected_grad) loss = lambda: grad_check_fn(var) / strategy.num_replicas_in_sync return lambda: opt.minimize(loss, var_list=[var]) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def testFixedLossScaleAppliedToLossWithMinimize(self, strategy_fn): with strategy_fn().scope() as strategy: var = variables.Variable([5.0]) opt = gradient_descent.SGD(2.0) loss_scale = 10. 
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) # We need num_replicas_in_sync to divide loss_scale, otherwise loss_scale # / strategy.num_replicas_in_sync will not be exact, which could lead to # assertion failures due to rounding issues. self.assertEqual(loss_scale % strategy.num_replicas_in_sync, 0) run_fn = self._run_fn_with_grad_check( strategy, var, opt, loss_scale / strategy.num_replicas_in_sync) run_op = strategy.experimental_run(run_fn) self.evaluate(variables.global_variables_initializer()) self._run_if_in_graph_mode(run_op) # The loss is the identity of the variable. Therefore the gradient is 1, # and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3 self.assertAllClose([3.], self.evaluate(var)) @test_util.deprecated_graph_mode_only def testFixedLossScaleAppliedToLossWithGetGradients(self): var = variables.Variable([2.0]) opt = gradient_descent.SGD(1.0) loss_scale = 10. opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) grad_check_fn = mp_test_util.create_identity_with_grad_check_fn(loss_scale) loss = grad_check_fn(var) run_op = opt.get_gradients(loss, [var]) self.evaluate(variables.global_variables_initializer()) # This will cause an assertion to run, as # mp_test_util.create_identity_with_grad_check_fn added an assertion op. self.evaluate(run_op) @test_util.run_in_graph_and_eager_modes def testGetScaledLoss(self): opt = gradient_descent.SGD(2.0) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=2.) loss = ops.convert_to_tensor(5.) self.assertEqual(10., self.evaluate(opt.get_scaled_loss(loss))) self.assertEqual(10., self.evaluate(opt.get_scaled_loss(lambda: loss)())) loss = ops.convert_to_tensor(5., dtype='float16') self.assertEqual(10., self.evaluate(opt.get_scaled_loss(loss))) self.assertEqual(10., self.evaluate(opt.get_scaled_loss(lambda: loss)())) @test_util.run_in_graph_and_eager_modes def testGetUnscaledGradients(self): opt = gradient_descent.SGD(2.0) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=2) scaled_grads = [ ops.convert_to_tensor(3.), None, ops.convert_to_tensor(-4., dtype='float16') ] grads = opt.get_unscaled_gradients(scaled_grads) grads = [self.evaluate(g) if g is not None else g for g in grads] self.assertEqual([1.5, None, -2.], grads) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def testDynamicLossScale(self, strategy_fn): strategy = strategy_fn() learning_rate = 2. expected_gradient = resource_variable_ops.ResourceVariable( learning_rate / strategy.num_replicas_in_sync) with strategy.scope(): var = variables.Variable([5.0]) opt = gradient_descent.SGD(learning_rate) loss_scale = loss_scale_module.DynamicLossScale( initial_loss_scale=2, increment_period=1, multiplier=2) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) self.assertEqual( loss_scale.initial_loss_scale % strategy.num_replicas_in_sync, 0) run_fn = self._run_fn_with_grad_check(strategy, var, opt, expected_gradient) run_op = strategy.experimental_run(run_fn) self.evaluate(variables.global_variables_initializer()) self._run_if_in_graph_mode(run_op) # The loss is the identity of the variable. Therefore the gradient is 1, # and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3 self.assertAllClose([3.], self.evaluate(var)) # Loss scale will be double, so the expected gradient is also doubled. 
self.evaluate(expected_gradient.assign( 2 * learning_rate / strategy.num_replicas_in_sync)) run_op = strategy.experimental_run(run_fn) self._run_if_in_graph_mode(run_op) # As before, the 2 is subtracted from the variable, making its new value # 1. self.assertAllClose([1.], self.evaluate(var)) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def testDynamicUpdate(self, strategy_fn): with strategy_fn().scope() as strategy: var = variables.Variable([1.0, 2.0]) opt = gradient_descent.SGD(1.0) loss_scale = loss_scale_module.DynamicLossScale( initial_loss_scale=2, increment_period=1, multiplier=2) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) # Test optimizer with finite gradients loss = lambda: var * 2.0 / strategy.num_replicas_in_sync run_fn = lambda: opt.minimize(loss, var_list=[var]) run_op = strategy.experimental_run(run_fn) self.evaluate(variables.global_variables_initializer()) self._run_if_in_graph_mode(run_op) # Gradient is 2, so variable will have 2 subtracted from it self.assertAllClose([-1.0, 0.0], self.evaluate(var)) # Loss scale has doubled from 2 to 4 self.assertEqual(4., self.evaluate(opt.loss_scale())) # Test optimizer with NaN gradients loss = lambda: var * float('NaN') run_fn = lambda: opt.minimize(loss, var_list=[var]) run_op = strategy.experimental_run(run_fn) self._run_if_in_graph_mode(run_op) # Variable should not change from before, due to NaN gradients. self.assertAllClose(self.evaluate(var), [-1.0, 0.0]) # Loss scale should be halved due to NaN gradients. self.assertEqual(2., self.evaluate(opt.loss_scale())) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def testDynamicLossScaleWithFloat16Loss(self, strategy_fn): strategy = strategy_fn() learning_rate = 2. with strategy.scope(): var = variables.Variable([5.0]) opt = gradient_descent.SGD(learning_rate) loss_scale = loss_scale_module.DynamicLossScale( initial_loss_scale=2, increment_period=1, multiplier=2) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) def loss(): return math_ops.cast(var / strategy.num_replicas_in_sync, 'float16') run_fn = lambda: opt.minimize(loss, var_list=[var]) run_op = strategy.experimental_run(run_fn) self.evaluate(variables.global_variables_initializer()) self._run_if_in_graph_mode(run_op) # The loss is the identity of the variable. Therefore the gradient is 1, # and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3 self.assertAllClose([3.], self.evaluate(var)) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def testDynamicLossScaleWithSlots(self, strategy_fn): strategy_obj = strategy_fn() if (isinstance(strategy_obj, mirrored_strategy.MirroredStrategy) and control_flow_v2_toggles.control_flow_v2_enabled() and not context.executing_eagerly()): self.skipTest('b/138667997') with strategy_obj.scope() as strategy: var = variables.Variable([1.0, 2.0]) # An SGD optimizer with momentum has slot variables. opt = gradient_descent.SGD(1.0, momentum=1.) initial_loss_scale = 2. loss_scale = loss_scale_module.DynamicLossScale( initial_loss_scale=initial_loss_scale, increment_period=1, multiplier=4) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) loss = lambda: var / strategy.num_replicas_in_sync run_fn = lambda: opt.minimize(loss, var_list=[var]) run_op = strategy.experimental_run(run_fn) self.evaluate(variables.global_variables_initializer()) self._run_if_in_graph_mode(run_op) # The momentum accumulator starts at 0 and the gradient is 1.
The # accumulator is incremented by the gradient, so it is now 1. Then the # accumulator is subtracted from the variable, so the variable decreases # by 1. self.assertAllClose([0.0, 1.0], self.evaluate(var)) self.assertEqual(self.evaluate(opt.loss_scale()), initial_loss_scale * 4) run_op = strategy.experimental_run(run_fn) self._run_if_in_graph_mode(run_op) # The momentum accumulator was 1 before this step and the gradient is 1. # The accumulator is incremented by the gradient, so it is now 2. Then the # accumulator is subtracted from the variable, so the variable decreases # by 2. self.assertAllClose([-2., -1.], self.evaluate(var)) self.assertEqual(self.evaluate(opt.loss_scale()), initial_loss_scale * 16) @test_util.run_in_graph_and_eager_modes def testIterations(self): opt = gradient_descent.SGD(2.0) lso = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=10.) lso.iterations = 7 self.assertEqual(lso.iterations, 7) self.assertEqual(opt.iterations, 7) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def testGettingAndSettingLearningRate(self, strategy_fn): with strategy_fn().scope() as strategy: var = variables.Variable([5.0]) opt = adam.Adam(learning_rate=1.0) loss = lambda: var * 2.0 run_fn = lambda: opt.minimize(loss, [var]) run_op = strategy.experimental_run(run_fn) self.evaluate(variables.global_variables_initializer()) self._run_if_in_graph_mode(run_op) lr = self.evaluate(opt.lr) self.assertEqual(1.0, lr) opt.lr = 2.0 lr = self.evaluate(opt.lr) self.assertEqual(2.0, lr) self.evaluate(opt.lr.assign(3.0)) lr = self.evaluate(opt.lr) self.assertEqual(3.0, lr) with self.assertRaises(AttributeError): opt.not_an_attr += 3 @test_util.run_in_graph_and_eager_modes def testArbitraryAttributesNotExposed(self): opt = adam.Adam(learning_rate=1.0) # Test that Adam has attributes 'epsilon' and 'beta_1' opt.epsilon # pylint: disable=pointless-statement opt.beta_1 # pylint: disable=pointless-statement opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=10.) # Test that attributes defined by OptimizerV2 subclasses are not exposed in # LossScaleOptimizer, and that the error message is sensible. with self.assertRaisesRegexp( AttributeError, "'LossScaleOptimizer' object has no attribute 'epsilon'"): opt.epsilon # pylint: disable=pointless-statement with self.assertRaisesRegexp( AttributeError, "'LossScaleOptimizer' object has no attribute 'beta_1'"): opt.beta_1 # pylint: disable=pointless-statement @test_util.run_in_graph_and_eager_modes def testApplyGradientsGetsUnwrappedTensors(self): # Tests that gradients passed to apply_gradients are not wrapped in a # DistributionStrategy wrapper, such as PerReplica, but instead are raw # Tensors. Optimizer subclasses that override apply_gradients() expect raw # Tensors, even though the base Optimizer can handle PerReplica gradients.
outer_self = self class MyOptimizer(gradient_descent.SGD): def apply_gradients(self, grads_and_vars, name=None): for grad, _ in grads_and_vars: outer_self.assertIsInstance(grad, ops.Tensor) return super(MyOptimizer, self).apply_gradients(grads_and_vars, name) with create_mirrored_strategy().scope() as strategy: var = variables.Variable([5.0]) opt = MyOptimizer(learning_rate=1.0) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=1) loss = lambda: var * 2.0 run_fn = lambda: opt.minimize(loss, [var]) strategy.experimental_run(run_fn) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def testCheckpoint(self, strategy_fn): strategy = strategy_fn() if (isinstance(strategy, mirrored_strategy.MirroredStrategy) and not context.executing_eagerly()): # TODO(b/121381184): Enable running the test in this case. return with self.test_session(), strategy.scope(): # Build and run a simple model. var = variables.Variable([2.0]) loss_scale = loss_scale_module.DynamicLossScale( initial_loss_scale=1., increment_period=2., multiplier=2.) opt = gradient_descent.SGD(1., momentum=1.) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) run_fn = lambda: opt.minimize(lambda: var + 1., var_list=[var]) opt_op = strategy.experimental_run(run_fn) self.evaluate(variables.global_variables_initializer()) self.evaluate(opt_op) self.assertEqual(self.evaluate(loss_scale()), 1.) self.assertEqual(self.evaluate(loss_scale._num_good_steps), 1) slot_var = opt._optimizer.get_slot(var, 'momentum') slot_value = self.evaluate(slot_var).item() # Save a checkpoint. checkpoint = trackable_utils.Checkpoint(optimizer=opt, var=var) prefix = os.path.join(self.get_temp_dir(), 'ckpt') save_path = checkpoint.save(prefix) # Run model again. self.evaluate(strategy.experimental_run(run_fn)) self.assertEqual(self.evaluate(loss_scale()), 2.) self.assertEqual(self.evaluate(loss_scale._num_good_steps), 0) self.assertNotAlmostEqual(self.evaluate(slot_var).item(), slot_value) # Load checkpoint and ensure loss scale is back to its original value. status = checkpoint.restore(save_path) status.assert_consumed() status.run_restore_ops() self.assertEqual(self.evaluate(loss_scale()), 1.) self.assertEqual(self.evaluate(loss_scale._num_good_steps), 1) self.assertAlmostEqual(self.evaluate(slot_var).item(), slot_value) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer_test.py
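The arithmetic at the heart of testGetScaledLoss and testGetUnscaledGradients above reduces to a few lines. This sketch is illustrative, not part of the original file, and reuses the test file's internal imports (gradient_descent, loss_scale_optimizer, ops).
# With a fixed loss scale of 2, losses are multiplied by 2 before backprop
# and gradients are divided by 2 afterwards.
opt = gradient_descent.SGD(2.0)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=2.)
loss = ops.convert_to_tensor(5.)
scaled_loss = opt.get_scaled_loss(loss)  # 10. == 5. * loss_scale
scaled_grads = [ops.convert_to_tensor(3.)]
grads = opt.get_unscaled_gradients(scaled_grads)  # [1.5] == [3. / loss_scale]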
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for AutoCastVariable.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from absl.testing import parameterized import numpy as np from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras.mixed_precision.experimental import autocast_variable from tensorflow.python.ops import array_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training.tracking import util as trackable_utils TESTCASES = ({ 'testcase_name': 'base', 'distribute': False }, { 'testcase_name': 'distribute', 'distribute': True }) def get_distribute_scope(distribute): class DummyContextManager(object): def __enter__(self): pass def __exit__(self, *args): pass if distribute: return mirrored_strategy.MirroredStrategy(['cpu:0']).scope() else: return DummyContextManager() def get_autocast_var(var, distribute): if distribute: return autocast_variable.AutoCastDistributedVariable(var) else: return autocast_variable.AutoCastVariable(var) def get_var(val, dtype, name=None): return variables.VariableV1(val, use_resource=True, dtype=dtype, name=name) @test_util.run_all_in_graph_and_eager_modes class AutoCastVariableTest(test.TestCase, parameterized.TestCase): @parameterized.named_parameters(*TESTCASES) def test_read(self, distribute): with get_distribute_scope(distribute): x = get_var(1., dtypes.float32) x = get_autocast_var(x, distribute) self.evaluate(x.initializer) # outside of auto cast scope. 
self.assertEqual(x.dtype, dtypes.float32) self.assertEqual(x.value().dtype, dtypes.float32) self.assertEqual(x.read_value().dtype, dtypes.float32) self.assertEqual(array_ops.identity(x).dtype, dtypes.float32) # within auto cast scope of different dtype with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float16): self.assertEqual(x.dtype, dtypes.float16) self.assertEqual(x.value().dtype, dtypes.float16) self.assertEqual(x.read_value().dtype, dtypes.float16) self.assertEqual(array_ops.identity(x).dtype, dtypes.float16) # within auto cast scope of same dtype with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float32): self.assertEqual(x.dtype, dtypes.float32) self.assertEqual(x.value().dtype, dtypes.float32) self.assertEqual(x.read_value().dtype, dtypes.float32) self.assertEqual(array_ops.identity(x).dtype, dtypes.float32) @parameterized.named_parameters(*TESTCASES) def test_read_nested_scopes(self, distribute): with get_distribute_scope(distribute): x = get_var(1., dtypes.float32) x = get_autocast_var(x, distribute) self.evaluate(x.initializer) with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float16): self.assertEqual(x.dtype, dtypes.float16) self.assertEqual(x.read_value().dtype, dtypes.float16) with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float32): self.assertEqual(x.dtype, dtypes.float32) self.assertEqual(x.read_value().dtype, dtypes.float32) self.assertEqual(x.dtype, dtypes.float16) self.assertEqual(x.read_value().dtype, dtypes.float16) @parameterized.named_parameters(*TESTCASES) def test_dtype_is_not_string(self, distribute): with get_distribute_scope(distribute): x = get_var(1., dtypes.float32) x = get_autocast_var(x, distribute) self.assertEqual(x.dtype, dtypes.float32) self.assertIsInstance(x.dtype, dtypes.DType) self.assertEqual(x.true_dtype, dtypes.float32) self.assertIsInstance(x.true_dtype, dtypes.DType) with ops.get_default_graph()._enable_auto_casting_variables('float16'): self.assertEqual(x.dtype, dtypes.float16) self.assertIsInstance(x.dtype, dtypes.DType) self.assertEqual(x.true_dtype, dtypes.float32) self.assertIsInstance(x.true_dtype, dtypes.DType) @parameterized.named_parameters(*TESTCASES) def test_operator_overloads(self, distribute): with get_distribute_scope(distribute): for read_dtype in (dtypes.float32, dtypes.float16): x = get_var(7., dtypes.float32) x = get_autocast_var(x, distribute) with ops.get_default_graph()._enable_auto_casting_variables( read_dtype): self.evaluate(x.initializer) self.assertAlmostEqual(8, self.evaluate(x + 1)) self.assertAlmostEqual(10, self.evaluate(3 + x)) self.assertAlmostEqual(14, self.evaluate(x + x)) self.assertAlmostEqual(5, self.evaluate(x - 2)) self.assertAlmostEqual(6, self.evaluate(13 - x)) self.assertAlmostEqual(0, self.evaluate(x - x)) self.assertAlmostEqual(14, self.evaluate(x * 2)) self.assertAlmostEqual(21, self.evaluate(3 * x)) self.assertAlmostEqual(49, self.evaluate(x * x)) self.assertAlmostEqual(3.5, self.evaluate(x / 2)) self.assertAlmostEqual(1.5, self.evaluate(10.5 / x)) self.assertAlmostEqual(3, self.evaluate(x // 2)) self.assertAlmostEqual(2, self.evaluate(15 // x)) if read_dtype == dtypes.float32: # The "mod" operator does not support float16 self.assertAlmostEqual(1, self.evaluate(x % 2)) self.assertAlmostEqual(2, self.evaluate(16 % x)) self.assertTrue(self.evaluate(x < 12)) self.assertTrue(self.evaluate(x <= 12)) self.assertFalse(self.evaluate(x > 12)) self.assertFalse(self.evaluate(x >= 12)) self.assertFalse(self.evaluate(12 < x)) 
self.assertFalse(self.evaluate(12 <= x)) self.assertTrue(self.evaluate(12 > x)) self.assertTrue(self.evaluate(12 >= x)) self.assertAlmostEqual(343, self.evaluate(pow(x, 3)), places=4) self.assertAlmostEqual(128, self.evaluate(pow(2, x)), places=4) self.assertAlmostEqual(-7, self.evaluate(-x)) self.assertAlmostEqual(7, self.evaluate(abs(x))) x = get_var([7, 8, 9], dtypes.float32) x = get_autocast_var(x, distribute) self.evaluate(x.initializer) self.assertEqual(self.evaluate(x[1]), 8) @parameterized.named_parameters(*TESTCASES) def test_assign(self, distribute): with get_distribute_scope(distribute): x = get_var(0., dtypes.float32) x = get_autocast_var(x, distribute) self.evaluate(x.initializer) # outside of auto cast scope. v1 = constant_op.constant(3.14, dtype=dtypes.float32) v2 = constant_op.constant(3.14, dtype=dtypes.float16) def run_and_check(): # Assign float32 values self.assertAllClose(3.14, self.evaluate(x.assign(v1))) self.assertAllClose(3.14 * 2, self.evaluate(x.assign_add(v1))) self.assertAllClose(3.14, self.evaluate(x.assign_sub(v1))) # Attempt to assign float16 values with self.assertRaisesRegexp( ValueError, 'conversion requested dtype float32 for Tensor with dtype float16'): self.evaluate(x.assign(v2)) with self.assertRaisesRegexp( ValueError, 'conversion requested dtype float32 for Tensor with dtype float16'): self.evaluate(x.assign_add(v2)) with self.assertRaisesRegexp( ValueError, 'conversion requested dtype float32 for Tensor with dtype float16'): self.evaluate(x.assign_sub(v2)) # Assign Python floats self.assertAllClose(3.14, self.evaluate(x.assign(3.14))) self.assertAllClose(3.14 * 2, self.evaluate(x.assign_add(3.14))) self.assertAllClose(3.14, self.evaluate(x.assign_sub(3.14))) run_and_check() # reset x self.evaluate(x.assign(0.)) # within auto cast scope. with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float16): # assign still expects a float32 value even in a float16 scope run_and_check() @parameterized.named_parameters(*TESTCASES) def test_assign_stays_in_true_dtype(self, distribute): with get_distribute_scope(distribute): x = get_var(1., dtypes.float32) x = get_autocast_var(x, distribute) self.evaluate(x.initializer) # small_val is a value such that 1.0 + small_val == 1.0 in fp16, but not # in fp32 small_val = np.finfo('float16').eps / 2 small_tensor = constant_op.constant(small_val, dtype=dtypes.float32) with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float16): # Variable should be increased, despite it appearing to be the same # float16 value. self.assertEqual(1. + small_val, self.evaluate(x.assign(1. + small_tensor))) self.assertEqual(1., self.evaluate(x.value())) self.assertEqual(1. + small_val, self.evaluate(x.value())) self.evaluate(x.assign(1.)) with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float16): self.assertEqual(1. + small_val, self.evaluate(x.assign_add(small_tensor))) self.assertEqual(1., self.evaluate(x.value())) self.assertEqual(1. + small_val, self.evaluate(x.value())) @parameterized.named_parameters(*TESTCASES) def test_checkpoint(self, distribute): with get_distribute_scope(distribute): x = get_var(1., dtypes.float32) x = get_autocast_var(x, distribute) self.evaluate(x.initializer) self.evaluate(x.assign(123.)) checkpoint = trackable_utils.Checkpoint(x=x) prefix = os.path.join(self.get_temp_dir(), 'ckpt') save_path = checkpoint.save(prefix) self.evaluate(x.assign(234.)) checkpoint.restore(save_path).assert_consumed().run_restore_ops() self.assertEqual(self.evaluate(x), 123.)
@parameterized.named_parameters(*TESTCASES) def test_invalid_wrapped_variable(self, distribute): with get_distribute_scope(distribute): # Wrap a non-variable with self.assertRaisesRegexp(ValueError, 'variable must be of type'): x = constant_op.constant([1.], dtype=dtypes.float32) get_autocast_var(x, distribute) # Wrap a non-floating point variable with self.assertRaisesRegexp(ValueError, 'variable must be a floating point'): x = get_var(1, dtypes.int32) get_autocast_var(x, distribute) if distribute: # Wrap a non-distributed variable with AutoCastDistributedVariable with self.assertRaisesRegexp(ValueError, 'variable must be of type'): x = get_var(1., dtypes.float32) get_autocast_var(x, distribute) def test_repr(self): # We do not test with DistributionStrategy because we do not want to rely on # the exact __repr__ output of a DistributedVariable. x = get_var(1., dtypes.float32, name='x') x = get_autocast_var(x, distribute=False) if context.executing_eagerly(): self.assertStartsWith( repr(x), "<AutoCastVariable 'x:0' shape=() dtype=float32 true_dtype=float32, " "numpy=" ) with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float16): self.assertStartsWith( repr(x), "<AutoCastVariable 'x:0' shape=() dtype=float16 " "true_dtype=float32, numpy=" ) else: self.assertEqual( repr(x), "<AutoCastVariable 'x:0' shape=() dtype=float32 true_dtype=float32>" ) with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float16): self.assertEqual( repr(x), "<AutoCastVariable 'x:0' shape=() dtype=float16 true_dtype=float32>" ) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/mixed_precision/experimental/autocast_variable_test.py
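The `test_assign_stays_in_true_dtype` case above turns on a rounding fact: `np.finfo('float16').eps / 2` is exactly half the gap between 1.0 and the next representable float16, so it vanishes when added in fp16 but survives in fp32. A standalone NumPy sketch of that behavior (no TensorFlow needed):

```python
import numpy as np

# Half the fp16 machine epsilon (2**-11): exactly halfway between 1.0 and the
# next representable float16, so round-to-nearest-even drops the increment.
small = np.finfo(np.float16).eps / 2

print(np.float16(1.0) + np.float16(small) == np.float16(1.0))  # True: lost in fp16
print(np.float32(1.0) + np.float32(small) == np.float32(1.0))  # False: resolved in fp32
```

This is why the test asserts that `assign` inside a float16 scope still updates the float32 storage: reading back in fp16 shows 1.0, while the true value is `1.0 + small_val`.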
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains AutoCastVariable, a variable which automatically casts itself.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import values as distribute_values from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.training.tracking import base as trackable # TODO(reedwm) Make this subclass Variable. class AutoCastVariable(trackable.Trackable): """Variable that will cast itself to a different dtype in applicable contexts. This class wraps a floating-point tf.Variable. It emulates the variable interface and delegates to the wrapped variable, but it additionally will cast the wrapped variable under a `Graph._enable_auto_casting_variables(dtype)` context manager. For example: ``` v = tf.Variable(1.0, dtype=tf.float32) v = AutoCastVariable(v) print(tf.identity(v).dtype) # tf.float32 with ops.get_default_graph()._enable_auto_casting_variables(tf.float16): print(tf.identity(v).dtype) # tf.float16, as v will cast itself to float16 print(v.dtype) # tf.float16, as v.dtype also changes under the ctx manager. ``` The purpose of this class is to allow Keras layers to create variables in float32, and automatically cast them to float16 or bfloat16 when the layer is called. """ def __init__(self, variable): """Creates an AutoCastVariable instance. Args: variable: A floating-point resource variable to wrap. Raises: ValueError: If `variable` is not a floating-point resource variable. """ if not resource_variable_ops.is_resource_variable(variable): raise ValueError('variable must be of type tf.ResourceVariable, but got: ' '%s' % variable) if not variable.dtype.is_floating: raise ValueError('variable must be a floating point variable but has ' 'type: %s' % variable.dtype.name) self._variable = variable # Delegate to the underlying variable for checkpointing.
self._gather_saveables_for_checkpoint = ( self._variable._gather_saveables_for_checkpoint) # pylint: disable=protected-access @property def name(self): return self._variable.name def _should_cast(self): """Returns True if this variable should be cast when accessed.""" g = ops.get_default_graph() # pylint:disable=protected-access return (g._auto_cast_variable_read_dtype is not None and self.true_dtype != g._auto_cast_variable_read_dtype) # pylint:enable=protected-access @property def dtype(self): """The dtype this variable will be cast to when read.""" if self._should_cast(): return ops.get_default_graph()._auto_cast_variable_read_dtype # pylint:disable=protected-access else: return self._variable.dtype @property def true_dtype(self): """The dtype of the underlying variable, before any casts are done.""" return self._variable.dtype def value(self): val = self._variable.value() if not self._should_cast(): return val # We colocate_with(None) to ignore the existing device constraints, so that # the cast is always done on the variable's device with ops.colocate_with(None, ignore_existing=True): with ops.device(val.device): return math_ops.cast(val, self.dtype) def read_value(self): val = self._variable.read_value() if not self._should_cast(): return val return math_ops.cast(val, self.dtype) def sparse_read(self, indices, name=None): """Reads the value of this variable sparsely, using `gather`.""" val = self._variable.sparse_read(indices, name=name) if not self._should_cast(): return val return math_ops.cast(val, self.dtype) def assign(self, value, use_locking=None, name=None, read_value=True): return self._variable.assign( value, use_locking=use_locking, name=name, read_value=read_value) def assign_add(self, delta, use_locking=None, name=None, read_value=True): return self._variable.assign_add( delta, use_locking=use_locking, name=name, read_value=read_value) def assign_sub(self, delta, use_locking=None, name=None, read_value=True): return self._variable.assign_sub( delta, use_locking=use_locking, name=name, read_value=read_value) # TODO(reedwm): Support assigning variables with tf.compat.v1.assign(), # var.scatter_add, etc. def __getattr__(self, name): return getattr(self._variable, name) def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False): """Converts this variable to a tensor.""" if not self._should_cast(): return ops.internal_convert_to_tensor(self._variable, dtype, name, as_ref) # TODO(reedwm): Support as_ref?
assert not as_ref if dtype is not None and not dtype.is_compatible_with(self.dtype): raise ValueError( 'Incompatible type conversion requested to type {!r} for variable ' 'of type {!r}'.format(dtype.name, self.dtype.name)) val = ops.internal_convert_to_tensor(self._variable, self._variable.dtype, name, as_ref=False) with ops.colocate_with(None, ignore_existing=True): with ops.device(val.device): return math_ops.cast(val, self.dtype) def _should_act_as_resource_variable(self): """Pass resource_variable_ops.is_resource_variable check.""" pass def __repr__(self): if context.executing_eagerly() and not self._in_graph_mode: repr_str = ("<AutoCastVariable '{v.name}' shape={v.shape} " 'dtype={v.dtype.name} true_dtype={v.true_dtype.name}, ' 'numpy={np_repr}>') return repr_str.format( v=self, np_repr=ops.numpy_text(self.read_value(), is_repr=True)) else: repr_str = ("<AutoCastVariable '{v.name}' shape={v.shape} " 'dtype={v.dtype.name} true_dtype={v.true_dtype.name}>') return repr_str.format(v=self) # Operator overloads: # Note we only overload operators that support floating-point types, as # non-float variables cannot be wrapped with an AutoCastVariable. # pylint: disable=multiple-statements def __add__(self, o): return self.value() + o def __radd__(self, o): return o + self.value() def __sub__(self, o): return self.value() - o def __rsub__(self, o): return o - self.value() def __mul__(self, o): return self.value() * o def __rmul__(self, o): return o * self.value() def __truediv__(self, o): return self.value() / o def __rtruediv__(self, o): return o / self.value() def __floordiv__(self, o): return self.value() // o def __rfloordiv__(self, o): return o // self.value() def __mod__(self, o): return self.value() % o def __rmod__(self, o): return o % self.value() def __lt__(self, o): return self.value() < o def __le__(self, o): return self.value() <= o def __gt__(self, o): return self.value() > o def __ge__(self, o): return self.value() >= o def __getitem__(self, o): return self.value()[o] def __pow__(self, o, modulo=None): return pow(self.value(), o, modulo) def __rpow__(self, o): return pow(o, self.value()) def __neg__(self): return -self.value() def __abs__(self): return abs(self.value()) def __div__(self, o): try: return self.value().__div__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __rdiv__(self, o): try: return self.value().__rdiv__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __matmul__(self, o): try: return self.value().__matmul__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __rmatmul__(self, o): try: return self.value().__rmatmul__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented # pylint: enable=multiple-statements ops.register_tensor_conversion_function( AutoCastVariable, AutoCastVariable._dense_var_to_tensor) # pylint:disable=protected-access ops.register_dense_tensor_like_type(AutoCastVariable) # We have DistributedVariable subclass to pass # isinstance(..., DistributedVariable) checks when wrapping a # DistributedVariable. # TODO(reedwm): We should not wrap DistributedVariable, but instead have # DistributedVariable wrap AutoCastVariable. Subclassing DistributedVariable is # messy, because we do not fully implement the interface of DistributedVariable. 
class AutoCastDistributedVariable(AutoCastVariable, distribute_values.DistributedVariable): """Version of AutoCastVariable that subclasses DistributedVariable.""" def __init__(self, variable): if not isinstance(variable, distribute_values.DistributedValues): raise ValueError('variable must be of type DistributedValues, ' 'but got: %s' % variable) super(AutoCastDistributedVariable, self).__init__(variable) def __repr__(self): return distribute_values.DistributedVariable.__repr__(self)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/mixed_precision/experimental/autocast_variable.py
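To separate the cast-on-read idea in `AutoCastVariable` from the TensorFlow graph internals it hooks into, here is a hedged, framework-free sketch; `autocast_to` and `CastOnRead` are illustrative names standing in for `_enable_auto_casting_variables` and the wrapper class, not real APIs:

```python
import contextlib
import numpy as np

_read_dtype = [None]  # stand-in for Graph._auto_cast_variable_read_dtype

@contextlib.contextmanager
def autocast_to(dtype):
    """Illustrative analogue of the _enable_auto_casting_variables scope."""
    old, _read_dtype[0] = _read_dtype[0], dtype
    try:
        yield
    finally:
        _read_dtype[0] = old

class CastOnRead(object):
    """Stores in one dtype, reads in the scope's dtype (cf. AutoCastVariable)."""

    def __init__(self, value, dtype=np.float32):
        self._value = np.asarray(value, dtype=dtype)

    @property
    def true_dtype(self):
        return self._value.dtype

    @property
    def dtype(self):
        return _read_dtype[0] or self._value.dtype

    def read(self):
        return self._value.astype(self.dtype)  # cast happens only on read

v = CastOnRead(1.0)
assert v.read().dtype == np.float32
with autocast_to(np.float16):
    assert v.read().dtype == np.float16  # reads are cast, storage is untouched
assert v.true_dtype == np.float32
```

As in the real class, storage stays in the true dtype and only reads are cast, which is what lets Keras layers keep float32 master weights while computing in float16.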
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains the loss scaling optimizer class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.framework import smart_cond from tensorflow.python.keras import backend from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.training.experimental import loss_scale as loss_scale_module from tensorflow.python.util.tf_export import keras_export class _UnwrapPreventer(object): """Wrapper that DistributionStrategy will not unwrap. Typically, DistributionStrategy will unwrap values when going from a cross- replica context to a replica context via `call_for_each_replica`. This class is a wrapper that DistributionStrategy will not unwrap, so it can be used to prevent it from unwrapping a value. TODO(reedwm): Find/implement a better way of preventing values from being unwrapped by DistributionStrategy. """ def __init__(self, value): self.value = value @keras_export('keras.mixed_precision.experimental.LossScaleOptimizer') class LossScaleOptimizer(optimizer_v2.OptimizerV2): """An optimizer that applies loss scaling. Loss scaling is a process that multiplies the loss by a multiplier called the loss scale, and divides each gradient by the same multiplier. The pseudocode for this process is: ``` loss = ... loss *= loss_scale grads = gradients(loss, vars) grads /= loss_scale ``` Mathematically, loss scaling has no effect, but can help avoid numerical underflow in intermediate gradients when float16 tensors are used. By multiplying the loss, each intermediate gradient will have the same multiplier applied. The loss scale can either be a fixed constant, chosen by the user, or be dynamically determined. Dynamically determining the loss scale is convenient as a loss scale does not have to be explicitly chosen. However, it reduces performance. This optimizer wraps another optimizer and applies loss scaling to it via a `LossScale`. Loss scaling is applied whenever gradients are computed, either through `minimize()` or `get_gradients()`. The loss scale is updated via `LossScale.update()` whenever gradients are applied, either through `minimize()` or `apply_gradients()`. For example: ```python opt = tf.keras.optimizers.SGD(0.1) opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(opt, "dynamic") # 'minimize' applies loss scaling to the loss and updates the loss scale. opt.minimize(loss_fn) ``` If a `tf.GradientTape` is used to compute gradients instead of `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, the loss and gradients must be scaled manually.
This can be done by calling `LossScaleOptimizer.get_scaled_loss` before passing the loss to `tf.GradientTape`, and `LossScaleOptimizer.get_unscaled_gradients` after computing the gradients with `tf.GradientTape`. For example: ```python opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(...) vars = ... with tf.GradientTape() as tape: loss = ... scaled_loss = opt.get_scaled_loss(loss) scaled_grads = tape.gradient(scaled_loss, vars) grads = opt.get_unscaled_gradients(scaled_grads) opt.apply_gradients(zip(grads, vars)) # Loss scale will be updated here ``` """ def __init__(self, opt, loss_scale): """Initializes this loss scale optimizer. Args: opt: The Optimizer instance to wrap. loss_scale: The loss scale to scale the loss and gradients. This can either be an int/float to use a fixed loss scale, the string "dynamic" to use dynamic loss scaling, or an instance of a LossScale. The string "dynamic" is equivalent to passing `DynamicLossScale()`, and passing an int/float is equivalent to passing a FixedLossScale with the given loss scale. """ if not isinstance(opt, optimizer_v2.OptimizerV2): raise ValueError('"opt" must be an instance of OptimizerV2, but got: %s' % opt) if hasattr(opt, 'clipnorm'): raise ValueError('LossScaleOptimizer does not support wrapping ' 'optimizers with a clipnorm. Optimizer %s has clipnorm ' '%s' % (opt, opt.clipnorm)) if hasattr(opt, 'clipvalue'): raise ValueError('LossScaleOptimizer does not support wrapping ' 'optimizers with a clipvalue. Optimizer %s has ' 'clipvalue %s' % (opt, opt.clipvalue)) self._optimizer = opt self._loss_scale = loss_scale_module.get(loss_scale) for weight in loss_scale_module.get_loss_scale_weights(self._loss_scale): # We cannot call `track_variable` in the LossScale class itself, because a # file outside of Keras cannot depend on a Keras file. Calling it here # instead is OK, because a variable only needs to be tracked if used with # a Keras class, and the only way to use LossScale with a Keras class is # through the LossScaleOptimizer. backend.track_variable(weight) self._track_trackable(self._optimizer, 'base_optimizer') self._track_trackable(self._loss_scale, 'loss_scale') # Needed because the superclass's __getattribute__ checks this. self._hyper = {} @property def loss_scale(self): """The `LossScale` instance associated with this optimizer.""" return self._loss_scale def get_scaled_loss(self, loss): """Scales the loss by the loss scale. This method is only needed if you compute gradients manually, e.g. with `tf.GradientTape`. In that case, call this method to scale the loss before passing the loss to `tf.GradientTape`. If you use `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss scaling is automatically applied and this method is unneeded. If this method is called, `get_unscaled_gradients` should also be called. See the `tf.keras.mixed_precision.experimental.LossScaleOptimizer` doc for an example. Args: loss: The loss, which will be multiplied by the loss scale. Can either be a tensor or a callable returning a tensor. Returns: `loss` multiplied by `LossScaleOptimizer.loss_scale()`. """ loss_scale = self._loss_scale() if callable(loss): def new_loss(): loss_val = loss() return loss_val * math_ops.cast(loss_scale, loss_val.dtype) return new_loss else: return loss * math_ops.cast(loss_scale, loss.dtype) def get_unscaled_gradients(self, grads): """Unscales the gradients by the loss scale. This method is only needed if you compute gradients manually, e.g. with `tf.GradientTape`.
In that case, call this method to unscale the gradients after computing them with `tf.GradientTape`. If you use `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss scaling is automatically applied and this method is unneeded. If this method is called, `get_scaled_loss` should also be called. See the `tf.keras.mixed_precision.experimental.LossScaleOptimizer` doc for an example. Args: grads: A list of tensors, each of which will be divided by the loss scale. Can have None values, which are ignored. Returns: A new list the same size as `grads`, where every non-None value in `grads` is divided by `LossScaleOptimizer.loss_scale()`. """ loss_scale = self._loss_scale() loss_scale_reciprocal = 1. / loss_scale return [g * math_ops.cast(loss_scale_reciprocal, g.dtype) if g is not None else None for g in grads] def _compute_gradients(self, loss, var_list, grad_loss=None): loss = self.get_scaled_loss(loss) grads_and_vars = self._optimizer._compute_gradients(loss, var_list, # pylint: disable=protected-access grad_loss) grads = [g for g, _ in grads_and_vars] variables = [v for _, v in grads_and_vars] unscaled_grads = self.get_unscaled_gradients(grads) return list(zip(unscaled_grads, variables)) def get_gradients(self, loss, params): loss = self.get_scaled_loss(loss) grads = self._optimizer.get_gradients(loss, params) return self.get_unscaled_gradients(grads) def apply_gradients(self, grads_and_vars, name=None): if distribution_strategy_context.in_cross_replica_context(): raise ValueError('apply_gradients() must be called in a replica context.') grads_and_vars = tuple(grads_and_vars) return distribution_strategy_context.get_replica_context().merge_call( self._apply_gradients_cross_replica, args=(grads_and_vars, name)) def _apply_gradients_cross_replica(self, distribution, grads_and_vars, name): grads = [g for g, _ in grads_and_vars] loss_scale_update_op, should_apply_grads = self._loss_scale.update(grads) def apply_fn(): # We do not want DistributionStrategy to unwrap any MirroredVariables in # grads_and_vars, because even in a replica context, the wrapped optimizer # expects mirrored variables. So we wrap the variables with an # _UnwrapPreventer, preventing DistributionStrategy from unwrapping the # MirroredVariables. wrapped_vars = _UnwrapPreventer([v for _, v in grads_and_vars]) return distribution.extended.call_for_each_replica( self._apply_gradients, args=(grads, wrapped_vars, name)) # Note: We must call this cond() in a cross-replica context. # DistributionStrategy does not support having a cond in a replica context # with a branch that calls `merge_call`, and self._optimizer.apply_gradients # calls `merge_call`. maybe_apply_op = smart_cond.smart_cond(should_apply_grads, apply_fn, control_flow_ops.no_op) return control_flow_ops.group(maybe_apply_op, loss_scale_update_op) def _apply_gradients(self, grads, wrapped_vars, name): return self._optimizer.apply_gradients(list(zip(grads, wrapped_vars.value)), name) @property def iterations(self): return self._optimizer.iterations @iterations.setter def iterations(self, variable): self._optimizer.iterations = variable # For the most part, we only expose methods in the base OptimizerV2, not # individual subclasses like Adam. However, although "learning_rate" and "lr" # properties are not part of the base OptimizerV2 class, they are part of most # subclasses, so we expose them here for convenience.
@property def learning_rate(self): return self._optimizer.learning_rate @learning_rate.setter def learning_rate(self, lr): self._optimizer.learning_rate = lr @property def lr(self): return self._optimizer.lr @lr.setter def lr(self, lr): self._optimizer.lr = lr def get_slot_names(self): """A list of names for this optimizer's slots.""" return self._optimizer.get_slot_names() # TODO(reedwm): Maybe merge this class's functionality into OptimizerV2. # TODO(reedwm): Maybe throw an error if mixed precision is used without this # optimizer being used. # TODO(reedwm): Implement get_config and from_config. This will first require # implementing deserialization support for OptimizerV2. def get_config(self): raise NotImplementedError('get_config() is not yet implemented for ' 'LossScaleOptimizers') @classmethod def from_config(cls, config, custom_objects=None): raise NotImplementedError('from_config() is not yet implemented for ' 'LossScaleOptimizers')
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer.py
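The scale-then-unscale arithmetic that `LossScaleOptimizer` wraps around the inner optimizer can be checked in isolation. A minimal numerical sketch of the docstring's pseudocode, using a finite-difference gradient and hypothetical values:

```python
import numpy as np

def scaled_gradient(loss_fn, w, loss_scale, eps=1e-6):
    """Finite-difference gradient of (loss_scale * loss_fn), then unscaled.

    Mirrors the pseudocode in the class docstring: loss *= scale,
    grads = d(loss)/dw, grads /= scale. Purely illustrative.
    """
    scaled = lambda x: loss_scale * loss_fn(x)
    g = (scaled(w + eps) - scaled(w - eps)) / (2 * eps)  # gradient of scaled loss
    return g / loss_scale                                # unscale: same value in exact math

loss_fn = lambda w: 0.5 * w ** 2   # d/dw = w
print(scaled_gradient(loss_fn, 3.0, 1024.0))  # ~3.0, identical to the unscaled gradient
```

In exact arithmetic the round trip is the identity; the payoff only appears in float16, where intermediate gradients of the *unscaled* loss would underflow while the scaled ones stay representable.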
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Utilities related to loss functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.framework import ops from tensorflow.python.keras import backend as K from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.losses import loss_reduction from tensorflow.python.ops.losses import util as tf_losses_utils from tensorflow.python.util.tf_export import keras_export # TODO(joshl/psv): Update references to ReductionV2 to point to its # new location. ReductionV2 = keras_export( # pylint: disable=invalid-name 'keras.losses.Reduction', v1=[])(loss_reduction.ReductionV2) def _safe_mean(losses, num_present): """Computes a safe mean of the losses. Args: losses: `Tensor` whose elements contain individual loss measurements. num_present: The number of measurable elements in `losses`. Returns: A scalar representing the mean of `losses`. If `num_present` is zero, then zero is returned. """ total_loss = math_ops.reduce_sum(losses) return math_ops.div_no_nan(total_loss, num_present, name='value') def _num_elements(losses): """Computes the number of elements in `losses` tensor.""" with K.name_scope('num_elements') as scope: return math_ops.cast(array_ops.size(losses, name=scope), dtype=losses.dtype) def reduce_weighted_loss(weighted_losses, reduction=ReductionV2.SUM_OVER_BATCH_SIZE): """Reduces the individual weighted loss measurements.""" if reduction == ReductionV2.NONE: loss = weighted_losses else: loss = math_ops.reduce_sum(weighted_losses) if reduction == ReductionV2.SUM_OVER_BATCH_SIZE: loss = _safe_mean(loss, _num_elements(weighted_losses)) return loss def compute_weighted_loss(losses, sample_weight=None, reduction=ReductionV2.SUM_OVER_BATCH_SIZE, name=None): """Computes the weighted loss. Args: losses: `Tensor` of shape `[batch_size, d1, ... dN]`. sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as `losses`, or broadcastable to `losses`. reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `SUM_OVER_BATCH_SIZE`. name: Optional name for the op. Raises: ValueError: If the shape of `sample_weight` is not compatible with `losses`. Returns: Weighted loss `Tensor` of the same type as `losses`. If `reduction` is `NONE`, this has the same shape as `losses`; otherwise, it is scalar. """ ReductionV2.validate(reduction) # If this function is called directly, then we just default 'AUTO' to # 'SUM_OVER_BATCH_SIZE'. E.g. canned estimator use cases.
if reduction == ReductionV2.AUTO: reduction = ReductionV2.SUM_OVER_BATCH_SIZE if sample_weight is None: sample_weight = 1.0 with K.name_scope(name or 'weighted_loss'): # Save the `reduction` argument for loss normalization when distributing # to multiple replicas. Used only for estimator + v1 optimizer flow. ops.get_default_graph()._last_loss_reduction = reduction # pylint: disable=protected-access losses = ops.convert_to_tensor(losses) input_dtype = losses.dtype weighted_losses = tf_losses_utils.scale_losses_by_sample_weight( losses, sample_weight) # Apply reduction function to the individual weighted losses. loss = reduce_weighted_loss(weighted_losses, reduction) # Convert the result back to the input type. loss = math_ops.cast(loss, input_dtype) return loss def scale_loss_for_distribution(loss_value): """Scales and returns the given loss value by the number of replicas.""" num_replicas = ( distribution_strategy_context.get_strategy().num_replicas_in_sync) if num_replicas > 1: loss_value *= (1. / num_replicas) return loss_value
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/utils/losses_utils.py
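A small NumPy sketch of the reduction semantics implemented above: `NONE` keeps per-element losses, `SUM` totals them, and `SUM_OVER_BATCH_SIZE` divides the total by the element count with the same empty-input guard as `_safe_mean` (`div_no_nan` yields 0 when `num_present` is 0). The function name is illustrative:

```python
import numpy as np

def reduce_weighted_loss_np(weighted_losses, reduction='sum_over_batch_size'):
    """Illustrative analogue of reduce_weighted_loss/_safe_mean."""
    if reduction == 'none':
        return weighted_losses          # per-element losses, unreduced
    total = np.sum(weighted_losses)
    if reduction == 'sum_over_batch_size':
        n = weighted_losses.size
        return total / n if n else 0.0  # div_no_nan: empty input yields 0, not NaN
    return total                        # plain 'sum'

losses = np.array([1.0, 2.0, 3.0])
print(reduce_weighted_loss_np(losses, 'sum'))   # 6.0
print(reduce_weighted_loss_np(losses))          # 2.0
print(reduce_weighted_loss_np(np.array([])))    # 0.0
```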
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras model mode constants.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.python.saved_model.model_utils.mode_keys import KerasModeKeys as ModeKeys # pylint: enable=unused-import
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/utils/mode_keys.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Utilities related to layer/model functionality. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.keras import backend as K from tensorflow.python.keras.utils.conv_utils import convert_kernel from tensorflow.python.util import nest from tensorflow.python.util import object_identity from tensorflow.python.util.tf_export import keras_export @keras_export('keras.utils.get_source_inputs') def get_source_inputs(tensor, layer=None, node_index=None): """Returns the list of input tensors necessary to compute `tensor`. Output will always be a list of tensors (potentially with 1 element). Arguments: tensor: The tensor to start from. layer: Origin layer of the tensor. Will be determined via tensor._keras_history if not provided. node_index: Origin node index of the tensor. Returns: List of input tensors. """ if not hasattr(tensor, '_keras_history'): return tensor if layer is None or node_index: layer, node_index, _ = tensor._keras_history if not layer._inbound_nodes: return [tensor] else: node = layer._inbound_nodes[node_index] if not node.inbound_layers: # Reached an Input layer, stop recursion. return nest.flatten(node.input_tensors) else: source_tensors = [] for layer, node_index, _, tensor in node.iterate_inbound(): previous_sources = get_source_inputs(tensor, layer, node_index) # Avoid input redundancy. for x in previous_sources: if all(x is not t for t in source_tensors): source_tensors.append(x) return source_tensors def count_params(weights): """Count the total number of scalars composing the weights. Arguments: weights: An iterable containing the weights on which to compute params Returns: The total number of scalars composing the weights """ return int( sum( np.prod(p.shape.as_list()) for p in object_identity.ObjectIdentitySet(weights))) def print_summary(model, line_length=None, positions=None, print_fn=None): """Prints a summary of a model. Arguments: model: Keras model instance. line_length: Total length of printed lines (e.g. set this to adapt the display to different terminal window sizes). positions: Relative or absolute positions of log elements in each line. If not provided, defaults to `[.33, .55, .67, 1.]`. print_fn: Print function to use. It will be called on each line of the summary. You can set it to a custom function in order to capture the string summary. It defaults to `print` (prints to stdout). """ if print_fn is None: print_fn = print if model.__class__.__name__ == 'Sequential': sequential_like = True elif not model._is_graph_network: # We treat subclassed models as a simple sequence of layers, for logging # purposes. 
sequential_like = True else: sequential_like = True nodes_by_depth = model._nodes_by_depth.values() nodes = [] for v in nodes_by_depth: if (len(v) > 1) or (len(v) == 1 and len(nest.flatten(v[0].inbound_layers)) > 1): # if the model has multiple nodes # or if the nodes have multiple inbound_layers # the model is no longer sequential sequential_like = False break nodes += v if sequential_like: # search for shared layers for layer in model.layers: flag = False for node in layer._inbound_nodes: if node in nodes: if flag: sequential_like = False break else: flag = True if not sequential_like: break if sequential_like: line_length = line_length or 65 positions = positions or [.45, .85, 1.] if positions[-1] <= 1: positions = [int(line_length * p) for p in positions] # header names for the different log elements to_display = ['Layer (type)', 'Output Shape', 'Param #'] else: line_length = line_length or 98 positions = positions or [.33, .55, .67, 1.] if positions[-1] <= 1: positions = [int(line_length * p) for p in positions] # header names for the different log elements to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to'] relevant_nodes = [] for v in model._nodes_by_depth.values(): relevant_nodes += v def print_row(fields, positions): line = '' for i in range(len(fields)): if i > 0: line = line[:-1] + ' ' line += str(fields[i]) line = line[:positions[i]] line += ' ' * (positions[i] - len(line)) print_fn(line) print_fn('Model: "{}"'.format(model.name)) print_fn('_' * line_length) print_row(to_display, positions) print_fn('=' * line_length) def print_layer_summary(layer): """Prints a summary for a single layer. Arguments: layer: target layer. """ try: output_shape = layer.output_shape except AttributeError: output_shape = 'multiple' except RuntimeError: # output_shape unknown in Eager mode. output_shape = '?' name = layer.name cls_name = layer.__class__.__name__ fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params()] print_row(fields, positions) def print_layer_summary_with_connections(layer): """Prints a summary for a single layer (including topological connections). Arguments: layer: target layer. 
""" try: output_shape = layer.output_shape except AttributeError: output_shape = 'multiple' connections = [] for node in layer._inbound_nodes: if relevant_nodes and node not in relevant_nodes: # node is not part of the current network continue for inbound_layer, node_index, tensor_index, _ in node.iterate_inbound(): connections.append('{}[{}][{}]'.format(inbound_layer.name, node_index, tensor_index)) name = layer.name cls_name = layer.__class__.__name__ if not connections: first_connection = '' else: first_connection = connections[0] fields = [ name + ' (' + cls_name + ')', output_shape, layer.count_params(), first_connection ] print_row(fields, positions) if len(connections) > 1: for i in range(1, len(connections)): fields = ['', '', '', connections[i]] print_row(fields, positions) layers = model.layers for i in range(len(layers)): if sequential_like: print_layer_summary(layers[i]) else: print_layer_summary_with_connections(layers[i]) if i == len(layers) - 1: print_fn('=' * line_length) else: print_fn('_' * line_length) model._check_trainable_weights_consistency() if hasattr(model, '_collected_trainable_weights'): trainable_count = count_params(model._collected_trainable_weights) else: trainable_count = count_params(model._unique_trainable_weights) non_trainable_count = count_params(model.non_trainable_weights) print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count)) print_fn('Trainable params: {:,}'.format(trainable_count)) print_fn('Non-trainable params: {:,}'.format(non_trainable_count)) print_fn('_' * line_length) def gather_trainable_weights(trainable, sub_layers, extra_variables): """Lists the trainable weights for an object with sub-layers. Args: trainable: Whether the object collecting the variables is trainable. sub_layers: A flat list of Layer objects owned by this object, to collect variables from. extra_variables: Any extra variables to include. Their `.trainable` property is used to categorize them. Returns: A list of collected trainable weights/variables. """ if not trainable: return [] weights = [] for layer in sub_layers: weights += layer.trainable_weights trainable_extra_variables = [ v for v in extra_variables if v.trainable] return weights + trainable_extra_variables def gather_non_trainable_weights(trainable, sub_layers, extra_variables): """Lists the non-trainable weights for an object with sub-layers. Args: trainable: Whether the object collecting the variables is trainable. sub_layers: A flat list of Layer objects owned by this object, to collect variables from. extra_variables: Any extra variables to include. Their `.trainable` property is used to categorize them. Returns: A list of collected non-trainable weights/variables. """ trainable_extra_variables = [] non_trainable_extra_variables = [] for v in extra_variables: if v.trainable: trainable_extra_variables.append(v) else: non_trainable_extra_variables.append(v) weights = [] for layer in sub_layers: weights += layer.non_trainable_weights if not trainable: trainable_weights = [] for layer in sub_layers: trainable_weights += layer.trainable_weights return (trainable_weights + trainable_extra_variables + weights + non_trainable_extra_variables) return weights + non_trainable_extra_variables @keras_export('keras.utils.convert_all_kernels_in_model') def convert_all_kernels_in_model(model): """Converts all convolution kernels in a model from Theano to TensorFlow. Also works from TensorFlow to Theano. Arguments: model: target model for the conversion. 
""" # Note: SeparableConvolution not included # since only supported by TF. conv_classes = { 'Conv1D', 'Conv2D', 'Conv3D', 'Conv2DTranspose', } to_assign = [] for layer in model.layers: if layer.__class__.__name__ in conv_classes: original_kernel = K.get_value(layer.kernel) converted_kernel = convert_kernel(original_kernel) to_assign.append((layer.kernel, converted_kernel)) K.batch_set_value(to_assign) def convert_dense_weights_data_format(dense, previous_feature_map_shape, target_data_format='channels_first'): """Utility useful when changing a convnet's `data_format`. When porting the weights of a convnet from one data format to the other, if the convnet includes a `Flatten` layer (applied to the last convolutional feature map) followed by a `Dense` layer, the weights of that `Dense` layer should be updated to reflect the new dimension ordering. Arguments: dense: The target `Dense` layer. previous_feature_map_shape: A shape tuple of 3 integers, e.g. `(512, 7, 7)`. The shape of the convolutional feature map right before the `Flatten` layer that came before the target `Dense` layer. target_data_format: One of "channels_last", "channels_first". Set it "channels_last" if converting a "channels_first" model to "channels_last", or reciprocally. """ assert target_data_format in {'channels_last', 'channels_first'} kernel, bias = dense.get_weights() for i in range(kernel.shape[1]): if target_data_format == 'channels_first': c, h, w = previous_feature_map_shape original_fm_shape = (h, w, c) ki = kernel[:, i].reshape(original_fm_shape) ki = np.transpose(ki, (2, 0, 1)) # last -> first else: h, w, c = previous_feature_map_shape original_fm_shape = (c, h, w) ki = kernel[:, i].reshape(original_fm_shape) ki = np.transpose(ki, (1, 2, 0)) # first -> last kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),)) dense.set_weights([kernel, bias]) def is_builtin_layer(layer): if not getattr(layer, '_keras_api_names', None): return False # Subclasses of `Layer` that are not exported inherit the export name # of the base layer class. return (layer._keras_api_names != ('keras.layers.Layer',) and layer._keras_api_names_v1 != ('keras.layers.Layer',))
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/utils/layer_utils.py
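The reshape-transpose-reflatten loop at the heart of `convert_dense_weights_data_format` can be sanity-checked with plain NumPy. A sketch with a hypothetical `(c, h, w) = (2, 3, 4)` feature map, verifying that the converted kernel produces the same Dense output when the input is flattened channels_last instead of channels_first:

```python
import numpy as np

c, h, w = 2, 3, 4
units = 5
kernel = np.random.randn(c * h * w, units).astype(np.float32)

converted = kernel.copy()
for i in range(units):
    ki = kernel[:, i].reshape((c, h, w))  # undo the channels_first Flatten
    ki = np.transpose(ki, (1, 2, 0))      # (c, h, w) -> (h, w, c)
    converted[:, i] = ki.reshape(-1)      # re-flatten in channels_last order

# Same Dense output either way, since input and kernel get the same permutation:
fm = np.random.randn(c, h, w).astype(np.float32)
out_first = fm.reshape(-1) @ kernel
out_last = np.transpose(fm, (1, 2, 0)).reshape(-1) @ converted
print(np.allclose(out_first, out_last))  # True
```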
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for io_utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil import numpy as np import six from tensorflow.python import keras from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.utils import io_utils from tensorflow.python.platform import test try: import h5py # pylint:disable=g-import-not-at-top except ImportError: h5py = None def create_dataset(h5_path='test.h5'): x = np.random.randn(200, 10).astype('float32') y = np.random.randint(0, 2, size=(200, 1)) f = h5py.File(h5_path, 'w') # Creating dataset to store features x_dset = f.create_dataset('my_data', (200, 10), dtype='f') x_dset[:] = x # Creating dataset to store labels y_dset = f.create_dataset('my_labels', (200, 1), dtype='i') y_dset[:] = y f.close() class TestIOUtils(keras_parameterized.TestCase): # TODO(b/137965102): eventually support this in eager + the v2 loops @keras_parameterized.run_all_keras_modes(always_skip_eager=True) def test_HDF5Matrix(self): if h5py is None: return temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir) h5_path = os.path.join(temp_dir, 'test.h5') create_dataset(h5_path) # Instantiating HDF5Matrix for the training set, # which is a slice of the first 150 elements x_train = io_utils.HDF5Matrix(h5_path, 'my_data', start=0, end=150) y_train = io_utils.HDF5Matrix(h5_path, 'my_labels', start=0, end=150) # Likewise for the test set x_test = io_utils.HDF5Matrix(h5_path, 'my_data', start=150, end=200) y_test = io_utils.HDF5Matrix(h5_path, 'my_labels', start=150, end=200) # HDF5Matrix behaves more or less like NumPy matrices # with regard to indexing self.assertEqual(y_train.shape, (150, 1)) # But they do not support negative indices, so don't try print(x_train[-1]) self.assertEqual(y_train.dtype, np.dtype('i')) self.assertEqual(y_train.ndim, 2) self.assertEqual(y_train.size, 150) model = keras.models.Sequential() model.add(keras.layers.Dense(64, input_shape=(10,), activation='relu')) model.add(keras.layers.Dense(1, activation='sigmoid')) model.compile( loss='binary_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # Note: you have to use shuffle='batch' or False with HDF5Matrix model.fit(x_train, y_train, batch_size=32, shuffle='batch', verbose=False) # test that evaluation and prediction # don't crash and return reasonable results out_pred = model.predict(x_test, batch_size=32, verbose=False) out_eval = model.evaluate(x_test, y_test, batch_size=32, verbose=False) self.assertEqual(out_pred.shape, (50, 1)) self.assertEqual(out_eval.shape, ()) self.assertGreater(out_eval, 0) # test slicing for shortened array self.assertEqual(len(x_train[0:]),
len(x_train)) # test __getitem__ invalid use cases with self.assertRaises(IndexError): _ = x_train[1000] with self.assertRaises(IndexError): _ = x_train[1000: 1001] with self.assertRaises(IndexError): _ = x_train[[1000, 1001]] with self.assertRaises(IndexError): _ = x_train[six.moves.range(1000, 1001)] with self.assertRaises(IndexError): _ = x_train[np.array([1000])] with self.assertRaises(TypeError): _ = x_train[None] # test normalizer normalizer = lambda x: x + 1 normalized_x_train = io_utils.HDF5Matrix( h5_path, 'my_data', start=0, end=150, normalizer=normalizer) self.assertAllClose(normalized_x_train[0][0], x_train[0][0] + 1) def test_ask_to_proceed_with_overwrite(self): with test.mock.patch.object(six.moves, 'input') as mock_log: mock_log.return_value = 'y' self.assertTrue(io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists')) mock_log.return_value = 'n' self.assertFalse( io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists')) mock_log.side_effect = ['m', 'y'] self.assertTrue(io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists')) mock_log.side_effect = ['m', 'n'] self.assertFalse( io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists')) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/utils/io_utils_test.py
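Most of the test above exercises the start/end windowing that `HDF5Matrix` layers over an HDF5 dataset. A minimal h5py sketch of that idea; `LazyH5Slice` is an illustrative stand-in, not the Keras implementation:

```python
import numpy as np
import h5py

class LazyH5Slice(object):
    """Window [start, end) over an HDF5 dataset, read lazily (cf. HDF5Matrix)."""

    def __init__(self, path, dataset, start=0, end=None):
        self._f = h5py.File(path, 'r')  # keep a reference so the file stays open
        self.data = self._f[dataset]
        self.start = start
        self.end = self.data.shape[0] if end is None else end

    def __len__(self):
        return self.end - self.start

    def __getitem__(self, key):
        if isinstance(key, slice):
            start = self.start + (key.start or 0)
            stop = self.end if key.stop is None else self.start + key.stop
            if stop > self.end:
                raise IndexError(key)
            return self.data[start:stop]
        if not 0 <= key < len(self):  # no negative indices, as the test notes
            raise IndexError(key)
        return self.data[self.start + key]

with h5py.File('demo.h5', 'w') as f:
    f.create_dataset('my_data', data=np.arange(200.0).reshape(200, 1))

x_train = LazyH5Slice('demo.h5', 'my_data', start=0, end=150)
print(len(x_train), x_train[0:][0], x_train[149])  # 150 [0.] [149.]
```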
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Python utilities required by Keras.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import binascii import codecs import marshal import os import re import sys import time import types as python_types import numpy as np import six from tensorflow.python.util import nest from tensorflow.python.util import tf_decorator from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import keras_export _GLOBAL_CUSTOM_OBJECTS = {} @keras_export('keras.utils.CustomObjectScope') class CustomObjectScope(object): """Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape. Code within a `with` statement will be able to access custom objects by name. Changes to global custom objects persist within the enclosing `with` statement. At end of the `with` statement, global custom objects are reverted to state at beginning of the `with` statement. Example: Consider a custom object `MyObject` (e.g. a class): ```python with CustomObjectScope({'MyObject':MyObject}): layer = Dense(..., kernel_regularizer='MyObject') # save, load, etc. will recognize custom object by name ``` """ def __init__(self, *args): self.custom_objects = args self.backup = None def __enter__(self): self.backup = _GLOBAL_CUSTOM_OBJECTS.copy() for objects in self.custom_objects: _GLOBAL_CUSTOM_OBJECTS.update(objects) return self def __exit__(self, *args, **kwargs): _GLOBAL_CUSTOM_OBJECTS.clear() _GLOBAL_CUSTOM_OBJECTS.update(self.backup) @keras_export('keras.utils.custom_object_scope') def custom_object_scope(*args): """Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape. Convenience wrapper for `CustomObjectScope`. Code within a `with` statement will be able to access custom objects by name. Changes to global custom objects persist within the enclosing `with` statement. At end of the `with` statement, global custom objects are reverted to state at beginning of the `with` statement. Example: Consider a custom object `MyObject` ```python with custom_object_scope({'MyObject':MyObject}): layer = Dense(..., kernel_regularizer='MyObject') # save, load, etc. will recognize custom object by name ``` Arguments: *args: Variable length list of dictionaries of name, class pairs to add to custom objects. Returns: Object of type `CustomObjectScope`. """ return CustomObjectScope(*args) @keras_export('keras.utils.get_custom_objects') def get_custom_objects(): """Retrieves a live reference to the global dictionary of custom objects. Updating and clearing custom objects using `custom_object_scope` is preferred, but `get_custom_objects` can be used to directly access `_GLOBAL_CUSTOM_OBJECTS`. Example: ```python get_custom_objects().clear() get_custom_objects()['MyObject'] = MyObject ``` Returns: Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`). 
""" return _GLOBAL_CUSTOM_OBJECTS def serialize_keras_class_and_config(cls_name, cls_config): """Returns the serialization of the class with the given config.""" return {'class_name': cls_name, 'config': cls_config} @keras_export('keras.utils.serialize_keras_object') def serialize_keras_object(instance): _, instance = tf_decorator.unwrap(instance) if instance is None: return None if hasattr(instance, 'get_config'): return serialize_keras_class_and_config(instance.__class__.__name__, instance.get_config()) if hasattr(instance, '__name__'): return instance.__name__ raise ValueError('Cannot serialize', instance) def class_and_config_for_serialized_keras_object( config, module_objects=None, custom_objects=None, printable_module_name='object'): """Returns the class name and config for a serialized keras object.""" if (not isinstance(config, dict) or 'class_name' not in config or 'config' not in config): raise ValueError('Improper config format: ' + str(config)) class_name = config['class_name'] if custom_objects and class_name in custom_objects: cls = custom_objects[class_name] elif class_name in _GLOBAL_CUSTOM_OBJECTS: cls = _GLOBAL_CUSTOM_OBJECTS[class_name] else: module_objects = module_objects or {} cls = module_objects.get(class_name) if cls is None: raise ValueError('Unknown ' + printable_module_name + ': ' + class_name) return (cls, config['config']) @keras_export('keras.utils.deserialize_keras_object') def deserialize_keras_object(identifier, module_objects=None, custom_objects=None, printable_module_name='object'): if identifier is None: return None if isinstance(identifier, dict): # In this case we are dealing with a Keras config dictionary. config = identifier (cls, cls_config) = class_and_config_for_serialized_keras_object( config, module_objects, custom_objects, printable_module_name) if hasattr(cls, 'from_config'): arg_spec = tf_inspect.getfullargspec(cls.from_config) custom_objects = custom_objects or {} if 'custom_objects' in arg_spec.args: return cls.from_config( cls_config, custom_objects=dict( list(_GLOBAL_CUSTOM_OBJECTS.items()) + list(custom_objects.items()))) with CustomObjectScope(custom_objects): return cls.from_config(cls_config) else: # Then `cls` may be a function returning a class. # in this case by convention `config` holds # the kwargs of the function. custom_objects = custom_objects or {} with CustomObjectScope(custom_objects): return cls(**cls_config) elif isinstance(identifier, six.string_types): object_name = identifier if custom_objects and object_name in custom_objects: obj = custom_objects.get(object_name) elif object_name in _GLOBAL_CUSTOM_OBJECTS: obj = _GLOBAL_CUSTOM_OBJECTS[object_name] else: obj = module_objects.get(object_name) if obj is None: raise ValueError('Unknown ' + printable_module_name + ':' + object_name) # Classes passed by name are instantiated with no args, functions are # returned as-is. if tf_inspect.isclass(obj): return obj() return obj else: raise ValueError('Could not interpret serialized ' + printable_module_name + ': ' + identifier) def func_dump(func): """Serializes a user defined function. Arguments: func: the function to serialize. Returns: A tuple `(code, defaults, closure)`. 
""" if os.name == 'nt': raw_code = marshal.dumps(func.__code__).replace(b'\\', b'/') code = codecs.encode(raw_code, 'base64').decode('ascii') else: raw_code = marshal.dumps(func.__code__) code = codecs.encode(raw_code, 'base64').decode('ascii') defaults = func.__defaults__ if func.__closure__: closure = tuple(c.cell_contents for c in func.__closure__) else: closure = None return code, defaults, closure def func_load(code, defaults=None, closure=None, globs=None): """Deserializes a user defined function. Arguments: code: bytecode of the function. defaults: defaults of the function. closure: closure of the function. globs: dictionary of global objects. Returns: A function object. """ if isinstance(code, (tuple, list)): # unpack previous dump code, defaults, closure = code if isinstance(defaults, list): defaults = tuple(defaults) def ensure_value_to_cell(value): """Ensures that a value is converted to a python cell object. Arguments: value: Any value that needs to be casted to the cell type Returns: A value wrapped as a cell object (see function "func_load") """ def dummy_fn(): # pylint: disable=pointless-statement value # just access it so it gets captured in .__closure__ cell_value = dummy_fn.__closure__[0] if not isinstance(value, type(cell_value)): return cell_value return value if closure is not None: closure = tuple(ensure_value_to_cell(_) for _ in closure) try: raw_code = codecs.decode(code.encode('ascii'), 'base64') except (UnicodeEncodeError, binascii.Error): raw_code = code.encode('raw_unicode_escape') code = marshal.loads(raw_code) if globs is None: globs = globals() return python_types.FunctionType( code, globs, name=code.co_name, argdefs=defaults, closure=closure) def has_arg(fn, name, accept_all=False): """Checks if a callable accepts a given keyword argument. Arguments: fn: Callable to inspect. name: Check if `fn` can be called with `name` as a keyword argument. accept_all: What to return if there is no parameter called `name` but the function accepts a `**kwargs` argument. Returns: bool, whether `fn` accepts a `name` keyword argument. """ arg_spec = tf_inspect.getfullargspec(fn) if accept_all and arg_spec.varkw is not None: return True return name in arg_spec.args @keras_export('keras.utils.Progbar') class Progbar(object): """Displays a progress bar. Arguments: target: Total number of steps expected, None if unknown. width: Progress bar width on screen. verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose) stateful_metrics: Iterable of string names of metrics that should *not* be averaged over time. Metrics in this list will be displayed as-is. All others will be averaged by the progbar before display. interval: Minimum visual progress update interval (in seconds). unit_name: Display name for step counts (usually "step" or "sample"). 
""" def __init__(self, target, width=30, verbose=1, interval=0.05, stateful_metrics=None, unit_name='step'): self.target = target self.width = width self.verbose = verbose self.interval = interval self.unit_name = unit_name if stateful_metrics: self.stateful_metrics = set(stateful_metrics) else: self.stateful_metrics = set() self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()) or 'ipykernel' in sys.modules or 'posix' in sys.modules or 'PYCHARM_HOSTED' in os.environ) self._total_width = 0 self._seen_so_far = 0 # We use a dict + list to avoid garbage collection # issues found in OrderedDict self._values = {} self._values_order = [] self._start = time.time() self._last_update = 0 def update(self, current, values=None): """Updates the progress bar. Arguments: current: Index of current step. values: List of tuples: `(name, value_for_last_step)`. If `name` is in `stateful_metrics`, `value_for_last_step` will be displayed as-is. Else, an average of the metric over time will be displayed. """ values = values or [] for k, v in values: if k not in self._values_order: self._values_order.append(k) if k not in self.stateful_metrics: if k not in self._values: self._values[k] = [v * (current - self._seen_so_far), current - self._seen_so_far] else: self._values[k][0] += v * (current - self._seen_so_far) self._values[k][1] += (current - self._seen_so_far) else: # Stateful metrics output a numeric value. This representation # means "take an average from a single value" but keeps the # numeric formatting. self._values[k] = [v, 1] self._seen_so_far = current now = time.time() info = ' - %.0fs' % (now - self._start) if self.verbose == 1: if (now - self._last_update < self.interval and self.target is not None and current < self.target): return prev_total_width = self._total_width if self._dynamic_display: sys.stdout.write('\b' * prev_total_width) sys.stdout.write('\r') else: sys.stdout.write('\n') if self.target is not None: numdigits = int(np.log10(self.target)) + 1 bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target) prog = float(current) / self.target prog_width = int(self.width * prog) if prog_width > 0: bar += ('=' * (prog_width - 1)) if current < self.target: bar += '>' else: bar += '=' bar += ('.' 
* (self.width - prog_width)) bar += ']' else: bar = '%7d/Unknown' % current self._total_width = len(bar) sys.stdout.write(bar) if current: time_per_unit = (now - self._start) / current else: time_per_unit = 0 if self.target is not None and current < self.target: eta = time_per_unit * (self.target - current) if eta > 3600: eta_format = '%d:%02d:%02d' % (eta // 3600, (eta % 3600) // 60, eta % 60) elif eta > 60: eta_format = '%d:%02d' % (eta // 60, eta % 60) else: eta_format = '%ds' % eta info = ' - ETA: %s' % eta_format else: if time_per_unit >= 1 or time_per_unit == 0: info += ' %.0fs/%s' % (time_per_unit, self.unit_name) elif time_per_unit >= 1e-3: info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name) else: info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name) for k in self._values_order: info += ' - %s:' % k if isinstance(self._values[k], list): avg = np.mean(self._values[k][0] / max(1, self._values[k][1])) if abs(avg) > 1e-3: info += ' %.4f' % avg else: info += ' %.4e' % avg else: info += ' %s' % self._values[k] self._total_width += len(info) if prev_total_width > self._total_width: info += (' ' * (prev_total_width - self._total_width)) if self.target is not None and current >= self.target: info += '\n' sys.stdout.write(info) sys.stdout.flush() elif self.verbose == 2: if self.target is not None and current >= self.target: numdigits = int(np.log10(self.target)) + 1 count = ('%' + str(numdigits) + 'd/%d') % (current, self.target) info = count + info for k in self._values_order: info += ' - %s:' % k avg = np.mean(self._values[k][0] / max(1, self._values[k][1])) if avg > 1e-3: info += ' %.4f' % avg else: info += ' %.4e' % avg info += '\n' sys.stdout.write(info) sys.stdout.flush() self._last_update = now def add(self, n, values=None): self.update(self._seen_so_far + n, values) def make_batches(size, batch_size): """Returns a list of batch indices (tuples of indices). Arguments: size: Integer, total size of the data to slice into batches. batch_size: Integer, batch size. Returns: A list of tuples of array indices. """ num_batches = int(np.ceil(size / float(batch_size))) return [(i * batch_size, min(size, (i + 1) * batch_size)) for i in range(0, num_batches)] def slice_arrays(arrays, start=None, stop=None): """Slice an array or list of arrays. This takes an array-like, or a list of array-likes, and outputs: - arrays[start:stop] if `arrays` is an array-like - [x[start:stop] for x in arrays] if `arrays` is a list Can also work on list/array of indices: `slice_arrays(x, indices)` Arguments: arrays: Single array or list of arrays. start: can be an integer index (start index) or a list/array of indices stop: integer (stop index); should be None if `start` was a list. Returns: A slice of the array(s). Raises: ValueError: If the value of start is a list and stop is not None. 
""" if arrays is None: return [None] if isinstance(start, list) and stop is not None: raise ValueError('The stop argument has to be None if the value of start ' 'is a list.') elif isinstance(arrays, list): if hasattr(start, '__len__'): # hdf5 datasets only support list objects as indices if hasattr(start, 'shape'): start = start.tolist() return [None if x is None else x[start] for x in arrays] return [ None if x is None else None if not hasattr(x, '__getitem__') else x[start:stop] for x in arrays ] else: if hasattr(start, '__len__'): if hasattr(start, 'shape'): start = start.tolist() return arrays[start] if hasattr(start, '__getitem__'): return arrays[start:stop] return [None] def to_list(x): """Normalizes a list/tensor into a list. If a tensor is passed, we return a list of size 1 containing the tensor. Arguments: x: target object to be normalized. Returns: A list. """ if isinstance(x, list): return x return [x] def object_list_uid(object_list): """Creates a single string from object ids.""" object_list = nest.flatten(object_list) return ', '.join([str(abs(id(x))) for x in object_list]) def to_snake_case(name): intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name) insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower() # If the class is private the name starts with "_" which is not secure # for creating scopes. We prefix the name with "private" in this case. if insecure[0] != '_': return insecure return 'private' + insecure def is_all_none(structure): iterable = nest.flatten(structure) # We cannot use Python's `any` because the iterable may return Tensors. for element in iterable: if element is not None: return False return True def check_for_unexpected_keys(name, input_dict, expected_values): unknown = set(input_dict.keys()).difference(expected_values) if unknown: raise ValueError('Unknown entries in {} dictionary: {}. Only expected ' 'following keys: {}'.format(name, list(unknown), expected_values)) def validate_kwargs(kwargs, allowed_kwargs, error_message='Keyword argument not understood:'): """Checks that all keyword arguments are in the set of allowed keys.""" for kwarg in kwargs: if kwarg not in allowed_kwargs: raise TypeError(error_message, kwarg)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/utils/generic_utils.py
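# A minimal Progbar usage sketch, assuming only the constructor and update()
# shown in the file above; the sleep is a stand-in for real per-step work.
import time
from tensorflow.python.keras.utils.generic_utils import Progbar

bar = Progbar(target=5, unit_name='batch')
for step in range(5):
    time.sleep(0.01)  # placeholder workload
    # Metrics not named in `stateful_metrics` are averaged across updates.
    bar.update(step + 1, values=[('loss', 1.0 / (step + 1))])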
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras generic Python utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import keras from tensorflow.python.platform import test class HasArgTest(test.TestCase): def test_has_arg(self): def f_x(x): return x def f_x_args(x, *args): _ = args return x def f_x_kwargs(x, **kwargs): _ = kwargs return x self.assertTrue(keras.utils.generic_utils.has_arg( f_x, 'x', accept_all=False)) self.assertFalse(keras.utils.generic_utils.has_arg( f_x, 'y', accept_all=False)) self.assertTrue(keras.utils.generic_utils.has_arg( f_x_args, 'x', accept_all=False)) self.assertFalse(keras.utils.generic_utils.has_arg( f_x_args, 'y', accept_all=False)) self.assertTrue(keras.utils.generic_utils.has_arg( f_x_kwargs, 'x', accept_all=False)) self.assertFalse(keras.utils.generic_utils.has_arg( f_x_kwargs, 'y', accept_all=False)) self.assertTrue(keras.utils.generic_utils.has_arg( f_x_kwargs, 'y', accept_all=True)) class TestCustomObjectScope(test.TestCase): def test_custom_object_scope(self): def custom_fn(): pass class CustomClass(object): pass with keras.utils.generic_utils.custom_object_scope( {'CustomClass': CustomClass, 'custom_fn': custom_fn}): act = keras.activations.get('custom_fn') self.assertEqual(act, custom_fn) cl = keras.regularizers.get('CustomClass') self.assertEqual(cl.__class__, CustomClass) class SerializeKerasObjectTest(test.TestCase): def test_serialize_none(self): serialized = keras.utils.generic_utils.serialize_keras_object(None) self.assertEqual(serialized, None) deserialized = keras.utils.generic_utils.deserialize_keras_object( serialized) self.assertEqual(deserialized, None) class SliceArraysTest(test.TestCase): def test_slice_arrays(self): input_a = list([1, 2, 3]) self.assertEqual( keras.utils.generic_utils.slice_arrays(input_a, start=0), [None, None, None]) self.assertEqual( keras.utils.generic_utils.slice_arrays(input_a, stop=3), [None, None, None]) self.assertEqual( keras.utils.generic_utils.slice_arrays(input_a, start=0, stop=1), [None, None, None]) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/utils/generic_utils_test.py
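# Sketch of the two utilities exercised by the tests above: has_arg()
# inspects a callable's signature, and custom_object_scope() lets Keras look
# up user-defined objects by name during deserialization. MyRegularizer is a
# hypothetical stand-in.
from tensorflow.python import keras

def scaled(x, scale=2.0):
    return x * scale

assert keras.utils.generic_utils.has_arg(scaled, 'scale', accept_all=False)

class MyRegularizer(object):
    pass

with keras.utils.generic_utils.custom_object_scope(
    {'MyRegularizer': MyRegularizer}):
    reg = keras.regularizers.get('MyRegularizer')
assert reg.__class__ is MyRegularizer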
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for np_utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.platform import test class TestNPUtils(test.TestCase): def test_to_categorical(self): num_classes = 5 shapes = [(1,), (3,), (4, 3), (5, 4, 3), (3, 1), (3, 2, 1)] expected_shapes = [(1, num_classes), (3, num_classes), (4, 3, num_classes), (5, 4, 3, num_classes), (3, num_classes), (3, 2, num_classes)] labels = [np.random.randint(0, num_classes, shape) for shape in shapes] one_hots = [ keras.utils.to_categorical(label, num_classes) for label in labels] for label, one_hot, expected_shape in zip(labels, one_hots, expected_shapes): # Check shape self.assertEqual(one_hot.shape, expected_shape) # Make sure there is only one 1 in a row self.assertTrue(np.all(one_hot.sum(axis=-1) == 1)) # Get original labels back from one hots self.assertTrue(np.all( np.argmax(one_hot, -1).reshape(label.shape) == label)) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/utils/np_utils_test.py
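# The one-hot behavior checked above, in miniature: to_categorical() appends
# a num_classes-wide axis, and argmax along it recovers the original labels.
import numpy as np
from tensorflow.python import keras

labels = np.array([0, 2, 1])
one_hot = keras.utils.to_categorical(labels, num_classes=3)
assert one_hot.shape == (3, 3)
assert np.all(one_hot.argmax(axis=-1) == labels)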
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for data_utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from itertools import cycle import os import tarfile import threading import unittest import zipfile import numpy as np from six.moves.urllib.parse import urljoin from six.moves.urllib.request import pathname2url from tensorflow.python import keras from tensorflow.python.platform import test class TestGetFileAndValidateIt(test.TestCase): def test_get_file_and_validate_it(self): """Tests get_file from a url, plus extraction and validation. """ dest_dir = self.get_temp_dir() orig_dir = self.get_temp_dir() text_file_path = os.path.join(orig_dir, 'test.txt') zip_file_path = os.path.join(orig_dir, 'test.zip') tar_file_path = os.path.join(orig_dir, 'test.tar.gz') with open(text_file_path, 'w') as text_file: text_file.write('Float like a butterfly, sting like a bee.') with tarfile.open(tar_file_path, 'w:gz') as tar_file: tar_file.add(text_file_path) with zipfile.ZipFile(zip_file_path, 'w') as zip_file: zip_file.write(text_file_path) origin = urljoin('file://', pathname2url(os.path.abspath(tar_file_path))) path = keras.utils.data_utils.get_file('test.txt', origin, untar=True, cache_subdir=dest_dir) filepath = path + '.tar.gz' hashval_sha256 = keras.utils.data_utils._hash_file(filepath) hashval_md5 = keras.utils.data_utils._hash_file(filepath, algorithm='md5') path = keras.utils.data_utils.get_file( 'test.txt', origin, md5_hash=hashval_md5, untar=True, cache_subdir=dest_dir) path = keras.utils.data_utils.get_file( filepath, origin, file_hash=hashval_sha256, extract=True, cache_subdir=dest_dir) self.assertTrue(os.path.exists(filepath)) self.assertTrue(keras.utils.data_utils.validate_file(filepath, hashval_sha256)) self.assertTrue(keras.utils.data_utils.validate_file(filepath, hashval_md5)) os.remove(filepath) origin = urljoin('file://', pathname2url(os.path.abspath(zip_file_path))) hashval_sha256 = keras.utils.data_utils._hash_file(zip_file_path) hashval_md5 = keras.utils.data_utils._hash_file(zip_file_path, algorithm='md5') path = keras.utils.data_utils.get_file( 'test', origin, md5_hash=hashval_md5, extract=True, cache_subdir=dest_dir) path = keras.utils.data_utils.get_file( 'test', origin, file_hash=hashval_sha256, extract=True, cache_subdir=dest_dir) self.assertTrue(os.path.exists(path)) self.assertTrue(keras.utils.data_utils.validate_file(path, hashval_sha256)) self.assertTrue(keras.utils.data_utils.validate_file(path, hashval_md5)) class ThreadsafeIter(object): def __init__(self, it): self.it = it self.lock = threading.Lock() # After a generator throws an exception all subsequent next() calls raise a # StopIteration Exception. 
This, however, presents an issue when mixing # generators and threading because it means the order of retrieval need not # match the order in which the generator was called. This can make it appear # that a generator exited normally when in fact the terminating exception is # just in a different thread. In order to provide thread safety, once # self.it has thrown an exception we continue to throw the same exception. self._exception = None def __iter__(self): return self def __next__(self): return self.next() def next(self): with self.lock: if self._exception: raise self._exception # pylint: disable=raising-bad-type try: return next(self.it) except Exception as e: self._exception = e raise def threadsafe_generator(f): def g(*a, **kw): return ThreadsafeIter(f(*a, **kw)) return g class TestSequence(keras.utils.data_utils.Sequence): def __init__(self, shape, value=1.): self.shape = shape self.inner = value def __getitem__(self, item): return np.ones(self.shape, dtype=np.uint32) * item * self.inner def __len__(self): return 100 def on_epoch_end(self): self.inner *= 5.0 class FaultSequence(keras.utils.data_utils.Sequence): def __getitem__(self, item): raise IndexError(item, 'item is not present') def __len__(self): return 100 @threadsafe_generator def create_generator_from_sequence_threads(ds): for i in cycle(range(len(ds))): yield ds[i] def create_generator_from_sequence_pcs(ds): for i in cycle(range(len(ds))): yield ds[i] class TestEnqueuers(test.TestCase): def test_generator_enqueuer_threads(self): enqueuer = keras.utils.data_utils.GeneratorEnqueuer( create_generator_from_sequence_threads(TestSequence([3, 200, 200, 3])), use_multiprocessing=False) enqueuer.start(3, 10) gen_output = enqueuer.get() acc = [] for _ in range(100): acc.append(int(next(gen_output)[0, 0, 0, 0])) self.assertEqual(len(set(acc) - set(range(100))), 0) enqueuer.stop() @unittest.skipIf( os.name == 'nt', 'use_multiprocessing=True does not work on windows properly.') def test_generator_enqueuer_processes(self): enqueuer = keras.utils.data_utils.GeneratorEnqueuer( create_generator_from_sequence_pcs(TestSequence([3, 200, 200, 3])), use_multiprocessing=True) enqueuer.start(3, 10) gen_output = enqueuer.get() acc = [] for _ in range(100): acc.append(int(next(gen_output)[0, 0, 0, 0])) self.assertNotEqual(acc, list(range(100))) enqueuer.stop() def test_generator_enqueuer_fail_threads(self): enqueuer = keras.utils.data_utils.GeneratorEnqueuer( create_generator_from_sequence_threads(FaultSequence()), use_multiprocessing=False) enqueuer.start(3, 10) gen_output = enqueuer.get() with self.assertRaises(IndexError): next(gen_output) @unittest.skipIf( os.name == 'nt', 'use_multiprocessing=True does not work on windows properly.') def test_generator_enqueuer_fail_processes(self): enqueuer = keras.utils.data_utils.GeneratorEnqueuer( create_generator_from_sequence_pcs(FaultSequence()), use_multiprocessing=True) enqueuer.start(3, 10) gen_output = enqueuer.get() with self.assertRaises(IndexError): next(gen_output) def test_ordered_enqueuer_threads(self): enqueuer = keras.utils.data_utils.OrderedEnqueuer( TestSequence([3, 200, 200, 3]), use_multiprocessing=False) enqueuer.start(3, 10) gen_output = enqueuer.get() acc = [] for _ in range(100): acc.append(next(gen_output)[0, 0, 0, 0]) self.assertEqual(acc, list(range(100))) enqueuer.stop() def test_ordered_enqueuer_processes(self): enqueuer = keras.utils.data_utils.OrderedEnqueuer( TestSequence([3, 200, 200, 3]), use_multiprocessing=True) enqueuer.start(3, 10) gen_output = enqueuer.get() acc = [] 
for _ in range(100): acc.append(next(gen_output)[0, 0, 0, 0]) self.assertEqual(acc, list(range(100))) enqueuer.stop() def test_ordered_enqueuer_fail_threads(self): enqueuer = keras.utils.data_utils.OrderedEnqueuer( FaultSequence(), use_multiprocessing=False) enqueuer.start(3, 10) gen_output = enqueuer.get() with self.assertRaises(IndexError): next(gen_output) def test_ordered_enqueuer_fail_processes(self): enqueuer = keras.utils.data_utils.OrderedEnqueuer( FaultSequence(), use_multiprocessing=True) enqueuer.start(3, 10) gen_output = enqueuer.get() with self.assertRaises(IndexError): next(gen_output) def test_on_epoch_end_processes(self): enqueuer = keras.utils.data_utils.OrderedEnqueuer( TestSequence([3, 200, 200, 3]), use_multiprocessing=True) enqueuer.start(3, 10) gen_output = enqueuer.get() acc = [] for _ in range(200): acc.append(next(gen_output)[0, 0, 0, 0]) # Check that order was kept in OrderedEnqueuer with processes self.assertEqual(acc[100:], list([k * 5 for k in range(100)])) enqueuer.stop() def test_context_switch(self): enqueuer = keras.utils.data_utils.OrderedEnqueuer( TestSequence([3, 200, 200, 3]), use_multiprocessing=True) enqueuer2 = keras.utils.data_utils.OrderedEnqueuer( TestSequence([3, 200, 200, 3], value=15), use_multiprocessing=True) enqueuer.start(3, 10) enqueuer2.start(3, 10) gen_output = enqueuer.get() gen_output2 = enqueuer2.get() acc = [] for _ in range(100): acc.append(next(gen_output)[0, 0, 0, 0]) self.assertEqual(acc[-1], 99) # One epoch is completed so enqueuer will switch the Sequence acc = [] for _ in range(100): acc.append(next(gen_output2)[0, 0, 0, 0]) self.assertEqual(acc[-1], 99 * 15) # One epoch has been completed so enqueuer2 will switch # Be sure that both Sequences were updated self.assertEqual(next(gen_output)[0, 0, 0, 0], 0) self.assertEqual(next(gen_output)[0, 0, 0, 0], 5) self.assertEqual(next(gen_output2)[0, 0, 0, 0], 0) self.assertEqual(next(gen_output2)[0, 0, 0, 0], 15 * 5) # Tear down everything enqueuer.stop() enqueuer2.stop() def test_on_epoch_end_threads(self): enqueuer = keras.utils.data_utils.OrderedEnqueuer( TestSequence([3, 200, 200, 3]), use_multiprocessing=False) enqueuer.start(3, 10) gen_output = enqueuer.get() acc = [] for _ in range(100): acc.append(next(gen_output)[0, 0, 0, 0]) acc = [] for _ in range(100): acc.append(next(gen_output)[0, 0, 0, 0]) # Check that order was kept in OrderedEnqueuer with threads self.assertEqual(acc, list([k * 5 for k in range(100)])) enqueuer.stop() if __name__ == '__main__': # Bazel sets these environment variables to very long paths. # Tempfile uses them to create long paths, and in turn multiprocessing # library tries to create sockets named after paths. Delete whatever bazel # writes to these to avoid tests failing due to socket addresses being too # long. for var in ('TMPDIR', 'TMP', 'TEMP'): if var in os.environ: del os.environ[var] test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/utils/data_utils_test.py
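# The enqueuer pattern from the tests above, reduced to a sketch: wrap a
# Sequence in an OrderedEnqueuer, start workers, and consume batches from the
# generator returned by get(). RangeSequence is a hypothetical fixture.
import numpy as np
from tensorflow.python import keras

class RangeSequence(keras.utils.data_utils.Sequence):

    def __getitem__(self, idx):
        return np.full((2, 4), idx, dtype=np.int32)

    def __len__(self):
        return 10

enqueuer = keras.utils.data_utils.OrderedEnqueuer(
    RangeSequence(), use_multiprocessing=False)
enqueuer.start(workers=2, max_queue_size=4)
batches = enqueuer.get()
seen = [int(next(batches)[0, 0]) for _ in range(10)]
enqueuer.stop()
assert seen == list(range(10))  # order preserved despite multiple workers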
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras composite tensor support.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np import scipy.sparse from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import input_layer from tensorflow.python.keras.layers import core from tensorflow.python.keras.layers import Layer from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import test # Define test-only Layer classes to validate passing Sparse and Ragged tensors # between layers. class ToDense(Layer): """Create a dense (standard) tensor from the given input tensor.""" def __init__(self, default_value, **kwargs): super(ToDense, self).__init__(**kwargs) self._default_value = default_value def call(self, inputs): if isinstance(inputs, ragged_tensor.RaggedTensor): output = inputs.to_tensor(default_value=self._default_value) elif isinstance(inputs, sparse_tensor.SparseTensor): output = sparse_ops.sparse_tensor_to_dense( inputs, default_value=self._default_value) elif isinstance(inputs, ops.Tensor): output = inputs else: raise TypeError("Unexpected tensor type %s" % type(inputs).__name__) # Return a float so that we can compile models with this as the final layer. 
return math_ops.cast(output, dtypes.float32) class ToRagged(Layer): """Create a ragged tensor based on a given dense tensor.""" def __init__(self, padding, ragged_rank=1, **kwargs): super(ToRagged, self).__init__(**kwargs) self._padding = padding self._ragged_rank = ragged_rank def call(self, inputs): return ragged_tensor.RaggedTensor.from_tensor( inputs, padding=self._padding, ragged_rank=self._ragged_rank) class ToSparse(Layer): """Create a sparse tensor based on a given dense tensor.""" def call(self, inputs): indices = array_ops.where_v2(math_ops.not_equal(inputs, 0)) values = array_ops.gather_nd(inputs, indices) shape = array_ops.shape(inputs, out_type=dtypes.int64) return sparse_tensor.SparseTensor(indices, values, dense_shape=shape) class _SubclassModel(keras.Model): """A Keras subclass model.""" def __init__(self, layers, i_layer=None): super(_SubclassModel, self).__init__() # Note that clone and build doesn't support lists of layers in subclassed # models. Adding each layer directly here. for i, layer in enumerate(layers): setattr(self, self._layer_name_for_i(i), layer) self.num_layers = len(layers) if i_layer is not None: self._set_inputs(i_layer) def _layer_name_for_i(self, i): return "layer{}".format(i) def call(self, inputs, **kwargs): x = inputs for i in range(self.num_layers): layer = getattr(self, self._layer_name_for_i(i)) x = layer(x) return x def get_model_from_layers_with_input(layers, input_shape=None, input_dtype=None, model_input=None): """Builds a model from a sequence of layers.""" if model_input is not None and input_shape is not None: raise ValueError("Cannot specify a model_input and an input shape.") model_type = testing_utils.get_model_type() if model_type == "subclass": return _SubclassModel(layers, model_input) if model_type == "sequential": model = keras.models.Sequential() if model_input is not None: model.add(model_input) elif input_shape is not None: model.add(keras.Input(shape=input_shape, dtype=input_dtype)) for layer in layers: model.add(layer) return model if model_type == "functional": if model_input is not None: inputs = model_input else: if not input_shape: raise ValueError("Cannot create a functional model from layers with no " "input shape.") inputs = keras.Input(shape=input_shape, dtype=input_dtype) outputs = inputs for layer in layers: outputs = layer(outputs) return keras.Model(inputs, outputs) raise ValueError("Unknown model type {}".format(model_type)) def get_test_mode_kwargs(): run_eagerly = testing_utils.should_run_eagerly() # Certain things weren't supported correctly in the old path, therefore # with these changes, some tests now only pass in the single code path in V2. if run_eagerly or context.executing_eagerly(): experimental_run_tf_function = True else: experimental_run_tf_function = testing_utils.should_run_tf_function() return { "run_eagerly": run_eagerly, "experimental_run_tf_function": experimental_run_tf_function } @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes class CompositeTensorInternalTest(keras_parameterized.TestCase): def test_internal_ragged_tensors(self): # Create a model that accepts an input, converts it to Ragged, and # converts the ragged tensor back to a dense tensor. layers = [ToRagged(padding=0), ToDense(default_value=-1)] model = testing_utils.get_model_from_layers(layers, input_shape=(None,)) # Define some input data with additional padding. 
input_data = np.array([[1, 0, 0], [2, 3, 0]]) expected_output = np.array([[1, -1], [2, 3]]) output = model.predict(input_data) self.assertAllEqual(expected_output, output) def test_internal_sparse_tensors(self): # Create a model that accepts an input, converts it to Sparse, and # converts the sparse tensor back to a dense tensor. layers = [ToSparse(), ToDense(default_value=-1)] model = testing_utils.get_model_from_layers(layers, input_shape=(None,)) # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0]]) expected_output = np.array([[1, -1, -1], [2, 3, -1]]) output = model.predict(input_data) self.assertAllEqual(expected_output, output) def test_training_internal_ragged_tensors(self): # Create a model that implements y=Mx. This is easy to learn and will # demonstrate appropriate gradient passing. (We have to use RaggedTensors # for this test, as ToSparse() doesn't support gradient propagation through # the layer.) TODO(b/124796939): Investigate this. layers = [core.Dense(2), ToRagged(padding=0), ToDense(default_value=-1)] model = testing_utils.get_model_from_layers(layers, input_shape=(1,)) input_data = np.random.rand(1024, 1) expected_data = np.concatenate((input_data * 3, input_data * .5), axis=-1) model.compile(loss="mse", optimizer="adam", **get_test_mode_kwargs()) history = model.fit(input_data, expected_data, epochs=10, verbose=0) # If the model trained, the loss stored at history[0] should be different # than the one stored at history[-1]. self.assertNotEqual(history.history["loss"][-1], history.history["loss"][0]) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes class CompositeTensorOutputTest(keras_parameterized.TestCase): def test_ragged_tensor_outputs(self): # Create a model that accepts an input, converts it to Ragged, and # converts the ragged tensor back to a dense tensor. layers = [ToRagged(padding=0)] model = testing_utils.get_model_from_layers(layers, input_shape=(None,)) model._experimental_run_tf_function = testing_utils.should_run_tf_function() model._run_eagerly = testing_utils.should_run_eagerly() # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0]]) output = model.predict(input_data) expected_values = [[1], [2, 3]] self.assertAllEqual(expected_values, output) def test_ragged_tensor_rebatched_outputs(self): # Create a model that accepts an input, converts it to Ragged, and # converts the ragged tensor back to a dense tensor. layers = [ToRagged(padding=0)] model = testing_utils.get_model_from_layers(layers, input_shape=(None,)) model._experimental_run_tf_function = testing_utils.should_run_tf_function() model._run_eagerly = testing_utils.should_run_eagerly() # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0], [4, 0, 0], [5, 6, 0]]) output = model.predict(input_data, batch_size=2) expected_values = [[1], [2, 3], [4], [5, 6]] self.assertAllEqual(expected_values, output) def test_sparse_tensor_outputs(self): # Create a model that accepts an input, converts it to Ragged, and # converts the ragged tensor back to a dense tensor. layers = [ToSparse()] model = testing_utils.get_model_from_layers(layers, input_shape=(None,)) model._experimental_run_tf_function = testing_utils.should_run_tf_function() model._run_eagerly = testing_utils.should_run_eagerly() # Define some input data with additional padding. 
input_data = np.array([[1, 0, 0], [2, 3, 0]]) output = model.predict(input_data) expected_indices = np.array([[0, 0], [1, 0], [1, 1]]) expected_values = np.array([1, 2, 3]) expected_dense_shape = np.array([2, 3]) self.assertAllEqual(output.indices, expected_indices) self.assertAllEqual(output.values, expected_values) self.assertAllEqual(output.dense_shape, expected_dense_shape) def test_sparse_tensor_rebatched_outputs(self): # Create a model that accepts an input, converts it to Ragged, and # converts the ragged tensor back to a dense tensor. layers = [ToSparse()] model = testing_utils.get_model_from_layers(layers, input_shape=(None,)) model._experimental_run_tf_function = testing_utils.should_run_tf_function() model._run_eagerly = testing_utils.should_run_eagerly() # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0], [4, 0, 0], [5, 6, 0]]) output = model.predict(input_data, batch_size=2) expected_indices = np.array([[0, 0], [1, 0], [1, 1], [2, 0], [3, 0], [3, 1]]) expected_values = np.array([1, 2, 3, 4, 5, 6]) expected_dense_shape = np.array([4, 3]) self.assertAllEqual(output.indices, expected_indices) self.assertAllEqual(output.values, expected_values) self.assertAllEqual(output.dense_shape, expected_dense_shape) def get_input_name(use_dict): # Define the input name. if not use_dict: return None # This is the same as not setting 'name'. elif testing_utils.get_model_type() == "subclass": return "input_1" # Subclass models don"t support input names. else: return "test_input_name" def get_kwargs(use_dataset, action="predict"): if use_dataset or not context.executing_eagerly(): if action == "fit": return {"steps_per_epoch": 1} return {"steps": 1} else: return {"batch_size": 2} def prepare_inputs(data, use_dict, use_dataset, action, input_name): input_data, expected_output = data batch_size = input_data.shape[0] # Prepare the input data. if use_dict: input_data = {input_name: input_data} if use_dataset: if action == "predict": input_data = dataset_ops.DatasetV2.from_tensor_slices(input_data).batch( batch_size) else: input_data = dataset_ops.DatasetV2.from_tensor_slices( (input_data, expected_output)).batch(batch_size) expected_output = None return (input_data, expected_output) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes @parameterized.named_parameters( *test_util.generate_combinations_with_testcase_name( use_dict=[True, False], use_dataset=[True, False], action=["predict", "evaluate", "fit"])) class SparseTensorInputTest(keras_parameterized.TestCase): def test_sparse_tensors(self, use_dict, use_dataset, action): data = [(sparse_tensor.SparseTensor([[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3]), np.array([[[1, -1, -1]], [[2, 3, -1]]])), (sparse_tensor.SparseTensor( [[0, 0, 0], [1, 0, 0], [1, 0, 1], [2, 0, 1]], [5, 6, 7, 8], [3, 1, 4]), np.array([[[5, -1, -1, -1]], [[6, 7, -1, -1]], [[-1, 8, -1, -1]]]))] # Prepare the model to test. input_name = get_input_name(use_dict) model_input = input_layer.Input( shape=(1, None), sparse=True, name=input_name, dtype=dtypes.int32) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) model.compile( optimizer="sgd", loss="mse", metrics=["accuracy"], **get_test_mode_kwargs()) kwargs = get_kwargs(use_dataset, action) # Prepare the input data for data_element in data: input_data, expected_output = prepare_inputs(data_element, use_dict, use_dataset, action, input_name) # Perform the action. 
if action == "predict": result = model.predict(input_data, **kwargs) self.assertAllEqual(expected_output, result) if action == "evaluate": result = model.evaluate(input_data, expected_output, **kwargs) self.assertAllEqual(1.0, result[-1]) if action == "fit": # TODO(momernick): What's the best way of validating that fit happened? _ = model.fit(input_data, expected_output, shuffle=False, **kwargs) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes class ScipySparseTensorInputTest(keras_parameterized.TestCase, test_util.TensorFlowTestCase): def test_sparse_scipy_predict_inputs_via_input_layer_args(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. Scipy sparse matrices are limited to 2D, so use # a one-dimensional shape; note also that scipy's default dtype is int64. model_input = input_layer.Input(shape=(3,), sparse=True, dtype=dtypes.int64) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) input_data = scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 3]) expected_output = np.array([[1, -1, -1], [2, 3, -1]]) output = model.predict(input_data, steps=1) self.assertAllEqual(expected_output, output) input_data_2 = scipy.sparse.coo_matrix( ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3]) expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]]) output_2 = model.predict(input_data_2, steps=1) self.assertAllEqual(expected_output_2, output_2) def test_sparse_scipy_eval_inputs(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. Scipy sparse matrices are limited to 2D, so use # a one-dimensional shape; note also that scipy's default dtype is int64. model_input = input_layer.Input(shape=(3,), sparse=True, dtype=dtypes.int64) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) model.compile( optimizer="sgd", loss="mse", metrics=["accuracy"], experimental_run_tf_function=testing_utils.should_run_tf_function()) input_data = scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 3]) expected_output = np.array([[1, -1, -1], [2, 3, -1]]) output = model.evaluate(input_data, expected_output, steps=1) self.assertAllEqual(1.0, output[-1]) input_data_2 = scipy.sparse.coo_matrix( ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3]) expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]]) output_2 = model.evaluate(input_data_2, expected_output_2, steps=1) self.assertAllEqual(1.0, output_2[-1]) def test_sparse_scipy_predict_input_dicts_via_input_layer_args(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. Scipy sparse matrices are limited to 2D, so use # a one-dimensional shape; note also that scipy's default dtype is int64. if testing_utils.get_model_type() == "subclass": input_name = "input_1" # Subclass models don"t support input names. 
else: input_name = "test_input_name" model_input = input_layer.Input( shape=(3,), sparse=True, name=input_name, dtype=dtypes.int64) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) input_data = { input_name: scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 3]) } expected_output = np.array([[1, -1, -1], [2, 3, -1]]) output = model.predict(input_data, steps=1) self.assertAllEqual(expected_output, output) input_data_2 = { input_name: scipy.sparse.coo_matrix( ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3]) } expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]]) output_2 = model.predict(input_data_2, steps=1) self.assertAllEqual(expected_output_2, output_2) def test_sparse_scipy_eval_input_dicts(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. Scipy sparse matrices are limited to 2D, so use # a one-dimensional shape; note also that scipy's default dtype is int64. if testing_utils.get_model_type() == "subclass": input_name = "input_1" # Subclass models don"t support input names. else: input_name = "test_input_name" model_input = input_layer.Input( shape=(3,), sparse=True, name=input_name, dtype=dtypes.int64) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) model.compile( optimizer="sgd", loss="mse", metrics=["accuracy"], experimental_run_tf_function=testing_utils.should_run_tf_function()) input_data = { input_name: scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 3]) } expected_output = np.array([[1, -1, -1], [2, 3, -1]]) output = model.evaluate(input_data, expected_output, steps=1) self.assertAllEqual(1.0, output[-1]) input_data_2 = { input_name: scipy.sparse.coo_matrix( ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3]) } expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]]) output_2 = model.evaluate(input_data_2, expected_output_2, steps=1) self.assertAllEqual(1.0, output_2[-1]) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes @parameterized.named_parameters( *test_util.generate_combinations_with_testcase_name( use_dict=[True, False], use_dataset=[True, False], action=["predict", "evaluate", "fit"])) class RaggedTensorInputTest(keras_parameterized.TestCase, test_util.TensorFlowTestCase): def test_ragged_input(self, use_dict, use_dataset, action): data = [(ragged_factory_ops.constant([[[1]], [[2, 3]]]), np.array([[[1, -1]], [[2, 3]]]))] # Prepare the model to test. input_name = get_input_name(use_dict) model_input = input_layer.Input( shape=(None, None), ragged=True, name=input_name, dtype=dtypes.int32) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) model.compile( optimizer="sgd", loss="mse", metrics=["accuracy"], **get_test_mode_kwargs()) # Prepare the input data for data_element in data: input_data, expected_output = prepare_inputs(data_element, use_dict, use_dataset, action, input_name) # Perform the action. if action == "predict": result = model.predict(input_data) self.assertAllEqual(expected_output, result) if action == "evaluate": result = model.evaluate(input_data, expected_output) self.assertAllEqual(1.0, result[-1]) if action == "fit": # TODO(momernick): What's the best way of validating that fit happened? 
_ = model.fit(input_data, expected_output, shuffle=False) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes @parameterized.named_parameters( *test_util.generate_combinations_with_testcase_name( use_dict=[True, False], use_dataset=[True, False])) class RaggedTensorInputValidationTest(keras_parameterized.TestCase, test_util.TensorFlowTestCase): def test_ragged_tensor_input_with_one_none_dimension(self, use_dict, use_dataset): # Define some input data. data = [(ragged_factory_ops.constant([[[1, 0]], [[2, 3]]], ragged_rank=1), np.array([[[1, 0]], [[2, 3]]]))] # Prepare the model to test. input_shape = (None, 2) # RaggedTensorInputTest uses (None, None). input_name = get_input_name(use_dict) model_input = input_layer.Input( shape=input_shape, ragged=True, name=input_name, dtype=dtypes.int32) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) model.compile( optimizer="sgd", loss="mse", metrics=["accuracy"], **get_test_mode_kwargs()) for data_element in data: input_data, expected_output = prepare_inputs( data_element, use_dict, use_dataset, action="predict", input_name=input_name) result = model.predict(input_data) self.assertAllEqual(expected_output, result) def test_ragged_tensor_input_with_no_none_dimension(self, use_dict, use_dataset): # Define some input data. data = [(ragged_factory_ops.constant([[[1, 0]], [[2, 3]]], ragged_rank=0), np.array([[[1, 0]], [[2, 3]]]))] # Prepare the model to test. input_shape = (1, 2) # RaggedTensorInputTest uses (None, None). input_name = get_input_name(use_dict) model_input = input_layer.Input( shape=input_shape, ragged=True, name=input_name, dtype=dtypes.int32) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) model.compile( optimizer="sgd", loss="mse", metrics=["accuracy"], **get_test_mode_kwargs()) kwargs = get_kwargs(use_dataset) for data_element in data: input_data, expected_output = prepare_inputs( data_element, use_dict, use_dataset, action="predict", input_name=input_name) result = model.predict(input_data, **kwargs) self.assertAllEqual(expected_output, result) def test_ragged_tensor_input_with_wrong_ragged_rank_fails( self, use_dict, use_dataset): # Define some input data that will NOT match the input shape spec. data = [(ragged_factory_ops.constant([[[1, 0]], [[2, 3]]]), None)] # Prepare the model to test. input_shape = (None, 2) # RaggedTensorInputTest uses (None, None). input_name = get_input_name(use_dict) model_input = input_layer.Input( shape=input_shape, ragged=True, name=input_name, dtype=dtypes.int32) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) model.compile( optimizer="sgd", loss="mse", metrics=["accuracy"], **get_test_mode_kwargs()) # Define some input data with the wrong ragged rank for data_element in data: input_data, _ = prepare_inputs( data_element, use_dict, use_dataset, action="predict", input_name=input_name) with self.assertRaisesRegex(ValueError, ".*don't have the same nested.*"): _ = model.predict(input_data) # CompositeTensor shape validation only happens in non-eager modes and in non- # subclassed models, so we run a separate parameterized test for them. 
@keras_parameterized.run_with_all_model_types(exclude_models=["subclass"]) @keras_parameterized.run_all_keras_modes(always_skip_eager=True) class SparseTensorInputValidationTest(keras_parameterized.TestCase): def test_sparse_scipy_input_checks_shape(self): model_input = input_layer.Input(shape=(3,), sparse=True, dtype=dtypes.int32) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) input_data = scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 4]) with self.assertRaisesRegex(ValueError, ".*got array with shape.*"): _ = model.predict(input_data) def test_sparse_tensor_input_checks_shapes(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. model_input = input_layer.Input( shape=(2, None), sparse=True, dtype=dtypes.int32) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) # Define some input data. input_data = sparse_tensor.SparseTensor([[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3]) kwargs = get_kwargs(use_dataset=False) with self.assertRaisesRegex(ValueError, ".*got array with shape.*"): _ = model.predict(input_data, **kwargs) def test_ragged_tensor_input_with_wrong_value_shape(self): # Create a model that accepts a ragged input and converts it to dense. model_input = input_layer.Input( shape=(None, 4), ragged=True, dtype=dtypes.int32) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) # Define some input data with the wrong ragged rank input_data = ragged_factory_ops.constant([[[1, 0]], [[2, 3]]], ragged_rank=1) with self.assertRaisesRegex(ValueError, ".*got array with shape.*"): _ = model.predict(input_data) @keras_parameterized.run_with_all_model_types( exclude_models=["functional"]) @keras_parameterized.run_all_keras_modes class UndefinedCompositeTensorInputsTest(keras_parameterized.TestCase): def test_subclass_implicit_sparse_inputs_fails(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. layers = [ToDense(default_value=-1)] model = testing_utils.get_model_from_layers(layers) # Define some input data. input_data = sparse_tensor.SparseTensor([[0, 0], [1, 0], [1, 1]], [1, 2, 3], [2, 3]) kwargs = get_kwargs(False) with self.assertRaisesRegex( ValueError, ".*All SparseTensor and RaggedTensor inputs .*"): _ = model.predict(input_data, **kwargs) def test_subclass_implicit_sparse_scipy_inputs_fails(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. layers = [ToDense(default_value=-1)] model = testing_utils.get_model_from_layers(layers) # Define some input data. input_data = scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 3]) with self.assertRaisesRegex(ValueError, ".*either a single array.*"): _ = model.predict(input_data) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/utils/composite_tensor_support_test.py
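# The dense <-> ragged round trip these tests push through Keras layers can
# be reproduced directly with the same internal APIs the file imports:
# from_tensor() strips a padding value, and to_tensor() pads back out.
from tensorflow.python.framework import constant_op
from tensorflow.python.ops.ragged import ragged_tensor

dense = constant_op.constant([[1, 0, 0], [2, 3, 0]])
ragged = ragged_tensor.RaggedTensor.from_tensor(dense, padding=0)
# ragged is [[1], [2, 3]]; re-densifying with a -1 fill gives [[1, -1], [2, 3]]
restored = ragged.to_tensor(default_value=-1)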
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for kernelized_utils.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from absl.testing import parameterized from tensorflow.python.framework import constant_op from tensorflow.python.keras.utils import kernelized_utils from tensorflow.python.platform import test def _exact_gaussian(stddev): return functools.partial( kernelized_utils.exact_gaussian_kernel, stddev=stddev) def _exact_laplacian(stddev): return functools.partial( kernelized_utils.exact_laplacian_kernel, stddev=stddev) class KernelizedUtilsTest(test.TestCase, parameterized.TestCase): @parameterized.named_parameters( ('gaussian', _exact_gaussian(stddev=10.0), [[1.0]]), ('laplacian', _exact_laplacian(stddev=50.0), [[1.0]])) def test_equal_vectors(self, exact_kernel_fn, expected_values): """Identical vectors give exactly the identity kernel value.""" x = constant_op.constant([0.5, -0.5, -0.5, 0.5]) y = constant_op.constant([0.5, -0.5, -0.5, 0.5]) exact_kernel = exact_kernel_fn(x, y) shape = exact_kernel.shape.as_list() self.assertLen(shape, 2) # x and y are identical and therefore K(x, y) will be precisely equal to # the identity value of the kernel. self.assertAllClose(expected_values, exact_kernel, atol=1e-6) @parameterized.named_parameters( ('gaussian', _exact_gaussian(stddev=10.0), [[1.0]]), ('laplacian', _exact_laplacian(stddev=50.0), [[1.0]])) def test_almost_identical_vectors(self, exact_kernel_fn, expected_values): """Almost identical vectors give the identity kernel value.""" x = constant_op.constant([1.0, 0.4, -2.1, -1.1]) y = constant_op.constant([1.01, 0.39, -2.099, -1.101]) exact_kernel = exact_kernel_fn(x, y) shape = exact_kernel.shape.as_list() self.assertLen(shape, 2) # x and y are almost identical and therefore K(x, y) will be almost equal to # the identity value of the kernel. self.assertAllClose(expected_values, exact_kernel, atol=1e-3) @parameterized.named_parameters( ('gaussian', _exact_gaussian(stddev=1.0), [[0.99], [0.977]]), ('laplacian', _exact_laplacian(stddev=5.0), [[0.96], [0.94]])) def test_similar_matrices(self, exact_kernel_fn, expected_values): """Pairwise "close" vectors give high kernel values (similarity scores).""" x = constant_op.constant([1.0, 3.4, -2.1, 0.9, 3.3, -2.0], shape=[2, 3]) y = constant_op.constant([1.1, 3.35, -2.05]) exact_kernel = exact_kernel_fn(x, y) shape = exact_kernel.shape.as_list() self.assertLen(shape, 2) # The 2 rows of x are close to y. The pairwise kernel values (similarity # scores) are somewhat close to the identity value of the kernel. 
self.assertAllClose(expected_values, exact_kernel, atol=1e-2) @parameterized.named_parameters( ('gaussian', _exact_gaussian(stddev=2.0), [[.997, .279], [.251, 1.], [.164, 0.019]]), ('laplacian', _exact_laplacian(stddev=2.0), [[.904, .128], [.116, 1.], [.07, 0.027]])) def test_matrices_varying_similarity(self, exact_kernel_fn, expected_values): """Test matrices with row vectors of varying pairwise similarity.""" x = constant_op.constant([1.0, 2., -2., 0.9, 3.3, -1.0], shape=[3, 2]) y = constant_op.constant([1.1, 2.1, -2., 0.9], shape=[2, 2]) exact_kernel = exact_kernel_fn(x, y) shape = exact_kernel.shape.as_list() self.assertLen(shape, 2) self.assertAllClose(expected_values, exact_kernel, atol=1e-2) @parameterized.named_parameters( ('gaussian', _exact_gaussian(stddev=1.0), [[0.0]]), ('laplacian', _exact_laplacian(stddev=1.0), [[0.0]])) def test_completely_dissimilar_vectors(self, exact_kernel_fn, expected_values): """Very dissimilar vectors give very low similarity scores.""" x = constant_op.constant([1.0, 3.4, -2.1, -5.1]) y = constant_op.constant([0.5, 2.1, 1.0, 3.0]) exact_kernel = exact_kernel_fn(x, y) shape = exact_kernel.shape.as_list() self.assertLen(shape, 2) # x and y are very "far" from each other and so the corresponding kernel # value will be very low. self.assertAllClose(expected_values, exact_kernel, atol=1e-2) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/utils/kernelized_utils_test.py
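# Direct use of the kernels under test: identical vectors score the kernel's
# identity value, 1.0, returned with shape (1, 1) because rank-1 inputs are
# promoted to single-row matrices.
from tensorflow.python.framework import constant_op
from tensorflow.python.keras.utils import kernelized_utils

x = constant_op.constant([0.5, -0.5, -0.5, 0.5])
y = constant_op.constant([0.5, -0.5, -0.5, 0.5])
k = kernelized_utils.exact_gaussian_kernel(x, y, stddev=10.0)
# k evaluates to [[1.0]]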
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras.utils.data_utils import GeneratorEnqueuer from tensorflow.python.keras.utils.data_utils import get_file from tensorflow.python.keras.utils.data_utils import OrderedEnqueuer from tensorflow.python.keras.utils.data_utils import Sequence from tensorflow.python.keras.utils.data_utils import SequenceEnqueuer from tensorflow.python.keras.utils.generic_utils import class_and_config_for_serialized_keras_object from tensorflow.python.keras.utils.generic_utils import custom_object_scope from tensorflow.python.keras.utils.generic_utils import CustomObjectScope from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object from tensorflow.python.keras.utils.generic_utils import get_custom_objects from tensorflow.python.keras.utils.generic_utils import Progbar from tensorflow.python.keras.utils.generic_utils import serialize_keras_class_and_config from tensorflow.python.keras.utils.generic_utils import serialize_keras_object from tensorflow.python.keras.utils.io_utils import HDF5Matrix from tensorflow.python.keras.utils.layer_utils import convert_all_kernels_in_model from tensorflow.python.keras.utils.layer_utils import get_source_inputs from tensorflow.python.keras.utils.layer_utils import print_summary from tensorflow.python.keras.utils.multi_gpu_utils import multi_gpu_model from tensorflow.python.keras.utils.np_utils import normalize from tensorflow.python.keras.utils.np_utils import to_categorical from tensorflow.python.keras.utils.vis_utils import model_to_dot from tensorflow.python.keras.utils.vis_utils import plot_model del absolute_import del division del print_function
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/utils/__init__.py
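# Because of the re-exports above, the utilities are reachable from the
# package itself rather than their defining modules, e.g.:
from tensorflow.python.keras import utils

one_hot = utils.to_categorical([0, 1], num_classes=2)
bar = utils.Progbar(target=10)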
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility methods related to kernelized layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops def _to_matrix(u): """If input tensor is a vector (i.e., has rank 1), converts it to matrix.""" u_rank = len(u.shape) if u_rank not in [1, 2]: raise ValueError('The input tensor should have rank 1 or 2. Given rank: {}' .format(u_rank)) if u_rank == 1: return array_ops.expand_dims(u, 0) return u def _align_matrices(x, y): """Aligns x and y tensors to allow computations over pairs of their rows.""" x_matrix = _to_matrix(x) y_matrix = _to_matrix(y) x_shape = x_matrix.shape y_shape = y_matrix.shape if y_shape[1] != x_shape[1]: # dimensions do not match. raise ValueError( 'The innermost dimensions of the input tensors should match. Given: {} ' 'vs {}.'.format(y_shape[1], x_shape[1])) x_tile = array_ops.tile( array_ops.expand_dims(x_matrix, 1), [1, y_shape[0], 1]) y_tile = array_ops.tile( array_ops.expand_dims(y_matrix, 0), [x_shape[0], 1, 1]) return x_tile, y_tile def inner_product(u, v): u = _to_matrix(u) v = _to_matrix(v) return math_ops.matmul(u, v, transpose_b=True) def exact_gaussian_kernel(x, y, stddev): r"""Computes exact Gaussian kernel value(s) for tensors x and y and stddev. The Gaussian kernel for vectors u, v is defined as follows: K(u, v) = exp(-||u-v||^2 / (2* stddev^2)) where the norm is the l2-norm. x, y can be either vectors or matrices. If they are vectors, they must have the same dimension. If they are matrices, they must have the same number of columns. In the latter case, the method returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and v is a row from y. Args: x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim]. y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim]. stddev: The width of the Gaussian kernel. Returns: A single value (scalar) with shape (1, 1) (if x, y are vectors) or a matrix of shape (m, n) with entries K(u, v) (where K is the Gaussian kernel) for all (u,v) pairs where u, v are rows from x and y respectively. Raises: ValueError: if the shapes of x, y are not compatible. """ x_aligned, y_aligned = _align_matrices(x, y) diff_squared_l2_norm = math_ops.reduce_sum( math_ops.squared_difference(x_aligned, y_aligned), 2) return math_ops.exp(-diff_squared_l2_norm / (2 * stddev * stddev)) def exact_laplacian_kernel(x, y, stddev): r"""Computes exact Laplacian kernel value(s) for tensors x and y using stddev. The Laplacian kernel for vectors u, v is defined as follows: K(u, v) = exp(-||u-v|| / stddev) where the norm is the l1-norm. x, y can be either vectors or matrices. If they are vectors, they must have the same dimension. 
If they are matrices, they must have the same number of columns. In the latter case, the method returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and v is a row from y. Args: x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim]. y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim]. stddev: The width of the Laplacian kernel. Returns: A single value (scalar) with shape (1, 1) if x, y are vectors or a matrix of shape (m, n) with entries K(u, v) (where K is the Laplacian kernel) for all (u,v) pairs where u, v are rows from x and y respectively. Raises: ValueError: if the shapes of x, y are not compatible. """ x_aligned, y_aligned = _align_matrices(x, y) diff_l1_norm = math_ops.reduce_sum( math_ops.abs(math_ops.subtract(x_aligned, y_aligned)), 2) return math_ops.exp(-diff_l1_norm / stddev)
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/utils/kernelized_utils.py
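# A numpy cross-check of the two closed forms above, assuming plain float
# inputs: K_gauss(u, v) = exp(-||u-v||_2^2 / (2*stddev^2)) and
# K_lap(u, v) = exp(-||u-v||_1 / stddev).
import numpy as np

def np_gaussian_kernel(u, v, stddev):
    return np.exp(-np.sum((u - v) ** 2) / (2.0 * stddev ** 2))

def np_laplacian_kernel(u, v, stddev):
    return np.exp(-np.sum(np.abs(u - v)) / stddev)

u = np.array([1.0, 3.4, -2.1, -5.1])
v = np.array([0.5, 2.1, 1.0, 3.0])
# Should agree (up to float tolerance) with exact_gaussian_kernel /
# exact_laplacian_kernel evaluated on the same vectors.
print(np_gaussian_kernel(u, v, stddev=1.0), np_laplacian_kernel(u, v, stddev=1.0))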
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metrics_utils."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import parameterized

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest


@test_util.run_all_in_graph_and_eager_modes
class RaggedSizeOpTest(test_util.TensorFlowTestCase, parameterized.TestCase):

  @parameterized.parameters([
      {'x_list': [1], 'y_list': [2]},
      {'x_list': [1, 2], 'y_list': [2, 3]},
      {'x_list': [1, 2, 4], 'y_list': [2, 3, 5]},
      {'x_list': [[1, 2], [3, 4]], 'y_list': [[2, 3], [5, 6]]},
  ])
  def test_passing_dense_tensors(self, x_list, y_list):
    x = constant_op.constant(x_list)
    y = constant_op.constant(y_list)
    [x, y], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values(
        [x, y])
    x.shape.assert_is_compatible_with(y.shape)

  @parameterized.parameters([
      {'x_list': [1]},
      {'x_list': [1, 2]},
      {'x_list': [1, 2, 4]},
      {'x_list': [[1, 2], [3, 4]]},
  ])
  def test_passing_one_dense_tensor(self, x_list):
    x = constant_op.constant(x_list)
    [x], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x])

  @parameterized.parameters([
      {'x_list': [1], 'y_list': [2]},
      {'x_list': [1, 2], 'y_list': [2, 3]},
      {'x_list': [1, 2, 4], 'y_list': [2, 3, 5]},
      {'x_list': [[1, 2], [3, 4]], 'y_list': [[2, 3], [5, 6]]},
      {'x_list': [[1, 2], [3, 4], [1]], 'y_list': [[2, 3], [5, 6], [3]]},
      {'x_list': [[1, 2], [], [1]], 'y_list': [[2, 3], [], [3]]},
  ])
  def test_passing_both_ragged(self, x_list, y_list):
    x = ragged_factory_ops.constant(x_list)
    y = ragged_factory_ops.constant(y_list)
    [x, y], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values(
        [x, y])
    x.shape.assert_is_compatible_with(y.shape)

  @parameterized.parameters([
      {'x_list': [1]},
      {'x_list': [1, 2]},
      {'x_list': [1, 2, 4]},
      {'x_list': [[1, 2], [3, 4]]},
      {'x_list': [[1, 2], [3, 4], [1]]},
      {'x_list': [[1, 2], [], [1]]},
  ])
  def test_passing_one_ragged(self, x_list):
    x = ragged_factory_ops.constant(x_list)
    [x], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x])

  @parameterized.parameters([
      {'x_list': [1], 'y_list': [2], 'mask_list': [0]},
      {'x_list': [1, 2], 'y_list': [2, 3], 'mask_list': [0, 1]},
      {'x_list': [1, 2, 4], 'y_list': [2, 3, 5], 'mask_list': [1, 1, 1]},
      {'x_list': [[1, 2], [3, 4]], 'y_list': [[2, 3], [5, 6]],
       'mask_list': [[1, 1], [0, 1]]},
      {'x_list': [[1, 2], [3, 4], [1]], 'y_list': [[2, 3], [5, 6], [3]],
       'mask_list': [[1, 1], [0, 0], [1]]},
      {'x_list': [[1, 2], [], [1]], 'y_list': [[2, 3], [], [3]],
       'mask_list': [[1, 1], [], [0]]},
  ])
  def test_passing_both_ragged_with_mask(self, x_list, y_list, mask_list):
    x = ragged_factory_ops.constant(x_list)
    y = ragged_factory_ops.constant(y_list)
    mask = ragged_factory_ops.constant(mask_list)
    [x, y], mask = metrics_utils.ragged_assert_compatible_and_get_flat_values(
        [x, y], mask)
    x.shape.assert_is_compatible_with(y.shape)
    y.shape.assert_is_compatible_with(mask.shape)

  @parameterized.parameters([
      {'x_list': [1], 'mask_list': [0]},
      {'x_list': [1, 2], 'mask_list': [0, 1]},
      {'x_list': [1, 2, 4], 'mask_list': [1, 1, 1]},
      {'x_list': [[1, 2], [3, 4]], 'mask_list': [[1, 1], [0, 1]]},
      {'x_list': [[1, 2], [3, 4], [1]], 'mask_list': [[1, 1], [0, 0], [1]]},
      {'x_list': [[1, 2], [], [1]], 'mask_list': [[1, 1], [], [0]]},
  ])
  def test_passing_one_ragged_with_mask(self, x_list, mask_list):
    x = ragged_factory_ops.constant(x_list)
    mask = ragged_factory_ops.constant(mask_list)
    [x], mask = metrics_utils.ragged_assert_compatible_and_get_flat_values(
        [x], mask)
    x.shape.assert_is_compatible_with(mask.shape)

  @parameterized.parameters([
      {'x_list': [[[1, 3]]], 'y_list': [[2, 3]]},
  ])
  def test_failing_different_ragged_and_dense_ranks(self, x_list, y_list):
    x = ragged_factory_ops.constant(x_list)
    y = ragged_factory_ops.constant(y_list)
    with self.assertRaises(ValueError):  # pylint: disable=g-error-prone-assert-raises
      [x, y], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values(
          [x, y])

  @parameterized.parameters([
      {'x_list': [[[1, 3]]], 'y_list': [[[2, 3]]], 'mask_list': [[0, 1]]},
  ])
  def test_failing_different_mask_ranks(self, x_list, y_list, mask_list):
    x = ragged_factory_ops.constant(x_list)
    y = ragged_factory_ops.constant(y_list)
    mask = ragged_factory_ops.constant(mask_list)
    with self.assertRaises(ValueError):  # pylint: disable=g-error-prone-assert-raises
      [x, y], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values(
          [x, y], mask)

  # Cases where the ragged ranks differ but the overall dimension shapes and
  # sizes are identical are not supported: handling them would add too much
  # performance overhead for the common use cases.
  def test_failing_different_ragged_ranks(self):
    dt = constant_op.constant([[[1, 2]]])
    # adding a ragged dimension
    x = ragged_tensor.RaggedTensor.from_row_splits(dt, row_splits=[0, 1])
    y = ragged_factory_ops.constant([[[[1, 2]]]])
    with self.assertRaises(ValueError):  # pylint: disable=g-error-prone-assert-raises
      [x, y], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values(
          [x, y])


if __name__ == '__main__':
  googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/utils/metrics_utils_test.py
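The row above tests `metrics_utils.ragged_assert_compatible_and_get_flat_values`, which checks that a list of ragged tensors (and an optional mask) share a compatible ragged structure and returns their flat values. A minimal sketch of calling it directly, reusing the exact shapes the tests exercise (the graph/eager setup provided by the test harness is assumed):

```python
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.ops.ragged import ragged_factory_ops

y_true = ragged_factory_ops.constant([[1, 2], [], [1]])
y_pred = ragged_factory_ops.constant([[2, 3], [], [3]])
# Incompatible ragged structures raise ValueError; compatible ones come back
# as flat values with matching shapes.
[y_true, y_pred], _ = (
    metrics_utils.ragged_assert_compatible_and_get_flat_values(
        [y_true, y_pred]))
y_true.shape.assert_is_compatible_with(y_pred.shape)
```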
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities used by convolution layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools

import numpy as np
from six.moves import range  # pylint: disable=redefined-builtin

from tensorflow.python.keras import backend


def convert_data_format(data_format, ndim):
  if data_format == 'channels_last':
    if ndim == 3:
      return 'NWC'
    elif ndim == 4:
      return 'NHWC'
    elif ndim == 5:
      return 'NDHWC'
    else:
      raise ValueError('Input rank not supported:', ndim)
  elif data_format == 'channels_first':
    if ndim == 3:
      return 'NCW'
    elif ndim == 4:
      return 'NCHW'
    elif ndim == 5:
      return 'NCDHW'
    else:
      raise ValueError('Input rank not supported:', ndim)
  else:
    raise ValueError('Invalid data_format:', data_format)


def normalize_tuple(value, n, name):
  """Transforms a single integer or iterable of integers into an integer tuple.

  Arguments:
    value: The value to validate and convert. Could be an int, or any iterable
      of ints.
    n: The size of the tuple to be returned.
    name: The name of the argument being validated, e.g. "strides" or
      "kernel_size". This is only used to format error messages.

  Returns:
    A tuple of n integers.

  Raises:
    ValueError: If something else than an int/long or iterable thereof was
      passed.
  """
  if isinstance(value, int):
    return (value,) * n
  else:
    try:
      value_tuple = tuple(value)
    except TypeError:
      raise ValueError('The `' + name + '` argument must be a tuple of ' +
                       str(n) + ' integers. Received: ' + str(value))
    if len(value_tuple) != n:
      raise ValueError('The `' + name + '` argument must be a tuple of ' +
                       str(n) + ' integers. Received: ' + str(value))
    for single_value in value_tuple:
      try:
        int(single_value)
      except (ValueError, TypeError):
        raise ValueError('The `' + name + '` argument must be a tuple of ' +
                         str(n) + ' integers. Received: ' + str(value) + ' '
                         'including element ' + str(single_value) +
                         ' of type ' + str(type(single_value)))
    return value_tuple


def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
  """Determines output length of a convolution given input length.

  Arguments:
    input_length: integer.
    filter_size: integer.
    padding: one of "same", "valid", "full", "causal"
    stride: integer.
    dilation: dilation rate, integer.

  Returns:
    The output length (integer).
  """
  if input_length is None:
    return None
  assert padding in {'same', 'valid', 'full', 'causal'}
  dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
  if padding in ['same', 'causal']:
    output_length = input_length
  elif padding == 'valid':
    output_length = input_length - dilated_filter_size + 1
  elif padding == 'full':
    output_length = input_length + dilated_filter_size - 1
  return (output_length + stride - 1) // stride


def conv_input_length(output_length, filter_size, padding, stride):
  """Determines input length of a convolution given output length.

  Arguments:
    output_length: integer.
    filter_size: integer.
    padding: one of "same", "valid", "full".
    stride: integer.

  Returns:
    The input length (integer).
  """
  if output_length is None:
    return None
  assert padding in {'same', 'valid', 'full'}
  if padding == 'same':
    pad = filter_size // 2
  elif padding == 'valid':
    pad = 0
  elif padding == 'full':
    pad = filter_size - 1
  return (output_length - 1) * stride - 2 * pad + filter_size


def deconv_output_length(input_length, filter_size, padding,
                         output_padding=None, stride=0, dilation=1):
  """Determines output length of a transposed convolution given input length.

  Arguments:
    input_length: Integer.
    filter_size: Integer.
    padding: one of `"same"`, `"valid"`, `"full"`.
    output_padding: Integer, amount of padding along the output dimension. Can
      be set to `None` in which case the output length is inferred.
    stride: Integer.
    dilation: Integer.

  Returns:
    The output length (integer).
  """
  assert padding in {'same', 'valid', 'full'}
  if input_length is None:
    return None

  # Get the dilated kernel size
  filter_size = filter_size + (filter_size - 1) * (dilation - 1)

  # Infer length if output padding is None, else compute the exact length
  if output_padding is None:
    if padding == 'valid':
      length = input_length * stride + max(filter_size - stride, 0)
    elif padding == 'full':
      length = input_length * stride - (stride + filter_size - 2)
    elif padding == 'same':
      length = input_length * stride
  else:
    if padding == 'same':
      pad = filter_size // 2
    elif padding == 'valid':
      pad = 0
    elif padding == 'full':
      pad = filter_size - 1
    length = ((input_length - 1) * stride + filter_size - 2 * pad +
              output_padding)
  return length


def normalize_data_format(value):
  if value is None:
    value = backend.image_data_format()
  data_format = value.lower()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('The `data_format` argument must be one of '
                     '"channels_first", "channels_last". Received: ' +
                     str(value))
  return data_format


def normalize_padding(value):
  if isinstance(value, (list, tuple)):
    return value
  padding = value.lower()
  if padding not in {'valid', 'same', 'causal'}:
    raise ValueError('The `padding` argument must be a list/tuple or one of '
                     '"valid", "same" (or "causal", only for `Conv1D`). '
                     'Received: ' + str(padding))
  return padding


def convert_kernel(kernel):
  """Converts a Numpy kernel matrix from Theano format to TensorFlow format.

  Also works reciprocally, since the transformation is its own inverse.

  Arguments:
    kernel: Numpy array (3D, 4D or 5D).

  Returns:
    The converted kernel.

  Raises:
    ValueError: in case of invalid kernel shape or invalid data_format.
  """
  kernel = np.asarray(kernel)
  if not 3 <= kernel.ndim <= 5:
    raise ValueError('Invalid kernel shape:', kernel.shape)
  slices = [slice(None, None, -1) for _ in range(kernel.ndim)]
  no_flip = (slice(None, None), slice(None, None))
  slices[-2:] = no_flip
  return np.copy(kernel[slices])


def conv_kernel_mask(input_shape, kernel_shape, strides, padding):
  """Compute a mask representing the connectivity of a convolution operation.

  Assume a convolution with given parameters is applied to an input having N
  spatial dimensions with `input_shape = (d_in1, ..., d_inN)` to produce an
  output with shape `(d_out1, ..., d_outN)`. This method returns a boolean
  array of shape `(d_in1, ..., d_inN, d_out1, ..., d_outN)` with `True`
  entries indicating pairs of input and output locations that are connected
  by a weight.

  Example:

    ```python
    >>> input_shape = (4,)
    >>> kernel_shape = (2,)
    >>> strides = (1,)
    >>> padding = "valid"
    >>> conv_kernel_mask(input_shape, kernel_shape, strides, padding)
    array([[ True, False, False],
           [ True,  True, False],
           [False,  True,  True],
           [False, False,  True]], dtype=bool)
    ```

    where rows and columns correspond to inputs and outputs respectively.

  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.

  Returns:
    A boolean 2N-D `np.ndarray` of shape
    `(d_in1, ..., d_inN, d_out1, ..., d_outN)`, where `(d_out1, ..., d_outN)`
    is the spatial shape of the output. `True` entries in the mask represent
    pairs of input-output locations that are connected by a weight.

  Raises:
    ValueError: if `input_shape`, `kernel_shape` and `strides` don't have the
      same number of dimensions.
    NotImplementedError: if `padding` is not in {`"same"`, `"valid"`}.
  """
  if padding not in {'same', 'valid'}:
    raise NotImplementedError('Padding type %s not supported. '
                              'Only "valid" and "same" '
                              'are implemented.' % padding)

  in_dims = len(input_shape)
  if isinstance(kernel_shape, int):
    kernel_shape = (kernel_shape,) * in_dims
  if isinstance(strides, int):
    strides = (strides,) * in_dims

  kernel_dims = len(kernel_shape)
  stride_dims = len(strides)
  if kernel_dims != in_dims or stride_dims != in_dims:
    raise ValueError('Number of strides, input and kernel dimensions must all '
                     'match. Received: %d, %d, %d.' %
                     (stride_dims, in_dims, kernel_dims))

  output_shape = conv_output_shape(input_shape, kernel_shape, strides, padding)

  mask_shape = input_shape + output_shape
  mask = np.zeros(mask_shape, bool)

  output_axes_ticks = [range(dim) for dim in output_shape]
  for output_position in itertools.product(*output_axes_ticks):
    input_axes_ticks = conv_connected_inputs(input_shape, kernel_shape,
                                             output_position, strides, padding)
    for input_position in itertools.product(*input_axes_ticks):
      mask[input_position + output_position] = True

  return mask


def conv_kernel_idxs(input_shape, kernel_shape, strides, padding, filters_in,
                     filters_out, data_format):
  """Yields output-input tuples of indices in a CNN layer.

  The generator iterates over all `(output_idx, input_idx)` tuples, where
  `output_idx` is an integer index in a flattened tensor representing a single
  output image of a convolutional layer that is connected (via the layer
  weights) to the respective single input image at `input_idx`.

  Example:

    ```python
    >>> input_shape = (2, 2)
    >>> kernel_shape = (2, 1)
    >>> strides = (1, 1)
    >>> padding = "valid"
    >>> filters_in = 1
    >>> filters_out = 1
    >>> data_format = "channels_last"
    >>> list(conv_kernel_idxs(input_shape, kernel_shape, strides, padding,
    ...                       filters_in, filters_out, data_format))
    [(0, 0), (0, 2), (1, 1), (1, 3)]
    ```

  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.
    filters_in: `int`, number of filters in the input to the layer.
    filters_out: `int`, number of filters in the output of the layer.
    data_format: string, "channels_first" or "channels_last".

  Yields:
    The next tuple `(output_idx, input_idx)`, where `output_idx` is an integer
    index in a flattened tensor representing a single output image of a
    convolutional layer that is connected (via the layer weights) to the
    respective single input image at `input_idx`.

  Raises:
    ValueError: if `data_format` is neither `"channels_last"` nor
      `"channels_first"`, or if number of strides, input, and kernel number
      of dimensions do not match.
    NotImplementedError: if `padding` is neither `"same"` nor `"valid"`.
  """
  if padding not in ('same', 'valid'):
    raise NotImplementedError('Padding type %s not supported. '
                              'Only "valid" and "same" '
                              'are implemented.' % padding)

  in_dims = len(input_shape)
  if isinstance(kernel_shape, int):
    kernel_shape = (kernel_shape,) * in_dims
  if isinstance(strides, int):
    strides = (strides,) * in_dims

  kernel_dims = len(kernel_shape)
  stride_dims = len(strides)
  if kernel_dims != in_dims or stride_dims != in_dims:
    raise ValueError('Number of strides, input and kernel dimensions must all '
                     'match. Received: %d, %d, %d.' %
                     (stride_dims, in_dims, kernel_dims))

  output_shape = conv_output_shape(input_shape, kernel_shape, strides, padding)
  output_axes_ticks = [range(dim) for dim in output_shape]

  if data_format == 'channels_first':
    concat_idxs = lambda spatial_idx, filter_idx: (filter_idx,) + spatial_idx
  elif data_format == 'channels_last':
    concat_idxs = lambda spatial_idx, filter_idx: spatial_idx + (filter_idx,)
  else:
    raise ValueError('Data format %s not recognized. '
                     '`data_format` must be "channels_first" or '
                     '"channels_last".' % data_format)

  for output_position in itertools.product(*output_axes_ticks):
    input_axes_ticks = conv_connected_inputs(input_shape, kernel_shape,
                                             output_position, strides, padding)
    for input_position in itertools.product(*input_axes_ticks):
      for f_in in range(filters_in):
        for f_out in range(filters_out):
          out_idx = np.ravel_multi_index(
              multi_index=concat_idxs(output_position, f_out),
              dims=concat_idxs(output_shape, filters_out))
          in_idx = np.ravel_multi_index(
              multi_index=concat_idxs(input_position, f_in),
              dims=concat_idxs(input_shape, filters_in))
          yield (out_idx, in_idx)


def conv_connected_inputs(input_shape, kernel_shape, output_position, strides,
                          padding):
  """Return locations of the input connected to an output position.

  Assume a convolution with given parameters is applied to an input having N
  spatial dimensions with `input_shape = (d_in1, ..., d_inN)`. This method
  returns N ranges specifying the input region that was convolved with the
  kernel to produce the output at position
  `output_position = (p_out1, ..., p_outN)`.

  Example:

    ```python
    >>> input_shape = (4, 4)
    >>> kernel_shape = (2, 1)
    >>> output_position = (1, 1)
    >>> strides = (1, 1)
    >>> padding = "valid"
    >>> conv_connected_inputs(input_shape, kernel_shape, output_position,
    ...                       strides, padding)
    [xrange(1, 3), xrange(1, 2)]
    ```

  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    output_position: tuple of size N: `(p_out1, ..., p_outN)`, a single
      position in the output of the convolution.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.

  Returns:
    N ranges `[[p_in_left1, ..., p_in_right1], ...,
              [p_in_leftN, ..., p_in_rightN]]` specifying the region in the
    input connected to output_position.
  """
  ranges = []

  ndims = len(input_shape)
  for d in range(ndims):
    left_shift = int(kernel_shape[d] / 2)
    right_shift = kernel_shape[d] - left_shift

    center = output_position[d] * strides[d]
    if padding == 'valid':
      center += left_shift

    start = max(0, center - left_shift)
    end = min(input_shape[d], center + right_shift)

    ranges.append(range(start, end))

  return ranges


def conv_output_shape(input_shape, kernel_shape, strides, padding):
  """Return the output shape of an N-D convolution.

  Forces dimensions where input is empty (size 0) to remain empty.

  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.

  Returns:
    tuple of size N: `(d_out1, ..., d_outN)`, spatial shape of the output.
  """
  dims = range(len(kernel_shape))
  output_shape = [
      conv_output_length(input_shape[d], kernel_shape[d], padding, strides[d])
      for d in dims
  ]
  output_shape = tuple(
      [0 if input_shape[d] == 0 else output_shape[d] for d in dims])
  return output_shape
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/utils/conv_utils.py
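As a quick sanity check on the shape arithmetic in `conv_output_length` and `conv_kernel_mask` above, here is a small worked example; the values are computed by hand from the formulas in the file, with no assumptions beyond its own API:

```python
from tensorflow.python.keras.utils import conv_utils

# 'valid': dilated filter = 3, so length = (10 - 3 + 1 + 2 - 1) // 2 = 4
assert conv_utils.conv_output_length(10, 3, 'valid', 2) == 4
# 'same' keeps the length before striding: (10 + 2 - 1) // 2 = 5
assert conv_utils.conv_output_length(10, 3, 'same', 2) == 5
# The connectivity mask pairs the two shapes: (input, output) -> bool
mask = conv_utils.conv_kernel_mask((10,), (3,), (2,), 'valid')
assert mask.shape == (10, 4)
```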
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from abc import abstractmethod
from contextlib import closing
import gc
import hashlib
import multiprocessing
from multiprocessing.pool import ThreadPool
import os
import random
import shutil
import signal
import sys
import tarfile
import threading
import time
import weakref
import zipfile

import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen

from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export

try:
  import queue
except ImportError:
  import Queue as queue


if sys.version_info[0] == 2:

  def urlretrieve(url, filename, reporthook=None, data=None):
    """Replacement for `urlretrieve` for Python 2.

    Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
    `urllib` module, known to have issues with proxy management.

    Arguments:
        url: url to retrieve.
        filename: where to store the retrieved data locally.
        reporthook: a hook function that will be called once on establishment
          of the network connection and once after each block read thereafter.
          The hook will be passed three arguments; a count of blocks
          transferred so far, a block size in bytes, and the total size of the
          file.
        data: `data` argument passed to `urlopen`.
    """

    def chunk_read(response, chunk_size=8192, reporthook=None):
      content_type = response.info().get('Content-Length')
      total_size = -1
      if content_type is not None:
        total_size = int(content_type.strip())
      count = 0
      while True:
        chunk = response.read(chunk_size)
        count += 1
        if reporthook is not None:
          reporthook(count, chunk_size, total_size)
        if chunk:
          yield chunk
        else:
          break

    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
      for chunk in chunk_read(response, reporthook=reporthook):
        fd.write(chunk)
else:
  from six.moves.urllib.request import urlretrieve


def is_generator_or_sequence(x):
  """Check if `x` is a Keras generator type."""
  return tf_inspect.isgenerator(x) or isinstance(x, Sequence)


def _extract_archive(file_path, path='.', archive_format='auto'):
  """Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.

  Arguments:
    file_path: path to the archive file
    path: path to extract the archive file
    archive_format: Archive format to try for extracting the file.
      Options are 'auto', 'tar', 'zip', and None.
      'tar' includes tar, tar.gz, and tar.bz files.
      The default 'auto' is ['tar', 'zip'].
      None or an empty list will return no matches found.

  Returns:
    True if a match was found and an archive extraction was completed,
    False otherwise.
  """
  if archive_format is None:
    return False
  if archive_format == 'auto':
    archive_format = ['tar', 'zip']
  if isinstance(archive_format, six.string_types):
    archive_format = [archive_format]

  for archive_type in archive_format:
    if archive_type == 'tar':
      open_fn = tarfile.open
      is_match_fn = tarfile.is_tarfile
    if archive_type == 'zip':
      open_fn = zipfile.ZipFile
      is_match_fn = zipfile.is_zipfile

    if is_match_fn(file_path):
      with open_fn(file_path) as archive:
        try:
          archive.extractall(path)
        except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
          if os.path.exists(path):
            if os.path.isfile(path):
              os.remove(path)
            else:
              shutil.rmtree(path)
          raise
      return True
  return False


@keras_export('keras.utils.get_file')
def get_file(fname,
             origin,
             untar=False,
             md5_hash=None,
             file_hash=None,
             cache_subdir='datasets',
             hash_algorithm='auto',
             extract=False,
             archive_format='auto',
             cache_dir=None):
  """Downloads a file from a URL if it is not already in the cache.

  By default the file at the url `origin` is downloaded to the
  cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
  and given the filename `fname`. The final location of a file
  `example.txt` would therefore be `~/.keras/datasets/example.txt`.

  Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
  Passing a hash will verify the file after download. The command line
  programs `shasum` and `sha256sum` can compute the hash.

  Arguments:
      fname: Name of the file. If an absolute path `/path/to/file.txt` is
        specified the file will be saved at that location.
      origin: Original URL of the file.
      untar: Deprecated in favor of 'extract'. boolean, whether the file
        should be decompressed
      md5_hash: Deprecated in favor of 'file_hash'. md5 hash of the file for
        verification
      file_hash: The expected hash string of the file after download. The
        sha256 and md5 hash algorithms are both supported.
      cache_subdir: Subdirectory under the Keras cache dir where the file is
        saved. If an absolute path `/path/to/folder` is specified the file
        will be saved at that location.
      hash_algorithm: Select the hash algorithm to verify the file. options
        are 'md5', 'sha256', and 'auto'. The default 'auto' detects the hash
        algorithm in use.
      extract: True tries extracting the file as an Archive, like tar or zip.
      archive_format: Archive format to try for extracting the file. Options
        are 'auto', 'tar', 'zip', and None. 'tar' includes tar, tar.gz, and
        tar.bz files. The default 'auto' is ['tar', 'zip']. None or an empty
        list will return no matches found.
      cache_dir: Location to store cached files, when None it defaults to the
        [Keras Directory](/faq/#where-is-the-keras-configuration-filed-stored).

  Returns:
      Path to the downloaded file
  """
  if cache_dir is None:
    cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
  if md5_hash is not None and file_hash is None:
    file_hash = md5_hash
    hash_algorithm = 'md5'
  datadir_base = os.path.expanduser(cache_dir)
  if not os.access(datadir_base, os.W_OK):
    datadir_base = os.path.join('/tmp', '.keras')
  datadir = os.path.join(datadir_base, cache_subdir)
  if not os.path.exists(datadir):
    os.makedirs(datadir)

  if untar:
    untar_fpath = os.path.join(datadir, fname)
    fpath = untar_fpath + '.tar.gz'
  else:
    fpath = os.path.join(datadir, fname)

  download = False
  if os.path.exists(fpath):
    # File found; verify integrity if a hash was provided.
    if file_hash is not None:
      if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
        print('A local file was found, but it seems to be '
              'incomplete or outdated because the ' + hash_algorithm +
              ' file hash does not match the original value of ' + file_hash +
              ' so we will re-download the data.')
        download = True
  else:
    download = True

  if download:
    print('Downloading data from', origin)

    class ProgressTracker(object):
      # Maintain progbar for the lifetime of download.
      # This design was chosen for Python 2.7 compatibility.
      progbar = None

    def dl_progress(count, block_size, total_size):
      if ProgressTracker.progbar is None:
        if total_size == -1:
          total_size = None
        ProgressTracker.progbar = Progbar(total_size)
      else:
        ProgressTracker.progbar.update(count * block_size)

    error_msg = 'URL fetch failure on {}: {} -- {}'
    try:
      try:
        urlretrieve(origin, fpath, dl_progress)
      except HTTPError as e:
        raise Exception(error_msg.format(origin, e.code, e.msg))
      except URLError as e:
        raise Exception(error_msg.format(origin, e.errno, e.reason))
    except (Exception, KeyboardInterrupt) as e:
      if os.path.exists(fpath):
        os.remove(fpath)
      raise
    ProgressTracker.progbar = None

  if untar:
    if not os.path.exists(untar_fpath):
      _extract_archive(fpath, datadir, archive_format='tar')
    return untar_fpath

  if extract:
    _extract_archive(fpath, datadir, archive_format)

  return fpath


def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
  """Calculates a file sha256 or md5 hash.

  Example:

  ```python
  >>> from keras.data_utils import _hash_file
  >>> _hash_file('/path/to/file.zip')
  'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
  ```

  Arguments:
      fpath: path to the file being validated
      algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'. The
        default 'auto' detects the hash algorithm in use.
      chunk_size: Bytes to read at a time, important for large files.

  Returns:
      The file hash
  """
  if algorithm in ('sha256', 'auto'):
    # There is no expected hash to inspect here, so 'auto' falls back to
    # sha256. (The previous check compared `len(hash)` against 64, which
    # referenced the `hash` builtin and could never hold.)
    hasher = hashlib.sha256()
  else:
    hasher = hashlib.md5()
  with open(fpath, 'rb') as fpath_file:
    for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
      hasher.update(chunk)
  return hasher.hexdigest()


def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
  """Validates a file against a sha256 or md5 hash.

  Arguments:
      fpath: path to the file being validated
      file_hash: The expected hash string of the file. The sha256 and md5
        hash algorithms are both supported.
      algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'. The
        default 'auto' detects the hash algorithm in use.
      chunk_size: Bytes to read at a time, important for large files.

  Returns:
      Whether the file is valid
  """
  if (algorithm == 'sha256') or (algorithm == 'auto' and len(file_hash) == 64):
    hasher = 'sha256'
  else:
    hasher = 'md5'

  if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
    return True
  else:
    return False


@keras_export('keras.utils.Sequence')
class Sequence(object):
  """Base object for fitting to a sequence of data, such as a dataset.

  Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
  If you want to modify your dataset between epochs you may implement
  `on_epoch_end`. The method `__getitem__` should return a complete batch.

  Notes:

  `Sequence` is a safer way to do multiprocessing. This structure guarantees
  that the network will only train once on each sample per epoch, which is
  not the case with generators.

  Examples:

  ```python
  from skimage.io import imread
  from skimage.transform import resize
  import numpy as np
  import math

  # Here, `x_set` is list of path to the images
  # and `y_set` are the associated classes.

  class CIFAR10Sequence(Sequence):

      def __init__(self, x_set, y_set, batch_size):
          self.x, self.y = x_set, y_set
          self.batch_size = batch_size

      def __len__(self):
          return math.ceil(len(self.x) / self.batch_size)

      def __getitem__(self, idx):
          batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
          batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]

          return np.array([
              resize(imread(file_name), (200, 200))
              for file_name in batch_x]), np.array(batch_y)
  ```
  """

  @abstractmethod
  def __getitem__(self, index):
    """Gets batch at position `index`.

    Arguments:
        index: position of the batch in the Sequence.

    Returns:
        A batch
    """
    raise NotImplementedError

  @abstractmethod
  def __len__(self):
    """Number of batches in the Sequence.

    Returns:
        The number of batches in the Sequence.
    """
    raise NotImplementedError

  def on_epoch_end(self):
    """Method called at the end of every epoch."""
    pass

  def __iter__(self):
    """Creates a generator that iterates over the Sequence."""
    for item in (self[i] for i in range(len(self))):
      yield item


def iter_sequence_infinite(seq):
  """Iterates indefinitely over a Sequence.

  Arguments:
    seq: Sequence instance.

  Yields:
    Batches of data from the Sequence.
  """
  while True:
    for item in seq:
      yield item


# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None

# Because multiprocessing pools are inherently unsafe, starting from a clean
# state can be essential to avoiding deadlocks. In order to accomplish this,
# we need to be able to check on the status of Pools that we create.
_DATA_POOLS = weakref.WeakSet()
_WORKER_ID_QUEUE = None  # Only created if needed.
_WORKER_IDS = set()


def get_worker_id_queue():
  """Lazily create the queue to track worker ids."""
  global _WORKER_ID_QUEUE
  if _WORKER_ID_QUEUE is None:
    _WORKER_ID_QUEUE = multiprocessing.Queue()
  return _WORKER_ID_QUEUE


def init_pool(seqs):
  global _SHARED_SEQUENCES
  _SHARED_SEQUENCES = seqs


@keras_export('keras.experimental.terminate_keras_multiprocessing_pools')
def terminate_keras_multiprocessing_pools(grace_period=0.1, use_sigkill=False):
  """Destroy Keras' multiprocessing pools to prevent deadlocks.

  In general multiprocessing.Pool can interact quite badly with other,
  seemingly unrelated, parts of a codebase due to Pool's reliance on fork.
  This method cleans up all pools which are known to belong to Keras (and
  thus can be safely terminated).

  Args:
    grace_period: Time (in seconds) to wait for process cleanup to propagate.
    use_sigkill: Boolean of whether or not to perform a cleanup pass using
      SIGKILL.

  Returns:
    A list of human readable strings describing all issues encountered. It is
    up to the caller to decide whether to treat this as an error condition.
  """
  errors = []

  # First cleanup the pools spawned by Keras. If we start killing workers and
  # a parent pool is still alive it will just spawn replacements which we
  # don't want.
  gc.collect()
  for pool in _DATA_POOLS:
    pool.close()
    pool.terminate()
    # We do not join the pool, because that would wait forever if a worker
    # refused to exit.

    # Finally, delete our reference to the pool so that we do not block
    # garbage collection.
    del pool

  # If there were any pools, sleep for a small grace period to allow
  # everything to finalize.
  if _DATA_POOLS:
    time.sleep(grace_period)

  # Now we kill any workers which are still alive. However we must compare
  # the worker identifier to the set of identifiers which are known to have
  # been spawned by pools belonging to Keras to avoid deleting unrelated
  # workers. First we call the .terminate() method of a worker, and then if
  # it still persists we directly send a signal to the process. Certain
  # worker tasks may be able to gracefully handle shutdown, so we send a
  # SIGTERM and then optionally follow up with a SIGKILL.
  visited_workers = set()
  cleanup_passes = ['.terminate', 'SIGTERM']
  if use_sigkill:
    cleanup_passes.append('SIGKILL')
  cleanup_passes.append('log')

  for cleanup_pass in cleanup_passes:
    while True:
      # In rare cases, queue.qsize() overestimates the number of elements.
      # This loop is designed to be more robust.
      try:
        _WORKER_IDS.add(get_worker_id_queue().get_nowait())
      except queue.Empty:
        break

    gc.collect()
    workers_terminated_this_pass = False
    for worker in multiprocessing.active_children():
      ident = worker.ident
      if ident in _WORKER_IDS and worker.is_alive():
        try:
          if cleanup_pass == '.terminate':
            # First we ask nicely.
            worker.terminate()
            worker.join(timeout=grace_period)
            visited_workers.add(ident)
            workers_terminated_this_pass = True
          elif cleanup_pass in ('SIGTERM', 'SIGKILL'):
            # Then we ask increasingly tersely.
            os.kill(worker.pid,
                    signal.SIGKILL if cleanup_pass == 'SIGKILL'
                    else signal.SIGTERM)
            workers_terminated_this_pass = True

          elif cleanup_pass == 'log':
            # And finally we give up and log the failure.
            errors.append('worker still alive: {}, pid={}, hash={}'
                          .format(worker.name, worker.pid, hash(worker)))

        except OSError:
          # Worker exited since the start of this loop.
          pass

    if workers_terminated_this_pass:
      # There can be a small propagation delay between worker destruction and
      # workers reporting False for is_alive and no longer appearing in the
      # list of active children. Once again, we sleep for a small grace
      # period. This prevents false positives from workers which are simply
      # still in the process of spinning down.
      time.sleep(grace_period)

  # Finally we remove the visited worker ids to handle the edge case that a
  # pid is reused.
  _WORKER_IDS.difference_update(visited_workers)

  gc.collect()
  for pool in _DATA_POOLS:
    errors.append('pool still exists: {}, hash={}'.format(pool, hash(pool)))

  return errors


def get_index(uid, i):
  """Get the value from the Sequence `uid` at index `i`.

  To allow multiple Sequences to be used at the same time, we use `uid` to
  get a specific one. A single Sequence would cause the validation to
  overwrite the training Sequence.

  Arguments:
      uid: int, Sequence identifier
      i: index

  Returns:
      The value at index `i`.
  """
  return _SHARED_SEQUENCES[uid][i]


@keras_export('keras.utils.SequenceEnqueuer')
class SequenceEnqueuer(object):
  """Base class to enqueue inputs.

  The task of an Enqueuer is to use parallelism to speed up preprocessing.
  This is done with processes or threads.

  Example:

  ```python
      enqueuer = SequenceEnqueuer(...)
      enqueuer.start()
      datas = enqueuer.get()
      for data in datas:
          # Use the inputs; training, evaluating, predicting.
          # ... stop sometime.
      enqueuer.close()
  ```

  The `enqueuer.get()` should be an infinite stream of data.
  """

  def __init__(self, sequence, use_multiprocessing=False):
    self.sequence = sequence
    self.use_multiprocessing = use_multiprocessing

    global _SEQUENCE_COUNTER
    if _SEQUENCE_COUNTER is None:
      try:
        _SEQUENCE_COUNTER = multiprocessing.Value('i', 0)
      except OSError:
        # In this case the OS does not allow us to use multiprocessing.
        # We resort to an int for enqueuer indexing.
        _SEQUENCE_COUNTER = 0

    if isinstance(_SEQUENCE_COUNTER, int):
      self.uid = _SEQUENCE_COUNTER
      _SEQUENCE_COUNTER += 1
    else:
      # Doing Multiprocessing.Value += x is not process-safe.
      with _SEQUENCE_COUNTER.get_lock():
        self.uid = _SEQUENCE_COUNTER.value
        _SEQUENCE_COUNTER.value += 1

    self.workers = 0
    self.executor_fn = None
    self.queue = None
    self.run_thread = None
    self.stop_signal = None

  def is_running(self):
    return self.stop_signal is not None and not self.stop_signal.is_set()

  def start(self, workers=1, max_queue_size=10):
    """Starts the handler's workers.

    Arguments:
        workers: Number of workers.
        max_queue_size: queue size
            (when full, workers could block on `put()`)
    """
    if self.use_multiprocessing:
      self.executor_fn = self._get_executor_init(workers)
    else:
      # We do not need the init since it's threads.
      self.executor_fn = lambda _: ThreadPool(workers)
    self.workers = workers
    self.queue = queue.Queue(max_queue_size)
    self.stop_signal = threading.Event()
    self.run_thread = threading.Thread(target=self._run)
    self.run_thread.daemon = True
    self.run_thread.start()

  def _send_sequence(self):
    """Sends current Iterable to all workers."""
    # For new processes that may spawn
    _SHARED_SEQUENCES[self.uid] = self.sequence

  def stop(self, timeout=None):
    """Stops running threads and waits for them to exit, if necessary.

    Should be called by the same thread which called `start()`.

    Arguments:
        timeout: maximum time to wait on `thread.join()`
    """
    self.stop_signal.set()
    with self.queue.mutex:
      self.queue.queue.clear()
      self.queue.unfinished_tasks = 0
      self.queue.not_full.notify()
    self.run_thread.join(timeout)
    _SHARED_SEQUENCES[self.uid] = None

  def __del__(self):
    if self.is_running():
      self.stop()

  @abstractmethod
  def _run(self):
    """Submits requests to the executor and queues the `Future` objects."""
    raise NotImplementedError

  @abstractmethod
  def _get_executor_init(self, workers):
    """Gets the Pool initializer for multiprocessing.

    Arguments:
        workers: Number of workers.

    Returns:
        Function, a Function to initialize the pool
    """
    raise NotImplementedError

  @abstractmethod
  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    Returns:
        Generator yielding tuples `(inputs, targets)`
            or `(inputs, targets, sample_weights)`.
    """
    raise NotImplementedError


@keras_export('keras.utils.OrderedEnqueuer')
class OrderedEnqueuer(SequenceEnqueuer):
  """Builds an Enqueuer from a Sequence.

  Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

  Arguments:
      sequence: A `tf.keras.utils.data_utils.Sequence` object.
      use_multiprocessing: use multiprocessing if True, otherwise threading
      shuffle: whether to shuffle the data at the beginning of each epoch
  """

  def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
    super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
    self.shuffle = shuffle

  def _get_executor_init(self, workers):
    """Gets the Pool initializer for multiprocessing.

    Arguments:
        workers: Number of workers.

    Returns:
        Function, a Function to initialize the pool
    """
    def pool_fn(seqs):
      pool = multiprocessing.Pool(
          workers, initializer=init_pool_generator,
          initargs=(seqs, None, get_worker_id_queue()))
      _DATA_POOLS.add(pool)
      return pool

    return pool_fn

  def _wait_queue(self):
    """Wait for the queue to be empty."""
    while True:
      time.sleep(0.1)
      if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
        return

  def _run(self):
    """Submits requests to the executor and queues the `Future` objects."""
    sequence = list(range(len(self.sequence)))
    self._send_sequence()  # Share the initial sequence
    while True:
      if self.shuffle:
        random.shuffle(sequence)

      with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
        for i in sequence:
          if self.stop_signal.is_set():
            return

          self.queue.put(
              executor.apply_async(get_index, (self.uid, i)), block=True)

        # Done with the current epoch, waiting for the final batches
        self._wait_queue()

        if self.stop_signal.is_set():
          # We're done
          return

      # Call the internal on epoch end.
      self.sequence.on_epoch_end()
      self._send_sequence()  # Update the pool

  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    Yields:
        The next element in the queue, i.e. a tuple
        `(inputs, targets)` or
        `(inputs, targets, sample_weights)`.
    """
    try:
      while self.is_running():
        inputs = self.queue.get(block=True).get()
        self.queue.task_done()
        if inputs is not None:
          yield inputs
    except Exception:  # pylint: disable=broad-except
      self.stop()
      six.reraise(*sys.exc_info())


def init_pool_generator(gens, random_seed=None, id_queue=None):
  """Initializer function for pool workers.

  Args:
    gens: State which should be made available to worker processes.
    random_seed: An optional value with which to seed child processes.
    id_queue: A multiprocessing Queue of worker ids. This is used to indicate
      that a worker process was created by Keras and can be terminated using
      the cleanup_all_keras_forkpools utility.
  """
  global _SHARED_SEQUENCES
  _SHARED_SEQUENCES = gens

  worker_proc = multiprocessing.current_process()

  # name isn't used for anything, but setting a more descriptive name is
  # helpful when diagnosing orphaned processes.
  worker_proc.name = 'Keras_worker_{}'.format(worker_proc.name)

  if random_seed is not None:
    np.random.seed(random_seed + worker_proc.ident)

  if id_queue is not None:
    # If a worker dies during init, the pool will just create a replacement.
    id_queue.put(worker_proc.ident, block=True, timeout=0.1)


def next_sample(uid):
  """Gets the next value from the generator `uid`.

  To allow multiple generators to be used at the same time, we use `uid` to
  get a specific one. A single generator would cause the validation to
  overwrite the training generator.

  Arguments:
      uid: int, generator identifier

  Returns:
      The next value of generator `uid`.
  """
  return six.next(_SHARED_SEQUENCES[uid])


@keras_export('keras.utils.GeneratorEnqueuer')
class GeneratorEnqueuer(SequenceEnqueuer):
  """Builds a queue out of a data generator.

  The provided generator can be finite in which case the class will throw
  a `StopIteration` exception.

  Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

  Arguments:
      generator: a generator function which yields data
      use_multiprocessing: use multiprocessing if True, otherwise threading
      random_seed: Initial seed for workers,
          will be incremented by one for each worker.
  """

  def __init__(self, sequence, use_multiprocessing=False, random_seed=None):
    super(GeneratorEnqueuer, self).__init__(sequence, use_multiprocessing)
    self.random_seed = random_seed

  def _get_executor_init(self, workers):
    """Gets the Pool initializer for multiprocessing.

    Arguments:
        workers: Number of workers.

    Returns:
        A Function to initialize the pool
    """
    def pool_fn(seqs):
      pool = multiprocessing.Pool(
          workers, initializer=init_pool_generator,
          initargs=(seqs, self.random_seed, get_worker_id_queue()))
      _DATA_POOLS.add(pool)
      return pool

    return pool_fn

  def _run(self):
    """Submits requests to the executor and queues the `Future` objects."""
    self._send_sequence()  # Share the initial generator
    with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
      while True:
        if self.stop_signal.is_set():
          return

        self.queue.put(
            executor.apply_async(next_sample, (self.uid,)), block=True)

  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    Yields:
        The next element in the queue, i.e. a tuple
        `(inputs, targets)` or
        `(inputs, targets, sample_weights)`.
    """
    try:
      while self.is_running():
        inputs = self.queue.get(block=True).get()
        self.queue.task_done()
        if inputs is not None:
          yield inputs
    except StopIteration:
      # Special case for finite generators
      last_ones = []
      while self.queue.qsize() > 0:
        last_ones.append(self.queue.get(block=True))
      # Wait for them to complete
      for f in last_ones:
        f.wait()
      # Keep the good ones
      last_ones = [future.get() for future in last_ones if future.successful()]
      for inputs in last_ones:
        if inputs is not None:
          yield inputs
    except Exception as e:  # pylint: disable=broad-except
      self.stop()
      if 'generator already executing' in str(e):
        raise RuntimeError(
            'Your generator is NOT thread-safe. '
            'Keras requires a thread-safe generator when '
            '`use_multiprocessing=False, workers > 1`. ')
      six.reraise(*sys.exc_info())
tensorflow-r1.15.5-nv23.03
tensorflow/python/keras/utils/data_utils.py
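The `data_utils.py` row above centers on the `Sequence`/enqueuer machinery. A self-contained sketch of driving a toy `Sequence` through an `OrderedEnqueuer` on the threading path; the `RangeSequence` class is invented here purely for illustration:

```python
import numpy as np

from tensorflow.python.keras.utils.data_utils import OrderedEnqueuer
from tensorflow.python.keras.utils.data_utils import Sequence


class RangeSequence(Sequence):
  """Toy Sequence yielding constant (inputs, targets) batches."""

  def __len__(self):
    return 4  # four batches per epoch

  def __getitem__(self, idx):
    batch = np.full((2,), idx)
    return batch, batch


enqueuer = OrderedEnqueuer(RangeSequence(), use_multiprocessing=False)
enqueuer.start(workers=1, max_queue_size=4)
stream = enqueuer.get()  # infinite stream; one epoch is len(sequence) batches
for _ in range(4):
  inputs, targets = next(stream)
enqueuer.stop()
```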