python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
|---|---|---|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonicalizes continue statements by de-sugaring into a control boolean."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
class _Continue(object):
  """Per-loop state: records whether a `continue` was seen and its flag var."""

  def __init__(self):
    # Flipped to True the first time a `continue` statement is encountered.
    self.used = False
    # Name of the boolean control variable that replaces `continue`.
    self.control_var_name = None

  def __repr__(self):
    return '<_Continue(used: %s, var: %s)>' % (self.used,
                                               self.control_var_name)
class _Block(object):
  """Tracks information about lexical blocks as they are visited in the AST.

  Mainly, this object tracks the creation of block guards that replace
  `continue` statements (e.g. `if not continue_:`).

  Attributes:
    create_guard_current: bool, whether to create a guard for the current
      statement.
    create_guard_next: bool, whether to create a guard for the next
      statement.
    is_loop_type: bool, whether this block is the body of a loop.
  """

  def __init__(self):
    # All flags start cleared; the transformer flips them as statements
    # are processed.
    self.create_guard_current = False
    self.create_guard_next = False
    self.is_loop_type = False
class ContinueCanonicalizationTransformer(converter.Base):
  """Canonicalizes continue statements into additional conditionals.

  Each loop body containing `continue` receives a boolean control variable
  (e.g. `continue_`). The `continue` itself becomes `continue_ = True`, and
  statements that could otherwise execute after it are wrapped in
  `if not continue_:` guards.
  """

  def visit_Continue(self, node):
    # Record that this loop needs the control variable machinery.
    self.state[_Continue].used = True
    for block in reversed(self.state[_Block].stack):
      # See ContinueCanonicalizationTest.test_multiple_continues for an example
      # it's necessary to create guards for all enclosing affected blocks, not
      # just that of the current block.
      block.create_guard_next = True
      if block.is_loop_type:
        # continue only affects the innermost loop
        break
    # The `continue` statement itself is replaced by `<control_var> = True`.
    template = """
      var_name = True
    """
    return templates.replace(
        template, var_name=self.state[_Continue].control_var_name)

  def _postprocess_statement(self, node):
    # Hook run after each statement of a visited block; wraps the statement
    # in a guard conditional when a preceding `continue` requires it.
    if self.state[_Continue].used:
      block = self.state[_Block]
      should_wrap_current = block.create_guard_current
      # After processing propagate whether to guard the next statement
      block.create_guard_current = block.create_guard_next
      block.create_guard_next = False
      if should_wrap_current:
        template = """
          if ag__.not_(var_name):
            original_node
        """
        cond, = templates.replace(
            template,
            var_name=self.state[_Continue].control_var_name,
            original_node=node)
        # Returning (node, scope) directs visit_block to continue appending
        # subsequent statements inside the guard's body.
        return cond, cond.body
    return node, None

  def _visit_loop_body(self, node, nodes):
    # Visits a loop body with fresh _Continue/_Block states; if a `continue`
    # was found, prepends `<control_var> = False` to the body.
    self.state[_Continue].enter()
    self.state[_Block].enter()
    self.state[_Block].is_loop_type = True
    scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
    continue_var = self.ctx.namer.new_symbol('continue_', scope.referenced)
    self.state[_Continue].control_var_name = continue_var

    nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)

    if self.state[_Continue].used:
      # Initialize the control variable at the top of each loop iteration.
      template = """
        var_name = False
      """
      control_var_init = templates.replace(template, var_name=continue_var)
      nodes = control_var_init + nodes

    self.state[_Block].exit()
    self.state[_Continue].exit()
    return nodes

  def _visit_non_loop_body(self, nodes):
    # Non-loop blocks (if/with/try bodies) share the enclosing loop's
    # _Continue state but get their own _Block for guard tracking.
    self.state[_Block].enter()
    nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
    self.state[_Block].exit()
    return nodes

  def visit_While(self, node):
    node.test = self.visit(node.test)
    node.body = self._visit_loop_body(node, node.body)
    # A continue in the else clause applies to the containing scope.
    node.orelse = self._visit_non_loop_body(node.orelse)
    return node

  def visit_For(self, node):
    node.target = self.generic_visit(node.target)
    node.iter = self.generic_visit(node.iter)
    node.body = self._visit_loop_body(node, node.body)
    # A continue in the else clause applies to the containing scope.
    node.orelse = self._visit_non_loop_body(node.orelse)
    return node

  def visit_If(self, node):
    node.body = self._visit_non_loop_body(node.body)
    node.orelse = self._visit_non_loop_body(node.orelse)
    return node

  def visit_With(self, node):
    node.items = self.visit_block(node.items)
    node.body = self._visit_non_loop_body(node.body)
    return node

  def visit_Try(self, node):
    node.body = self._visit_non_loop_body(node.body)
    node.orelse = self._visit_non_loop_body(node.orelse)
    # In Python 3.8 and later continue is allowed in finally blocks
    node.finalbody = self._visit_non_loop_body(node.finalbody)
    node.handlers = self.visit_block(node.handlers)
    return node

  def visit_ExceptHandler(self, node):
    node.body = self._visit_non_loop_body(node.body)
    return node
def transform(node, ctx):
  """Canonicalizes `continue` statements in `node`; returns the new AST."""
  return ContinueCanonicalizationTransformer(ctx).visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/continue_statements.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conditional_expressions module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import conditional_expressions
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.platform import test
class ConditionalExpressionsTest(converter_testing.TestCase):
  """Tests conversion of ternary conditional expressions."""

  def assertTransformedEquivalent(self, test_fn, *inputs):
    # Converts test_fn and checks the converted version returns the same
    # value as the original for the given inputs.
    ns = {}
    with self.converted(test_fn, conditional_expressions, ns) as result:
      self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))

  def test_basic(self):

    def test_fn(x):
      return 1 if x else 0

    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 3)

  def test_nested_orelse(self):

    def test_fn(x):
      # Chained ternary: the orelse is itself a conditional expression.
      y = x * x if x > 0 else x if x else 1
      return y

    self.assertTransformedEquivalent(test_fn, -2)
    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 2)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/conditional_expressions_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter for slice operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.lang import directives
from tensorflow.python.autograph.pyct import templates
class SliceTransformer(converter.Base):
  """Converts slicing operations to their TF counterpart.

  Currently, relying on the default slice operator that Tensor uses is
  insufficient, because TensorArray and tensor lists use dedicated index read
  and write functions.
  """

  def _process_single_assignment(self, target, value):
    # Rewrites `target[key] = value` into `target = ag__.set_item(...)`.
    # Returns None when the assignment is not a plain index write.
    if not isinstance(target, gast.Subscript):
      return None
    if not isinstance(target.slice, gast.Index):
      return None

    template = """
      target = ag__.set_item(target, key, item)
    """
    return templates.replace(
        template, target=target.value, key=target.slice.value, item=value)

  def visit_Assign(self, node):
    node = self.generic_visit(node)
    # TODO(mdan): Support unpackings and multiple assignments.
    if len(node.targets) != 1:
      raise NotImplementedError('multiple assignment')
    replacement = self._process_single_assignment(node.targets[0], node.value)
    if replacement is not None:
      return replacement
    return node

  def visit_Subscript(self, node):
    node = self.generic_visit(node)
    # Only plain index reads are rewritten; slices/extended slices pass
    # through unchanged.
    if not isinstance(node.slice, gast.Index):
      return node

    if not isinstance(node.ctx, gast.Load):
      # Index writes are handled at a higher level, one at which the rvalue is
      # also available.
      return node

    # The element dtype may come from a `set_element_type` directive attached
    # to the value's definition; defaults to a literal None expression.
    dtype = self.get_definition_directive(
        node.value,
        directives.set_element_type,
        'dtype',
        default=templates.replace_as_expression('None'))

    template = """
      ag__.get_item(
          target,
          key,
          opts=ag__.GetItemOpts(element_dtype=dtype))
    """
    return templates.replace_as_expression(
        template, target=node.value, key=node.slice.value, dtype=dtype)
def transform(node, ctx):
  """Rewrites index reads/writes in `node`; returns the transformed AST."""
  transformer = SliceTransformer(ctx)
  return transformer.visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/slices.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts the ternary conditional operator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import templates
class ConditionalExpressionTransformer(converter.Base):
  """Converts conditional expressions to functional form."""

  def visit_IfExp(self, node):
    # `true_expr if test else false_expr` becomes a call to ag__.if_stmt
    # with lambda-wrapped branches so evaluation stays lazy. The trailing
    # two lambdas are presumably state get/set hooks — no-ops here; confirm
    # against the ag__.if_stmt signature.
    return templates.replace_as_expression(
        '''ag__.if_stmt(test, lambda: true_expr,
                        lambda: false_expr, lambda: (), lambda _: None)''',
        test=node.test,
        true_expr=node.body,
        false_expr=node.orelse)
def transform(node, ctx):
  """Converts ternary expressions in `node`; returns the new AST."""
  return ConditionalExpressionTransformer(ctx).visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/conditional_expressions.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts assert statements to their corresponding TF calls."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import templates
class AssertTransformer(converter.Base):
  """Transforms Assert nodes to Call so they can be handled as functions."""

  def visit_Assert(self, node):
    self.generic_visit(node)

    # Note: The lone tf.Assert call will be wrapped with control_dependencies
    # by side_effect_guards.
    template = """
      ag__.assert_stmt(test, lambda: msg)
    """
    if node.msg is None:
      # Bare `assert test` gets a generic message constant.
      return templates.replace(
          template,
          test=node.test,
          msg=gast.Constant('Assertion error', kind=None))
    elif isinstance(node.msg, gast.Constant):
      return templates.replace(template, test=node.test, msg=node.msg)
    else:
      # Only literal messages are supported for now.
      raise NotImplementedError('can only convert string messages for now.')
def transform(node, ctx):
  """Converts assert statements in `node`; returns the new AST."""
  transformer = AssertTransformer(ctx)
  return transformer.visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/asserts.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for arg_defaults module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import arg_defaults
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.platform import test
class ArgDefaultsTransformerTest(converter_testing.TestCase):
  """Tests the transformer that replaces argument defaults."""

  def assertTransformedFirstLineIs(self, node, expected):
    # Compares only the rendered signature line of the transformed function.
    self.assertEqual(
        compiler.ast_to_source(node,
                               include_encoding_marker=False).split('\n')[0],
        expected)

  def test_no_args(self):

    def test_fn():
      pass

    node, ctx = self.prepare(test_fn, {})
    node = arg_defaults.transform(node, ctx)
    self.assertTransformedFirstLineIs(node, 'def test_fn():')

  def test_no_defaults(self):

    def test_fn(a, b, *c, **e):
      return a, b, c, e

    node, ctx = self.prepare(test_fn, {})
    node = arg_defaults.transform(node, ctx)
    self.assertTransformedFirstLineIs(node, 'def test_fn(a, b, *c, **e):')

  # TODO(mdan): Add kwonly-arg tests when PY2 is no longer supported.

  def test_arg_defaults(self):

    def test_fn(a, b=1, c=2):
      return a, b, c

    node, ctx = self.prepare(test_fn, {})
    node = arg_defaults.transform(node, ctx)
    # Defaults are replaced with None placeholders.
    self.assertTransformedFirstLineIs(node, 'def test_fn(a, b=None, c=None):')

  def test_arg_defaults_with_vararg(self):

    def test_fn(a, b=1, *c):  # pylint: disable=keyword-arg-before-vararg
      return a, b, c

    node, ctx = self.prepare(test_fn, {})
    node = arg_defaults.transform(node, ctx)
    self.assertTransformedFirstLineIs(node, 'def test_fn(a, b=None, *c):')

  def test_arg_defaults_ignores_inner_lambda(self):

    def test_fn():
      return (lambda x=7: x)()

    # NOTE(review): the prepare/transform results below appear unused; the
    # behavioral check goes through self.converted instead.
    node, ctx = self.prepare(test_fn, {})
    node = arg_defaults.transform(node, ctx)
    with self.converted(test_fn, arg_defaults, {}) as result:
      self.assertEqual(test_fn(), result.test_fn())

  def test_arg_defaults_ignores_inner_function(self):

    def test_fn():

      def inner_fn(a=3):
        return a

      return inner_fn()

    node, ctx = self.prepare(test_fn, {})
    node = arg_defaults.transform(node, ctx)
    with self.converted(test_fn, arg_defaults, {}) as result:
      self.assertEqual(test_fn(), result.test_fn())

  def test_arg_defaults_ignores_inner_function_returned(self):

    def test_fn():

      def inner_fn(a=3):
        return a

      return inner_fn

    node, ctx = self.prepare(test_fn, {})
    node = arg_defaults.transform(node, ctx)
    with self.converted(test_fn, arg_defaults, {}) as result:
      self.assertEqual(test_fn()(), result.test_fn()())
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/arg_defaults_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for misc module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.utils import misc
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops.variables import Variable
from tensorflow.python.platform import test
class MiscTest(test.TestCase):
  """Tests for the miscellaneous autograph utilities."""

  def test_capitalize_initial(self):
    self.assertEqual('', misc.capitalize_initial(''))
    self.assertEqual('A', misc.capitalize_initial('A'))
    self.assertEqual('Ab', misc.capitalize_initial('Ab'))
    self.assertEqual('AbC', misc.capitalize_initial('AbC'))
    self.assertEqual('A', misc.capitalize_initial('a'))
    self.assertEqual('Ab', misc.capitalize_initial('ab'))
    self.assertEqual('AbC', misc.capitalize_initial('abC'))

  @test_util.run_deprecated_v1
  def test_alias_single_tensor(self):
    a = constant(1)

    new_a = misc.alias_tensors(a)

    # The alias is a distinct object but evaluates to the same value.
    self.assertFalse(new_a is a)
    with self.cached_session() as sess:
      self.assertEqual(1, self.evaluate(new_a))

  @test_util.run_deprecated_v1
  def test_alias_tensors(self):
    a = constant(1)
    v = Variable(2)
    s = 'a'
    l = [1, 2, 3]

    new_a, new_v, new_s, new_l = misc.alias_tensors(a, v, s, l)

    # Only plain Tensors are aliased; Variables and Python values pass
    # through unchanged.
    self.assertFalse(new_a is a)
    self.assertTrue(new_v is v)
    self.assertTrue(new_s is s)
    self.assertTrue(new_l is l)
    with self.cached_session() as sess:
      self.assertEqual(1, self.evaluate(new_a))

  def test_get_range_len(self):
    get_range_as_graph = def_function.function(misc.get_range_len)
    test_range = [(i, constant_op.constant(i)) for i in range(-3, 3)]
    results = []
    for i, ti in test_range:
      for j, tj in test_range:
        for k, tk in test_range:
          if k == 0:
            # A zero step is invalid for ranges; skip it.
            continue
          results.append(((i, j, k), get_range_as_graph(ti, tj, tk)))

    for (i, j, k), result_tensor in results:
      # The graph computation must agree with Python's range length.
      self.assertEqual(
          len(list(range(i, j, k))), self.evaluate(result_tensor))
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/utils/misc_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for context_managers module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.utils import context_managers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class ContextManagersTest(test.TestCase):
  """Smoke tests for control_dependency_on_returns."""

  def test_control_dependency_on_returns(self):
    # Just dry run them.
    with context_managers.control_dependency_on_returns(None):
      pass
    with context_managers.control_dependency_on_returns(
        constant_op.constant(1)):
      pass
    with context_managers.control_dependency_on_returns(
        tensor_array_ops.TensorArray(dtypes.int32, size=1)):
      pass
    with context_managers.control_dependency_on_returns(
        [constant_op.constant(1),
         constant_op.constant(2)]):
      pass
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/utils/context_managers_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities that don't fit anywhere else."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
def alias_tensors(*args):
  """Wraps any Tensor arguments with an identity op.

  Any other argument, including Variables, is returned unchanged.

  Args:
    *args: Any arguments. Must contain at least one element.

  Returns:
    Same as *args, with Tensor instances replaced as described.

  Raises:
    ValueError: If args doesn't meet the requirements.
  """
  if not args:
    raise ValueError('at least one argument required')

  def _maybe_alias(value):
    # Only dense Tensors are aliased; everything else passes through.
    if isinstance(value, ops.Tensor):
      return array_ops.identity(value)
    return value

  # TODO(mdan): Recurse into containers?
  # TODO(mdan): Anything we can do about variables? Fake a scope reuse?
  if len(args) == 1:
    return _maybe_alias(args[0])
  # Multiple arguments yield a generator, matching the original lazy
  # behavior.
  return (_maybe_alias(a) for a in args)
def capitalize_initial(s):
  """Returns `s` with only its first character upper-cased."""
  if not s:
    # Empty (or otherwise falsy) strings are returned unchanged.
    return s
  return s[0].upper() + s[1:]
def get_range_len(start, limit, delta):
  """Returns the number of elements in range(start, limit, delta) as a tensor.

  Mirrors Python's `len(range(start, limit, delta))` (see
  MiscTest.test_get_range_len): floor-divides the distance by the step,
  adds 1 when there is a nonzero remainder, and clamps negative results
  (empty ranges) to 0.
  """
  dist = ops.convert_to_tensor(limit - start)
  unadjusted_len = dist // delta
  # 1 if the step doesn't evenly divide the distance, else 0.
  adjustment = math_ops.cast(
      gen_math_ops.not_equal(dist % delta,
                             array_ops.zeros_like(unadjusted_len)), dist.dtype)
  final_len = unadjusted_len + adjustment
  # Negative lengths correspond to empty ranges.
  return gen_math_ops.maximum(final_len, array_ops.zeros_like(final_len))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/utils/misc.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for wrap_py_func module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.utils import py_func
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class PyFuncTest(test.TestCase):
  """Tests wrap_py_func with plain, object, keyword and dummy-return args."""

  def test_wrap_py_func_simple(self):

    def test_fn(a, b, c):
      return a + b + c

    with self.cached_session() as sess:
      # Python values and Tensors may be freely mixed in the args tuple.
      result = py_func.wrap_py_func(test_fn, dtypes.int32,
                                    (1, constant_op.constant(1), 1))
      self.assertEqual(3, self.evaluate(result))
      result = py_func.wrap_py_func(test_fn, dtypes.int32, (1, 1, 1))
      self.assertEqual(3, self.evaluate(result))
      result = py_func.wrap_py_func(
          test_fn, dtypes.int32,
          (constant_op.constant(1), 1, constant_op.constant(1)))
      self.assertEqual(3, self.evaluate(result))

  def test_wrap_py_func_complex_args(self):

    class TestClass(object):

      def __init__(self):
        self.foo = 5

    def test_fn(a, b):
      return a * b.foo

    with self.cached_session() as sess:
      # Opaque Python objects are passed through to the wrapped function.
      result = py_func.wrap_py_func(test_fn, dtypes.int32, (7, TestClass()))
      self.assertEqual(35, self.evaluate(result))
      result = py_func.wrap_py_func(test_fn, dtypes.int32,
                                    (constant_op.constant(7), TestClass()))
      self.assertEqual(35, self.evaluate(result))

  def test_wrap_py_func_kwargs(self):

    class TestClass(object):

      def __init__(self, foo):
        self.foo = foo

    def test_fn(a, b, c, d):
      return a * b.foo + c * d.foo

    with self.cached_session() as sess:
      # Keyword arguments may also mix Tensors and Python objects.
      result = py_func.wrap_py_func(test_fn, dtypes.int32, (7, TestClass(5)), {
          'c': 11,
          'd': TestClass(13)
      })
      self.assertEqual(178, self.evaluate(result))
      result = py_func.wrap_py_func(test_fn, dtypes.int32,
                                    (constant_op.constant(7), TestClass(5)), {
                                        'c': constant_op.constant(11),
                                        'd': TestClass(13)
                                    })
      self.assertEqual(178, self.evaluate(result))

  def test_wrap_py_func_dummy_return(self):
    side_counter = [0]

    def test_fn(_):
      side_counter[0] += 1

    with self.cached_session() as sess:
      # With use_dummy_return, the call is run for side effects and the
      # result evaluates to 1.
      result = py_func.wrap_py_func(test_fn, None, (5,), use_dummy_return=True)
      self.assertEqual(1, self.evaluate(result))
      self.assertEqual([1], side_counter)
      result = py_func.wrap_py_func(
          test_fn, None, (constant_op.constant(5),), use_dummy_return=True)
      self.assertEqual(1, self.evaluate(result))
      self.assertEqual([2], side_counter)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/utils/py_func_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A typed list in Python."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import tensor_array_ops
def dynamic_list_append(target, element):
  """Converts a list append call inline.

  Dispatches on the runtime type of `target`: TensorArray uses `write`,
  Tensors (variant tensor lists) use `tensor_list_push_back`, and anything
  else falls back to its own `append` method.

  Args:
    target: The list-like object to append to.
    element: The value to append.

  Returns:
    The updated list-like object; for the TF branches this is a new object,
    for the Python fallback it is `target` itself (mutated in place).
  """
  if isinstance(target, tensor_array_ops.TensorArray):
    return target.write(target.size(), element)
  # TODO(mdan): What's the right way to check this?
  # TODO(mdan): We may not need this branch.
  # It may be possible to use TensorList alone if the loop body will not
  # require wrapping it, although we'd have to think about an autoboxing
  # mechanism for lists received as parameter.
  if isinstance(target, ops.Tensor):
    return list_ops.tensor_list_push_back(target, element)

  # Python targets (including TensorList): fallback to their original append.
  target.append(element)
  return target
class TensorList(object):
  """Tensor list wrapper API-compatible with Python built-in list."""

  def __init__(self, shape, dtype):
    # shape/dtype describe the list elements; the backing store (self.list_)
    # is an empty TensorList created by clear().
    self.dtype = dtype
    self.shape = shape
    self.clear()

  def append(self, value):
    self.list_ = list_ops.tensor_list_push_back(self.list_, value)

  def pop(self):
    # Returns the last element; the shortened list replaces the backing
    # store.
    self.list_, value = list_ops.tensor_list_pop_back(self.list_, self.dtype)
    return value

  def clear(self):
    # Resets the backing store to an empty tensor list.
    self.list_ = list_ops.empty_tensor_list(self.shape, self.dtype)

  def count(self):
    # NOTE: unlike list.count(x), this takes no argument and returns the
    # length of the list.
    return list_ops.tensor_list_length(self.list_)

  def __getitem__(self, key):
    return list_ops.tensor_list_get_item(self.list_, key, self.dtype)

  def __setitem__(self, key, value):
    self.list_ = list_ops.tensor_list_set_item(self.list_, key, value)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/utils/tensor_list.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Autograph lists."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.utils import tensor_list as tl
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class TensorListTest(test.TestCase):
  """Tests for dynamic_list_append and the TensorList wrapper."""

  def _shape(self, shape_tuple):
    # Helper: converts a shape tuple into an int32 shape tensor.
    return constant(shape_tuple, dtypes.int32)

  @test_util.run_v1_only("b/117943489")
  def test_dynamic_list_append(self):
    # Python list target: appended in place.
    l = []
    l = tl.dynamic_list_append(l, 1)
    self.assertListEqual(l, [1])

    # Variant TensorList target.
    l = list_ops.empty_tensor_list(self._shape(()), dtypes.int32)
    l = tl.dynamic_list_append(l, 1)
    s = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
    self.assertAllEqual(s, [1])

    # TensorArray target.
    l = tensor_array_ops.TensorArray(dtypes.int32, size=0, dynamic_size=True)
    l = tl.dynamic_list_append(l, 1)
    s = l.stack()
    self.assertAllEqual(s, [1])

    # TensorList wrapper target.
    l = tl.TensorList(self._shape(()), dtypes.int32)
    l = tl.dynamic_list_append(l, 1)
    self.assertAllEqual(l[0], 1)

  def test_list_append_python(self):
    with context.eager_mode():
      a = constant(3.0)
      l = tl.TensorList(a.shape, a.dtype)
      l.append(a)
      self.assertEqual(l.count().numpy(), 1)
      l.append(a)
      self.assertEqual(l.count().numpy(), 2)
      _ = l.pop()
      self.assertEqual(l.count().numpy(), 1)
      a2 = l.pop()
      self.assertEqual(l.count().numpy(), 0)
      self.assertEqual(a.numpy(), a2.numpy())

  def test_list_index_python(self):
    with context.eager_mode():
      a = constant(3.0)
      b = constant(2.0)
      l = tl.TensorList(a.shape, a.dtype)
      l.append(a)
      self.assertEqual(l[0].numpy(), a.numpy())
      l[0] = ops.convert_to_tensor(b)
      self.assertEqual(l[0].numpy(), b.numpy())

  @test_util.run_deprecated_v1
  def test_list_append_tf(self):
    # Same as test_list_append_python, but in graph mode: ops are built
    # first and evaluated together at the end.
    a = constant(3.0)
    l = tl.TensorList(a.shape, a.dtype)
    l.append(a)
    c1 = l.count()
    l.append(a)
    c2 = l.count()
    _ = l.pop()
    c3 = l.count()
    a2 = l.pop()
    c4 = l.count()
    c1, c2, c3, c4, a, a2 = self.evaluate([c1, c2, c3, c4, a, a2])
    self.assertEqual(c1, 1)
    self.assertEqual(c2, 2)
    self.assertEqual(c3, 1)
    self.assertEqual(c4, 0)
    self.assertEqual(a, a2)

  def test_list_index_tf(self):
    a = constant(3.0)
    b = constant(2.0)
    l = tl.TensorList(a.shape, a.dtype)
    l.append(a)
    l0 = l[0]
    l[0] = b
    l1 = l[0]
    l0, l1, a, b = self.evaluate([l0, l1, a, b])
    self.assertEqual(l0, a)
    self.assertEqual(l1, b)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/utils/tensor_list_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module defines tensor utilities not found in TensorFlow.
The reason these utilities are not defined in TensorFlow is because they may
not be not fully robust, although they work in the vast majority of cases. So
we define them here in order for their behavior to be consistently verified.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import tensor_array_ops
def is_dense_tensor(t):
  """Returns True if `t` is a TF tensor but not a SparseTensor."""
  # TODO(mdan): Resolve this inconsistency.
  if isinstance(t, sparse_tensor.SparseTensor):
    return False
  return tensor_util.is_tensor(t)
def is_tensor_array(t):
  """Returns True if `t` is a tf.TensorArray instance."""
  return isinstance(t, tensor_array_ops.TensorArray)
def is_tensor_list(t):
  """Best-effort check for TensorList handles: variant dtype with no rank."""
  # TODO(mdan): This is just a heuristic.
  # With TF lacking support for templated types, this is unfortunately the
  # closest we can get right now. A dedicated op ought to be possible to
  # construct.
  if not tensor_util.is_tensor(t):
    return False
  return t.dtype == dtypes.variant and not t.shape.ndims
def is_range_tensor(t):
  """Returns True if a tensor is the result of a tf.range op. Best effort."""
  if not tensor_util.is_tensor(t):
    return False
  return hasattr(t, 'op') and t.op.type == 'Range'
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/utils/tensors.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pyfunc creation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import script_ops
class MatchDType(namedtuple('MatchDType', ('arg_number',))):
  """Allows matching the dtype of an argument.

  Used in conjunction with function calls. For example, MatchDType(0) will
  match the DType of the first argument.
  """
  # The docstring alone forms the class body; no `pass` needed.
def wrap_py_func(f, return_dtypes, args, kwargs=None, use_dummy_return=False):
  """Helper that wraps a callable to py_func.

  The helper passes tensor arguments through the py_func interface. Non-tensor
  arguments are allowed, and will be passed to f directly. Note that non-tensor
  arguments captured by f will not update every time the wrapper is
  called (this is consistent with its argument list, which only includes
  the tensor arguments). In general, it's safest not to reuse this wrapper.

  Args:
    f: Callable
    return_dtypes: None, individual of tuple/list of DType or MatchDType, the
        data type for each of f's return value(s). Set to None if f has no
        return values or use_dummy_return is True. Use MatchDType to define a
        dtype identical to that of `i`th argument (argument 0 is the first);
        an argument must be of Tensor type if it is to be used with MatchDType.
    args: Positional arguments for f, as list or tuple.
    kwargs: Keyword arguments for f, as dict with string keys. May be None.
    use_dummy_return: If True, the function will return a dummy value of 1
        and discard its actual return value.

  Returns:
    The return values of f converted to tensor.
  Raises:
    ValueError: if any of the arguments are incorrect.
  """

  if return_dtypes and use_dummy_return:
    raise ValueError('if use_dummy_return is True, return_dtypes must be empty')

  tensor_args = []
  tensor_args_idx = {}

  # Of the positional arguments, only grab the tensor ones to be passed through
  # the py_func.
  arg_is_tensor = tuple(map(tensor_util.is_tensor, args))
  for i, arg in enumerate(args):
    if arg_is_tensor[i]:
      tensor_args_idx[i] = len(tensor_args)
      tensor_args.append(arg)

  # We essentially take the tensor kwargs, if any, and add them to the list of
  # positional arguments. The kwargs are then reconstructed inside the py_func.
  #
  # For example, if
  #
  #     args = [Tensor(1), 'foo']
  #     kwargs = {'a': Tensor(2), 'b': 'bar'}
  #
  # Then
  #
  #     tensor_args = (Tensor(1), Tensor(2))
  #     kwarg_keys = ('a', 'b')
  if kwargs:
    kwarg_keys = tuple(kwargs.keys())
    kwarg_is_tensor = {k: tensor_util.is_tensor(kwargs[k]) for k in kwarg_keys}
    for k in kwarg_keys:
      if kwarg_is_tensor[k]:
        tensor_args_idx[k] = len(tensor_args)
        tensor_args.append(kwargs[k])
  else:
    kwarg_keys = ()

  # Set up return dtypes.
  def match_arg_dtype(arg_number):
    arg = args[arg_number]
    if not arg_is_tensor[arg_number]:
      raise ValueError(
          'argument %d was used with MatchDType and must be a tf.Tensor, but '
          'was %s instead' % (arg_number, type(arg)))
    return arg.dtype

  if return_dtypes:
    if isinstance(return_dtypes, MatchDType):
      return_dtypes = match_arg_dtype(return_dtypes.arg_number)
    elif isinstance(return_dtypes, (list, tuple)):
      return_dtypes = tuple(
          match_arg_dtype(a.arg_number) if isinstance(a, MatchDType) else a
          for a in return_dtypes)
    else:
      assert isinstance(return_dtypes, dtypes.DType)

  def f_wrapper(*tensor_args):
    # Reassemble the original positional and keyword arguments, substituting
    # the tensor values that were routed through the py_func boundary.
    f_args = tuple(tensor_args[tensor_args_idx[i]] if arg_is_tensor[i] else a
                   for i, a in enumerate(args))
    f_kwargs = {
        k: tensor_args[tensor_args_idx[k]] if kwarg_is_tensor[k] else kwargs[k]
        for k in kwarg_keys
    }
    retval = f(*f_args, **f_kwargs)
    return 1 if use_dummy_return else retval

  if use_dummy_return:
    return_dtypes = dtypes.int32
  return script_ops.eager_py_func(f_wrapper, tensor_args, return_dtypes)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/utils/py_func.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility module that contains APIs usable in the generated code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.utils.context_managers import control_dependency_on_returns
from tensorflow.python.autograph.utils.misc import alias_tensors
from tensorflow.python.autograph.utils.py_func import wrap_py_func
from tensorflow.python.autograph.utils.tensor_list import dynamic_list_append
from tensorflow.python.autograph.utils.testing import fake_tf
from tensorflow.python.autograph.utils.type_check import is_tensor
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/utils/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensors module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.utils import tensors
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class TensorsTest(test.TestCase):
  """Tests for the tensors utility module."""

  def _simple_tensor_array(self):
    return tensor_array_ops.TensorArray(dtypes.int32, size=3)

  def _simple_tensor_list(self):
    return list_ops.empty_tensor_list(
        element_shape=constant_op.constant([1]), element_dtype=dtypes.int32)

  def _simple_list_of_tensors(self):
    return [constant_op.constant(1), constant_op.constant(2)]

  def test_is_tensor_array(self):
    self.assertTrue(tensors.is_tensor_array(self._simple_tensor_array()))
    self.assertFalse(tensors.is_tensor_array(self._simple_tensor_list()))
    self.assertFalse(tensors.is_tensor_array(constant_op.constant(1)))
    self.assertFalse(tensors.is_tensor_array(self._simple_list_of_tensors()))
    self.assertFalse(tensors.is_tensor_array(None))

  def test_is_tensor_list(self):
    self.assertFalse(tensors.is_tensor_list(self._simple_tensor_array()))
    self.assertTrue(tensors.is_tensor_list(self._simple_tensor_list()))
    self.assertFalse(tensors.is_tensor_list(constant_op.constant(1)))
    self.assertFalse(tensors.is_tensor_list(self._simple_list_of_tensors()))
    self.assertFalse(tensors.is_tensor_list(None))

  def test_is_range_tensor(self):
    # Bug fix: this method was named `is_range_tensor`, so the unittest
    # runner never discovered or executed it. Renamed with the `test_` prefix.
    self.assertTrue(tensors.is_range_tensor(math_ops.range(1)))
    self.assertTrue(tensors.is_range_tensor(math_ops.range(1, 2)))
    self.assertTrue(tensors.is_range_tensor(math_ops.range(1, 2, 3)))
    self.assertFalse(tensors.is_range_tensor(None))
    self.assertFalse(tensors.is_range_tensor(constant_op.constant(range(1))))
# Entry point: run the test suite when executed as a script.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/utils/tensors_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various context managers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import ops
from tensorflow.python.ops import tensor_array_ops
def control_dependency_on_returns(return_value):
  """Create a TF control dependency on the return values of a function.

  If the function had no return value, a no-op context is returned.

  Args:
    return_value: The return value to set as control dependency.
  Returns:
    A context manager.
  """
  if return_value is None:
    # Nothing to depend on: hand back a do-nothing context manager.
    return contextlib.contextmanager(lambda: (yield))()

  def as_dependency(t):
    # TensorArrays cannot be used as dependencies directly; their flow
    # tensor stands in for them.
    return t.flow if isinstance(t, tensor_array_ops.TensorArray) else t

  # TODO(mdan): Filter to tensor objects.
  if not isinstance(return_value, (list, tuple)):
    return_value = (return_value,)
  deps = tuple(as_dependency(t) for t in return_value)
  return ops.control_dependencies(deps)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/utils/context_managers.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities used in autograph-generated code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_util
def is_tensor(*args):
  """Check if any arguments are tensors.

  Args:
    *args: Python objects that may or may not be tensors.

  Returns:
    True if any *args are TensorFlow types, False if none are.
  """
  for a in args:
    if tensor_util.is_tensor(a):
      return True
  return False
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/utils/type_check.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
def fake_tf():
  """Creates a fake module that looks like TensorFlow, for testing."""
  # The `imp` module is deprecated since Python 3.4 and removed in 3.12;
  # types.ModuleType creates an equivalent empty module object.
  import types
  mod = types.ModuleType('tensorflow')
  mod_contents = {}
  # Later updates win on key collisions: ops overrides math_ops, which
  # overrides gen_math_ops; the module's own attributes win over all.
  mod_contents.update(gen_math_ops.__dict__)
  mod_contents.update(math_ops.__dict__)
  mod_contents.update(ops.__dict__)
  mod_contents.update(mod.__dict__)
  mod.__dict__.update(mod_contents)
  return mod
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/utils/testing.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging and debugging utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import traceback
# TODO(mdan): Use a custom logger class.
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
# Environment variable that supplies the default verbosity level.
VERBOSITY_VAR_NAME = 'AUTOGRAPH_VERBOSITY'
# Verbosity used when neither set_verbosity nor the env variable was set.
DEFAULT_VERBOSITY = 0
verbosity_level = None  # vlog-like. Takes precedence over the env variable.
echo_log_to_stdout = False
# In interactive Python, logging echo is enabled by default.
# (sys.ps1/ps2 exist only in interactive interpreters.)
if hasattr(sys, 'ps1') or hasattr(sys, 'ps2'):
  echo_log_to_stdout = True
@tf_export('autograph.set_verbosity')
def set_verbosity(level, alsologtostdout=False):
  """Sets the AutoGraph verbosity level.

  _Debug logging in AutoGraph_

  More verbose logging is useful to enable when filing bug reports or doing
  more in-depth debugging.

  There are two means to control the logging verbosity:

   * The `set_verbosity` function
   * The `AUTOGRAPH_VERBOSITY` environment variable

  `set_verbosity` takes precedence over the environment variable.

  For example:

  ```python
  import os
  import tensorflow as tf

  os.environ['AUTOGRAPH_VERBOSITY'] = 5
  # Verbosity is now 5

  tf.autograph.set_verbosity(0)
  # Verbosity is now 0

  os.environ['AUTOGRAPH_VERBOSITY'] = 1
  # No effect, because set_verbosity was already called.
  ```

  Logs entries are output to [absl](https://abseil.io)'s
  [default output](https://abseil.io/docs/python/guides/logging),
  with `INFO` level.

  Logs can be mirrored to stdout by using the `alsologtostdout` argument.
  Mirroring is enabled by default when Python runs in interactive mode.

  Args:
    level: int, the verbosity level; larger values specify increased verbosity;
      0 means no logging. When reporting bugs, it is recommended to set this
      value to a larger number, like 10.
    alsologtostdout: bool, whether to also output log messages to `sys.stdout`.
  """
  # Record both settings in module-level state read by get_verbosity()
  # and the log/error/warn helpers.
  global verbosity_level, echo_log_to_stdout
  verbosity_level = level
  echo_log_to_stdout = alsologtostdout
@tf_export('autograph.trace')
def trace(*args):
  """Traces argument information at compilation time.

  `trace` is useful when debugging, and it always executes during the tracing
  phase, that is, when the TF graph is constructed.

  _Example usage_

  ```python
  import tensorflow as tf

  for i in tf.range(10):
    tf.autograph.trace(i)
  # Output: <Tensor ...>
  ```

  Args:
    *args: Arguments to print to `sys.stdout`.
  """
  # Runs at graph-construction (tracing) time, hence prints symbolic values.
  print(*args)
def get_verbosity():
  """Returns the effective verbosity level.

  A level set via `set_verbosity` takes precedence; otherwise the
  AUTOGRAPH_VERBOSITY environment variable (or DEFAULT_VERBOSITY) is used.
  """
  # Note: `global` is only required for assignment; this function merely
  # reads verbosity_level, so the declaration was removed.
  if verbosity_level is not None:
    return verbosity_level
  return int(os.getenv(VERBOSITY_VAR_NAME, DEFAULT_VERBOSITY))
def has_verbosity(level):
  """Returns True if the current verbosity is at least `level`."""
  return level <= get_verbosity()
def _output_to_stdout(msg, *args, **kwargs):
print(msg % args)
if kwargs.get('exc_info', False):
traceback.print_exc()
def error(level, msg, *args, **kwargs):
  """Logs an error if verbosity reaches `level`; optionally echoes to stdout."""
  if not has_verbosity(level):
    return
  logging.error(msg, *args, **kwargs)
  if echo_log_to_stdout:
    _output_to_stdout('ERROR: ' + msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
  """Logs an info message if verbosity reaches `level`; may echo to stdout."""
  if not has_verbosity(level):
    return
  logging.info(msg, *args, **kwargs)
  if echo_log_to_stdout:
    _output_to_stdout(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
  """Logs a warning unconditionally; also echoes to stdout when enabled."""
  logging.warn(msg, *args, **kwargs)
  if echo_log_to_stdout:
    stdout_msg = 'WARNING: ' + msg
    _output_to_stdout(stdout_msg, *args, **kwargs)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/utils/ag_logging.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for type_check."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.python.autograph.utils import type_check
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class TypeCheckTest(test.TestCase):
  """Tests for the type_check utility module."""

  @test_util.run_deprecated_v1
  def test_checks(self):
    # Tensor-like values are detected...
    tensor_value = constant_op.constant([1, 2, 3])
    self.assertTrue(type_check.is_tensor(tensor_value))
    self.assertTrue(
        type_check.is_tensor(test_util.variables.Variable([1, 2, 3])))
    self.assertTrue(
        type_check.is_tensor(
            test_util.array_ops.placeholder(test_util.dtypes.float32)))
    # ...while plain Python and numpy values are not.
    self.assertFalse(type_check.is_tensor(3))
    self.assertFalse(type_check.is_tensor(numpy.eye(3)))
# Entry point: run the test suite when executed as a script.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/utils/type_check_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for special_functions module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.autograph.lang import special_functions
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import list_ops
from tensorflow.python.platform import test
class SpecialFunctionsTest(test.TestCase):
  """Tests for the special_functions module."""

  def test_match_staging_level(self):
    some_tensor = constant_op.constant(0)
    tensor_one = special_functions.match_staging_level(1, some_tensor)
    python_one = special_functions.match_staging_level(1, 1)
    with self.cached_session():
      self.assertTrue(tensor_util.is_tensor(tensor_one))
      self.assertAllEqual(self.evaluate(tensor_one), 1)
      self.assertEqual(python_one, 1)

  def test_tensor_list_empty_list(self):
    # Both list and tuple initializers produce an empty tensor list.
    for empty_init in ([], ()):
      lst = special_functions.tensor_list(
          empty_init, element_dtype=dtypes.int32, element_shape=())
      stacked = list_ops.tensor_list_stack(lst, element_dtype=dtypes.int32)
      with self.cached_session():
        self.assertAllEqual(self.evaluate(stacked), [])

  def test_tensor_list_tensor(self):
    lst = special_functions.tensor_list(
        constant_op.constant([], dtype=dtypes.int32))
    stacked = list_ops.tensor_list_stack(lst, element_dtype=dtypes.int32)
    with self.cached_session():
      self.assertAllEqual(self.evaluate(stacked), [])

  def test_tensor_list_unsupported_initializer(self):
    with self.assertRaisesRegexp(ValueError, 'unknown type'):
      special_functions.tensor_list(np.array([1, 2, 3]))

  def test_tensor_list_empty_list_no_type(self):
    with self.assertRaisesRegexp(
        ValueError, 'element_dtype and element_shape are required'):
      special_functions.tensor_list([])

  def test_tensor_list_from_elements(self):
    elements = [constant_op.constant([1, 2]), constant_op.constant([3, 4])]
    lst = special_functions.tensor_list(elements)
    stacked = list_ops.tensor_list_stack(lst, element_dtype=dtypes.int32)
    with self.cached_session():
      self.assertAllEqual(self.evaluate(stacked), [[1, 2], [3, 4]])

  def test_tensor_list_array_from_elements(self):
    elements = [constant_op.constant([1, 2]), constant_op.constant([3, 4])]
    arr = special_functions.tensor_list(elements, use_tensor_array=True)
    with self.cached_session():
      self.assertAllEqual(self.evaluate(arr.stack()), [[1, 2], [3, 4]])

  def test_stack(self):
    # Non-stackable inputs pass through unchanged when strict=False.
    self.assertEqual(special_functions.stack(1, strict=False), 1)
    self.assertListEqual(
        special_functions.stack([1, 2, 3], strict=False), [1, 2, 3])
    # TODO(mdan): This should probably forward to tf.stack.
    self.assertIsInstance(
        special_functions.stack(
            [constant_op.constant(1), constant_op.constant(2)], strict=False),
        list)
    with self.assertRaises(ValueError):
      special_functions.stack([1, 2, 3])
    t = constant_op.constant([1.0, 2.0])
    lst = list_ops.tensor_list_from_tensor(
        t, element_shape=constant_op.constant([], dtype=dtypes.int32))
    self.assertTrue(
        tensor_util.is_tensor(
            special_functions.stack(lst, element_dtype=dtypes.float32)))
# Entry point: run the test suite when executed as a script.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/lang/special_functions_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Directives are special no-op functions that serve as compilation markers.
They provide static information like type hints, compilation and TensorFlow
overrides.
These serve as annotations in the compiled code, allowing the user some control
over the compilation process. They have no functional role at runtime.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.docs.doc_controls import do_not_generate_docs
UNSPECIFIED = object()
def set_element_type(entity, dtype, shape=UNSPECIFIED):
  """Indicates that the entity is expected to hold items of specified type/shape.

  The staged TensorFlow ops will reflect and assert this data type. Ignored
  otherwise.

  Args:
    entity: The entity to annotate.
    dtype: TensorFlow dtype value to assert for entity.
    shape: Optional shape to assert for entity.
  """
  # Compile-time marker only: the converter consumes the call; at runtime
  # the arguments are intentionally unused.
  del entity, dtype, shape
# TODO(b/140125096): Implement.
@do_not_generate_docs
@tf_export('autograph.experimental.set_loop_options')
def set_loop_options(
    parallel_iterations=UNSPECIFIED,
    back_prop=UNSPECIFIED,
    swap_memory=UNSPECIFIED,
    maximum_iterations=UNSPECIFIED):
  """Specifies additional arguments to be passed to the enclosing while_loop.

  The parameters apply to and only to the immediately enclosing loop. It only
  has effect if the loop is staged as a TF while_loop; otherwise the parameters
  have no effect.

  Usage example:

    @tf.function(autograph=True)
    def dynamic_rnn(..., parallel_iterations=32):
      num_steps = ...
      for t in tf.range(num_steps):
        tf.autograph.experimental.set_loop_options(
            parallel_iterations=parallel_iterations)
        ...

  Args:
    parallel_iterations: See tf.while_loop.
    back_prop: See tf.while_loop.
    swap_memory: See tf.while_loop.
    maximum_iterations: See tf.while_loop.
  """
  # Compile-time marker only: the converter consumes the call; at runtime
  # the arguments are intentionally unused.
  del parallel_iterations, back_prop, swap_memory, maximum_iterations
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/lang/directives.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Special functions that only make sense for AutoGraph.
These functions are meant to ensure feature parity between Python and AutoGraph,
so that the exact same code works in both modes. In general, AutoGraph will
replace these calls.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators import data_structures
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_util
def _validate_list_constructor(elements, element_dtype, element_shape):
  """Validates the inputs of tensor_list."""
  # Fully-typed lists are always acceptable, as are tensor initializers.
  if element_dtype is not None and element_shape is not None:
    return
  if tensor_util.is_tensor(elements):
    return
  if isinstance(elements, (list, tuple)):
    if not elements:
      raise ValueError(
          'element_dtype and element_shape are required when elements are'
          ' empty')
    return
  raise ValueError(
      'unknown type for elements: {}; only Tensor, list and tuple are'
      ' allowed'.format(type(elements)))
def match_staging_level(value, like_value):
  """Casts a value to be staged at the same level as another."""
  if not tensor_util.is_tensor(like_value):
    # Plain Python value: leave it unstaged.
    return value
  return constant_op.constant(value)
def tensor_list(elements,
                element_dtype=None,
                element_shape=None,
                use_tensor_array=False):
  """Creates a tensor list and populates it with the given elements.

  This function provides a more uniform access to tensor lists and tensor
  arrays, and allows optional initialization.

  Note: this function is a simplified wrapper. If you need greater control,
  it is recommended to use the underlying implementation directly.

  Args:
    elements: Iterable[tf.Tensor, ...], the elements to initially fill the list
      with
    element_dtype: Optional[tf.DType], data type for the elements in the list;
      required if the list is empty
    element_shape: Optional[tf.TensorShape], shape for the elements in the list;
      required if the list is empty
    use_tensor_array: bool, whether to use the more compatible but restrictive
      tf.TensorArray implementation

  Returns:
    Union[tf.Tensor, tf.TensorArray], the new list.
  Raises:
    ValueError: for invalid arguments
  """
  _validate_list_constructor(elements, element_dtype, element_shape)
  if use_tensor_array:
    return data_structures.tf_tensor_array_new(elements, element_dtype,
                                               element_shape)
  # Default: variant-dtype TensorList implementation.
  return data_structures.tf_tensor_list_new(elements, element_dtype,
                                            element_shape)
def stack(list_or_tensor, element_dtype=None, strict=True):
  """Stacks the input, if it admits the notion of stacking.

  For example, a list of tensors can be stacked into a larger tensor. This
  function is similar to tf.stack, but it accepts non-lists and lists of
  non-tensors as arguments. In the latter case, the function does nothing.

  Args:
    list_or_tensor: Any
    element_dtype: tf.DType, optional dtype for the elements in the list.
        Required if the input is stackable, and the list is untyped.
    strict: bool, if True an error is raised if the input is not stackable.
        Otherwise the function is a no-op.

  Returns:
    Any, if the input is stackable, the result will be a tf.Tensor. Otherwise,
    if strict=False, the result will be list_or_tensor.

  Raises:
    ValueError: if strict=True and the input is not stackable.
  """
  if strict:
    def raise_error(x):
      raise ValueError('%s must be stackable when strict=True' % x)
    original_call = raise_error
  else:
    # Best-effort mode: non-stackable inputs are passed through untouched.
    original_call = lambda x: x
  return data_structures.list_stack(
      list_or_tensor,
      data_structures.ListStackOpts(
          element_dtype=element_dtype, original_call=original_call))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/lang/special_functions.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow graph (CFG) structure for Python AST representation.
The CFG is a digraph with edges representing valid control flow. Each
node is associated with exactly one AST node, but not all AST nodes may have
a corresponding CFG counterpart.
Once built, the CFG itself is immutable, but the values it holds need not be;
they are usually annotated with information extracted by walking the graph.
"""
# TODO(mdan): The notion of 'statements' below is inaccurate.
# They should rather be called 'block statements', because they include
# statements that may have a body, e.g. if and while.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import weakref
from enum import Enum
# pylint:disable=g-bad-import-order
import gast
# pylint:enable=g-bad-import-order
from tensorflow.python.autograph.pyct import compiler
class Node(object):
  """A single node of the control flow graph.

  Instances are mutable while the graph is being built; once `freeze` is
  called they should be treated as immutable. Forward and reverse pointers
  are both maintained, so for all nodes: "child in node.next" iff
  "node in child.prev".

  Attributes:
    next: FrozenSet[Node, ...], the nodes that follow this node, in control
      flow order
    prev: FrozenSet[Node, ...], the nodes that precede this node, in reverse
      control flow order
    ast_node: ast.AST, the AST node corresponding to this CFG node
  """

  def __init__(self, next_, prev, ast_node):
    self.next = next_
    self.prev = prev
    self.ast_node = ast_node

  def freeze(self):
    self.next = frozenset(self.next)
    # The graph owns all of its nodes and they share its life span, so weak
    # references are sufficient for the reverse pointers and avoid cycles.
    self.prev = weakref.WeakSet(self.prev)

  def __repr__(self):
    ast_node = self.ast_node
    if isinstance(ast_node, gast.FunctionDef):
      return 'def %s' % ast_node.name
    if isinstance(ast_node, gast.ClassDef):
      return 'class %s' % ast_node.name
    if isinstance(ast_node, gast.withitem):
      # Render only the context expression of a `with` item.
      ast_node = ast_node.context_expr
    return compiler.ast_to_source(
        ast_node, include_encoding_marker=False).strip()
class Graph(
    collections.namedtuple(
        'Graph',
        ['entry', 'exit', 'error', 'index', 'stmt_prev', 'stmt_next'])):
  """A Control Flow Graph.

  The CFG maintains an index to allow looking up a CFG node by the AST node to
  which it is associated. The index can also be enumerated in top-down, depth
  first order.

  Walking the graph in forward or reverse order is supported by double
  parent-child links.

  Note: the error nodes are not wired to their corresponding finally guards,
  because these are shared, and wiring them would create a reverse path from
  normal control flow into the error nodes, which we want to avoid.

  The graph also maintains edges corresponding to higher level statements
  like for-else loops. A node is considered successor of a statement if there
  is an edge from a node that is lexically a child of that statement to a node
  that is not. Statement predecessors are analogously defined.

  Attributes:
    entry: Node, the entry node
    exit: FrozenSet[Node, ...], the exit nodes
    error: FrozenSet[Node, ...], nodes that exit due to an explicitly raised
      error (errors propagated from function calls are not accounted)
    index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG
      node
    stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
      nodes to their predecessor CFG nodes
    stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
      nodes to their successor CFG nodes
  """

  def __repr__(self):
    return self.as_dot()

  def as_dot(self):
    """Renders the CFG in DOT format and returns it as a string."""
    nodes = self.index.values()
    lines = ['digraph CFG {']
    # Emit one labeled vertex per CFG node, keyed by object identity.
    for node in nodes:
      lines.append('  %s [label="%s"];' % (id(node), node))
    # Emit one directed edge per forward control-flow link.
    for node in nodes:
      lines.extend('  %s -> %s;' % (id(node), id(dest)) for dest in node.next)
    lines.append('}')
    return '\n'.join(lines)
class _WalkMode(Enum):
  """Traversal direction used by GraphVisitor._visit_internal.

  FORWARD follows `next` edges starting from the graph entry; REVERSE follows
  `prev` edges starting from the exit nodes.
  """
  FORWARD = 1
  REVERSE = 2
# TODO(mdan): Rename to DataFlowAnalyzer.
# TODO(mdan): Consider specializations that use gen/kill/transfer abstractions.
class GraphVisitor(object):
  """Base class for a CFG visitors.

  This implementation is not thread safe.

  The visitor has some facilities to simplify dataflow analyses. In particular,
  it allows revisiting the nodes at the decision of the subclass. This can be
  used to visit the graph until the state reaches a fixed point.

  For more details on dataflow analysis, see
  https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf

  Note: the literature generally suggests visiting successor nodes only when the
  state of the current node changed, regardless of whether that successor has
  ever been visited. This implementation visits every successor at least once.

  Attributes:
    graph: Graph
    in_: Dict[Node, Any], stores node-keyed state during a visit
    out: Dict[Node, Any], stores node-keyed state during a visit
  """

  def __init__(self, graph):
    self.graph = graph
    # reset() calls init_state for every node, so subclasses must be ready to
    # answer init_state by the time this constructor runs.
    self.reset()

  def init_state(self, node):
    """State initialization function. Optional to overload.

    An in/out state slot will be created for each node in the graph. Subclasses
    must overload this to control what that is initialized to.

    Args:
      node: Node
    """
    raise NotImplementedError('Subclasses must implement this.')

  # TODO(mdan): Rename to flow?
  def visit_node(self, node):
    """Visitor function.

    Args:
      node: Node

    Returns:
      bool, whether the node should be revisited; subclasses can visit every
      reachable node exactly once by always returning False
    """
    raise NotImplementedError('Subclasses must implement this.')

  def reset(self):
    # Fresh per-node state maps; init_state supplies the initial value for
    # each slot.
    self.in_ = {
        node: self.init_state(node) for node in self.graph.index.values()
    }
    self.out = {
        node: self.init_state(node) for node in self.graph.index.values()
    }

  def _visit_internal(self, mode):
    """Visits the CFG, depth-first."""
    assert mode in (_WalkMode.FORWARD, _WalkMode.REVERSE)
    # Worklist traversal: `open_` holds nodes pending a visit, `closed` the
    # nodes visited at least once.
    if mode == _WalkMode.FORWARD:
      open_ = [self.graph.entry]
    elif mode == _WalkMode.REVERSE:
      open_ = list(self.graph.exit)
    closed = set()
    while open_:
      node = open_.pop(0)
      closed.add(node)
      # When visit_node returns True, successors are re-queued even if already
      # visited; this is what allows iterating to a dataflow fixed point.
      should_revisit = self.visit_node(node)
      if mode == _WalkMode.FORWARD:
        children = node.next
      elif mode == _WalkMode.REVERSE:
        children = node.prev
      for next_ in children:
        if should_revisit or next_ not in closed:
          open_.append(next_)

  def visit_forward(self):
    self._visit_internal(_WalkMode.FORWARD)

  def visit_reverse(self):
    self._visit_internal(_WalkMode.REVERSE)
class GraphBuilder(object):
  """Builder that constructs a CFG from a given AST.

  This GraphBuilder facilitates constructing the DAG that forms the CFG when
  nodes are supplied in lexical order (i.e., top-down, depth first). Under
  these conditions, it supports building patterns found in typical structured
  programs.

  This builder ignores the flow generated by exceptions, which are assumed to
  always be catastrophic and present purely for diagnostic purposes (e.g. to
  print debug information). Statements like raise and try/catch sections are
  allowed and will generate control flow edges, but ordinary statements are
  assumed not to raise exceptions.

  Finally sections are also correctly interleaved between break/continue/return
  nodes and their subsequent statements.

  Important concepts:
    * nodes - nodes refer to CFG nodes; AST nodes are qualified explicitly
    * leaf set - since the graph is constructed gradually, a leaf set maintains
      the CFG nodes that will precede the node that the builder expects to
      receive next; when an ordinary node is added, it is connected to the
      existing leaves and it in turn becomes the new leaf
    * jump nodes - nodes that should generate edges other than what
      ordinary nodes would; these correspond to break, continue and return
      statements
    * sections - logical delimiters for subgraphs that require special
      edges; there are various types of nodes, each admitting various
      types of jump nodes; sections are identified by their corresponding AST
      node
  """

  # TODO(mdan): Perhaps detail this in a markdown doc.
  # TODO(mdan): Add exception support.

  def __init__(self, parent_ast_node):
    self.reset()
    self.parent = parent_ast_node

  def reset(self):
    """Resets the state of this factory."""
    self.head = None
    self.errors = set()
    self.node_index = {}

    # TODO(mdan): Too many primitives. Use classes.
    self.leaves = set()

    # Note: This mechanism requires that nodes are added in lexical order (top
    # to bottom, depth first).
    self.active_stmts = set()
    self.owners = {}  # type: Dict[Node, FrozenSet[ast.AST]]
    self.forward_edges = set()  # type: Set[Tuple[Node, Node]]  # (from, to)

    self.finally_sections = {}
    # Dict values represent (entry, exits)
    self.finally_section_subgraphs = {
    }  # type: Dict[ast.AST, Tuple[Node, Set[Node]]]
    # Whether the guard section can be reached from the statement that precedes
    # it.
    self.finally_section_has_direct_flow = {}
    # Finally sections that await their first node.
    self.pending_finally_sections = set()

    # Exit jumps keyed by the section they affect.
    self.exits = {}

    # The entry of loop sections, keyed by the section.
    self.section_entry = {}
    # Continue jumps keyed by the section they affect.
    self.continues = {}

    # The entry of conditional sections, keyed by the section.
    self.cond_entry = {}
    # Lists of leaf nodes corresponding to each branch in the section.
    self.cond_leaves = {}

  def _connect_nodes(self, first, second):
    """Connects nodes to signify that control flows from first to second.

    Args:
      first: Union[Set[Node, ...], Node]
      second: Node
    """
    if isinstance(first, Node):
      first.next.add(second)
      second.prev.add(first)
      self.forward_edges.add((first, second))
    else:
      for node in first:
        self._connect_nodes(node, second)

  def _add_new_node(self, ast_node):
    """Grows the graph by adding a CFG node following the current leaves."""
    # Membership test: each AST node may map to at most one CFG node. (The
    # previous identity comparison against the index dict itself could never
    # be true, so duplicates went undetected.)
    if ast_node in self.node_index:
      raise ValueError('%s added twice' % ast_node)
    # Assumption: All CFG nodes have identical life spans, because the graph
    # owns them. Nodes should never be used outside the context of an existing
    # graph.
    node = Node(next_=set(), prev=weakref.WeakSet(), ast_node=ast_node)
    self.node_index[ast_node] = node
    self.owners[node] = frozenset(self.active_stmts)

    if self.head is None:
      self.head = node

    for leaf in self.leaves:
      self._connect_nodes(leaf, node)

    # If any finally section awaits its first node, populate it.
    for section_id in self.pending_finally_sections:
      self.finally_section_subgraphs[section_id][0] = node
    self.pending_finally_sections = set()

    return node

  def begin_statement(self, stmt):
    """Marks the beginning of a statement.

    Args:
      stmt: Hashable, a key by which the statement can be identified in
        the CFG's stmt_prev and stmt_next attributes
    """
    self.active_stmts.add(stmt)

  def end_statement(self, stmt):
    """Marks the end of a statement.

    Args:
      stmt: Hashable, a key by which the statement can be identified in
        the CFG's stmt_prev and stmt_next attributes; must match a key
        previously passed to begin_statement.
    """
    self.active_stmts.remove(stmt)

  def add_ordinary_node(self, ast_node):
    """Grows the graph by adding an ordinary CFG node.

    Ordinary nodes are followed by the next node, in lexical order, that is,
    they become the new leaf set.

    Args:
      ast_node: ast.AST

    Returns:
      Node
    """
    node = self._add_new_node(ast_node)
    self.leaves = set((node,))
    return node

  def _add_jump_node(self, ast_node, guards):
    """Grows the graph by adding a jump node.

    Jump nodes are added to the current leaf set, and the leaf set becomes
    empty. If the jump node is the last in a cond section, then it may be added
    back to the leaf set by a separate mechanism.

    Args:
      ast_node: ast.AST
      guards: Tuple[ast.AST, ...], the finally sections active for this node

    Returns:
      Node
    """
    node = self._add_new_node(ast_node)
    self.leaves = set()
    # The guards themselves may not yet be complete, and will be wired later.
    self.finally_sections[node] = guards
    return node

  def _connect_jump_to_finally_sections(self, node):
    """Connects a jump node to the finally sections protecting it."""
    cursor = set((node,))
    if node not in self.finally_sections:
      return cursor
    for guard_section_id in self.finally_sections[node]:
      guard_begin, guard_ends = self.finally_section_subgraphs[guard_section_id]
      self._connect_nodes(cursor, guard_begin)
      cursor = guard_ends
    del self.finally_sections[node]
    # TODO(mdan): Should garbage-collect finally_section_subgraphs.
    return cursor

  def add_exit_node(self, ast_node, section_id, guards):
    """Grows the graph by adding an exit node.

    This node becomes an exit for the current section.

    Args:
      ast_node: ast.AST
      section_id: Hashable, the node for which ast_node should be considered
        to be an exit node
      guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
    """
    node = self._add_jump_node(ast_node, guards)
    self.exits[section_id].add(node)

  def add_continue_node(self, ast_node, section_id, guards):
    """Grows the graph by adding a reentry node.

    This node causes control flow to go back to the loop section's entry.

    Args:
      ast_node: ast.AST
      section_id: Hashable, the node for which ast_node should be considered
        to be an exit node
      guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
    """
    node = self._add_jump_node(ast_node, guards)
    self.continues[section_id].add(node)

  def add_error_node(self, ast_node, guards):
    """Grows the graph by adding an error node.

    This node becomes an exit for the entire graph.

    Args:
      ast_node: ast.AST
      guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
    """
    node = self._add_jump_node(ast_node, guards)
    self.errors.add(node)
    self.leaves = set()

  def enter_section(self, section_id):
    """Enters a regular section.

    Regular sections admit exit jumps, which end the section.

    Args:
      section_id: Hashable, the same node that will be used in calls to the
        ast_node arg passed to add_exit_node
    """
    assert section_id not in self.exits
    self.exits[section_id] = set()

  def exit_section(self, section_id):
    """Exits a regular section."""
    # Exits are jump nodes, which may be protected.
    for exit_ in self.exits[section_id]:
      self.leaves |= self._connect_jump_to_finally_sections(exit_)
    del self.exits[section_id]

  def enter_loop_section(self, section_id, entry_node):
    """Enters a loop section.

    Loop sections define an entry node. The end of the section always flows back
    to the entry node. These admit continue jump nodes which also flow to the
    entry node.

    Args:
      section_id: Hashable, the same node that will be used in calls to the
        ast_node arg passed to add_continue_node
      entry_node: ast.AST, the entry node into the loop (e.g. the test node
        for while loops)
    """
    assert section_id not in self.section_entry
    assert section_id not in self.continues
    self.continues[section_id] = set()
    node = self.add_ordinary_node(entry_node)
    self.section_entry[section_id] = node

  def exit_loop_section(self, section_id):
    """Exits a loop section."""
    self._connect_nodes(self.leaves, self.section_entry[section_id])

    # continues are jump nodes, which may be protected.
    for reentry in self.continues[section_id]:
      guard_ends = self._connect_jump_to_finally_sections(reentry)
      self._connect_nodes(guard_ends, self.section_entry[section_id])

    # Loop nodes always loop back.
    self.leaves = set((self.section_entry[section_id],))

    del self.continues[section_id]
    del self.section_entry[section_id]

  def enter_cond_section(self, section_id):
    """Enters a conditional section.

    Conditional sections define an entry node, and one or more branches.

    Args:
      section_id: Hashable, the same node that will be used in calls to the
        section_id arg passed to new_cond_branch
    """
    assert section_id not in self.cond_entry
    assert section_id not in self.cond_leaves
    self.cond_leaves[section_id] = []

  def new_cond_branch(self, section_id):
    """Begins a new branch in a cond section."""
    assert section_id in self.cond_leaves

    if section_id in self.cond_entry:
      # Subsequent splits move back to the split point, and memorize the
      # current leaves.
      self.cond_leaves[section_id].append(self.leaves)
      self.leaves = self.cond_entry[section_id]
    else:
      # If this is the first time we split a section, just remember the split
      # point.
      self.cond_entry[section_id] = self.leaves

  def exit_cond_section(self, section_id):
    """Exits a conditional section."""
    for split in self.cond_leaves[section_id]:
      self.leaves |= split
    del self.cond_entry[section_id]
    del self.cond_leaves[section_id]

  def enter_finally_section(self, section_id):
    """Enters a finally section."""
    # TODO(mdan): This, not the caller, should track the active sections.
    self.finally_section_subgraphs[section_id] = [None, None]
    if self.leaves:
      self.finally_section_has_direct_flow[section_id] = True
    else:
      self.finally_section_has_direct_flow[section_id] = False
    self.pending_finally_sections.add(section_id)

  def exit_finally_section(self, section_id):
    """Exits a finally section."""
    assert section_id not in self.pending_finally_sections, 'Empty finally?'
    self.finally_section_subgraphs[section_id][1] = self.leaves
    # If the guard can only be reached by a jump, then it will not flow
    # into the statement that follows it.
    if not self.finally_section_has_direct_flow[section_id]:
      self.leaves = set()
    del self.finally_section_has_direct_flow[section_id]

  def build(self):
    """Returns the CFG accumulated so far and resets the builder.

    Returns:
      Graph
    """
    # Freeze the nodes.
    for node in self.node_index.values():
      node.freeze()

    # Build the statement edges.
    stmt_next = {}
    stmt_prev = {}
    for node in self.node_index.values():
      for stmt in self.owners[node]:
        if stmt not in stmt_prev:
          stmt_prev[stmt] = set()
        if stmt not in stmt_next:
          stmt_next[stmt] = set()
    for first, second in self.forward_edges:
      stmts_exited = self.owners[first] - self.owners[second]
      for stmt in stmts_exited:
        stmt_next[stmt].add(second)
      stmts_entered = self.owners[second] - self.owners[first]
      for stmt in stmts_entered:
        stmt_prev[stmt].add(first)
    for stmt in stmt_next:
      stmt_next[stmt] = frozenset(stmt_next[stmt])
    for stmt in stmt_prev:
      stmt_prev[stmt] = frozenset(stmt_prev[stmt])

    # Construct the final graph object.
    result = Graph(
        entry=self.head,
        exit=self.leaves,
        error=self.errors,
        index=self.node_index,
        stmt_prev=stmt_prev,
        stmt_next=stmt_next)

    # Reset the state.
    self.reset()

    return result
class AstToCfg(gast.NodeVisitor):
  """Converts an AST to CFGs.

  A separate CFG will be constructed for each function.
  """

  def __init__(self):
    super(AstToCfg, self).__init__()
    # Builders of enclosing functions/classes, pushed when entering a nested
    # definition; self.builder is the one currently active.
    self.builder_stack = []
    self.builder = None
    # Maps each FunctionDef AST node to its completed Graph.
    self.cfgs = {}
    # Enclosing AST nodes that may act as jump targets or guards (functions,
    # loops, try statements).
    self.lexical_scopes = []

  def _enter_lexical_scope(self, node):
    self.lexical_scopes.append(node)

  def _exit_lexical_scope(self, node):
    leaving_node = self.lexical_scopes.pop()
    assert node == leaving_node

  def _get_enclosing_finally_scopes(self, stop_at):
    """Collects finally guards up to the innermost scope of a type in stop_at.

    Returns a tuple of (the innermost enclosing node whose type is in stop_at,
    or None if there is none, and the list of Try nodes with a finalbody that
    protect the current location, innermost first).
    """
    included = []
    for node in reversed(self.lexical_scopes):
      if isinstance(node, gast.Try) and node.finalbody:
        included.append(node)
      if isinstance(node, stop_at):
        return node, included
    return None, included

  def _process_basic_statement(self, node):
    # Children are visited first, then the statement itself becomes an
    # ordinary CFG node.
    self.generic_visit(node)
    self.builder.add_ordinary_node(node)

  def _process_exit_statement(self, node, *exits_nodes_of_type):
    """Handles statements that exit a section (e.g. return, break)."""
    # Note: this is safe because we process functions separately.
    try_node, guards = self._get_enclosing_finally_scopes(
        tuple(exits_nodes_of_type))
    if try_node is None:
      raise ValueError(
          '%s that is not enclosed by any of %s' % (node, exits_nodes_of_type))
    self.builder.add_exit_node(node, try_node, guards)

  def _process_continue_statement(self, node, *loops_to_nodes_of_type):
    """Handles statements that re-enter a loop section (continue)."""
    # Note: this is safe because we process functions separately.
    try_node, guards = self._get_enclosing_finally_scopes(
        tuple(loops_to_nodes_of_type))
    if try_node is None:
      raise ValueError('%s that is not enclosed by any of %s' %
                       (node, loops_to_nodes_of_type))
    self.builder.add_continue_node(node, try_node, guards)

  def visit_ClassDef(self, node):
    # We also keep the ClassDef node in the CFG, since it technically is a
    # statement.
    # For example, this is legal and allows executing user code:
    #
    #   class Foo(bar()):
    #     pass
    #
    # It also has a scope:
    #
    #   class Bar(object):
    #     a = 1
    if self.builder is None:
      self.generic_visit(node)
      return

    self.builder.add_ordinary_node(node)

    # The class body gets its own builder, analogous to a nested function.
    self.builder_stack.append(self.builder)
    self.builder = GraphBuilder(node)
    self._enter_lexical_scope(node)

    self._process_basic_statement(node)

    self._exit_lexical_scope(node)
    # TODO(mdan): Track the CFG local to the class definition as well?
    self.builder = self.builder_stack.pop()

  def visit_FunctionDef(self, node):
    # We also keep the FunctionDef node in the CFG. This allows us to determine
    # things like reaching definitions via closure. Note that the function body
    # will be stored in a separate graph, because function definitions are not
    # the same as function calls.
    if self.builder is not None:
      self.builder.add_ordinary_node(node)

    self.builder_stack.append(self.builder)
    self.builder = GraphBuilder(node)

    self._enter_lexical_scope(node)
    self.builder.enter_section(node)

    # The arguments node is treated as the function's entry statement.
    self._process_basic_statement(node.args)
    for stmt in node.body:
      self.visit(stmt)

    self.builder.exit_section(node)
    self._exit_lexical_scope(node)

    self.cfgs[node] = self.builder.build()
    self.builder = self.builder_stack.pop()

  def visit_Return(self, node):
    self._process_exit_statement(node, gast.FunctionDef)

  def visit_Expr(self, node):
    self._process_basic_statement(node)

  def visit_Assign(self, node):
    self._process_basic_statement(node)

  def visit_AnnAssign(self, node):
    self._process_basic_statement(node)

  def visit_AugAssign(self, node):
    self._process_basic_statement(node)

  def visit_Pass(self, node):
    self._process_basic_statement(node)

  def visit_Global(self, node):
    self._process_basic_statement(node)

  def visit_Nonlocal(self, node):
    self._process_basic_statement(node)

  def visit_Print(self, node):
    self._process_basic_statement(node)

  def visit_Raise(self, node):
    # Raise exits the entire function; any enclosing finally blocks guard it.
    try_node, guards = self._get_enclosing_finally_scopes((gast.FunctionDef,))
    if try_node is None:
      raise ValueError('%s that is not enclosed by any FunctionDef' % node)
    self.builder.add_error_node(node, guards)

  def visit_Assert(self, node):
    # Ignoring the effect of exceptions.
    self._process_basic_statement(node)

  def visit_Delete(self, node):
    self._process_basic_statement(node)

  def visit_If(self, node):
    # No need to track ifs as lexical scopes, for now.
    # Lexical scopes are generally tracked in order to be able to resolve the
    # targets of jump statements like break/continue/etc. Since there is no
    # statement that can interrupt a conditional, we don't need to track their
    # lexical scope. That may change in the future.
    self.builder.begin_statement(node)

    self.builder.enter_cond_section(node)
    self._process_basic_statement(node.test)

    self.builder.new_cond_branch(node)
    for stmt in node.body:
      self.visit(stmt)

    self.builder.new_cond_branch(node)
    for stmt in node.orelse:
      self.visit(stmt)

    self.builder.exit_cond_section(node)
    self.builder.end_statement(node)

  def visit_While(self, node):
    self.builder.begin_statement(node)
    self._enter_lexical_scope(node)

    self.builder.enter_section(node)

    self.builder.enter_loop_section(node, node.test)
    for stmt in node.body:
      self.visit(stmt)
    self.builder.exit_loop_section(node)

    # Note: although the orelse is technically part of the loop node,
    # the statements inside it don't affect the loop itself. For example, a
    # break in the loop's orelse will not affect the loop itself.
    self._exit_lexical_scope(node)

    for stmt in node.orelse:
      self.visit(stmt)

    self.builder.exit_section(node)
    self.builder.end_statement(node)

  def visit_For(self, node):
    self.builder.begin_statement(node)
    self._enter_lexical_scope(node)

    self.builder.enter_section(node)

    # Note: Strictly speaking, this should be node.target + node.iter.
    # However, the activity analysis accounts for this inconsistency,
    # so dataflow analysis produces the correct values.
    self.builder.enter_loop_section(node, node.iter)
    for stmt in node.body:
      self.visit(stmt)
    self.builder.exit_loop_section(node)

    # Note: although the orelse is technically part of the loop node,
    # they don't count as loop bodies. For example, a break in the loop's
    # orelse will affect the parent loop, not the current one.
    self._exit_lexical_scope(node)

    for stmt in node.orelse:
      self.visit(stmt)

    self.builder.exit_section(node)
    self.builder.end_statement(node)

  def visit_Break(self, node):
    self._process_exit_statement(node, gast.While, gast.For)

  def visit_Continue(self, node):
    self._process_continue_statement(node, gast.While, gast.For)

  def visit_ExceptHandler(self, node):
    self.builder.begin_statement(node)

    if node.type is not None:
      self.visit(node.type)
    if node.name is not None:
      self.visit(node.name)
    for stmt in node.body:
      self.visit(stmt)

    self.builder.end_statement(node)

  def visit_Try(self, node):
    self.builder.begin_statement(node)
    self._enter_lexical_scope(node)

    # Note: the current simplification is that the try block fully executes
    # regardless of whether an exception triggers or not. This is consistent
    # with blocks free of try/except, which also don't account for the
    # possibility of an exception being raised mid-block.

    for stmt in node.body:
      self.visit(stmt)
    # The orelse is an optional continuation of the body.
    if node.orelse:
      block_representative = node.orelse[0]
      self.builder.enter_cond_section(block_representative)
      self.builder.new_cond_branch(block_representative)
      for stmt in node.orelse:
        self.visit(stmt)
      self.builder.new_cond_branch(block_representative)
      self.builder.exit_cond_section(block_representative)

    self._exit_lexical_scope(node)

    if node.handlers:
      # Using node would be inconsistent. Using the first handler node is also
      # inconsistent, but less so.
      block_representative = node.handlers[0]
      self.builder.enter_cond_section(block_representative)
      for block in node.handlers:
        self.builder.new_cond_branch(block_representative)
        self.visit(block)
      self.builder.new_cond_branch(block_representative)
      self.builder.exit_cond_section(block_representative)

    if node.finalbody:
      self.builder.enter_finally_section(node)
      for stmt in node.finalbody:
        self.visit(stmt)
      self.builder.exit_finally_section(node)

    self.builder.end_statement(node)

  def visit_With(self, node):
    # TODO(mdan): Mark the context manager's exit call as exit guard.
    for item in node.items:
      self._process_basic_statement(item)
    for stmt in node.body:
      self.visit(stmt)
def build(node):
  """Builds CFGs for the functions found under `node`.

  Args:
    node: ast.AST, the AST to process

  Returns:
    Dict mapping each FunctionDef AST node to its CFG Graph.
  """
  converter = AstToCfg()
  converter.visit(node)
  return converter.cfgs
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/cfg.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for templates module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.platform import test
class TransformerTest(test.TestCase):
def _simple_context(self):
entity_info = transformer.EntityInfo(
source_code=None, source_file=None, future_features=(), namespace=None)
return transformer.Context(entity_info)
  def test_entity_scope_tracking(self):
    """Checks enclosing_entities across nested functions, classes, lambdas."""

    class TestTransformer(transformer.Base):

      # The choice of node to assign to is arbitrary. Using Assign because it's
      # easy to find in the tree.
      def visit_Assign(self, node):
        anno.setanno(node, 'enclosing_entities', self.enclosing_entities)
        return self.generic_visit(node)

      # This will show up in the lambda function.
      def visit_BinOp(self, node):
        anno.setanno(node, 'enclosing_entities', self.enclosing_entities)
        return self.generic_visit(node)

    tr = TestTransformer(self._simple_context())

    def test_function():
      a = 0

      class TestClass(object):

        def test_method(self):
          b = 0

          def inner_function(x):
            c = 0
            d = lambda y: (x + y)
            return c, d

          return b, inner_function

      return a, TestClass

    node, _ = parser.parse_entity(test_function, future_features=())
    node = tr.visit(node)

    # Locate each nested entity by its position in the parsed AST.
    test_function_node = node
    test_class = test_function_node.body[1]
    test_method = test_class.body[0]
    inner_function = test_method.body[1]
    lambda_node = inner_function.body[1].value

    # The assignments (and the lambda's BinOp) carry the annotations.
    a = test_function_node.body[0]
    b = test_method.body[0]
    c = inner_function.body[0]
    lambda_expr = lambda_node.body

    self.assertEqual(
        (test_function_node,), anno.getanno(a, 'enclosing_entities'))
    self.assertEqual((test_function_node, test_class, test_method),
                     anno.getanno(b, 'enclosing_entities'))
    self.assertEqual(
        (test_function_node, test_class, test_method, inner_function),
        anno.getanno(c, 'enclosing_entities'))
    self.assertEqual((test_function_node, test_class, test_method,
                      inner_function, lambda_node),
                     anno.getanno(lambda_expr, 'enclosing_entities'))
def assertSameAnno(self, first, second, key):
self.assertIs(anno.getanno(first, key), anno.getanno(second, key))
def assertDifferentAnno(self, first, second, key):
self.assertIsNot(anno.getanno(first, key), anno.getanno(second, key))
  def test_state_tracking(self):
    """Checks that self.state stacks independent state per construct type."""

    class LoopState(object):
      pass

    class CondState(object):
      pass

    class TestTransformer(transformer.Base):

      def visit(self, node):
        # Tag every node with the state objects currently on top of the
        # LoopState and CondState stacks.
        anno.setanno(node, 'loop_state', self.state[LoopState].value)
        anno.setanno(node, 'cond_state', self.state[CondState].value)
        return super(TestTransformer, self).visit(node)

      def visit_While(self, node):
        self.state[LoopState].enter()
        node = self.generic_visit(node)
        self.state[LoopState].exit()
        return node

      def visit_If(self, node):
        self.state[CondState].enter()
        node = self.generic_visit(node)
        self.state[CondState].exit()
        return node

    tr = TestTransformer(self._simple_context())

    def test_function(a):
      a = 1
      while a:
        _ = 'a'
        if a > 2:
          _ = 'b'
          while True:
            raise '1'
        if a > 3:
          _ = 'c'
          while True:
            raise '1'

    node, _ = parser.parse_entity(test_function, future_features=())
    node = tr.visit(node)

    fn_body = node.body
    outer_while_body = fn_body[1].body
    # Entering the outer while changes loop_state but not cond_state.
    self.assertSameAnno(fn_body[0], outer_while_body[0], 'cond_state')
    self.assertDifferentAnno(fn_body[0], outer_while_body[0], 'loop_state')

    first_if_body = outer_while_body[1].body
    self.assertDifferentAnno(outer_while_body[0], first_if_body[0],
                             'cond_state')
    self.assertSameAnno(outer_while_body[0], first_if_body[0], 'loop_state')

    first_inner_while_body = first_if_body[1].body
    self.assertSameAnno(first_if_body[0], first_inner_while_body[0],
                        'cond_state')
    self.assertDifferentAnno(first_if_body[0], first_inner_while_body[0],
                             'loop_state')

    second_if_body = outer_while_body[2].body
    # Sibling ifs get fresh cond_state; the shared loop_state is unchanged.
    self.assertDifferentAnno(first_if_body[0], second_if_body[0], 'cond_state')
    self.assertSameAnno(first_if_body[0], second_if_body[0], 'loop_state')

    second_inner_while_body = second_if_body[1].body
    self.assertDifferentAnno(first_inner_while_body[0],
                             second_inner_while_body[0], 'cond_state')
    self.assertDifferentAnno(first_inner_while_body[0],
                             second_inner_while_body[0], 'loop_state')
  def test_local_scope_info_stack(self):
    """Checks that local scope values accumulate per enter/exit_local_scope."""

    class TestTransformer(transformer.Base):

      # Extract all string constants from the block.
      def visit_Constant(self, node):
        self.set_local(
            'string', self.get_local('string', default='') + str(node.value))
        return self.generic_visit(node)

      def _annotate_result(self, node):
        # Collect the strings seen while inside this node's subtree only.
        self.enter_local_scope()
        node = self.generic_visit(node)
        anno.setanno(node, 'test', self.get_local('string'))
        self.exit_local_scope()
        return node

      def visit_While(self, node):
        return self._annotate_result(node)

      def visit_For(self, node):
        return self._annotate_result(node)

    tr = TestTransformer(self._simple_context())

    def test_function(a):
      """Docstring."""
      assert a == 'This should not be counted'
      for i in range(3):
        _ = 'a'
        if i > 2:
          return 'b'
        else:
          _ = 'c'
          while 4:
            raise '1'
      return 'nor this'

    node, _ = parser.parse_entity(test_function, future_features=())
    node = tr.visit(node)

    for_node = node.body[2]
    while_node = for_node.body[1].orelse[1]

    # Scoped values stay in the transformer's local scope, not on the node;
    # only the aggregated 'test' annotation is attached.
    self.assertFalse(anno.hasanno(for_node, 'string'))
    self.assertEqual('3a2bc', anno.getanno(for_node, 'test'))
    self.assertFalse(anno.hasanno(while_node, 'string'))
    self.assertEqual('41', anno.getanno(while_node, 'test'))
  def test_local_scope_info_stack_checks_integrity(self):
    """Checks that unbalanced enter/exit_local_scope calls raise."""

    class TestTransformer(transformer.Base):

      # Enters a scope on If but never exits it; exits on For without entering.
      def visit_If(self, node):
        self.enter_local_scope()
        return self.generic_visit(node)

      def visit_For(self, node):
        node = self.generic_visit(node)
        self.exit_local_scope()
        return node

    tr = TestTransformer(self._simple_context())

    def no_exit(a):
      if a > 0:
        print(a)
      return None

    # A scope entered but never exited must be detected.
    node, _ = parser.parse_entity(no_exit, future_features=())
    with self.assertRaises(AssertionError):
      tr.visit(node)

    def no_entry(a):
      for _ in a:
        print(a)

    # A scope exited without a matching enter must also be detected.
    node, _ = parser.parse_entity(no_entry, future_features=())
    with self.assertRaises(AssertionError):
      tr.visit(node)
  def test_visit_block_postprocessing(self):
    """visit_block's after_visit hook can replace a statement and reparent."""

    class TestTransformer(transformer.Base):

      def _process_body_item(self, node):
        # Replace `... = y` with `if x: ... = y`; returning if_node.body as
        # the second element reparents all following statements into it.
        if isinstance(node, gast.Assign) and (node.value.id == 'y'):
          if_node = gast.If(
              gast.Name(
                  'x', ctx=gast.Load(), annotation=None, type_comment=None),
              [node], [])
          return if_node, if_node.body
        return node, None

      def visit_FunctionDef(self, node):
        node.body = self.visit_block(
            node.body, after_visit=self._process_body_item)
        return node

    def test_function(x, y):
      z = x
      z = y
      return z

    tr = TestTransformer(self._simple_context())
    node, _ = parser.parse_entity(test_function, future_features=())
    node = tr.visit(node)

    # `z = y` and `return z` were moved under the new If.
    self.assertEqual(len(node.body), 2)
    self.assertTrue(isinstance(node.body[0], gast.Assign))
    self.assertTrue(isinstance(node.body[1], gast.If))
    self.assertTrue(isinstance(node.body[1].body[0], gast.Assign))
    self.assertTrue(isinstance(node.body[1].body[1], gast.Return))
def test_robust_error_on_list_visit(self):
class BrokenTransformer(transformer.Base):
def visit_If(self, node):
# This is broken because visit expects a single node, not a list, and
# the body of an if is a list.
# Importantly, the default error handling in visit also expects a single
# node. Therefore, mistakes like this need to trigger a type error
# before the visit called here installs its error handler.
# That type error can then be caught by the enclosing call to visit,
# and correctly blame the If node.
self.visit(node.body)
return node
def test_function(x):
if x > 0:
return x
tr = BrokenTransformer(self._simple_context())
node, _ = parser.parse_entity(test_function, future_features=())
with self.assertRaises(ValueError) as cm:
node = tr.visit(node)
obtained_message = str(cm.exception)
expected_message = r'expected "ast.AST", got "\<(type|class) \'list\'\>"'
self.assertRegexpMatches(obtained_message, expected_message)
  def test_robust_error_on_ast_corruption(self):
    """Error handling must survive a visitor that corrupts the AST."""
    # A child class should not be able to be so broken that it causes the error
    # handling in `transformer.Base` to raise an exception. Why not? Because
    # then the original error location is dropped, and an error handler higher
    # up in the call stack gives misleading information.

    # Here we test that the error handling in `visit` completes, and blames the
    # correct original exception, even if the AST gets corrupted.

    class NotANode(object):
      pass

    class BrokenTransformer(transformer.Base):

      def visit_If(self, node):
        # Corrupt the AST, then raise; the original error must still surface.
        node.body = NotANode()
        raise ValueError('I blew up')

    def test_function(x):
      if x > 0:
        return x

    tr = BrokenTransformer(self._simple_context())
    node, _ = parser.parse_entity(test_function, future_features=())
    with self.assertRaises(ValueError) as cm:
      node = tr.visit(node)
    obtained_message = str(cm.exception)
    # The message should reference the exception actually raised, not anything
    # from the exception handler.
    expected_substring = 'I blew up'
    self.assertTrue(expected_substring in obtained_message, obtained_message)
  def test_origin_info_propagated_to_new_nodes(self):
    """Nodes created by a transform inherit the replaced node's origin."""

    class TestTransformer(transformer.Base):

      def visit_If(self, node):
        return gast.Pass()

    tr = TestTransformer(self._simple_context())

    # NOTE: no lines may be added inside test_fn - the assertion below depends
    # on the if statement being on line 3 of its source.
    def test_fn():
      x = 1
      if x > 0:
        x = 1
      return x

    node, source = parser.parse_entity(test_fn, future_features=())
    origin_info.resolve(node, source, 'test_file', 100, 0)
    node = tr.visit(node)

    created_pass_node = node.body[1]
    # Takes the line number of the if statement.
    self.assertEqual(
        anno.getanno(created_pass_node, anno.Basic.ORIGIN).loc.lineno, 102)
  def test_origin_info_preserved_in_moved_nodes(self):
    """Nodes hoisted out of a replaced parent keep their own origin info."""

    class TestTransformer(transformer.Base):

      # Replaces the If with its own body, moving the nested statements up.
      def visit_If(self, node):
        return node.body

    tr = TestTransformer(self._simple_context())

    # NOTE: no lines may be added inside test_fn - the assertions below depend
    # on the exact line numbers of the moved statements.
    def test_fn():
      x = 1
      if x > 0:
        x = 1
        x += 3
      return x

    node, source = parser.parse_entity(test_fn, future_features=())
    origin_info.resolve(node, source, 'test_file', 100, 0)
    node = tr.visit(node)

    assign_node = node.body[1]
    aug_assign_node = node.body[2]
    # Keep their original line numbers.
    self.assertEqual(
        anno.getanno(assign_node, anno.Basic.ORIGIN).loc.lineno, 103)
    self.assertEqual(
        anno.getanno(aug_assign_node, anno.Basic.ORIGIN).loc.lineno, 104)
# Standard test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/transformer_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for errors module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.autograph.pyct import errors
from tensorflow.python.platform import test
class ErrorMetadataBaseTest(test.TestCase):
  """Tests for errors.ErrorMetadataBase exception re-creation and messages."""

  def _make_metadata(self, callsite_tb=(), cause_message='test message'):
    """Builds an ErrorMetadataBase with no cause metadata or source map."""
    return errors.ErrorMetadataBase(
        callsite_tb=callsite_tb,
        cause_metadata=None,
        cause_message=cause_message,
        source_map={})

  def test_create_exception_default_constructor(self):
    """An exception type with a default constructor can be re-created."""

    class CustomError(Exception):
      pass

    err = self._make_metadata().create_exception(CustomError())
    self.assertIsInstance(err, CustomError)
    self.assertIn('test message', str(err))

  def test_create_exception_custom_constructor(self):
    """An exception type with a custom constructor cannot be re-created."""

    class CustomError(Exception):

      def __init__(self):
        super(CustomError, self).__init__('test_message')

    err = self._make_metadata().create_exception(CustomError())
    self.assertIsNone(err)

  def test_get_message_when_frame_info_code_is_none(self):
    """get_message tolerates traceback entries whose code text is None."""
    callsite_tb = [
        ('/path/one.py', 11, 'test_fn_1', None),
        ('/path/two.py', 171, 'test_fn_2', 'test code'),
    ]
    em = self._make_metadata(
        callsite_tb=callsite_tb, cause_message='Test message')
    self.assertRegex(
        em.get_message(),
        re.compile('test_fn_1.*test_fn_2.*Test message', re.DOTALL))
# Standard test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/errors_test.py
|
# coding=utf-8
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for compiler module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
import gast
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
class CompilerTest(test.TestCase):
  """Tests for AST-to-source and AST-to-module conversion in compiler.py.

  NOTE: several tests compare exact generated source text, so no comments may
  be added inside the nested test functions or the expected-source literals.
  """

  def test_parser_compile_identity(self):
    """Round trip: parse then compile back; sources must match exactly."""

    def test_fn(x):
      a = True
      b = ''
      if a:
        b = x + 1
      return b

    node, _ = parser.parse_entity(test_fn, future_features=())
    module, _, _ = compiler.ast_to_object(node)

    self.assertEqual(
        textwrap.dedent(tf_inspect.getsource(test_fn)),
        tf_inspect.getsource(module.test_fn))

  def test_ast_to_source(self):
    """Generates source for a hand-built if/else AST."""
    node = gast.If(
        test=gast.Num(1),
        body=[
            gast.Assign(
                targets=[gast.Name('a', gast.Store(), None)],
                value=gast.Name('b', gast.Load(), None))
        ],
        orelse=[
            gast.Assign(
                targets=[gast.Name('a', gast.Store(), None)],
                value=gast.Str('c'))
        ])

    source = compiler.ast_to_source(node, indentation='  ')
    self.assertEqual(
        textwrap.dedent("""
            # coding=utf-8
            if 1:
              a = b
            else:
              a = 'c'
        """).strip(), source.strip())

  def test_ast_to_object(self):
    """Compiles a function AST into a loadable module backed by a temp file."""
    node = gast.FunctionDef(
        name='f',
        args=gast.arguments(
            args=[gast.Name('a', gast.Param(), None)],
            vararg=None,
            kwonlyargs=[],
            kwarg=None,
            defaults=[],
            kw_defaults=[]),
        body=[
            gast.Return(
                gast.BinOp(
                    op=gast.Add(),
                    left=gast.Name('a', gast.Load(), None),
                    right=gast.Num(1)))
        ],
        decorator_list=[],
        returns=None)

    module, source, _ = compiler.ast_to_object(node)

    expected_source = """
      # coding=utf-8
      def f(a):
        return a + 1
    """
    self.assertEqual(
        textwrap.dedent(expected_source).strip(),
        source.strip())
    # The compiled function must be callable...
    self.assertEqual(2, module.f(1))
    # ...and its source readable from the backing temporary file.
    with open(module.__file__, 'r') as temp_output:
      self.assertEqual(
          textwrap.dedent(expected_source).strip(),
          temp_output.read().strip())

  def test_source_to_entity(self):
    """Non-ASCII source compiles and its docstring is preserved verbatim."""
    test_source = textwrap.dedent(u"""
      # coding=utf-8
      def f(a):
        '日本語 Δθₜ ← Δθₜ₋₁ + ∇Q(sₜ, aₜ)(rₜ + γₜ₊₁ max Q(⋅))'
        return a + 1
    """)
    module, _ = compiler.source_to_entity(test_source, delete_on_exit=True)
    self.assertEqual(module.f(1), 2)
    self.assertEqual(
        module.f.__doc__, '日本語 Δθₜ ← Δθₜ₋₁ + ∇Q(sₜ, aₜ)(rₜ + γₜ₊₁ max Q(⋅))')
# Standard test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/compiler_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converting AST to code and Python entities.
Adapted from Tangent.
"""
# TODO(mdan): Consolidate with parser and rename to parsing.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(mdan): Use six for compatibility here.
import atexit
import imp
import os
import tempfile
import astor
import gast
import six
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.utils import ag_logging
def ast_to_source(node, indentation='  ', include_encoding_marker=True):
  """Return the source code of given AST.

  Args:
    node: The code to compile, as an AST object, or a list/tuple of them.
    indentation: The string to use for indentation.
    include_encoding_marker: Bool, whether to include a comment on the first
      line to explicitly specify UTF-8 encoding.

  Returns:
    code: Text, the source code generated from the AST object.
  """
  if not isinstance(node, (list, tuple)):
    node = (node,)
  generator = astor.code_gen.SourceGenerator(indentation, False,
                                             astor.string_repr.pretty_string)

  for n in node:
    # astor only understands standard ast nodes; convert gast nodes first.
    if isinstance(n, gast.AST):
      n = gast.gast_to_ast(n)
    generator.visit(n)
    generator.result.append('\n')

  # In some versions of Python, literals may appear as actual values. This
  # ensures everything is string.
  code = ''.join(map(str, generator.result))

  # Strip leading blank lines.
  code_lines = code.split('\n')
  trimmed_code_lines = []
  for l in code_lines:
    if l.rstrip() or trimmed_code_lines:
      trimmed_code_lines.append(l)
  code = '\n'.join(trimmed_code_lines)

  # Work around the reference cycle generated by astor.
  # See https://github.com/berkerpeksag/astor/blob/55dd323f7d8d696610c703c0296763c567685c31/astor/code_gen.py#L162  # pylint:disable=line-too-long
  # Reference cycles are quite disliked by TensorFlow's tests.
  if hasattr(generator, 'write'):
    generator.write = None
  del generator

  if include_encoding_marker:
    code = '# coding=utf-8\n' + code

  return code
def source_to_entity(source, delete_on_exit):
  """Loads the given source code as a Python module.

  The source is written to a temporary .py file so that the resulting module
  is introspectable by e.g. `pdb` or `inspect`.

  Args:
    source: Text, the source code of the module to load.
    delete_on_exit: bool, whether to delete the temporary backing file at
      interpreter exit.

  Returns:
    Tuple[module, Text], the loaded module and the name of the temporary file
    that holds its source.
  """
  if six.PY2:
    # Python 2 cannot pass an encoding to NamedTemporaryFile; encode manually.
    source = source.encode('utf-8')
    f = tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False)
  else:
    f = tempfile.NamedTemporaryFile(  # pylint:disable=unexpected-keyword-arg
        mode='w', suffix='.py', delete=False, encoding='utf-8')

  with f:
    # Strip the '.py' suffix to obtain the module name.
    module_name = os.path.basename(f.name[:-3])
    f.write(source)

  # NOTE(review): the file is kept when verbosity >= 3, presumably to aid
  # debugging of generated code - confirm the intended threshold.
  if delete_on_exit and ag_logging.get_verbosity() < 3:
    atexit.register(lambda: os.remove(f.name))
  return imp.load_source(module_name, f.name), f.name
# TODO(mdan): Rename: ast_to_entity
def ast_to_object(nodes,
                  indentation='  ',
                  include_source_map=False,
                  delete_on_exit=True):
  """Return the Python objects represented by given AST.

  Compiling the AST code this way ensures that the source code is readable by
  e.g. `pdb` or `inspect`.

  Args:
    nodes: Union[ast.AST, Iterable[ast.AST]], the code to compile, as an AST
      object.
    indentation: Text, the string to use for indentation.
    include_source_map: bool, whether return a source map.
    delete_on_exit: bool, whether to delete the temporary file used for
      compilation on exit.

  Returns:
    Tuple[module, Text, Dict[LineLocation, OriginInfo]], containing:
    the module containing the unparsed nodes, the source code corresponding to
    nodes, and the source map. If include_source_map is False, the source map
    will be None.
  """
  if not isinstance(nodes, (list, tuple)):
    nodes = (nodes,)

  source = ast_to_source(nodes, indentation=indentation)
  module, _ = source_to_entity(source, delete_on_exit)

  if include_source_map:
    # Maps lines in the generated source back to the original nodes.
    source_map = origin_info.create_source_map(nodes, source, module.__file__)
  else:
    source_map = None

  # TODO(mdan): Return a structured object.
  return module, source, source_map
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/compiler.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cfg module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.platform import test
class CountingVisitor(cfg.GraphVisitor):
  """Graph visitor that tallies how many times each AST node is visited."""

  def __init__(self, graph):
    super(CountingVisitor, self).__init__(graph)
    self.counts = {}

  def init_state(self, _):
    return None

  def visit_node(self, node):
    previous = self.counts.get(node.ast_node, 0)
    self.counts[node.ast_node] = previous + 1
    return False  # Do not revisit any node.
class GraphVisitorTest(test.TestCase):
  """Tests node coverage of forward and reverse CFG traversals."""

  def _build_cfg(self, fn):
    """Parses fn and returns (cfgs, ast_node) for it."""
    node, _ = parser.parse_entity(fn, future_features=())
    cfgs = cfg.build(node)
    return cfgs, node

  def test_basic_coverage_forward(self):
    """Forward traversal covers all nodes reachable from the entry."""

    def test_fn(a):
      while a > 0:
        a = 1
        break
        return a  # pylint:disable=unreachable
      a = 2

    graphs, node = self._build_cfg(test_fn)
    graph, = graphs.values()
    visitor = CountingVisitor(graph)
    visitor.visit_forward()

    self.assertEqual(visitor.counts[node.args], 1)
    self.assertEqual(visitor.counts[node.body[0].test], 1)
    self.assertEqual(visitor.counts[node.body[0].body[0]], 1)
    self.assertEqual(visitor.counts[node.body[0].body[1]], 1)
    # The return node should be unreachable in forward direction.
    self.assertNotIn(node.body[0].body[2], visitor.counts)
    self.assertEqual(visitor.counts[node.body[1]], 1)

  def test_basic_coverage_reverse(self):
    """Reverse traversal covers all nodes, including the dead return."""

    def test_fn(a):
      while a > 0:
        a = 1
        break
        return a  # pylint:disable=unreachable
      a = 2

    graphs, node = self._build_cfg(test_fn)
    graph, = graphs.values()
    visitor = CountingVisitor(graph)
    visitor.visit_reverse()

    self.assertEqual(visitor.counts[node.args], 1)
    self.assertEqual(visitor.counts[node.body[0].test], 1)
    self.assertEqual(visitor.counts[node.body[0].body[0]], 1)
    self.assertEqual(visitor.counts[node.body[0].body[1]], 1)
    # Bug fix: this was `assertTrue(visitor.counts[...], 1)`, which passed 1
    # as the assertion *message* and asserted nothing about the count. The
    # return node is an exit, so reverse traversal visits it exactly once.
    self.assertEqual(visitor.counts[node.body[0].body[2]], 1)
    self.assertEqual(visitor.counts[node.body[1]], 1)
class AstToCfgTest(test.TestCase):
def _build_cfg(self, fn):
node, _ = parser.parse_entity(fn, future_features=())
cfgs = cfg.build(node)
return cfgs
def _repr_set(self, node_set):
return frozenset(repr(n) for n in node_set)
def _as_set(self, elements):
if elements is None:
return frozenset()
elif isinstance(elements, str):
return frozenset((elements,))
else:
return frozenset(elements)
  def assertGraphMatches(self, graph, edges):
    """Tests whether the CFG contains the specified edges.

    Args:
      graph: the CFG under test.
      edges: iterable of (prev, node_repr, next_) triples; prev and next_ may
        each be None, a single repr string, or a tuple of repr strings.
    """
    for prev, node_repr, next_ in edges:
      matched = False
      for cfg_node in graph.index.values():
        if repr(cfg_node) == node_repr:
          # Compare predecessor and successor sets by their reprs.
          if (self._as_set(prev) == frozenset(map(repr, cfg_node.prev)) and
              self._as_set(next_) == frozenset(map(repr, cfg_node.next))):
            matched = True
            break
      if not matched:
        self.fail(
            'match failed for node "%s" in graph:\n%s' % (node_repr, graph))
  def assertStatementEdges(self, graph, edges):
    """Tests whether the CFG contains the specified statement edges.

    Args:
      graph: the CFG under test.
      edges: iterable of (prev, node_repr, next_) triples, where node_repr has
        the form 'ClassName:lineno' and prev/next_ may each be None, a single
        repr string, or a tuple of repr strings.
    """
    for prev_node_reprs, node_repr, next_node_reprs in edges:
      matched = False
      partial_matches = []
      # Statement-level prev and next maps must cover the same statements.
      self.assertSetEqual(
          frozenset(graph.stmt_next.keys()), frozenset(graph.stmt_prev.keys()))
      for stmt_ast_node in graph.stmt_next:
        ast_repr = '%s:%s' % (stmt_ast_node.__class__.__name__,
                              stmt_ast_node.lineno)
        if ast_repr == node_repr:
          actual_next = frozenset(map(repr, graph.stmt_next[stmt_ast_node]))
          actual_prev = frozenset(map(repr, graph.stmt_prev[stmt_ast_node]))
          # Record near-misses to make the failure message actionable.
          partial_matches.append((actual_prev, node_repr, actual_next))
          if (self._as_set(prev_node_reprs) == actual_prev and
              self._as_set(next_node_reprs) == actual_next):
            matched = True
            break
      if not matched:
        self.fail('edges mismatch for %s: %s' % (node_repr, partial_matches))
  def test_straightline(self):
    """Sequential statements chain each node to the next."""

    def test_fn(a):
      a += 1
      a = 2
      a = 3
      return

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (None, 'a', 'a += 1'),
            ('a += 1', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', 'return'),
            ('a = 3', 'return', None),
        ),
    )
  def test_straightline_no_return(self):
    """A function with no return ends at its last statement."""

    def test_fn(a, b):
      a = b + 1
      a += max(a)

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (None, 'a, b', 'a = (b + 1)'),
            ('a = (b + 1)', 'a += max(a)', None),
        ),
    )
  def test_unreachable_code(self):
    """Statements after a return appear in the graph but are disconnected."""

    def test_fn(a):
      return
      a += 1  # pylint:disable=unreachable

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (None, 'a', 'return'),
            ('a', 'return', None),
            (None, 'a += 1', None),
        ),
    )
  def test_if_straightline(self):
    """Both branches of an if/else flow from the test node."""

    def test_fn(a):
      if a > 0:
        a = 1
      else:
        a += -1

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (None, 'a', '(a > 0)'),
            ('(a > 0)', 'a = 1', None),
            ('(a > 0)', 'a += (- 1)', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'If:2', None),),
    )
  def test_branch_nested(self):
    """Nested if/else branches each fan out from their own test node."""

    def test_fn(a):
      if a > 0:
        if a > 1:
          a = 1
        else:
          a = 2
      else:
        if a > 2:
          a = 3
        else:
          a = 4

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (None, 'a', '(a > 0)'),
            ('a', '(a > 0)', ('(a > 1)', '(a > 2)')),
            ('(a > 0)', '(a > 1)', ('a = 1', 'a = 2')),
            ('(a > 1)', 'a = 1', None),
            ('(a > 1)', 'a = 2', None),
            ('(a > 0)', '(a > 2)', ('a = 3', 'a = 4')),
            ('(a > 2)', 'a = 3', None),
            ('(a > 2)', 'a = 4', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'If:2', None),
            ('(a > 0)', 'If:3', None),
            ('(a > 0)', 'If:8', None),
        ),
    )
  def test_branch_straightline_semi(self):
    """An if without an else has a single conditional successor."""

    def test_fn(a):
      if a > 0:
        a = 1

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (None, 'a', '(a > 0)'),
            ('a', '(a > 0)', 'a = 1'),
            ('(a > 0)', 'a = 1', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'If:2', None),),
    )
  def test_branch_return(self):
    """A return in one branch terminates that path; the other continues."""

    def test_fn(a):
      if a > 0:
        return
      else:
        a = 1
      a = 2

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            ('a', '(a > 0)', ('return', 'a = 1')),
            ('(a > 0)', 'a = 1', 'a = 2'),
            ('(a > 0)', 'return', None),
            ('a = 1', 'a = 2', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'If:2', 'a = 2'),),
    )
  def test_branch_return_minimal(self):
    """A lone conditional return has no statement-level successor."""

    def test_fn(a):
      if a > 0:
        return

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            ('a', '(a > 0)', 'return'),
            ('(a > 0)', 'return', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'If:2', None),),
    )
  def test_while_straightline(self):
    """A while body loops back to the test; the exit follows the test."""

    def test_fn(a):
      while a > 0:
        a = 1
      a = 2

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('a = 1', 'a = 2')),
            ('(a > 0)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'While:2', 'a = 2'),),
    )
  def test_while_else_straightline(self):
    """The while's else clause runs when the test fails."""

    def test_fn(a):
      while a > 0:
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('a = 1', 'a = 2')),
            ('(a > 0)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'While:2', 'a = 3'),),
    )
  def test_while_else_continue(self):
    """A continue inside a while jumps back to the loop test."""

    def test_fn(a):
      while a > 0:
        if a > 1:
          continue
        else:
          a = 0
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'continue', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
            ('(a > 0)', '(a > 1)', ('continue', 'a = 0')),
            ('(a > 1)', 'continue', '(a > 0)'),
            ('a = 0', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'If:3', ('a = 1', '(a > 0)')),
        ),
    )
  def test_while_else_break(self):
    """A break skips the while's else clause and exits the loop."""

    def test_fn(a):
      while a > 0:
        if a > 1:
          break
        a = 1
      else:
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
            ('(a > 0)', '(a > 1)', ('break', 'a = 1')),
            ('(a > 1)', 'break', 'a = 3'),
            ('(a > 1)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            (('break', 'a = 2'), 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'If:3', ('a = 1', 'a = 3')),
        ),
    )
  def test_while_else_return(self):
    """A return inside a while terminates that path entirely."""

    def test_fn(a):
      while a > 0:
        if a > 1:
          return
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
            ('(a > 0)', '(a > 1)', ('return', 'a = 1')),
            ('(a > 1)', 'return', None),
            ('(a > 1)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'If:3', 'a = 1'),
        ),
    )
  def test_while_nested_straightline(self):
    """Nested whiles each loop back to their own test."""

    def test_fn(a):
      while a > 0:
        while a > 1:
          a = 1
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
            (('(a > 0)', 'a = 1'), '(a > 1)', ('a = 1', 'a = 2')),
            ('(a > 1)', 'a = 1', '(a > 1)'),
            ('(a > 1)', 'a = 2', '(a > 0)'),
            ('(a > 0)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'While:3', 'a = 2'),
        ),
    )
  def test_while_nested_continue(self):
    """A continue in the inner while targets the inner loop's test only."""

    def test_fn(a):
      while a > 0:
        while a > 1:
          if a > 3:
            continue
          a = 1
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
            (('(a > 0)', 'continue', 'a = 1'), '(a > 1)', ('(a > 3)', 'a = 2')),
            ('(a > 1)', '(a > 3)', ('continue', 'a = 1')),
            ('(a > 3)', 'continue', '(a > 1)'),
            ('(a > 3)', 'a = 1', '(a > 1)'),
            ('(a > 1)', 'a = 2', '(a > 0)'),
            ('(a > 0)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'While:3', 'a = 2'),
            ('(a > 1)', 'If:4', ('a = 1', '(a > 1)')),
        ),
    )
  def test_while_nested_break(self):
    """A break in the inner while exits only the inner loop."""

    def test_fn(a):
      while a > 0:
        while a > 1:
          if a > 2:
            break
          a = 1
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(graph, (
        (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
        (('(a > 0)', 'a = 1'), '(a > 1)', ('(a > 2)', 'a = 2')),
        ('(a > 1)', '(a > 2)', ('break', 'a = 1')),
        ('(a > 2)', 'break', 'a = 2'),
        ('(a > 2)', 'a = 1', '(a > 1)'),
        (('(a > 1)', 'break'), 'a = 2', '(a > 0)'),
        ('(a > 0)', 'a = 3', None),
    ))
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'While:3', 'a = 2'),
            ('(a > 1)', 'If:4', ('a = 1', 'a = 2')),
        ),
    )
  def test_for_straightline(self):
    """A for body loops back to the iterator node."""

    def test_fn(a):
      for a in range(0, a):
        a = 1
      a = 2

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('a = 1', 'a = 2')),
            ('range(0, a)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'For:2', 'a = 2'),),
    )
  def test_for_else_straightline(self):
    """The for's else clause runs when iteration is exhausted."""

    def test_fn(a):
      for a in range(0, a):
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('a = 1', 'a = 2')),
            ('range(0, a)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'For:2', 'a = 3'),),
    )
  def test_for_else_continue(self):
    """A continue inside a for jumps back to the iterator node."""

    def test_fn(a):
      for a in range(0, a):
        if a > 1:
          continue
        else:
          a = 0
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'continue', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
            ('range(0, a)', '(a > 1)', ('continue', 'a = 0')),
            ('(a > 1)', 'continue', 'range(0, a)'),
            ('(a > 1)', 'a = 0', 'a = 1'),
            ('a = 0', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'If:3', ('a = 1', 'range(0, a)')),
        ),
    )
  def test_for_else_break(self):
    """A break skips the for's else clause and exits the loop."""

    def test_fn(a):
      for a in range(0, a):
        if a > 1:
          break
        a = 1
      else:
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
            ('range(0, a)', '(a > 1)', ('break', 'a = 1')),
            ('(a > 1)', 'break', 'a = 3'),
            ('(a > 1)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            (('break', 'a = 2'), 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'If:3', ('a = 1', 'a = 3')),
        ),
    )
  def test_for_else_return(self):
    """A return inside a for terminates that path entirely."""

    def test_fn(a):
      for a in range(0, a):
        if a > 1:
          return
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
            ('range(0, a)', '(a > 1)', ('return', 'a = 1')),
            ('(a > 1)', 'return', None),
            ('(a > 1)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'If:3', 'a = 1'),
        ),
    )
  def test_for_nested_straightline(self):
    """Nested fors each loop back to their own iterator node."""

    def test_fn(a):
      for a in range(0, a):
        for b in range(1, a):
          b += 1
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
            (('range(0, a)', 'b += 1'), 'range(1, a)', ('b += 1', 'a = 2')),
            ('range(1, a)', 'b += 1', 'range(1, a)'),
            ('range(1, a)', 'a = 2', 'range(0, a)'),
            ('range(0, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'For:3', 'a = 2'),
        ),
    )
  def test_for_nested_continue(self):
    """A continue in the inner for targets the inner iterator only."""

    def test_fn(a):
      for a in range(0, a):
        for b in range(1, a):
          if a > 3:
            continue
          b += 1
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
            (('range(0, a)', 'continue', 'b += 1'), 'range(1, a)',
             ('(a > 3)', 'a = 2')),
            ('range(1, a)', '(a > 3)', ('continue', 'b += 1')),
            ('(a > 3)', 'continue', 'range(1, a)'),
            ('(a > 3)', 'b += 1', 'range(1, a)'),
            ('range(1, a)', 'a = 2', 'range(0, a)'),
            ('range(0, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'For:3', 'a = 2'),
            ('range(1, a)', 'If:4', ('b += 1', 'range(1, a)')),
        ),
    )
  def test_for_nested_break(self):
    """A break in the inner for exits only the inner loop."""

    def test_fn(a):
      for a in range(0, a):
        for b in range(1, a):
          if a > 2:
            break
          b += 1
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
            (('range(0, a)', 'b += 1'), 'range(1, a)', ('(a > 2)', 'a = 2')),
            ('range(1, a)', '(a > 2)', ('break', 'b += 1')),
            ('(a > 2)', 'break', 'a = 2'),
            ('(a > 2)', 'b += 1', 'range(1, a)'),
            (('range(1, a)', 'break'), 'a = 2', 'range(0, a)'),
            ('range(0, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'For:3', 'a = 2'),
            ('range(1, a)', 'If:4', ('b += 1', 'a = 2')),
        ),
    )
  def test_complex(self):
    """Exercises nested loops mixing break, continue, return and else."""

    def test_fn(a):
      b = 0
      while a > 0:
        for b in range(0, a):
          if a > 2:
            break
          if a > 3:
            if a > 4:
              continue
            else:
              max(a)
              break
          b += 1
        else:  # for b in range(0, a):
          return a
        a = 2
      for a in range(1, a):
        return b
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('b = 0', 'a = 2'), '(a > 0)', ('range(0, a)', 'range(1, a)')),
            (
                ('(a > 0)', 'continue', 'b += 1'),
                'range(0, a)',
                ('(a > 2)', 'return a'),
            ),
            ('range(0, a)', '(a > 2)', ('(a > 3)', 'break')),
            ('(a > 2)', 'break', 'a = 2'),
            ('(a > 2)', '(a > 3)', ('(a > 4)', 'b += 1')),
            ('(a > 3)', '(a > 4)', ('continue', 'max(a)')),
            ('(a > 4)', 'max(a)', 'break'),
            ('max(a)', 'break', 'a = 2'),
            ('(a > 4)', 'continue', 'range(0, a)'),
            ('(a > 3)', 'b += 1', 'range(0, a)'),
            ('range(0, a)', 'return a', None),
            ('break', 'a = 2', '(a > 0)'),
            ('(a > 0)', 'range(1, a)', ('return b', 'a = 3')),
            ('range(1, a)', 'return b', None),
            ('range(1, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('b = 0', 'While:3', 'range(1, a)'),
            ('(a > 0)', 'For:4', 'a = 2'),
            ('range(0, a)', 'If:5', ('(a > 3)', 'a = 2')),
            ('(a > 2)', 'If:7', ('b += 1', 'a = 2', 'range(0, a)')),
            ('(a > 3)', 'If:8', ('a = 2', 'range(0, a)')),
            ('(a > 0)', 'For:17', 'a = 3'),
        ),
    )
  def test_finally_straightline(self):
    """The finally block runs on the normal path after the try body."""

    def test_fn(a):
      try:
        a += 1
      finally:
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            ('a', 'a += 1', 'a = 2'),
            ('a += 1', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
  def test_return_finally(self):
    """A return still flows through the finally; code after is unreachable."""

    def test_fn(a):
      try:
        return a
      finally:
        a = 1
      a = 2

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            ('a', 'return a', 'a = 1'),
            ('return a', 'a = 1', None),
            (None, 'a = 2', None),
        ),
    )
  def test_break_finally(self):
    """A break flows through the finally before leaving the loop."""

    def test_fn(a):
      while a > 0:
        try:
          break
        finally:
          a = 1

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            ('a', '(a > 0)', 'break'),
            ('(a > 0)', 'break', 'a = 1'),
            ('break', 'a = 1', None),
        ),
    )
  def test_continue_finally(self):
    """A continue flows through the finally before re-testing the loop."""

    def test_fn(a):
      while a > 0:
        try:
          continue
        finally:
          a = 1

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', 'continue'),
            ('(a > 0)', 'continue', 'a = 1'),
            ('continue', 'a = 1', '(a > 0)'),
        ),
    )
  def test_with_straightline(self):
    """The with's context expression precedes the managed body."""

    def test_fn(a):
      with max(a) as b:
        a = 0
        return b

    graph, = self._build_cfg(test_fn).values()

    self.assertGraphMatches(
        graph,
        (
            ('a', 'max(a)', 'a = 0'),
            ('max(a)', 'a = 0', 'return b'),
            ('a = 0', 'return b', None),
        ),
    )
def test_lambda_basic(self):
  """A lambda assignment is a single CFG node (its body is opaque)."""

  def test_fn(a):
    a = lambda b: a + b
    return a

  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a', 'a = (lambda b: (a + b))', 'return a'),
          ('a = (lambda b: (a + b))', 'return a', None),
      ),
  )
def test_pass(self):
  """`pass` shows up as a regular CFG node."""

  def test_fn(a):  # pylint:disable=unused-argument
    pass

  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a', 'pass', None),
      ),
  )
def test_try_finally(self):
  """try/finally without handlers: body then finally, sequentially."""

  def test_fn(a):
    try:
      a = 1
    finally:
      a = 2
    return a

  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a', 'a = 1', 'a = 2'),
          ('a = 1', 'a = 2', 'return a'),
          ('a = 2', 'return a', None),
      ),
  )
  self.assertStatementEdges(
      graph,
      (
          ('a', 'Try:2', 'return a'),
      ),
  )
def test_try_except_single_bare(self):
  """A bare `except` handler branches off the end of the `try` body."""

  def test_fn(a):
    try:
      a = 1
      a = 2
    except:  # pylint:disable=bare-except
      a = 3
    return a

  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a', 'a = 1', 'a = 2'),
          ('a = 2', 'a = 3', 'return a'),
          (('a = 2', 'a = 3'), 'return a', None),
      ),
  )
  self.assertStatementEdges(
      graph,
      (
          ('a', 'Try:2', 'return a'),
          ('a = 2', 'ExceptHandler:5', 'return a'),
      ),
  )
def test_try_except_single(self):
  """A typed `except` handler has the same CFG shape as a bare one."""

  def test_fn(a):
    try:
      a = 1
      a = 2
    except Exception1:  # pylint:disable=undefined-variable
      a = 3
    return a

  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a', 'a = 1', 'a = 2'),
          ('a = 2', 'a = 3', 'return a'),
          (('a = 2', 'a = 3'), 'return a', None),
      ),
  )
  self.assertStatementEdges(
      graph,
      (
          ('a', 'Try:2', 'return a'),
          ('a = 2', 'ExceptHandler:5', 'return a'),
      ),
  )
def test_try_except_single_aliased(self):
  """`except ... as e` does not change the handler's CFG shape."""

  def test_fn(a):
    try:
      a = 1
    except Exception1 as e:  # pylint:disable=undefined-variable,unused-variable
      a = 2
    return a

  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a', 'a = 1', ('a = 2', 'return a')),
          (('a = 1', 'a = 2'), 'return a', None),
      ),
  )
  self.assertStatementEdges(
      graph,
      (
          ('a', 'Try:2', 'return a'),
          ('a = 1', 'ExceptHandler:4', 'return a'),
      ),
  )
def test_try_except_single_tuple_aliased(self):
  """A tuple of exception types behaves like a single handler."""

  def test_fn(a):
    try:
      a = 1
    except (Exception1, Exception2) as e:  # pylint:disable=undefined-variable,unused-variable
      a = 2
    return a

  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a', 'a = 1', ('a = 2', 'return a')),
          (('a = 1', 'a = 2'), 'return a', None),
      ),
  )
  self.assertStatementEdges(
      graph,
      (
          ('a', 'Try:2', 'return a'),
          ('a = 1', 'ExceptHandler:4', 'return a'),
      ),
  )
def test_try_except_multiple(self):
  """Multiple handlers each branch off the `try` body independently."""

  def test_fn(a):
    try:
      a = 1
    except Exception1:  # pylint:disable=undefined-variable
      a = 2
    except Exception2:  # pylint:disable=undefined-variable
      a = 3
    return a

  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a', 'a = 1', ('a = 2', 'a = 3', 'return a')),
          (('a = 1', 'a = 2', 'a = 3'), 'return a', None),
      ),
  )
  self.assertStatementEdges(
      graph,
      (
          ('a', 'Try:2', 'return a'),
          ('a = 1', 'ExceptHandler:4', 'return a'),
          ('a = 1', 'ExceptHandler:6', 'return a'),
      ),
  )
def test_try_except_finally(self):
  """All handlers and the `try` body converge on the `finally` body."""

  def test_fn(a):
    try:
      a = 1
    except Exception1:  # pylint:disable=undefined-variable
      a = 2
    except Exception2:  # pylint:disable=undefined-variable
      a = 3
    finally:
      a = 4
    return a

  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a', 'a = 1', ('a = 2', 'a = 3', 'a = 4')),
          (('a = 1', 'a = 2', 'a = 3'), 'a = 4', 'return a'),
          ('a = 4', 'return a', None),
      ),
  )
  self.assertStatementEdges(
      graph,
      (
          ('a', 'Try:2', 'return a'),
          ('a = 1', 'ExceptHandler:4', 'a = 4'),
          ('a = 1', 'ExceptHandler:6', 'a = 4'),
      ),
  )
def test_try_in_if(self):
  """Both `if` branches can reach the handler and the statement after."""

  def test_fn(a):
    try:
      if a > 0:
        a = 1
      else:
        a = 2
    except Exception1:  # pylint:disable=undefined-variable
      a = 3
    a = 4

  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a', '(a > 0)', ('a = 1', 'a = 2')),
          ('(a > 0)', 'a = 1', ('a = 3', 'a = 4')),
          ('(a > 0)', 'a = 2', ('a = 3', 'a = 4')),
          (('a = 1', 'a = 2'), 'a = 3', 'a = 4'),
          (('a = 1', 'a = 2', 'a = 3'), 'a = 4', None),
      ),
  )
  self.assertStatementEdges(
      graph,
      (
          ('a', 'Try:2', 'a = 4'),
          ('a', 'If:3', ('a = 3', 'a = 4')),
          (('a = 1', 'a = 2'), 'ExceptHandler:7', 'a = 4'),
      ),
  )
def test_try_in_if_all_branches_exit(self):
  """When every branch raises or returns, nothing flows past the `try`."""

  def test_fn(a, b):
    try:
      if a > 0:
        raise b
      else:
        return 0
    except b:
      return 1

  graph, = self._build_cfg(test_fn).values()
  # TODO(mdan): raise and return should have an edge to the except blocks.
  self.assertGraphMatches(
      graph,
      (
          ('a, b', '(a > 0)', ('raise b', 'return 0')),
          ('(a > 0)', 'raise b', None),
          ('(a > 0)', 'return 0', None),
          (None, 'return 1', None),
      ),
  )
  self.assertStatementEdges(
      graph,
      (
          ('a, b', 'Try:2', None),
          ('a, b', 'If:3', None),
          (None, 'ExceptHandler:7', None),
      ),
  )
def test_list_comprehension(self):
  """A comprehension is treated as a single opaque CFG node."""

  def test_fn(a):
    c = [b for b in a]
    return c

  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a', 'c = [b for b in a]', 'return c'),
          ('c = [b for b in a]', 'return c', None),
      ),
  )
def test_class_definition_empty(self):
  """A nested class definition is a single CFG node."""

  def test_fn(a, b):
    class C(a(b)):
      pass
    return C

  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a, b', 'class C', 'return C'),
          ('class C', 'return C', None),
      ),
  )
def test_class_definition_with_members(self):
  """Class members do not add CFG nodes; the class stays one node."""

  def test_fn(a, b):
    class C(a(b)):
      d = 1
    return C

  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a, b', 'class C', 'return C'),
          ('class C', 'return C', None),
      ),
  )
# Entry point when the test file is executed directly.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/cfg_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for templates module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
from absl.testing import parameterized
import gast
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names as qn
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.platform import test
class _CtxClearer(gast.NodeTransformer):
  """Resets the `ctx` field of every AST node to None, recursively."""

  def visit(self, node):
    # Visit children first; the transformer's return value is not needed
    # because nodes are mutated in place.
    super(_CtxClearer, self).visit(node)
    if hasattr(node, 'ctx'):
      node.ctx = None
    return node
def _parse_with_unset_ctx(expr_source):
  """Parses `expr_source` into an AST with every `ctx` field cleared."""
  ast_node = parser.parse_expression(expr_source)
  # Mutates ast_node in place.
  _CtxClearer().visit(ast_node)
  return ast_node
class _CtxChecker(gast.NodeTransformer):
  """Asserts the expected `ctx` on the root node and `Load` on all others."""

  def __init__(self, test_instance, expected_ctx):
    # True until the first node carrying a ctx attribute is seen.
    self.at_top_level = True
    self.test_instance = test_instance
    self.expected_ctx = expected_ctx

  def visit(self, node):
    if hasattr(node, 'ctx'):
      self.test_instance.assertIsInstance(node.ctx, self.expected_ctx)
      if self.at_top_level:
        self.at_top_level = False
        # All nodes below the root are expected to be loads.
        self.expected_ctx = gast.Load
    return super(_CtxChecker, self).visit(node)
class TemplatesTest(test.TestCase, parameterized.TestCase):
  """Tests for templates.replace and templates.replace_as_expression."""

  def assertExpectedCtxSet(self, node, ctx):
    """Assert that node has ctx=ctx at top and ctx=gast.Load everywhere else."""
    checker = _CtxChecker(self, ctx)
    checker.visit(node)

  def test_replace_tuple(self):
    """A name can be replaced by a tuple of names."""
    template = """
      def test_fn(a, c):
        return b,
    """

    node = templates.replace(template, b=('a', 'c'))[0]
    result, _, _ = compiler.ast_to_object(node)

    self.assertEqual((2, 3), result.test_fn(2, 3))

  def test_replace_variable(self):
    """Replacing a variable renames all of its occurrences."""
    template = """
      def test_fn(a):
        a += 1
        a = 2 * a + 1
        return b
    """

    node = templates.replace(template, a='b')[0]
    result, _, _ = compiler.ast_to_object(node)
    self.assertEqual(7, result.test_fn(2))

  def test_replace_function_name(self):
    """The function's own name can be a template parameter."""
    template = """
      def fname(a):
        a += 1
        a = 2 * a + 1
        return a
    """

    node = templates.replace(template, fname='test_fn')[0]
    result, _, _ = compiler.ast_to_object(node)
    self.assertEqual(7, result.test_fn(2))

  def test_replace_code_block(self):
    """A placeholder statement can be replaced by a list of AST statements."""
    template = """
      def test_fn(a):
        block
        return a
    """

    class ShouldBeReplaced(object):
      pass

    node = templates.replace(
        template,
        block=[
            gast.Assign(
                [
                    gast.Name(
                        'a',
                        ctx=ShouldBeReplaced,
                        annotation=None,
                        type_comment=None)
                ],
                gast.BinOp(
                    gast.Name(
                        'a',
                        ctx=ShouldBeReplaced,
                        annotation=None,
                        type_comment=None), gast.Add(),
                    gast.Constant(1, kind=None)),
            ),
        ] * 2)[0]
    result, _, _ = compiler.ast_to_object(node)
    self.assertEqual(3, result.test_fn(1))

  def test_replace_attribute(self):
    """Attribute names accept string replacements only."""
    template = """
      def test_fn(a):
        return a.foo
    """

    node = templates.replace(template, foo='b')[0]
    result, _, _ = compiler.ast_to_object(node)
    mod = imp.new_module('test')
    mod.b = 3
    self.assertEqual(3, result.test_fn(mod))

    with self.assertRaises(ValueError):
      templates.replace(template, foo=1)

  def test_replace_attribute_context(self):
    """Replacing into a store position sets Store on the root, Load below."""
    template = """
      def test_fn(foo):
        foo = 0
    """

    node = templates.replace(
        template,
        foo=parser.parse_expression('a.b.c'))[0]
    self.assertIsInstance(node.body[0].targets[0].ctx, gast.Store)
    self.assertIsInstance(node.body[0].targets[0].value.ctx, gast.Load)
    self.assertIsInstance(node.body[0].targets[0].value.value.ctx, gast.Load)

  def test_replace_list_context(self):
    """List elements in a store position are all stores."""
    template = """
      def test_fn(foo):
        foo = 0
    """

    node = templates.replace(template, foo=parser.parse_expression('[a, b]'))[0]
    self.assertIsInstance(node.body[0].targets[0].ctx, gast.Store)
    self.assertIsInstance(node.body[0].targets[0].elts[0].ctx, gast.Store)
    self.assertIsInstance(node.body[0].targets[0].elts[1].ctx, gast.Store)

  def test_replace_tuple_context(self):
    """Tuple elements in a store position are all stores."""
    template = """
      def test_fn(foo):
        foo = 0
    """

    node = templates.replace(template, foo=parser.parse_expression('(a, b)'))[0]
    self.assertIsInstance(node.body[0].targets[0].ctx, gast.Store)
    self.assertIsInstance(node.body[0].targets[0].elts[0].ctx, gast.Store)
    self.assertIsInstance(node.body[0].targets[0].elts[1].ctx, gast.Store)

  def test_replace_expression_context(self):
    """Expressions replaced into a load position stay loads throughout."""
    template = """
      def test_fn():
        foo
    """

    node = templates.replace(
        template, foo=parser.parse_expression('a + 2 * b / -c'))[0]
    self.assertIsInstance(node.body[0].left.ctx, gast.Load)
    self.assertIsInstance(node.body[0].right.left.right.ctx, gast.Load)

  def test_replace_complex_context(self):
    """Nested call arguments remain loads even under a store root."""
    template = """
      def test_fn():
        foo = 0
    """

    node = templates.replace(
        template, foo=parser.parse_expression('bar(([a, b],)).baz'))[0]
    self.assertIsInstance(node.body[0].targets[0].ctx, gast.Store)
    function_call_arg = node.body[0].targets[0].value.args[0]
    self.assertIsInstance(function_call_arg.elts[0].ctx, gast.Load)
    self.assertIsInstance(function_call_arg.elts[0].elts[0].ctx, gast.Load)
    self.assertIsInstance(function_call_arg.elts[0].elts[1].ctx, gast.Load)

  def test_replace_index(self):
    """Subscript expressions inside a replacement are loads."""
    template = """
      def test_fn():
        foo = 0
    """

    node = templates.replace(
        template, foo=parser.parse_expression('foo(a[b]).bar'))[0]
    function_call_arg = node.body[0].targets[0].value.args[0]
    self.assertIsInstance(function_call_arg.ctx, gast.Load)
    self.assertIsInstance(function_call_arg.slice.value.ctx, gast.Load)

  def test_replace_call_keyword(self):
    """Keyword arguments can be spliced into a call."""
    template = """
      def test_fn():
        def f(a, d, f):
          return a + d + f
        return f(1, kws=None)
    """

    source = parser.parse_expression('f(d=3, f=5)')
    node = templates.replace(template, kws=source.keywords)[0]
    result, _, _ = compiler.ast_to_object(node)
    self.assertEqual(9, result.test_fn())

    with self.assertRaises(ValueError):
      templates.replace(template, kws=[])
      templates.replace(template, kws=1)

  def test_replace_name_with_call(self):
    """A name can be replaced by an arbitrary call expression."""
    template = """
      def test_fn():
        b = 5
        def g(a):
          return 3 * a
        def f():
          return g
        return foo
    """

    source = parser.parse_expression('f()(b)')
    node = templates.replace(template, foo=source)[0]
    result, _, _ = compiler.ast_to_object(node)
    self.assertEqual(15, result.test_fn())

  def test_replace_name_with_dict(self):
    """A name can be replaced by a dict literal."""
    template = """
      def test_fn():
        return foo['bar']
    """

    source = parser.parse_expression('{\'bar\': 3}')
    node = templates.replace(template, foo=source)[0]
    result, _, _ = compiler.ast_to_object(node)
    self.assertEqual(3, result.test_fn())

  def test_replace_as_expression(self):
    """replace_as_expression returns the expression node itself."""
    template = """
      foo(a)
    """

    node = templates.replace_as_expression(template, foo='bar', a='baz')
    self.assertIsInstance(node, gast.Call)
    self.assertEqual(node.func.id, 'bar')
    self.assertEqual(node.args[0].id, 'baz')

  def test_replace_as_expression_restrictions(self):
    """replace_as_expression rejects multi-statement templates."""
    template = """
      foo(a)
      bar(b)
    """

    with self.assertRaises(ValueError):
      templates.replace_as_expression(template)

  def test_function_call_in_list(self):
    """Nested calls inside a list replacement do not raise."""
    template = """
      foo(bar)
    """

    source = parser.parse_expression('[a(b(1))]')
    templates.replace_as_expression(template, bar=source)

  def test_star_comprehension_in_function_call(self):
    """Comprehension targets stay stores inside a starred call argument."""
    template = """
      a = foo(func, args)
    """

    source = parser.parse_expression('bar(*[i for i in range(j)])')
    node = templates.replace(template, func=source.func, args=source.args)
    arg_node = node[0].value.args[1].value
    self.assertIsInstance(arg_node.generators[0].target.ctx, gast.Store)
    self.assertIsInstance(arg_node.elt.ctx, gast.Load)

  def test_lambda_in_function_call(self):
    """Lambda parameters keep Param ctx when spliced into a call."""
    template = """
      a = foo(arg)
    """

    source = parser.parse_expression('[lambda i: i]')
    node = templates.replace(template, arg=source)
    lambda_arg = node[0].value.args[0].elts[0]
    self.assertIsInstance(lambda_arg.args.args[0].ctx, gast.Param)
    self.assertIsInstance(lambda_arg.body.ctx, gast.Load)

  def test_replace_name_with_subscript(self):
    """A QN with a subscript expands into a Subscript node."""
    template = """
        foo = bar
    """

    replacement = qn.QN(qn.QN('dictionary'), subscript=qn.QN('key'))

    node = templates.replace(template, foo=replacement)[0].targets[0]
    self.assertIsInstance(node.ctx, gast.Store)
    self.assertIsInstance(node.value.ctx, gast.Load)

  @parameterized.named_parameters([
      ('mixed_attr_subscript', 'a.b["c"]'),
      ('mixed_subscript_attr', 'a[b.c]'),
      ('nested_subscript', 'a[b[c]]'),
      ('repeated_subscript', 'a[b][c]'),
  ])
  def test_replace_name_mixed_attr_subscript(self, expression_source):
    """ctx is set correctly for mixed attribute/subscript replacements."""
    template = 'foo = bar'
    replacement = _parse_with_unset_ctx(expression_source)

    target_node = templates.replace(template, foo=replacement)[0].targets[0]
    self.assertExpectedCtxSet(target_node, gast.Store)

    value_node = templates.replace(template, bar=replacement)[0].value
    self.assertExpectedCtxSet(value_node, gast.Load)
# Entry point when the test file is executed directly.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/templates_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python source code transformation library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for parser module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.platform import test
class ParserTest(test.TestCase):
  """Tests for parser.parse_entity and parser.parse_expression."""

  def test_parse_entity(self):
    def f(x):
      return x + 1

    node, _ = parser.parse_entity(f, future_features=())
    self.assertEqual('f', node.name)

  def test_parse_entity_print_function(self):
    def f(x):
      print(x)

    node, _ = parser.parse_entity(f, future_features=('print_function',))
    self.assertEqual('f', node.name)

  def test_parse_comments(self):
    # The comment below is intentionally unindented: it breaks the dedenting
    # the parser performs on the extracted source, which must raise.
    def f():
# unindented comment
      pass

    with self.assertRaises(ValueError):
      parser.parse_entity(f, future_features=())

  def test_parse_multiline_strings(self):
    # The multiline string content starts at column 0 on purpose, which
    # defeats dedenting and must raise.
    def f():
      print("""
some
multiline
string""")

    with self.assertRaises(ValueError):
      parser.parse_entity(f, future_features=())

  def test_parse_expression(self):
    node = parser.parse_expression('a.b')
    self.assertEqual('a', node.value.id)
    self.assertEqual('b', node.attr)
# Entry point when the test file is executed directly.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/parser_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Live entity inspection utilities.
This module contains whatever inspect doesn't offer out of the box.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import itertools
import linecache
import sys
import threading
import types
import six
from tensorflow.python.util import tf_inspect
# This lock seems to help avoid linecache concurrency errors.
_linecache_lock = threading.Lock()

# These functions test negative for isinstance(*, types.BuiltinFunctionType)
# and inspect.isbuiltin, and are generally not visible in globals().
# TODO(mdan): Remove this.
SPECIAL_BUILTINS = {
    'dict': dict,
    'enumerate': enumerate,
    'float': float,
    'int': int,
    'len': len,
    'list': list,
    'print': print,
    'range': range,
    'tuple': tuple,
    'type': type,
    'zip': zip
}

if six.PY2:
  # xrange only exists under Python 2; referencing it on Py3 would NameError.
  SPECIAL_BUILTINS['xrange'] = xrange
def islambda(f):
  """Returns True if `f` is a lambda (identified by its compiled name)."""
  # Lambdas are ordinary functions whose __name__ is the literal '<lambda>'.
  return (tf_inspect.isfunction(f) and
          getattr(f, '__name__', None) == '<lambda>')
def isnamedtuple(f):
  """Returns True if the argument is a namedtuple-like."""
  # namedtuples are tuple subclasses carrying a `_fields` tuple of strings.
  if not (tf_inspect.isclass(f) and issubclass(f, tuple)):
    return False
  fields = getattr(f, '_fields', None)
  if not isinstance(fields, tuple):
    return False
  return all(isinstance(field_name, str) for field_name in fields)
def isbuiltin(f):
  """Returns True if the argument is a built-in function."""
  if isinstance(f, types.BuiltinFunctionType):
    return True
  if inspect.isbuiltin(f):
    return True
  if f is eval:
    return True
  # Some builtins fail the type checks above but are still present in the
  # builtins module; identity-scan it as a last resort.
  return any(f is builtin for builtin in six.moves.builtins.__dict__.values())
def _fix_linecache_record(obj):
"""Fixes potential corruption of linecache in the presence of functools.wraps.
functools.wraps modifies the target object's __module__ field, which seems
to confuse linecache in special instances, for example when the source is
loaded from a .par file (see https://google.github.io/subpar/subpar.html).
This function simply triggers a call to linecache.updatecache when a mismatch
was detected between the object's __module__ property and the object's source
file.
Args:
obj: Any
"""
if hasattr(obj, '__module__'):
obj_file = inspect.getfile(obj)
obj_module = obj.__module__
# A snapshot of the loaded modules helps avoid "dict changed size during
# iteration" errors.
loaded_modules = tuple(sys.modules.values())
for m in loaded_modules:
if hasattr(m, '__file__') and m.__file__ == obj_file:
if obj_module is not m:
linecache.updatecache(obj_file, m.__dict__)
def getimmediatesource(obj):
  """A variant of inspect.getsource that ignores the __wrapped__ property."""
  with _linecache_lock:
    _fix_linecache_record(obj)
    source_lines, start_index = inspect.findsource(obj)
    block = inspect.getblock(source_lines[start_index:])
  return ''.join(block)
def getnamespace(f):
  """Returns the complete namespace of a function.

  Namespace is defined here as the mapping of all non-local variables to values.
  This includes the globals and the closure variables. Note that this captures
  the entire globals collection of the function, and may contain extra symbols
  that it does not actually use.

  Args:
    f: User defined function.

  Returns:
    A dict mapping symbol names to values.
  """
  namespace = dict(six.get_function_globals(f))
  closure = six.get_function_closure(f)
  freevars = six.get_function_code(f).co_freevars
  if closure and freevars:
    # Closure cells are paired positionally with co_freevars.
    namespace.update(
        (name, cell.cell_contents) for name, cell in zip(freevars, closure))
  return namespace
def getqualifiedname(namespace, object_, max_depth=5, visited=None):
  """Returns the name by which a value can be referred to in a given namespace.

  If the object defines a parent module, the function attempts to use it to
  locate the object.

  This function will recurse inside modules, but it will not search objects for
  attributes. The recursion depth is controlled by max_depth.

  Args:
    namespace: Dict[str, Any], the namespace to search into.
    object_: Any, the value to search.
    max_depth: Optional[int], a limit to the recursion depth when searching
      inside modules.
    visited: Optional[Set[int]], ID of modules to avoid visiting.
  Returns: Union[str, None], the fully-qualified name that resolves to the value
      o, or None if it couldn't be found.
  """
  if visited is None:
    visited = set()

  # Copy the dict to avoid "changed size error" during concurrent invocations.
  # TODO(mdan): This is on the hot path. Can we avoid the copy?
  namespace = dict(namespace)

  # First, a direct identity scan of the namespace itself.
  for name in namespace:
    # The value may be referenced by more than one symbol, case in which
    # any symbol will be fine. If the program contains symbol aliases that
    # change over time, this may capture a symbol that will later point to
    # something else.
    # TODO(mdan): Prefer the symbol that matches the value type name.
    if object_ is namespace[name]:
      return name

  # If an object is not found, try to search its parent modules.
  parent = tf_inspect.getmodule(object_)
  if (parent is not None and parent is not object_ and
      parent is not namespace):
    # No limit to recursion depth because of the guard above.
    # NOTE(review): max_depth=0 restricts both lookups below to direct scans.
    parent_name = getqualifiedname(
        namespace, parent, max_depth=0, visited=visited)
    if parent_name is not None:
      name_in_parent = getqualifiedname(
          parent.__dict__, object_, max_depth=0, visited=visited)
      assert name_in_parent is not None, (
          'An object should always be found in its owner module')
      return '{}.{}'.format(parent_name, name_in_parent)

  if max_depth:
    # Iterating over a copy prevents "changed size due to iteration" errors.
    # It's unclear why those occur - suspecting new modules may load during
    # iteration.
    for name in namespace.keys():
      value = namespace[name]
      if tf_inspect.ismodule(value) and id(value) not in visited:
        visited.add(id(value))
        name_in_module = getqualifiedname(value.__dict__, object_,
                                          max_depth - 1, visited)
        if name_in_module is not None:
          return '{}.{}'.format(name, name_in_module)

  return None
def _get_unbound_function(m):
# TODO(mdan): Figure out why six.get_unbound_function fails in some cases.
# The failure case is for tf.keras.Model.
if hasattr(m, '__func__'):
return m.__func__
if hasattr(m, 'im_func'):
return m.im_func
return m
def getdefiningclass(m, owner_class):
  """Resolves the class (e.g. one of the superclasses) that defined a method."""
  # Normalize bound functions to their respective unbound versions.
  m = _get_unbound_function(m)
  # The MRO is walked in reverse so the most distant ancestor that still
  # carries this exact function wins.
  for superclass in reversed(inspect.getmro(owner_class)):
    if hasattr(superclass, m.__name__):
      superclass_m = getattr(superclass, m.__name__)
      if _get_unbound_function(superclass_m) is m:
        return superclass
      elif hasattr(m, '__self__') and m.__self__ == owner_class:
        # Python 3 class methods only work this way it seems :S
        return superclass
  return owner_class
def istfmethodtarget(m):
  """Tests whether an object is a `function.TfMethodTarget`."""
  # See eager.function.TfMethodTarget for more details.
  if not hasattr(m, '__self__'):
    return False
  target = m.__self__
  return (hasattr(target, 'weakrefself_target__') and
          hasattr(target, 'weakrefself_func__') and
          hasattr(m, '__module__') and
          (m.__module__ != 'mock'))
def getmethodself(m):
  """An extended version of inspect.getmethodclass."""
  # Missing __self__ and __self__ set to None are treated identically.
  bound_to = getattr(m, '__self__', None)
  if bound_to is None:
    return None
  # A fallback allowing methods to be actually bound to a type different
  # than __self__. This is useful when a strong reference from the method
  # to the object is not desired, for example when caching is involved.
  if istfmethodtarget(m):
    return m.__self__.target
  return bound_to
def getmethodclass(m):
  """Resolves a function's owner, e.g. a method's class.

  Note that this returns the object that the function was retrieved from, not
  necessarily the class where it was defined.

  This function relies on Python stack frame support in the interpreter, and
  has the same limitations that inspect.currentframe.

  Limitations. This function will only work correctly if the owned class is
  visible in the caller's global or local variables.

  Args:
    m: A user defined function

  Returns:
    The class that this function was retrieved from, or None if the function
    is not an object or class method, or the class that owns the object or
    method is not visible to m.

  Raises:
    ValueError: if the class could not be resolved for any unexpected reason.
  """
  # Callable objects: return their own class.
  if (not hasattr(m, '__name__') and hasattr(m, '__class__') and
      hasattr(m, '__call__')):
    if isinstance(m.__class__, six.class_types):
      return m.__class__

  # Instance method and class methods: return the class of "self".
  m_self = getmethodself(m)
  if m_self is not None:
    if tf_inspect.isclass(m_self):
      return m_self
    return m_self.__class__

  # Class, static and unbound methods: search all defined classes in any
  # namespace. This is inefficient but more robust method.
  owners = []
  caller_frame = tf_inspect.currentframe().f_back
  try:
    # TODO(mdan): This doesn't consider cell variables.
    # TODO(mdan): This won't work if the owner is hidden inside a container.
    # Cell variables may be pulled using co_freevars and the closure.
    for v in itertools.chain(caller_frame.f_locals.values(),
                             caller_frame.f_globals.values()):
      if hasattr(v, m.__name__):
        candidate = getattr(v, m.__name__)
        # Py2 methods may be bound or unbound, extract im_func to get the
        # underlying function.
        if hasattr(candidate, 'im_func'):
          candidate = candidate.im_func
        if hasattr(m, 'im_func'):
          m = m.im_func
        if candidate is m:
          owners.append(v)
  finally:
    # Dropping the frame reference eagerly avoids a reference cycle with
    # this frame's locals.
    del caller_frame

  if owners:
    if len(owners) == 1:
      return owners[0]

    # If multiple owners are found, and are not subclasses, raise an error.
    owner_types = tuple(o if tf_inspect.isclass(o) else type(o) for o in owners)
    for o in owner_types:
      if tf_inspect.isclass(o) and issubclass(o, tuple(owner_types)):
        return o
    raise ValueError('Found too many owners of %s: %s' % (m, owners))

  return None
def getfutureimports(entity):
  """Detects what future imports are necessary to safely execute entity source.

  Args:
    entity: Any object

  Returns:
    A tuple of future strings
  """
  if not (tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity)):
    return tuple()
  # Anything in the function's globals that originates from __future__ is a
  # future feature the source relies on.
  future_names = [
      name for name, value in entity.__globals__.items()
      if getattr(value, '__module__', None) == '__future__'
  ]
  return tuple(sorted(future_names))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/inspect_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for inspect_utils module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import imp
import textwrap
import types
import weakref
import six
from tensorflow.python import lib
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct.testing import basic_definitions
from tensorflow.python.autograph.pyct.testing import decorators
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
def decorator(f):
  """A no-op decorator fixture: returns the function unchanged."""
  return f
def function_decorator():
  """A no-op decorator fixture in factory form."""

  def dec(f):
    return f

  return dec
def wrapping_decorator():
  """A decorator factory whose decorator hides `f` behind a wrapper."""

  def dec(f):

    def replacement(*_):
      return None

    # functools.wraps copies f's metadata onto wrapper, which is what the
    # getimmediatesource tests exercise.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
      return replacement(*args, **kwargs)

    return wrapper

  return dec
class TestClass(object):
  """Fixture exposing the various kinds of class members."""

  def member_function(self):
    pass

  @decorator
  def decorated_member(self):
    pass

  @function_decorator()
  def fn_decorated_member(self):
    pass

  @wrapping_decorator()
  def wrap_decorated_member(self):
    pass

  @staticmethod
  def static_method():
    pass

  @classmethod
  def class_method(cls):
    pass
def free_function():
  """A module-level function fixture."""
  pass
def factory():
  """Returns the module-level `free_function`, read from globals."""
  return free_function
def free_factory():
  """Returns a function defined in a local scope."""

  def local_function():
    pass

  return local_function
class InspectUtilsTest(test.TestCase):
def test_islambda(self):
  """Lambdas are detected; named functions are not."""

  def test_fn():
    pass

  self.assertTrue(inspect_utils.islambda(lambda x: x))
  self.assertFalse(inspect_utils.islambda(test_fn))
def test_isnamedtuple(self):
  """A real namedtuple passes; a plain tuple subclass does not."""
  nt = collections.namedtuple('TestNamedTuple', ['a', 'b'])

  class NotANamedTuple(tuple):
    pass

  self.assertTrue(inspect_utils.isnamedtuple(nt))
  self.assertFalse(inspect_utils.isnamedtuple(NotANamedTuple))
def test_isnamedtuple_confounder(self):
  """This test highlights false positives when detecting named tuples."""

  # A hand-written `_fields` is indistinguishable from a real namedtuple.
  class NamedTupleLike(tuple):
    _fields = ('a', 'b')

  self.assertTrue(inspect_utils.isnamedtuple(NamedTupleLike))
def test_isnamedtuple_subclass(self):
  """This test highlights false positives when detecting named tuples."""

  # Subclasses inherit `_fields`, so they also test positive.
  class NamedTupleSubclass(collections.namedtuple('Test', ['a', 'b'])):
    pass

  self.assertTrue(inspect_utils.isnamedtuple(NamedTupleSubclass))
def assertSourceIdentical(self, actual, expected):
  """Compares source strings modulo common indentation and edge whitespace."""
  self.assertEqual(
      textwrap.dedent(actual).strip(),
      textwrap.dedent(expected).strip()
  )
def test_getimmediatesource_basic(self):
  """The wrapper's source is returned, not the wrapped function's."""

  def test_decorator(f):

    def f_wrapper(*args, **kwargs):
      return f(*args, **kwargs)

    return f_wrapper

  expected = """
    def f_wrapper(*args, **kwargs):
      return f(*args, **kwargs)
  """

  @test_decorator
  def test_fn(a):
    """Test docstring."""
    return [a]

  self.assertSourceIdentical(
      inspect_utils.getimmediatesource(test_fn), expected)
def test_getimmediatesource_noop_decorator(self):
  """A pass-through decorator exposes the original source."""

  def test_decorator(f):
    return f

  expected = '''
    @test_decorator
    def test_fn(a):
      """Test docstring."""
      return [a]
  '''

  @test_decorator
  def test_fn(a):
    """Test docstring."""
    return [a]

  self.assertSourceIdentical(
      inspect_utils.getimmediatesource(test_fn), expected)
def test_getimmediatesource_functools_wrapper(self):
def wrapper_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
expected = textwrap.dedent("""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
""")
@wrapper_decorator
def test_fn(a):
"""Test docstring."""
return [a]
self.assertSourceIdentical(
inspect_utils.getimmediatesource(test_fn), expected)
def test_getimmediatesource_functools_wrapper_different_module(self):
expected = textwrap.dedent("""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
""")
@decorators.wrapping_decorator
def test_fn(a):
"""Test docstring."""
return [a]
self.assertSourceIdentical(
inspect_utils.getimmediatesource(test_fn), expected)
def test_getimmediatesource_normal_decorator_different_module(self):
expected = textwrap.dedent("""
def standalone_wrapper(*args, **kwargs):
return f(*args, **kwargs)
""")
@decorators.standalone_decorator
def test_fn(a):
"""Test docstring."""
return [a]
self.assertSourceIdentical(
inspect_utils.getimmediatesource(test_fn), expected)
def test_getimmediatesource_normal_functional_decorator_different_module(
self):
expected = textwrap.dedent("""
def functional_wrapper(*args, **kwargs):
return f(*args, **kwargs)
""")
@decorators.functional_decorator()
def test_fn(a):
"""Test docstring."""
return [a]
self.assertSourceIdentical(
inspect_utils.getimmediatesource(test_fn), expected)
def test_getnamespace_globals(self):
ns = inspect_utils.getnamespace(factory)
self.assertEqual(ns['free_function'], free_function)
def test_getnamespace_hermetic(self):
# Intentionally hiding the global function to make sure we don't overwrite
# it in the global namespace.
free_function = object() # pylint:disable=redefined-outer-name
def test_fn():
return free_function
ns = inspect_utils.getnamespace(test_fn)
globs = six.get_function_globals(test_fn)
self.assertTrue(ns['free_function'] is free_function)
self.assertFalse(globs['free_function'] is free_function)
def test_getnamespace_locals(self):
def called_fn():
return 0
closed_over_list = []
closed_over_primitive = 1
def local_fn():
closed_over_list.append(1)
local_var = 1
return called_fn() + local_var + closed_over_primitive
ns = inspect_utils.getnamespace(local_fn)
self.assertEqual(ns['called_fn'], called_fn)
self.assertEqual(ns['closed_over_list'], closed_over_list)
self.assertEqual(ns['closed_over_primitive'], closed_over_primitive)
self.assertTrue('local_var' not in ns)
def test_getqualifiedname(self):
foo = object()
qux = imp.new_module('quxmodule')
bar = imp.new_module('barmodule')
baz = object()
bar.baz = baz
ns = {
'foo': foo,
'bar': bar,
'qux': qux,
}
self.assertIsNone(inspect_utils.getqualifiedname(ns, inspect_utils))
self.assertEqual(inspect_utils.getqualifiedname(ns, foo), 'foo')
self.assertEqual(inspect_utils.getqualifiedname(ns, bar), 'bar')
self.assertEqual(inspect_utils.getqualifiedname(ns, baz), 'bar.baz')
def test_getqualifiedname_efficiency(self):
foo = object()
# We create a densely connected graph consisting of a relatively small
# number of modules and hide our symbol in one of them. The path to the
# symbol is at least 10, and each node has about 10 neighbors. However,
# by skipping visited modules, the search should take much less.
ns = {}
prev_level = []
for i in range(10):
current_level = []
for j in range(10):
mod_name = 'mod_{}_{}'.format(i, j)
mod = imp.new_module(mod_name)
current_level.append(mod)
if i == 9 and j == 9:
mod.foo = foo
if prev_level:
# All modules at level i refer to all modules at level i+1
for prev in prev_level:
for mod in current_level:
prev.__dict__[mod.__name__] = mod
else:
for mod in current_level:
ns[mod.__name__] = mod
prev_level = current_level
self.assertIsNone(inspect_utils.getqualifiedname(ns, inspect_utils))
self.assertIsNotNone(
inspect_utils.getqualifiedname(ns, foo, max_depth=10000000000))
def test_getqualifiedname_cycles(self):
foo = object()
# We create a graph of modules that contains circular references. The
# search process should avoid them. The searched object is hidden at the
# bottom of a path of length roughly 10.
ns = {}
mods = []
for i in range(10):
mod = imp.new_module('mod_{}'.format(i))
if i == 9:
mod.foo = foo
# Module i refers to module i+1
if mods:
mods[-1].__dict__[mod.__name__] = mod
else:
ns[mod.__name__] = mod
# Module i refers to all modules j < i.
for prev in mods:
mod.__dict__[prev.__name__] = prev
mods.append(mod)
self.assertIsNone(inspect_utils.getqualifiedname(ns, inspect_utils))
self.assertIsNotNone(
inspect_utils.getqualifiedname(ns, foo, max_depth=10000000000))
def test_getqualifiedname_finds_via_parent_module(self):
# TODO(mdan): This test is vulnerable to change in the lib module.
# A better way to forge modules should be found.
self.assertEqual(
inspect_utils.getqualifiedname(
lib.__dict__, lib.io.file_io.FileIO, max_depth=1),
'io.file_io.FileIO')
def test_getmethodclass(self):
self.assertEqual(
inspect_utils.getmethodclass(free_function), None)
self.assertEqual(
inspect_utils.getmethodclass(free_factory()), None)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.member_function),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.decorated_member),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.fn_decorated_member),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.wrap_decorated_member),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.static_method),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.class_method),
TestClass)
test_obj = TestClass()
self.assertEqual(
inspect_utils.getmethodclass(test_obj.member_function),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.decorated_member),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.fn_decorated_member),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.wrap_decorated_member),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.static_method),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.class_method),
TestClass)
def test_getmethodclass_locals(self):
def local_function():
pass
class LocalClass(object):
def member_function(self):
pass
@decorator
def decorated_member(self):
pass
@function_decorator()
def fn_decorated_member(self):
pass
@wrapping_decorator()
def wrap_decorated_member(self):
pass
self.assertEqual(
inspect_utils.getmethodclass(local_function), None)
self.assertEqual(
inspect_utils.getmethodclass(LocalClass.member_function),
LocalClass)
self.assertEqual(
inspect_utils.getmethodclass(LocalClass.decorated_member),
LocalClass)
self.assertEqual(
inspect_utils.getmethodclass(LocalClass.fn_decorated_member),
LocalClass)
self.assertEqual(
inspect_utils.getmethodclass(LocalClass.wrap_decorated_member),
LocalClass)
test_obj = LocalClass()
self.assertEqual(
inspect_utils.getmethodclass(test_obj.member_function),
LocalClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.decorated_member),
LocalClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.fn_decorated_member),
LocalClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.wrap_decorated_member),
LocalClass)
def test_getmethodclass_callables(self):
class TestCallable(object):
def __call__(self):
pass
c = TestCallable()
self.assertEqual(inspect_utils.getmethodclass(c), TestCallable)
def test_getmethodclass_weakref_mechanism(self):
test_obj = TestClass()
def test_fn(self):
return self
bound_method = types.MethodType(
test_fn,
function.TfMethodTarget(
weakref.ref(test_obj), test_obj.member_function))
self.assertEqual(inspect_utils.getmethodclass(bound_method), TestClass)
def test_getmethodclass_no_bool_conversion(self):
tensor = constant_op.constant([1])
self.assertEqual(
inspect_utils.getmethodclass(tensor.get_shape), type(tensor))
def test_getdefiningclass(self):
class Superclass(object):
def foo(self):
pass
def bar(self):
pass
@classmethod
def class_method(cls):
pass
class Subclass(Superclass):
def foo(self):
pass
def baz(self):
pass
self.assertTrue(
inspect_utils.getdefiningclass(Subclass.foo, Subclass) is Subclass)
self.assertTrue(
inspect_utils.getdefiningclass(Subclass.bar, Subclass) is Superclass)
self.assertTrue(
inspect_utils.getdefiningclass(Subclass.baz, Subclass) is Subclass)
self.assertTrue(
inspect_utils.getdefiningclass(Subclass.class_method, Subclass) is
Superclass)
def test_isbuiltin(self):
self.assertTrue(inspect_utils.isbuiltin(enumerate))
self.assertTrue(inspect_utils.isbuiltin(eval))
self.assertTrue(inspect_utils.isbuiltin(float))
self.assertTrue(inspect_utils.isbuiltin(int))
self.assertTrue(inspect_utils.isbuiltin(len))
self.assertTrue(inspect_utils.isbuiltin(range))
self.assertTrue(inspect_utils.isbuiltin(zip))
self.assertFalse(inspect_utils.isbuiltin(function_decorator))
def test_getfutureimports_functions(self):
self.assertEqual(
inspect_utils.getfutureimports(basic_definitions.function_with_print),
('absolute_import', 'division', 'print_function', 'with_statement'))
def test_getfutureimports_lambdas(self):
self.assertEqual(
inspect_utils.getfutureimports(basic_definitions.simple_lambda),
('absolute_import', 'division', 'print_function', 'with_statement'))
def test_getfutureimports_methods(self):
self.assertEqual(
inspect_utils.getfutureimports(
basic_definitions.SimpleClass.method_with_print),
('absolute_import', 'division', 'print_function', 'with_statement'))
# Standard test runner entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/inspect_utils_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AST manipulation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.util import tf_inspect
class CleanCopier(object):
  """NodeTransformer-like visitor that copies an AST."""

  def __init__(self, preserve_annos):
    super(CleanCopier, self).__init__()
    # Optional collection of annotation keys to carry over to the copies.
    self.preserve_annos = preserve_annos

  def copy(self, node):
    """Returns a deep copy of node (excluding some fields, see copy_clean)."""
    # Containers are copied element-wise.
    if isinstance(node, list):
      return [self.copy(item) for item in node]
    if isinstance(node, tuple):
      return tuple(self.copy(item) for item in node)
    if not isinstance(node, (gast.AST, ast.AST)):
      # Anything that is not an AST, list or tuple is assumed to be a value
      # type, and is shared rather than copied.
      return node

    # Rebuild the node from deep copies of its non-dunder fields.
    copied_fields = {
        field: self.copy(getattr(node, field))
        for field in node._fields
        if not field.startswith('__') and hasattr(node, field)
    }
    duplicate = type(node)(**copied_fields)

    # Carry over only the explicitly requested annotations.
    for key in (self.preserve_annos or ()):
      anno.copyanno(node, duplicate, key)
    return duplicate
def copy_clean(node, preserve_annos=None):
  """Creates a deep copy of an AST.

  The copy will not include fields that are prefixed by '__', with the
  exception of user-specified annotations.

  Args:
    node: ast.AST
    preserve_annos: Optional[Set[Hashable]], annotation keys to include in the
      copy

  Returns:
    ast.AST
  """
  copier = CleanCopier(preserve_annos)
  return copier.copy(node)
class SymbolRenamer(gast.NodeTransformer):
  """Transformer that can rename symbols to simple names."""

  def __init__(self, name_map):
    # Maps qualified names (anno.Basic.QN) to replacement names.
    self.name_map = name_map

  def _process(self, node):
    # Replaces the node with a plain Name if its qualified name is mapped;
    # otherwise recurses into children.
    qn = anno.getanno(node, anno.Basic.QN)
    if qn in self.name_map:
      new_node = gast.Name(
          str(self.name_map[qn]),
          ctx=node.ctx,
          annotation=None,
          type_comment=None)
      # All annotations get carried over.
      for k in anno.keys(node):
        anno.copyanno(node, new_node, k)
      return new_node
    return self.generic_visit(node)

  def visit_Name(self, node):
    return self._process(node)

  def visit_Attribute(self, node):
    if anno.hasanno(node, anno.Basic.QN):
      return self._process(node)
    # Attributes of dynamic objects will not have a QN.
    return self.generic_visit(node)
def rename_symbols(node, name_map):
  """Renames symbols in an AST.

  Requires qual_names annotations on the input.

  Args:
    node: ast.AST, or a list/tuple of them.
    name_map: Dict[qual_names.QN, str], the renaming to apply.

  Returns:
    The transformed node(s), in the same container shape as the input.
  """
  renamer = SymbolRenamer(name_map)
  if isinstance(node, list):
    return [renamer.visit(item) for item in node]
  if isinstance(node, tuple):
    return tuple(renamer.visit(item) for item in node)
  return renamer.visit(node)
def keywords_to_dict(keywords):
  """Converts a list of ast.keyword objects to a dict.

  Args:
    keywords: Iterable of gast.keyword nodes.

  Returns:
    gast.Dict whose keys are the keyword names (as string constants) and whose
    values are the keyword value expressions.
  """
  keys = [gast.Constant(kw.arg, kind=None) for kw in keywords]
  values = [kw.value for kw in keywords]
  return gast.Dict(keys=keys, values=values)
class PatternMatcher(gast.NodeVisitor):
  """Matches a node against a pattern represented by a node.

  Attributes:
    matches: bool, False once any mismatch has been found.
  """

  def __init__(self, pattern):
    self.pattern = pattern
    self.pattern_stack = []
    self.matches = True

  def compare_and_visit(self, node, pattern):
    # Temporarily focuses the matcher on a sub-pattern while visiting `node`.
    self.pattern_stack.append(self.pattern)
    self.pattern = pattern
    self.generic_visit(node)
    self.pattern = self.pattern_stack.pop()

  def no_match(self):
    # Records a failed match; returns False so callers can write
    # `return self.no_match()`.
    self.matches = False
    return False

  def is_wildcard(self, p):
    # A wildcard is a Name node with id '_' or the string '_', possibly
    # wrapped in a single-element list/tuple.
    if isinstance(p, (list, tuple)) and len(p) == 1:
      p, = p
    if isinstance(p, gast.Name) and p.id == '_':
      return True
    if p == '_':
      return True
    return False

  def generic_visit(self, node):
    # Short-circuit once a mismatch is known.
    if not self.matches:
      return

    pattern = self.pattern
    for f in node._fields:
      if f.startswith('__'):
        continue

      if not hasattr(node, f):
        # Field absent on the node: mismatch only if the pattern requires a
        # truthy value for it.
        if hasattr(pattern, f) and getattr(pattern, f):
          return self.no_match()
        else:
          continue
      if not hasattr(pattern, f):
        return self.no_match()

      v = getattr(node, f)
      p = getattr(pattern, f)

      if self.is_wildcard(p):
        continue
      if isinstance(v, (list, tuple)):
        if not isinstance(p, (list, tuple)) or len(v) != len(p):
          return self.no_match()
        for v_item, p_item in zip(v, p):
          self.compare_and_visit(v_item, p_item)
      elif isinstance(v, (gast.AST, ast.AST)):
        # Allow subclass relationships in either direction.
        if not isinstance(v, type(p)) and not isinstance(p, type(v)):
          return self.no_match()
        self.compare_and_visit(v, p)
      else:
        # Assume everything else is a value type.
        if v != p:
          return self.no_match()
def matches(node, pattern):
  """Basic pattern matcher for AST.

  The pattern may contain wildcards represented by the symbol '_'. A node
  matches a pattern if for every node in the tree, either there is a node of
  the same type in pattern, or a Name node with id='_'.

  Args:
    node: ast.AST
    pattern: ast.AST

  Returns:
    bool
  """
  # Convenience: string patterns are parsed into an AST first.
  if isinstance(pattern, str):
    pattern = parser.parse_str(pattern)
  visitor = PatternMatcher(pattern)
  visitor.visit(node)
  return visitor.matches
# TODO(mdan): Once we have error tracing, we may be able to just go to SSA.
def apply_to_single_assignments(targets, values, apply_fn):
  """Applies a function to each individual assignment.

  This function can process a possibly-unpacked (e.g. a, b = c, d) assignment.
  It tries to break down the unpacking if possible. In effect, it has the same
  effect as passing the assigned values in SSA form to apply_fn.

  Examples:

  The following will result in apply_fn(a, c), apply_fn(b, d):

      a, b = c, d

  The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):

      a, b = c

  The following will result in apply_fn(a, (b, c)):

      a = b, c

  It uses the visitor pattern to allow subclasses to process single
  assignments individually.

  Args:
    targets: Union[List[ast.AST], Tuple[ast.AST, ...], ast.AST]; should be
      used with the targets field of an ast.Assign node.
    values: ast.AST
    apply_fn: Callable[[ast.AST, ast.AST], None], called with the
      respective nodes of each single assignment
  """
  if not isinstance(targets, (list, tuple)):
    targets = (targets,)
  for target in targets:
    if isinstance(target, (gast.Tuple, gast.List)):
      for i in range(len(target.elts)):
        target_el = target.elts[i]
        if isinstance(values, (gast.Tuple, gast.List)):
          value_el = values.elts[i]
        else:
          # Unpacking a non-literal value: synthesize an index expression
          # values[i] to stand in for the unpacked element.
          idx = parser.parse_expression(str(i))
          value_el = gast.Subscript(values, gast.Index(idx), ctx=gast.Load())
        # Recurse to handle nested unpacking, e.g. (a, (b, c)) = ...
        apply_to_single_assignments(target_el, value_el, apply_fn)
    else:
      apply_fn(target, values)
def parallel_walk(node, other):
  """Walks two ASTs in parallel.

  The two trees must have identical structure.

  Args:
    node: Union[ast.AST, Iterable[ast.AST]]
    other: Union[ast.AST, Iterable[ast.AST]]

  Yields:
    Tuple[ast.AST, ast.AST]

  Raises:
    ValueError: if the two trees don't have identical structure.
  """
  if isinstance(node, (list, tuple)):
    node_stack = list(node)
  else:
    node_stack = [node]

  if isinstance(other, (list, tuple)):
    other_stack = list(other)
  else:
    other_stack = [other]

  while node_stack and other_stack:
    assert len(node_stack) == len(other_stack)
    n = node_stack.pop()
    o = other_stack.pop()

    # Both entries must be an AST node, a string, or None, and have matching
    # types.
    # Bug fix: the second condition previously tested `n is not None`, which
    # made the non-AST check for `o` depend on `n` instead of `o`.
    if ((not isinstance(n, (ast.AST, gast.AST, str)) and n is not None) or
        (not isinstance(o, (ast.AST, gast.AST, str)) and o is not None) or
        n.__class__.__name__ != o.__class__.__name__):
      raise ValueError('inconsistent nodes: {} ({}) and {} ({})'.format(
          n, n.__class__.__name__, o, o.__class__.__name__))

    yield n, o

    # Strings and None are leaves; do not descend into them.
    if isinstance(n, str):
      assert isinstance(o, str), 'The check above should have ensured this'
      continue
    if n is None:
      assert o is None, 'The check above should have ensured this'
      continue

    for f in n._fields:
      n_child = getattr(n, f, None)
      o_child = getattr(o, f, None)
      if f.startswith('__') or n_child is None or o_child is None:
        continue

      if isinstance(n_child, (list, tuple)):
        if (not isinstance(o_child, (list, tuple)) or
            len(n_child) != len(o_child)):
          raise ValueError(
              'inconsistent values for field {}: {} and {}'.format(
                  f, n_child, o_child))
        node_stack.extend(n_child)
        other_stack.extend(o_child)

      elif isinstance(n_child, (gast.AST, ast.AST)):
        node_stack.append(n_child)
        other_stack.append(o_child)

      elif n_child != o_child:
        raise ValueError(
            'inconsistent values for field {}: {} and {}'.format(
                f, n_child, o_child))
class LambdaDefinitionMatcher(gast.NodeVisitor):
  """Finds lambda nodes that match a given lambda's signature.

  Attributes:
    matching_nodes: List of gast.Lambda nodes whose argspec matches `fn`.
  """

  def __init__(self, fn):
    # fn: the live lambda object whose AST definition we're searching for.
    self.fn = fn
    self.matching_nodes = []

  def _arg_name(self, node):
    # Normalizes an argument node (gast.Name or plain string) to its name.
    if node is None:
      return None
    if isinstance(node, gast.Name):
      return node.id
    assert isinstance(node, str)
    return node

  def _argspec_matches(self, node):
    # True if the Lambda node's positional, *args, **kwargs and keyword-only
    # argument names all match fn's runtime argspec.
    arg_spec = tf_inspect.getfullargspec(self.fn)

    node_args = tuple(self._arg_name(arg) for arg in node.args.args)
    if node_args != tuple(arg_spec.args):
      return False

    if arg_spec.varargs != self._arg_name(node.args.vararg):
      return False

    if arg_spec.varkw != self._arg_name(node.args.kwarg):
      return False

    node_kwonlyargs = tuple(self._arg_name(arg) for arg in node.args.kwonlyargs)
    if node_kwonlyargs != tuple(arg_spec.kwonlyargs):
      return False

    return True

  def visit_Lambda(self, node):
    self.generic_visit(node)
    # Only lambdas can match another lambda's definition.
    if self.fn.__name__ != '<lambda>':
      return
    if not self._argspec_matches(node):
      return
    self.matching_nodes.append(node)
def find_matching_definitions(node, f):
  """Returns all Lambda nodes under `node` whose signature matches `f`.

  Args:
    node: ast.AST, the tree to search.
    f: the live lambda whose definition is being located.

  Returns:
    Tuple of matching gast.Lambda nodes.
  """
  visitor = LambdaDefinitionMatcher(f)
  visitor.visit(node)
  return tuple(visitor.matching_nodes)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/ast_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pretty_printer module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import textwrap
from tensorflow.python.autograph.pyct import pretty_printer
from tensorflow.python.platform import test
class PrettyPrinterTest(test.TestCase):
  """Smoke tests for pretty_printer.fmt."""

  def test_unicode_bytes(self):
    # Mixed bytes/unicode literals should not break formatting.
    source = textwrap.dedent('''
    def f():
      return b'b', u'u', 'depends_py2_py3'
    ''')
    node = ast.parse(source)
    self.assertIsNotNone(pretty_printer.fmt(node))

  def test_format(self):
    node = ast.FunctionDef(
        name='f',
        args=ast.arguments(
            args=[ast.Name(id='a', ctx=ast.Param())],
            vararg=None,
            kwarg=None,
            defaults=[]),
        body=[
            ast.Return(
                ast.BinOp(
                    op=ast.Add(),
                    left=ast.Name(id='a', ctx=ast.Load()),
                    right=ast.Num(1)))
        ],
        decorator_list=[],
        returns=None)
    # Just checking for functionality, the color control characters make it
    # difficult to inspect the result.
    self.assertIsNotNone(pretty_printer.fmt(node))
# Standard test runner entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/pretty_printer_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converting code to AST.
Adapted from Tangent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
import astunparse
import gast
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.util import tf_inspect
STANDARD_PREAMBLE = textwrap.dedent("""
from __future__ import division
from __future__ import print_function
""")
STANDARD_PREAMBLE_LEN = 2
def parse_entity(entity, future_features):
  """Returns the AST and source code of given entity.

  Args:
    entity: Any, Python function/method/class
    future_features: Iterable[Text], future features to use (e.g.
      'print_statement'). See
      https://docs.python.org/2/reference/simple_stmts.html#future

  Returns:
    gast.AST, Text: the parsed AST node; the source code that was parsed to
    generate the AST (including any prefixes that this function may have added).
  """
  try:
    original_source = inspect_utils.getimmediatesource(entity)
  except (IOError, OSError) as e:
    # Fixed a typo in the user-facing message ("should to define").
    raise ValueError(
        'Unable to locate the source code of {}. Note that functions defined'
        ' in certain environments, like the interactive Python shell do not'
        ' expose their source code. If that is the case, you should define'
        ' them in a .py source file. If you are certain the code is'
        ' graph-compatible, wrap the call using'
        ' @tf.autograph.do_not_convert. Original error: {}'.format(entity, e))

  def raise_parse_failure(comment):
    raise ValueError(
        'Failed to parse source code of {}, which Python reported as:\n{}\n'
        '{}'.format(entity, original_source, comment))

  # Comments and multiline strings can appear at arbitrary indentation levels,
  # causing textwrap.dedent to not correctly dedent source code.
  # TODO(b/115884650): Automatic handling of comments/multiline strings.
  source = textwrap.dedent(original_source)

  future_statements = tuple(
      'from __future__ import {}'.format(name) for name in future_features)
  source = '\n'.join(future_statements + (source,))

  try:
    return parse_str(source, preamble_len=len(future_features)), source

  except IndentationError:
    # The text below lists the causes of this error known to us. There may
    # be more.
    raise_parse_failure(
        'This may be caused by multiline strings or comments not indented at'
        ' the same level as the code.')

  except SyntaxError as e:
    if not tf_inspect.isfunction(entity) or entity.__name__ != '<lambda>':
      raise

    # Certain entities, like lambdas, only hold the raw code lines which defined
    # them, which may include surrounding tokens and may be syntactically
    # invalid out of context. For example:
    #
    #     l = (
    #         lambda x: x,)[0]
    #
    # will have the dedented source "lambda x: x,)[0]"
    # Here we make an attempt to strip away the garbage by looking at the
    # information in the syntax error.
    lines = source.split('\n')
    lineno, offset = e.lineno, e.offset  # 1-based

    # Give up if there's nothing we can chip away.
    if len(lines) == lineno and len(lines[-1]) == offset:
      raise_parse_failure(
          'If this is a lambda function, the error may be avoided by creating'
          ' the lambda in a standalone statement.')

    # Drop all lines following the error location
    # TODO(mdan): What's with the pylint errors?
    lines = lines[:lineno]  # pylint:disable=invalid-slice-index
    # Drop all characters following the error location
    lines[-1] = lines[-1][:offset - 1]  # pylint:disable=invalid-slice-index
    source = '\n'.join(lines)

    try:
      return parse_str(source, preamble_len=len(future_features)), source
    except SyntaxError:
      raise_parse_failure(
          'If this is a lambda function, the error may be avoided by creating'
          ' the lambda in a standalone statement.')
# TODO(mdan): This should take futures as input instead.
def parse_str(src, preamble_len=0, single_node=True):
  """Returns the AST of given piece of code.

  Args:
    src: Text
    preamble_len: Int, indicates leading nodes in the parsed AST which should be
      dropped.
    single_node: Bool, whether `src` is assumed to be represented by exactly one
      AST node.

  Returns:
    ast.AST

  Raises:
    ValueError: if `single_node` is set and the source does not parse to
      exactly one node after the preamble is dropped.
  """
  module_node = gast.parse(src)
  nodes = module_node.body
  if preamble_len:
    nodes = nodes[preamble_len:]
  if single_node:
    if len(nodes) != 1:
      # Fixed duplicated word in the error message ("one node node").
      raise ValueError('expected exactly one node, found {}'.format(nodes))
    return nodes[0]
  return nodes
def parse_expression(src):
  """Returns the AST of given identifier.

  Args:
    src: A piece of code that represents a single Python expression

  Returns:
    A gast.AST object.

  Raises:
    ValueError: if src does not consist of a single Expression.
  """
  # The preamble normalizes Py2/Py3 semantics; its statements are dropped
  # again after parsing.
  augmented_src = STANDARD_PREAMBLE + src.strip()
  node = parse_str(
      augmented_src, preamble_len=STANDARD_PREAMBLE_LEN, single_node=True)
  if __debug__:
    if not isinstance(node, gast.Expr):
      raise ValueError(
          'expected a single expression, found instead {}'.format(node))
  return node.value
def unparse(node, indentation=None, include_encoding_marker=True):
  """Returns the source code of given AST.

  Args:
    node: The code to compile, as an AST object, or a list/tuple of them.
    indentation: Unused, deprecated. The returning code will always be indented
      at 4 spaces.
    include_encoding_marker: Bool, whether to include a comment on the first
      line to explicitly specify UTF-8 encoding.

  Returns:
    Text: the source code generated from the AST object(s), one per line.
  """
  del indentation  # astunparse doesn't allow configuring it.
  if not isinstance(node, (list, tuple)):
    node = (node,)

  codes = []
  if include_encoding_marker:
    codes.append('# coding=utf-8')
  for n in node:
    # astunparse only understands the standard ast; convert gast nodes first.
    if isinstance(n, gast.AST):
      n = gast.gast_to_ast(n)
    codes.append(astunparse.unparse(n).strip())

  return '\n'.join(codes)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/parser.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AST conversion templates.
Adapted from Tangent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import textwrap
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
class ContextAdjuster(gast.NodeTransformer):
  """Adjusts the ctx field of nodes to ensure consistency.

  This transformer can change the ctx fields of a variable, tuple and other
  AST elements that allow one, based on whether the element is being read or
  written.
  """

  def __init__(self, override_value):
    # A gast ctx class (e.g. gast.Load / gast.Store) to apply, or None to
    # require that nodes already have a ctx set.
    self._ctx_override = override_value

  def visit(self, node):
    # Saves/restores the override around each visit so that changes made by a
    # child handler don't leak to siblings.
    original_override = self._ctx_override
    node = super(ContextAdjuster, self).visit(node)
    if hasattr(node, 'ctx'):
      assert node.ctx is not None, 'node {} has ctx unset'.format(node)
    self._ctx_override = original_override
    return node

  def _apply_override(self, node):
    if self._ctx_override is not None:
      node.ctx = self._ctx_override()

  def visit_Attribute(self, node):
    self._apply_override(node)
    # The value an attribute is read from is always a Load.
    self._ctx_override = gast.Load
    node = self.generic_visit(node)
    return node

  def visit_Tuple(self, node):
    self._apply_override(node)
    return self.generic_visit(node)

  def visit_List(self, node):
    self._apply_override(node)
    return self.generic_visit(node)

  def visit_Name(self, node):
    self._apply_override(node)
    return self.generic_visit(node)

  def visit_Call(self, node):
    self._apply_override(node)
    # We may be able to override these to Load(), but for now it's simpler
    # to just assert that they're set.
    self._ctx_override = None
    return self.generic_visit(node)

  def visit_Dict(self, node):
    # We may be able to override these to Load(), but for now it's simpler
    # to just assert that they're set.
    self._ctx_override = None
    return self.generic_visit(node)

  def visit_Subscript(self, node):
    self._apply_override(node)
    # The subscripted value is always read (Load), even in an assignment.
    self._ctx_override = gast.Load
    node.value = self.visit(node.value)
    return self.generic_visit(node)

  def visit_comprehension(self, node):
    # We may be able to override some of these, but for now it's simpler
    # to just assert that they're set.
    self._ctx_override = None
    return self.generic_visit(node)

  def visit_Lambda(self, node):
    # We may be able to override some of these, but for now it's simpler
    # to just assert that they're set.
    self._ctx_override = None
    return self.generic_visit(node)
class ReplaceTransformer(gast.NodeTransformer):
  """Replace AST nodes."""

  def __init__(self, replacements):
    """Create a new ReplaceTransformer.

    Args:
      replacements: A mapping from placeholder names to (lists of) AST nodes
        that these placeholders will be replaced by.
    """
    self.replacements = replacements
    self.in_replacements = False
    # Annotation keys that survive the copy made for each replacement.
    self.preserved_annos = {
        anno.Basic.ORIGIN,
        anno.Basic.SKIP_PROCESSING,
        anno.Static.ORIG_DEFINITIONS,
        'extra_test',
        'function_context_name',
    }

  def _prepare_replacement(self, replaced, key):
    """Prepares a replacement AST that's safe to swap in for a node.

    Args:
      replaced: ast.AST, the node being replaced
      key: Hashable, the key of the replacement AST
    Returns:
      ast.AST, the replacement AST
    """
    repl = self.replacements[key]

    # Copy so the same template can be reused; normalize to a list.
    new_nodes = ast_util.copy_clean(repl, preserve_annos=self.preserved_annos)
    if isinstance(new_nodes, gast.AST):
      new_nodes = [new_nodes]

    return new_nodes

  def visit_Expr(self, node):
    # When replacing a placeholder with an entire statement, the replacement
    # must stand on its own and not be wrapped in an Expr.
    new_value = self.visit(node.value)
    if new_value is node.value:
      return node
    return new_value

  def visit_keyword(self, node):
    if node.arg not in self.replacements:
      return self.generic_visit(node)

    repl = self._prepare_replacement(node, node.arg)
    if isinstance(repl, gast.keyword):
      return repl
    elif (repl and isinstance(repl, (list, tuple)) and
          all(isinstance(r, gast.keyword) for r in repl)):
      return repl
    # TODO(mdan): We may allow replacing with a string as well.
    # For example, if one wanted to replace foo with bar in foo=baz, then
    # we could allow changing just node arg, so that we end up with bar=baz.
    raise ValueError(
        'a keyword argument may only be replaced by another keyword or a '
        'non-empty list of keywords. Found: {} for keyword {}'.format(
            repl, node.arg))

  def visit_FunctionDef(self, node):
    node = self.generic_visit(node)
    if node.name not in self.replacements:
      return node

    repl = self.replacements[node.name]
    if not isinstance(repl, (gast.Name, ast.Name)):
      raise ValueError(
          'a function name can only be replaced by a Name node. Found: %s' %
          repl)
    node.name = repl.id
    return node

  def visit_Attribute(self, node):
    node = self.generic_visit(node)
    if node.attr not in self.replacements:
      return node

    repl = self.replacements[node.attr]
    if not isinstance(repl, gast.Name):
      raise ValueError(
          'An attribute can only be replaced by a Name node. Found: %s' % repl)
    node.attr = repl.id
    return node

  def visit_Name(self, node):
    if node.id not in self.replacements:
      return node

    new_nodes = self._prepare_replacement(node, node.id)

    if not new_nodes:
      return new_nodes

    # Preserve the target context: replacements must read/write the same way
    # the placeholder did.
    adjuster = ContextAdjuster(type(node.ctx))
    for n in new_nodes:
      if hasattr(n, 'ctx'):
        adjuster.visit(n)

    if len(new_nodes) == 1:
      new_nodes, = new_nodes

    return new_nodes
def _convert_to_ast(n):
  """Converts strings, qualified names and containers thereof to AST nodes.

  Values that are not one of the recognized data types pass through unchanged.

  Note: When generating AST nodes from strings/QNs in isolation, ctx is
  unknown. ctx must be filled in according to the template being used.
  See ReplaceTransformer.visit_Name.
  """
  if isinstance(n, str):
    return gast.Name(id=n, ctx=None, annotation=None, type_comment=None)
  if isinstance(n, qual_names.QN):
    return n.ast()
  if isinstance(n, (list, tuple)):
    converted = [_convert_to_ast(e) for e in n]
    return converted if isinstance(n, list) else tuple(converted)
  return n
def replace(template, **replacements):
  """Replaces placeholders in a Python template.

  AST Name and Tuple nodes always receive the context inferred from the
  template. However, when replacing more complex nodes (that can potentially
  contain Name children), the caller is responsible for setting the
  appropriate context.

  Args:
    template: A string representing Python code. Any symbol name that appears
      in the template code can be used as a placeholder.
    **replacements: A mapping from placeholder names to (lists of) AST nodes
      that these placeholders will be replaced by. String values are also
      supported as a shorthand for AST Name nodes with the respective ID.

  Returns:
    An AST node or list of AST nodes with the replacements made. If the
    template was a function, a list will be returned. If the template was a
    node, the same node will be returned. If the template was a string, an
    AST node will be returned (a `Module` node in the case of a multi-line
    string, an `Expr` node otherwise).

  Raises:
    ValueError: if the arguments are incorrect.
  """
  if not isinstance(template, str):
    raise ValueError('Expected string template, got %s' % type(template))

  # Normalize string/QN shorthand values to AST nodes.
  for k in replacements:
    replacements[k] = _convert_to_ast(replacements[k])

  template_str = parser.STANDARD_PREAMBLE + textwrap.dedent(template)
  nodes = parser.parse_str(
      template_str,
      preamble_len=parser.STANDARD_PREAMBLE_LEN,
      single_node=False)

  results = []
  for node in nodes:
    replaced = ReplaceTransformer(replacements).visit(node)
    # A replacement may expand one statement into several.
    results.extend(
        replaced if isinstance(replaced, (list, tuple)) else (replaced,))

  return [qual_names.resolve(r) for r in results]
def replace_as_expression(template, **replacements):
  """Variant of replace that generates expressions, instead of code blocks."""
  nodes = replace(template, **replacements)
  if len(nodes) != 1:
    raise ValueError(
        'single expression expected; for more general templates use replace')
  node = nodes[0]
  # A bare name is returned as-is; any other expression was parsed as a
  # statement and must be unwrapped from its Expr container.
  if isinstance(node, gast.Name):
    return node
  if isinstance(node, gast.Expr):
    return node.value
  raise ValueError(
      'the template is expected to generate an expression or a name node;'
      ' instead found %s' % node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/templates.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Print an AST tree in a form more readable than ast.dump."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
import six
import termcolor
class PrettyPrinter(gast.NodeVisitor):
  """Print AST nodes.

  Renders a tree as an indented listing, one node or field per line, with
  optional terminal colors. Output accumulates in `self.result`.
  """

  def __init__(self, color, noanno):
    # Current nesting depth; each level prefixes lines with one '| '.
    self.indent_lvl = 0
    # Accumulated textual output; see _print.
    self.result = ''
    # Whether to emit termcolor escape sequences.
    self.color = color
    # Whether to skip fields whose names start with '__'.
    self.noanno = noanno

  def _color(self, string, color, attrs=None):
    if self.color:
      return termcolor.colored(string, color, attrs=attrs)
    return string

  def _type(self, node):
    # Node type names are rendered in bold.
    return self._color(node.__class__.__name__, None, ['bold'])

  def _field(self, name):
    return self._color(name, 'blue')

  def _value(self, name):
    return self._color(name, 'magenta')

  def _warning(self, name):
    return self._color(name, 'red')

  def _indent(self):
    return self._color('| ' * self.indent_lvl, None, ['dark'])

  def _print(self, s):
    self.result += s
    self.result += '\n'

  def generic_visit(self, node, name=None):
    # In very rare instances, a list can contain something other than a Node.
    # e.g. Global contains a list of strings.
    if isinstance(node, str):
      if name:
        self._print('%s%s="%s"' % (self._indent(), name, node))
      else:
        self._print('%s"%s"' % (self._indent(), node))
      return

    # Nodes with fields get a ':' and an indented field listing below; leaf
    # nodes are shown as 'Type()'.
    if node._fields:
      cont = ':'
    else:
      cont = '()'

    if name:
      self._print('%s%s=%s%s' % (self._indent(), self._field(name),
                                 self._type(node), cont))
    else:
      self._print('%s%s%s' % (self._indent(), self._type(node), cont))

    self.indent_lvl += 1
    for f in node._fields:
      if self.noanno and f.startswith('__'):
        continue
      if not hasattr(node, f):
        # Flag declared-but-unset fields instead of crashing.
        self._print('%s%s' % (self._indent(), self._warning('%s=<unset>' % f)))
        continue
      v = getattr(node, f)
      if isinstance(v, list):
        if v:
          self._print('%s%s=[' % (self._indent(), self._field(f)))
          self.indent_lvl += 1
          for n in v:
            if n is not None:
              self.generic_visit(n)
            else:
              self._print('%sNone' % (self._indent()))
          self.indent_lvl -= 1
          self._print('%s]' % (self._indent()))
        else:
          self._print('%s%s=[]' % (self._indent(), self._field(f)))
      elif isinstance(v, tuple):
        if v:
          self._print('%s%s=(' % (self._indent(), self._field(f)))
          self.indent_lvl += 1
          for n in v:
            if n is not None:
              self.generic_visit(n)
            else:
              self._print('%sNone' % (self._indent()))
          self.indent_lvl -= 1
          self._print('%s)' % (self._indent()))
        else:
          self._print('%s%s=()' % (self._indent(), self._field(f)))
      elif isinstance(v, gast.AST):
        self.generic_visit(v, f)
      elif isinstance(v, six.binary_type):
        # Bytes and text are rendered with explicit b"..."/u"..." prefixes.
        self._print('%s%s=%s' % (self._indent(), self._field(f),
                                 self._value('b"%s"' % v)))
      elif isinstance(v, six.text_type):
        self._print('%s%s=%s' % (self._indent(), self._field(f),
                                 self._value('u"%s"' % v)))
      else:
        self._print('%s%s=%s' % (self._indent(), self._field(f),
                                 self._value(v)))
    self.indent_lvl -= 1
def fmt(node, color=True, noanno=False):
  """Returns a pretty-printed rendering of an AST node (or list of nodes)."""
  printer = PrettyPrinter(color, noanno)
  # Normalize the input to an iterable so a single loop handles both cases.
  nodes = node if isinstance(node, (list, tuple)) else (node,)
  for n in nodes:
    printer.visit(n)
  return printer.result
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/pretty_printer.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ast_util module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import textwrap
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.platform import test
class AstUtilTest(test.TestCase):
  """Exercises ast_util: renaming, copying, matching, unpacking and walks."""

  def setUp(self):
    super(AstUtilTest, self).setUp()
    # Records how many times _mock_apply_fn saw each (target, source) pair.
    self._invocation_counts = collections.defaultdict(lambda: 0)

  def test_rename_symbols_basic(self):
    node = parser.parse_str('a + b')
    node = qual_names.resolve(node)

    node = ast_util.rename_symbols(
        node, {qual_names.QN('a'): qual_names.QN('renamed_a')})

    # The renamed id must remain a plain string, not a QN.
    self.assertIsInstance(node.value.left.id, str)
    source = parser.unparse(node, include_encoding_marker=False)
    self.assertEqual(source.strip(), '(renamed_a + b)')

  def test_rename_symbols_attributes(self):
    node = parser.parse_str('b.c = b.c.d')
    node = qual_names.resolve(node)

    node = ast_util.rename_symbols(
        node, {qual_names.from_str('b.c'): qual_names.QN('renamed_b_c')})

    source = compiler.ast_to_source(node, include_encoding_marker=False)
    self.assertEqual(source.strip(), 'renamed_b_c = renamed_b_c.d')

  def test_rename_symbols_annotations(self):
    node = parser.parse_str('a[i]')
    node = qual_names.resolve(node)
    anno.setanno(node, 'foo', 'bar')
    orig_anno = anno.getanno(node, 'foo')

    node = ast_util.rename_symbols(node,
                                   {qual_names.QN('a'): qual_names.QN('b')})

    # Renaming must preserve (not copy) existing annotations.
    self.assertIs(anno.getanno(node, 'foo'), orig_anno)

  def test_copy_clean(self):
    node = parser.parse_str(
        textwrap.dedent("""
      def f(a):
        return a + 1
    """))
    setattr(node, '__foo', 'bar')
    new_node = ast_util.copy_clean(node)
    self.assertIsNot(new_node, node)
    # Dunder-prefixed attributes (annotations) are stripped by default.
    self.assertFalse(hasattr(new_node, '__foo'))

  def test_copy_clean_preserves_annotations(self):
    node = parser.parse_str(
        textwrap.dedent("""
      def f(a):
        return a + 1
    """))
    anno.setanno(node, 'foo', 'bar')
    anno.setanno(node, 'baz', 1)
    new_node = ast_util.copy_clean(node, preserve_annos={'foo'})
    # Only the whitelisted annotation survives the copy.
    self.assertEqual(anno.getanno(new_node, 'foo'), 'bar')
    self.assertFalse(anno.hasanno(new_node, 'baz'))

  def test_keywords_to_dict(self):
    keywords = parser.parse_expression('f(a=b, c=1, d=\'e\')').keywords
    d = ast_util.keywords_to_dict(keywords)
    # Make sure we generate a usable dict node by attaching it to a variable and
    # compiling everything.
    node = parser.parse_str('def f(b): pass')
    node.body.append(ast.Return(d))
    result, _, _ = compiler.ast_to_object(node)
    self.assertDictEqual(result.f(3), {'a': 3, 'c': 1, 'd': 'e'})

  def assertMatch(self, target_str, pattern_str):
    # Helper: parses both strings and asserts the pattern matches.
    node = parser.parse_expression(target_str)
    pattern = parser.parse_expression(pattern_str)
    self.assertTrue(ast_util.matches(node, pattern))

  def assertNoMatch(self, target_str, pattern_str):
    # Helper: parses both strings and asserts the pattern does not match.
    node = parser.parse_expression(target_str)
    pattern = parser.parse_expression(pattern_str)
    self.assertFalse(ast_util.matches(node, pattern))

  def test_matches_symbols(self):
    # '_' is the wildcard; it matches symbols but not arbitrary expressions.
    self.assertMatch('foo', '_')
    self.assertNoMatch('foo()', '_')
    self.assertMatch('foo + bar', 'foo + _')
    self.assertNoMatch('bar + bar', 'foo + _')
    self.assertNoMatch('foo - bar', 'foo + _')

  def test_matches_function_args(self):
    self.assertMatch('super(Foo, self).__init__(arg1, arg2)',
                     'super(_).__init__(_)')
    self.assertMatch('super().__init__()', 'super(_).__init__(_)')
    self.assertNoMatch('super(Foo, self).bar(arg1, arg2)',
                       'super(_).__init__(_)')
    self.assertMatch('super(Foo, self).__init__()', 'super(Foo, _).__init__(_)')
    self.assertNoMatch('super(Foo, self).__init__()',
                       'super(Bar, _).__init__(_)')

  def _mock_apply_fn(self, target, source):
    # Stand-in for the apply_fn callback; tallies the pairs it was given.
    target = compiler.ast_to_source(target, include_encoding_marker=False)
    source = compiler.ast_to_source(source, include_encoding_marker=False)
    self._invocation_counts[(target.strip(), source.strip())] += 1

  def test_apply_to_single_assignments_dynamic_unpack(self):
    node = parser.parse_str('a, b, c = d')
    # Unpacking an opaque value expands into indexed reads d[0], d[1], ...
    ast_util.apply_to_single_assignments(node.targets, node.value,
                                         self._mock_apply_fn)
    self.assertDictEqual(self._invocation_counts, {
        ('a', 'd[0]'): 1,
        ('b', 'd[1]'): 1,
        ('c', 'd[2]'): 1,
    })

  def test_apply_to_single_assignments_static_unpack(self):
    node = parser.parse_str('a, b, c = d, e, f')
    # A literal tuple on the right pairs elements positionally.
    ast_util.apply_to_single_assignments(node.targets, node.value,
                                         self._mock_apply_fn)
    self.assertDictEqual(self._invocation_counts, {
        ('a', 'd'): 1,
        ('b', 'e'): 1,
        ('c', 'f'): 1,
    })

  def test_parallel_walk(self):
    src = """
      def f(a):
        return a + 1
    """
    node = parser.parse_str(textwrap.dedent(src))
    for child_a, child_b in ast_util.parallel_walk(node, node):
      self.assertEqual(child_a, child_b)

  def test_parallel_walk_string_leaves(self):
    # Global statements yield raw strings as leaves; the walk must handle them.
    src = """
      def f(a):
        global g
    """
    node = parser.parse_str(textwrap.dedent(src))
    for child_a, child_b in ast_util.parallel_walk(node, node):
      self.assertEqual(child_a, child_b)

  def test_parallel_walk_inconsistent_trees(self):
    node_1 = parser.parse_str(
        textwrap.dedent("""
      def f(a):
        return a + 1
    """))
    node_2 = parser.parse_str(
        textwrap.dedent("""
      def f(a):
        return a + (a * 2)
    """))
    node_3 = parser.parse_str(
        textwrap.dedent("""
      def f(a):
        return a + 2
    """))
    with self.assertRaises(ValueError):
      for _ in ast_util.parallel_walk(node_1, node_2):
        pass
    # There is not particular reason to reject trees that differ only in the
    # value of a constant.
    # TODO(mdan): This should probably be allowed.
    with self.assertRaises(ValueError):
      for _ in ast_util.parallel_walk(node_1, node_3):
        pass

  def assertLambdaNodes(self, matching_nodes, expected_bodies):
    # Helper: asserts every matched node is a Lambda with an expected body.
    self.assertEqual(len(matching_nodes), len(expected_bodies))
    for node in matching_nodes:
      self.assertIsInstance(node, gast.Lambda)
      self.assertIn(
          compiler.ast_to_source(node.body,
                                 include_encoding_marker=False).strip(),
          expected_bodies)

  def test_find_matching_definitions_lambda(self):
    node = parser.parse_str(
        textwrap.dedent("""
      f = lambda x: 1
    """))
    f = lambda x: x
    nodes = ast_util.find_matching_definitions(node, f)
    self.assertLambdaNodes(nodes, ('1',))

  def test_find_matching_definitions_lambda_multiple_matches(self):
    node = parser.parse_str(
        textwrap.dedent("""
      f = lambda x: 1, lambda x: 2
    """))
    f = lambda x: x
    nodes = ast_util.find_matching_definitions(node, f)
    self.assertLambdaNodes(nodes, ('1', '2'))

  def test_find_matching_definitions_lambda_uses_arg_names(self):
    # Matching discriminates lambdas by their argument names.
    node = parser.parse_str(
        textwrap.dedent("""
      f = lambda x: 1, lambda y: 2
    """))
    f = lambda x: x
    nodes = ast_util.find_matching_definitions(node, f)
    self.assertLambdaNodes(nodes, ('1',))

    f = lambda y: y
    nodes = ast_util.find_matching_definitions(node, f)
    self.assertLambdaNodes(nodes, ('2',))
# Standard test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/ast_util_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for origin_info module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import textwrap
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct.testing import basic_definitions
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
class OriginInfoTest(test.TestCase):
  """Tests origin_info: source maps and origin resolution for parsed code."""

  def test_create_source_map(self):

    source = """
      def test_fn(x):
        return x + 1
    """
    source = textwrap.dedent(source)

    node = parser.parse_str(source)
    # Attach a synthetic origin; the source map should point back to it.
    fake_origin = origin_info.OriginInfo(
        loc=origin_info.Location('fake_filename', 3, 7),
        function_name='fake_function_name',
        source_code_line='fake source line',
        comment=None)
    anno.setanno(node, anno.Basic.ORIGIN, fake_origin)
    source_map = origin_info.create_source_map(node, source, 'test_filename')

    loc = origin_info.LineLocation('test_filename', 2)
    self.assertIn(loc, source_map)
    self.assertIs(source_map[loc], fake_origin)

  def _create_source_map(self, test_fn):
    # Helper: parses test_fn and resolves its origins.
    node, source = parser.parse_entity(test_fn, ())
    origin_info.resolve_entity(node, source, test_fn)
    # Creating a source map with the source code as output will create
    # an identity map.
    return origin_info.create_source_map(node, source, 'test_filename')

  def test_create_source_map_identity(self):
    test_fn = basic_definitions.simple_function
    source_map = self._create_source_map(test_fn)
    module_path = tf_inspect.getsourcefile(test_fn)

    # Origin line numbers below should match those in basic_definitions.py
    definition_loc = origin_info.LineLocation('test_filename', 1)
    self.assertIn(definition_loc, source_map)
    self.assertEqual(source_map[definition_loc].loc.lineno, 23)
    self.assertEqual(source_map[definition_loc].loc.filename, module_path)
    self.assertEqual(source_map[definition_loc].function_name,
                     'simple_function')

  def test_create_source_map_multiline_call(self):
    test_fn = basic_definitions.function_with_multiline_call
    source_map = self._create_source_map(test_fn)
    module_path = tf_inspect.getsourcefile(test_fn)

    # Origin line numbers below should match those in basic_definitions.py
    call_loc = origin_info.LineLocation('test_filename', 3)
    self.assertIn(call_loc, source_map)
    self.assertEqual(source_map[call_loc].loc.lineno, 55)
    self.assertEqual(source_map[call_loc].loc.filename, module_path)
    self.assertEqual(source_map[call_loc].function_name,
                     'function_with_multiline_call')
    self.assertEqual(source_map[call_loc].source_code_line, '  return range(')

    # Continuation lines of the call map to their own origin lines.
    second_arg_loc = origin_info.LineLocation('test_filename', 5)
    self.assertIn(second_arg_loc, source_map)
    self.assertEqual(source_map[second_arg_loc].loc.lineno, 57)
    self.assertEqual(source_map[second_arg_loc].loc.filename, module_path)
    self.assertEqual(source_map[second_arg_loc].function_name,
                     'function_with_multiline_call')
    self.assertEqual(source_map[second_arg_loc].source_code_line,
                     '      x + 1,')

  def test_create_source_map_no_origin_info(self):
    test_fn = basic_definitions.simple_function
    node, _ = parser.parse_entity(test_fn,
                                  inspect_utils.getfutureimports(test_fn))
    # No origin information should result in an empty map.
    test_fn_lines, _ = tf_inspect.getsourcelines(test_fn)
    source_map = origin_info.create_source_map(node, '\n'.join(test_fn_lines),
                                               test_fn)

    self.assertEmpty(source_map)

  def test_resolve(self):

    source = """
      def test_fn(x):
        '''Docstring.'''
        return x  # comment
    """
    source = textwrap.dedent(source)

    node = parser.parse_str(source)
    # Offsets (10, 10) shift reported locations relative to the real file.
    origin_info.resolve(node, source, 'test_file', 10, 10)

    def_origin = anno.getanno(node, anno.Basic.ORIGIN)
    self.assertEqual(def_origin.loc.filename, 'test_file')
    self.assertEqual(def_origin.loc.lineno, 10)
    self.assertEqual(def_origin.loc.col_offset, 10)
    self.assertEqual(def_origin.source_code_line, 'def test_fn(x):')
    self.assertIsNone(def_origin.comment)

    docstring_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN)
    self.assertEqual(def_origin.loc.filename, 'test_file')
    self.assertEqual(docstring_origin.loc.lineno, 11)
    self.assertEqual(docstring_origin.loc.col_offset, 12)
    self.assertEqual(docstring_origin.source_code_line, "  '''Docstring.'''")
    self.assertIsNone(docstring_origin.comment)

    ret_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN)
    self.assertEqual(def_origin.loc.filename, 'test_file')
    self.assertEqual(ret_origin.loc.lineno, 12)
    self.assertEqual(ret_origin.loc.col_offset, 12)
    self.assertEqual(ret_origin.source_code_line, '  return x  # comment')
    # Trailing comments are captured separately from the source line.
    self.assertEqual(ret_origin.comment, 'comment')

  def test_resolve_entity(self):
    test_fn = basic_definitions.simple_function
    node, source = parser.parse_entity(
        test_fn, inspect_utils.getfutureimports(test_fn))
    origin_info.resolve_entity(node, source, test_fn)

    # The line numbers below should match those in basic_definitions.py
    def_origin = anno.getanno(node, anno.Basic.ORIGIN)
    self.assertEqual(def_origin.loc.lineno, 23)
    self.assertEqual(def_origin.loc.col_offset, 0)
    self.assertEqual(def_origin.source_code_line, 'def simple_function(x):')
    self.assertIsNone(def_origin.comment)

    docstring_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN)
    self.assertEqual(docstring_origin.loc.lineno, 24)
    self.assertEqual(docstring_origin.loc.col_offset, 2)
    self.assertEqual(docstring_origin.source_code_line, '  """Docstring."""')
    self.assertIsNone(docstring_origin.comment)

    ret_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN)
    self.assertEqual(ret_origin.loc.lineno, 25)
    self.assertEqual(ret_origin.loc.col_offset, 2)
    self.assertEqual(ret_origin.source_code_line, '  return x  # comment')
    self.assertEqual(ret_origin.comment, 'comment')

  def test_resolve_entity_nested_function(self):
    test_fn = basic_definitions.nested_functions
    node, source = parser.parse_entity(
        test_fn, inspect_utils.getfutureimports(test_fn))
    origin_info.resolve_entity(node, source, test_fn)

    # The line numbers below should match those in basic_definitions.py
    inner_def_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN)
    self.assertEqual(inner_def_origin.loc.lineno, 31)
    self.assertEqual(inner_def_origin.loc.col_offset, 2)
    self.assertEqual(inner_def_origin.source_code_line, '  def inner_fn(y):')
    self.assertIsNone(inner_def_origin.comment)

    inner_ret_origin = anno.getanno(node.body[1].body[0], anno.Basic.ORIGIN)
    self.assertEqual(inner_ret_origin.loc.lineno, 32)
    self.assertEqual(inner_ret_origin.loc.col_offset, 4)
    self.assertEqual(inner_ret_origin.source_code_line, '    return y')
    self.assertIsNone(inner_ret_origin.comment)

  def test_resolve_entity_indented_block(self):
    test_fn = basic_definitions.SimpleClass.simple_method
    node, source = parser.parse_entity(
        test_fn, inspect_utils.getfutureimports(test_fn))
    origin_info.resolve_entity(node, source, test_fn)

    # The line numbers below should match those in basic_definitions.py
    def_origin = anno.getanno(node, anno.Basic.ORIGIN)
    self.assertEqual(def_origin.loc.lineno, 46)
    self.assertEqual(def_origin.loc.col_offset, 2)
    self.assertEqual(def_origin.source_code_line, 'def simple_method(self):')
    self.assertIsNone(def_origin.comment)

    ret_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN)
    self.assertEqual(ret_origin.loc.lineno, 47)
    self.assertEqual(ret_origin.loc.col_offset, 4)
    self.assertEqual(ret_origin.source_code_line, '  return self')
    self.assertIsNone(ret_origin.comment)

  def test_resolve_entity_decorated_function(self):
    test_fn = basic_definitions.decorated_function
    node, source = parser.parse_entity(
        test_fn, inspect_utils.getfutureimports(test_fn))
    origin_info.resolve_entity(node, source, test_fn)

    # The line numbers below should match those in basic_definitions.py
    def_origin = anno.getanno(node, anno.Basic.ORIGIN)
    # Python 3.8 reports the def line; earlier versions report the decorator.
    if sys.version_info >= (3, 8):
      self.assertEqual(def_origin.loc.lineno, 67)
      self.assertEqual(
          def_origin.source_code_line, 'def decorated_function(x):')
    else:
      self.assertEqual(def_origin.loc.lineno, 65)
      self.assertEqual(def_origin.source_code_line, '@basic_decorator')
    self.assertEqual(def_origin.loc.col_offset, 0)
    self.assertIsNone(def_origin.comment)

    if_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN)
    self.assertEqual(if_origin.loc.lineno, 68)
    self.assertEqual(if_origin.loc.col_offset, 2)
    self.assertEqual(if_origin.source_code_line, '  if x > 0:')
    self.assertIsNone(if_origin.comment)

    ret1_origin = anno.getanno(node.body[0].body[0], anno.Basic.ORIGIN)
    self.assertEqual(ret1_origin.loc.lineno, 69)
    self.assertEqual(ret1_origin.loc.col_offset, 4)
    self.assertEqual(ret1_origin.source_code_line, '    return 1')
    self.assertIsNone(ret1_origin.comment)

    ret2_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN)
    self.assertEqual(ret2_origin.loc.lineno, 70)
    self.assertEqual(ret2_origin.loc.col_offset, 2)
    self.assertEqual(ret2_origin.source_code_line, '  return 2')
    self.assertIsNone(ret2_origin.comment)
# Standard test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/origin_info_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A node transformer that includes utilities for SCT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import pretty_printer
from tensorflow.python.autograph.pyct import templates
# TODO(znado): Use namedtuple.
class Context(object):
  """Holds mutable state about an in-progress source code transformation.

  Updated as conversion proceeds; not thread safe.

  Attributes:
    info: EntityInfo, immutable description of the entity being converted.
    current_origin: origin_info.OriginInfo of the last AST node processed
      successfully, or None. Useful for error handling.
  """

  def __init__(self, info):
    self.current_origin = None
    self.info = info
# TODO(mdan): Move to a standalone file.
class EntityInfo(
    collections.namedtuple(
        'EntityInfo',
        ('source_code', 'source_file', 'future_features', 'namespace'))):
  """Immutable description of a Python entity (e.g. a function or a class).

  Attributes:
    source_code: The entity's source code.
    source_file: The entity's source file.
    future_features: Tuple[Text], the __future__ features the entity was
      compiled with. See
      https://docs.python.org/2/reference/simple_stmts.html#future.
    namespace: Dict[str, ], the symbols visible to the entity (excluding
      parameters).
  """
  pass
class _StateStack(object):
"""Typed stack abstraction.
This class provides syntactic sugar for a stack of objects of known
type. It allows accessing attributes of the object at the top of the stack
directly against this object, which allows for very terse syntax.
For example, this code:
stack = _StateStack(Foo)
stack.enter()
stack.bar
Is equivalent to:
stack = []
stack.append(Foo())
foo = stack[-1]
foo.bar
See _State for more on how this is used.
Attributes:
type: Any, the type of objects that this stack holds
level: int, the current stack depth
stack: List[Any], the actual stack
value: Any, the instance of the object at the top of the stack
"""
def __init__(self, type_):
# Because we override __setattr__, we need to attach these attributes using
# the superclass' setattr.
object.__setattr__(self, 'type', type_)
object.__setattr__(self, '_stack', [])
if not hasattr(type_, 'no_root'):
self.enter()
def enter(self):
self._stack.append(self.type())
def exit(self):
return self._stack.pop()
@property
def stack(self):
return self._stack
@property
def level(self):
return len(self._stack)
@property
def value(self):
return self._stack[-1]
def __iter__(self):
return iter(self._stack)
def __getattr__(self, key):
return getattr(self._stack[-1], key)
def __setattr__(self, key, value):
setattr(self._stack[-1], key, value)
class _State(object):
  """Supporting class for nested scope variable space for converter.Base.

  Offers syntactic sugar over a dict of stacks of objects of known type,
  useful for keeping state during AST walks. Multiple scopes can be tracked
  in parallel, e.g.:

    s = _State()
    s[foo].enter()
    s[bar].enter()  # this will not affect s[foo]

  Element access has special semantics:
    * keys are a data type
    * element values are _StateStack(type=key) objects
    * missing elements are automatically added, similarly to defaultdict

  For example, the following block:

    _State s
    s[Foo]

  Is equivalent to:

    s = {}
    if Foo not in s:
      s[Foo] = Foo()
    s[Foo]

  See Base for how it's used.
  """

  def __init__(self):
    self._value = {}

  def __getitem__(self, key):
    # Lazily create one typed stack per key, defaultdict-style.
    try:
      return self._value[key]
    except KeyError:
      self._value[key] = stack = _StateStack(key)
      return stack
class Base(gast.NodeTransformer):
"""Base class for general-purpose code transformers transformers.
This is an extension of ast.NodeTransformer that provides a few additional
functions, like state tracking within the scope of arbitrary node, helpers
for processing code blocks, debugging, mapping of transformed code to
original code, and others.
Scope-local state tracking: to keep state across nodes, at the level of
(possibly nested) scopes, use enter/exit_local_scope and set/get_local.
You must call enter/exit_local_scope manually, but the transformer detects
when they are not properly paired.
The transformer allows keeping state across calls to visit_* that is local to
arbitrary nodes and their descendants, using the self.state attribute.
Multiple independent scopes are allowed and automatically constructed.
For example, to keep track of the If node that encloses any Name node, one can
write:
class FooType(object):
def __init__(self):
self.foo_property = None
class DummyTransformer(Base):
def visit_If(self, node):
self.state[FooType].enter()
self.state[FooType].foo_property = node
def visit_Name(self, node):
self.state[FooType].foo_property # will hold the innermost enclosing if
"""
# TODO(mdan): Document all extra features.
def __init__(self, ctx):
"""Initialize the transformer.
Subclasses should call this.
Args:
ctx: A Context object.
"""
self._lineno = 0
self._col_offset = 0
self.ctx = ctx
self._enclosing_entities = []
# A stack that allows keeping mutable, scope-local state where scopes may be
# nested. For example, it can be used to track the usage of break
# statements in each loop, where loops may be nested.
self._local_scope_state = []
self.enter_local_scope()
# Allows scoping of local variables to keep state across calls to visit_*
# methods. Multiple scope hierchies may exist and are keyed by tag. A scope
# is valid at one or more nodes and all its children. Scopes created in
# child nodes supersede their parent. Scopes are isolated from one another.
self.state = _State()
@property
def enclosing_entities(self):
return tuple(self._enclosing_entities)
@property
def local_scope_level(self):
return len(self._local_scope_state)
def enter_local_scope(self, inherit=None):
"""Deprecated.
Use self.state instead.
Marks entry into a new local scope.
Args:
inherit: Optional enumerable of variable names to copy from the parent
scope.
"""
scope_entered = {}
if inherit:
this_scope = self._local_scope_state[-1]
for name in inherit:
if name in this_scope:
scope_entered[name] = this_scope[name]
self._local_scope_state.append(scope_entered)
def exit_local_scope(self, keep=None):
"""Deprecated.
Use self.state instead.
Marks exit from the current local scope.
Args:
keep: Optional enumerable of variable names to copy into the parent scope.
Returns:
A dict containing the scope that has just been exited.
"""
scope_left = self._local_scope_state.pop()
if keep:
this_scope = self._local_scope_state[-1]
for name in keep:
if name in scope_left:
this_scope[name] = scope_left[name]
return scope_left
  def set_local(self, name, value):
    """Deprecated. Use self.state instead."""
    # Writes into the innermost scope only.
    self._local_scope_state[-1][name] = value
  def get_local(self, name, default=None):
    """Deprecated. Use self.state instead."""
    # Reads from the innermost scope only; no fallback to outer scopes.
    return self._local_scope_state[-1].get(name, default)
  def debug_print(self, node):
    """Helper method useful for debugging. Prints the AST."""
    # No-op when Python runs with -O (__debug__ is False).
    if __debug__:
      print(pretty_printer.fmt(node))
    return node
  def debug_print_src(self, node):
    """Helper method useful for debugging. Prints the AST as code."""
    if __debug__:
      # NOTE(review): _get_source below unpacks ast_to_source() as a
      # (source, _) pair, while here the raw return value is printed —
      # confirm which form compiler.ast_to_source actually returns.
      print(compiler.ast_to_source(node))
    return node
  def create_assignment(self, target, expression):
    """Returns a `target = expression` assignment built from a template."""
    template = """
      target = expression
    """
    return templates.replace(template, target=target, expression=expression)
  def visit_block(self, nodes, before_visit=None, after_visit=None):
    """A more powerful version of generic_visit for statement blocks.

    An example of a block is the body of an if statement.

    This function allows specifying a postprocessing callback (the
    after_visit argument) argument which can be used to move nodes to a new
    destination. This is done by after_visit by returning a non-null
    second return value, e.g. return new_node, new_destination.

    For example, a transformer could perform the following move:

        foo()
        bar()
        baz()

        foo()
        if cond:
          bar()
          baz()

    The above could be done with a postprocessor of this kind:

        def after_visit(node):
          if node_is_function_call(bar):
            new_container_node = build_cond()
            new_container_node.body.append(node)
            return new_container_node, new_container_node.body
          else:
            # Once we set a new destination, all subsequent items will be
            # moved to it, so we don't need to explicitly handle baz.
            return node, None

    Args:
      nodes: enumerable of AST node objects. If None, the function returns None.
      before_visit: optional callable that is called before visiting each item
        in nodes
      after_visit: optional callable that takes in an AST node and returns a
        tuple (new_node, new_destination). It is called after visiting each
        item in nodes. Is used in the same way as the visit_* methods:
        new_node will replace the node; if not None, new_destination must be a
        list, and subsequent nodes will be placed in this list instead of the
        list returned by visit_block.

    Returns:
      A list of AST node objects containing the transformed items from nodes,
      except those nodes that have been relocated using after_visit.
    """
    if nodes is None:
      return None
    results = []
    node_destination = results
    for node in nodes:
      if before_visit:
        # TODO(mdan): We can modify node here too, if ever needed.
        before_visit()
      replacement = self.visit(node)
      if after_visit and replacement:
        replacement, new_destination = after_visit(replacement)
      else:
        new_destination = None
      if replacement:
        # A visit_* method may expand a single node into several; splice lists.
        if isinstance(replacement, (list, tuple)):
          node_destination.extend(replacement)
        else:
          node_destination.append(replacement)
      # Allow the postprocessor to reroute the remaining nodes to a new list.
      if new_destination is not None:
        node_destination = new_destination
    return results
  # TODO(mdan): Remove.
  def apply_to_single_assignments(self, targets, values, apply_fn):
    """Applies a function to each individual assignment.

    This function can process a possibly-unpacked (e.g. a, b = c, d) assignment.
    It tries to break down the unpacking if possible. In effect, it has the same
    effect as passing the assigned values in SSA form to apply_fn.

    Examples:

    The following will result in apply_fn(a, c), apply_fn(b, d):

        a, b = c, d

    The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):

        a, b = c

    The following will result in apply_fn(a, (b, c)):

        a = b, c

    It uses the visitor pattern to allow subclasses to process single
    assignments individually.

    Args:
      targets: list, tuple of or individual AST node. Should be used with the
        targets field of an ast.Assign node.
      values: an AST node.
      apply_fn: a function of a single argument, which will be called with the
        respective nodes of each single assignment. The signature is
        apply_fn(target, value), no return value.
    """
    if not isinstance(targets, (list, tuple)):
      targets = (targets,)
    for target in targets:
      if isinstance(target, (gast.Tuple, gast.List)):
        for i in range(len(target.elts)):
          target_el = target.elts[i]
          if isinstance(values, (gast.Tuple, gast.List)):
            value_el = values.elts[i]
          else:
            # The value is not a matching tuple/list; synthesize `values[i]`.
            value_el = gast.Subscript(values, gast.Index(i), ctx=gast.Store())
          self.apply_to_single_assignments(target_el, value_el, apply_fn)
      else:
        # TODO(mdan): Look into allowing to rewrite the AST here.
        apply_fn(target, values)
  def _get_source(self, node):
    """Best-effort rendering of `node` as source text; never raises."""
    try:
      source, _ = compiler.ast_to_source(node)
      return source
    # pylint: disable=broad-except
    # This function is used for error reporting. If an exception occurs here,
    # it should be suppressed, in favor of emitting as informative a message
    # about the original error as possible.
    except Exception:
      return '<could not convert AST to source>'
  def visit(self, node):
    """Visits `node`, maintaining entity/origin bookkeeping around dispatch."""
    if not isinstance(node, gast.AST):
      # This is not that uncommon a mistake: various node bodies are lists, for
      # example, posing a land mine for transformers that need to recursively
      # call `visit`. The error needs to be raised before the exception handler
      # below is installed, because said handler will mess up if `node` is not,
      # in fact, a node.
      msg = ('invalid value for "node": expected "ast.AST", got "{}"; to'
             ' visit lists of nodes, use "visit_block" instead').format(
                 type(node))
      raise ValueError(msg)
    did_enter_function = False
    local_scope_size_at_entry = len(self._local_scope_state)
    processing_expr_node = False
    parent_origin = self.ctx.current_origin
    if isinstance(node, (gast.FunctionDef, gast.ClassDef, gast.Lambda)):
      did_enter_function = True
    elif isinstance(node, gast.Expr):
      processing_expr_node = True
    if did_enter_function:
      self._enclosing_entities.append(node)
    if anno.hasanno(node, anno.Basic.ORIGIN):
      self.ctx.current_origin = anno.getanno(node, anno.Basic.ORIGIN)
    if processing_expr_node:
      # Remember the original value so a value replacement can be detected.
      entry_expr_value = node.value
    # NOTE(review): when SKIP_PROCESSING is annotated, `result` is never
    # assigned and the code below would raise NameError — confirm such nodes
    # never reach this visitor.
    if not anno.hasanno(node, anno.Basic.SKIP_PROCESSING):
      result = super(Base, self).visit(node)
    self.ctx.current_origin = parent_origin
    # Adjust for consistency: replacing the value of an Expr with
    # an Assign node removes the need for the Expr node.
    if processing_expr_node:
      if isinstance(result, gast.Expr) and result.value != entry_expr_value:
        # When the replacement is a list, it is assumed that the list came
        # from a template that contained a number of statements, which
        # themselves are standalone and don't require an enclosing Expr.
        if isinstance(result.value,
                      (list, tuple, gast.Assign, gast.AugAssign)):
          result = result.value
    # By default, all replacements receive the origin info of the replaced node.
    if result is not node and result is not None:
      nodes_to_adjust = result
      if isinstance(result, (list, tuple)):
        nodes_to_adjust = result
      else:
        nodes_to_adjust = (result,)
      for n in nodes_to_adjust:
        if not anno.hasanno(n, anno.Basic.ORIGIN):
          inherited_origin = anno.getanno(
              node, anno.Basic.ORIGIN, default=parent_origin)
          if inherited_origin is not None:
            anno.setanno(n, anno.Basic.ORIGIN, inherited_origin)
    # On exception, the local scope integrity is not guaranteed.
    if did_enter_function:
      self._enclosing_entities.pop()
    if local_scope_size_at_entry != len(self._local_scope_state):
      raise AssertionError(
          'Inconsistent local scope stack. Before entering node %s, the'
          ' stack had length %d, after exit it has length %d. This'
          ' indicates enter_local_scope and exit_local_scope are not'
          ' well paired.' % (node, local_scope_size_at_entry,
                             len(self._local_scope_state)))
    return result
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/transformer.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for qual_names module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct.qual_names import QN
from tensorflow.python.autograph.pyct.qual_names import resolve
from tensorflow.python.platform import test
class QNTest(test.TestCase):
  """Unit tests for QN construction, rendering, equality and support sets."""

  def test_from_str(self):
    """Parses plain, dotted and subscripted names into QNs."""
    a = QN('a')
    b = QN('b')
    a_dot_b = QN(a, attr='b')
    a_sub_b = QN(a, subscript=b)
    self.assertEqual(qual_names.from_str('a.b'), a_dot_b)
    self.assertEqual(qual_names.from_str('a'), a)
    self.assertEqual(qual_names.from_str('a[b]'), a_sub_b)

  def test_basic(self):
    """Checks qn/str/ssf/ast rendering and parent for simple/dotted names."""
    a = QN('a')
    self.assertEqual(a.qn, ('a',))
    self.assertEqual(str(a), 'a')
    self.assertEqual(a.ssf(), 'a')
    self.assertEqual(a.ast().id, 'a')
    self.assertFalse(a.is_composite())
    # A simple name has no parent.
    with self.assertRaises(ValueError):
      _ = a.parent
    a_b = QN(a, attr='b')
    self.assertEqual(a_b.qn, (a, 'b'))
    self.assertEqual(str(a_b), 'a.b')
    self.assertEqual(a_b.ssf(), 'a_b')
    self.assertEqual(a_b.ast().value.id, 'a')
    self.assertEqual(a_b.ast().attr, 'b')
    self.assertTrue(a_b.is_composite())
    self.assertEqual(a_b.parent.qn, ('a',))

  def test_subscripts(self):
    """Checks subscripted and nested-subscript QNs."""
    a = QN('a')
    b = QN('b')
    a_sub_b = QN(a, subscript=b)
    self.assertEqual(a_sub_b.qn, (a, b))
    self.assertEqual(str(a_sub_b), 'a[b]')
    self.assertEqual(a_sub_b.ssf(), 'a_sub_b')
    self.assertEqual(a_sub_b.ast().value.id, 'a')
    self.assertEqual(a_sub_b.ast().slice.value.id, 'b')
    self.assertTrue(a_sub_b.is_composite())
    self.assertTrue(a_sub_b.has_subscript())
    self.assertEqual(a_sub_b.parent.qn, ('a',))
    c = QN('c')
    b_sub_c = QN(b, subscript=c)
    a_sub_b_sub_c = QN(a, subscript=b_sub_c)
    self.assertEqual(a_sub_b_sub_c.qn, (a, b_sub_c))
    self.assertTrue(a_sub_b.is_composite())
    self.assertTrue(a_sub_b_sub_c.is_composite())
    self.assertTrue(a_sub_b.has_subscript())
    self.assertTrue(a_sub_b_sub_c.has_subscript())
    self.assertEqual(b_sub_c.qn, (b, c))
    self.assertEqual(str(a_sub_b_sub_c), 'a[b[c]]')
    self.assertEqual(a_sub_b_sub_c.ssf(), 'a_sub_b_sub_c')
    self.assertEqual(a_sub_b_sub_c.ast().value.id, 'a')
    self.assertEqual(a_sub_b_sub_c.ast().slice.value.value.id, 'b')
    self.assertEqual(a_sub_b_sub_c.ast().slice.value.slice.value.id, 'c')
    self.assertEqual(b_sub_c.ast().slice.value.id, 'c')
    self.assertEqual(a_sub_b_sub_c.parent.qn, ('a',))
    # attr and subscript are mutually exclusive constructor arguments.
    with self.assertRaises(ValueError):
      QN('a', 'b')

  def test_equality(self):
    """Equal QNs compare equal by value, not identity."""
    a = QN('a')
    a2 = QN('a')
    a_b = QN(a, attr='b')
    self.assertEqual(a2.qn, ('a',))
    with self.assertRaises(ValueError):
      _ = a.parent
    a_b2 = QN(a, attr='b')
    self.assertEqual(a_b2.qn, (a, 'b'))
    self.assertEqual(a_b2.parent.qn, ('a',))
    self.assertTrue(a2 == a)
    self.assertFalse(a2 is a)
    self.assertTrue(a_b.parent == a)
    self.assertTrue(a_b2.parent == a)
    self.assertTrue(a_b2 == a_b)
    self.assertFalse(a_b2 is a_b)
    self.assertFalse(a_b2 == a)
    a_sub_b = QN(a, subscript='b')
    a_sub_b2 = QN(a, subscript='b')
    self.assertTrue(a_sub_b == a_sub_b2)
    # a[b] and a.b are distinct names.
    self.assertFalse(a_sub_b == a_b)

  def test_nested_attrs_subscripts(self):
    """Mixed attribute/subscript nestings render and compare distinctly."""
    a = QN('a')
    b = QN('b')
    c = QN('c')
    b_sub_c = QN(b, subscript=c)
    a_sub_b_sub_c = QN(a, subscript=b_sub_c)
    b_dot_c = QN(b, attr='c')
    a_sub__b_dot_c = QN(a, subscript=b_dot_c)
    a_sub_b = QN(a, subscript=b)
    a_sub_b__dot_c = QN(a_sub_b, attr='c')
    a_dot_b = QN(a, attr='b')
    a_dot_b_sub_c = QN(a_dot_b, subscript=c)
    self.assertEqual(str(a_sub_b_sub_c), 'a[b[c]]')
    self.assertEqual(str(a_sub__b_dot_c), 'a[b.c]')
    self.assertEqual(str(a_sub_b__dot_c), 'a[b].c')
    self.assertEqual(str(a_dot_b_sub_c), 'a.b[c]')
    self.assertNotEqual(a_sub_b_sub_c, a_sub__b_dot_c)
    self.assertNotEqual(a_sub_b_sub_c, a_sub_b__dot_c)
    self.assertNotEqual(a_sub_b_sub_c, a_dot_b_sub_c)
    self.assertNotEqual(a_sub__b_dot_c, a_sub_b__dot_c)
    self.assertNotEqual(a_sub__b_dot_c, a_dot_b_sub_c)
    self.assertNotEqual(a_sub_b__dot_c, a_dot_b_sub_c)

  def test_hashable(self):
    """QNs are usable as dict keys."""
    d = {QN('a'): 'a', QN('b'): 'b'}
    self.assertEqual(d[QN('a')], 'a')
    self.assertEqual(d[QN('b')], 'b')
    self.assertNotIn(QN('c'), d)

  def test_literals(self):
    """Literal subscripts are distinct from same-spelled name subscripts."""
    a = QN('a')
    a_sub_str_b = QN(a, subscript=QN(qual_names.StringLiteral('b')))
    a_sub_b = QN(a, subscript=QN('b'))
    self.assertNotEqual(a_sub_str_b, a_sub_b)
    self.assertNotEqual(hash(a_sub_str_b), hash(a_sub_b))
    a_sub_three = QN(a, subscript=QN(qual_names.NumberLiteral(3)))
    self.assertEqual(a_sub_three.ast().slice.value.value, 3)

  def test_support_set(self):
    """support_set contains the names a QN's value depends on."""
    a = QN('a')
    b = QN('b')
    c = QN('c')
    a_sub_b = QN(a, subscript=b)
    a_dot_b = QN(a, attr='b')
    a_dot_b_dot_c = QN(a_dot_b, attr='c')
    a_dot_b_sub_c = QN(a_dot_b, subscript=c)
    self.assertSetEqual(a.support_set, set((a,)))
    self.assertSetEqual(a_sub_b.support_set, set((a, b)))
    self.assertSetEqual(a_dot_b.support_set, set((a,)))
    self.assertSetEqual(a_dot_b_dot_c.support_set, set((a,)))
    self.assertSetEqual(a_dot_b_sub_c.support_set, set((a, c)))
class QNResolverTest(test.TestCase):
  """Tests for qual_names.resolve, which annotates nodes with their QN."""

  def assertQNStringIs(self, node, qn_str):
    """Asserts that the node's QN annotation renders as qn_str."""
    self.assertEqual(str(anno.getanno(node, anno.Basic.QN)), qn_str)

  def test_resolve(self):
    """Resolves plain names, attributes, containers and calls."""
    samples = """
      a
      a.b
      (c, d.e)
      [f, (g.h.i)]
      j(k, l)
    """
    nodes = parser.parse_str(textwrap.dedent(samples), single_node=False)
    nodes = tuple(resolve(node).value for node in nodes)
    self.assertQNStringIs(nodes[0], 'a')
    self.assertQNStringIs(nodes[1], 'a.b')
    self.assertQNStringIs(nodes[2].elts[0], 'c')
    self.assertQNStringIs(nodes[2].elts[1], 'd.e')
    self.assertQNStringIs(nodes[3].elts[0], 'f')
    self.assertQNStringIs(nodes[3].elts[1], 'g.h.i')
    self.assertQNStringIs(nodes[4].func, 'j')
    self.assertQNStringIs(nodes[4].args[0], 'k')
    self.assertQNStringIs(nodes[4].args[1], 'l')

  def test_subscript_resolve(self):
    """Resolves subscript expressions, including deeply nested ones."""
    samples = """
      x[i]
      x[i.b]
      a.b[c]
      a.b[x.y]
      a[z[c]]
      a[b[c[d]]]
      a[b].c
      a.b.c[d].e.f
      a.b[c[d]].e.f
      a.b[c[d.e.f].g].h
    """
    nodes = parser.parse_str(textwrap.dedent(samples), single_node=False)
    nodes = tuple(resolve(node).value for node in nodes)
    self.assertQNStringIs(nodes[0], 'x[i]')
    self.assertQNStringIs(nodes[1], 'x[i.b]')
    self.assertQNStringIs(nodes[2], 'a.b[c]')
    self.assertQNStringIs(nodes[3], 'a.b[x.y]')
    self.assertQNStringIs(nodes[4], 'a[z[c]]')
    self.assertQNStringIs(nodes[5], 'a[b[c[d]]]')
    self.assertQNStringIs(nodes[6], 'a[b].c')
    self.assertQNStringIs(nodes[7], 'a.b.c[d].e.f')
    self.assertQNStringIs(nodes[8], 'a.b[c[d]].e.f')
    self.assertQNStringIs(nodes[9], 'a.b[c[d.e.f].g].h')

  def test_function_calls(self):
    """Call results have no QN; only the callee and static parts do."""
    samples = """
      a.b
      a.b()
      a().b
      z[i]
      z[i]()
      z()[i]
    """
    nodes = parser.parse_str(textwrap.dedent(samples), single_node=False)
    nodes = tuple(resolve(node).value for node in nodes)
    self.assertQNStringIs(nodes[0], 'a.b')
    self.assertQNStringIs(nodes[1].func, 'a.b')
    self.assertQNStringIs(nodes[2].value.func, 'a')
    self.assertQNStringIs(nodes[3], 'z[i]')
    self.assertQNStringIs(nodes[4].func, 'z[i]')
    self.assertQNStringIs(nodes[5].value.func, 'z')
# Standard test runner entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/qual_names_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anno module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.platform import test
# TODO(mdan): Consider strong types instead of primitives.
class AnnoTest(test.TestCase):
  """Unit tests for the anno module's get/set/del/copy/dup primitives."""

  def test_basic(self):
    """Checks the full set/get/del annotation lifecycle on a single node."""
    node = ast.Name()
    self.assertEqual(anno.keys(node), set())
    self.assertFalse(anno.hasanno(node, 'foo'))
    # Without a default, a missing annotation raises.
    with self.assertRaises(AttributeError):
      anno.getanno(node, 'foo')
    anno.setanno(node, 'foo', 3)
    self.assertEqual(anno.keys(node), {'foo'})
    self.assertTrue(anno.hasanno(node, 'foo'))
    self.assertEqual(anno.getanno(node, 'foo'), 3)
    self.assertEqual(anno.getanno(node, 'bar', default=7), 7)
    anno.delanno(node, 'foo')
    self.assertEqual(anno.keys(node), set())
    self.assertFalse(anno.hasanno(node, 'foo'))
    with self.assertRaises(AttributeError):
      anno.getanno(node, 'foo')
    self.assertIsNone(anno.getanno(node, 'foo', default=None))

  def test_copy(self):
    """copyanno copies only annotations present on the source node."""
    node_1 = ast.Name()
    anno.setanno(node_1, 'foo', 3)
    node_2 = ast.Name()
    anno.copyanno(node_1, node_2, 'foo')
    anno.copyanno(node_1, node_2, 'bar')
    self.assertTrue(anno.hasanno(node_2, 'foo'))
    self.assertFalse(anno.hasanno(node_2, 'bar'))

  def test_duplicate(self):
    """dup copies mapped keys tree-wide, keeping the originals."""
    node = ast.If(
        test=ast.Num(1),
        body=[ast.Expr(ast.Name('bar', ast.Load()))],
        orelse=[])
    anno.setanno(node, 'spam', 1)
    anno.setanno(node, 'ham', 1)
    anno.setanno(node.body[0], 'ham', 1)
    anno.dup(node, {'spam': 'eggs'})
    self.assertTrue(anno.hasanno(node, 'spam'))
    self.assertTrue(anno.hasanno(node, 'ham'))
    self.assertTrue(anno.hasanno(node, 'eggs'))
    # The child node has no 'spam', so it gains no 'eggs'.
    self.assertFalse(anno.hasanno(node.body[0], 'eggs'))
# Standard test runner entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/anno_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code transformation exceptions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from tensorflow.python.autograph.pyct import origin_info
class FrameInfo(
    collections.namedtuple(
        'FrameInfo',
        ('filename', 'lineno', 'function_name', 'code', 'converted'))):
  """A single stack trace entry.

  `converted` is True when the frame was mapped back to original user code.
  """
  pass
def _stack_trace_inside_mapped_code(tb, source_map):
  """Summarizes inner traceback frames up to the call to a given function.

  This functions locates the innermost (i.e. most recent) frame that corresponds
  to code that can be mapped by source_map originated from, and returns a
  translated stack trace ending at that frame. If no such frame is found, the
  entire stack trace is summarized.

  For example, the following code:

    def f():
      for i in tf.range(1):
        z = y + i  # z only defined here

  Would generate this traceback:

    <converted code>
        ag__.for_stmt(...)
    <for_stmt>
        return _known_len_tf_for_stmt(iter_, extra_test, body, init_state)
    <_known_len_tf_for_stmt>
        _disallow_undefs_into_loop(*init_state)
    <_disallow_undefs_into_loop>
        raise ...

  Which is then processed into:

    <f>
        for i in tf.range(1):
    <for_stmt>
        return _known_len_tf_for_stmt(iter_, extra_test, body, init_state)
    <_known_len_tf_for_stmt>
        _disallow_undefs_into_loop(*init_state)
    <_disallow_undefs_into_loop>
        raise ...

  Args:
    tb: List[Tuple], the traceback corresponding to an error; typically,
      the output of traceback.extract_tb.
    source_map: Dict[LineLocation, OriginInfo], a source map as created by
      origin_info.create_source_map.

  Returns:
    Tuple[FrameInfo, ...], ordered innermost frame first.
  """
  result_frames = []
  # Walk from the innermost frame outward. The first frame that maps back to
  # user code is replaced with its original location, and the walk stops there.
  for filename, line_number, function_name, text in reversed(tb):
    loc = origin_info.LineLocation(filename=filename, lineno=line_number)
    if loc in source_map:
      origin = source_map[loc]
      origin_frame_info = FrameInfo(
          filename=origin.loc.filename,
          lineno=origin.loc.lineno,
          function_name=origin.function_name,
          code=origin.source_code_line,
          converted=True)
      result_frames.append(origin_frame_info)
      break

    fi = FrameInfo(
        filename=filename,
        lineno=line_number,
        function_name=function_name,
        code=text,
        converted=False)
    result_frames.append(fi)
  # NOTE: the result stays innermost-first; consumers reverse it for display.
  return tuple(result_frames)
# Exception types known to accept a single message string in their
# constructor, so they can be reconstructed with a custom message.
KNOWN_STRING_CONSTRUCTOR_ERRORS = (
    AssertionError,
    AttributeError,
    NameError,
    NotImplementedError,
    RuntimeError,
    StopIteration,
    TypeError,
    ValueError,
)
# KeyError.__str__ renders its argument with repr(), which escapes newlines.
# This subclass reports a preformatted multi-line message verbatim instead,
# while still carrying the original key in args. Its class name is masked as
# "KeyError" so tracebacks look familiar; hopefully that won't create too many
# surprises.
class MultilineMessageKeyError(KeyError):

  def __init__(self, message, original_key):
    super(MultilineMessageKeyError, self).__init__(original_key)
    self.__formatted_message = message

  def __str__(self):
    return self.__formatted_message


MultilineMessageKeyError.__name__ = KeyError.__name__
class ErrorMetadataBase(object):
  """Container objects attached to exceptions in converted code.

  This metadata allows re-raising exceptions that occur in generated code, with
  a custom error message that includes a stack trace relative to user-readable
  code from which the generated code originated.
  """

  def __init__(self, callsite_tb, cause_metadata, cause_message, source_map):
    """Builds the metadata, chaining onto the cause's metadata when present.

    Args:
      callsite_tb: List[Tuple], traceback of the call site, typically the
        output of traceback.extract_tb.
      cause_metadata: Optional metadata already attached to the causing
        exception; None for the innermost error.
      cause_message: Text, the message of the root cause.
      source_map: Dict[LineLocation, OriginInfo], see origin_info.py.
    """
    translated_stack = _stack_trace_inside_mapped_code(callsite_tb, source_map)
    if cause_metadata is None:
      self.translated_stack = translated_stack
      self.cause_message = cause_message
    else:
      # Daisy chain the translated stacks.
      self.translated_stack = (
          cause_metadata.translated_stack + (translated_stack[-1],))
      self.cause_message = cause_metadata.cause_message

  def get_message(self):
    """Returns the message for the underlying exception."""
    all_paths = tuple(fi.filename for fi in self.translated_stack)
    if len(all_paths) > 1:
      # Abbreviate file names by stripping their longest common directory.
      common_path = os.path.dirname(os.path.commonprefix(all_paths))
      if common_path == os.path.sep:
        common_path = ''
      if common_path:
        path_idx = len(common_path) + 1
      else:
        path_idx = 0
    else:
      common_path = ''
      path_idx = 0
    lines = []
    lines.append('in converted code:')
    if common_path:
      lines.append(' relative to {}:'.format(common_path))
    lines.append('')
    # Frames are stored innermost-first; display them outermost-first.
    for frame_info in reversed(self.translated_stack):
      lines.append(' {}:{} {}{}'.format(
          frame_info.filename[path_idx:],
          frame_info.lineno,
          frame_info.function_name,
          ' *' if frame_info.converted else '',
      ))
      if frame_info.code is None:
        code_snippet = '<source unavailable>'
      else:
        code_snippet = frame_info.code.strip()
      lines.append(' {}'.format(code_snippet))
    lines.append('')
    # Indent the cause message under the translated stack.
    message_lines = self.cause_message.split('\n')
    for i in range(len(message_lines)):
      message_lines[i] = ' ' + message_lines[i]
    lines.extend(message_lines)
    lines.append('')
    return '\n'.join(lines)

  def create_exception(self, source_error):
    """Returns a new exception of source_error's type with the custom message.

    Returns None when the type cannot be rebuilt from a message string.
    """
    preferred_type = type(source_error)
    if preferred_type.__init__ is Exception.__init__:
      # The type does not customize its constructor, so a single message
      # argument is safe.
      return preferred_type(self.get_message())
    if preferred_type in KNOWN_STRING_CONSTRUCTOR_ERRORS:
      return preferred_type(self.get_message())
    elif preferred_type is KeyError:
      # KeyError escapes newlines; use the dedicated subclass instead.
      return MultilineMessageKeyError(self.get_message(), self.cause_message)
    return None

  def to_exception(self, source_error):
    # NOTE(review): if create_exception returns None (unsupported error type),
    # the attribute assignments below raise AttributeError — confirm callers
    # never pass such types here.
    exc = self.create_exception(source_error)
    exc.__suppress_context__ = True
    exc.ag_error_metadata = self
    return exc
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/errors.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gast compatibility library. Supports 0.2.2 and 0.3.2."""
# TODO(mdan): Remove this file once it's safe to break compatibility.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import gast
# gast 0.2.x exposes gast.Str; gast 0.3.x replaced it with gast.Constant.
GAST2 = hasattr(gast, 'Str')
GAST3 = not GAST2
def _is_constant_gast_2(node):
  # gast 0.2.x represents each literal kind as a distinct node class.
  return isinstance(node, (gast.Num, gast.Str, gast.Bytes, gast.Ellipsis,
                           gast.NameConstant))
def _is_constant_gast_3(node):
  # gast 0.3.x folds all literal kinds into the single Constant node.
  return isinstance(node, gast.Constant)
def is_literal(node):
  """Tests whether node represents a Python literal."""
  # Normal literals, and True/False/None/etc. on Python 3.
  if is_constant(node):
    return True
  # On Python 2, True/False/None parse as plain names.
  return isinstance(node, gast.Name) and node.id in ('True', 'False', 'None')
def _is_ellipsis_gast_2(node):
  # gast 0.2.x has a dedicated Ellipsis node class.
  return isinstance(node, gast.Ellipsis)
def _is_ellipsis_gast_3(node):
  # gast 0.3.x represents Ellipsis as a Constant carrying the singleton.
  return isinstance(node, gast.Constant) and node.value == Ellipsis
# Bind the version-appropriate predicate implementations and node
# constructors at import time.
if GAST2:
  is_constant = _is_constant_gast_2
  is_ellipsis = _is_ellipsis_gast_2

  Module = gast.Module
  Name = gast.Name
  Str = gast.Str

elif GAST3:
  is_constant = _is_constant_gast_3
  is_ellipsis = _is_ellipsis_gast_3

  # gast 0.3.x added required constructor fields; pre-bind them to defaults.
  Module = functools.partial(gast.Module, type_ignores=None)  # pylint:disable=invalid-name
  Name = functools.partial(gast.Name, type_comment=None)  # pylint:disable=invalid-name
  Str = functools.partial(gast.Constant, kind=None)  # pylint:disable=invalid-name

else:
  assert False
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/gast_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AST node annotation support.
Adapted from Tangent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
# pylint:disable=g-bad-import-order
import gast
# pylint:enable=g-bad-import-order
# TODO(mdan): Shorten the names.
# These names are heavily used, and anno.blaa
# TODO(mdan): Replace the attr-dict mechanism with a more typed solution.
class NoValue(enum.Enum):
  """Base class for annotation-key enums; members repr as their bare name."""

  def __repr__(self):
    return self.name
class Basic(NoValue):
  """Container for basic annotation keys.

  The enum values are used strictly for documentation purposes.
  """

  QN = 'Qualified name, as it appeared in the code. See qual_names.py.'
  SKIP_PROCESSING = (
      'This node should be preserved as is and not processed any further.')
  INDENT_BLOCK_REMAINDER = (
      'When a node is annotated with this, the remainder of the block should'
      ' be indented below it. The annotation contains a tuple'
      ' (new_body, name_map), where `new_body` is the new indented block and'
      ' `name_map` allows renaming symbols.')
  # NOTE(review): the file referenced in the value below is named
  # origin_info.py in this tree — confirm the intended reference.
  ORIGIN = ('Information about the source code that converted code originated'
            ' from. See origin_information.py.')
class Static(NoValue):
  """Container for static analysis annotation keys.

  The enum values are used strictly for documentation purposes.
  """

  # Symbols
  # These flags are boolean.
  IS_PARAM = 'Symbol is a parameter to the function being analyzed.'

  # Scopes
  # Scopes are represented by objects of type activity.Scope.
  SCOPE = 'The scope for the annotated node. See activity.py.'
  # TODO(mdan): Drop these in favor of accessing the child's SCOPE.
  ARGS_SCOPE = 'The scope for the argument list of a function call.'
  COND_SCOPE = 'The scope for the test node of a conditional statement.'
  BODY_SCOPE = (
      'The scope for the main body of a statement (True branch for if '
      'statements, main body for loops).')
  ORELSE_SCOPE = (
      'The scope for the orelse body of a statement (False branch for if '
      'statements, orelse body for loops).')

  # Static analysis annotations.
  DEFINITIONS = (
      'Reaching definition information. See reaching_definitions.py.')
  ORIG_DEFINITIONS = (
      'The value of DEFINITIONS that applied to the original code before any'
      ' conversion.')
  DEFINED_VARS_IN = (
      'Symbols defined when entering the node. See reaching_definitions.py.')
  LIVE_VARS_OUT = ('Symbols live when exiting the node. See liveness.py.')
  LIVE_VARS_IN = ('Symbols live when entering the node. See liveness.py.')
# Sentinel default for getanno(): distinguishes "no default supplied" from any
# real default value, including None.
FAIL = object()


def keys(node, field_name='___pyct_anno'):
  """Returns the frozenset of annotation keys attached to `node`."""
  if not hasattr(node, field_name):
    return frozenset()
  return frozenset(getattr(node, field_name))


def getanno(node, key, default=FAIL, field_name='___pyct_anno'):
  """Returns annotation `key` of `node`, or `default` when one was given.

  Without a default, a missing annotation raises AttributeError (no
  annotations at all) or KeyError (key absent).
  """
  if default is not FAIL:
    if not hasattr(node, field_name) or key not in getattr(node, field_name):
      return default
  return getattr(node, field_name)[key]
def hasanno(node, key, field_name='___pyct_anno'):
  """Returns True if `node` carries annotation `key`."""
  return key in getattr(node, field_name, {})
def setanno(node, key, value, field_name='___pyct_anno'):
  """Attaches annotation `key` with `value` to `node`."""
  if not hasattr(node, field_name):
    setattr(node, field_name, {})
  getattr(node, field_name)[key] = value
  # Register the storage attribute as a field so that the annotations
  # survive gast_to_ast() and ast_to_gast().
  if field_name not in node._fields:
    node._fields += (field_name,)
def delanno(node, key, field_name='___pyct_anno'):
  """Removes annotation `key` from `node`, dropping the field when empty."""
  annotations = getattr(node, field_name)
  del annotations[key]
  if annotations:
    return
  # Last annotation removed: also unregister the storage attribute.
  delattr(node, field_name)
  node._fields = tuple(f for f in node._fields if f != field_name)
def copyanno(from_node, to_node, key, field_name='___pyct_anno'):
  """Copies annotation `key` from `from_node` to `to_node`, when present."""
  if not hasanno(from_node, key, field_name=field_name):
    return
  value = getanno(from_node, key, field_name=field_name)
  setanno(to_node, key, value, field_name=field_name)
def dup(node, copy_map, field_name='___pyct_anno'):
  """Recursively copies annotations in an AST tree.

  Args:
    node: ast.AST
    copy_map: Dict[Hashable, Hashable], maps a source anno key to a destination
      key. All annotations with the source key will be copied to identical
      annotations with the destination key.
    field_name: str
  """
  for n in gast.walk(node):
    for k in copy_map:
      # The source annotation is kept: this duplicates rather than renames.
      if hasanno(n, k, field_name):
        setanno(n, copy_map[k], getanno(n, k, field_name), field_name)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/anno.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Container for origin source code information before AutoGraph compilation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import difflib
import os
import tokenize
import gast
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import pretty_printer
from tensorflow.python.autograph.utils import ag_logging as logging
from tensorflow.python.util import tf_inspect
class LineLocation(
    collections.namedtuple('LineLocation', ('filename', 'lineno'))):
  """A source location identified by file and line only (no column).

  Attributes:
    filename: Text
    lineno: int, 1-based
  """
class Location(
    collections.namedtuple('Location', ('filename', 'lineno', 'col_offset'))):
  """Encodes code location information.

  Attributes:
    filename: Text
    lineno: int, 1-based
    col_offset: int
    line_loc: LineLocation
  """
  @property
  def line_loc(self):
    # Convenience projection that drops the column information.
    return LineLocation(self.filename, self.lineno)
class OriginInfo(
    collections.namedtuple(
        'OriginInfo',
        ('loc', 'function_name', 'source_code_line', 'comment'))):
  """Container for information about the source code before conversion.

  Attributes:
    loc: Location
    function_name: Optional[Text]
    source_code_line: Text
    comment: Optional[Text]
  """

  def as_frame(self):
    """Returns a 4-tuple consistent with the return of traceback.extract_tb."""
    frame = (self.loc.filename, self.loc.lineno, self.function_name,
             self.source_code_line)
    return frame

  def __repr__(self):
    # Render as "<basename>:<line>:<col>", or a placeholder when no file
    # name is available.
    if not self.loc.filename:
      return '<no file>:{}:{}'.format(self.loc.lineno, self.loc.col_offset)
    basename = os.path.split(self.loc.filename)[1]
    return '{}:{}:{}'.format(basename, self.loc.lineno, self.loc.col_offset)
# TODO(mdan): This source map should be a class - easier to refer to.
def create_source_map(nodes, code, filepath):
  """Creates a source map between an annotated AST and the code it compiles to.

  Note: this function assumes nodes, code and filepath correspond to the
  same code.

  Args:
    nodes: Iterable[ast.AST, ...], one or more AST nodes.
    code: Text, the source code in which nodes are found.
    filepath: Text

  Returns:
    Dict[LineLocation, OriginInfo], mapping locations in code to locations
    indicated by origin annotations in node.
  """
  # Reparse the generated code and resolve origin info against it, so both
  # trees carry ORIGIN annotations that can be matched up pairwise.
  reparsed_nodes = parser.parse_str(code, preamble_len=0, single_node=False)
  for node in reparsed_nodes:
    resolve(node, code, filepath, node.lineno, node.col_offset)
  source_map = {}
  try:
    for before, after in ast_util.parallel_walk(nodes, reparsed_nodes):
      # Note: generated code might not be mapped back to its origin.
      # TODO(mdan): Generated code should always be mapped to something.
      origin_info = anno.getanno(before, anno.Basic.ORIGIN, default=None)
      final_info = anno.getanno(after, anno.Basic.ORIGIN, default=None)
      if origin_info is None or final_info is None:
        continue
      # Note: the keys are by line only, excluding the column offset.
      line_loc = LineLocation(final_info.loc.filename, final_info.loc.lineno)
      existing_origin = source_map.get(line_loc)
      if existing_origin is not None:
        # Overlaps may exist because of child nodes, but almost never to
        # different line locations. Exception make decorated functions, where
        # both lines are mapped to the same line in the AST.
        # Line overlaps: keep bottom node.
        if existing_origin.loc.line_loc == origin_info.loc.line_loc:
          if existing_origin.loc.lineno >= origin_info.loc.lineno:
            continue
        # In case of column overlaps, keep the leftmost node.
        if existing_origin.loc.col_offset <= origin_info.loc.col_offset:
          continue
      source_map[line_loc] = origin_info
  except ValueError:
    # parallel_walk raised: the two trees diverged structurally. Log a diff
    # at high verbosity to aid debugging, then re-raise.
    if logging.has_verbosity(3):
      for n, rn in zip(nodes, reparsed_nodes):
        nodes_str = pretty_printer.fmt(n, color=False, noanno=True)
        reparsed_nodes_str = pretty_printer.fmt(rn, color=False, noanno=True)
        diff = difflib.context_diff(
            nodes_str.split('\n'),
            reparsed_nodes_str.split('\n'),
            fromfile='Original nodes',
            tofile='Reparsed nodes',
            n=7)
        diff = '\n'.join(diff)
        logging.log(3, 'AST seems to lack integrity. Diff:\n%s', diff)
    raise
  return source_map
class _Function(object):
  """Record of a function name, stacked while its body is being visited."""

  def __init__(self, name):
    # The function's name, as read from the FunctionDef node.
    self.name = name
class OriginResolver(gast.NodeVisitor):
  """Annotates an AST with additional source information like file name."""
  def __init__(self, root_node, source_lines, comments_map,
               context_lineno, context_col_offset,
               filepath):
    self._source_lines = source_lines
    self._comments_map = comments_map
    if (hasattr(root_node, 'decorator_list') and root_node.decorator_list and
        hasattr(root_node.decorator_list[0], 'lineno')):
      # Typical case: functions. The line number of the first decorator
      # is more accurate than the line number of the function itself in
      # 3.8+. In earlier versions they coincide.
      self._lineno_offset = context_lineno - root_node.decorator_list[0].lineno
    else:
      # Fall back to the line number of the root node.
      self._lineno_offset = context_lineno - root_node.lineno
    self._col_offset = context_col_offset - root_node.col_offset
    self._filepath = filepath
    # Stack of _Function objects for the FunctionDefs enclosing the node
    # currently being visited.
    self._function_stack = []
  def _absolute_lineno(self, node):
    # Translates a node's line number into the enclosing context's numbering.
    return node.lineno + self._lineno_offset
  def _absolute_col_offset(self, node):
    # Translates a node's column offset into the enclosing context's numbering.
    return node.col_offset + self._col_offset
  def _attach_origin_info(self, node):
    """Attaches an ORIGIN annotation (and raw lineno) to a single node."""
    if self._function_stack:
      function_name = self._function_stack[-1].name
    else:
      function_name = None
    source_code_line = self._source_lines[node.lineno - 1]
    comment = self._comments_map.get(node.lineno)
    loc = Location(self._filepath, self._absolute_lineno(node),
                   self._absolute_col_offset(node))
    origin = OriginInfo(loc, function_name, source_code_line, comment)
    anno.setanno(node, 'lineno', node.lineno)
    anno.setanno(node, anno.Basic.ORIGIN, origin)
  def visit(self, node):
    # Track the enclosing function so origin info can name it; only nodes
    # that carry a lineno (i.e. real source locations) get annotated.
    entered_function = False
    if isinstance(node, gast.FunctionDef):
      entered_function = True
      self._function_stack.append(_Function(node.name))
    if hasattr(node, 'lineno'):
      self._attach_origin_info(node)
    self.generic_visit(node)
    if entered_function:
      self._function_stack.pop()
def resolve(node, source, context_filepath, context_lineno, context_col_offset):
  """Adds origin information to an AST, based on the source it was loaded from.

  This allows us to map the original source code line numbers to generated
  source code.

  Note: the AST may be a part of a larger context (e.g. a function is part of
  a module that may contain other things). However, this function does not
  assume the source argument contains the entire context, nor that it contains
  only code corresponding to node itself. However, it assumes that node was
  parsed from the given source code.
  For this reason, two extra arguments are required, and they indicate the
  location of the node in the original context.

  Args:
    node: gast.AST, the AST to annotate.
    source: Text, the source code representing node.
    context_filepath: Text
    context_lineno: int
    context_col_offset: int
  """
  # TODO(mdan): Pull this to a separate utility.
  # Collect the comment text of each commented line, keyed by its 1-based
  # row, so origin info can carry comments alongside the code.
  comments_map = {}
  reader = six.StringIO(source)
  for token in tokenize.generate_tokens(reader.readline):
    tok_type, tok_string, (start_row, _), _, _ = token
    if tok_type == tokenize.COMMENT:
      comments_map[start_row] = tok_string.strip()[1:].strip()
  visitor = OriginResolver(node, source.split('\n'), comments_map,
                           context_lineno, context_col_offset,
                           context_filepath)
  visitor.visit(node)
def resolve_entity(node, source, entity):
  """Like resolve, but extracts the context information from an entity."""
  lines, lineno = tf_inspect.getsourcelines(entity)
  filepath = tf_inspect.getsourcefile(entity)
  # Poor man's attempt at guessing the column offset: count the leading
  # whitespace of the definition line. This might not work well with tabs.
  first_line = lines[0]
  col_offset = len(first_line) - len(first_line.lstrip())
  resolve(node, source, filepath, lineno, col_offset)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/origin_info.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for manipulating qualified names.
A qualified name is a uniform way to refer to simple (e.g. 'foo') and composite
(e.g. 'foo.bar') syntactic symbols.
This is *not* related to the __qualname__ attribute used by inspect, which
refers to scopes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
class CallerMustSetThis(object):
  """Sentinel used as a placeholder `ctx` value in generated AST nodes.

  The caller receiving such a node is expected to replace it with a proper
  gast context (Load/Store/etc.).
  """
  pass
class Symbol(collections.namedtuple('Symbol', ['name'])):
  """Represents a Python symbol."""
  # NOTE(review): not referenced by the QN logic visible in this module
  # (QN's base accepts plain strings) — confirm external usage before removal.
class StringLiteral(collections.namedtuple('StringLiteral', ['value'])):
  """Represents a Python string literal."""

  def __str__(self):
    # Render with single quotes, matching Python literal syntax.
    return "'{}'".format(self.value)

  def __repr__(self):
    return self.__str__()
class NumberLiteral(collections.namedtuple('NumberLiteral', ['value'])):
  """Represents a Python numeric literal."""

  def __str__(self):
    return str(self.value)

  def __repr__(self):
    return self.__str__()
# TODO(mdan): Use subclasses to remove the has_attr has_subscript booleans.
class QN(object):
  """Represents a qualified name."""
  def __init__(self, base, attr=None, subscript=None):
    """Builds a simple (base only), attribute, or subscript qualified name.

    Args:
      base: Union[str, StringLiteral, NumberLiteral, QN]; must be a QN when
        attr or subscript is given.
      attr: Optional[str], attribute name for composite QNs like `base.attr`.
      subscript: Optional[QN], index for composite QNs like `base[subscript]`.

    Raises:
      ValueError: if both attr and subscript are given, or base has the
        wrong type for the requested form.
    """
    if attr is not None and subscript is not None:
      raise ValueError('A QN can only be either an attr or a subscript, not '
                       'both: attr={}, subscript={}.'.format(attr, subscript))
    self._has_attr = False
    self._has_subscript = False
    if attr is not None:
      if not isinstance(base, QN):
        raise ValueError(
            'for attribute QNs, base must be a QN; got instead "%s"' % base)
      if not isinstance(attr, str):
        raise ValueError('attr may only be a string; got instead "%s"' % attr)
      self._parent = base
      # TODO(mdan): Get rid of the tuple - it can only have 1 or 2 elements now.
      self.qn = (base, attr)
      self._has_attr = True
    elif subscript is not None:
      if not isinstance(base, QN):
        raise ValueError('For subscript QNs, base must be a QN.')
      self._parent = base
      self.qn = (base, subscript)
      self._has_subscript = True
    else:
      if not isinstance(base, (str, StringLiteral, NumberLiteral)):
        # TODO(mdan): Require Symbol instead of string.
        raise ValueError(
            'for simple QNs, base must be a string or a Literal object;'
            ' got instead "%s"' % type(base))
      # Simple names must not themselves contain qualification syntax.
      assert '.' not in base and '[' not in base and ']' not in base
      self._parent = None
      self.qn = (base,)
  def is_symbol(self):
    # True for simple string-based names (as opposed to literals).
    return isinstance(self.qn[0], str)
  def is_simple(self):
    # True when there is no attribute or subscript qualification.
    return len(self.qn) <= 1
  def is_composite(self):
    return len(self.qn) > 1
  def has_subscript(self):
    return self._has_subscript
  def has_attr(self):
    return self._has_attr
  @property
  def parent(self):
    # The qualifying QN, e.g. 'a.b' for 'a.b.c'; only composite QNs have one.
    if self._parent is None:
      raise ValueError('Cannot get parent of simple name "%s".' % self.qn[0])
    return self._parent
  @property
  def owner_set(self):
    """Returns all the symbols (simple or composite) that own this QN.

    In other words, if this symbol was modified, the symbols in the owner set
    may also be affected.

    Examples:
      'a.b[c.d]' has two owners, 'a' and 'a.b'
    """
    owners = set()
    if self.has_attr() or self.has_subscript():
      owners.add(self.parent)
      owners.update(self.parent.owner_set)
    return owners
  @property
  def support_set(self):
    """Returns the set of simple symbols that this QN relies on.

    This would be the smallest set of symbols necessary for the QN to
    statically resolve (assuming properties and index ranges are verified
    at runtime).

    Examples:
      'a.b' has only one support symbol, 'a'
      'a[i]' has two support symbols, 'a' and 'i'
    """
    # TODO(mdan): This might be the set of Name nodes in the AST. Track those?
    roots = set()
    if self.has_attr():
      roots.update(self.parent.support_set)
    elif self.has_subscript():
      roots.update(self.parent.support_set)
      roots.update(self.qn[1].support_set)
    else:
      roots.add(self)
    return roots
  def __hash__(self):
    # Includes the form flags so 'a.b' and 'a[b]' hash differently.
    return hash(self.qn + (self._has_attr, self._has_subscript))
  def __eq__(self, other):
    return (isinstance(other, QN) and self.qn == other.qn and
            self.has_subscript() == other.has_subscript() and
            self.has_attr() == other.has_attr())
  def __str__(self):
    if self.has_subscript():
      return str(self.qn[0]) + '[' + str(self.qn[1]) + ']'
    if self.has_attr():
      return '.'.join(map(str, self.qn))
    else:
      return str(self.qn[0])
  def __repr__(self):
    return str(self)
  def ssf(self):
    """Simple symbol form, e.g. 'a.b' -> 'a_b', 'a[b]' -> 'a_sub_b'."""
    ssfs = [n.ssf() if isinstance(n, QN) else n for n in self.qn]
    ssf_string = ''
    for i in range(0, len(self.qn) - 1):
      if self.has_subscript():
        delimiter = '_sub_'
      else:
        delimiter = '_'
      ssf_string += ssfs[i] + delimiter
    return ssf_string + ssfs[-1]
  def ast(self):
    """AST representation."""
    # The caller must adjust the context appropriately.
    if self.has_subscript():
      return gast.Subscript(
          value=self.parent.ast(),
          slice=gast.Index(self.qn[-1].ast()),
          ctx=CallerMustSetThis)
    if self.has_attr():
      return gast.Attribute(
          value=self.parent.ast(), attr=self.qn[-1], ctx=CallerMustSetThis)
    base = self.qn[0]
    if isinstance(base, str):
      return gast.Name(
          base, ctx=CallerMustSetThis, annotation=None, type_comment=None)
    elif isinstance(base, StringLiteral):
      return gast.Constant(base.value, kind=None)
    elif isinstance(base, NumberLiteral):
      return gast.Constant(base.value, kind=None)
    else:
      assert False, ('the constructor should prevent types other than '
                     'str, StringLiteral and NumberLiteral')
class QnResolver(gast.NodeTransformer):
  """Annotates nodes with QN information.

  Note: Not using NodeAnnos to avoid circular dependencies.
  """
  def visit_Name(self, node):
    # Simple names always receive a QN annotation.
    node = self.generic_visit(node)
    anno.setanno(node, anno.Basic.QN, QN(node.id))
    return node
  def visit_Attribute(self, node):
    # Attributes only get a QN when their value does; the child is visited
    # first (generic_visit) so its annotation is available here.
    node = self.generic_visit(node)
    if anno.hasanno(node.value, anno.Basic.QN):
      anno.setanno(node, anno.Basic.QN,
                   QN(anno.getanno(node.value, anno.Basic.QN), attr=node.attr))
    return node
  def visit_Subscript(self, node):
    # TODO(mdan): This may no longer apply if we overload getitem.
    node = self.generic_visit(node)
    s = node.slice
    if not isinstance(s, gast.Index):
      # TODO(mdan): Support range and multi-dimensional indices.
      # Continuing silently because some demos use these.
      return node
    if isinstance(s.value, gast.Constant):
      # NOTE(review): string constants also take this branch and are wrapped
      # in NumberLiteral — confirm this is intended.
      subscript = QN(NumberLiteral(s.value.value))
    else:
      # The index may be an expression, case in which a name doesn't make sense.
      if anno.hasanno(node.slice.value, anno.Basic.QN):
        subscript = anno.getanno(node.slice.value, anno.Basic.QN)
      else:
        return node
    if anno.hasanno(node.value, anno.Basic.QN):
      anno.setanno(node, anno.Basic.QN,
                   QN(anno.getanno(node.value, anno.Basic.QN),
                      subscript=subscript))
    return node
def resolve(node):
  """Resolves qualified names in the AST, annotating nodes in place."""
  resolver = QnResolver()
  return resolver.visit(node)
def from_str(qn_str):
  """Parses a QN from its string representation, e.g. 'a.b[c]'."""
  expr_node = resolve(parser.parse_expression(qn_str))
  return anno.getanno(expr_node, anno.Basic.QN)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/qual_names.py
|
# python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reaching_definitions module, that only run in Python 3."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions_test
from tensorflow.python.platform import test
class ReachingDefinitionsAnalyzerTest(
    reaching_definitions_test.ReachingDefinitionsAnalyzerTestBase):
  """Tests which can only run in Python 3."""
  def test_nonlocal_symbol(self):
    # `nonlocal` is Python-3-only syntax, hence this test lives here.
    nonlocal_a = 3
    nonlocal_b = 13
    def test_fn():
      nonlocal nonlocal_a
      nonlocal nonlocal_b
      if nonlocal_a:
        nonlocal_b = []
      return nonlocal_a, nonlocal_b
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    # fn_body indices: [0], [1] = nonlocal statements, [2] = if, [3] = return.
    self.assertHasDefs(fn_body[2].test, 1)
    self.assertHasDefs(fn_body[2].body[0].targets[0], 1)
    self.assertHasDefs(fn_body[3].value.elts[0], 1)
    # nonlocal_b has two reaching defs at the return: the outer binding and
    # the conditional assignment inside the if.
    self.assertHasDefs(fn_body[3].value.elts[1], 2)
    self.assertSameDef(fn_body[2].test, fn_body[3].value.elts[0])
    self.assertHasDefinedIn(fn_body[2], ('nonlocal_a', 'nonlocal_b'))
# Test runner entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/static_analysis/reaching_definitions_py3_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for liveness module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.platform import test
# Module-level symbols referenced by LivenessAnalyzerTest.test_global_symbol.
global_a = 7
global_b = 17
class LivenessAnalyzerTestBase(test.TestCase):
  """Base class: runs the liveness pipeline and offers set assertions."""
  def _parse_and_analyze(self, test_fn):
    """Parses test_fn and runs qual_names, activity, CFG and liveness passes."""
    node, source = parser.parse_entity(test_fn, future_features=())
    entity_info = transformer.EntityInfo(
        source_code=source, source_file=None, future_features=(), namespace={})
    # Order matters: liveness requires activity annotations and the CFG.
    node = qual_names.resolve(node)
    ctx = transformer.Context(entity_info)
    node = activity.resolve(node, ctx)
    graphs = cfg.build(node)
    liveness.resolve(node, ctx, graphs)
    return node
  def assertHasLiveOut(self, node, expected):
    """Asserts node's LIVE_VARS_OUT equals `expected` (str, tuple or falsy)."""
    live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
    live_out_strs = set(str(v) for v in live_out)
    # Normalize: falsy -> empty tuple, single name -> 1-tuple.
    if not expected:
      expected = ()
    if not isinstance(expected, tuple):
      expected = (expected,)
    self.assertSetEqual(live_out_strs, set(expected))
  def assertHasLiveIn(self, node, expected):
    """Asserts node's LIVE_VARS_IN equals `expected` (str, tuple or falsy)."""
    live_in = anno.getanno(node, anno.Static.LIVE_VARS_IN)
    live_in_strs = set(str(v) for v in live_in)
    if not expected:
      expected = ()
    if not isinstance(expected, tuple):
      expected = (expected,)
    self.assertSetEqual(live_in_strs, set(expected))
class LivenessAnalyzerTest(LivenessAnalyzerTestBase):
  """Checks live-in/live-out annotations across statement kinds.

  Note: the nested test_fn functions are parsed and analyzed, not executed;
  the assertions index node.body by statement position.
  """
  # --- live-out tests ---
  def test_live_out_try_block(self):
    def test_fn(x, a, b, c): # pylint:disable=unused-argument
      if a > 0:
        try:
          pass
        except: # pylint:disable=bare-except
          pass
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveOut(fn_body[0], 'x')
    self.assertHasLiveOut(fn_body[0].body[0], 'x')
  def test_live_out_if_inside_except(self):
    def test_fn(x, a, b, c): # pylint:disable=unused-argument
      if a > 0:
        try:
          pass
        except: # pylint:disable=bare-except
          if b > 0:
            x = b
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveOut(fn_body[0], 'x')
    self.assertHasLiveOut(fn_body[0].body[0], 'x')
    self.assertHasLiveOut(fn_body[0].body[0].handlers[0].body[0], 'x')
  def test_live_out_stacked_if(self):
    def test_fn(x, a):
      if a > 0:
        x = 0
      if a > 1:
        x = 1
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    # 'a' stays live after the first if because the second if still reads it.
    self.assertHasLiveOut(fn_body[0], ('a', 'x'))
    self.assertHasLiveOut(fn_body[1], 'x')
  def test_live_out_stacked_if_else(self):
    def test_fn(x, a):
      if a > 0:
        x = 0
      if a > 1:
        x = 1
      else:
        x = 2
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    # The second if/else always rewrites x, so x is not live after the first.
    self.assertHasLiveOut(fn_body[0], 'a')
    self.assertHasLiveOut(fn_body[1], 'x')
  def test_live_out_for_basic(self):
    def test_fn(x, a):
      for i in range(a):
        x += i
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveOut(fn_body[0], 'x')
  def test_live_out_for_iterate(self):
    def test_fn(x, a):
      for i in range(a):
        x += i
      return x, i # pylint:disable=undefined-loop-variable
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    # The loop variable i is read after the loop, so it is live out.
    self.assertHasLiveOut(fn_body[0], ('x', 'i'))
  def test_live_out_attributes(self):
    def test_fn(x, a):
      if a > 0:
        x.y = 0
      return x.y
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    # Both the composite x.y and its owner x are tracked as live.
    self.assertHasLiveOut(fn_body[0], ('x.y', 'x'))
  def test_live_out_nested_functions(self):
    def test_fn(a, b):
      if b:
        a = []
      def foo():
        return a
      foo()
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    # a is live out of the if because the closure foo reads it later.
    self.assertHasLiveOut(fn_body[0], 'a')
  def test_live_out_nested_functions_isolation(self):
    def test_fn(b):
      if b:
        a = 0 # pylint:disable=unused-variable
      def child():
        max(a) # pylint:disable=used-before-assignment
        a = 1
        return a
      child()
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    # child assigns its own a, so the outer a is not live; only max leaks in.
    self.assertHasLiveOut(fn_body[0], 'max')
  def test_live_out_deletion(self):
    def test_fn(x, y, a):
      for _ in a:
        if x:
          del y
        else:
          y = 0
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveOut(fn_body[0], ())
  # --- live-in tests ---
  def test_live_in_pass(self):
    def test_fn(x, a, b, c): # pylint:disable=unused-argument
      if a > 0:
        pass
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('a', 'x'))
    self.assertHasLiveIn(fn_body[0].body[0], ('x',))
    self.assertHasLiveIn(fn_body[1], ('x',))
  def test_live_in_return_statement(self):
    def test_fn(x, a, b, c): # pylint:disable=unused-argument
      if a > 0:
        return x
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('a', 'x'))
    self.assertHasLiveIn(fn_body[0].body[0], ('x',))
    self.assertHasLiveIn(fn_body[1], ('x',))
  def test_live_in_try_block(self):
    def test_fn(x, a, b, c): # pylint:disable=unused-argument
      if a > 0:
        try:
          pass
        except: # pylint:disable=bare-except
          pass
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('a', 'x'))
    self.assertHasLiveIn(fn_body[0].body[0], ('x',))
    self.assertHasLiveIn(fn_body[1], ('x',))
  def test_live_in_try_orelse(self):
    def test_fn(x, a, b, c): # pylint:disable=unused-argument
      if a > 0:
        try:
          pass
        except: # pylint:disable=bare-except
          pass
        else:
          x = b
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('a', 'b', 'x'))
    self.assertHasLiveIn(fn_body[0].body[0], ('b', 'x'))
    self.assertHasLiveIn(fn_body[1], ('x',))
  def test_live_in_if_inside_except(self):
    def test_fn(x, a, b, c): # pylint:disable=unused-argument
      if a > 0:
        try:
          pass
        except: # pylint:disable=bare-except
          if b > 0:
            x = b
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('a', 'b', 'x'))
    self.assertHasLiveIn(fn_body[0].body[0], ('b', 'x'))
    self.assertHasLiveIn(fn_body[0].body[0].handlers[0].body[0], ('b', 'x'))
    self.assertHasLiveIn(fn_body[1], ('x',))
  def test_live_in_stacked_if(self):
    def test_fn(x, a, b, c):
      if a > 0:
        x = b
      if c > 1:
        x = 0
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('a', 'b', 'c', 'x'))
    self.assertHasLiveIn(fn_body[1], ('c', 'x'))
  def test_live_in_stacked_if_else(self):
    def test_fn(x, a, b, c, d):
      if a > 1:
        x = b
      else:
        x = c
      if d > 0:
        x = 0
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    # x is always rewritten by the first if/else, so it is not live in.
    self.assertHasLiveIn(fn_body[0], ('a', 'b', 'c', 'd'))
    self.assertHasLiveIn(fn_body[1], ('d', 'x'))
  def test_live_in_for_basic(self):
    def test_fn(x, y, a):
      for i in a:
        x = i
        y += x
        z = 0
      return y, z
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('a', 'y', 'z'))
  def test_live_in_for_nested(self):
    def test_fn(x, y, a):
      for i in a:
        for j in i:
          x = i
          y += x
          z = j
      return y, z
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('a', 'y', 'z'))
  def test_live_in_deletion(self):
    def test_fn(x, y, a):
      for _ in a:
        if x:
          del y
        else:
          y = 0
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('a', 'x', 'y'))
  def test_live_in_generator_comprehension(self):
    def test_fn(y):
      if all(x for x in y):
        return
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('all', 'y'))
  def test_live_in_list_comprehension(self):
    def test_fn(y):
      if [x for x in y]:
        return
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('y',))
  def test_live_in_list_comprehension_expression(self):
    def test_fn(y, s):
      s += foo([x for x in y]) # pylint:disable=undefined-variable
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('y', 'foo', 's'))
  def test_live_in_set_comprehension(self):
    def test_fn(y):
      if {x for x in y}:
        return
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('y',))
  def test_live_in_dict_comprehension(self):
    def test_fn(y):
      if {k: v for k, v in y}:
        return
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('y',))
  def test_global_symbol(self):
    def test_fn(c):
      global global_a
      global global_b
      if global_a:
        global_b = c
      else:
        global_b = c
      return global_b
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    # fn_body indices: [0], [1] = global statements, [2] = if, [3] = return.
    self.assertHasLiveOut(fn_body[2], ('global_b',))
    self.assertHasLiveIn(fn_body[2], ('global_a', 'c'))
# Test runner entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/static_analysis/liveness_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Live variable analysis.
See https://en.wikipedia.org/wiki/Live_variable_analysis for a definition of
the following idioms: live variable, live in, live out, which are used
throughout this file.
This analysis attaches the following:
* symbols that are live at the exit of control flow statements
* symbols that are live at the entry of control flow statements
Requires activity analysis.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import annos
class Analyzer(cfg.GraphVisitor):
  """CFG visitor that performs liveness analysis at statement level."""
  def __init__(self, graph):
    super(Analyzer, self).__init__(graph)
    # This allows communicating that nodes generate extra symbols,
    # e.g. those that a function definition closes over.
    self.extra_gen = {}
  def init_state(self, _):
    # Initial liveness state for each CFG node: the empty set.
    return set()
  def visit_node(self, node):
    """Updates liveness for one CFG node; returns True if live-in changed."""
    prev_live_in = self.in_[node]
    if anno.hasanno(node.ast_node, anno.Static.SCOPE):
      node_scope = anno.getanno(node.ast_node, anno.Static.SCOPE)
      # Backward dataflow: gen = symbols read (plus closure extras),
      # kill = symbols written or deleted; live_in = gen | (live_out - kill).
      gen = node_scope.read | self.extra_gen.get(node.ast_node, frozenset())
      # TODO(mdan): verify whether composites' parents need to be added.
      # E.g. whether x needs to be added if x.y is live. Theoretically the
      # activity analysis should have both so that wouldn't be needed.
      kill = node_scope.modified | node_scope.deleted
      live_out = set()
      for n in node.next:
        live_out |= self.in_[n]
      live_in = gen | (live_out - kill)
    else:
      # Nodes that don't have a scope annotation are assumed not to touch any
      # symbols.
      # This Name node below is a literal name, e.g. False
      assert isinstance(node.ast_node,
                        (gast.Name, gast.Continue, gast.Break, gast.Pass,
                         gast.Global, gast.Nonlocal)), type(node.ast_node)
      live_out = set()
      for n in node.next:
        live_out |= self.in_[n]
      live_in = live_out
    self.in_[node] = live_in
    self.out[node] = live_out
    # TODO(mdan): Move this to the superclass?
    # The return value drives fixpoint iteration in the graph visitor.
    return prev_live_in != live_in
class WholeTreeAnalyzer(transformer.Base):
  """Runs liveness analysis on each of the functions defined in the AST.

  If a function defined other local functions, those will have separate CFGs.
  However, dataflow analysis needs to tie up these CFGs to properly emulate the
  effect of closures. In the case of liveness, the parent function's live
  variables must account for the variables that are live at the entry of each
  subfunction. For example:

    def foo():
      # baz is live here
      def bar():
        print(baz)

  This analyzer runs liveness analysis on each individual function, accounting
  for the effect above.
  """
  def __init__(self, source_info, graphs):
    super(WholeTreeAnalyzer, self).__init__(source_info)
    # Maps each FunctionDef node to its CFG.
    self.graphs = graphs
    # The Analyzer for the function currently being visited, if any.
    self.current_analyzer = None
    # Maps each FunctionDef node to its completed Analyzer.
    self.analyzers = {}
  def visit_FunctionDef(self, node):
    """Analyzes one function and wires its live-in set into its parent."""
    parent_analyzer = self.current_analyzer
    subgraph = self.graphs[node]
    # Postorder tree processing makes this a bit complicated:
    #  1. construct an analyzer object and put it on stack
    #  2. recursively walk the subtree; this will initialize the analyzer's
    #     in_ state properly (done in a block below)
    #  3. run the final analysis
    analyzer = Analyzer(subgraph)
    self.current_analyzer = analyzer
    node = self.generic_visit(node)
    analyzer.visit_reverse()
    if parent_analyzer is not None:
      # Wire the state between the two subgraphs' analyzers.
      child_in_state = analyzer.in_[subgraph.entry]
      # Exception: symbols modified in the child function are local to it
      body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
      for qn in body_scope.modified:
        # Note: a function modifying the symbol doesn't make that symbol
        # live at the function's entry. In fact when that happens it is
        # probably a case of undefined assignment, like this:
        #
        #   bar = 0
        #   def foo():
        #     print(bar)  # bar is undefined here!
        #     bar = 1
        #
        # Hence we use discard and not remove below.
        child_in_state.discard(qn)
      # (The trailing comma is benign: frozenset takes a single iterable.)
      parent_analyzer.extra_gen[node] = frozenset(child_in_state,)
    self.analyzers[node] = analyzer
    self.current_analyzer = parent_analyzer
    return node
class Annotator(transformer.Base):
  """AST visitor that annotates each control flow block with live symbols."""
  # Note: additional nodes may be added as needed.
  def __init__(self, source_info, cross_function_analyzer):
    super(Annotator, self).__init__(source_info)
    # The WholeTreeAnalyzer holding one Analyzer per FunctionDef.
    self.cross_function_analyzer = cross_function_analyzer
    self.current_analyzer = None
  def visit(self, node):
    # Attach LIVE_VARS_IN to every statement that maps to a CFG node.
    node = super(Annotator, self).visit(node)
    if (self.current_analyzer is not None and
        isinstance(node, gast.stmt) and
        node in self.current_analyzer.graph.index):
      cfg_node = self.current_analyzer.graph.index[node]
      anno.setanno(node, anno.Static.LIVE_VARS_IN,
                   frozenset(self.current_analyzer.in_[cfg_node]))
    return node
  def visit_FunctionDef(self, node):
    # Switch to the analyzer of this function for the duration of its body.
    parent_analyzer = self.current_analyzer
    self.current_analyzer = self.cross_function_analyzer.analyzers[node]
    node = self.generic_visit(node)
    self.current_analyzer = parent_analyzer
    return node
  def _block_statement_live_out(self, node):
    """Annotates node with the union of live-in sets of its successors."""
    successors = self.current_analyzer.graph.stmt_next[node]
    stmt_live_out = set()
    for s in successors:
      stmt_live_out.update(self.current_analyzer.in_[s])
    anno.setanno(node, anno.Static.LIVE_VARS_OUT, frozenset(stmt_live_out))
    return node
  def _block_statement_live_in(self, node, entry_node):
    """Annotates node with the live-in set taken at entry_node."""
    if entry_node in self.current_analyzer.graph.index:
      cfg_node = self.current_analyzer.graph.index[entry_node]
      stmt_live_in = frozenset(self.current_analyzer.in_[cfg_node])
    else:
      # entry_node is itself a block statement that was already annotated by
      # a previous (postorder) visit; reuse its annotation.
      assert anno.hasanno(entry_node, anno.Static.LIVE_VARS_IN), (
          'If not matching a CFG node, must be a block statement:'
          ' {}'.format(entry_node))
      stmt_live_in = anno.getanno(entry_node, anno.Static.LIVE_VARS_IN)
    anno.setanno(node, anno.Static.LIVE_VARS_IN, stmt_live_in)
    return node
  def visit_If(self, node):
    node = self.generic_visit(node)
    node = self._block_statement_live_out(node)
    return self._block_statement_live_in(node, node.test)
  def visit_For(self, node):
    node = self.generic_visit(node)
    node = self._block_statement_live_out(node)
    return self._block_statement_live_in(node, node.iter)
  def visit_While(self, node):
    node = self.generic_visit(node)
    node = self._block_statement_live_out(node)
    return self._block_statement_live_in(node, node.test)
  def visit_Try(self, node):
    node = self.generic_visit(node)
    node = self._block_statement_live_out(node)
    return self._block_statement_live_in(node, node.body[0])
  def visit_ExceptHandler(self, node):
    node = self.generic_visit(node)
    node = self._block_statement_live_out(node)
    return self._block_statement_live_in(node, node.body[0])
  def visit_With(self, node):
    # NOTE(review): unlike the other block statements, With receives no
    # LIVE_VARS_OUT annotation — confirm whether this is intentional.
    node = self.generic_visit(node)
    return self._block_statement_live_in(node, node.items[0])
  def visit_Expr(self, node):
    node = self.generic_visit(node)
    cfg_node = self.current_analyzer.graph.index[node]
    anno.setanno(node, anno.Static.LIVE_VARS_OUT,
                 frozenset(self.current_analyzer.out[cfg_node]))
    return node
def resolve(node, source_info, graphs):
  """Resolves the live symbols at the exit of control flow statements.

  Args:
    node: ast.AST
    source_info: transformer.SourceInfo
    graphs: Dict[ast.FunctionDef, cfg.Graph]

  Returns:
    ast.AST
  """
  # First pass: solve liveness across all functions in the tree.
  whole_tree = WholeTreeAnalyzer(source_info, graphs)
  node = whole_tree.visit(node)
  # Second pass: copy the solutions onto the AST as annotations.
  annotator = Annotator(source_info, whole_tree)
  return annotator.visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/static_analysis/liveness.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for activity module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import annos
from tensorflow.python.platform import test
# Shorthand aliases used throughout the tests below.
QN = qual_names.QN
NodeAnno = annos.NodeAnno
# Module-level globals exercised by ActivityAnalyzerTest.test_global_symbol.
global_a = 7
global_b = 17
class ScopeTest(test.TestCase):
  """Unit tests for the Scope bookkeeping object."""

  def assertMissing(self, qn, scope):
    self.assertNotIn(qn, scope.read)
    self.assertNotIn(qn, scope.modified)

  def assertReadOnly(self, qn, scope):
    self.assertIn(qn, scope.read)
    self.assertNotIn(qn, scope.modified)

  def assertWriteOnly(self, qn, scope):
    self.assertNotIn(qn, scope.read)
    self.assertIn(qn, scope.modified)

  def assertReadWrite(self, qn, scope):
    self.assertIn(qn, scope.read)
    self.assertIn(qn, scope.modified)

  def test_basic(self):
    foo = QN('foo')
    s = activity.Scope(None)
    self.assertMissing(foo, s)
    s.mark_read(foo)
    self.assertReadOnly(foo, s)
    s.mark_modified(foo)
    self.assertReadWrite(foo, s)

  def test_copy_from(self):
    foo, bar = QN('foo'), QN('bar')
    scope = activity.Scope(None)
    scope.mark_modified(foo)
    other = activity.Scope(None)
    other.copy_from(scope)
    self.assertWriteOnly(foo, other)
    # copy_from overwrites, dropping symbols absent from the source scope.
    scope.mark_modified(bar)
    scope.copy_from(other)
    self.assertMissing(bar, scope)
    # merge_from unions instead, and does not touch the source scope.
    scope.mark_modified(bar)
    scope.merge_from(other)
    self.assertWriteOnly(bar, scope)
    self.assertMissing(bar, other)

  def test_copy_of(self):
    foo, bar = QN('foo'), QN('bar')
    scope = activity.Scope(None)
    scope.mark_read(foo)
    self.assertReadOnly(foo, activity.Scope.copy_of(scope))
    child_scope = activity.Scope(scope)
    child_scope.mark_read(bar)
    self.assertReadOnly(bar, activity.Scope.copy_of(child_scope))

  def test_referenced(self):
    a, b, c = QN('a'), QN('b'), QN('c')
    scope = activity.Scope(None)
    scope.mark_read(a)
    child = activity.Scope(scope)
    child.mark_read(b)
    child2 = activity.Scope(child, isolated=False)
    child2.mark_read(c)
    # Non-isolated scopes include their parent's reads; isolated ones don't.
    for s in (child2, child):
      self.assertIn(c, s.referenced)
      self.assertIn(b, s.referenced)
      self.assertNotIn(a, s.referenced)
class ActivityAnalyzerTestBase(test.TestCase):
  """Shared helpers for the activity analysis tests."""
  def _parse_and_analyze(self, test_fn):
    """Parses test_fn and runs activity analysis on it.

    Returns:
      Tuple of (annotated AST node, transformer.EntityInfo).
    """
    node, source = parser.parse_entity(test_fn, future_features=())
    entity_info = transformer.EntityInfo(
        source_code=source, source_file=None, future_features=(), namespace={})
    node = qual_names.resolve(node)
    ctx = transformer.Context(entity_info)
    node = activity.resolve(node, ctx)
    return node, entity_info
  def assertSymbolSetsAre(self, expected, actual, name):
    """Asserts that two symbol sets are equal, with a readable diff message."""
    expected = set(expected)
    # Symbols may be QN objects; compare by string form.
    actual = set(str(s) for s in actual)
    self.assertSetEqual(
        expected, actual, 'for symbol set: %s\n'
        ' Expected: %s\n'
        ' Got: %s\n'
        ' Missing: %s\n'
        ' Extra: %s\n' % (name.upper(), expected, actual,
                          expected - actual, actual - expected))
  def assertScopeIs(self, scope, used, modified):
    """Assert the scope contains specific used, modified & created variables."""
    self.assertSymbolSetsAre(used, scope.read, 'read')
    self.assertSymbolSetsAre(modified, scope.modified, 'modified')
class ActivityAnalyzerTest(ActivityAnalyzerTestBase):
  """End-to-end checks of the scope annotations produced by activity.resolve.

  Note: the inner `test_fn` definitions are the fixtures under analysis;
  their exact source is significant.
  """
  def test_print_statement(self):
    def test_fn(a):
      b = 0
      c = 1
      print(a, b)
      return c
    node, _ = self._parse_and_analyze(test_fn)
    print_node = node.body[2]
    if isinstance(print_node, gast.Print):
      # Python 2
      print_args_scope = anno.getanno(print_node, NodeAnno.ARGS_SCOPE)
    else:
      # Python 3
      assert isinstance(print_node, gast.Expr)
      # The call node should be the one being annotated.
      print_node = print_node.value
      print_args_scope = anno.getanno(print_node, NodeAnno.ARGS_SCOPE)
    # We basically need to detect which variables are captured by the call
    # arguments.
    self.assertScopeIs(print_args_scope, ('a', 'b'), ())
  def test_call_args(self):
    def test_fn(a):
      b = 0
      c = 1
      foo(a, b)  # pylint:disable=undefined-variable
      return c
    node, _ = self._parse_and_analyze(test_fn)
    call_node = node.body[2].value
    # We basically need to detect which variables are captured by the call
    # arguments.
    self.assertScopeIs(
        anno.getanno(call_node, NodeAnno.ARGS_SCOPE), ('a', 'b'), ())
  def test_call_args_attributes(self):
    def foo(*_):
      pass
    def test_fn(a):
      a.c = 0
      foo(a.b, a.c)
      return a.d
    node, _ = self._parse_and_analyze(test_fn)
    call_node = node.body[1].value
    self.assertScopeIs(
        anno.getanno(call_node, NodeAnno.ARGS_SCOPE), ('a', 'a.b', 'a.c'), ())
  def test_call_args_subscripts(self):
    def foo(*_):
      pass
    def test_fn(a):
      b = 1
      c = 2
      foo(a[0], a[b])
      return a[c]
    node, _ = self._parse_and_analyze(test_fn)
    call_node = node.body[2].value
    self.assertScopeIs(
        anno.getanno(call_node, NodeAnno.ARGS_SCOPE),
        ('a', 'a[0]', 'a[b]', 'b'), ())
  def test_while(self):
    def test_fn(a):
      b = a
      while b > 0:
        c = b
        b -= 1
      return b, c
    node, _ = self._parse_and_analyze(test_fn)
    while_node = node.body[1]
    self.assertScopeIs(
        anno.getanno(while_node, NodeAnno.BODY_SCOPE), ('b',), ('b', 'c'))
    self.assertScopeIs(
        anno.getanno(while_node, NodeAnno.BODY_SCOPE).parent, ('a', 'b', 'c'),
        ('b', 'c'))
    self.assertScopeIs(
        anno.getanno(while_node, NodeAnno.COND_SCOPE), ('b',), ())
  def test_for(self):
    def test_fn(a):
      b = a
      for _ in a:
        c = b
        b -= 1
      return b, c
    node, _ = self._parse_and_analyze(test_fn)
    for_node = node.body[1]
    self.assertScopeIs(
        anno.getanno(for_node, NodeAnno.ITERATE_SCOPE), (), ('_'))
    self.assertScopeIs(
        anno.getanno(for_node, NodeAnno.BODY_SCOPE), ('b',), ('b', 'c'))
    self.assertScopeIs(
        anno.getanno(for_node, NodeAnno.BODY_SCOPE).parent, ('a', 'b', 'c'),
        ('b', 'c', '_'))
  def test_if(self):
    def test_fn(x):
      if x > 0:
        x = -x
        y = 2 * x
        z = -y
      else:
        x = 2 * x
        y = -x
        u = -y
      return z, u
    node, _ = self._parse_and_analyze(test_fn)
    if_node = node.body[0]
    self.assertScopeIs(
        anno.getanno(if_node, NodeAnno.BODY_SCOPE), ('x', 'y'), ('x', 'y', 'z'))
    self.assertScopeIs(
        anno.getanno(if_node, NodeAnno.BODY_SCOPE).parent, ('x', 'y', 'z', 'u'),
        ('x', 'y', 'z', 'u'))
    self.assertScopeIs(
        anno.getanno(if_node, NodeAnno.ORELSE_SCOPE), ('x', 'y'),
        ('x', 'y', 'u'))
    self.assertScopeIs(
        anno.getanno(if_node, NodeAnno.ORELSE_SCOPE).parent,
        ('x', 'y', 'z', 'u'), ('x', 'y', 'z', 'u'))
  def test_if_attributes(self):
    def test_fn(a):
      if a > 0:
        a.b = -a.c
        d = 2 * a
      else:
        a.b = a.c
        d = 1
      return d
    node, _ = self._parse_and_analyze(test_fn)
    if_node = node.body[0]
    self.assertScopeIs(
        anno.getanno(if_node, NodeAnno.BODY_SCOPE), ('a', 'a.c'), ('a.b', 'd'))
    self.assertScopeIs(
        anno.getanno(if_node, NodeAnno.ORELSE_SCOPE), ('a', 'a.c'),
        ('a.b', 'd'))
    self.assertScopeIs(
        anno.getanno(if_node, NodeAnno.BODY_SCOPE).parent, ('a', 'a.c', 'd'),
        ('a.b', 'd'))
  def test_if_subscripts(self):
    def test_fn(a, b, c, e):
      if a > 0:
        a[b] = -a[c]
        d = 2 * a
      else:
        a[0] = e
        d = 1
      return d
    node, _ = self._parse_and_analyze(test_fn)
    if_node = node.body[0]
    self.assertScopeIs(
        anno.getanno(if_node, NodeAnno.BODY_SCOPE), ('a', 'b', 'c', 'a[c]'),
        ('a[b]', 'd'))
    # TODO(mdan): Should subscript writes (a[0] = 1) be considered to read "a"?
    self.assertScopeIs(
        anno.getanno(if_node, NodeAnno.ORELSE_SCOPE), ('a', 'e'), ('a[0]', 'd'))
    self.assertScopeIs(
        anno.getanno(if_node, NodeAnno.ORELSE_SCOPE).parent,
        ('a', 'b', 'c', 'd', 'e', 'a[c]'), ('d', 'a[b]', 'a[0]'))
  def test_nested_if(self):
    def test_fn(b):
      if b > 0:
        if b < 5:
          a = b
        else:
          a = b * b
      return a
    node, _ = self._parse_and_analyze(test_fn)
    inner_if_node = node.body[0].body[0]
    self.assertScopeIs(
        anno.getanno(inner_if_node, NodeAnno.BODY_SCOPE), ('b',), ('a',))
    self.assertScopeIs(
        anno.getanno(inner_if_node, NodeAnno.ORELSE_SCOPE), ('b',), ('a',))
  def test_nested_function(self):
    def test_fn(a):
      def f(x):
        y = x * x
        return y
      b = a
      for i in a:
        c = b
        b -= f(i)
      return b, c
    node, _ = self._parse_and_analyze(test_fn)
    fn_def_node = node.body[0]
    self.assertScopeIs(
        anno.getanno(fn_def_node, NodeAnno.BODY_SCOPE), ('x', 'y'), ('y',))
  def test_constructor_attributes(self):
    class TestClass(object):
      def __init__(self, a):
        self.b = a
        self.b.c = 1
    node, _ = self._parse_and_analyze(TestClass)
    init_node = node.body[0]
    self.assertScopeIs(
        anno.getanno(init_node, NodeAnno.BODY_SCOPE), ('self', 'a', 'self.b'),
        ('self', 'self.b', 'self.b.c'))
  def test_aug_assign_subscripts(self):
    def test_fn(a):
      a[0] += 1
    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    self.assertScopeIs(
        anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('a', 'a[0]'), ('a[0]',))
  def test_return_vars_are_read(self):
    def test_fn(a, b, c):  # pylint: disable=unused-argument
      return c
    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    self.assertScopeIs(anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('c',), ())
  def test_aug_assign(self):
    def test_fn(a, b):
      a += b
    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    self.assertScopeIs(
        anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('a', 'b'), ('a'))
  def test_aug_assign_rvalues(self):
    a = dict(bar=3)
    def foo():
      return a
    def test_fn(x):
      foo()['bar'] += x
    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    self.assertScopeIs(
        anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('foo', 'x'), ())
  def test_params(self):
    def test_fn(a, b):  # pylint: disable=unused-argument
      return b
    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
    self.assertScopeIs(body_scope, ('b',), ())
    self.assertScopeIs(body_scope.parent, ('b',), ('a', 'b'))
    args_scope = anno.getanno(fn_node.args, anno.Static.SCOPE)
    self.assertSymbolSetsAre(('a', 'b'), args_scope.params.keys(), 'params')
  def test_lambda_captures_reads(self):
    def test_fn(a, b):
      return lambda: a + b
    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
    self.assertScopeIs(body_scope, ('a', 'b'), ())
    # Nothing local to the lambda is tracked.
    self.assertSymbolSetsAre((), body_scope.params.keys(), 'params')
  def test_lambda_params_are_isolated(self):
    def test_fn(a, b):  # pylint: disable=unused-argument
      return lambda a: a + b
    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
    self.assertScopeIs(body_scope, ('b',), ())
    self.assertSymbolSetsAre((), body_scope.params.keys(), 'params')
  def test_lambda_complex(self):
    def test_fn(a, b, c, d):  # pylint: disable=unused-argument
      a = (lambda a, b, c: a + b + c)(d, 1, 2) + b
    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
    self.assertScopeIs(body_scope, ('b', 'd'), ('a',))
    self.assertSymbolSetsAre((), body_scope.params.keys(), 'params')
  def test_lambda_nested(self):
    def test_fn(a, b, c, d, e):  # pylint: disable=unused-argument
      a = lambda a, b: d(lambda b: a + b + c)  # pylint: disable=undefined-variable
    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
    self.assertScopeIs(body_scope, ('c', 'd'), ('a',))
    self.assertSymbolSetsAre((), body_scope.params.keys(), 'params')
  def test_comprehension_targets_are_isolated(self):
    def test_fn(a):
      b = [c for c in a]  # pylint:disable=unused-variable
    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
    # Comprehension targets leak into the enclosing scope in Python 2 only.
    if six.PY2:
      self.assertScopeIs(body_scope, ('a',), ('b', 'c'))
    else:
      self.assertScopeIs(body_scope, ('a',), ('b',))
  def test_comprehension_targets_are_isolated_in_augassign(self):
    def test_fn(a, b):
      b += [c for c in a]  # pylint:disable=unused-variable
    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
    if six.PY2:
      self.assertScopeIs(body_scope, ('a', 'b'), ('b', 'c'))
    else:
      self.assertScopeIs(body_scope, ('a', 'b'), ('b',))
  def test_global_symbol(self):
    def test_fn(c):
      global global_a
      global global_b
      global_a = global_b + c
    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
    self.assertScopeIs(body_scope, ('global_b', 'c'), ('global_a',))
  def test_class_definition_basic(self):
    def test_fn(a, b):
      class C(a(b)):
        d = 1
      return C
    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
    self.assertScopeIs(body_scope, ('a', 'b', 'C'), ('C',))
  def test_class_definition_isolates_method_writes_but_not_reads(self):
    def test_fn(a, b, c):
      class C(a(b)):
        d = 1
        def e(self):
          f = c + 1
          return f
      return C
    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
    # Note: 'f' is in there because we cannot detect statically that it
    # is local to the function itself.
    self.assertScopeIs(body_scope, ('a', 'b', 'c', 'f', 'C'), ('C',))
# Standard test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/static_analysis/activity_test.py
|
# python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for liveness module, that only run in Python 3."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct.static_analysis import annos
from tensorflow.python.autograph.pyct.static_analysis import liveness_test
from tensorflow.python.platform import test
# Shorthand alias for the node annotation keys.
NodeAnno = annos.NodeAnno
class LivenessAnalyzerTest(liveness_test.LivenessAnalyzerTestBase):
  """Tests which can only run in Python 3."""
  def test_nonlocal_symbol(self):
    # Note: test_fn's exact source is the fixture under analysis.
    nonlocal_a = 3
    nonlocal_b = 13
    def test_fn(c):
      nonlocal nonlocal_a
      nonlocal nonlocal_b
      if nonlocal_a:
        nonlocal_b = c
      else:
        nonlocal_b = c
      return nonlocal_b
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    # fn_body[2] is the `if` statement, after the two nonlocal declarations.
    self.assertHasLiveOut(fn_body[2], ('nonlocal_b',))
    self.assertHasLiveIn(fn_body[2], ('nonlocal_a', 'c'))
# Standard test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/static_analysis/liveness_py3_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Activity analysis.
Requires qualified name annotations (see qual_names.py).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import weakref
import gast
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
class Scope(object):
  """Encloses local symbol definition and usage information.

  This can track for instance whether a symbol is modified in the current
  scope. Note that scopes do not necessarily align with Python's scopes. For
  example, the body of an if statement may be considered a separate scope.

  Caution - the AST references held by this object are weak.

  Attributes:
    modified: Set[qual_names.QN], identifiers modified in this scope
    read: Set[qual_names.QN], identifiers read in this scope
    deleted: Set[qual_names.QN], identifiers deleted in this scope
    params: WeakValueDictionary[qual_names.QN, ast.Node], function arguments
      visible in this scope, mapped to the function node that defines them

  Note - simple statements may never delete and modify a symbol at the same
  time. However, compound ones like if statements can. In that latter case, it's
  undefined whether the symbol is actually modified or deleted upon statement
  exit. Certain analyses like reaching definitions need to be careful about
  this.
  """
  def __init__(self, parent, isolated=True, add_unknown_symbols=False):
    """Create a new scope.

    Args:
      parent: A Scope or None.
      isolated: Whether the scope is isolated, that is, whether variables
        modified in this scope should be considered modified in the parent
        scope.
      add_unknown_symbols: Whether to handle attributed and subscripts
        without having first seen the base name.
        E.g., analyzing the statement 'x.y = z' without first having seen 'x'.
    """
    self.isolated = isolated
    self.parent = parent
    self.add_unknown_symbols = add_unknown_symbols
    self.modified = set()
    self.read = set()
    self.deleted = set()
    self.params = weakref.WeakValueDictionary()
  @property
  def affects_parent(self):
    # Non-isolated child scopes propagate modifications upward.
    return not self.isolated and self.parent is not None
  @property
  def referenced(self):
    # Symbols read here, plus (for non-isolated scopes) in the parent chain.
    if self.affects_parent:
      return self.read | self.parent.referenced
    return self.read
  def __repr__(self):
    return 'Scope{r=%s, w=%s}' % (tuple(self.read), tuple(self.modified))
  def copy_from(self, other):
    """Recursively copies the contents of this scope from another scope.

    Raises:
      ValueError: if the two scope chains have different depths.
    """
    if (self.parent is None) != (other.parent is None):
      raise ValueError('cannot copy scopes of different structures')
    if other.parent is not None:
      self.parent.copy_from(other.parent)
    self.isolated = other.isolated
    self.modified = copy.copy(other.modified)
    self.read = copy.copy(other.read)
    # Bug fix: `deleted` was previously not copied, so a scope restored via
    # copy_from (e.g. when checkpointing between parallel branches) could
    # retain stale deletions from its previous state.
    self.deleted = copy.copy(other.deleted)
    self.params = copy.copy(other.params)
  @classmethod
  def copy_of(cls, other):
    """Returns a deep copy of the entire scope chain of `other`."""
    if other.parent is not None:
      parent = cls.copy_of(other.parent)
    else:
      parent = None
    new_copy = cls(parent)
    new_copy.copy_from(other)
    return new_copy
  def merge_from(self, other):
    """Unions the contents of another scope chain into this one.

    Raises:
      ValueError: if the two scope chains have different depths.
    """
    if (self.parent is None) != (other.parent is None):
      raise ValueError('cannot merge scopes of different structures')
    if other.parent is not None:
      self.parent.merge_from(other.parent)
    self.modified |= other.modified
    self.read |= other.read
    # Bug fix: keep `deleted` consistent with the other tracked sets.
    self.deleted |= other.deleted
    self.params.update(other.params)
  def mark_read(self, name):
    self.read.add(name)
    # Reads propagate up unless the name is a parameter bound in this scope.
    if self.parent is not None and name not in self.params:
      self.parent.mark_read(name)
  def mark_modified(self, name):
    self.modified.add(name)
    if self.affects_parent:
      self.parent.mark_modified(name)
  def mark_deleted(self, name):
    self.deleted.add(name)
  def mark_param(self, name, owner):
    # Assumption: all AST nodes have the same life span. This lets us use
    # a weak reference to mark the connection between a symbol node and the
    # function node whose argument that symbol is.
    self.params[name] = owner
class _Lambda(object):
  """State tracker pushed while visiting a lambda; collects its argument QNs."""
  # NOTE(review): consumed by the transformer state-stack machinery;
  # exact semantics of no_root are defined in transformer.py — confirm there.
  no_root = True
  def __init__(self):
    self.args = set()
class _Comprehension(object):
  """State tracker pushed while visiting a comprehension; collects targets."""
  # NOTE(review): consumed by the transformer state-stack machinery;
  # exact semantics of no_root are defined in transformer.py — confirm there.
  no_root = True
  def __init__(self):
    self.targets = set()
class ActivityAnalyzer(transformer.Base):
"""Annotates nodes with local scope information.
See Scope.
The use of this class requires that qual_names.resolve() has been called on
the node. This class will ignore nodes have not been
annotated with their qualified names.
"""
def __init__(self, context, parent_scope=None, add_unknown_symbols=False):
super(ActivityAnalyzer, self).__init__(context)
self.scope = Scope(parent_scope, None, add_unknown_symbols)
# Note: all these flags crucially rely on the respective nodes are
# leaves in the AST, that is, they cannot contain other statements.
self._in_aug_assign = False
self._in_function_def_args = False
@property
def _in_constructor(self):
if len(self.enclosing_entities) > 1:
innermost = self.enclosing_entities[-1]
parent = self.enclosing_entities[-2]
return isinstance(parent, gast.ClassDef) and innermost.name == '__init__'
return False
def _node_sets_self_attribute(self, node):
if anno.hasanno(node, anno.Basic.QN):
qn = anno.getanno(node, anno.Basic.QN)
# TODO(mdan): The 'self' argument is not guaranteed to be called 'self'.
if qn.has_attr and qn.parent.qn == ('self',):
return True
return False
def _track_symbol(self, node, composite_writes_alter_parent=False):
# A QN may be missing when we have an attribute (or subscript) on a function
# call. Example: a().b
if not anno.hasanno(node, anno.Basic.QN):
return
qn = anno.getanno(node, anno.Basic.QN)
# When inside a lambda, ignore any of the lambda's arguments.
# This includes attributes or slices of those arguments.
for l in self.state[_Lambda]:
if qn in l.args:
return
if qn.owner_set & set(l.args):
return
# When inside a comprehension, ignore reads to any of the comprehensions's
# targets. This includes attributes or slices of those arguments.
for l in self.state[_Comprehension]:
if qn in l.targets:
return
if qn.owner_set & set(l.targets):
return
if isinstance(node.ctx, gast.Store):
# In comprehensions, modified symbols are the comprehension targets.
if self.state[_Comprehension].level > 0:
self.state[_Comprehension].targets.add(qn)
# Comprehension targets are completely isolated in Python 3.
if six.PY2 or self.state[_Comprehension].level == 0:
self.scope.mark_modified(qn)
if qn.is_composite and composite_writes_alter_parent:
self.scope.mark_modified(qn.parent)
if self._in_aug_assign:
self.scope.mark_read(qn)
elif isinstance(node.ctx, gast.Load):
self.scope.mark_read(qn)
elif isinstance(node.ctx, gast.Param):
if self._in_function_def_args:
# In function defs have the meaning of defining a variable.
self.scope.mark_modified(qn)
self.scope.mark_param(qn, self.enclosing_entities[-1])
elif self.state[_Lambda].level:
# In lambdas, they are tracked separately.
self.state[_Lambda].args.add(qn)
else:
# TODO(mdan): Is this case possible at all?
raise NotImplementedError(
'Param "{}" outside a function arguments or lambda.'.format(qn))
elif isinstance(node.ctx, gast.Del):
# The read matches the Python semantics - attempting to delete an
# undefined symbol is illegal.
self.scope.mark_read(qn)
self.scope.mark_deleted(qn)
else:
raise ValueError('Unknown context {} for node "{}".'.format(
type(node.ctx), qn))
def _enter_scope(self, isolated):
self.scope = Scope(self.scope, isolated=isolated)
def _exit_scope(self):
self.scope = self.scope.parent
def _process_statement(self, node):
self._enter_scope(False)
node = self.generic_visit(node)
anno.setanno(node, anno.Static.SCOPE, self.scope)
self._exit_scope()
return node
def visit_Expr(self, node):
return self._process_statement(node)
def visit_Return(self, node):
return self._process_statement(node)
def visit_Assign(self, node):
return self._process_statement(node)
def visit_AnnAssign(self, node):
return self._process_statement(node)
def visit_AugAssign(self, node):
# Special rules for AugAssign. Here, the AST only shows the target as
# written, when it is in fact also read.
self._enter_scope(False)
self._in_aug_assign = True
node.target = self.visit(node.target)
self._in_aug_assign = False
node.op = self.visit(node.op)
node.value = self.visit(node.value)
anno.setanno(node, anno.Static.SCOPE, self.scope)
self._exit_scope()
return node
def visit_Delete(self, node):
return self._process_statement(node)
def visit_Name(self, node):
node = self.generic_visit(node)
self._track_symbol(node)
return node
def visit_Attribute(self, node):
node = self.generic_visit(node)
if self._in_constructor and self._node_sets_self_attribute(node):
self._track_symbol(node, composite_writes_alter_parent=True)
else:
self._track_symbol(node)
return node
def visit_Subscript(self, node):
node = self.generic_visit(node)
# Subscript writes (e.g. a[b] = "value") are considered to modify
# both the element itself (a[b]) and its parent (a).
self._track_symbol(node)
return node
def visit_Print(self, node):
self._enter_scope(False)
node.values = self.visit_block(node.values)
anno.setanno(node, anno.Static.SCOPE, self.scope)
anno.setanno(node, NodeAnno.ARGS_SCOPE, self.scope)
self._exit_scope()
return node
def visit_Assert(self, node):
return self._process_statement(node)
def visit_Call(self, node):
self._enter_scope(False)
node.args = self.visit_block(node.args)
node.keywords = self.visit_block(node.keywords)
# TODO(mdan): Account starargs, kwargs
anno.setanno(node, NodeAnno.ARGS_SCOPE, self.scope)
self._exit_scope()
node.func = self.visit(node.func)
return node
def _process_block_node(self, node, block, scope_name):
self._enter_scope(False)
block = self.visit_block(block)
anno.setanno(node, scope_name, self.scope)
self._exit_scope()
return node
def _process_parallel_blocks(self, parent, children):
# Because the scopes are not isolated, processing any child block
# modifies the parent state causing the other child blocks to be
# processed incorrectly. So we need to checkpoint the parent scope so that
# each child sees the same context.
before_parent = Scope.copy_of(self.scope)
after_children = []
for child, scope_name in children:
self.scope.copy_from(before_parent)
parent = self._process_block_node(parent, child, scope_name)
after_child = Scope.copy_of(self.scope)
after_children.append(after_child)
for after_child in after_children:
self.scope.merge_from(after_child)
return parent
def visit_Lambda(self, node):
assert not self._in_function_def_args
self.state[_Lambda].enter()
node = self.generic_visit(node)
anno.setanno(node, anno.Static.SCOPE, self.scope)
self.state[_Lambda].exit()
return node
def _process_iterable_comprehension(self, node):
# This handles ListComp, SetComp, GeneratorExp.
self.state[_Comprehension].enter()
# Note: it's important to visit the generators first to properly account
# for the variables local to these generators. Example: `x` is local to the
# expression `x for x in y`.
# It is important to visit the generators in reverse order when targets of
# outer comprehensions are accessed by inner generators.
node.generators = self.visit_block(reversed(node.generators))
node.elt = self.visit(node.elt)
self.state[_Comprehension].exit()
return node
def visit_comprehension(self, node):
# It is important to visit the target first so that it's properly tracked as
# comprehension target.
node.target = self.visit(node.target)
return self.generic_visit(node)
def visit_DictComp(self, node):
# Identical to _process_iterable_comprehension, different node names.
self.state[_Comprehension].enter()
node.generators = self.visit_block(node.generators)
node.key = self.visit(node.key)
node.value = self.visit(node.value)
self.state[_Comprehension].exit()
return node
def visit_ListComp(self, node):
return self._process_iterable_comprehension(node)
def visit_SetComp(self, node):
return self._process_iterable_comprehension(node)
def visit_GeneratorExp(self, node):
return self._process_iterable_comprehension(node)
def visit_arguments(self, node):
return self._process_statement(node)
def visit_ClassDef(self, node):
# The ClassDef node itself has a Scope object that tracks the creation
# of its name, along with the usage of any decorator accompanying it.
self._enter_scope(False)
node.decorator_list = self.visit_block(node.decorator_list)
self.scope.mark_modified(qual_names.QN(node.name))
anno.setanno(node, anno.Static.SCOPE, self.scope)
self._exit_scope()
# A separate Scope tracks the actual class definition.
self._enter_scope(True)
assert not (self._in_function_def_args or self.state[_Lambda].level)
node = self.generic_visit(node)
self._exit_scope()
return node
def visit_FunctionDef(self, node):
# The FunctionDef node itself has a Scope object that tracks the creation
# of its name, along with the usage of any decorator accompanying it.
self._enter_scope(False)
node.decorator_list = self.visit_block(node.decorator_list)
self.scope.mark_modified(qual_names.QN(node.name))
anno.setanno(node, anno.Static.SCOPE, self.scope)
self._exit_scope()
# A separate Scope tracks the actual function definition.
self._enter_scope(True)
assert not (self._in_function_def_args or self.state[_Lambda].level)
self._in_function_def_args = True
node.args = self.visit(node.args)
self._in_function_def_args = False
# Track the body separately. This is for compatibility reasons, it may not
# be strictly needed.
self._enter_scope(False)
node.body = self.visit_block(node.body)
anno.setanno(node, NodeAnno.BODY_SCOPE, self.scope)
self._exit_scope()
self._exit_scope()
return node
def visit_With(self, node):
self._enter_scope(False)
node = self.generic_visit(node)
anno.setanno(node, NodeAnno.BODY_SCOPE, self.scope)
self._exit_scope()
return node
def visit_withitem(self, node):
  """Each item of a `with` statement is scoped like a single statement."""
  result = self._process_statement(node)
  return result
def visit_If(self, node):
  """Tracks a conditional: test gets its own scope, branches run in parallel."""
  self._enter_scope(False)
  node.test = self.visit(node.test)
  anno.setanno(node, NodeAnno.COND_SCOPE, self.scope)
  anno.setanno(node.test, anno.Static.SCOPE, self.scope)
  self._exit_scope()
  # The true and false branches are mutually exclusive, so they are analyzed
  # as parallel blocks rather than sequentially.
  node = self._process_parallel_blocks(node,
                                       ((node.body, NodeAnno.BODY_SCOPE),
                                        (node.orelse, NodeAnno.ORELSE_SCOPE)))
  return node
def visit_For(self, node):
  """Tracks a for loop: target/iter scope, iterate scope, then body/orelse."""
  self._enter_scope(False)
  node.target = self.visit(node.target)
  node.iter = self.visit(node.iter)
  anno.setanno(node.iter, anno.Static.SCOPE, self.scope)
  self._exit_scope()
  self._enter_scope(False)
  # The target is visited a second time so that its symbols are also recorded
  # in the dedicated iterate scope; the return value is intentionally
  # discarded because the node was already replaced above.
  self.visit(node.target)
  anno.setanno(node, NodeAnno.ITERATE_SCOPE, self.scope)
  self._exit_scope()
  # Body and orelse are mutually exclusive continuations, analyzed in
  # parallel.
  node = self._process_parallel_blocks(node,
                                       ((node.body, NodeAnno.BODY_SCOPE),
                                        (node.orelse, NodeAnno.ORELSE_SCOPE)))
  return node
def visit_While(self, node):
  """Tracks a while loop: its test scope, then body/orelse in parallel."""
  self._enter_scope(False)
  node.test = self.visit(node.test)
  anno.setanno(node, NodeAnno.COND_SCOPE, self.scope)
  anno.setanno(node.test, anno.Static.SCOPE, self.scope)
  self._exit_scope()
  # Body and orelse are mutually exclusive continuations, analyzed in
  # parallel.
  node = self._process_parallel_blocks(node,
                                       ((node.body, NodeAnno.BODY_SCOPE),
                                        (node.orelse, NodeAnno.ORELSE_SCOPE)))
  return node
def resolve(node, context, parent_scope=None):
  """Annotates `node` and its subtree with activity (read/write) information.

  Args:
    node: ast.AST
    context: transformer.Context
    parent_scope: Optional scope enclosing `node`, if any.

  Returns:
    ast.AST, the same tree with scope annotations attached.
  """
  analyzer = ActivityAnalyzer(context, parent_scope)
  return analyzer.visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/static_analysis/activity.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Static information resolution.
This module contains utilities to help annotate AST nodes with as much runtime
information as can be possibly extracted without actually executing the code,
under that assumption that the context in which the code will run is known.
Overall, the different analyses have the functions listed below:
* activity: inventories symbols read, written to, params, etc. at different
levels
* liveness, reaching_definitions: dataflow analyses based on the program's CFG
and using the symbol information gathered by activity analysis
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/static_analysis/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reaching_definitions module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.platform import test
# Module-level symbols referenced via `global` statements in
# ReachingDefinitionsAnalyzerTest.test_global below.
global_a = 7
global_b = 17
class ReachingDefinitionsAnalyzerTestBase(test.TestCase):
  """Shared helpers for reaching-definitions analysis tests."""

  def _parse_and_analyze(self, test_fn):
    """Parses `test_fn` and runs the full analysis pipeline over its AST."""
    ast_node, src = parser.parse_entity(test_fn, future_features=())
    info = transformer.EntityInfo(
        source_code=src, source_file=None, future_features=(), namespace={})
    ctx = transformer.Context(info)
    ast_node = qual_names.resolve(ast_node)
    ast_node = activity.resolve(ast_node, ctx)
    cfgs = cfg.build(ast_node)
    return reaching_definitions.resolve(ast_node, ctx, cfgs,
                                        reaching_definitions.Definition)

  def assertHasDefs(self, node, num):
    """Checks that `node` has exactly `num` reaching definitions."""
    definitions = anno.getanno(node, anno.Static.DEFINITIONS)
    self.assertEqual(len(definitions), num)
    for d in definitions:
      self.assertIsInstance(d, reaching_definitions.Definition)

  def assertHasDefinedIn(self, node, expected):
    """Checks the set of symbols defined at entry of `node`."""
    defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
    actual = set(str(v) for v in defined_in)
    if not expected:
      expected = ()
    if not isinstance(expected, tuple):
      expected = (expected,)
    self.assertSetEqual(actual, set(expected))

  def assertSameDef(self, first, second):
    """Checks that both nodes resolve to the same single Definition object."""
    self.assertHasDefs(first, 1)
    self.assertHasDefs(second, 1)
    def_a = anno.getanno(first, anno.Static.DEFINITIONS)[0]
    def_b = anno.getanno(second, anno.Static.DEFINITIONS)[0]
    self.assertIs(def_a, def_b)

  def assertNotSameDef(self, first, second):
    """Checks that the nodes resolve to distinct Definition objects."""
    self.assertHasDefs(first, 1)
    self.assertHasDefs(second, 1)
    def_a = anno.getanno(first, anno.Static.DEFINITIONS)[0]
    def_b = anno.getanno(second, anno.Static.DEFINITIONS)[0]
    self.assertIsNot(def_a, def_b)
class ReachingDefinitionsAnalyzerTest(ReachingDefinitionsAnalyzerTestBase):
  """End-to-end checks of reaching definitions on small sample functions.

  Note: the nested `test_fn` functions are parsed from source, so their exact
  statement layout determines the `fn_body[i]` indices asserted below. Do not
  add or remove statements (including docstrings) inside them.
  """

  def test_conditional(self):
    """A write in one branch yields two reaching defs after the branch."""

    def test_fn(a, b):
      a = []
      if b:
        a = []
      return a

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    self.assertHasDefs(fn_body[0].targets[0], 1)
    self.assertHasDefs(fn_body[1].test, 1)
    self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
    self.assertHasDefs(fn_body[2].value, 2)

    self.assertHasDefinedIn(fn_body[1], ('a', 'b'))

  def test_try_in_conditional(self):
    """Defined-in sets propagate through try blocks nested in conditionals."""

    def test_fn(a, b):  # pylint:disable=unused-argument
      a = []
      if b:
        try:
          pass
        except:  # pylint:disable=bare-except
          pass
      return a

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
    self.assertHasDefinedIn(fn_body[1].body[0], ('a', 'b'))

  def test_conditional_in_try_in_conditional(self):
    """Defined-in sets propagate into conditionals nested inside try."""

    def test_fn(a, b):
      a = []
      if b:
        try:
          if b:
            a = []
        except TestException:  # pylint:disable=undefined-variable,unused-variable
          pass
      return a

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
    self.assertHasDefinedIn(fn_body[1].body[0], ('a', 'b'))
    # Note: `TestException` and `e` are not tracked.
    self.assertHasDefinedIn(fn_body[1].body[0].body[0], ('a', 'b'))

  def test_conditional_in_except_in_conditional(self):
    """Defined-in sets propagate into conditionals inside except handlers."""

    def test_fn(a, b):
      a = []
      if b:
        try:
          pass
        except TestException as e:  # pylint:disable=undefined-variable,unused-variable
          if b:
            a = []
      return a

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
    self.assertHasDefinedIn(fn_body[1].body[0], ('a', 'b'))
    # Note: `TestException` and `e` are not tracked.
    self.assertHasDefinedIn(fn_body[1].body[0].handlers[0].body[0], ('a', 'b'))

  def test_while(self):
    """Loop-carried definitions merge with definitions from before the loop."""

    def test_fn(a):
      max(a)
      while True:
        a = a
        a = a
      return a

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    self.assertHasDefs(fn_body[0].value.args[0], 1)
    self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
    self.assertHasDefs(fn_body[1].body[1].targets[0], 1)
    self.assertHasDefs(fn_body[1].body[1].value, 1)
    # The loop does have an invariant test, but the CFG doesn't know that.
    self.assertHasDefs(fn_body[1].body[0].value, 2)
    self.assertHasDefs(fn_body[2].value, 2)

  def test_while_else(self):
    """Definitions reach the orelse block and the post-loop return."""

    def test_fn(x, i):
      y = 0
      while x:
        x += i
        if i:
          break
      else:
        y = 1
      return x, y

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    self.assertHasDefs(fn_body[0].targets[0], 1)
    self.assertHasDefs(fn_body[1].test, 2)
    self.assertHasDefs(fn_body[1].body[0].target, 1)
    self.assertHasDefs(fn_body[1].body[1].test, 1)
    self.assertHasDefs(fn_body[1].orelse[0].targets[0], 1)
    self.assertHasDefs(fn_body[2].value.elts[0], 2)
    self.assertHasDefs(fn_body[2].value.elts[1], 2)

  def test_for_else(self):
    """Same as test_while_else, with `continue` exercising the back edge."""

    def test_fn(x, i):
      y = 0
      for i in x:
        x += i
        if i:
          break
        else:
          continue
      else:
        y = 1
      return x, y

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    self.assertHasDefs(fn_body[0].targets[0], 1)
    self.assertHasDefs(fn_body[1].target, 1)
    self.assertHasDefs(fn_body[1].body[0].target, 1)
    self.assertHasDefs(fn_body[1].body[1].test, 1)
    self.assertHasDefs(fn_body[1].orelse[0].targets[0], 1)
    self.assertHasDefs(fn_body[2].value.elts[0], 2)
    self.assertHasDefs(fn_body[2].value.elts[1], 2)

  def test_nested_functions(self):
    """A closure sees the parent definition that is live at its creation."""

    def test_fn(a, b):
      a = []
      if b:
        a = []

        def foo():
          return a

        foo()
      return a

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    def_of_a_in_if = fn_body[1].body[0].targets[0]

    self.assertHasDefs(fn_body[0].targets[0], 1)
    self.assertHasDefs(fn_body[1].test, 1)
    self.assertHasDefs(def_of_a_in_if, 1)
    self.assertHasDefs(fn_body[2].value, 2)

    inner_fn_body = fn_body[1].body[1].body
    self.assertSameDef(inner_fn_body[0].value, def_of_a_in_if)

  def test_nested_functions_isolation(self):
    """A write inside a child function does not alias the parent's symbol."""

    def test_fn(a):
      a = 0

      def child():
        a = 1
        return a

      child()
      return a

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    parent_return = fn_body[3]
    child_return = fn_body[1].body[1]
    # The assignment `a = 1` makes `a` local to `child`.
    self.assertNotSameDef(parent_return.value, child_return.value)

  def test_function_call_in_with(self):
    """Free function names carry no definitions; arguments do."""

    def foo(_):
      pass

    def test_fn(a):
      with foo(a):
        return a

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    self.assertHasDefs(fn_body[0].items[0].context_expr.func, 0)
    self.assertHasDefs(fn_body[0].items[0].context_expr.args[0], 1)

  def test_mutation_subscript(self):
    """Subscript assignment mutates, it does not create a new definition."""

    def test_fn(a):
      l = []
      l[0] = a
      return l

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    creation = fn_body[0].targets[0]
    mutation = fn_body[1].targets[0].value
    use = fn_body[2].value

    self.assertSameDef(creation, mutation)
    self.assertSameDef(creation, use)

  def test_deletion_partial(self):
    """`del` in one branch kills only that branch's definition."""

    def test_fn(a):
      a = 0
      if a:
        del a
      else:
        a = 1
      return a

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    first_def = fn_body[0].targets[0]
    second_def = fn_body[1].orelse[0].targets[0]
    use = fn_body[2].value

    self.assertNotSameDef(use, first_def)
    self.assertSameDef(use, second_def)

  def test_deletion_total(self):
    """An unconditional `del` leaves zero reaching definitions."""

    def test_fn(a):
      if a:
        a = 0
      else:
        a = 1
      del a
      return a

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    use = fn_body[2].value
    self.assertHasDefs(use, 0)

  def test_replacement(self):
    """Reassigning a symbol creates a definition distinct from the original."""

    def foo(a):
      return a

    def test_fn(a):
      a = foo(a)
      return a

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    param = node.args.args[0]
    source = fn_body[0].value.args[0]
    target = fn_body[0].targets[0]
    retval = fn_body[1].value
    self.assertSameDef(param, source)
    self.assertNotSameDef(source, target)
    self.assertSameDef(target, retval)

  def test_comprehension_leaking(self):
    """Comprehension targets leak in Python 2 only."""

    def test_fn(a):
      all(x for x in a)
      return x  # pylint:disable=undefined-variable

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    listcomp_target = fn_body[0].value.args[0].generators[0].target
    retval = fn_body[1].value

    # Python2 leaks comprehension symbols. Python3 doesn't.
    if six.PY2:
      self.assertSameDef(retval, listcomp_target)
    else:
      self.assertHasDefs(retval, 0)

  def test_function_definition(self):
    """A nested `def` counts as a definition of the function's name."""

    def test_fn():
      def a():
        pass
      if a:  # pylint:disable=using-constant-test
        a = None
      return a

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    self.assertHasDefs(fn_body[1].test, 1)
    self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
    self.assertHasDefs(fn_body[2].value, 2)

    self.assertHasDefinedIn(fn_body[1], ('a',))

  def test_global(self):
    """`global` statements generate definitions for the declared symbols."""

    def test_fn():
      global global_a
      global global_b
      if global_a:
        global_b = []
      return global_a, global_b

    node = self._parse_and_analyze(test_fn)
    fn_body = node.body

    self.assertHasDefs(fn_body[2].test, 1)
    self.assertHasDefs(fn_body[2].body[0].targets[0], 1)
    self.assertHasDefs(fn_body[3].value.elts[0], 1)
    self.assertHasDefs(fn_body[3].value.elts[1], 2)

    self.assertSameDef(fn_body[2].test, fn_body[3].value.elts[0])

    self.assertHasDefinedIn(fn_body[2], ('global_a', 'global_b'))
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/static_analysis/reaching_definitions_test.py
|
# python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for activity module, that only run in Python 3."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct.static_analysis import activity_test
from tensorflow.python.autograph.pyct.static_analysis import annos
from tensorflow.python.platform import test
# Shorthand for the annotation enum used throughout these tests.
NodeAnno = annos.NodeAnno
class ActivityAnalyzerTest(activity_test.ActivityAnalyzerTestBase):
  """Tests which can only run in Python 3."""

  # Note: the nested `test_fn` functions are parsed from source; do not add
  # statements (including docstrings) inside them.

  def test_nonlocal_symbol(self):
    """Symbols declared `nonlocal` are tracked as reads/writes normally."""
    nonlocal_a = 3
    nonlocal_b = 13

    def test_fn(c):
      nonlocal nonlocal_a
      nonlocal nonlocal_b
      nonlocal_a = nonlocal_b + c

    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
    # nonlocal_b and c are only read; nonlocal_a is written.
    self.assertScopeIs(body_scope, ('nonlocal_b', 'c'), ('nonlocal_a',))

  def test_annotated_assign(self):
    """The annotation symbol of an annotated assignment counts as a read."""
    b = int

    def test_fn(c):
      a: b = c
      return a

    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node

    body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
    self.assertScopeIs(body_scope, ('b', 'c', 'a'), ('a',))

    ann_assign_scope = anno.getanno(fn_node.body[0], anno.Static.SCOPE)
    self.assertScopeIs(ann_assign_scope, ('b', 'c'), ('a',))
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/static_analysis/activity_py3_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Annotations used by the static analyzer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
# TODO(mdan): Remove.
class NoValue(Enum):
  """Base class for annotation enums whose values are description strings.

  The repr shows only the member's name, keeping annotated-AST dumps terse.
  """

  def __repr__(self):
    # The member's value is a long human-readable description; omit it.
    return self.name
class NodeAnno(NoValue):
  """Additional annotations used by the static analyzer.

  These are in addition to the basic annotations declared in anno.py.
  """

  # Symbols
  # These flags are boolean.
  # Note: each member's value doubles as its human-readable documentation.
  IS_LOCAL = 'Symbol is local to the function scope being analyzed.'
  IS_PARAM = 'Symbol is a parameter to the function being analyzed.'
  IS_MODIFIED_SINCE_ENTRY = (
      'Symbol has been explicitly replaced in the current function scope.')

  # Scopes
  # Scopes are represented by objects of type activity.Scope.
  ARGS_SCOPE = 'The scope for the argument list of a function call.'
  COND_SCOPE = 'The scope for the test node of a conditional statement.'
  ITERATE_SCOPE = 'The scope for the iterate assignment of a for loop.'
  BODY_SCOPE = (
      'The scope for the main body of a statement (True branch for if '
      'statements, main body for loops).')
  ORELSE_SCOPE = (
      'The scope for the orelse body of a statement (False branch for if '
      'statements, orelse body for loops).')
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/static_analysis/annos.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reaching definition analysis.
This analysis attaches a set of a Definition objects to each symbol, one
for each distinct definition that may reach it. The Definition objects are
mutable and may be used by subsequent analyses to further annotate data like
static type and value information.
The analysis also attaches the set of the symbols defined at the entry of
control flow statements.
Requires activity analysis.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import annos
class Definition(object):
  """Definition objects describe a unique definition of a variable.

  Subclasses of this may be used by passing an appropriate factory function to
  resolve.

  Attributes:
    param_of: Optional[ast.AST]
  """

  def __init__(self):
    # Populated by the analyzer (with a weak reference to the parameter node)
    # when this definition originates from a function parameter.
    self.param_of = None

  def __repr__(self):
    # Identity matters for definitions; include it in the repr.
    return '{}[{}]'.format(self.__class__.__name__, id(self))
class _NodeState(object):
"""Abstraction for the state of the CFG walk for reaching definition analysis.
This is a value type. Only implements the strictly necessary operators.
Attributes:
value: Dict[qual_names.QN, Set[Definition, ...]], the defined symbols and
their possible definitions
"""
def __init__(self, init_from=None):
if init_from:
if isinstance(init_from, _NodeState):
self.value = {
s: set(other_infos) for s, other_infos in init_from.value.items()
}
elif isinstance(init_from, dict):
self.value = {s: set((init_from[s],)) for s in init_from}
else:
assert False, init_from
else:
self.value = {}
def __eq__(self, other):
if frozenset(self.value.keys()) != frozenset(other.value.keys()):
return False
ret = all(self.value[s] == other.value[s] for s in self.value)
return ret
def __ne__(self, other):
return not self.__eq__(other)
def __or__(self, other):
assert isinstance(other, _NodeState)
result = _NodeState(self)
for s, other_infos in other.value.items():
if s in result.value:
result.value[s].update(other_infos)
else:
result.value[s] = set(other_infos)
return result
def __sub__(self, other):
assert isinstance(other, set)
result = _NodeState(self)
for s in other:
result.value.pop(s, None)
return result
def __repr__(self):
return 'NodeState[%s]=%s' % (id(self), repr(self.value))
class Analyzer(cfg.GraphVisitor):
  """CFG visitor that determines reaching definitions at statement level."""

  def __init__(self, graph, definition_factory):
    self._definition_factory = definition_factory
    super(Analyzer, self).__init__(graph)
    # This allows communicating that nodes have extra reaching definitions,
    # e.g. those that a function closes over.
    self.extra_in = {}
    # Caches the _NodeState generated by each node so that Definition objects
    # remain singletons across fixed-point iterations.
    self.gen_map = {}

  def init_state(self, _):
    # Every CFG node starts with an empty in/out state.
    return _NodeState()

  def visit_node(self, node):
    """Computes the transfer function for one CFG node.

    Returns:
      bool, whether the node's output state changed (keeps the fixed-point
      iteration going).
    """
    prev_defs_out = self.out[node]

    # defs_in = union of predecessors' outputs, plus any externally-supplied
    # definitions (e.g. closures).
    defs_in = _NodeState(self.extra_in.get(node.ast_node, None))
    for n in node.prev:
      defs_in |= self.out[n]

    if anno.hasanno(node.ast_node, anno.Static.SCOPE):
      node_scope = anno.getanno(node.ast_node, anno.Static.SCOPE)
      # The definition objects created by each node must be singletons because
      # their ids are used in equality checks.
      if node not in self.gen_map:
        node_symbols = {}
        for s in node_scope.modified:
          def_ = self._definition_factory()
          if s in node_scope.params:
            def_.param_of = weakref.ref(node_scope.params[s])
          node_symbols[s] = def_
        self.gen_map[node] = _NodeState(node_symbols)

      # Classic gen/kill dataflow update.
      gen = self.gen_map[node]
      kill = node_scope.modified | node_scope.deleted
      defs_out = gen | (defs_in - kill)

    elif isinstance(node.ast_node, (gast.Global, gast.Nonlocal)):
      # Special case for global and nonlocal: they generate a definition,
      # but are not tracked by activity analysis.
      if node not in self.gen_map:
        node_symbols = {}
        for s in node.ast_node.names:
          qn = qual_names.QN(s)
          if qn in defs_in.value:
            # In Python 2, this is a syntax warning. In Python 3, it's an error.
            raise ValueError(
                '"{}" is assigned before global definition'.format(s))
          def_ = self._definition_factory()
          node_symbols[qn] = def_
        self.gen_map[node] = _NodeState(node_symbols)

      gen = self.gen_map[node]
      defs_out = defs_in | gen

    else:
      # Nodes that don't have a scope annotation are assumed not to touch any
      # symbols.
      # This Name node below is a literal name, e.g. False
      # This can also happen if activity.py forgot to annotate the node with a
      # scope object.
      assert isinstance(node.ast_node,
                        (gast.Name, gast.Break, gast.Continue, gast.Raise,
                         gast.Pass)), (node.ast_node, node)
      defs_out = defs_in

    self.in_[node] = defs_in
    self.out[node] = defs_out

    # TODO(mdan): Move this to the superclass?
    return prev_defs_out != defs_out
class TreeAnnotator(transformer.Base):
  """AST visitor that annotates each symbol name with its reaching definitions.

  Simultaneously, the visitor runs the dataflow analysis on each function node,
  accounting for the effect of closures. For example:

    def foo():
      bar = 1
      def baz():
        # bar = 1 reaches here
  """

  def __init__(self, source_info, graphs, definition_factory):
    super(TreeAnnotator, self).__init__(source_info)
    self.definition_factory = definition_factory
    # Dict[ast.FunctionDef, cfg.Graph]: one CFG per function in the tree.
    self.graphs = graphs
    # The Analyzer of the function currently being traversed, if any.
    self.current_analyzer = None
    # The CFG node matching the AST statement currently being traversed.
    self.current_cfg_node = None

  def visit_FunctionDef(self, node):
    """Runs the dataflow analysis on a function, wiring in closure state."""
    parent_analyzer = self.current_analyzer
    subgraph = self.graphs[node]

    # Preorder tree processing:
    #  1. if this is a child function, the parent was already analyzed and it
    #     has the proper state value for the subgraph's entry
    #  2. analyze the current function body
    #  2. recursively walk the subtree; child functions will be processed
    analyzer = Analyzer(subgraph, self.definition_factory)
    if parent_analyzer is not None:
      # Wire the state between the two subgraphs' analyzers.
      parent_out_state = parent_analyzer.out[parent_analyzer.graph.index[node]]
      # Exception: symbols modified in the child function are local to it
      body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
      parent_out_state -= body_scope.modified
      analyzer.extra_in[node.args] = parent_out_state

    # Complete the analysis for the local function and annotate its body.
    analyzer.visit_forward()

    # Recursively process any remaining subfunctions.
    self.current_analyzer = analyzer
    # Note: not visiting name, decorator_list and returns because they don't
    # apply to this analysis.
    # TODO(mdan): Should we still process the function name?
    node.args = self.visit(node.args)
    node.body = self.visit_block(node.body)
    self.current_analyzer = parent_analyzer

    return node

  def visit_Name(self, node):
    """Attaches the reaching definitions of the symbol to the Name node."""
    if self.current_analyzer is None:
      # Names may appear outside function defs - for example in class
      # definitions.
      return node

    analyzer = self.current_analyzer
    cfg_node = self.current_cfg_node

    assert cfg_node is not None, ('name node, %s, outside of any statement?'
                                  % node.id)

    qn = anno.getanno(node, anno.Basic.QN)
    # Reads see the definitions reaching the statement; writes see the
    # definitions leaving it (which include the write itself).
    if isinstance(node.ctx, gast.Load):
      anno.setanno(node, anno.Static.DEFINITIONS,
                   tuple(analyzer.in_[cfg_node].value.get(qn, ())))
    else:
      anno.setanno(node, anno.Static.DEFINITIONS,
                   tuple(analyzer.out[cfg_node].value.get(qn, ())))
    return node

  def _aggregate_predecessors_defined_in(self, node):
    # Records, on `node`, the set of symbols defined on entry, as the union of
    # the symbols defined at the exit of all its statement-level predecessors.
    preds = self.current_analyzer.graph.stmt_prev[node]
    node_defined_in = set()
    for p in preds:
      node_defined_in |= set(self.current_analyzer.out[p].value.keys())
    anno.setanno(node, anno.Static.DEFINED_VARS_IN, frozenset(node_defined_in))

  def visit_If(self, node):
    self._aggregate_predecessors_defined_in(node)
    return self.generic_visit(node)

  def visit_For(self, node):
    self._aggregate_predecessors_defined_in(node)

    # Manually accounting for the shortcoming described in
    # cfg.AstToCfg.visit_For.
    parent = self.current_cfg_node
    self.current_cfg_node = self.current_analyzer.graph.index[node.iter]
    node.target = self.visit(node.target)
    self.current_cfg_node = parent

    node.iter = self.visit(node.iter)
    node.body = self.visit_block(node.body)
    node.orelse = self.visit_block(node.orelse)

    return node

  def visit_While(self, node):
    self._aggregate_predecessors_defined_in(node)
    return self.generic_visit(node)

  def visit_Try(self, node):
    self._aggregate_predecessors_defined_in(node)
    return self.generic_visit(node)

  def visit_ExceptHandler(self, node):
    self._aggregate_predecessors_defined_in(node)
    # TODO(mdan): Also track the exception type / name symbols.
    node.body = self.visit_block(node.body)
    return node

  def visit(self, node):
    # Keeps current_cfg_node pointed at the CFG node of the innermost
    # statement being traversed, restoring it afterwards.
    parent = self.current_cfg_node

    if (self.current_analyzer is not None and
        node in self.current_analyzer.graph.index):
      self.current_cfg_node = self.current_analyzer.graph.index[node]
    node = super(TreeAnnotator, self).visit(node)

    self.current_cfg_node = parent
    return node
def resolve(node, source_info, graphs, definition_factory):
  """Resolves reaching definitions for each symbol.

  Args:
    node: ast.AST
    source_info: transformer.SourceInfo
    graphs: Dict[ast.FunctionDef, cfg.Graph]
    definition_factory: Callable[[], Definition]
  Returns:
    ast.AST
  """
  annotator = TreeAnnotator(source_info, graphs, definition_factory)
  return annotator.visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/static_analysis/reaching_definitions.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Conversion to A-normal form.
The general idea of A-normal form is that every intermediate value is
explicitly named with a variable. For more, see
https://en.wikipedia.org/wiki/A-normal_form.
The specific converters used here are based on Python AST semantics as
documented at https://greentreesnakes.readthedocs.io/en/latest/.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gast
import six
from tensorflow.python.autograph.pyct import gast_util
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
class DummyGensym(object):
  """A dumb gensym that suffixes a stem by sequential numbers from 1000."""

  def __init__(self, ctx):
    del ctx
    # A proper implementation needs to account for:
    #   * ctx.info.namespace
    #   * all the symbols defined in the AST
    #   * the symbols generated so far
    self._idx = 0

  def new_name(self, stem='tmp'):
    """Returns a fresh name of the form '<stem>_<1000+counter>'."""
    self._idx += 1
    return '{}_{}'.format(stem, 1000 + self._idx)
# Replacement policies used in ASTEdgePattern configurations: REPLACE pulls the
# matched child out into a named temporary; LEAVE keeps it in place.
REPLACE = lambda _1, _2, _3: True
LEAVE = lambda _1, _2, _3: False
# Wildcard sentinel: matches any parent type, field name or child type in an
# ASTEdgePattern slot.
ANY = object()
class ASTEdgePattern(collections.namedtuple(
    'ASTEdgePattern', ['parent', 'field', 'child'])):
  """A pattern defining a type of AST edge.

  This consists of three components:
  - The type of the parent node, checked with isinstance,
  - The name of the field, checked with string equality, and
  - The type of the child node, also checked with isinstance.
  If all three match, the whole pattern is considered to match.

  In all three slots, the special value `anf.ANY` is treated as "match
  anything".  The internal nodes are produced from the `gast` library rather
  than the standard `ast` module, which may affect `isinstance` checks.
  """
  __slots__ = ()

  def matches(self, parent, field, child):
    """Computes whether this pattern matches the given edge."""
    # Guard-clause form: bail out on the first non-matching component.
    if self.parent is not ANY and not isinstance(parent, self.parent):
      return False
    if self.field is not ANY and field != self.field:
      return False
    return self.child is ANY or isinstance(child, self.child)
class AnfTransformer(transformer.Base):
"""Performs the conversion to A-normal form (ANF)."""
# The algorithm is a postorder recursive tree walk. Any given node A may, in
# general, require creation of a series B of Assign statements, which compute
# and explicitly name the intermediate values needed to compute the value of
# A. If A was already a statement, it can be replaced with the sequence B +
# [A]. If A was an expression, B needs to be propagated up the tree until a
# statement is encountered. Since the `ast.NodeTransformer` framework makes
# no provision for subtraversals returning side information, this class
# accumulates the sequence B in an instance variable.
# The only other subtlety is that some Python statements (like `if`) have both
# expression fields (`test`) and statement list fields (`body` and `orelse`).
# Any additional assignments needed to name all the intermediate values in the
# `test` can be prepended to the `if` node, but assignments produced by
# processing the `body` and the `orelse` need to be kept together with them,
# and not accidentally lifted out of the `if`.
def __init__(self, ctx, config, gensym_source=None):
"""Creates an ANF transformer.
Args:
ctx: transformer.Context
config: Configuration
gensym_source: An optional object with the same interface as `DummyGensym`
for generating unique names
"""
super(AnfTransformer, self).__init__(ctx)
if config is None:
# These could be pulled out, but are generally considered to already be in
# A-normal form. Thus they are left in by default, but could be pulled
# out if the configuration calls for it.
if gast_util.GAST2:
literal_node_types = (
gast.Num, gast.Str, gast.Bytes, gast.NameConstant,
gast.Name # Name is here to cover True, False, and None in Python 2
)
elif gast_util.GAST3:
literal_node_types = (
gast.Constant,
gast.Name # Name is here to cover True, False, and None in Python 2
)
else:
assert False
self._overrides = [
(ASTEdgePattern(ANY, ANY, literal_node_types), LEAVE),
(ASTEdgePattern(ANY, ANY, gast.expr), REPLACE)]
else:
self._overrides = config
if gensym_source is None:
self._gensym = DummyGensym(ctx)
else:
self._gensym = gensym_source(ctx)
self._pending_statements = []
def _consume_pending_statements(self):
ans = self._pending_statements
self._pending_statements = []
return ans
def _add_pending_statement(self, stmt):
self._pending_statements.append(stmt)
def _match(self, pattern, parent, field, child):
if pattern is ANY:
return True
else:
return pattern.matches(parent, field, child)
def _should_transform(self, parent, field, child):
for pat, result in self._overrides:
if self._match(pat, parent, field, child):
return result(parent, field, child)
# Fell off the end of the pattern list: do not transform
return False
def _do_transform_node(self, node):
temp_name = self._gensym.new_name()
temp_assign = templates.replace(
'temp_name = expr', temp_name=temp_name, expr=node)[0]
self._add_pending_statement(temp_assign)
answer = templates.replace('temp_name', temp_name=temp_name)[0]
return answer
  def _ensure_node_in_anf(self, parent, field, node):
    """Puts `node` in A-normal form, by replacing it with a variable if needed.

    The exact definition of A-normal form is given by the configuration. The
    parent and the incoming field name are only needed because the configuration
    may be context-dependent.

    Args:
      parent: An AST node, the parent of `node`.
      field: The field name under which `node` is the child of `parent`.
      node: An AST node, potentially to be replaced with a variable reference.

    Returns:
      node: An AST node; the argument if transformation was not necessary,
        or the new variable reference if it was.
    """
    if node is None:
      return node
    if _is_trivial(node):
      # Trivial nodes (names, operators, etc.) never get extracted.
      return node
    if isinstance(node, list):
      # If something's field was actually a list, e.g., variadic arguments.
      return [self._ensure_node_in_anf(parent, field, n) for n in node]
    if isinstance(node, gast.keyword):
      # Only the keyword's value is an expression; the keyword node itself
      # cannot stand alone.
      node.value = self._ensure_node_in_anf(parent, field, node.value)
      return node
    if isinstance(node, (gast.Starred, gast.withitem, gast.slice)):
      # These nodes aren't really extractable in their own right, but their
      # subnodes might be. Propagate the parent and field name to the child
      # nodes, instead of querying the configuration for children of, e.g.,
      # gast.Starred.
      return self._ensure_fields_in_anf(node, parent, field)
    if self._should_transform(parent, field, node):
      return self._do_transform_node(node)
    else:
      return node
def _ensure_fields_in_anf(self, node, parent=None, super_field=None):
for field in node._fields:
if field.startswith('__'):
continue
parent_supplied = node if parent is None else parent
field_supplied = field if super_field is None else super_field
setattr(node, field, self._ensure_node_in_anf(
parent_supplied, field_supplied, getattr(node, field)))
return node
def _visit_strict_statement(self, node, children_ok_to_transform=True):
assert not self._pending_statements
node = self.generic_visit(node)
if children_ok_to_transform:
self._ensure_fields_in_anf(node)
results = self._consume_pending_statements()
results.append(node)
return results
def _visit_trivial_only_statement(self, node, msg):
assert not self._pending_statements
node = self.generic_visit(node)
self._ensure_fields_in_anf(node)
if self._pending_statements:
raise ValueError(msg)
else:
return node
def _visit_strict_expression(self, node):
node = self.generic_visit(node)
self._ensure_fields_in_anf(node)
return node
def _visit_trivial_only_expression(self, node, msg):
k = len(self._pending_statements)
node = self.generic_visit(node)
self._ensure_fields_in_anf(node)
# This check relies on there being no opportunities to consume pending
# statements while traversing children of an expression.
if len(self._pending_statements) != k:
raise ValueError(msg)
else:
return node
  # Note on code order: These are listed in the same order as the grammar
  # elements on https://github.com/serge-sans-paille/gast

  # FunctionDef, AsyncFunctionDef, and ClassDef should be correct by default.

  def visit_Return(self, node):
    return self._visit_strict_statement(node)

  def visit_Delete(self, node):
    # Per the module contract, children of Del nodes are never replaced with
    # variables, as that would break their semantics.
    return self._visit_strict_statement(node, children_ok_to_transform=False)

  def visit_Assign(self, node):
    # Neither side of an Assign is replaced: the LHS is a target, and the RHS
    # is already being named by this very assignment.
    return self._visit_strict_statement(node, children_ok_to_transform=False)

  def visit_AugAssign(self, node):
    return self._visit_strict_statement(node, children_ok_to_transform=False)

  def visit_Print(self, node):
    # Python 2 print statement.
    return self._visit_strict_statement(node)
  def visit_For(self, node):
    """Visits a `for`, hoisting statements extracted from the iterable."""
    assert not self._pending_statements
    # It's important to visit node.iter first, because any statements created
    # thereby need to live outside the body.
    self.visit(node.iter)
    node.iter = self._ensure_node_in_anf(node, 'iter', node.iter)
    iter_stmts = self._consume_pending_statements()
    # This generic_visit will revisit node.iter, but that is correct because by
    # this point the node.iter link has been checked. It may be somewhat
    # expensive if the configuration didn't call for transforming node.iter, as
    # then it may be large and will be uselessly transformed again. This
    # behavior is what causes the documented effect that configuration callables
    # may be invoked more than once on the same links; if the code is rewritten
    # not to do that (anywhere), the docstring of `transform` should be updated.
    node = self.generic_visit(node)
    assert not self._pending_statements
    iter_stmts.append(node)
    return iter_stmts
  def visit_AsyncFor(self, node):
    msg = ('Nontrivial AsyncFor nodes not supported yet '
           '(need to think through the semantics).')
    return self._visit_trivial_only_statement(node, msg)

  def visit_While(self, node):
    """Visits a `while`, rejecting tests that would need extraction."""
    assert not self._pending_statements
    self.visit(node.test)
    node.test = self._ensure_node_in_anf(node, 'test', node.test)
    if self._pending_statements:
      # Unlike `if`, hoisting the test's statements above the loop would
      # evaluate them only once instead of on every iteration.
      msg = ('While with nontrivial test not supported yet '
             '(need to avoid precomputing the test).')
      raise ValueError(msg)
    # If traversing node.test yielded no statements extracted, the generic visit
    # will do the right thing.
    return self.generic_visit(node)
  def visit_If(self, node):
    """Visits an `if`, hoisting statements extracted from the condition."""
    assert not self._pending_statements
    # It's important to visit node.test first, because any statements created
    # thereby need to live outside the body.
    self.visit(node.test)
    node.test = self._ensure_node_in_anf(node, 'test', node.test)
    condition_stmts = self._consume_pending_statements()
    # This generic_visit will revisit node.test, but that is correct because by
    # this point the node.test link has been checked. It may be somewhat
    # expensive if the configuration didn't call for transforming node.test, as
    # then it may be large and will be uselessly transformed again. This
    # happens in several places.
    node = self.generic_visit(node)
    assert not self._pending_statements
    condition_stmts.append(node)
    return condition_stmts
  def visit_With(self, node):
    """Visits a `with`, hoisting statements extracted from context exprs."""
    assert not self._pending_statements
    # It's important to visit node.items first, because any statements created
    # thereby need to live outside the body.
    for item in node.items:
      self.visit(item)
    node.items = [self._ensure_node_in_anf(node, 'items', n)
                  for n in node.items]
    contexts_stmts = self._consume_pending_statements()
    # This generic_visit will revisit node.items, but that is correct because by
    # this point the node.items link has been checked. It may be somewhat
    # expensive if the configuration didn't call for transforming node.items, as
    # then it may be large and will be uselessly transformed again. This
    # happens in several places.
    node = self.generic_visit(node)
    assert not self._pending_statements
    contexts_stmts.append(node)
    return contexts_stmts

  def visit_AsyncWith(self, node):
    msg = ('Nontrivial AsyncWith nodes not supported yet '
           '(need to think through the semantics).')
    return self._visit_trivial_only_statement(node, msg)

  def visit_Raise(self, node):
    return self._visit_strict_statement(node)
  # Try should be correct by default.

  def visit_Assert(self, node):
    msg = ('Nontrivial Assert nodes not supported yet '
           '(need to avoid computing the test when assertions are off, and '
           'avoid computing the irritant when the assertion does not fire).')
    return self._visit_trivial_only_statement(node, msg)

  # Import and ImportFrom should be correct by default.

  def visit_Exec(self, node):
    # Python 2 exec statement.
    return self._visit_strict_statement(node)

  # Global and Nonlocal should be correct by default.

  def visit_Expr(self, node):
    # The statement's own expression is not extracted
    # (children_ok_to_transform=False); its subexpressions are still handled
    # by the generic visit inside _visit_strict_statement.
    return self._visit_strict_statement(node, children_ok_to_transform=False)

  # Pass, Break, and Continue should be correct by default.
  def visit_BoolOp(self, node):
    # `and`/`or` evaluate operands lazily; extracting them into assignments
    # would force eager evaluation.
    msg = ('Nontrivial BoolOp nodes not supported yet '
           '(need to preserve short-circuiting semantics).')
    return self._visit_trivial_only_expression(node, msg)

  def visit_BinOp(self, node):
    return self._visit_strict_expression(node)

  def visit_UnaryOp(self, node):
    return self._visit_strict_expression(node)

  def visit_Lambda(self, node):
    msg = ('Nontrivial Lambda nodes not supported '
           '(cannot insert statements into lambda bodies).')
    return self._visit_trivial_only_expression(node, msg)

  def visit_IfExp(self, node):
    msg = ('Nontrivial IfExp nodes not supported yet '
           '(need to convert to If statement, to evaluate branches lazily '
           'and insert statements into them).')
    return self._visit_trivial_only_expression(node, msg)
  def visit_Dict(self, node):
    return self._visit_strict_expression(node)

  def visit_Set(self, node):
    return self._visit_strict_expression(node)

  # Comprehensions are rejected outright: their clause bodies cannot hold the
  # assignment statements that extraction would need to insert.

  def visit_ListComp(self, node):
    msg = ('ListComp nodes not supported '
           '(need to convert to a form that tolerates '
           'assignment statements in clause bodies).')
    raise ValueError(msg)

  def visit_SetComp(self, node):
    msg = ('SetComp nodes not supported '
           '(need to convert to a form that tolerates '
           'assignment statements in clause bodies).')
    raise ValueError(msg)

  def visit_DictComp(self, node):
    msg = ('DictComp nodes not supported '
           '(need to convert to a form that tolerates '
           'assignment statements in clause bodies).')
    raise ValueError(msg)

  def visit_GeneratorExp(self, node):
    msg = ('GeneratorExp nodes not supported '
           '(need to convert to a form that tolerates '
           'assignment statements in clause bodies).')
    raise ValueError(msg)
  def visit_Await(self, node):
    msg = ('Nontrivial Await nodes not supported yet '
           '(need to think through the semantics).')
    return self._visit_trivial_only_expression(node, msg)

  def visit_Yield(self, node):
    return self._visit_strict_expression(node)

  def visit_YieldFrom(self, node):
    msg = ('Nontrivial YieldFrom nodes not supported yet '
           '(need to unit-test them in Python 2).')
    return self._visit_trivial_only_expression(node, msg)

  def visit_Compare(self, node):
    # Chained comparisons (a < b < c) short-circuit, so naming all operands
    # up front would change evaluation; only single comparisons are handled.
    if len(node.ops) > 1:
      msg = ('Multi-ary compare nodes not supported yet '
             '(need to preserve short-circuiting semantics).')
      raise ValueError(msg)
    return self._visit_strict_expression(node)

  def visit_Call(self, node):
    return self._visit_strict_expression(node)

  def visit_Repr(self, node):
    # Python 2 backtick-repr expression.
    msg = ('Nontrivial Repr nodes not supported yet '
           '(need to research their syntax and semantics).')
    return self._visit_trivial_only_expression(node, msg)

  def visit_FormattedValue(self, node):
    msg = ('Nontrivial FormattedValue nodes not supported yet '
           '(need to unit-test them in Python 2).')
    return self._visit_trivial_only_expression(node, msg)

  def visit_JoinedStr(self, node):
    msg = ('Nontrivial JoinedStr nodes not supported yet '
           '(need to unit-test them in Python 2).')
    return self._visit_trivial_only_expression(node, msg)

  def visit_Attribute(self, node):
    return self._visit_strict_expression(node)

  def visit_Subscript(self, node):
    return self._visit_strict_expression(node)
  # Starred and Name are correct by default, because the right thing to do is to
  # just recur.

  def visit_List(self, node):
    node = self.generic_visit(node)
    # A list in Store context is an assignment target (e.g. [a, b] = x);
    # extracting its elements would break the assignment.
    if not isinstance(node.ctx, gast.Store):
      self._ensure_fields_in_anf(node)
    return node

  def visit_Tuple(self, node):
    node = self.generic_visit(node)
    # A tuple in Store context is an assignment target (e.g. a, b = x);
    # extracting its elements would break the assignment.
    if not isinstance(node.ctx, gast.Store):
      self._ensure_fields_in_anf(node)
    return node
def _is_py2_name_constant(node):
  """Detects Python 2's Name-node encoding of True, False, and None."""
  return isinstance(node, gast.Name) and node.id in ('True', 'False', 'None')
def _is_trivial(node):
  """Returns whether to consider the given node 'trivial'.

  The definition of 'trivial' is a node that can't meaningfully be pulled out
  into its own assignment statement.

  This is surprisingly difficult to do robustly across versions of Python and
  gast, as the parsing of constants has changed, if I may, constantly.

  Args:
    node: An AST node to check for triviality

  Returns:
    trivial: A Python `bool` indicating whether the node is trivial.
  """
  trivial_node_types = (
      # Variable names
      gast.Name,
      # Non-nodes that show up as AST fields
      bool, six.string_types,
      # Binary operators
      gast.Add, gast.Sub, gast.Mult, gast.Div, gast.Mod, gast.Pow,
      gast.LShift, gast.RShift, gast.BitOr, gast.BitXor, gast.BitAnd,
      gast.FloorDiv,
      # Unary operators
      gast.Invert, gast.Not, gast.UAdd, gast.USub,
      # Comparison operators
      gast.Eq, gast.NotEq, gast.Lt, gast.LtE, gast.Gt, gast.GtE,
      gast.Is, gast.IsNot, gast.In, gast.NotIn,
      # Other leaf nodes that don't make sense standalone.
      gast.expr_context,
  )
  # In Python 2, True/False/None parse as gast.Name but, unlike real variable
  # references, they can be extracted (see AnfConfiguredTest below), so they
  # are explicitly excluded from triviality.
  if isinstance(node, trivial_node_types) and not _is_py2_name_constant(node):
    return True
  if gast_util.is_ellipsis(node):
    return True
  return False
def transform(node, ctx, config=None, gensym_source=None):
  """Converts the given node to A-normal form (ANF).

  The general idea of A-normal form: https://en.wikipedia.org/wiki/A-normal_form

  The specific converters used here are based on Python AST semantics as
  documented at https://greentreesnakes.readthedocs.io/en/latest/.

  What exactly should be considered A-normal form for any given programming
  language is not completely obvious. The transformation defined here is
  therefore configurable as to which syntax to replace with a fresh variable and
  which to leave be. The configuration is intentionally flexible enough to
  define very precise variable insertion transformations, should that be
  desired.

  The configuration is a list of syntax rules, each of which is a 2-tuple:
  - An `ASTEdgePattern` (which see) defining a type of AST edge, and
  - Whether to transform children of such edges.
  The special object `anf.ANY` may be used as a pattern that matches all edges.

  Each replacement directive is one of three possible things:
  - The object `anf.REPLACE`, meaning "Replace this child node with a variable",
  - The object `anf.LEAVE`, meaning "Do not replace this child node with a
    variable", or
  - A Python callable. If a callable, it is called with the parent node, the
    field name, and the child node, and must compute a boolean indicating
    whether to transform the child node or not. The callable is free to use
    whatever context information it chooses. The callable may be invoked more
    than once on the same link, and must produce the same answer each time.

  The syntax rules are tested in order, and the first match governs. If no rule
  matches, the node is not transformed.

  The above rules notwithstanding,
  - Variable references are never replaced with (fresh) variables, as that would
    accomplish nothing.
  - The left-hand children of Assign and AugAssign nodes, and the children of
    Del nodes, are never replaced with variables, as that would break their
    semantics.
  - The right-hand children of Assign nodes are never replaced with variables,
    as the original assignment would still have to be present in the result
    to define the new variable. (That is, there's no point in transforming
    `x = sin(y)` into `tmp = sin(y); x = tmp`.)
  - The right-hand children of AugAssign nodes are never replaced with variables
    either, but only because the difference from Assign was considered a
    potential source of confusion (and it would have been slightly awkward in
    the code to treat the RHS differently than the LHS).
  - Various special-purpose AST nodes are not exposed to the configuration, lest
    the transform produce invalid syntax like, e.g., `tmp = +; x = 1 tmp 2`.

  For example, the configuration
  ```python
  [(anf.ASTEdgePattern(anf.ANY, anf.ANY, gast.expr), anf.REPLACE)]
  ```
  gives explicit fresh names to all expressions regardless of context (except as
  outlined above), whereas
  ```python
  [(anf.ASTEdgePattern(gast.If, "test", anf.ANY), anf.REPLACE)]
  ```
  only transforms the conditionals of `if` statements (but not, e.g., `while`).

  If no configuration is supplied, the default behavior is to transform all
  expressions except literal constants, which is defined as a configuration as
  ```python
  # For Python 3, and gast library versions before 0.3
  literals = (gast.Num, gast.Str, gast.Bytes, gast.NameConstant)
  [(anf.ASTEdgePattern(anf.ANY, anf.ANY, literals), anf.LEAVE),
   (anf.ASTEdgePattern(anf.ANY, anf.ANY, gast.expr), anf.REPLACE)]
  ```

  Args:
    node: The node to transform.
    ctx: transformer.EntityInfo. TODO(mdan): What information does this
      argument provide?
    config: Optional ANF configuration. If omitted, ANF replaces all
      expressions except literal constants.
    gensym_source: An optional object with the same interface as `DummyGensym`
      for generating unique names.

  Returns:
    The transformed node(s), in A-normal form.
  """
  return AnfTransformer(ctx, config, gensym_source=gensym_source).visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/common_transformers/anf.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anf module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
import gast
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.common_transformers import anf
from tensorflow.python.platform import test
class DummyGensym(object):
  """A dumb gensym that suffixes a stem by sequential numbers from 1000."""

  def __init__(self, ctx):
    # The context is accepted only for interface compatibility.  A proper
    # implementation needs to account for:
    # * ctx.info.namespace
    # * all the symbols defined in the AST
    # * the symbols generated so far
    del ctx
    self._idx = 0

  def new_name(self, stem='tmp'):
    """Returns a fresh name of the form '<stem>_100N'."""
    self._idx = self._idx + 1
    return '{}_{}'.format(stem, 1000 + self._idx)
# These two test functions have to be top-level, not nested, for compatibility
# with some unknown version of Python 2.7 preceding 2.7.15. Why? Because
# `exec` and nested function definitions _incompatibly_ change the
# representation of local variables, such that `exec` inside a nested function
# definition is a syntax error in that version. The tuple form of `exec` fixes
# this problem, but apparently that was introduced in some unknown version of
# Python that's more recent than at least one version that we wish to be
# compatible with.
# NOTE(review): Neither function below is ever called -- they are only parsed
# and compared as ASTs by test_exec ('computed' + 5 would raise TypeError if
# actually executed).


def exec_test_function():
  # The point is to test A-normal form conversion of exec
  # pylint: disable=exec-used
  exec('computed' + 5 + 'stuff', globals(), locals())


def exec_expected_result():
  # pylint: disable=exec-used
  tmp_1001 = 'computed' + 5
  tmp_1002 = tmp_1001 + 'stuff'
  tmp_1003 = globals()
  tmp_1004 = locals()
  exec(tmp_1002, tmp_1003, tmp_1004)
class AnfTestBase(test.TestCase):
  """Shared helpers for comparing ANF transforms against expected sources."""

  def _simple_context(self):
    # A minimal transformer.Context; the ANF transform here needs no source
    # code, file, or namespace information.
    entity_info = transformer.EntityInfo(
        source_code=None, source_file=None, future_features=(), namespace=None)
    return transformer.Context(entity_info)

  def assert_same_ast(self, expected_node, node, msg=None):
    """Asserts two ASTs are equivalent by comparing their generated sources."""
    expected_source = compiler.ast_to_source(expected_node, indentation='  ')
    expected_str = textwrap.dedent(expected_source).strip()
    got_source = compiler.ast_to_source(node, indentation='  ')
    got_str = textwrap.dedent(got_source).strip()
    self.assertEqual(expected_str, got_str, msg=msg)

  def assert_body_anfs_as_expected(self, expected_fn, test_fn, config=None):
    """Asserts ANF-transforming `test_fn` yields `expected_fn`'s body."""
    # Testing the code bodies only. Wrapping them in functions so the
    # syntax highlights nicely, but Python doesn't try to execute the
    # statements.
    exp_node, _ = parser.parse_entity(expected_fn, future_features=())
    node, _ = parser.parse_entity(test_fn, future_features=())
    node = anf.transform(
        node, self._simple_context(),
        config=config, gensym_source=DummyGensym)
    exp_name = exp_node.name
    # Ignoring the function names in the result because they can't be
    # the same (because both functions have to exist in the same scope
    # at the same time).
    node.name = exp_name
    self.assert_same_ast(exp_node, node)
    # Check that ANF is idempotent
    node_repeated = anf.transform(
        node, self._simple_context(), gensym_source=DummyGensym)
    self.assert_same_ast(node_repeated, node)
class AnfTransformerTest(AnfTestBase):
  """Spot-checks ANF conversion under the default configuration.

  Each test pairs a `test_function` fixture with an `expected_result` function
  whose body is the anticipated ANF translation, using DummyGensym's
  sequential `tmp_100N` temporary names. The fixture source text is what gets
  parsed and compared, so it must not be edited casually.
  """

  def test_basic(self):
    def test_function():
      a = 0
      return a
    node, _ = parser.parse_entity(test_function, future_features=())
    node = anf.transform(node, self._simple_context())
    result, _, _ = compiler.ast_to_object(node)
    self.assertEqual(test_function(), result.test_function())

  def test_binop_basic(self):
    def test_function(x, y, z):
      a = x + y + z
      return a

    def expected_result(x, y, z):
      tmp_1001 = x + y
      a = tmp_1001 + z
      return a

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_if_basic(self):
    def test_function(a, b, c, e, f, g):
      if a + b + c:
        d = e + f + g
        return d

    def expected_result(a, b, c, e, f, g):
      tmp_1001 = a + b
      tmp_1002 = tmp_1001 + c
      if tmp_1002:
        tmp_1003 = e + f
        d = tmp_1003 + g
        return d

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_nested_binop_and_return(self):
    def test_function(b, c, d, e):
      return (2 * b + c) + (d + e)

    def expected_result(b, c, d, e):
      tmp_1001 = 2 * b
      tmp_1002 = tmp_1001 + c
      tmp_1003 = d + e
      tmp_1004 = tmp_1002 + tmp_1003
      return tmp_1004

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_function_call_and_expr(self):
    def test_function(call_something, a, b, y, z, c, d, e, f, g, h, i):
      call_something(a + b, y * z, kwarg=c + d, *(e + f), **(g + h + i))

    def expected_result(call_something, a, b, y, z, c, d, e, f, g, h, i):
      tmp_1001 = g + h
      tmp_1002 = a + b
      tmp_1003 = y * z
      tmp_1004 = e + f
      tmp_1005 = c + d
      tmp_1006 = tmp_1001 + i
      call_something(tmp_1002, tmp_1003, kwarg=tmp_1005, *tmp_1004, **tmp_1006)

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_with_and_print(self):
    def test_function(a, b, c):
      with a + b + c as d:
        print(2 * d + 1)

    def expected_result(a, b, c):
      tmp_1001 = a + b
      tmp_1002 = tmp_1001 + c
      with tmp_1002 as d:
        tmp_1003 = 2 * d
        tmp_1004 = tmp_1003 + 1
        print(tmp_1004)

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_nested_multi_value_assign(self):
    def test_function(a, b, c):
      x, y = a, a + b
      (z, y), x = (c, y + b), x + a
      return z, (y, x)

    def expected_result(a, b, c):
      tmp_1001 = a + b
      x, y = a, tmp_1001
      tmp_1002 = y + b
      tmp_1003 = (c, tmp_1002)
      tmp_1004 = x + a
      (z, y), x = tmp_1003, tmp_1004
      tmp_1005 = y, x
      tmp_1006 = z, tmp_1005
      return tmp_1006

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_deeply_nested_multi_value_assign(self):
    def test_function(a):
      [([(b, c), [d, e]], (f, g)), [(h, i, j), k]] = a
      return [([(b, c), [d, e]], (f, g)), [(h, i, j), k]]

    def expected_result(a):
      [([(b, c), [d, e]], (f, g)), [(h, i, j), k]] = a
      tmp_1001 = b, c
      tmp_1002 = [d, e]
      tmp_1003 = [tmp_1001, tmp_1002]
      tmp_1004 = f, g
      tmp_1005 = h, i, j
      tmp_1006 = tmp_1003, tmp_1004
      tmp_1007 = [tmp_1005, k]
      tmp_1008 = [tmp_1006, tmp_1007]
      return tmp_1008

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_local_definition_and_binary_compare(self):
    def test_function():
      def foo(a, b):
        return 2 * a < b
      return foo

    def expected_result():
      def foo(a, b):
        tmp_1001 = 2 * a
        tmp_1002 = tmp_1001 < b
        return tmp_1002
      return foo

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_list_literal(self):
    def test_function(a, b, c, d, e, f):
      return [a + b, c + d, e + f]

    def expected_result(a, b, c, d, e, f):
      tmp_1001 = a + b
      tmp_1002 = c + d
      tmp_1003 = e + f
      tmp_1004 = [tmp_1001, tmp_1002, tmp_1003]
      return tmp_1004

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_tuple_literal_and_unary(self):
    def test_function(a, b, c, d, e, f):
      return (a + b, -(c + d), e + f)

    def expected_result(a, b, c, d, e, f):
      tmp_1001 = c + d
      tmp_1002 = a + b
      tmp_1003 = -tmp_1001
      tmp_1004 = e + f
      tmp_1005 = (tmp_1002, tmp_1003, tmp_1004)
      return tmp_1005

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_set_literal(self):
    def test_function(a, b, c, d, e, f):
      return set(a + b, c + d, e + f)

    def expected_result(a, b, c, d, e, f):
      tmp_1001 = a + b
      tmp_1002 = c + d
      tmp_1003 = e + f
      tmp_1004 = set(tmp_1001, tmp_1002, tmp_1003)
      return tmp_1004

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_dict_literal_and_repr(self):
    def test_function(foo, bar, baz):
      return repr({foo + bar + baz: 7 | 8})

    def expected_result(foo, bar, baz):
      tmp_1001 = foo + bar
      tmp_1002 = tmp_1001 + baz
      tmp_1003 = 7 | 8
      tmp_1004 = {tmp_1002: tmp_1003}
      tmp_1005 = repr(tmp_1004)
      return tmp_1005

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_field_read_and_write(self):
    def test_function(a, d):
      a.b.c = d.e.f + 3

    def expected_result(a, d):
      tmp_1001 = a.b
      tmp_1002 = d.e
      tmp_1003 = tmp_1002.f
      tmp_1001.c = tmp_1003 + 3

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_subscript_read_and_write(self):
    def test_function(a, b, c, d, e, f):
      a[b][c] = d[e][f] + 3

    def expected_result(a, b, c, d, e, f):
      tmp_1001 = a[b]
      tmp_1002 = d[e]
      tmp_1003 = tmp_1002[f]
      tmp_1001[c] = tmp_1003 + 3

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_augassign_and_delete(self):
    def test_function(a, x, y, z):
      a += x + y + z
      del a
      del z[y][x]

    def expected_result(a, x, y, z):
      tmp_1001 = x + y
      a += tmp_1001 + z
      del a
      tmp_1002 = z[y]
      del tmp_1002[x]

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_raise_yield_and_raise(self):
    def test_function(a, c, some_computed, exception):
      yield a ** c
      raise some_computed('complicated' + exception)

    def expected_result(a, c, some_computed, exception):
      tmp_1001 = a ** c
      yield tmp_1001
      tmp_1002 = 'complicated' + exception
      tmp_1003 = some_computed(tmp_1002)
      raise tmp_1003

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_with_and_if_with_expressions(self):
    def test_function(foo, bar, function, quux, quozzle, w, x, y, z):
      with foo + bar:
        function(x + y)
      if quux + quozzle:
        function(z / w)

    def expected_result(foo, bar, function, quux, quozzle, w, x, y, z):
      tmp_1001 = foo + bar
      with tmp_1001:
        tmp_1002 = x + y
        function(tmp_1002)
      tmp_1003 = quux + quozzle
      if tmp_1003:
        tmp_1004 = z / w
        function(tmp_1004)

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_exec(self):
    # Fixtures are top-level; see the comment above exec_test_function.
    self.assert_body_anfs_as_expected(exec_expected_result, exec_test_function)

  def test_simple_while_and_assert(self):
    def test_function(foo, quux):
      while foo:
        assert quux
        foo = foo + 1 * 3

    def expected_result(foo, quux):
      while foo:
        assert quux
        tmp_1001 = 1 * 3
        foo = foo + tmp_1001

    self.assert_body_anfs_as_expected(expected_result, test_function)

  def test_for(self):
    def test_function(compute, something, complicated, foo):
      for foo in compute(something + complicated):
        bar = foo + 1 * 3
      return bar

    def expected_result(compute, something, complicated, foo):
      tmp_1001 = something + complicated
      tmp_1002 = compute(tmp_1001)
      for foo in tmp_1002:
        tmp_1003 = 1 * 3
        bar = foo + tmp_1003
      return bar

    self.assert_body_anfs_as_expected(expected_result, test_function)

  # This test collects several examples where the definition of A-normal form
  # implemented by this transformer is questionable. Mostly it's here to spell
  # out what the definition is in these cases.
  def test_controversial(self):
    def test_function(b, c, d, f):
      a = c + d
      a.b = c + d
      a[b] = c + d
      a += c + d
      a, b = c
      a, b = c, d
      a = f(c)
      a = f(c + d)
      a[b + d] = f.e(c + d)

    def expected_result(b, c, d, f):
      a = c + d
      a.b = c + d  # Should be a.b = tmp?  (Definitely not tmp = c + d)
      a[b] = c + d  # Should be a[b] = tmp?  (Definitely not tmp = c + d)
      a += c + d  # Should be a += tmp?  (Definitely not tmp = c + d)
      a, b = c  # Should be a = c[0], b = c[1]?  Or not?
      a, b = c, d  # Should be a = c, b = d?  Or not?
      a = f(c)
      tmp_1001 = c + d
      a = f(tmp_1001)
      tmp_1002 = b + d
      tmp_1003 = f.e
      tmp_1004 = c + d
      a[tmp_1002] = tmp_1003(tmp_1004)  # Or should be a[tmp1] = tmp2?

    self.assert_body_anfs_as_expected(expected_result, test_function)
class AnfNonTransformationTest(AnfTransformerTest):
  """Test that specifying "no transformation" does nothing.

  Reuses all the examples of AnfTransformerTest by overriding
  `assert_body_anfs_as_expected`.
  """

  def assert_body_anfs_as_expected(self, expected_fn, test_fn, config=None):
    """Checks that a no-op ANF configuration leaves `test_fn` unchanged.

    Overrides the base helper; the signature keeps the base class's `config`
    parameter so inherited callers remain substitutable, but the argument is
    ignored because this class always transforms nothing.

    Args:
      expected_fn: Unused; the original source itself is the expectation here.
      test_fn: The function whose body is round-tripped through anf.transform.
      config: Ignored; replaced by the do-nothing configuration.
    """
    del expected_fn  # The untransformed source is the expected result.
    del config  # Always use the do-nothing configuration below.
    # Testing the code bodies only. Wrapping them in functions so the
    # syntax highlights nicely, but Python doesn't try to execute the
    # statements.
    node, _ = parser.parse_entity(test_fn, future_features=())
    orig_source = compiler.ast_to_source(node, indentation='  ')
    orig_str = textwrap.dedent(orig_source).strip()
    config = [(anf.ANY, anf.LEAVE)]  # Configuration to transform nothing
    node = anf.transform(
        node, self._simple_context(),
        config=config, gensym_source=DummyGensym)
    new_source = compiler.ast_to_source(node, indentation='  ')
    new_str = textwrap.dedent(new_source).strip()
    self.assertEqual(orig_str, new_str)
class AnfConfiguredTest(AnfTestBase):
  """Exercises ANF under non-default, user-supplied configurations."""

  def test_constants_in_function_calls(self):
    # An example specific configuration that differs from the default: Moving
    # literals out of being directly passed to functions, but nothing else.
    try:
      # TODO(b/140808434): Fix this.
      # gast pre-0.3
      literals = (gast.Num, gast.Str, gast.Bytes, gast.NameConstant, gast.Name)
    except AttributeError:
      # gast 0.3+ (the attribute access above raises when Num etc. are gone)
      literals = (gast.Constant, gast.Name)
    config = [(anf.ASTEdgePattern(gast.Call, anf.ANY, literals), anf.REPLACE)]

    def test_function(x, frob):
      return frob(x, x+1, 2)

    def expected_result(x, frob):
      tmp_1001 = 2
      return frob(x, x+1, tmp_1001)

    self.assert_body_anfs_as_expected(expected_result, test_function, config)

  def test_anf_some_function_calls(self):
    # Another example specific configuration that differs from the default:
    # Moving all arguments out of some function calls but leaving others be.
    whitelist = ['foo']

    def transform(parent, field, child):
      # Directive callable: transform only calls whose callee name is
      # in the whitelist.
      del field
      del child
      func_name = parent.func.id
      return str(func_name) in whitelist

    config = [(anf.ASTEdgePattern(gast.Call, anf.ANY, anf.ANY), transform)]

    def test_function(x, foo, bar):
      y = foo(x, x+1, 2)
      return bar(y, y+1, 2)

    def expected_result(x, foo, bar):
      tmp_1001 = x+1
      tmp_1002 = 2
      y = foo(x, tmp_1001, tmp_1002)
      return bar(y, y+1, 2)

    self.assert_body_anfs_as_expected(expected_result, test_function, config)

  def test_touching_name_constant(self):
    # Checking that the nodes for `True`, `False`, and `None` can be manipulated
    # by a configuration. This is non-trivial, because in Python 2 those are
    # represented as `Name`, which is the same node type as variable references.
    specials = (gast.Name, gast.Constant)
    config = [(anf.ASTEdgePattern(gast.Call, anf.ANY, specials), anf.REPLACE)]

    def test_function(f):
      return f(True, False, None)

    def expected_result(f):
      tmp_1001 = True
      tmp_1002 = False
      tmp_1003 = None
      return f(tmp_1001, tmp_1002, tmp_1003)

    self.assert_body_anfs_as_expected(expected_result, test_function, config)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/common_transformers/anf_test.py
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/common_transformers/__init__.py
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for type_info module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct.testing import codegen
from tensorflow.python.platform import test
class CodeGenTest(test.TestCase):
  """Smoke tests for the random code generator."""

  def test_codegen_gens(self):
    """Every randomly generated AST must convert back to valid source."""
    np.random.seed(0)
    for _ in range(1000):
      generated_node = codegen.generate_random_functiondef()
      compiled = compiler.ast_to_object(generated_node)
      self.assertIsNotNone(
          compiled, 'Generated invalid AST that could not convert to source.')
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/testing/codegen_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module with basic entity definitions for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import with_statement # An extra future import for testing.
# NOTE(review): fixture entity — source text may be inspected by tests;
# code kept verbatim, comments only.
def simple_function(x):
  """Docstring."""
  return x # comment
# Fixture exercising a nested function definition; kept verbatim.
def nested_functions(x):
  """Docstring."""
  def inner_fn(y):
    return y
  return inner_fn(x)
# Fixture exercising a bare print call; kept verbatim.
def function_with_print():
  print('foo')
# Fixture: a lambda entity (no docstring possible); kept verbatim.
simple_lambda = lambda: None
# Fixture class with two simple methods; kept verbatim.
class SimpleClass(object):

  def simple_method(self):
    return self

  def method_with_print(self):
    print('foo')
# Fixture exercising a call whose arguments span multiple lines; kept verbatim.
def function_with_multiline_call(x):
  """Docstring."""
  return range(
      x,
      x + 1,
  )
# Fixture decorator: returns the function unchanged; kept verbatim.
def basic_decorator(f):
  return f
# Fixture with stacked decorators and a branch; kept verbatim.
@basic_decorator
@basic_decorator
def decorated_function(x):
  if x > 0:
    return 1
  return 2
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/testing/basic_definitions.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Random code generation for testing/fuzzing."""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import string
import gast
import numpy as np
from tensorflow.python.autograph.pyct import templates
class NodeSampler(object):
  """Base class for weighted random sampling of AST node choices.

  Subclasses populate `sample_map` with a mapping from choice (typically a
  gast node class) to an unnormalized numeric weight.
  """

  # Mapping of choice -> unnormalized weight; overridden by subclasses.
  sample_map = None

  def sample(self):
    """Return one key of `sample_map`, drawn with probability ~ its weight."""
    choices, weights = zip(*self.sample_map.items())
    probabilities = np.array(weights, dtype='float32') / np.sum(weights)
    return np.random.choice(choices, p=probabilities)
class StatementSampler(NodeSampler):
  """Weights for choosing the next statement node type."""

  sample_map = {
      gast.Assign: 10,
      gast.Print: 1,
      gast.If: 2,
      gast.While: 2,
      gast.For: 0,  # For loops are currently disabled (zero weight).
  }
class ExpressionSampler(NodeSampler):
  """Weights for choosing the next expression node type."""

  sample_map = {
      gast.UnaryOp: 1,
      gast.BinOp: 8,
      gast.Name: 1,
      gast.Call: 0,  # Calls are currently disabled (zero weight).
  }
class CompareSampler(NodeSampler):
  """Uniform weights over the supported comparison operators."""

  sample_map = {
      gast.Eq: 1,
      gast.NotEq: 1,
      gast.Lt: 1,
      gast.LtE: 1,
      gast.Gt: 1,
      gast.GtE: 1,
      gast.Is: 1,
      gast.IsNot: 1,
  }
class BinaryOpSampler(NodeSampler):
  """Uniform weights over the supported binary arithmetic operators."""

  sample_map = {
      gast.Add: 1,
      gast.Sub: 1,
      gast.Mult: 1,
      gast.Div: 1,
      gast.FloorDiv: 1,
      gast.Mod: 1,
      gast.Pow: 1,
  }
class UnaryOpSampler(NodeSampler):
  """Weights for unary operators (unary plus is disabled)."""

  sample_map = {gast.USub: 1, gast.UAdd: 0}
class NameSampler(NodeSampler):
  """Equal odds of minting a new name versus reusing an existing one."""

  sample_map = {
      'new': 1,
      'existing': 1,
  }
# Exclusive upper bounds on the number of statements sampled for
# control-flow bodies and for function bodies, respectively
# (passed as `high` to `np.random.randint`, which excludes the upper end).
N_CONTROLFLOW_STATEMENTS = 10
N_FUNCTIONDEF_STATEMENTS = 10
class CodeGenerator(object):
  """Generate random syntactically-valid Python ASTs.

  Attributes:
    max_depth: int, the deepest level of control-flow nesting to allow.
    depth: int, the current control-flow nesting level while generating.
  """

  def __init__(self, max_depth=3, depth=0):
    self.max_depth = max_depth
    self.depth = depth

  def generate_statement(self):
    """Generate a statement node, dispatching to the correct class method.

    Returns:
      A gast statement node.
    """
    desired_node = StatementSampler().sample()

    # Enforce some constraints on generating statements; if we fail to
    # satisfy them, draw another sample.
    # BUGFIX: the depth budget is checked *before* incrementing `self.depth`.
    # The previous implementation incremented first and then re-sampled via
    # an early return without decrementing, so `depth` leaked upward by one
    # on every retry and control-flow statements became progressively
    # (and permanently) suppressed.
    if desired_node in (gast.While, gast.For, gast.If):
      if self.depth + 1 > self.max_depth:
        return self.generate_statement()

    self.depth += 1
    # Go get the generator method and run it
    method = 'generate_' + desired_node.__name__
    visitor = getattr(self, method)
    node = visitor()
    self.depth -= 1
    return node

  def sample_node_list(self, low, high, generator):
    """Generate a list of statements of random length.

    Args:
      low: Fewest number of statements to generate.
      high: Exclusive upper bound on the number of statements to generate
        (`np.random.randint` excludes the high end).
      generator: Function to call to generate nodes.

    Returns:
      A list of statements.
    """
    statements = []
    for _ in range(np.random.randint(low, high)):
      statements.append(generator())
    return statements

  def generate_Name(self, ctx=gast.Load()):
    """Generate a Name node with a fresh random 4-letter identifier."""
    variable_name = '_' + ''.join(
        random.choice(string.ascii_lowercase) for _ in range(4))
    return gast.Name(variable_name, ctx=ctx, annotation=None)

  def generate_BinOp(self):
    """Generate a binary operation between two random names."""
    # TODO(alexbw): convert to generate_expression when we get to limit
    # expression depth.
    op = BinaryOpSampler().sample()()
    return gast.BinOp(self.generate_Name(), op, self.generate_Name())

  def generate_Compare(self):
    """Generate a single comparison between two random names."""
    op = CompareSampler().sample()()
    return gast.Compare(self.generate_Name(), [op], [self.generate_Name()])

  def generate_UnaryOp(self):
    """Generate a unary operation applied to a random name."""
    operand = self.generate_Name()
    op = UnaryOpSampler().sample()()
    return gast.UnaryOp(op, operand)

  def generate_expression(self):
    """Generate an expression node, dispatching via ExpressionSampler."""
    desired_node = ExpressionSampler().sample()
    # Go get the generator method and run it
    method = 'generate_' + desired_node.__name__
    generator = getattr(self, method)
    return generator()

  def generate_Assign(self):
    """Generate an Assign node with a fresh target name."""
    # Generate left-hand side
    target_node = self.generate_Name(gast.Store())
    # Generate right-hand side
    value_node = self.generate_expression()
    # Put it all together
    node = gast.Assign(targets=[target_node], value=value_node)
    return node

  def generate_If(self):
    """Generate an If node with random true and false branches."""
    test = self.generate_Compare()
    # Generate true branch statements
    body = self.sample_node_list(
        low=1,
        high=N_CONTROLFLOW_STATEMENTS // 2,
        generator=self.generate_statement)
    # Generate false branch statements
    orelse = self.sample_node_list(
        low=1,
        high=N_CONTROLFLOW_STATEMENTS // 2,
        generator=self.generate_statement)
    node = gast.If(test, body, orelse)
    return node

  def generate_While(self):
    """Generate a While node with a random comparison as the test."""
    test = self.generate_Compare()
    body = self.sample_node_list(
        low=1, high=N_CONTROLFLOW_STATEMENTS, generator=self.generate_statement)
    orelse = []  # not generating else statements
    node = gast.While(test, body, orelse)
    return node

  def generate_Call(self):
    # Calls carry zero weight in ExpressionSampler, so this is unreachable
    # through normal sampling.
    raise NotImplementedError

  def generate_Return(self):
    """Generate a Return node wrapping a random expression."""
    return gast.Return(self.generate_expression())

  def generate_Print(self):
    """Generate a print statement via template replacement."""
    return templates.replace('print(x)', x=self.generate_expression())[0]

  def generate_FunctionDef(self):
    """Generate a FunctionDef node with random args, body and return."""
    # Generate the arguments, register them as available
    arg_vars = self.sample_node_list(
        low=2, high=10, generator=lambda: self.generate_Name(gast.Param()))
    args = gast.arguments(arg_vars, None, [], [], None, [])
    # Generate the function body
    body = self.sample_node_list(
        low=1, high=N_FUNCTIONDEF_STATEMENTS, generator=self.generate_statement)
    body.append(self.generate_Return())
    fn_name = self.generate_Name().id
    node = gast.FunctionDef(fn_name, args, body, (), None)
    return node
def generate_random_functiondef():
  """Convenience wrapper: build one random FunctionDef with default limits."""
  generator = CodeGenerator()
  return generator.generate_FunctionDef()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/testing/codegen.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module with test decorators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
def wrapping_decorator(f):
  """Decorator that uses functools.wraps, preserving `f`'s metadata."""

  @functools.wraps(f)
  def wrapper(*args, **kwargs):
    result = f(*args, **kwargs)
    return result

  return wrapper
def standalone_decorator(f):
  """Decorator without functools.wraps; the wrapper keeps its own name."""

  def standalone_wrapper(*args, **kwargs):
    result = f(*args, **kwargs)
    return result

  return standalone_wrapper
def functional_decorator():
  """Returns a decorator factory; the wrapper keeps its own name."""

  def decorator(f):

    def functional_wrapper(*args, **kwargs):
      result = f(*args, **kwargs)
      return result

    return functional_wrapper

  return decorator
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/pyct/testing/decorators.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import metrics
from tensorflow.python.platform import test
class KerasFunctionalMetricsTest(test.TestCase):
  """Tests for the functional (stateless) Keras metrics."""

  def test_metrics(self):
    """Accuracy metrics return a per-sample vector, here of shape (6,)."""
    with self.cached_session():
      y_a = K.variable(np.random.random((6, 7)))
      y_b = K.variable(np.random.random((6, 7)))
      for metric in [metrics.binary_accuracy, metrics.categorical_accuracy]:
        output = metric(y_a, y_b)
        self.assertEqual(K.eval(output).shape, (6,))

  def test_sparse_categorical_accuracy_int(self):
    """Sparse categorical accuracy with integer labels, both label shapes."""
    with self.cached_session():
      metric = metrics.sparse_categorical_accuracy
      y_true = K.variable(np.random.randint(0, 7, (6,)))
      y_pred = K.variable(np.random.random((6, 7)))
      self.assertEqual(K.eval(metric(y_true, y_pred)).shape, (6,))

      # Test correctness if the shape of y_true is (num_samples,)
      y_true = K.variable([1., 0., 0., 0.])
      y_pred = K.variable([[0.8, 0.2], [0.6, 0.4], [0.7, 0.3], [0.9, 0.1]])
      print(K.eval(metric(y_true, y_pred)))
      self.assertAllEqual(K.eval(metric(y_true, y_pred)), [0., 1., 1., 1.])

      # Test correctness if the shape of y_true is (num_samples, 1)
      y_true = K.variable([[1.], [0.], [0.], [0.]])
      y_pred = K.variable([[0.8, 0.2], [0.6, 0.4], [0.7, 0.3], [0.9, 0.1]])
      print(K.eval(metric(y_true, y_pred)))
      self.assertAllEqual(K.eval(metric(y_true, y_pred)), [0., 1., 1., 1.])

  def test_sparse_categorical_accuracy_float(self):
    """Sparse categorical accuracy also accepts float-typed labels."""
    with self.cached_session():
      metric = metrics.sparse_categorical_accuracy
      y_true = K.variable(np.random.random((6,)))
      y_pred = K.variable(np.random.random((6, 7)))
      self.assertEqual(K.eval(metric(y_true, y_pred)).shape, (6,))

  def test_sparse_categorical_accuracy_eager(self):
    """Tests that ints passed in via Eager return results. See b/113504761."""
    with context.eager_mode():
      metric = metrics.sparse_categorical_accuracy
      y_true = np.arange(6).reshape([6, 1])
      y_pred = np.arange(36).reshape([6, 6])
      self.assertAllEqual(metric(y_true, y_pred), [0., 0., 0., 0., 0., 1.])

  def test_sparse_categorical_accuracy_float_eager(self):
    """Tests that floats passed in via Eager return results. See b/113504761."""
    with context.eager_mode():
      metric = metrics.sparse_categorical_accuracy
      y_true = np.arange(6, dtype=np.float32).reshape([6, 1])
      y_pred = np.arange(36).reshape([6, 6])
      self.assertAllEqual(metric(y_true, y_pred), [0., 0., 0., 0., 0., 1.])

  def test_sparse_top_k_categorical_accuracy(self):
    """Top-k accuracy with sparse labels, for k in {1, 2, 3}."""
    with self.cached_session():
      # Test correctness if the shape of y_true is (num_samples, 1)
      y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
      y_true = K.variable(np.array([[1], [0]]))
      result = K.eval(
          metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3))
      self.assertEqual(np.mean(result), 1)
      result = K.eval(
          metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2))
      self.assertEqual(np.mean(result), 0.5)
      result = K.eval(
          metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1))
      self.assertEqual(np.mean(result), 0.)

      # Test correctness if the shape of y_true is (num_samples,)
      y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
      y_true = K.variable(np.array([1, 0]))
      result = K.eval(
          metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3))
      self.assertEqual(np.mean(result), 1)
      result = K.eval(
          metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2))
      self.assertEqual(np.mean(result), 0.5)
      result = K.eval(
          metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1))
      self.assertEqual(np.mean(result), 0.)

  def test_top_k_categorical_accuracy(self):
    """Top-k accuracy with one-hot labels, for k in {1, 2, 3}."""
    with self.cached_session():
      y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
      y_true = K.variable(np.array([[0, 1, 0], [1, 0, 0]]))
      result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred, k=3))
      self.assertEqual(np.mean(result), 1)
      result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred, k=2))
      self.assertEqual(np.mean(result), 0.5)
      result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred, k=1))
      self.assertEqual(np.mean(result), 0.)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/metrics_functional_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras activation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.ops import nn_ops as nn
from tensorflow.python.platform import test
def _ref_softmax(values):
m = np.max(values)
e = np.exp(values - m)
return e / np.sum(e)
@test_util.run_all_in_graph_and_eager_modes
class KerasActivationsTest(test.TestCase):
  """Tests for Keras activation functions against NumPy references."""

  def test_serialization(self):
    """Each named activation round-trips through serialize/deserialize."""
    all_activations = ['softmax', 'relu', 'elu', 'tanh',
                       'sigmoid', 'hard_sigmoid', 'linear',
                       'softplus', 'softsign', 'selu']
    for name in all_activations:
      fn = keras.activations.get(name)
      ref_fn = getattr(keras.activations, name)
      assert fn == ref_fn
      config = keras.activations.serialize(fn)
      fn = keras.activations.deserialize(config)
      assert fn == ref_fn

  def test_serialization_v2(self):
    """V2 op functions deserialize back to the mapped Keras activation."""
    activation_map = {nn.softmax_v2: 'softmax'}
    for fn_v2_key in activation_map:
      fn_v2 = keras.activations.get(fn_v2_key)
      config = keras.activations.serialize(fn_v2)
      fn = keras.activations.deserialize(config)
      assert fn.__name__ == activation_map[fn_v2_key]

  def test_serialization_with_layers(self):
    """A layer used as an activation survives layer (de)serialization."""
    activation = keras.layers.LeakyReLU(alpha=0.1)
    layer = keras.layers.Dense(3, activation=activation)
    config = keras.layers.serialize(layer)
    deserialized_layer = keras.layers.deserialize(
        config, custom_objects={'LeakyReLU': activation})
    self.assertEqual(deserialized_layer.__class__.__name__,
                     layer.__class__.__name__)
    self.assertEqual(deserialized_layer.activation.__class__.__name__,
                     activation.__class__.__name__)

  def test_softmax(self):
    """Softmax matches the reference and rejects 1-D inputs."""
    x = keras.backend.placeholder(ndim=2)
    f = keras.backend.function([x], [keras.activations.softmax(x)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    expected = _ref_softmax(test_values[0])
    self.assertAllClose(result[0], expected, rtol=1e-05)
    # A rank-1 input has no feature axis to normalize over.
    with self.assertRaises(ValueError):
      x = keras.backend.placeholder(ndim=1)
      keras.activations.softmax(x)

  def test_temporal_softmax(self):
    """Softmax on a rank-3 input normalizes over the last axis."""
    x = keras.backend.placeholder(shape=(2, 2, 3))
    f = keras.backend.function([x], [keras.activations.softmax(x)])
    test_values = np.random.random((2, 2, 3)) * 10
    result = f([test_values])[0]
    expected = _ref_softmax(test_values[0, 0])
    self.assertAllClose(result[0, 0], expected, rtol=1e-05)

  def test_selu(self):
    """SELU scales positives and applies scaled ELU to negatives."""
    x = keras.backend.placeholder(ndim=2)
    f = keras.backend.function([x], [keras.activations.selu(x)])
    # Published SELU constants (Klambauer et al., 2017).
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    positive_values = np.array([[1, 2]], dtype=keras.backend.floatx())
    result = f([positive_values])[0]
    self.assertAllClose(result, positive_values * scale, rtol=1e-05)
    negative_values = np.array([[-1, -2]], dtype=keras.backend.floatx())
    result = f([negative_values])[0]
    true_result = (np.exp(negative_values) - 1) * scale * alpha
    self.assertAllClose(result, true_result)

  def test_softplus(self):
    """Softplus matches log(1 + exp(x))."""
    def softplus(x):
      return np.log(np.ones_like(x) + np.exp(x))

    x = keras.backend.placeholder(ndim=2)
    f = keras.backend.function([x], [keras.activations.softplus(x)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    expected = softplus(test_values)
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_softsign(self):
    """Softsign matches x / (1 + |x|)."""
    def softsign(x):
      return np.divide(x, np.ones_like(x) + np.absolute(x))

    x = keras.backend.placeholder(ndim=2)
    f = keras.backend.function([x], [keras.activations.softsign(x)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    expected = softsign(test_values)
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_sigmoid(self):
    """Sigmoid matches a numerically-stable reference implementation."""
    def ref_sigmoid(x):
      # Two equivalent forms chosen by sign to avoid overflow in exp().
      if x >= 0:
        return 1 / (1 + np.exp(-x))
      else:
        z = np.exp(x)
        return z / (1 + z)
    sigmoid = np.vectorize(ref_sigmoid)

    x = keras.backend.placeholder(ndim=2)
    f = keras.backend.function([x], [keras.activations.sigmoid(x)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    expected = sigmoid(test_values)
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_hard_sigmoid(self):
    """Hard sigmoid matches the piecewise-linear reference."""
    def ref_hard_sigmoid(x):
      x = (x * 0.2) + 0.5
      z = 0.0 if x <= 0 else (1.0 if x >= 1 else x)
      return z
    hard_sigmoid = np.vectorize(ref_hard_sigmoid)

    x = keras.backend.placeholder(ndim=2)
    f = keras.backend.function([x], [keras.activations.hard_sigmoid(x)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    expected = hard_sigmoid(test_values)
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_relu(self):
    """ReLU is identity for positives and zero for negatives."""
    x = keras.backend.placeholder(ndim=2)
    f = keras.backend.function([x], [keras.activations.relu(x)])
    positive_values = np.random.random((2, 5))
    result = f([positive_values])[0]
    self.assertAllClose(result, positive_values, rtol=1e-05)
    negative_values = np.random.uniform(-1, 0, (2, 5))
    result = f([negative_values])[0]
    expected = np.zeros((2, 5))
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_elu(self):
    """ELU with alpha=0.5: identity for positives, scaled expm1 otherwise."""
    x = keras.backend.placeholder(ndim=2)
    f = keras.backend.function([x], [keras.activations.elu(x, 0.5)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    self.assertAllClose(result, test_values, rtol=1e-05)
    negative_values = np.array([[-1, -2]], dtype=keras.backend.floatx())
    result = f([negative_values])[0]
    true_result = (np.exp(negative_values) - 1) / 2
    self.assertAllClose(result, true_result)

  def test_tanh(self):
    """tanh matches np.tanh."""
    test_values = np.random.random((2, 5))
    x = keras.backend.placeholder(ndim=2)
    exp = keras.activations.tanh(x)
    f = keras.backend.function([x], [exp])
    result = f([test_values])[0]
    expected = np.tanh(test_values)
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_exponential(self):
    """exponential matches np.exp."""
    test_values = np.random.random((2, 5))
    x = keras.backend.placeholder(ndim=2)
    exp = keras.activations.exponential(x)
    f = keras.backend.function([x], [exp])
    result = f([test_values])[0]
    expected = np.exp(test_values)
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_linear(self):
    """linear is the identity function."""
    x = np.random.random((10, 5))
    self.assertAllClose(x, keras.activations.linear(x))

  def test_invalid_usage(self):
    """Unknown names raise; layer instances are accepted (with a warning)."""
    with self.assertRaises(ValueError):
      keras.activations.get('unknown')
    # The following should be possible but should raise a warning:
    keras.activations.get(keras.layers.LeakyReLU())
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/activations_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras models for use in Model subclassing tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
# pylint: disable=missing-docstring,not-callable
class SimpleTestModel(keras.Model):
  """Two-Dense subclassed model with optional dropout and batch-norm."""

  def __init__(self, use_bn=False, use_dp=False, num_classes=10):
    super(SimpleTestModel, self).__init__(name='test_model')
    self.use_bn = use_bn
    self.use_dp = use_dp
    self.num_classes = num_classes
    self.dense1 = keras.layers.Dense(32, activation='relu')
    self.dense2 = keras.layers.Dense(num_classes, activation='softmax')
    # Optional sublayers are only created when enabled, so tests can check
    # which attributes exist on the instance.
    if self.use_dp:
      self.dp = keras.layers.Dropout(0.5)
    if self.use_bn:
      self.bn = keras.layers.BatchNormalization(axis=-1)

  def call(self, x):
    x = self.dense1(x)
    if self.use_dp:
      x = self.dp(x)
    if self.use_bn:
      x = self.bn(x)
    return self.dense2(x)
class SimpleConvTestModel(keras.Model):
  """Minimal conv -> flatten -> dense classifier for subclassing tests."""

  def __init__(self, num_classes=10):
    super(SimpleConvTestModel, self).__init__(name='test_model')
    self.num_classes = num_classes
    self.conv1 = keras.layers.Conv2D(32, (3, 3), activation='relu')
    self.flatten = keras.layers.Flatten()
    self.dense1 = keras.layers.Dense(num_classes, activation='softmax')

  def call(self, x):
    x = self.conv1(x)
    x = self.flatten(x)
    return self.dense1(x)
class MultiIOTestModel(keras.Model):
  """Two-input / two-output model; `call` expects a 2-element input list."""

  def __init__(self, use_bn=False, use_dp=False, num_classes=(2, 3)):
    super(MultiIOTestModel, self).__init__(name='test_model')
    self.use_bn = use_bn
    self.use_dp = use_dp
    self.num_classes = num_classes
    # dense1 is shared between both input branches.
    self.dense1 = keras.layers.Dense(32, activation='relu')
    self.dense2 = keras.layers.Dense(num_classes[0], activation='softmax')
    self.dense3 = keras.layers.Dense(num_classes[1], activation='softmax')
    if use_dp:
      self.dp = keras.layers.Dropout(0.5)
    if use_bn:
      self.bn = keras.layers.BatchNormalization()

  def call(self, inputs):
    x1, x2 = inputs
    x1 = self.dense1(x1)
    x2 = self.dense1(x2)
    # Dropout only on the first branch, batch-norm only on the second.
    if self.use_dp:
      x1 = self.dp(x1)
    if self.use_bn:
      x2 = self.bn(x2)
    return [self.dense2(x1), self.dense3(x2)]
class NestedTestModel1(keras.Model):
  """A model subclass nested inside a model subclass.
  """

  def __init__(self, num_classes=2):
    super(NestedTestModel1, self).__init__(name='nested_model_1')
    self.num_classes = num_classes
    self.dense1 = keras.layers.Dense(32, activation='relu')
    self.dense2 = keras.layers.Dense(num_classes, activation='relu')
    self.bn = keras.layers.BatchNormalization()
    # The inner model exercises nesting of subclassed models.
    self.test_net = SimpleTestModel(num_classes=4,
                                    use_bn=True,
                                    use_dp=True)

  def call(self, inputs):
    x = self.dense1(inputs)
    x = self.bn(x)
    x = self.test_net(x)
    return self.dense2(x)
class NestedTestModel2(keras.Model):
  """A model subclass with a functional-API graph network inside.
  """

  def __init__(self, num_classes=2):
    super(NestedTestModel2, self).__init__(name='nested_model_2')
    self.num_classes = num_classes
    self.dense1 = keras.layers.Dense(32, activation='relu')
    self.dense2 = keras.layers.Dense(num_classes, activation='relu')
    # Fixed: was the accidental duplicated assignment
    # `self.bn = self.bn = keras.layers.BatchNormalization()`.
    self.bn = keras.layers.BatchNormalization()
    self.test_net = self.get_functional_graph_model(32, 4)

  @staticmethod
  def get_functional_graph_model(input_dim, num_classes):
    """Builds a simple functional-API model (a.k.a. graph network)."""
    inputs = keras.Input(shape=(input_dim,))
    x = keras.layers.Dense(32, activation='relu')(inputs)
    x = keras.layers.BatchNormalization()(x)
    outputs = keras.layers.Dense(num_classes)(x)
    return keras.Model(inputs, outputs)

  def call(self, inputs):
    x = self.dense1(inputs)
    x = self.bn(x)
    x = self.test_net(x)
    return self.dense2(x)
def get_nested_model_3(input_dim, num_classes):
  """Builds a functional-API model with a subclassed model inside."""
  # NOTE: this requires the inner subclass to implement `compute_output_shape`.
  inputs = keras.Input(shape=(input_dim,))
  x = keras.layers.Dense(32, activation='relu')(inputs)
  x = keras.layers.BatchNormalization()(x)

  class Inner(keras.Model):
    """Subclassed model embedded in the functional graph."""

    def __init__(self):
      super(Inner, self).__init__()
      self.dense1 = keras.layers.Dense(32, activation='relu')
      self.dense2 = keras.layers.Dense(5, activation='relu')
      self.bn = keras.layers.BatchNormalization()

    def call(self, inputs):
      x = self.dense1(inputs)
      x = self.dense2(x)
      return self.bn(x)

  test_model = Inner()
  x = test_model(x)
  outputs = keras.layers.Dense(num_classes)(x)
  return keras.Model(inputs, outputs, name='nested_model_3')
class CustomCallModel(keras.Model):
  """Model whose `call` takes multiple positional and keyword arguments."""

  def __init__(self):
    super(CustomCallModel, self).__init__()
    self.dense1 = keras.layers.Dense(1, activation='relu')
    self.dense2 = keras.layers.Dense(1, activation='softmax')

  def call(self, first, second, fiddle_with_output='no', training=True):
    combined = self.dense1(first) + self.dense2(second)
    # The extra kwarg lets tests verify that custom call signatures are
    # respected end to end.
    if fiddle_with_output == 'yes':
      return 10. * combined
    else:
      return combined
class TrainingNoDefaultModel(keras.Model):
  """Model whose `call` requires `training` (no default value)."""

  def __init__(self):
    super(TrainingNoDefaultModel, self).__init__()
    self.dense1 = keras.layers.Dense(1)

  def call(self, x, training):
    return self.dense1(x)
class TrainingMaskingModel(keras.Model):
  """Model whose `call` accepts both `training` and `mask` keyword args."""

  def __init__(self):
    super(TrainingMaskingModel, self).__init__()
    self.dense1 = keras.layers.Dense(1)

  def call(self, x, training=False, mask=None):
    return self.dense1(x)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/model_subclassing_test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Built-in metrics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import types
import numpy as np
import six
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.losses import binary_crossentropy
from tensorflow.python.keras.losses import categorical_crossentropy
from tensorflow.python.keras.losses import categorical_hinge
from tensorflow.python.keras.losses import cosine_similarity
from tensorflow.python.keras.losses import hinge
from tensorflow.python.keras.losses import kullback_leibler_divergence
from tensorflow.python.keras.losses import logcosh
from tensorflow.python.keras.losses import mean_absolute_error
from tensorflow.python.keras.losses import mean_absolute_percentage_error
from tensorflow.python.keras.losses import mean_squared_error
from tensorflow.python.keras.losses import mean_squared_logarithmic_error
from tensorflow.python.keras.losses import poisson
from tensorflow.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.python.keras.losses import squared_hinge
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.keras.utils.generic_utils import to_list
from tensorflow.python.keras.utils.tf_utils import is_tensor_or_variable
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.losses import util as tf_losses_utils
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export('keras.metrics.Metric')
@six.add_metaclass(abc.ABCMeta)
class Metric(Layer):
  """Encapsulates metric logic and state.

  A `Metric` is a stateful `Layer`: state variables are created via
  `add_weight()`, accumulated by `update_state()`, and read idempotently by
  `result()`.

  Usage:

  ```python
  m = SomeMetric(...)
  for input in ...:
    m.update_state(input)
  print('Final result: ', m.result().numpy())
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(64, activation='relu'))
  model.add(tf.keras.layers.Dense(64, activation='relu'))
  model.add(tf.keras.layers.Dense(10, activation='softmax'))

  model.compile(optimizer=tf.compat.v1.train.RMSPropOptimizer(0.01),
                loss=tf.keras.losses.categorical_crossentropy,
                metrics=[tf.keras.metrics.CategoricalAccuracy()])

  data = np.random.random((1000, 32))
  labels = np.random.random((1000, 10))

  dataset = tf.data.Dataset.from_tensor_slices((data, labels))
  dataset = dataset.batch(32)
  dataset = dataset.repeat()

  model.fit(dataset, epochs=10, steps_per_epoch=30)
  ```

  To be implemented by subclasses:
  * `__init__()`: All state variables should be created in this method by
    calling `self.add_weight()` like: `self.var = self.add_weight(...)`
  * `update_state()`: Has all updates to the state variables like:
    self.var.assign_add(...).
  * `result()`: Computes and returns a value for the metric
    from the state variables.

  Example subclass implementation:

  ```
  class BinaryTruePositives(tf.keras.metrics.Metric):

    def __init__(self, name='binary_true_positives', **kwargs):
      super(BinaryTruePositives, self).__init__(name=name, **kwargs)
      self.true_positives = self.add_weight(name='tp', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
      y_true = tf.cast(y_true, tf.bool)
      y_pred = tf.cast(y_pred, tf.bool)

      values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
      values = tf.cast(values, self.dtype)
      if sample_weight is not None:
        sample_weight = tf.cast(sample_weight, self.dtype)
        # NOTE(review): `tf.broadcast_weights` is not a public TF symbol;
        # `tf.broadcast_to(sample_weight, values.shape)` is presumably what
        # was intended -- confirm before copying this example.
        sample_weight = tf.broadcast_weights(sample_weight, values)
        values = tf.multiply(values, sample_weight)
      self.true_positives.assign_add(tf.reduce_sum(values))

    def result(self):
      return self.true_positives
  ```
  """

  def __init__(self, name=None, dtype=None, **kwargs):
    super(Metric, self).__init__(name=name, dtype=dtype, **kwargs)
    self.stateful = True  # All metric layers are stateful.
    self.built = True
    if not base_layer_utils.v2_dtype_behavior_enabled():
      # We only do this when the V2 behavior is not enabled, as when it is
      # enabled, the dtype already defaults to floatx.
      self._dtype = K.floatx() if dtype is None else dtypes.as_dtype(dtype).name

  def __new__(cls, *args, **kwargs):
    # Wrapping happens in `__new__` (not `__init__`) so that it applies to
    # every subclass without requiring subclasses to call `super().__init__`
    # before defining `update_state`/`result`.
    obj = super(Metric, cls).__new__(cls)

    # TODO(psv): We are excluding wrapping `update_state` of built-in metrics
    # with function here because of b/121302287. With this, built-in metrics
    # will continue to work with TPUs and custom metrics will not, however
    # users writing custom metrics need not worry about control dependencies
    # and returning ops.
    if cls.__module__ == Metric.__module__:
      update_state_fn = obj.update_state
    else:
      update_state_fn = def_function.function(obj.update_state)

    # Bind the wrapped versions back onto this instance so callers always go
    # through the metrics_utils wrappers.
    obj.update_state = types.MethodType(
        metrics_utils.update_state_wrapper(update_state_fn), obj)
    obj.result = types.MethodType(metrics_utils.result_wrapper(obj.result), obj)
    return obj

  def __call__(self, *args, **kwargs):
    """Accumulates statistics and then computes metric result value.

    Args:
      *args:
      **kwargs: A mini-batch of inputs to the Metric,
        passed on to `update_state()`.

    Returns:
      The metric value tensor.
    """

    def replica_local_fn(*args, **kwargs):
      """Updates the state of the metric in a replica-local context."""
      update_op = self.update_state(*args, **kwargs)  # pylint: disable=not-callable
      # `result` is only computed after the update op has run.
      with ops.control_dependencies([update_op]):
        result_t = self.result()  # pylint: disable=not-callable

        # We are adding the metric object as metadata on the result tensor.
        # This is required when we want to use a metric with `add_metric` API on
        # a Model/Layer in graph mode. This metric instance will later be used
        # to reset variable state after each epoch of training.
        # Example:
        #   model = Model()
        #   mean = Mean()
        #   model.add_metric(mean(values), name='mean')
        result_t._metric_obj = self  # pylint: disable=protected-access
        return result_t

    from tensorflow.python.keras.distribute import distributed_training_utils  # pylint:disable=g-import-not-at-top
    return distributed_training_utils.call_replica_local_fn(
        replica_local_fn, *args, **kwargs)

  @property
  def dtype(self):
    # Data type of the metric result.
    return self._dtype

  def get_config(self):
    """Returns the serializable config of the metric."""
    return {'name': self.name, 'dtype': self.dtype}

  def reset_states(self):
    """Resets all of the metric state variables.

    This function is called between epochs/steps,
    when a metric is evaluated during training.
    """
    # Default implementation zeroes every state variable; subclasses with
    # non-scalar state override this (see _ConfusionMatrixConditionCount).
    K.batch_set_value([(v, 0) for v in self.variables])

  @abc.abstractmethod
  def update_state(self, *args, **kwargs):
    """Accumulates statistics for the metric.

    Note: This function is executed as a graph function in graph mode.
    This means:
      a) Operations on the same resource are executed in textual order.
         This should make it easier to do things like add the updated
         value of a variable to another, for example.
      b) You don't need to worry about collecting the update ops to execute.
         All update ops added to the graph by this function will be executed.
      As a result, code should generally work the same way with graph or
      eager execution.

    Please use `tf.config.experimental_run_functions_eagerly(True)` to execute
    this function eagerly for debugging or profiling.

    Args:
      *args:
      **kwargs: A mini-batch of inputs to the Metric.
    """
    raise NotImplementedError('Must be implemented in subclasses.')

  @abc.abstractmethod
  def result(self):
    """Computes and returns the metric value tensor.

    Result computation is an idempotent operation that simply calculates the
    metric value using the state variables.
    """
    raise NotImplementedError('Must be implemented in subclasses.')

  ### For use by subclasses ###
  @doc_controls.for_subclass_implementers
  def add_weight(self,
                 name,
                 shape=(),
                 aggregation=tf_variables.VariableAggregation.SUM,
                 synchronization=tf_variables.VariableSynchronization.ON_READ,
                 initializer=None,
                 dtype=None):
    """Adds state variable. Only for use by subclasses."""
    from tensorflow.python.distribute import distribution_strategy_context as ds_context  # pylint:disable=g-import-not-at-top
    from tensorflow.python.keras.distribute import distributed_training_utils  # pylint:disable=g-import-not-at-top

    if ds_context.has_strategy():
      strategy = ds_context.get_strategy()
    else:
      strategy = None

    # TODO(b/120571621): Make `ON_READ` work with Keras metrics on TPU.
    if distributed_training_utils.is_tpu_strategy(strategy):
      synchronization = tf_variables.VariableSynchronization.ON_WRITE

    # Metric state is never trainable and lives outside collections.
    return super(Metric, self).add_weight(
        name=name,
        shape=shape,
        dtype=self._dtype if dtype is None else dtype,
        trainable=False,
        initializer=initializer,
        collections=[],
        synchronization=synchronization,
        aggregation=aggregation)
  ### End: For use by subclasses ###
class Reduce(Metric):
  """Encapsulates metrics that perform a reduce operation on the values.

  Maintains a `total` state variable and, for mean-like reductions, a
  `count` denominator; `result()` derives the metric value from these.
  """

  def __init__(self, reduction, name, dtype=None):
    """Creates a `Reduce` instance.

    Args:
      reduction: a `tf.keras.metrics.Reduction` enum value.
      name: string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(Reduce, self).__init__(name=name, dtype=dtype)
    self.reduction = reduction
    with ops.init_scope():
      self.total = self.add_weight(
          'total', initializer=init_ops.zeros_initializer)
      if reduction in [metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
                       metrics_utils.Reduction.WEIGHTED_MEAN]:
        # These reductions divide by a denominator, so track it as well.
        self.count = self.add_weight(
            'count', initializer=init_ops.zeros_initializer)

  def update_state(self, values, sample_weight=None):
    """Accumulates statistics for computing the reduction metric.

    For example, if `values` is [1, 3, 5, 7] and reduction=SUM_OVER_BATCH_SIZE,
    then the value of `result()` is 4. If the `sample_weight` is specified as
    [1, 1, 0, 0] then value of `result()` would be 2.

    Args:
      values: Per-example value.
      sample_weight: Optional weighting of each example. Defaults to 1.

    Returns:
      Update op.
    """
    # Flatten ragged inputs (if any) so values and weights are dense.
    [values], sample_weight = \
        metrics_utils.ragged_assert_compatible_and_get_flat_values(
            [values], sample_weight)
    values = math_ops.cast(values, self._dtype)
    if sample_weight is not None:
      sample_weight = math_ops.cast(sample_weight, self._dtype)
      # Update dimensions of weights to match with values if possible.
      values, _, sample_weight = tf_losses_utils.squeeze_or_expand_dimensions(
          values, sample_weight=sample_weight)
      try:
        # Broadcast weights if possible.
        sample_weight = weights_broadcast_ops.broadcast_weights(
            sample_weight, values)
      except ValueError:
        # Reduce values to same ndim as weight array so the multiply below
        # is well-defined when weights can't be broadcast up to the values.
        ndim = K.ndim(values)
        weight_ndim = K.ndim(sample_weight)
        if self.reduction == metrics_utils.Reduction.SUM:
          values = math_ops.reduce_sum(
              values, axis=list(range(weight_ndim, ndim)))
        else:
          values = math_ops.reduce_mean(
              values, axis=list(range(weight_ndim, ndim)))
      values = math_ops.multiply(values, sample_weight)

    value_sum = math_ops.reduce_sum(values)
    with ops.control_dependencies([value_sum]):
      update_total_op = self.total.assign_add(value_sum)

    # Exit early if the reduction doesn't have a denominator.
    if self.reduction == metrics_utils.Reduction.SUM:
      return update_total_op

    # Update `count` for reductions that require a denominator.
    if self.reduction == metrics_utils.Reduction.SUM_OVER_BATCH_SIZE:
      num_values = math_ops.cast(array_ops.size(values), self._dtype)
    elif self.reduction == metrics_utils.Reduction.WEIGHTED_MEAN:
      if sample_weight is None:
        num_values = math_ops.cast(array_ops.size(values), self._dtype)
      else:
        num_values = math_ops.reduce_sum(sample_weight)
    else:
      raise NotImplementedError(
          'reduction [%s] not implemented' % self.reduction)

    with ops.control_dependencies([update_total_op]):
      return self.count.assign_add(num_values)

  def result(self):
    # Idempotent read of the accumulated state.
    if self.reduction == metrics_utils.Reduction.SUM:
      return array_ops.identity(self.total)
    elif self.reduction in [
        metrics_utils.Reduction.WEIGHTED_MEAN,
        metrics_utils.Reduction.SUM_OVER_BATCH_SIZE
    ]:
      # div_no_nan returns 0 when count is 0 (no updates yet).
      return math_ops.div_no_nan(self.total, self.count)
    else:
      raise NotImplementedError(
          'reduction [%s] not implemented' % self.reduction)
@keras_export('keras.metrics.Sum')
class Sum(Reduce):
  """Computes the (weighted) sum of the given values.

  A single state variable, `total`, accumulates the (optionally weighted)
  sum of every value passed to `update_state`; `result()` returns the
  accumulated total.

  For example, for values [1, 3, 5, 7] the sum is 16; with sample weights
  [1, 1, 0, 0] it would be 4. When `sample_weight` is `None`, every value
  has weight 1; a weight of 0 masks the corresponding value.

  Usage:

  ```python
  m = tf.keras.metrics.Sum()
  m.update_state([1, 3, 5, 7])
  print('Final result: ', m.result().numpy())  # Final result: 16.0
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.add_metric(tf.keras.metrics.Sum(name='sum_1')(outputs))
  model.compile('sgd', loss='mse')
  ```
  """

  def __init__(self, name='sum', dtype=None):
    """Creates a `Sum` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(Sum, self).__init__(
        reduction=metrics_utils.Reduction.SUM, name=name, dtype=dtype)
@keras_export('keras.metrics.Mean')
class Mean(Reduce):
  """Computes the (weighted) mean of the given values.

  Two state variables back this metric: `total`, the weighted sum of all
  values seen so far, and `count`, the corresponding sum of weights.
  `result()` is the idempotent quotient `total / count`.

  For example, for values [1, 3, 5, 7] the mean is 4; with sample weights
  [1, 1, 0, 0] it would be 2. When `sample_weight` is `None`, every value
  has weight 1; a weight of 0 masks the corresponding value.

  Usage:

  ```python
  m = tf.keras.metrics.Mean()
  m.update_state([1, 3, 5, 7])
  print('Final result: ', m.result().numpy())  # Final result: 4.0
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.add_metric(tf.keras.metrics.Mean(name='mean_1')(outputs))
  model.compile('sgd', loss='mse')
  ```
  """

  def __init__(self, name='mean', dtype=None):
    """Creates a `Mean` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(Mean, self).__init__(
        reduction=metrics_utils.Reduction.WEIGHTED_MEAN,
        name=name,
        dtype=dtype)
@keras_export('keras.metrics.MeanRelativeError')
class MeanRelativeError(Mean):
  """Computes the mean relative error by normalizing with the given values.

  This metric creates two local variables, `total` and `count` that are used to
  compute the mean relative absolute error. This average is weighted by
  `sample_weight`, and it is ultimately returned as `mean_relative_error`:
  an idempotent operation that simply divides `total` by `count`.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Usage:

  ```python
  m = tf.keras.metrics.MeanRelativeError(normalizer=[1, 3, 2, 3])
  m.update_state([1, 3, 2, 3], [2, 4, 6, 8])

  # metric = mean(|y_pred - y_true| / normalizer)
  #        = mean([1, 1, 4, 5] / [1, 3, 2, 3]) = mean([1, 1/3, 2, 5/3])
  #        = 5/4 = 1.25
  print('Final result: ', m.result().numpy())  # Final result: 1.25
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd',
      loss='mse',
      metrics=[tf.keras.metrics.MeanRelativeError(normalizer=[1, 3])])
  ```
  """

  def __init__(self, normalizer, name=None, dtype=None):
    """Creates a `MeanRelativeError` instance.

    Args:
      normalizer: The normalizer values with same shape as predictions.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(MeanRelativeError, self).__init__(name=name, dtype=dtype)
    # Cast once up front so update_state can divide without re-casting.
    normalizer = math_ops.cast(normalizer, self._dtype)
    self.normalizer = normalizer

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates metric statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be a
        `Tensor` whose rank is either 0, or the same rank as `y_true`, and must
        be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    y_true = math_ops.cast(y_true, self._dtype)
    y_pred = math_ops.cast(y_pred, self._dtype)
    # Flatten ragged inputs (if any) before aligning ranks.
    [y_pred, y_true], sample_weight = \
        metrics_utils.ragged_assert_compatible_and_get_flat_values(
            [y_pred, y_true], sample_weight)
    y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions(
        y_pred, y_true)
    # NOTE(review): this re-assigns `self.normalizer`, so any size-1 dimension
    # squeezed here is permanently dropped from the stored normalizer across
    # subsequent calls -- confirm this is intended.
    y_pred, self.normalizer = confusion_matrix.remove_squeezable_dimensions(
        y_pred, self.normalizer)
    y_pred.shape.assert_is_compatible_with(y_true.shape)
    # Per-example |error| / normalizer; div_no_nan maps x/0 to 0.
    relative_errors = math_ops.div_no_nan(
        math_ops.abs(y_true - y_pred), self.normalizer)
    return super(MeanRelativeError, self).update_state(
        relative_errors, sample_weight=sample_weight)

  def get_config(self):
    n = self.normalizer
    # Evaluate tensor/variable normalizers so the config is plain data.
    config = {'normalizer': K.eval(n) if is_tensor_or_variable(n) else n}
    base_config = super(MeanRelativeError, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class MeanMetricWrapper(Mean):
  """Wraps a stateless metric function with the Mean metric.

  Each update computes per-example scores via the wrapped function and
  feeds them into the inherited (weighted) `Mean` state.
  """

  def __init__(self, fn, name=None, dtype=None, **kwargs):
    """Creates a `MeanMetricWrapper` instance.

    Args:
      fn: The metric function to wrap, with signature
        `fn(y_true, y_pred, **kwargs)`.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      **kwargs: The keyword arguments that are passed on to `fn`.
    """
    super(MeanMetricWrapper, self).__init__(name=name, dtype=dtype)
    self._fn = fn
    self._fn_kwargs = kwargs

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates metric statistics.

    `y_true` and `y_pred` should have the same shape.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be
        a `Tensor` whose rank is either 0, or the same rank as `y_true`,
        and must be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    y_true = math_ops.cast(y_true, self._dtype)
    y_pred = math_ops.cast(y_pred, self._dtype)
    # Flatten ragged inputs (if any) before aligning ranks.
    [y_true, y_pred], sample_weight = \
        metrics_utils.ragged_assert_compatible_and_get_flat_values(
            [y_true, y_pred], sample_weight)
    y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions(
        y_pred, y_true)
    # Per-example scores; the inherited Mean handles weighting/averaging.
    matches = self._fn(y_true, y_pred, **self._fn_kwargs)
    return super(MeanMetricWrapper, self).update_state(
        matches, sample_weight=sample_weight)

  def get_config(self):
    config = {}
    for k, v in six.iteritems(self._fn_kwargs):
      # Evaluate tensor/variable kwargs so the config is plain data.
      config[k] = K.eval(v) if is_tensor_or_variable(v) else v
    base_config = super(MeanMetricWrapper, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.Accuracy')
class Accuracy(MeanMetricWrapper):
  """Calculates how often predictions exactly match labels.

  A thin wrapper around the `accuracy` function: each update compares
  `y_true` and `y_pred` and feeds the (optionally weighted) match frequency
  into the inherited `Mean`. Two state variables, `total` and `count`, back
  the metric, and `result()` returns their idempotent quotient.

  For example, if `y_true` is [1, 2, 3, 4] and `y_pred` is [0, 2, 3, 4]
  then the accuracy is 3/4 or .75. With sample weights [1, 1, 0, 0] the
  accuracy would be 1/2 or .5. When `sample_weight` is `None`, weights
  default to 1; use a weight of 0 to mask values.

  Usage:

  ```python
  m = tf.keras.metrics.Accuracy()
  m.update_state([1, 2, 3, 4], [0, 2, 3, 4])
  print('Final result: ', m.result().numpy())  # Final result: 0.75
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Accuracy()])
  ```
  """

  def __init__(self, name='accuracy', dtype=None):
    """Creates an `Accuracy` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(Accuracy, self).__init__(accuracy, name=name, dtype=dtype)
@keras_export('keras.metrics.BinaryAccuracy')
class BinaryAccuracy(MeanMetricWrapper):
  """Calculates how often thresholded predictions match binary labels.

  A thin wrapper around the `binary_accuracy` function: predictions are
  compared against `threshold` and the (optionally weighted) match
  frequency is fed into the inherited `Mean`. Two state variables, `total`
  and `count`, back the metric, and `result()` returns their idempotent
  quotient.

  For example, if `y_true` is [1, 1, 0, 0] and `y_pred` is [0.98, 1, 0, 0.6]
  then the binary accuracy is 3/4 or .75. With sample weights [1, 0, 0, 1]
  it would be 1/2 or .5. When `sample_weight` is `None`, weights default
  to 1; use a weight of 0 to mask values.

  Usage:

  ```python
  m = tf.keras.metrics.BinaryAccuracy()
  m.update_state([1, 1, 0, 0], [0.98, 1, 0, 0.6])
  print('Final result: ', m.result().numpy())  # Final result: 0.75
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.BinaryAccuracy()])
  ```
  """

  def __init__(self, name='binary_accuracy', dtype=None, threshold=0.5):
    """Creates a `BinaryAccuracy` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      threshold: (Optional) Float representing the threshold for deciding
        whether prediction values are 1 or 0.
    """
    super(BinaryAccuracy, self).__init__(
        binary_accuracy, name=name, dtype=dtype, threshold=threshold)
@keras_export('keras.metrics.CategoricalAccuracy')
class CategoricalAccuracy(MeanMetricWrapper):
  """Calculates how often predictions match one-hot labels.

  A thin wrapper around the `categorical_accuracy` function: each update
  feeds the (optionally weighted) argmax-match frequency into the inherited
  `Mean`. Two state variables, `total` and `count`, back the metric, and
  `result()` returns their idempotent quotient.

  `y_pred` and `y_true` should be passed in as vectors of probabilities,
  rather than as labels; if necessary, use `tf.one_hot` to expand `y_true`
  as a vector. You can provide logits of classes as `y_pred`, since argmax
  of logits and probabilities are the same.

  For example, if `y_true` is [[0, 0, 1], [0, 1, 0]] and `y_pred` is
  [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] then the categorical accuracy is 1/2
  or .5. With sample weights [0.7, 0.3] it would be .3. When
  `sample_weight` is `None`, weights default to 1; use a weight of 0 to
  mask values.

  Usage:

  ```python
  m = tf.keras.metrics.CategoricalAccuracy()
  m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
  print('Final result: ', m.result().numpy())  # Final result: 0.5
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd',
      loss='mse',
      metrics=[tf.keras.metrics.CategoricalAccuracy()])
  ```
  """

  def __init__(self, name='categorical_accuracy', dtype=None):
    """Creates a `CategoricalAccuracy` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(CategoricalAccuracy, self).__init__(
        categorical_accuracy, name=name, dtype=dtype)
@keras_export('keras.metrics.SparseCategoricalAccuracy')
class SparseCategoricalAccuracy(MeanMetricWrapper):
  """Calculates how often predictions match integer labels.

  A thin wrapper around the `sparse_categorical_accuracy` function: each
  update feeds the (optionally weighted) match frequency into the inherited
  `Mean`. Two state variables, `total` and `count`, back the metric, and
  `result()` returns their idempotent quotient.

  You can provide logits of classes as `y_pred`, since argmax of logits and
  probabilities are the same.

  For example, if `y_true` is [[2], [1]] and `y_pred` is
  [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] then the categorical accuracy is 1/2
  or .5. With sample weights [0.7, 0.3] it would be .3. When
  `sample_weight` is `None`, weights default to 1; use a weight of 0 to
  mask values.

  Usage:

  ```python
  m = tf.keras.metrics.SparseCategoricalAccuracy()
  m.update_state([[2], [1]], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
  print('Final result: ', m.result().numpy())  # Final result: 0.5
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
  ```
  """

  def __init__(self, name='sparse_categorical_accuracy', dtype=None):
    """Creates a `SparseCategoricalAccuracy` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(SparseCategoricalAccuracy, self).__init__(
        sparse_categorical_accuracy, name=name, dtype=dtype)
@keras_export('keras.metrics.TopKCategoricalAccuracy')
class TopKCategoricalAccuracy(MeanMetricWrapper):
  """Computes how often targets are in the top `K` predictions.

  A thin wrapper around the `top_k_categorical_accuracy` function: each
  update feeds the (optionally weighted) top-k hit frequency into the
  inherited `Mean`, and `result()` returns the running average.

  Usage:

  ```python
  m = tf.keras.metrics.TopKCategoricalAccuracy()
  m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
  print('Final result: ', m.result().numpy())  # Final result: 1.0
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', metrics=[tf.keras.metrics.TopKCategoricalAccuracy()])
  ```
  """

  def __init__(self, k=5, name='top_k_categorical_accuracy', dtype=None):
    """Creates a `TopKCategoricalAccuracy` instance.

    Args:
      k: (Optional) Number of top elements to look at for computing accuracy.
        Defaults to 5.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(TopKCategoricalAccuracy, self).__init__(
        top_k_categorical_accuracy, name=name, dtype=dtype, k=k)
@keras_export('keras.metrics.SparseTopKCategoricalAccuracy')
class SparseTopKCategoricalAccuracy(MeanMetricWrapper):
  """Computes how often integer targets are in the top `K` predictions.

  A thin wrapper around the `sparse_top_k_categorical_accuracy` function:
  each update feeds the (optionally weighted) top-k hit frequency into the
  inherited `Mean`, and `result()` returns the running average.

  Usage:

  ```python
  m = tf.keras.metrics.SparseTopKCategoricalAccuracy()
  m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
  print('Final result: ', m.result().numpy())  # Final result: 1.0
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd',
      metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy()])
  ```
  """

  def __init__(self, k=5, name='sparse_top_k_categorical_accuracy', dtype=None):
    """Creates a `SparseTopKCategoricalAccuracy` instance.

    Args:
      k: (Optional) Number of top elements to look at for computing accuracy.
        Defaults to 5.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(SparseTopKCategoricalAccuracy, self).__init__(
        sparse_top_k_categorical_accuracy, name=name, dtype=dtype, k=k)
class _ConfusionMatrixConditionCount(Metric):
  """Calculates the number of the given confusion matrix condition.

  Shared base for TruePositives/TrueNegatives/FalsePositives/FalseNegatives:
  a single `accumulator` variable holds one count per threshold.
  """

  def __init__(self,
               confusion_matrix_cond,
               thresholds=None,
               name=None,
               dtype=None):
    """Creates a `_ConfusionMatrixConditionCount` instance.

    Args:
      confusion_matrix_cond: One of `metrics_utils.ConfusionMatrix` conditions.
      thresholds: (Optional) Defaults to 0.5. A float value or a python
        list/tuple of float threshold values in [0, 1]. A threshold is compared
        with prediction values to determine the truth value of predictions
        (i.e., above the threshold is `true`, below is `false`). One metric
        value is generated for each threshold value.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(_ConfusionMatrixConditionCount, self).__init__(name=name, dtype=dtype)
    self._confusion_matrix_cond = confusion_matrix_cond
    # Keep the raw constructor argument for get_config round-tripping.
    self.init_thresholds = thresholds
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds, default_threshold=0.5)
    # One accumulator slot per threshold value.
    self.accumulator = self.add_weight(
        'accumulator',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates the given confusion matrix condition statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be a
        `Tensor` whose rank is either 0, or the same rank as `y_true`, and must
        be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    return metrics_utils.update_confusion_matrix_variables(
        {self._confusion_matrix_cond: self.accumulator},
        y_true,
        y_pred,
        thresholds=self.thresholds,
        sample_weight=sample_weight)

  def result(self):
    # A single threshold yields a scalar; multiple thresholds yield a vector
    # with one count per threshold.
    if len(self.thresholds) == 1:
      result = self.accumulator[0]
    else:
      result = self.accumulator
    return ops.convert_to_tensor(result)

  def reset_states(self):
    # Overrides Metric.reset_states because the state here is a vector of
    # shape (num_thresholds,) rather than a scalar.
    num_thresholds = len(to_list(self.thresholds))
    K.batch_set_value(
        [(v, np.zeros((num_thresholds,))) for v in self.variables])

  def get_config(self):
    # Serialize the thresholds exactly as the caller supplied them.
    config = {'thresholds': self.init_thresholds}
    base_config = super(_ConfusionMatrixConditionCount, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.FalsePositives')
class FalsePositives(_ConfusionMatrixConditionCount):
  """Calculates the number of false positives.

  A single state variable, `accumulator`, tracks the number of false
  positives, with one slot per configured threshold. If `sample_weight` is
  given, the metric accumulates the sum of the weights of false positives
  instead; weights default to 1 and a weight of 0 masks the value.

  For example, if `y_true` is [0, 1, 0, 0] and `y_pred` is [0, 0, 1, 1]
  then the false positives value is 2. With sample weights [0, 0, 1, 0]
  it would be 1.

  Usage:

  ```python
  m = tf.keras.metrics.FalsePositives()
  m.update_state([0, 1, 0, 0], [0, 0, 1, 1])
  print('Final result: ', m.result().numpy())  # Final result: 2
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.FalsePositives()])
  ```
  """

  def __init__(self, thresholds=None, name=None, dtype=None):
    """Creates a `FalsePositives` instance.

    Args:
      thresholds: (Optional) Defaults to 0.5. A float value or a python
        list/tuple of float threshold values in [0, 1]. A threshold is compared
        with prediction values to determine the truth value of predictions
        (i.e., above the threshold is `true`, below is `false`). One metric
        value is generated for each threshold value.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(FalsePositives, self).__init__(
        metrics_utils.ConfusionMatrix.FALSE_POSITIVES,
        thresholds=thresholds,
        name=name,
        dtype=dtype)
@keras_export('keras.metrics.FalseNegatives')
class FalseNegatives(_ConfusionMatrixConditionCount):
  """Calculates the number of false negatives.

  A single state variable, `accumulator`, tracks the number of false
  negatives, with one slot per configured threshold. If `sample_weight` is
  given, the metric accumulates the sum of the weights of false negatives
  instead; weights default to 1 and a weight of 0 masks the value.

  For example, if `y_true` is [0, 1, 1, 1] and `y_pred` is [0, 1, 0, 0]
  then the false negatives value is 2. With sample weights [0, 0, 1, 0]
  it would be 1.

  Usage:

  ```python
  m = tf.keras.metrics.FalseNegatives()
  m.update_state([0, 1, 1, 1], [0, 1, 0, 0])
  print('Final result: ', m.result().numpy())  # Final result: 2
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.FalseNegatives()])
  ```
  """

  def __init__(self, thresholds=None, name=None, dtype=None):
    """Creates a `FalseNegatives` instance.

    Args:
      thresholds: (Optional) Defaults to 0.5. A float value or a python
        list/tuple of float threshold values in [0, 1]. A threshold is compared
        with prediction values to determine the truth value of predictions
        (i.e., above the threshold is `true`, below is `false`). One metric
        value is generated for each threshold value.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(FalseNegatives, self).__init__(
        metrics_utils.ConfusionMatrix.FALSE_NEGATIVES,
        thresholds=thresholds,
        name=name,
        dtype=dtype)
@keras_export('keras.metrics.TrueNegatives')
class TrueNegatives(_ConfusionMatrixConditionCount):
  """Calculates the number of true negatives.

  A single state variable, `accumulator`, tracks the number of true
  negatives, with one slot per configured threshold. If `sample_weight` is
  given, the metric accumulates the sum of the weights of true negatives
  instead; weights default to 1 and a weight of 0 masks the value.

  For example, if `y_true` is [0, 1, 0, 0] and `y_pred` is [1, 1, 0, 0]
  then the true negatives value is 2. With sample weights [0, 0, 1, 0]
  it would be 1.

  Usage:

  ```python
  m = tf.keras.metrics.TrueNegatives()
  m.update_state([0, 1, 0, 0], [1, 1, 0, 0])
  print('Final result: ', m.result().numpy())  # Final result: 2
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.TrueNegatives()])
  ```
  """

  def __init__(self, thresholds=None, name=None, dtype=None):
    """Creates a `TrueNegatives` instance.

    Args:
      thresholds: (Optional) Defaults to 0.5. A float value or a python
        list/tuple of float threshold values in [0, 1]. A threshold is compared
        with prediction values to determine the truth value of predictions
        (i.e., above the threshold is `true`, below is `false`). One metric
        value is generated for each threshold value.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(TrueNegatives, self).__init__(
        metrics_utils.ConfusionMatrix.TRUE_NEGATIVES,
        thresholds=thresholds,
        name=name,
        dtype=dtype)
@keras_export('keras.metrics.TruePositives')
class TruePositives(_ConfusionMatrixConditionCount):
  """Calculates the number of true positives.

  A single state variable, `accumulator`, tracks the number of true
  positives, with one slot per configured threshold. If `sample_weight` is
  given, the metric accumulates the sum of the weights of true positives
  instead; weights default to 1 and a weight of 0 masks the value.

  For example, if `y_true` is [0, 1, 1, 1] and `y_pred` is [1, 0, 1, 1]
  then the true positives value is 2. With sample weights [0, 0, 1, 0]
  it would be 1.

  Usage:

  ```python
  m = tf.keras.metrics.TruePositives()
  m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
  print('Final result: ', m.result().numpy())  # Final result: 2
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.TruePositives()])
  ```
  """

  def __init__(self, thresholds=None, name=None, dtype=None):
    """Creates a `TruePositives` instance.

    Args:
      thresholds: (Optional) Defaults to 0.5. A float value or a python
        list/tuple of float threshold values in [0, 1]. A threshold is compared
        with prediction values to determine the truth value of predictions
        (i.e., above the threshold is `true`, below is `false`). One metric
        value is generated for each threshold value.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(TruePositives, self).__init__(
        metrics_utils.ConfusionMatrix.TRUE_POSITIVES,
        thresholds=thresholds,
        name=name,
        dtype=dtype)
@keras_export('keras.metrics.Precision')
class Precision(Metric):
  """Computes the precision of the predictions with respect to the labels.

  Precision is TP / (TP + FP). For example, with `y_true` [0, 1, 1, 1] and
  `y_pred` [1, 0, 1, 1] the precision is 2/(2+1), i.e. 0.66; with
  `sample_weight` [0, 0, 1, 0] it would be 1.

  Two local variables, `true_positives` and `false_positives`, are
  accumulated and combined into `precision`, an idempotent operation that
  divides `true_positives` by the sum of `true_positives` and
  `false_positives`.

  If `sample_weight` is `None`, weights default to 1; a weight of 0 masks a
  value out.

  If `top_k` is set, precision is how often, on average, a class among the
  top-k classes with the highest predicted values of a batch entry is
  correct and can be found in the label for that entry.

  If `class_id` is specified, precision is computed by considering only the
  entries in the batch for which `class_id` is above the threshold and/or in
  the top-k highest predictions, and computing the fraction of them for
  which `class_id` is indeed a correct label.

  Usage:

  ```python
  m = tf.keras.metrics.Precision()
  m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
  print('Final result: ', m.result().numpy())  # Final result: 0.66
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Precision()])
  ```
  """

  def __init__(self,
               thresholds=None,
               top_k=None,
               class_id=None,
               name=None,
               dtype=None):
    """Creates a `Precision` instance.

    Args:
      thresholds: (Optional) A float value or a python list/tuple of float
        threshold values in [0, 1]. A threshold is compared with prediction
        values to determine the truth value of predictions (i.e., above the
        threshold is `true`, below is `false`). One metric value is generated
        for each threshold value. If neither thresholds nor top_k are set, the
        default is to calculate precision with `thresholds=0.5`.
      top_k: (Optional) Unset by default. An int value specifying the top-k
        predictions to consider when calculating precision.
      class_id: (Optional) Integer class ID for which we want binary metrics.
        This must be in the half-open interval `[0, num_classes)`, where
        `num_classes` is the last dimension of predictions.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(Precision, self).__init__(name=name, dtype=dtype)
    self.init_thresholds = thresholds
    self.top_k = top_k
    self.class_id = class_id

    # With `top_k` there is no meaningful default cutoff, so use NEG_INF.
    if top_k is None:
      default_threshold = 0.5
    else:
      default_threshold = metrics_utils.NEG_INF
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds, default_threshold=default_threshold)

    # One accumulator slot per threshold.
    num_thresholds = len(self.thresholds)
    self.true_positives = self.add_weight(
        'true_positives',
        shape=(num_thresholds,),
        initializer=init_ops.zeros_initializer)
    self.false_positives = self.add_weight(
        'false_positives',
        shape=(num_thresholds,),
        initializer=init_ops.zeros_initializer)

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates true positive and false positive statistics.

    Args:
      y_true: The ground truth values, with the same dimensions as `y_pred`.
        Will be cast to `bool`.
      y_pred: The predicted values. Each element must be in the range `[0, 1]`.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be a
        `Tensor` whose rank is either 0, or the same rank as `y_true`, and must
        be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    variables_to_update = {
        metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
        metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
    }
    return metrics_utils.update_confusion_matrix_variables(
        variables_to_update,
        y_true,
        y_pred,
        thresholds=self.thresholds,
        top_k=self.top_k,
        class_id=self.class_id,
        sample_weight=sample_weight)

  def result(self):
    """Returns precision = TP / (TP + FP), one value per threshold."""
    predicted_positives = self.true_positives + self.false_positives
    precision = math_ops.div_no_nan(self.true_positives, predicted_positives)
    if len(self.thresholds) == 1:
      return precision[0]
    return precision

  def reset_states(self):
    """Zeroes out all accumulator variables."""
    num_thresholds = len(to_list(self.thresholds))
    zeros = np.zeros((num_thresholds,))
    K.batch_set_value([(v, zeros) for v in self.variables])

  def get_config(self):
    """Returns the serializable config of the metric."""
    config = dict(super(Precision, self).get_config())
    config.update({
        'thresholds': self.init_thresholds,
        'top_k': self.top_k,
        'class_id': self.class_id,
    })
    return config
@keras_export('keras.metrics.Recall')
class Recall(Metric):
  """Computes the recall of the predictions with respect to the labels.

  Recall is TP / (TP + FN). For example, with `y_true` [0, 1, 1, 1] and
  `y_pred` [1, 0, 1, 1] the recall is 2/(2+1), i.e. 0.66; with
  `sample_weight` [0, 0, 1, 0] it would be 1.

  Two local variables, `true_positives` and `false_negatives`, are
  accumulated and combined into `recall`, an idempotent operation that
  divides `true_positives` by the sum of `true_positives` and
  `false_negatives`.

  If `sample_weight` is `None`, weights default to 1; a weight of 0 masks a
  value out.

  If `top_k` is set, recall is how often, on average, a class among the
  labels of a batch entry is in the top-k predictions.

  If `class_id` is specified, recall is computed by considering only the
  entries in the batch for which `class_id` is in the label, and computing
  the fraction of them for which `class_id` is above the threshold and/or in
  the top-k predictions.

  Usage:

  ```python
  m = tf.keras.metrics.Recall()
  m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
  print('Final result: ', m.result().numpy())  # Final result: 0.66
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Recall()])
  ```
  """

  def __init__(self,
               thresholds=None,
               top_k=None,
               class_id=None,
               name=None,
               dtype=None):
    """Creates a `Recall` instance.

    Args:
      thresholds: (Optional) A float value or a python list/tuple of float
        threshold values in [0, 1]. A threshold is compared with prediction
        values to determine the truth value of predictions (i.e., above the
        threshold is `true`, below is `false`). One metric value is generated
        for each threshold value. If neither thresholds nor top_k are set, the
        default is to calculate recall with `thresholds=0.5`.
      top_k: (Optional) Unset by default. An int value specifying the top-k
        predictions to consider when calculating recall.
      class_id: (Optional) Integer class ID for which we want binary metrics.
        This must be in the half-open interval `[0, num_classes)`, where
        `num_classes` is the last dimension of predictions.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(Recall, self).__init__(name=name, dtype=dtype)
    self.init_thresholds = thresholds
    self.top_k = top_k
    self.class_id = class_id

    # With `top_k` there is no meaningful default cutoff, so use NEG_INF.
    if top_k is None:
      default_threshold = 0.5
    else:
      default_threshold = metrics_utils.NEG_INF
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds, default_threshold=default_threshold)

    # One accumulator slot per threshold.
    num_thresholds = len(self.thresholds)
    self.true_positives = self.add_weight(
        'true_positives',
        shape=(num_thresholds,),
        initializer=init_ops.zeros_initializer)
    self.false_negatives = self.add_weight(
        'false_negatives',
        shape=(num_thresholds,),
        initializer=init_ops.zeros_initializer)

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates true positive and false negative statistics.

    Args:
      y_true: The ground truth values, with the same dimensions as `y_pred`.
        Will be cast to `bool`.
      y_pred: The predicted values. Each element must be in the range `[0, 1]`.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be a
        `Tensor` whose rank is either 0, or the same rank as `y_true`, and must
        be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    variables_to_update = {
        metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
        metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
    }
    return metrics_utils.update_confusion_matrix_variables(
        variables_to_update,
        y_true,
        y_pred,
        thresholds=self.thresholds,
        top_k=self.top_k,
        class_id=self.class_id,
        sample_weight=sample_weight)

  def result(self):
    """Returns recall = TP / (TP + FN), one value per threshold."""
    actual_positives = self.true_positives + self.false_negatives
    recall = math_ops.div_no_nan(self.true_positives, actual_positives)
    if len(self.thresholds) == 1:
      return recall[0]
    return recall

  def reset_states(self):
    """Zeroes out all accumulator variables."""
    num_thresholds = len(to_list(self.thresholds))
    zeros = np.zeros((num_thresholds,))
    K.batch_set_value([(v, zeros) for v in self.variables])

  def get_config(self):
    """Returns the serializable config of the metric."""
    config = dict(super(Recall, self).get_config())
    config.update({
        'thresholds': self.init_thresholds,
        'top_k': self.top_k,
        'class_id': self.class_id,
    })
    return config
@six.add_metaclass(abc.ABCMeta)
class SensitivitySpecificityBase(Metric):
  """Abstract base class for computing sensitivity and specificity.

  For additional information about specificity and sensitivity, see the
  following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
  """

  def __init__(self, value, num_thresholds=200, name=None, dtype=None):
    """Creates the shared state for a sensitivity/specificity metric.

    Args:
      value: the target sensitivity or specificity value (stored as
        `self.value`; subclasses interpret it in `result`).
      num_thresholds: number of thresholds to evaluate; must be > 0.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(SensitivitySpecificityBase, self).__init__(name=name, dtype=dtype)
    if num_thresholds <= 0:
      raise ValueError('`num_thresholds` must be > 0.')
    self.value = value

    # One accumulator per confusion-matrix cell, each with one slot per
    # threshold. Creation order (tp, tn, fp, fn) is preserved from the
    # original implementation.
    for cell in ('true_positives', 'true_negatives', 'false_positives',
                 'false_negatives'):
      setattr(
          self, cell,
          self.add_weight(
              cell,
              shape=(num_thresholds,),
              initializer=init_ops.zeros_initializer))

    # Evenly space `num_thresholds` thresholds over [0, 1], endpoints
    # included (a single threshold degenerates to 0.5).
    if num_thresholds == 1:
      self.thresholds = [0.5]
    else:
      interior = [
          i * 1.0 / (num_thresholds - 1) for i in range(1, num_thresholds - 1)
      ]
      self.thresholds = [0.0] + interior + [1.0]

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates confusion matrix statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be a
        `Tensor` whose rank is either 0, or the same rank as `y_true`, and must
        be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    variables_to_update = {
        metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
        metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
        metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
        metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
    }
    return metrics_utils.update_confusion_matrix_variables(
        variables_to_update,
        y_true,
        y_pred,
        thresholds=self.thresholds,
        sample_weight=sample_weight)

  def reset_states(self):
    """Zeroes out all accumulator variables."""
    num_thresholds = len(self.thresholds)
    zeros = np.zeros((num_thresholds,))
    K.batch_set_value([(v, zeros) for v in self.variables])
@keras_export('keras.metrics.SensitivityAtSpecificity')
class SensitivityAtSpecificity(SensitivitySpecificityBase):
  """Computes the sensitivity at a given specificity.

  `Sensitivity` measures the proportion of actual positives that are correctly
  identified as such (tp / (tp + fn)).
  `Specificity` measures the proportion of actual negatives that are correctly
  identified as such (tn / (tn + fp)).

  Four local variables — `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` — are accumulated and used to find
  the threshold at which the requested specificity is (most closely)
  achieved; the sensitivity at that threshold is returned.

  If `sample_weight` is `None`, weights default to 1; a weight of 0 masks a
  value out.

  For additional information about specificity and sensitivity, see the
  following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity

  Usage:

  ```python
  m = tf.keras.metrics.SensitivityAtSpecificity(0.4, num_thresholds=1)
  m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
  print('Final result: ', m.result().numpy())  # Final result: 0.5
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SensitivityAtSpecificity()])
  ```
  """

  def __init__(self, specificity, num_thresholds=200, name=None, dtype=None):
    """Creates a `SensitivityAtSpecificity` instance.

    Args:
      specificity: A scalar value in range `[0, 1]`.
      num_thresholds: (Optional) Defaults to 200. The number of thresholds to
        use for matching the given specificity.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    if specificity < 0 or specificity > 1:
      raise ValueError('`specificity` must be in the range [0, 1].')
    self.specificity = specificity
    self.num_thresholds = num_thresholds
    super(SensitivityAtSpecificity, self).__init__(
        specificity, num_thresholds=num_thresholds, name=name, dtype=dtype)

  def result(self):
    """Returns the sensitivity at the threshold closest to the target specificity."""
    # Specificity at every threshold: TN / (TN + FP).
    specificities = math_ops.div_no_nan(
        self.true_negatives, self.true_negatives + self.false_positives)
    # Index of the threshold whose specificity is nearest the requested value.
    min_index = math_ops.argmin(
        math_ops.abs(specificities - self.value), axis=0)
    min_index = math_ops.cast(min_index, dtypes.int32)
    # Sensitivity at that threshold: TP / (TP + FN).
    tp = self.true_positives[min_index]
    fn = self.false_negatives[min_index]
    return math_ops.div_no_nan(tp, tp + fn)

  def get_config(self):
    """Returns the serializable config of the metric."""
    config = dict(super(SensitivityAtSpecificity, self).get_config())
    config.update({
        'num_thresholds': self.num_thresholds,
        'specificity': self.specificity,
    })
    return config
@keras_export('keras.metrics.SpecificityAtSensitivity')
class SpecificityAtSensitivity(SensitivitySpecificityBase):
  """Computes the specificity at a given sensitivity.
  `Sensitivity` measures the proportion of actual positives that are correctly
  identified as such (tp / (tp + fn)).
  `Specificity` measures the proportion of actual negatives that are correctly
  identified as such (tn / (tn + fp)).
  This metric creates four local variables, `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` that are used to compute the
  specificity at the given sensitivity. The threshold for the given sensitivity
  value is computed and used to evaluate the corresponding specificity.
  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.
  For additional information about specificity and sensitivity, see the
  following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
  Usage:
  ```python
  m = tf.keras.metrics.SpecificityAtSensitivity(0.8, num_thresholds=1)
  m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
  print('Final result: ', m.result().numpy())  # Final result: 1.0
  ```
  Usage with tf.keras API:
  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SpecificityAtSensitivity()])
  ```
  """
  def __init__(self, sensitivity, num_thresholds=200, name=None, dtype=None):
    """Creates a `SpecificityAtSensitivity` instance.
    Args:
      sensitivity: A scalar value in range `[0, 1]`.
      num_thresholds: (Optional) Defaults to 200. The number of thresholds to
        use for matching the given sensitivity.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    if sensitivity < 0 or sensitivity > 1:
      raise ValueError('`sensitivity` must be in the range [0, 1].')
    self.sensitivity = sensitivity
    self.num_thresholds = num_thresholds
    super(SpecificityAtSensitivity, self).__init__(
        sensitivity, num_thresholds=num_thresholds, name=name, dtype=dtype)
  def result(self):
    """Returns the specificity at the threshold closest to the target sensitivity."""
    # Calculate sensitivities at all the thresholds.
    sensitivities = math_ops.div_no_nan(
        self.true_positives, self.true_positives + self.false_negatives)
    # Find the index of the threshold where the sensitivity is closest to the
    # given sensitivity.
    min_index = math_ops.argmin(
        math_ops.abs(sensitivities - self.value), axis=0)
    min_index = math_ops.cast(min_index, dtypes.int32)
    # Compute specificity at that index.
    return math_ops.div_no_nan(
        self.true_negatives[min_index],
        self.true_negatives[min_index] + self.false_positives[min_index])
  def get_config(self):
    """Returns the serializable config of the metric."""
    config = {
        'num_thresholds': self.num_thresholds,
        'sensitivity': self.sensitivity
    }
    base_config = super(SpecificityAtSensitivity, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.AUC')
class AUC(Metric):
  """Computes the approximate AUC (Area under the curve) via a Riemann sum.
  This metric creates four local variables, `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` that are used to compute the AUC.
  To discretize the AUC curve, a linearly spaced set of thresholds is used to
  compute pairs of recall and precision values. The area under the ROC-curve is
  therefore computed using the height of the recall values by the false positive
  rate, while the area under the PR-curve is computed using the height of
  the precision values by the recall.
  This value is ultimately returned as `auc`, an idempotent operation that
  computes the area under a discretized curve of precision versus recall values
  (computed using the aforementioned variables). The `num_thresholds` variable
  controls the degree of discretization with larger numbers of thresholds more
  closely approximating the true AUC. The quality of the approximation may vary
  dramatically depending on `num_thresholds`. The `thresholds` parameter can be
  used to manually specify thresholds which split the predictions more evenly.
  For best results, `predictions` should be distributed approximately uniformly
  in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
  approximation may be poor if this is not the case. Setting `summation_method`
  to 'minoring' or 'majoring' can help quantify the error in the approximation
  by providing lower or upper bound estimate of the AUC.
  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.
  Usage:
  ```python
  m = tf.keras.metrics.AUC(num_thresholds=3)
  m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
  # threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
  # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
  # recall = [1, 0.5, 0], fp_rate = [1, 0, 0]
  # auc = ((((1+0.5)/2)*(1-0))+ (((0.5+0)/2)*(0-0))) = 0.75
  print('Final result: ', m.result().numpy())  # Final result: 0.75
  ```
  Usage with tf.keras API:
  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.AUC()])
  ```
  """
  def __init__(self,
               num_thresholds=200,
               curve='ROC',
               summation_method='interpolation',
               name=None,
               dtype=None,
               thresholds=None):
    """Creates an `AUC` instance.
    Args:
      num_thresholds: (Optional) Defaults to 200. The number of thresholds to
        use when discretizing the roc curve. Values must be > 1.
      curve: (Optional) Specifies the name of the curve to be computed, 'ROC'
        [default] or 'PR' for the Precision-Recall-curve.
      summation_method: (Optional) Specifies the Riemann summation method used
        (https://en.wikipedia.org/wiki/Riemann_sum): 'interpolation' [default],
        applies mid-point summation scheme for `ROC`. For PR-AUC, interpolates
        (true/false) positives but not the ratio that is precision (see Davis
        & Goadrich 2006 for details); 'minoring' that applies left summation
        for increasing intervals and right summation for decreasing intervals;
        'majoring' that does the opposite.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      thresholds: (Optional) A list of floating point values to use as the
        thresholds for discretizing the curve. If set, the `num_thresholds`
        parameter is ignored. Values should be in [0, 1]. Endpoint thresholds
        equal to {-epsilon, 1+epsilon} for a small positive epsilon value will
        be automatically included with these to correctly handle predictions
        equal to exactly 0 or 1.
    """
    # Validate configurations.
    # NOTE(review): these two guards only fire when the argument already IS an
    # `AUCCurve`/`AUCSummationMethod` instance AND is not in the enum list —
    # which appears impossible for genuine enum members, so they look like
    # dead code. String arguments seem to be validated by `from_str` below
    # instead — confirm against `metrics_utils`.
    if isinstance(curve, metrics_utils.AUCCurve) and curve not in list(
        metrics_utils.AUCCurve):
      raise ValueError('Invalid curve: "{}". Valid options are: "{}"'.format(
          curve, list(metrics_utils.AUCCurve)))
    if isinstance(
        summation_method,
        metrics_utils.AUCSummationMethod) and summation_method not in list(
            metrics_utils.AUCSummationMethod):
      raise ValueError(
          'Invalid summation method: "{}". Valid options are: "{}"'.format(
              summation_method, list(metrics_utils.AUCSummationMethod)))
    # Update properties.
    if thresholds is not None:
      # If specified, use the supplied thresholds.
      self.num_thresholds = len(thresholds) + 2
      thresholds = sorted(thresholds)
    else:
      if num_thresholds <= 1:
        raise ValueError('`num_thresholds` must be > 1.')
      # Otherwise, linearly interpolate (num_thresholds - 2) thresholds in
      # (0, 1).
      self.num_thresholds = num_thresholds
      thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                    for i in range(num_thresholds - 2)]
    # Add an endpoint "threshold" below zero and above one for either
    # threshold method to account for floating point imprecisions.
    self.thresholds = [0.0 - K.epsilon()] + thresholds + [1.0 + K.epsilon()]
    # Normalize `curve`/`summation_method` to their enum forms.
    if isinstance(curve, metrics_utils.AUCCurve):
      self.curve = curve
    else:
      self.curve = metrics_utils.AUCCurve.from_str(curve)
    if isinstance(summation_method, metrics_utils.AUCSummationMethod):
      self.summation_method = summation_method
    else:
      self.summation_method = metrics_utils.AUCSummationMethod.from_str(
          summation_method)
    super(AUC, self).__init__(name=name, dtype=dtype)
    # Create metric variables
    self.true_positives = self.add_weight(
        'true_positives',
        shape=(self.num_thresholds,),
        initializer=init_ops.zeros_initializer)
    self.true_negatives = self.add_weight(
        'true_negatives',
        shape=(self.num_thresholds,),
        initializer=init_ops.zeros_initializer)
    self.false_positives = self.add_weight(
        'false_positives',
        shape=(self.num_thresholds,),
        initializer=init_ops.zeros_initializer)
    self.false_negatives = self.add_weight(
        'false_negatives',
        shape=(self.num_thresholds,),
        initializer=init_ops.zeros_initializer)
  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates confusion matrix statistics.
    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be a
        `Tensor` whose rank is either 0, or the same rank as `y_true`, and must
        be broadcastable to `y_true`.
    Returns:
      Update op.
    """
    return metrics_utils.update_confusion_matrix_variables({
        metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
        metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
        metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
        metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
    }, y_true, y_pred, self.thresholds, sample_weight=sample_weight)
  def interpolate_pr_auc(self):
    """Interpolation formula inspired by section 4 of Davis & Goadrich 2006.
    https://www.biostat.wisc.edu/~page/rocpr.pdf
    Note here we derive & use a closed formula not present in the paper
    as follows:
      Precision = TP / (TP + FP) = TP / P
    Modeling all of TP (true positive), FP (false positive) and their sum
    P = TP + FP (predicted positive) as varying linearly within each interval
    [A, B] between successive thresholds, we get
      Precision slope = dTP / dP
                      = (TP_B - TP_A) / (P_B - P_A)
                      = (TP - TP_A) / (P - P_A)
      Precision = (TP_A + slope * (P - P_A)) / P
    The area within the interval is (slope / total_pos_weight) times
      int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P}
      int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P}
    where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in
      int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A)
    Bringing back the factor (slope / total_pos_weight) we'd put aside, we get
      slope * [dTP + intercept * log(P_B / P_A)] / total_pos_weight
    where dTP == TP_B - TP_A.
    Note that when P_A == 0 the above calculation simplifies into
      int_A^B{Precision.dTP} = int_A^B{slope * dTP} = slope * (TP_B - TP_A)
    which is really equivalent to imputing constant precision throughout the
    first bucket having >0 true positives.
    Returns:
      pr_auc: an approximation of the area under the P-R curve.
    """
    # Per-interval change in true positives and in predicted positives.
    dtp = self.true_positives[:self.num_thresholds -
                              1] - self.true_positives[1:]
    p = self.true_positives + self.false_positives
    dp = p[:self.num_thresholds - 1] - p[1:]
    prec_slope = math_ops.div_no_nan(
        dtp, math_ops.maximum(dp, 0), name='prec_slope')
    intercept = self.true_positives[1:] - math_ops.multiply(prec_slope, p[1:])
    # Guard the log's ratio: fall back to 1 (log -> 0) when either endpoint
    # has no predicted positives.
    safe_p_ratio = array_ops.where(
        math_ops.logical_and(p[:self.num_thresholds - 1] > 0, p[1:] > 0),
        math_ops.div_no_nan(
            p[:self.num_thresholds - 1],
            math_ops.maximum(p[1:], 0),
            name='recall_relative_ratio'),
        array_ops.ones_like(p[1:]))
    return math_ops.reduce_sum(
        math_ops.div_no_nan(
            prec_slope * (dtp + intercept * math_ops.log(safe_p_ratio)),
            math_ops.maximum(self.true_positives[1:] + self.false_negatives[1:],
                             0),
            name='pr_auc_increment'),
        name='interpolate_pr_auc')
  def result(self):
    """Returns the approximate AUC as a Riemann sum over the thresholds."""
    if (self.curve == metrics_utils.AUCCurve.PR and
        self.summation_method == metrics_utils.AUCSummationMethod.INTERPOLATION
       ):
      # This use case is different and is handled separately.
      return self.interpolate_pr_auc()
    # Set `x` and `y` values for the curves based on `curve` config.
    recall = math_ops.div_no_nan(self.true_positives,
                                 self.true_positives + self.false_negatives)
    if self.curve == metrics_utils.AUCCurve.ROC:
      fp_rate = math_ops.div_no_nan(self.false_positives,
                                    self.false_positives + self.true_negatives)
      x = fp_rate
      y = recall
    else:  # curve == 'PR'.
      precision = math_ops.div_no_nan(
          self.true_positives, self.true_positives + self.false_positives)
      x = recall
      y = precision
    # Find the rectangle heights based on `summation_method`.
    if self.summation_method == metrics_utils.AUCSummationMethod.INTERPOLATION:
      # Note: the case ('PR', 'interpolation') has been handled above.
      heights = (y[:self.num_thresholds - 1] + y[1:]) / 2.
    elif self.summation_method == metrics_utils.AUCSummationMethod.MINORING:
      heights = math_ops.minimum(y[:self.num_thresholds - 1], y[1:])
    else:  # self.summation_method = metrics_utils.AUCSummationMethod.MAJORING:
      heights = math_ops.maximum(y[:self.num_thresholds - 1], y[1:])
    # Sum up the areas of all the rectangles.
    return math_ops.reduce_sum(
        math_ops.multiply(x[:self.num_thresholds - 1] - x[1:], heights),
        name=self.name)
  def reset_states(self):
    """Zeroes out all accumulator variables."""
    K.batch_set_value(
        [(v, np.zeros((self.num_thresholds,))) for v in self.variables])
  def get_config(self):
    """Returns the serializable config of the metric."""
    config = {
        'num_thresholds': self.num_thresholds,
        'curve': self.curve.value,
        'summation_method': self.summation_method.value,
        # We remove the endpoint thresholds as an inverse of how the thresholds
        # were initialized. This ensures that a metric initialized from this
        # config has the same thresholds.
        'thresholds': self.thresholds[1:-1],
    }
    base_config = super(AUC, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.CosineSimilarity')
class CosineSimilarity(MeanMetricWrapper):
  """Computes the cosine similarity between the labels and predictions.

  cosine similarity = (a . b) / ||a|| ||b||
  [Cosine Similarity](https://en.wikipedia.org/wiki/Cosine_similarity)

  For example, if `y_true` is [0, 1, 1], and `y_pred` is [1, 0, 1], the cosine
  similarity is 0.5.

  This metric keeps the average cosine similarity between `predictions` and
  `labels` over a stream of data.

  Usage:

  ```python
  m = tf.keras.metrics.CosineSimilarity(axis=1)
  m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
  # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
  # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
  # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
  # result = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
  #        = ((0. + 0.) + (0.5 + 0.5)) / 2
  print('Final result: ', m.result().numpy())  # Final result: 0.5
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd',
      loss='mse',
      metrics=[tf.keras.metrics.CosineSimilarity(axis=1)])
  ```
  """

  def __init__(self, name='cosine_similarity', dtype=None, axis=-1):
    """Creates a `CosineSimilarity` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      axis: (Optional) Defaults to -1. The dimension along which the cosine
        similarity is computed.
    """
    # Wraps the functional `cosine_similarity` in a streaming mean.
    super(CosineSimilarity, self).__init__(
        cosine_similarity, name, dtype=dtype, axis=axis)
@keras_export('keras.metrics.MeanAbsoluteError')
class MeanAbsoluteError(MeanMetricWrapper):
  """Computes the mean absolute error between the labels and predictions.

  For example, if `y_true` is [0., 0., 1., 1.], and `y_pred` is
  [1., 1., 1., 0.] the mean absolute error is 3/4 (0.75).

  Usage:

  ```python
  m = tf.keras.metrics.MeanAbsoluteError()
  m.update_state([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Final result: ', m.result().numpy())  # Final result: 0.75
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', metrics=[tf.keras.metrics.MeanAbsoluteError()])
  ```
  """

  def __init__(self, name='mean_absolute_error', dtype=None):
    """Creates a `MeanAbsoluteError` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    # Wraps the functional `mean_absolute_error` in a streaming mean.
    super(MeanAbsoluteError, self).__init__(
        mean_absolute_error, name, dtype=dtype)
@keras_export('keras.metrics.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(MeanMetricWrapper):
  """Computes the mean absolute percentage error between `y_true` and `y_pred`.

  For example, if `y_true` is [0., 0., 1., 1.], and `y_pred` is
  [1., 1., 1., 0.] the mean absolute percentage error is 5e+08.

  Usage:

  ```python
  m = tf.keras.metrics.MeanAbsolutePercentageError()
  m.update_state([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Final result: ', m.result().numpy())  # Final result: 5e+08
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd', metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
  ```
  """

  def __init__(self, name='mean_absolute_percentage_error', dtype=None):
    """Creates a `MeanAbsolutePercentageError` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    # Wraps the functional `mean_absolute_percentage_error` in a streaming
    # mean.
    super(MeanAbsolutePercentageError, self).__init__(
        mean_absolute_percentage_error, name, dtype=dtype)
@keras_export('keras.metrics.MeanSquaredError')
class MeanSquaredError(MeanMetricWrapper):
  """Computes the mean squared error between `y_true` and `y_pred`.

  For example, if `y_true` is [0., 0., 1., 1.], and `y_pred` is
  [1., 1., 1., 0.] the mean squared error is 3/4 (0.75).

  Usage:

  ```python
  m = tf.keras.metrics.MeanSquaredError()
  m.update_state([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Final result: ', m.result().numpy())  # Final result: 0.75
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', metrics=[tf.keras.metrics.MeanSquaredError()])
  ```
  """

  def __init__(self, name='mean_squared_error', dtype=None):
    """Creates a `MeanSquaredError` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    # Wraps the functional `mean_squared_error` in a streaming mean.
    super(MeanSquaredError, self).__init__(
        mean_squared_error, name, dtype=dtype)
@keras_export('keras.metrics.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(MeanMetricWrapper):
  """Computes the mean squared logarithmic error between `y_true` and `y_pred`.

  For example, if `y_true` is [0., 0., 1., 1.], and `y_pred` is
  [1., 1., 1., 0.] the mean squared logarithmic error is 0.36034.

  Usage:

  ```python
  m = tf.keras.metrics.MeanSquaredLogarithmicError()
  m.update_state([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Final result: ', m.result().numpy())  # Final result: 0.36034
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd', metrics=[tf.keras.metrics.MeanSquaredLogarithmicError()])
  ```
  """

  def __init__(self, name='mean_squared_logarithmic_error', dtype=None):
    """Creates a `MeanSquaredLogarithmicError` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    # Wraps the functional `mean_squared_logarithmic_error` in a streaming
    # mean.
    super(MeanSquaredLogarithmicError, self).__init__(
        mean_squared_logarithmic_error, name, dtype=dtype)
@keras_export('keras.metrics.Hinge')
class Hinge(MeanMetricWrapper):
  """Computes the hinge metric between `y_true` and `y_pred`.

  `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
  provided we will convert them to -1 or 1.

  For example, if `y_true` is [-1., 1., 1.], and `y_pred` is [0.6, -0.7, -0.5]
  the hinge metric value is 1.6.

  Usage:

  ```python
  m = tf.keras.metrics.Hinge()
  m.update_state([-1., 1., 1.], [0.6, -0.7, -0.5])
  # result = mean(max(0, 1 - y_true * y_pred)) = (1.6 + 1.7 + 1.5) / 3
  print('Final result: ', m.result().numpy())  # Final result: 1.6
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', metrics=[tf.keras.metrics.Hinge()])
  ```
  """

  def __init__(self, name='hinge', dtype=None):
    """Creates a `Hinge` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    # Wraps the functional `hinge` loss in a streaming mean.
    super(Hinge, self).__init__(hinge, name, dtype=dtype)
@keras_export('keras.metrics.SquaredHinge')
class SquaredHinge(MeanMetricWrapper):
  """Computes the squared hinge metric between `y_true` and `y_pred`.

  `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
  provided we will convert them to -1 or 1.

  For example, if `y_true` is [-1., 1., 1.], and `y_pred` is [0.6, -0.7, -0.5]
  the squared hinge metric value is 2.6.

  Usage:

  ```python
  m = tf.keras.metrics.SquaredHinge()
  m.update_state([-1., 1., 1.], [0.6, -0.7, -0.5])

  # result = max(0, 1-y_true * y_pred) = [1.6^2 + 1.7^2 + 1.5^2] / 3
  print('Final result: ', m.result().numpy())  # Final result: 2.6
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', metrics=[tf.keras.metrics.SquaredHinge()])
  ```
  """

  def __init__(self, name='squared_hinge', dtype=None):
    """Creates a `SquaredHinge` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(SquaredHinge, self).__init__(squared_hinge, name, dtype=dtype)
@keras_export('keras.metrics.CategoricalHinge')
class CategoricalHinge(MeanMetricWrapper):
  """Computes the categorical hinge metric between `y_true` and `y_pred`.

  For example, if `y_true` is [0., 1., 1.], and `y_pred` is [1., 0., 1.]
  the categorical hinge metric value is 1.0.

  Usage:

  ```python
  m = tf.keras.metrics.CategoricalHinge()
  m.update_state([0., 1., 1.], [1., 0., 1.])
  print('Final result: ', m.result().numpy())  # Final result: 1.0
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', metrics=[tf.keras.metrics.CategoricalHinge()])
  ```
  """

  def __init__(self, name='categorical_hinge', dtype=None):
    """Creates a `CategoricalHinge` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(CategoricalHinge, self).__init__(categorical_hinge, name, dtype=dtype)
@keras_export('keras.metrics.RootMeanSquaredError')
class RootMeanSquaredError(Mean):
  """Computes root mean squared error metric between `y_true` and `y_pred`.

  Standalone usage:

  ```python
  m = tf.keras.metrics.RootMeanSquaredError()
  m.update_state([2., 4., 6.], [1., 3., 2.])
  print('Final result: ', m.result().numpy())  # Final result: 2.449
  ```

  Usage with the tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', metrics=[tf.keras.metrics.RootMeanSquaredError()])
  ```
  """

  def __init__(self, name='root_mean_squared_error', dtype=None):
    """Creates a `RootMeanSquaredError` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(RootMeanSquaredError, self).__init__(name, dtype=dtype)

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates root mean squared error statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be
        a `Tensor` whose rank is either 0, or the same rank as `y_true`, and
        must be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    y_true = math_ops.cast(y_true, self._dtype)
    y_pred = math_ops.cast(y_pred, self._dtype)
    y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions(
        y_pred, y_true)
    squared_error = math_ops.squared_difference(y_pred, y_true)
    # The `Mean` base accumulates the (weighted) mean of the squared errors;
    # the square root is only applied when the result is requested.
    return super(RootMeanSquaredError, self).update_state(
        squared_error, sample_weight=sample_weight)

  def result(self):
    # sqrt of the running mean squared error. `div_no_nan` yields 0 when no
    # samples have been accumulated yet (count == 0).
    return math_ops.sqrt(math_ops.div_no_nan(self.total, self.count))
@keras_export('keras.metrics.LogCoshError')
class LogCoshError(MeanMetricWrapper):
  """Computes the logarithm of the hyperbolic cosine of the prediction error.

  `logcosh = log((exp(x) + exp(-x))/2)`, where x is the error (y_pred - y_true)

  Usage:

  ```python
  m = tf.keras.metrics.LogCoshError()
  m.update_state([0., 1., 1.], [1., 0., 1.])
  print('Final result: ', m.result().numpy())  # Final result: 0.289
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', metrics=[tf.keras.metrics.LogCoshError()])
  ```
  """

  def __init__(self, name='logcosh', dtype=None):
    """Creates a `LogCoshError` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(LogCoshError, self).__init__(logcosh, name, dtype=dtype)
@keras_export('keras.metrics.Poisson')
class Poisson(MeanMetricWrapper):
  """Computes the Poisson metric between `y_true` and `y_pred`.

  `metric = y_pred - y_true * log(y_pred)`

  Usage:

  ```python
  m = tf.keras.metrics.Poisson()
  m.update_state([1, 9, 2], [4, 8, 12])
  print('Final result: ', m.result().numpy())  # Final result: -4.63
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', metrics=[tf.keras.metrics.Poisson()])
  ```
  """

  def __init__(self, name='poisson', dtype=None):
    """Creates a `Poisson` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(Poisson, self).__init__(poisson, name, dtype=dtype)
@keras_export('keras.metrics.KLDivergence')
class KLDivergence(MeanMetricWrapper):
  """Computes Kullback-Leibler divergence metric between `y_true` and `y_pred`.

  `metric = y_true * log(y_true / y_pred)`

  Usage:

  ```python
  m = tf.keras.metrics.KLDivergence()
  m.update_state([.4, .9, .2], [.5, .8, .12])
  print('Final result: ', m.result().numpy())  # Final result: -0.043
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', metrics=[tf.keras.metrics.KLDivergence()])
  ```
  """

  def __init__(self, name='kullback_leibler_divergence', dtype=None):
    """Creates a `KLDivergence` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(KLDivergence, self).__init__(
        kullback_leibler_divergence, name, dtype=dtype)
@keras_export('keras.metrics.MeanIoU')
class MeanIoU(Metric):
  """Computes the mean Intersection-Over-Union metric.

  Mean Intersection-Over-Union is a common evaluation metric for semantic image
  segmentation, which first computes the IOU for each semantic class and then
  computes the average over classes. IOU is defined as follows:
    IOU = true_positive / (true_positive + false_positive + false_negative).
  The predictions are accumulated in a confusion matrix, weighted by
  `sample_weight` and the metric is then calculated from it.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Usage:

  ```python
  m = tf.keras.metrics.MeanIoU(num_classes=2)
  m.update_state([0, 0, 1, 1], [0, 1, 0, 1])

  # cm = [[1, 1],
  #       [1, 1]]
  # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
  # iou = true_positives / (sum_row + sum_col - true_positives))
  # result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2 = 0.33
  print('Final result: ', m.result().numpy())  # Final result: 0.33
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd',
      loss='mse',
      metrics=[tf.keras.metrics.MeanIoU(num_classes=2)])
  ```
  """

  def __init__(self, num_classes, name=None, dtype=None):
    """Creates a `MeanIoU` instance.

    Args:
      num_classes: The possible number of labels the prediction task can have.
        This value must be provided, since a confusion matrix of dimension =
        [num_classes, num_classes] will be allocated.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(MeanIoU, self).__init__(name=name, dtype=dtype)
    self.num_classes = num_classes

    # Variable to accumulate the predictions in the confusion matrix. Setting
    # the type to be `float64` as required by confusion_matrix_ops.
    self.total_cm = self.add_weight(
        'total_confusion_matrix',
        shape=(num_classes, num_classes),
        initializer=init_ops.zeros_initializer,
        dtype=dtypes.float64)

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates the confusion matrix statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be a
        `Tensor` whose rank is either 0, or the same rank as `y_true`, and must
        be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    y_true = math_ops.cast(y_true, self._dtype)
    y_pred = math_ops.cast(y_pred, self._dtype)

    # Flatten the input if its rank > 1.
    if y_pred.shape.ndims > 1:
      y_pred = array_ops.reshape(y_pred, [-1])

    if y_true.shape.ndims > 1:
      y_true = array_ops.reshape(y_true, [-1])

    # NOTE(review): this assumes `sample_weight`, when given, is a tensor-like
    # object with a static `.shape` -- confirm callers never pass a plain list.
    if sample_weight is not None and sample_weight.shape.ndims > 1:
      sample_weight = array_ops.reshape(sample_weight, [-1])

    # Accumulate the prediction to current confusion matrix.
    current_cm = confusion_matrix.confusion_matrix(
        y_true,
        y_pred,
        self.num_classes,
        weights=sample_weight,
        dtype=dtypes.float64)
    return self.total_cm.assign_add(current_cm)

  def result(self):
    """Compute the mean intersection-over-union via the confusion matrix."""
    sum_over_row = math_ops.cast(
        math_ops.reduce_sum(self.total_cm, axis=0), dtype=self._dtype)
    sum_over_col = math_ops.cast(
        math_ops.reduce_sum(self.total_cm, axis=1), dtype=self._dtype)
    true_positives = math_ops.cast(
        array_ops.diag_part(self.total_cm), dtype=self._dtype)

    # sum_over_row + sum_over_col =
    #     2 * true_positives + false_positives + false_negatives.
    denominator = sum_over_row + sum_over_col - true_positives

    # The mean is only computed over classes that appear in the
    # label or prediction tensor. If the denominator is 0, we need to
    # ignore the class.
    num_valid_entries = math_ops.reduce_sum(
        math_ops.cast(math_ops.not_equal(denominator, 0), dtype=self._dtype))

    iou = math_ops.div_no_nan(true_positives, denominator)

    return math_ops.div_no_nan(
        math_ops.reduce_sum(iou, name='mean_iou'), num_valid_entries)

  def reset_states(self):
    # Zero out the accumulated confusion matrix.
    K.set_value(self.total_cm, np.zeros((self.num_classes, self.num_classes)))

  def get_config(self):
    config = {'num_classes': self.num_classes}
    base_config = super(MeanIoU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.MeanTensor')
class MeanTensor(Metric):
  """Computes the element-wise (weighted) mean of the given tensors.

  `MeanTensor` returns a tensor with the same shape of the input tensors. The
  mean value is updated by keeping local variables `total` and `count`. The
  `total` tracks the sum of the weighted values, and `count` stores the sum of
  the weighted counts.

  Usage:

  ```python
  m = tf.keras.metrics.MeanTensor()
  m.update_state([0, 1, 2, 3])
  m.update_state([4, 5, 6, 7])
  print('Result: ', m.result().numpy())  # Result: [2, 3, 4, 5]
  m.update_state([12, 10, 8, 6], sample_weights= [0, 0.2, 0.5, 1])
  print('Result: ', m.result().numpy())  # Result: [2, 3.636, 4.8, 5.333]
  ```
  """

  def __init__(self, name='mean_tensor', dtype=None):
    """Creates a `MeanTensor` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(MeanTensor, self).__init__(name=name, dtype=dtype)
    # State variables are created lazily in `_build` once the element shape is
    # known from the first `update_state` call.
    self._shape = None
    self._total = None
    self._count = None
    self._built = False

  def _build(self, shape):
    # Creates the element-wise `total` and `count` state variables.
    self._shape = tensor_shape.TensorShape(shape)
    # Create new state variables
    self._total = self.add_weight(
        'total', shape=shape, initializer=init_ops.zeros_initializer)
    self._count = self.add_weight(
        'count', shape=shape, initializer=init_ops.zeros_initializer)
    with ops.init_scope():
      if not context.executing_eagerly():
        # In graph mode the lazily-created variables must be initialized
        # explicitly, since they are created after session setup.
        K._initialize_variables(K._get_session())  # pylint: disable=protected-access
    self._built = True

  @property
  def total(self):
    # None until the first `update_state` call builds the variables.
    return self._total if self._built else None

  @property
  def count(self):
    # None until the first `update_state` call builds the variables.
    return self._count if self._built else None

  def update_state(self, values, sample_weight=None):
    """Accumulates statistics for computing the element-wise mean.

    Args:
      values: Per-example value.
      sample_weight: Optional weighting of each example. Defaults to 1.

    Returns:
      Update op.

    Raises:
      ValueError: If the shape of `values` does not match the shape seen on
        the first call.
    """
    values = math_ops.cast(values, self._dtype)
    if not self._built:
      self._build(values.shape)
    elif values.shape != self._shape:
      raise ValueError('MeanTensor input values must always have the same '
                       'shape. Expected shape (set during the first call): {}. '
                       'Got: {}'.format(self._shape, values.shape))

    num_values = array_ops.ones_like(values)
    if sample_weight is not None:
      sample_weight = math_ops.cast(sample_weight, self._dtype)

      # Update dimensions of weights to match with values if possible.
      values, _, sample_weight = tf_losses_utils.squeeze_or_expand_dimensions(
          values, sample_weight=sample_weight)
      try:
        # Broadcast weights if possible.
        sample_weight = weights_broadcast_ops.broadcast_weights(
            sample_weight, values)
      except ValueError:
        # Reduce values to same ndim as weight array
        ndim = K.ndim(values)
        weight_ndim = K.ndim(sample_weight)
        values = math_ops.reduce_mean(
            values, axis=list(range(weight_ndim, ndim)))

      num_values = math_ops.multiply(num_values, sample_weight)
      values = math_ops.multiply(values, sample_weight)

    update_total_op = self._total.assign_add(values)
    # The count update is sequenced after the total update so both reflect the
    # same batch when the returned op runs.
    with ops.control_dependencies([update_total_op]):
      return self._count.assign_add(num_values)

  def result(self):
    if not self._built:
      raise ValueError(
          'MeanTensor does not have any result yet. Please call the MeanTensor '
          'instance or use `.update_state(value)` before retrieving the result.'
          )
    return math_ops.div_no_nan(self.total, self.count)

  def reset_states(self):
    # No-op until state variables exist.
    if self._built:
      K.batch_set_value(
          [(v, np.zeros(self._shape.as_list())) for v in self.variables])
@keras_export('keras.metrics.BinaryCrossentropy')
class BinaryCrossentropy(MeanMetricWrapper):
  """Computes the crossentropy metric between the labels and predictions.

  This is the crossentropy metric class to be used when there are only two
  label classes (0 and 1).

  Usage:

  ```python
  m = tf.keras.metrics.BinaryCrossentropy()
  m.update_state([1., 0., 1., 0.], [1., 1., 1., 0.])

  # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
  # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
  # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]

  # Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
  #        = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
  #           -log(Y_MAX + EPSILON), -log(1)]
  #        = [(0 + 15.33) / 2, (0 + 0) / 2]
  # Reduced metric = 7.665 / 2
  print('Final result: ', m.result().numpy())  # Final result: 3.833
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd',
      loss='mse',
      metrics=[tf.keras.metrics.BinaryCrossentropy()])
  ```
  """

  def __init__(self,
               name='binary_crossentropy',
               dtype=None,
               from_logits=False,
               label_smoothing=0):
    """Creates a `BinaryCrossentropy` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      from_logits: (Optional) Whether output is expected to be a logits tensor.
        By default, we consider that output encodes a probability distribution.
      label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are
        smoothed, meaning the confidence on label values are relaxed.
        e.g. `label_smoothing=0.2` means that we will use a value of `0.1` for
        label `0` and `0.9` for label `1`.
    """
    super(BinaryCrossentropy, self).__init__(
        binary_crossentropy,
        name,
        dtype=dtype,
        from_logits=from_logits,
        label_smoothing=label_smoothing)
@keras_export('keras.metrics.CategoricalCrossentropy')
class CategoricalCrossentropy(MeanMetricWrapper):
  """Computes the crossentropy metric between the labels and predictions.

  This is the crossentropy metric class to be used when there are multiple
  label classes (2 or more). Here we assume that labels are given as a `one_hot`
  representation. eg., When labels values are [2, 0, 1],
  `y_true` = [[0, 0, 1], [1, 0, 0], [0, 1, 0]].

  Usage:

  ```python
  m = tf.keras.metrics.CategoricalCrossentropy()
  m.update_state([[0, 1, 0], [0, 0, 1]],
                 [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])

  # EPSILON = 1e-7, y = y_true, y` = y_pred
  # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
  # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]

  # xent = -sum(y * log(y'), axis = -1)
  #      = -((log 0.95), (log 0.1))
  #      = [0.051, 2.302]
  # Reduced xent = (0.051 + 2.302) / 2
  print('Final result: ', m.result().numpy())  # Final result: 1.176
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd',
      loss='mse',
      metrics=[tf.keras.metrics.CategoricalCrossentropy()])
  ```

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
    from_logits: (Optional) Whether `y_pred` is expected to be a logits tensor.
      By default, we assume that `y_pred` encodes a probability distribution.
    label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
      meaning the confidence on label values are relaxed. e.g.
      `label_smoothing=0.2` means that we will use a value of `0.1` for label
      `0` and `0.9` for label `1`.
  """

  def __init__(self,
               name='categorical_crossentropy',
               dtype=None,
               from_logits=False,
               label_smoothing=0):
    super(CategoricalCrossentropy, self).__init__(
        categorical_crossentropy,
        name,
        dtype=dtype,
        from_logits=from_logits,
        label_smoothing=label_smoothing)
@keras_export('keras.metrics.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(MeanMetricWrapper):
  """Computes the crossentropy metric between the labels and predictions.

  Use this crossentropy metric when there are two or more label classes.
  We expect labels to be provided as integers. If you want to provide labels
  using `one-hot` representation, please use `CategoricalCrossentropy` metric.
  There should be `# classes` floating point values per feature for `y_pred`
  and a single floating point value per feature for `y_true`.

  In the snippet below, there is a single floating point value per example for
  `y_true` and `# classes` floating pointing values per example for `y_pred`.
  The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
  `[batch_size, num_classes]`.

  Usage:

  ```python
  m = tf.keras.metrics.SparseCategoricalCrossentropy()
  m.update_state(
      [1, 2],
      [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])

  # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
  # logits = log(y_pred)
  # softmax = exp(logits) / sum(exp(logits), axis=-1)
  # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]

  # xent = -sum(y * log(softmax), 1)
  # log(softmax) = [[-2.9957, -0.0513, -16.1181], [-2.3026, -0.2231, -2.3026]]
  # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
  # xent = [0.0513, 2.3026]
  # Reduced xent = (0.0513 + 2.3026) / 2
  print('Final result: ', m.result().numpy())  # Final result: 1.176
  ```

  Usage with tf.keras API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile(
      'sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SparseCategoricalCrossentropy()])
  ```

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
    from_logits: (Optional) Whether `y_pred` is expected to be a logits tensor.
      By default, we assume that `y_pred` encodes a probability distribution.
    axis: (Optional) Defaults to -1. The dimension along which the metric is
      computed.
  """

  def __init__(self,
               name='sparse_categorical_crossentropy',
               dtype=None,
               from_logits=False,
               axis=-1):
    super(SparseCategoricalCrossentropy, self).__init__(
        sparse_categorical_crossentropy,
        name,
        dtype=dtype,
        from_logits=from_logits,
        axis=axis)
class SumOverBatchSize(Reduce):
  """Computes the weighted sum over batch size of the given values.

  For example, if values is [1, 3, 5, 7] then the metric value is 4.
  If the weights were specified as [1, 1, 0, 0] then the value would be 1.

  This metric creates two variables, `total` and `count` that are used to
  compute the average of `values`. This average is ultimately returned as sum
  over batch size which is an idempotent operation that simply divides `total`
  by `count`.

  If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0
  to mask values.
  """

  def __init__(self, name='sum_over_batch_size', dtype=None):
    """Creates a `SumOverBatchSize` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(SumOverBatchSize, self).__init__(
        reduction=metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
        name=name,
        dtype=dtype)
class SumOverBatchSizeMetricWrapper(SumOverBatchSize):
  """Wraps a function with the `SumOverBatchSizeMetricWrapper` metric."""

  def __init__(self, fn, name=None, dtype=None, **kwargs):
    """Creates a `SumOverBatchSizeMetricWrapper` instance.

    Args:
      fn: The metric function to wrap, with signature `fn(y_true, y_pred,
        **kwargs)`.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      **kwargs: The keyword arguments that are passed on to `fn`.
    """
    super(SumOverBatchSizeMetricWrapper, self).__init__(name=name, dtype=dtype)
    self._fn = fn
    self._fn_kwargs = kwargs

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates `fn(y_true, y_pred)` values, reduced over batch size."""
    y_true = math_ops.cast(y_true, self._dtype)
    y_pred = math_ops.cast(y_pred, self._dtype)
    y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions(
        y_pred, y_true)

    per_example_values = self._fn(y_true, y_pred, **self._fn_kwargs)
    return super(SumOverBatchSizeMetricWrapper, self).update_state(
        per_example_values, sample_weight=sample_weight)

  def get_config(self):
    # Tensor-valued kwargs are evaluated so the config stays serializable.
    config = {
        k: K.eval(v) if is_tensor_or_variable(v) else v
        for k, v in six.iteritems(self._fn_kwargs)
    }
    base_config = super(SumOverBatchSizeMetricWrapper, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
def accuracy(y_true, y_pred):
  """Returns 1.0 per position where `y_true` equals `y_pred`, else 0.0."""
  [y_pred, y_true], _ = (
      metrics_utils.ragged_assert_compatible_and_get_flat_values(
          [y_pred, y_true]))
  y_pred.shape.assert_is_compatible_with(y_true.shape)
  if y_true.dtype != y_pred.dtype:
    y_pred = math_ops.cast(y_pred, y_true.dtype)
  return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
@keras_export('keras.metrics.binary_accuracy')
def binary_accuracy(y_true, y_pred, threshold=0.5):
  """Computes the mean accuracy after thresholding `y_pred` at `threshold`.

  Args:
    y_true: Ground truth values.
    y_pred: The predicted values.
    threshold: (Optional) Float cutoff; predictions above it count as 1.

  Returns:
    Mean of elementwise equality of `y_true` and the thresholded `y_pred`,
    computed over the last axis.
  """
  threshold = math_ops.cast(threshold, y_pred.dtype)
  y_pred = math_ops.cast(y_pred > threshold, y_pred.dtype)
  return K.mean(math_ops.equal(y_true, y_pred), axis=-1)
@keras_export('keras.metrics.categorical_accuracy')
def categorical_accuracy(y_true, y_pred):
  """Returns 1.0 when the argmax of `y_true` and `y_pred` agree, else 0.0."""
  true_labels = math_ops.argmax(y_true, axis=-1)
  pred_labels = math_ops.argmax(y_pred, axis=-1)
  return math_ops.cast(math_ops.equal(true_labels, pred_labels), K.floatx())
@keras_export('keras.metrics.sparse_categorical_accuracy')
def sparse_categorical_accuracy(y_true, y_pred):
  """Returns 1.0 when the argmax of `y_pred` matches the integer label.

  Args:
    y_true: Integer ground truth labels.
    y_pred: Predicted scores, one column per class.
  """
  y_pred_rank = ops.convert_to_tensor(y_pred).shape.ndims
  y_true_rank = ops.convert_to_tensor(y_true).shape.ndims
  # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
  # NOTE(review): this squeeze triggers whenever the static ranks match, which
  # assumes y_true's trailing dimension is 1 in that case -- confirm callers.
  if (y_true_rank is not None) and (y_pred_rank is not None) and (len(
      K.int_shape(y_true)) == len(K.int_shape(y_pred))):
    y_true = array_ops.squeeze(y_true, [-1])
  y_pred = math_ops.argmax(y_pred, axis=-1)

  # If the predicted output and actual output types don't match, force cast them
  # to match.
  if K.dtype(y_pred) != K.dtype(y_true):
    y_pred = math_ops.cast(y_pred, K.dtype(y_true))

  return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
@keras_export('keras.metrics.top_k_categorical_accuracy')
def top_k_categorical_accuracy(y_true, y_pred, k=5):
  """Returns 1.0 when the one-hot true class is in the top-`k` predictions."""
  true_labels = math_ops.argmax(y_true, axis=-1)
  return math_ops.cast(nn.in_top_k(y_pred, true_labels, k), K.floatx())
@keras_export('keras.metrics.sparse_top_k_categorical_accuracy')
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
  """Returns 1.0 when the integer label is within the top-`k` predictions.

  Args:
    y_true: Integer ground truth labels.
    y_pred: Predicted scores, one column per class.
    k: (Optional) Number of top elements to look at. Defaults to 5.
  """
  y_pred_rank = ops.convert_to_tensor(y_pred).shape.ndims
  y_true_rank = ops.convert_to_tensor(y_true).shape.ndims
  # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
  # NOTE(review): this squeeze triggers whenever the static ranks match, which
  # assumes y_true's trailing dimension is 1 in that case -- confirm callers.
  if (y_true_rank is not None) and (y_pred_rank is not None) and (len(
      K.int_shape(y_true)) == len(K.int_shape(y_pred))):
    y_true = array_ops.squeeze(y_true, [-1])

  return math_ops.cast(
      nn.in_top_k(y_pred, math_ops.cast(y_true, 'int32'), k), K.floatx())
# Aliases.
# Short/uppercase spellings of the metric functions defined above;
# `cosine_proximity` is an alternate name for `cosine_similarity`.
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
cosine_proximity = cosine_similarity
def clone_metric(metric):
  """Returns a clone of the metric if stateful, otherwise returns it as is."""
  if not isinstance(metric, Metric):
    return metric
  # Stateful metrics carry variables; rebuild from config so the clone starts
  # with fresh state. `init_scope` lifts variable creation out of any
  # surrounding function graph.
  with ops.init_scope():
    return metric.__class__.from_config(metric.get_config())
def clone_metrics(metrics):
  """Clones the given metric list/dict."""
  if metrics is None:
    return None
  if isinstance(metrics, dict):
    return {output: clone_metric(m) for output, m in metrics.items()}
  return [clone_metric(m) for m in metrics]
@keras_export('keras.metrics.serialize')
def serialize(metric):
  """Serializes the given metric via `serialize_keras_object`."""
  return serialize_keras_object(metric)
@keras_export('keras.metrics.deserialize')
def deserialize(config, custom_objects=None):
  """Deserializes a metric from its config.

  Args:
    config: Metric configuration (e.g. output of `serialize`) or a string
      identifier of a metric defined in this module.
    custom_objects: Optional dictionary mapping names to custom classes or
      functions to be considered during deserialization.

  Returns:
    A Keras metric instance or metric function.
  """
  return deserialize_keras_object(
      config,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='metric function')
@keras_export('keras.metrics.get')
def get(identifier):
  """Retrieves a metric from an identifier (dict config, string, or callable)."""
  if isinstance(identifier, dict):
    return deserialize(identifier)
  if isinstance(identifier, six.string_types):
    return deserialize(str(identifier))
  if callable(identifier):
    return identifier
  raise ValueError('Could not interpret '
                   'metric function identifier: %s' % identifier)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/metrics.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests temporal sample weights correctness using Keras model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import metrics
from tensorflow.python.keras import optimizer_v2
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
class Bias(layers.Layer):
  """Layer that adds a trainable scalar bias to its inputs."""

  def build(self, input_shape):
    # `add_weight` is the supported spelling; `add_variable` is a deprecated
    # alias of the same Layer method.
    self.bias = self.add_weight('bias', (1,), initializer='zeros')

  def call(self, inputs):
    return inputs + self.bias

  def compute_output_shape(self, input_shape):
    # Adding a broadcast scalar bias does not change the input shape.
    return input_shape
def get_multi_io_temporal_model():
  """Builds the two-input/two-output temporal test model.

  Each branch repeats its input over `timesteps` steps (via a shared
  `RepeatVector`) and applies a time-distributed `Bias` layer.
  """
  timesteps = 2
  inp_1 = layers.Input(shape=(1,), name='input_1')
  inp_2 = layers.Input(shape=(1,), name='input_2')
  repeat = layers.RepeatVector(timesteps)
  out_1 = layers.TimeDistributed(Bias(), name='output_1')
  out_2 = layers.TimeDistributed(Bias(), name='output_2')
  return testing_utils.get_multi_io_model(
      [inp_1, repeat, out_1], [inp_2, repeat, out_2])
def get_compiled_multi_io_model_temporal(sample_weight_mode):
  """Builds and compiles the multi-IO temporal model.

  Args:
    sample_weight_mode: Value forwarded to `Model.compile`; a string, list or
      dict selecting temporal sample weighting per output.

  Returns:
    The compiled model, with both an unweighted (`mae`) and a weighted
    (`mae_2`) mean absolute error metric per output.
  """
  model = get_multi_io_temporal_model()
  model.compile(
      optimizer=optimizer_v2.gradient_descent.SGD(0.1),
      loss='mae',
      metrics=[metrics.MeanAbsoluteError(name='mae')],
      weighted_metrics=[metrics.MeanAbsoluteError(name='mae_2')],
      sample_weight_mode=sample_weight_mode,
      run_eagerly=testing_utils.should_run_eagerly(),
      experimental_run_tf_function=testing_utils.should_run_tf_function())
  return model
def run_with_different_sample_weight_mode_inputs(fn, partial_sw=True):
  """Executes the given function with different sample weight mode inputs.

  Args:
    fn: Training or eval function to execute.
    partial_sw: Boolean flag to indicate whether temporal sample weight mode
      should be set partially just for one output.
  """
  sample_weight_modes = [
      'temporal',
      ['temporal', 'temporal'],
      {'output_1': 'temporal', 'output_2': 'temporal'},
  ]
  if partial_sw:
    sample_weight_modes.append([None, 'temporal'])
    # TODO(b/129700800): Enable after bug is fixed.
    # sample_weight_modes.append({'output_2': 'temporal'})

  for mode in sample_weight_modes:
    fn(get_compiled_multi_io_model_temporal(sample_weight_mode=mode))
@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
@keras_parameterized.run_all_keras_modes
class TestMetricsCorrectnessMultiIOTemporal(keras_parameterized.TestCase):
def custom_generator_multi_io_temporal(self, sample_weights=None):
"""Generator for getting data for temporal multi io model.
Args:
sample_weights: List of sample_weights.
Yields:
Tuple of inputs, label, sample weights data.
"""
batch_size = 3
num_samples = 3
if sample_weights:
assert len(sample_weights) == 2
w1 = sample_weights[0]
w2 = sample_weights[1]
else:
w1 = None
w2 = None
iteration = 0
while True:
batch_index = iteration * batch_size % num_samples
iteration += 1
start = batch_index
end = start + batch_size
x = [self.x[start:end], self.x[start:end]]
y = [self.y1[start:end], self.y2[start:end]]
if sample_weights:
w = [
None if w1 is None else w1[start:end],
None if w2 is None else w2[start:end]
]
else:
w = None
yield x, y, w
  def setUp(self):
    """Creates inputs/targets/weights and the expected per-epoch results."""
    super(TestMetricsCorrectnessMultiIOTemporal, self).setUp()

    self.x = np.asarray([[0.], [1.], [2.]])
    self.y1 = np.asarray([[[.5], [1.]], [[2.], [2.5]], [[3.5], [2.5]]])
    self.y2 = np.asarray([[[.5], [1.5]], [[2.], [1.5]], [[3.5], [3.]]])

    # The weighted-metric key differs between TF1 and TF2 naming.
    if tf2.enabled():
      self.wmae = 'mae_2'
    else:
      self.wmae = 'weighted_mae_2'

    # Without weights:
    #   Epoch 1 - bias = 0
    #     y_pred_1 = [[[0.], [0.]], [[1.], [1.]], [[2.], [2.]]]
    #     y_pred_2 = [[[0.], [0.]], [[1.], [1.]], [[2.], [2.]]]
    #     mae (y1 - y_pred_1) = [[[.5], [1.]], [[1.], [1.5]], [[1.5], [.5]]]
    #     mae = [[3/3, 3/3]] = [[1, 1]] = 2/2 = 1
    #     mae_2 (y2 - y_pred_2) = [[[.5], [1.5]], [[1.], [.5]], [[1.5], [1.]]]
    #     mae_2 = [[3/3, 3/3]] = [[1, 1]] = 2/2 = 1
    #   Epoch 2 - bias = 0.1 (2/2 * 0.1)
    #     y_pred_1 = [[[.1], [.1]], [[1.1], [1.1]], [[2.1], [2.1]]]
    #     y_pred_2 = [[[.1], [.1]], [[1.1], [1.1]], [[2.1], [2.1]]]
    #     mae (y1 - y_pred_1) = [[[.4], [.9]], [[.9], [1.4]], [[1.4], [.4]]]
    #     mae = [[2.7/3, 2.7/3]] = [[0.9, 0.9]] = 1.8/2 = 0.9
    #     mae_2 (y2 - y_pred_2) = [[[.4], [1.4]], [[.9], [.4]], [[1.4], [.9]]]
    #     mae_2 = [[2.7/3, 2.7/3]] = [[0.9, 0.9]] = 1.8/2 = 0.9

    self.expected_fit_result = {
        'output_1_mae': [1, 0.9],
        'output_2_mae': [1, 0.9],
        'output_1_' + self.wmae: [1, 0.9],
        'output_2_' + self.wmae: [1, 0.9],
        'loss': [2., 1.8],
        'output_1_loss': [1, 0.9],
        'output_2_loss': [1, 0.9],
    }

    self.sample_weight_1 = np.asarray([[.5, 2.], [.5, 2.], [.5, 2.]])
    self.sample_weight_2 = np.asarray([[2., .5], [2., .5], [2., .5]])

    # With weights:
    #   Epoch 1
    #     y_pred_1 = [[[0.], [0.]], [[1.], [1.]], [[2.], [2.]]]
    #     y_pred_2 = [[[0.], [0.]], [[1.], [1.]], [[2.], [2.]]]
    #     mae (y1 - y_pred_1) = [[[.5], [1.]], [[1.], [1.5]], [[1.5], [.5]]]
    #     with weights = [[[.5 * .5], [1 * 2]],
    #                     [[1 * .5], [1.5 * 2]],
    #                     [[1.5 * .5], [.5 * 2]]]
    #     mae (w/o weights) = [[3/3, 3/3]] = [[1, 1]] = 2/2 = 1
    #     mae (weighted mean) = [[1.5/1.5, 6/6]] = [[1, 1]] = 2/2 = 1
    #     mae (sum over bs) = [[1.5/3, 6/3]] = [[.5, 2]] = 2.5/2 = 1.25
    #     mae_2 (y2 - y_pred_2) = [[[.5], [1.5]], [[1.], [.5]], [[1.5], [1.]]]
    #     with weights = [[[.5 * 2], [1.5 * .5]],
    #                     [[1. * 2], [.5 * .5]],
    #                     [[1.5 * 2], [1. * .5]]]
    #     mae_2 (w/o weights) = [[3/3, 3/3]] = [[1, 1]] = 2/2 = 1
    #     mae_2 (weighted mean) = [[6/6, 1.5/1.5]] = [[1, 1]] = 2/2 = 1
    #     mae_2 (sum over bs) = [[6/3, 1.5/3]] = [[2, .5]] = 2.5/2 = 1.25
    #   Epoch 2 - bias = 0.125 (2.5/2 * 0.1)
    #     y_pred_1 = [[[0.125], [0.125]], [[1.125], [1.125]], [[2.125], [2.125]]]
    #     y_pred_2 = [[[0.125], [0.125]], [[1.125], [1.125]], [[2.125], [2.125]]]
    #     mae (y1 - y_pred_1) = [[[.375], [.875]],
    #                            [[.875], [1.375]],
    #                            [[1.375], [.375]]]
    #     with weights = [[[.375 * .5], [.875 * 2.]],
    #                     [[.875 * .5], [1.375 * 2.]],
    #                     [[1.375 * .5], [.375 * 2.]]]
    #     mae (w/o weights) = [[2.625/3, 2.625/3]] = (.875+.875)/2 = .875
    #     mae (weighted mean) = [[1.3125/1.5, 5.25/6]] = (.875+.875)/2 = .875
    #     mae (sum over bs) = [[1.3125/3, 5.25/3]] = (0.4375+1.75)/2 = 1.09375
    #     mae_2 (y2 - y_pred_2) = [[[.375], [1.375]],
    #                              [[.875], [.375]],
    #                              [[1.375], [.875]]]
    #     with weights = [[[.375 * 2.], [1.375 * .5]],
    #                     [[.875 * 2.], [.375 * .5]],
    #                     [[1.375 * 2.], [.875 * .5]]]
    #     mae_2 (w/o weights) = [[2.625/3, 2.625/3]] = (.875+.875)/2 = .875
    #     mae_2 (weighted mean) = [[5.25/6, 1.3125/1.5]] = (.875+.875)/2 = .875
    #     mae_2 (sum over bs) = [[5.25/3, 1.3125/3]] = (1.75+0.4375)/2 = 1.09375

    self.expected_fit_result_with_weights = {
        'output_1_mae': [1, 0.875],
        'output_2_mae': [1, 0.875],
        'output_1_' + self.wmae: [1, 0.875],
        'output_2_' + self.wmae: [1, 0.875],
        'loss': [2.5, 2.1875],
        'output_1_loss': [1.25, 1.09375],
        'output_2_loss': [1.25, 1.09375],
    }

    self.expected_fit_result_with_weights_output_2 = {
        'output_1_mae': [1., 0.9],
        'output_2_mae': [1, 0.875],
        'output_1_' + self.wmae: [1., 0.9],
        'output_2_' + self.wmae: [1., 0.875],
        'loss': [2.25, 1.99375],
        'output_1_loss': [1., 0.9],
        'output_2_loss': [1.25, 1.09375],
    }

    # In the order: 'loss', 'output_1_loss', 'output_2_loss',
    # 'output_1_mae', 'output_1_mae_2',
    # 'output_2_mae', 'output_2_mae_2'
    self.expected_batch_result_with_weights = [
        2.1875, 1.09375, 1.09375, 0.875, 0.875, 0.875, 0.875
    ]
    self.expected_batch_result_with_weights_output_2 = [
        1.99375, 0.9, 1.09375, 0.9, 0.9, 0.875, 0.875
    ]
    self.expected_batch_result = [1.8, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]
def test_fit(self):
def _train_and_assert(model):
history = model.fit([self.x, self.x], [self.y1, self.y2],
batch_size=3,
epochs=2,
shuffle=False)
for key, value in self.expected_fit_result.items():
self.assertAllClose(history.history[key], value, 1e-3)
run_with_different_sample_weight_mode_inputs(_train_and_assert)
def test_fit_with_sample_weight(self):
def _train_and_assert(model):
history = model.fit([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_1': self.sample_weight_1,
'output_2': self.sample_weight_2,
},
batch_size=3,
epochs=2,
shuffle=False)
for key, value in self.expected_fit_result_with_weights.items():
self.assertAllClose(history.history[key], value, 1e-3)
run_with_different_sample_weight_mode_inputs(
_train_and_assert, partial_sw=False)
def test_fit_with_partial_sample_weight(self):
def _train_and_assert(model):
history = model.fit([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_2': self.sample_weight_2,
},
batch_size=3,
epochs=2,
shuffle=False)
for key, value in self.expected_fit_result_with_weights_output_2.items():
self.assertAllClose(history.history[key], value, 1e-3)
run_with_different_sample_weight_mode_inputs(_train_and_assert)
def test_eval(self):
def _eval_and_assert(model):
model.train_on_batch([self.x, self.x], [self.y1, self.y2])
eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],
batch_size=3)
self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)
run_with_different_sample_weight_mode_inputs(_eval_and_assert)
def test_eval_with_sample_weight(self):
def _eval_and_assert(model):
model.train_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_1': self.sample_weight_1,
'output_2': self.sample_weight_2,
})
eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],
batch_size=3,
sample_weight={
'output_1': self.sample_weight_1,
'output_2': self.sample_weight_2,
})
self.assertAllClose(eval_result, self.expected_batch_result_with_weights,
1e-3)
run_with_different_sample_weight_mode_inputs(
_eval_and_assert, partial_sw=False)
def test_eval_with_partial_sample_weight(self):
def _eval_and_assert(model):
model.train_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_2': self.sample_weight_2,
})
eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],
batch_size=3,
sample_weight={
'output_2': self.sample_weight_2,
})
self.assertAllClose(eval_result,
self.expected_batch_result_with_weights_output_2,
1e-3)
run_with_different_sample_weight_mode_inputs(_eval_and_assert)
def test_train_on_batch(self):
def _train_and_assert(model):
for _ in range(2):
result = model.train_on_batch([self.x, self.x], [self.y1, self.y2])
self.assertAllClose(result, self.expected_batch_result, 1e-3)
run_with_different_sample_weight_mode_inputs(_train_and_assert)
def test_train_on_batch_with_sample_weight(self):
def _train_and_assert(model):
for _ in range(2):
result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_1': self.sample_weight_1,
'output_2': self.sample_weight_2,
})
self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)
run_with_different_sample_weight_mode_inputs(
_train_and_assert, partial_sw=False)
def test_train_on_batch_with_partial_sample_weight(self):
def _train_and_assert(model):
for _ in range(2):
result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_2': self.sample_weight_2,
})
self.assertAllClose(result,
self.expected_batch_result_with_weights_output_2,
1e-3)
run_with_different_sample_weight_mode_inputs(_train_and_assert)
def test_test_on_batch(self):
def _test_and_assert(model):
model.train_on_batch([self.x, self.x], [self.y1, self.y2])
result = model.test_on_batch([self.x, self.x], [self.y1, self.y2])
self.assertAllClose(result, self.expected_batch_result, 1e-3)
run_with_different_sample_weight_mode_inputs(_test_and_assert)
def test_test_on_batch_with_sample_weight(self):
def _test_and_assert(model):
model.train_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_1': self.sample_weight_1,
'output_2': self.sample_weight_2,
})
result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_1': self.sample_weight_1,
'output_2': self.sample_weight_2,
})
self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)
run_with_different_sample_weight_mode_inputs(
_test_and_assert, partial_sw=False)
def test_test_on_batch_with_partial_sample_weight(self):
def _test_and_assert(model):
model.train_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_2': self.sample_weight_2,
})
result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_2': self.sample_weight_2,
})
self.assertAllClose(result,
self.expected_batch_result_with_weights_output_2,
1e-3)
run_with_different_sample_weight_mode_inputs(_test_and_assert)
def test_fit_generator(self):
def _train_and_assert(model):
history = model.fit_generator(
self.custom_generator_multi_io_temporal(),
steps_per_epoch=1,
epochs=2)
for key, value in self.expected_fit_result.items():
self.assertAllClose(history.history[key], value, 1e-3)
run_with_different_sample_weight_mode_inputs(_train_and_assert)
def test_fit_generator_with_sample_weight(self):
def _train_and_assert(model):
history = model.fit_generator(
self.custom_generator_multi_io_temporal(
sample_weights=[self.sample_weight_1, self.sample_weight_2]),
steps_per_epoch=1,
epochs=2)
for key, value in self.expected_fit_result_with_weights.items():
self.assertAllClose(history.history[key], value, 1e-3)
run_with_different_sample_weight_mode_inputs(
_train_and_assert, partial_sw=False)
def test_fit_generator_with_partial_sample_weight(self):
def _train_and_assert(model):
history = model.fit_generator(
self.custom_generator_multi_io_temporal(
sample_weights=[None, self.sample_weight_2]),
steps_per_epoch=1,
epochs=2)
for key, value in self.expected_fit_result_with_weights_output_2.items():
self.assertAllClose(history.history[key], value, 1e-3)
run_with_different_sample_weight_mode_inputs(_train_and_assert)
def test_eval_generator(self):
def _test_and_assert(model):
model.train_on_batch([self.x, self.x], [self.y1, self.y2])
eval_result = model.evaluate_generator(
self.custom_generator_multi_io_temporal(), steps=1)
self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)
run_with_different_sample_weight_mode_inputs(_test_and_assert)
def test_eval_generator_with_sample_weight(self):
def _test_and_assert(model):
model.train_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_1': self.sample_weight_1,
'output_2': self.sample_weight_2,
})
eval_result = model.evaluate_generator(
self.custom_generator_multi_io_temporal(
sample_weights=[self.sample_weight_1, self.sample_weight_2]),
steps=2)
self.assertAllClose(eval_result, self.expected_batch_result_with_weights,
1e-3)
run_with_different_sample_weight_mode_inputs(
_test_and_assert, partial_sw=False)
def test_eval_generator_with_partial_sample_weight(self):
def _test_and_assert(model):
model.train_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_2': self.sample_weight_2,
})
eval_result = model.evaluate_generator(
self.custom_generator_multi_io_temporal(
sample_weights=[None, self.sample_weight_2]),
steps=2)
self.assertAllClose(eval_result,
self.expected_batch_result_with_weights_output_2,
1e-3)
run_with_different_sample_weight_mode_inputs(_test_and_assert)
def test_error_on_fit_with_class_weight(self):
def _train_and_assert(model):
with self.assertRaisesRegex(
ValueError,
r'`class_weight` not supported for 3\+ dimensional targets.'):
model.fit([self.x, self.x], [self.y1, self.y2],
class_weight={'output_1': {
.5: .5,
2.: .5,
3.5: .5
}},
batch_size=3,
epochs=2,
shuffle=False)
run_with_different_sample_weight_mode_inputs(_train_and_assert)
# Standard entry point: run every test case in this module under the
# TensorFlow test runner.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/temporal_sample_weights_correctness_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `models.py` (model cloning, mainly)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import metrics
from tensorflow.python.keras import models
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class TestModel(keras.Model):
  """A model subclass."""

  def __init__(self, n_outputs=4, trainable=True):
    """A test class with one dense layer and number of outputs as a variable.

    Arguments:
        n_outputs: int, number of units in the single dense layer; also stored
            as a `ResourceVariable` attribute of the model.
        trainable: bool, passed through as the trainable flag of the
            `n_outputs` variable (not of the dense layer).
    """
    super(TestModel, self).__init__()
    # NOTE(review): keep the attribute assignment order as-is -- Keras tracks
    # layers/variables via attribute assignment, so reordering could change
    # weight ordering; verify before touching.
    self.layer1 = keras.layers.Dense(n_outputs)
    self.n_outputs = resource_variable_ops.ResourceVariable(
        n_outputs, trainable=trainable)

  def call(self, x):
    # Forward pass is just the single dense projection.
    return self.layer1(x)
def _get_layers(input_shape=(4,), add_input_layer=False):
  """Returns a small layer stack: [input handling] + Dense/BN/Dropout/Dense.

  Arguments:
      input_shape: shape tuple for the first layer, or None/falsy to build
          a shape-agnostic leading Dense layer.
      add_input_layer: if True, prepend an explicit `InputLayer`.

  Returns:
      A list of Keras layers.
  """
  if add_input_layer:
    head = [keras.layers.InputLayer(input_shape=input_shape),
            keras.layers.Dense(4)]
  elif input_shape:
    head = [keras.layers.Dense(4, input_shape=input_shape)]
  else:
    head = [keras.layers.Dense(4)]
  tail = [
      keras.layers.BatchNormalization(),
      keras.layers.Dropout(0.5),
      keras.layers.Dense(4),
  ]
  return head + tail
def _get_model(input_shape=(4,)):
  """Builds a model of the current test model type from `_get_layers`."""
  layers = _get_layers(input_shape=None, add_input_layer=False)
  return testing_utils.get_model_from_layers(layers, input_shape=input_shape)
class TestModelCloning(keras_parameterized.TestCase):
  """Tests for `keras.models.clone_model` and its sequential/functional helpers."""

  @keras_parameterized.run_all_keras_modes
  @parameterized.named_parameters([
      {'testcase_name': 'has_input_layer',
       'input_shape': (4,),
       'add_input_layer': True,
       'share_weights': False},
      {'testcase_name': 'no_input_layer',
       'input_shape': None,
       'add_input_layer': False,
       'share_weights': False},
      {'testcase_name': 'has_input_layer_share_weights',
       'input_shape': (4,),
       'add_input_layer': True,
       'share_weights': True},
      {'testcase_name': 'no_input_layer_share_weights',
       'input_shape': None,
       'add_input_layer': False,
       'share_weights': True},
  ])
  def test_clone_sequential_model(
      self, input_shape, add_input_layer, share_weights):
    """Cloning a Sequential model preserves input layers and graph-network-ness."""
    if share_weights:
      clone_fn = functools.partial(
          keras.models._clone_sequential_model, layer_fn=models.share_weights)
    else:
      clone_fn = keras.models.clone_model
    val_a = np.random.random((10, 4))
    model = models.Sequential(_get_layers(input_shape, add_input_layer))
    # Sanity check
    self.assertEqual(
        isinstance(model._layers[0], keras.layers.InputLayer),
        add_input_layer)
    self.assertEqual(model._is_graph_network, add_input_layer)

    # With placeholder creation -- clone model should have an InputLayer
    # if the original model has one.
    new_model = clone_fn(model)
    self.assertEqual(
        isinstance(new_model._layers[0], keras.layers.InputLayer),
        add_input_layer)
    self.assertEqual(new_model._is_graph_network, model._is_graph_network)
    if input_shape:
      # update ops from batch norm needs to be included
      self.assertEqual(len(new_model.get_updates_for(new_model.inputs)), 2)

    # On top of new tensor -- clone model should always have an InputLayer.
    input_a = keras.Input(shape=(4,))
    new_model = clone_fn(model, input_tensors=input_a)
    self.assertIsInstance(new_model._layers[0], keras.layers.InputLayer)
    self.assertTrue(new_model._is_graph_network)

    # On top of new, non-Keras tensor -- clone model should always have an
    # InputLayer.
    if not context.executing_eagerly():
      # TODO(b/121277734):Skip Eager contexts, as Input() layers raise an error
      # saying they should not be used with EagerTensors
      input_a = keras.backend.variable(val_a)
      new_model = clone_fn(model, input_tensors=input_a)
      self.assertIsInstance(new_model._layers[0], keras.layers.InputLayer)
      self.assertTrue(new_model._is_graph_network)

  @keras_parameterized.run_all_keras_modes
  @parameterized.named_parameters([
      {'testcase_name': 'clone_weights', 'share_weights': False},
      {'testcase_name': 'share_weights', 'share_weights': True},
  ])
  def test_clone_functional_model(self, share_weights):
    """Cloning a two-input functional model keeps BN updates and trains."""
    if share_weights:
      clone_fn = functools.partial(
          keras.models._clone_functional_model, layer_fn=models.share_weights)
    else:
      clone_fn = keras.models.clone_model
    val_a = np.random.random((10, 4))
    val_b = np.random.random((10, 4))
    val_out = np.random.random((10, 4))
    input_a = keras.Input(shape=(4,))
    input_b = keras.Input(shape=(4,))
    # dense_1 is shared between the two input branches.
    dense_1 = keras.layers.Dense(4,)
    dense_2 = keras.layers.Dense(4,)
    x_a = dense_1(input_a)
    x_a = keras.layers.Dropout(0.5)(x_a)
    x_a = keras.layers.BatchNormalization()(x_a)
    x_b = dense_1(input_b)
    x_a = dense_2(x_a)
    outputs = keras.layers.add([x_a, x_b])
    model = keras.models.Model([input_a, input_b], outputs)

    # With placeholder creation
    new_model = clone_fn(model)
    self.assertEqual(len(new_model.get_updates_for(new_model.inputs)), 2)
    new_model.compile(
        testing_utils.get_v2_optimizer('rmsprop'),
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    new_model.train_on_batch([val_a, val_b], val_out)

    # On top of new tensors
    input_a = keras.Input(shape=(4,), name='a')
    input_b = keras.Input(shape=(4,), name='b')
    new_model = keras.models.clone_model(
        model, input_tensors=[input_a, input_b])
    self.assertEqual(len(new_model.get_updates_for(new_model.inputs)), 2)
    new_model.compile(
        testing_utils.get_v2_optimizer('rmsprop'),
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    new_model.train_on_batch([val_a, val_b], val_out)

    # On top of new, non-Keras tensors
    if not context.executing_eagerly():
      # TODO(b/121277734):Skip Eager contexts, as Input() layers raise an error
      # saying they should not be used with EagerTensors
      input_a = keras.backend.variable(val_a)
      input_b = keras.backend.variable(val_b)
      new_model = clone_fn(model, input_tensors=[input_a, input_b])
      self.assertEqual(len(new_model.get_updates_for(new_model.inputs)), 2)
      new_model.compile(
          testing_utils.get_v2_optimizer('rmsprop'),
          'mse',
          run_eagerly=testing_utils.should_run_eagerly(),
          experimental_run_tf_function=testing_utils.should_run_tf_function())
      new_model.train_on_batch(None, val_out)

  @keras_parameterized.run_all_keras_modes
  @parameterized.named_parameters([
      {'testcase_name': 'clone_weights', 'share_weights': False},
      {'testcase_name': 'share_weights', 'share_weights': True},
  ])
  def test_clone_functional_with_masking(self, share_weights):
    """Cloning preserves masking: fully masked rows contribute zero loss."""
    if share_weights:
      clone_fn = functools.partial(
          keras.models._clone_functional_model, layer_fn=models.share_weights)
    else:
      clone_fn = keras.models.clone_model
    x = np.array([[[1.], [1.]], [[0.], [0.]]])
    inputs = keras.Input((2, 1))
    outputs = keras.layers.Masking(mask_value=0)(inputs)
    outputs = keras.layers.TimeDistributed(
        keras.layers.Dense(1, kernel_initializer='one'))(outputs)
    model = keras.Model(inputs, outputs)
    model = clone_fn(model)
    model.compile(
        loss='mse',
        optimizer=testing_utils.get_v2_optimizer('adam'),
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    y = np.array([[[1], [1]], [[1], [1]]])
    # The unmasked row predicts exactly 1 (identity-ish weights), and the
    # masked row is excluded, so the loss must be exactly zero.
    loss = model.train_on_batch(x, y)
    self.assertEqual(float(loss), 0.)

  def test_model_cloning_invalid_use_cases(self):
    """Mismatched model kinds / bad input tensors must raise ValueError."""
    seq_model = keras.models.Sequential()
    seq_model.add(keras.layers.Dense(4, input_shape=(4,)))
    x = keras.Input((4,))
    y = keras.layers.Dense(4)(x)
    fn_model = keras.models.Model(x, y)
    with self.assertRaises(ValueError):
      keras.models._clone_functional_model(seq_model)
    with self.assertRaises(ValueError):
      keras.models._clone_functional_model(None)
    with self.assertRaises(ValueError):
      keras.models._clone_sequential_model(fn_model)
    with self.assertRaises(ValueError):
      keras.models._clone_sequential_model(seq_model, input_tensors=[x, x])
    with self.assertRaises(ValueError):
      keras.models._clone_sequential_model(seq_model, input_tensors=y)

  def test_functional_cloning_does_not_create_unnecessary_placeholders(self):
    """Cloning a functional model onto concrete tensors adds no placeholders."""
    with ops.Graph().as_default():
      x = keras.Input((4,))
      y = keras.layers.Dense(4)(x)
      model = keras.models.Model(x, y)
    graph = ops.Graph()
    with graph.as_default():
      x = array_ops.ones((10, 4))
      _ = keras.models.clone_model(model, input_tensors=[x])
      has_placeholder = _has_placeholder(graph)
      self.assertFalse(has_placeholder)

  def test_sequential_cloning_does_not_create_unnecessary_placeholders(self):
    """Cloning a Sequential model onto concrete tensors adds no placeholders."""
    with ops.Graph().as_default():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(4, input_shape=(4,)))
    graph = ops.Graph()
    with graph.as_default():
      x = array_ops.ones((10, 4))
      _ = keras.models.clone_model(model, input_tensors=[x])
      has_placeholder = _has_placeholder(graph)
      self.assertFalse(has_placeholder)
def _has_placeholder(graph):
ops_types = [op.type for op in graph.get_operations()]
return any('Placeholder' in s for s in ops_types)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CheckpointingTests(keras_parameterized.TestCase):
  """Verifies optimizer state is captured by model weight checkpoints."""

  def test_optimizer_dependency(self):
    """`save_weights`/`load_weights` must round-trip optimizer accumulators."""
    model = _get_model()
    opt = adam.AdamOptimizer(.01)
    model.compile(
        optimizer=opt,
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    # Fit so the optimizer creates its accumulator variables.
    model.fit(
        x=np.array([[1., 2., 3., 4.]]),
        y=np.array([[1., 1., 1., 1.]]),
        epochs=2)
    save_prefix = os.path.join(self.get_temp_dir(), 'ckpt')
    beta1_power, _ = opt._get_beta_accumulators()
    # Set a recognizable value, save, clobber it, then restore: load_weights
    # must bring back the saved value (12), proving the accumulator is part
    # of the checkpoint dependency graph.
    self.evaluate(beta1_power.assign(12.))
    model.save_weights(save_prefix)
    self.evaluate(beta1_power.assign(13.))
    model.load_weights(save_prefix)
    self.assertEqual(12., self.evaluate(beta1_power))
@keras_parameterized.run_all_keras_modes
class TestModelBackend(keras_parameterized.TestCase):
  """Checks model building against non-default backend float settings."""

  def test_model_backend_float64_use_cases(self):
    """Building/compiling a model must work when floatx is float64.

    Regression test for GitHub issue 19318.
    """
    floatx = keras.backend.floatx()
    keras.backend.set_floatx('float64')
    # Restore the global floatx in a `finally` so a failure in the body
    # cannot leak float64 into unrelated tests in the same process (the
    # original code skipped the restore on exception).
    try:
      x = keras.Input((5,))
      y = keras.layers.Dense(1)(x)
      model = keras.models.Model(x, y)
      model.compile(
          testing_utils.get_v2_optimizer('rmsprop'),
          'mse',
          run_eagerly=testing_utils.should_run_eagerly(),
          experimental_run_tf_function=testing_utils.should_run_tf_function())
    finally:
      keras.backend.set_floatx(floatx)
class TestCloneAndBuildModel(keras_parameterized.TestCase):
  """Tests for `models.clone_and_build_model`.

  Note: uses `assertRaisesRegex` throughout; `assertRaisesRegexp` is a
  deprecated alias in Python 3 and emits DeprecationWarning.
  """

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_clone_and_build_non_compiled_model(self):
    """Cloning an uncompiled model: compile_clone=True fails, and the clone
    refuses to train/evaluate until it is compiled."""
    inp = np.random.random((10, 4))
    out = np.random.random((10, 4))
    model = _get_model()
    with self.assertRaisesRegex(ValueError, 'has not been compiled'):
      models.clone_and_build_model(model, compile_clone=True)
    is_subclassed = (testing_utils.get_model_type() == 'subclass')
    # With placeholder creation
    new_model = models.clone_and_build_model(
        model, compile_clone=False, in_place_reset=is_subclassed)
    with self.assertRaisesRegex(RuntimeError, 'must compile'):
      new_model.evaluate(inp, out)
    with self.assertRaisesRegex(RuntimeError, 'must compile'):
      new_model.train_on_batch(inp, out)
    new_model.compile(
        testing_utils.get_v2_optimizer('rmsprop'),
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    new_model.train_on_batch(inp, out)

    # Create new tensors for inputs and targets
    input_a = keras.Input(shape=(4,))
    target_a = keras.Input(shape=(4,))
    new_model = models.clone_and_build_model(
        model, input_tensors=input_a, target_tensors=[target_a],
        compile_clone=False, in_place_reset=is_subclassed)
    with self.assertRaisesRegex(RuntimeError, 'must compile'):
      new_model.evaluate(inp, out)
    with self.assertRaisesRegex(RuntimeError, 'must compile'):
      new_model.train_on_batch(inp, out)
    new_model.compile(
        testing_utils.get_v2_optimizer('rmsprop'),
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    new_model.train_on_batch(inp, out)

  def _assert_same_compile_params(self, model):
    """Assert that two models have the same compile parameters."""
    self.assertEqual('mse', model.loss)
    self.assertTrue(
        isinstance(model.optimizer,
                   (keras.optimizers.RMSprop,
                    keras.optimizer_v2.rmsprop.RMSprop)))
    self.assertEqual(['acc', metrics.categorical_accuracy],
                     model._compile_metrics)

  def _clone_and_build_test_helper(self, model, model_type):
    """Clones `model` three ways (placeholder inputs, new input tensors,
    new input+target tensors) and trains/evaluates each clone."""
    inp = np.random.random((10, 4))
    out = np.random.random((10, 4))
    is_subclassed = (model_type == 'subclass')
    # With placeholder creation
    new_model = models.clone_and_build_model(
        model, compile_clone=True, in_place_reset=is_subclassed)
    self._assert_same_compile_params(new_model)
    new_model.train_on_batch(inp, out)
    new_model.evaluate(inp, out)

    # Create new tensors for inputs and targets
    input_a = keras.Input(shape=(4,), name='a')
    new_model = models.clone_and_build_model(
        model, input_tensors=input_a, compile_clone=True,
        in_place_reset=is_subclassed)
    self._assert_same_compile_params(new_model)
    new_model.train_on_batch(inp, out)
    new_model.evaluate(inp, out)

    target_a = keras.Input(shape=(4,), name='b')
    new_model = models.clone_and_build_model(
        model, input_tensors=input_a, target_tensors=[target_a],
        compile_clone=True, in_place_reset=is_subclassed)
    self._assert_same_compile_params(new_model)
    new_model.train_on_batch(inp, out)
    new_model.evaluate(inp, out)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_clone_and_build_compiled(self):
    """Cloning a compiled model preserves its compile parameters."""
    model = _get_model()
    model.compile(
        testing_utils.get_v2_optimizer('rmsprop'),
        'mse',
        metrics=['acc', metrics.categorical_accuracy],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    self._clone_and_build_test_helper(model, testing_utils.get_model_type())

  @keras_parameterized.run_all_keras_modes
  def test_clone_and_build_sequential_without_inputs_defined(self):
    """Cloning a deferred-build Sequential model works before and after build."""
    model = models.Sequential(_get_layers(input_shape=None))
    model.compile(
        testing_utils.get_v2_optimizer('rmsprop'),
        'mse',
        metrics=['acc', metrics.categorical_accuracy],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    self._clone_and_build_test_helper(model, 'sequential')
    inp = np.random.random((10, 4))
    out = np.random.random((10, 4))
    # Building the original by training it must not break subsequent cloning.
    model.train_on_batch(inp, out)
    self._clone_and_build_test_helper(model, 'sequential')

  def assert_optimizer_iterations_increases(self, optimizer):
    """Asserts one train step bumps the injected iterations variable by one."""
    model = _get_model()
    model.compile(
        optimizer,
        'mse',
        metrics=['acc', metrics.categorical_accuracy],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    global_step = keras.backend.variable(123, dtype=dtypes.int64)
    clone_model = models.clone_and_build_model(
        model, compile_clone=True, optimizer_iterations=global_step,
        in_place_reset=(testing_utils.get_model_type() == 'subclass'))
    inp = np.random.random((10, 4))
    out = np.random.random((10, 4))
    clone_model.train_on_batch(inp, out)
    self.assertEqual(K.eval(global_step), 124)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_replace_tf_optimizer_iterations_variable(self):
    self.assert_optimizer_iterations_increases(adam.AdamOptimizer(0.01))

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_replace_keras_optimizer_iterations_variable(self):
    if testing_utils.should_run_eagerly():
      # This needs to be updated to run with v2 optimizers.
      self.skipTest('b/120991591')
    self.assert_optimizer_iterations_increases('adam')

  def test_clone_optimizer_in_different_graph(self):
    """A model can be cloned into another graph via `optimizer_config`."""
    with ops.Graph().as_default():
      with self.session():
        model = testing_utils.get_small_sequential_mlp(3, 4)
        optimizer = keras.optimizer_v2.adam.Adam()
        model.compile(
            optimizer, 'mse', metrics=['acc', metrics.categorical_accuracy])
        model.fit(
            x=np.array([[1., 2., 3., 4.]]),
            y=np.array([[1., 1., 1., 1.]]),
            epochs=1)
        optimizer_config = optimizer.get_config()
    with ops.Graph().as_default():
      with self.session():
        with self.assertRaisesRegex(ValueError,
                                    'Cannot use the given session'):
          models.clone_and_build_model(model, compile_clone=True)
        # The optimizer_config object allows the model to be cloned in a
        # different graph.
        models.clone_and_build_model(model, compile_clone=True,
                                     optimizer_config=optimizer_config)
# Standard entry point: run every test case in this module under the
# TensorFlow test runner.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/models_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=redefined-builtin
"""Keras backend API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import json
import os
import sys
import threading
import weakref
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import tf2
from tensorflow.python.client import session as session_module
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tfdev
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend_config
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradients_module
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn as map_fn_lib
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
# Aliases to the Python builtins so they stay reachable inside this module;
# presumably needed because backend ops of the same names are defined later
# in the file -- verify against the full module.
py_all = all
py_sum = sum
py_any = any

# INTERNAL UTILS

# The internal graph maintained by Keras and used by the symbolic Keras APIs
# while executing eagerly (such as the functional API for model-building).
_GRAPH = None

# A graph which is used for constructing functions in eager mode.
_CURRENT_SCRATCH_GRAPH = None

# This is a thread local object that will hold the default internal TF session
# used by Keras. It can be set manually via `set_session(sess)`.
_SESSION = threading.local()

# This dictionary holds a mapping {graph: learning_phase}.
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_GRAPH_LEARNING_PHASES = weakref.WeakKeyDictionary()

# This dictionary holds a mapping {graph: set_of_freezable_variables}.
# Each set tracks objects created via `freezable_variable` in the graph.
_FREEZABLE_VARS = weakref.WeakKeyDictionary()

# _DUMMY_EAGER_GRAPH is used as a key in _GRAPH_LEARNING_PHASES.
# We keep a separate reference to it to make sure it does not get removed from
# _GRAPH_LEARNING_PHASES.
_DUMMY_EAGER_GRAPH = threading.local()

# This boolean flag can be set to True to leave variable initialization
# up to the user.
# Change its value via `manual_variable_initialization(value)`.
_MANUAL_VAR_INIT = False

# This list holds the available devices.
# It is populated when `_get_available_gpus()` is called for the first time.
# We assume our devices don't change henceforth.
_LOCAL_DEVICES = None

# This dictionary holds a mapping between a graph and variables to initialize
# in the graph.
_GRAPH_VARIABLES = weakref.WeakKeyDictionary()

# This dictionary holds a mapping between a graph and TF optimizers created in
# the graph.
_GRAPH_TF_OPTIMIZERS = weakref.WeakKeyDictionary()

# The below functions are kept accessible from backend for compatibility.
epsilon = backend_config.epsilon
floatx = backend_config.floatx
image_data_format = backend_config.image_data_format
set_epsilon = backend_config.set_epsilon
set_floatx = backend_config.set_floatx
set_image_data_format = backend_config.set_image_data_format
@keras_export('keras.backend.backend')
def backend():
  """Returns the name of the backend in use, which is always `"tensorflow"`.

  Only exists for API compatibility with multi-backend Keras.

  Returns:
      The string "tensorflow".
  """
  return 'tensorflow'
@keras_export('keras.backend.cast_to_floatx')
def cast_to_floatx(x):
  """Casts a Numpy array (or array-like) to the default Keras float type.

  Arguments:
      x: Numpy array.

  Returns:
      The same Numpy array, cast to the dtype returned by `floatx()`.

  Example:

  ```python
  >>> from tensorflow.keras import backend as K
  >>> K.floatx()
  'float32'
  >>> arr = numpy.array([1.0, 2.0], dtype='float64')
  >>> new_arr = K.cast_to_floatx(arr)
  >>> new_arr.dtype
  dtype('float32')
  ```
  """
  target_dtype = floatx()
  return np.asarray(x, dtype=target_dtype)
# A global dictionary mapping graph objects to an index of counters used
# for various layer/optimizer names in each graph.
# Allows to give unique autogenerated names to layers, in a graph-specific way.
# Keyed weakly so that a discarded graph does not keep its counter table alive.
PER_GRAPH_OBJECT_NAME_UIDS = weakref.WeakKeyDictionary()
@keras_export('keras.backend.get_uid')
def get_uid(prefix=''):
  """Associates a string prefix with an integer counter in a TensorFlow graph.

  Each call increments the counter for `prefix` in the current graph and
  returns the new value, so repeated calls yield 1, 2, 3, ...

  Arguments:
      prefix: String prefix to index.

  Returns:
      Unique integer ID.

  Example:

  ```
  >>> get_uid('dense')
  1
  >>> get_uid('dense')
  2
  ```
  """
  graph = get_graph()
  per_prefix_counts = PER_GRAPH_OBJECT_NAME_UIDS.setdefault(
      graph, collections.defaultdict(int))
  per_prefix_counts[prefix] += 1
  return per_prefix_counts[prefix]
@keras_export('keras.backend.reset_uids')
def reset_uids():
  """Resets graph identifiers by clearing every per-graph name-uid counter."""
  PER_GRAPH_OBJECT_NAME_UIDS.clear()
@keras_export('keras.backend.clear_session')
def clear_session():
  """Destroys the current TF graph and creates a new one.

  Useful to avoid clutter from old models / layers.

  Resets the cached Keras graph, the TF default graph, the name-uid
  counters, the cached session, and all per-graph bookkeeping
  (learning phases, variables, optimizers, freezable variables).
  """
  global _SESSION
  global _GRAPH_LEARNING_PHASES  # pylint: disable=global-variable-not-assigned
  global _GRAPH_VARIABLES  # pylint: disable=global-variable-not-assigned
  global _GRAPH_TF_OPTIMIZERS  # pylint: disable=global-variable-not-assigned
  global _GRAPH
  global _FREEZABLE_VARS
  # Drop the cached Keras workspace graph; `get_graph()` below recreates it.
  _GRAPH = None
  ops.reset_default_graph()
  reset_uids()
  # Invalidate the cached session; a fresh one is created lazily on demand.
  _SESSION.session = None
  graph = get_graph()
  with graph.as_default():
    with name_scope(''):
      phase = array_ops.placeholder_with_default(
          False, shape=(), name='keras_learning_phase')
    # Rebuild the learning-phase table with only the new graph's placeholder.
    _GRAPH_LEARNING_PHASES = {}
    _GRAPH_LEARNING_PHASES[graph] = phase
    # Purge per-graph bookkeeping tied to the (possibly reused) graph object.
    _GRAPH_VARIABLES.pop(graph, None)
    _GRAPH_TF_OPTIMIZERS.pop(graph, None)
    _FREEZABLE_VARS.pop(graph, None)
@keras_export('keras.backend.manual_variable_initialization')
def manual_variable_initialization(value):
  """Sets the manual variable initialization flag.

  When the flag is True, variables are NOT initialized as they are
  instantiated; the user is expected to handle initialization themselves
  (e.g. via `tf.compat.v1.initialize_all_variables()`). When False (the
  default), variables are initialized on instantiation.

  Arguments:
      value: Python boolean.
  """
  global _MANUAL_VAR_INIT
  _MANUAL_VAR_INIT = value
@keras_export('keras.backend.learning_phase')
def learning_phase():
  """Returns the learning phase flag.

  The learning phase flag is a bool tensor (0 = test, 1 = train)
  to be passed as input to any Keras function
  that uses a different behavior at train time and test time.

  Returns:
      Learning phase (scalar integer tensor or Python integer).
  """
  graph = ops.get_default_graph()
  if graph is _GRAPH:
    # Don't enter an init_scope for the learning phase if eager execution
    # is enabled but we're inside the Keras workspace graph.
    learning_phase = symbolic_learning_phase()
    _mark_func_graph_as_unsaveable(graph, learning_phase)
    return learning_phase
  with ops.init_scope():
    # We always check & set the learning phase inside the init_scope,
    # otherwise the wrong default_graph will be used to look up the learning
    # phase inside of functions & defuns.
    #
    # This is because functions & defuns (both in graph & in eager mode)
    # will always execute non-eagerly using a function-specific default
    # subgraph.
    if context.executing_eagerly():
      if _DUMMY_EAGER_GRAPH not in _GRAPH_LEARNING_PHASES:
        # Fallback to inference mode as default.
        return 0
      return _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
    learning_phase = symbolic_learning_phase()
  _mark_func_graph_as_unsaveable(graph, learning_phase)
  return learning_phase
def global_learning_phase_is_set():
  """Returns True iff a global (eager) learning phase has been set."""
  return _DUMMY_EAGER_GRAPH in _GRAPH_LEARNING_PHASES
def _mark_func_graph_as_unsaveable(graph, learning_phase):
  """Mark func graph as unsaveable due to use of symbolic keras learning phase.

  Functions that capture the symbolic learning phase cannot be exported to
  SavedModel. Mark the funcgraph as unsaveable, so that an error will be
  raised if it is exported.

  Args:
    graph: Graph or FuncGraph object.
    learning_phase: Learning phase placeholder or int defined in the graph.
  """
  # Only function graphs can be marked, and only when the learning phase is
  # an actual placeholder (a constant int is safe to export).
  if not graph.building_function:
    return
  if not is_placeholder(learning_phase):
    return
  graph.mark_as_unsaveable(
      'The keras learning phase placeholder was used inside a function. '
      'Exporting placeholders is not supported when saving out a SavedModel. '
      'Please call `tf.keras.backend.set_learning_phase(0)` in the function '
      'to set the learning phase to a constant value.')
def symbolic_learning_phase():
  """Returns the symbolic learning-phase placeholder of the Keras graph.

  Creates the placeholder (defaulting to False, i.e. inference mode) on
  first use for the current Keras graph and caches it.
  """
  graph = get_graph()
  with graph.as_default():
    try:
      return _GRAPH_LEARNING_PHASES[graph]
    except KeyError:
      with name_scope(''):
        phase = array_ops.placeholder_with_default(
            False, shape=(), name='keras_learning_phase')
      _GRAPH_LEARNING_PHASES[graph] = phase
      return phase
@keras_export('keras.backend.set_learning_phase')
def set_learning_phase(value):
  """Sets the learning phase to a fixed value.

  Arguments:
      value: Learning phase value, either 0 or 1 (integers).

  Raises:
      ValueError: if `value` is neither `0` nor `1`.
  """
  global _GRAPH_LEARNING_PHASES  # pylint: disable=global-variable-not-assigned
  if value not in (0, 1):
    raise ValueError('Expected learning phase to be 0 or 1.')
  with ops.init_scope():
    if context.executing_eagerly():
      # In an eager context, the learning phase value applies to both the
      # eager context and the internal Keras graph.
      _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
    _GRAPH_LEARNING_PHASES[get_graph()] = value
@keras_export('keras.backend.learning_phase_scope')
@tf_contextlib.contextmanager
def learning_phase_scope(value):
  """Provides a scope within which the learning phase is equal to `value`.

  The learning phase gets restored to its original value upon exiting the scope.

  Arguments:
      value: Learning phase value, either 0 or 1 (integers).

  Yields:
      None.

  Raises:
      ValueError: if `value` is neither `0` nor `1`.
  """
  global _GRAPH_LEARNING_PHASES  # pylint: disable=global-variable-not-assigned
  if value not in {0, 1}:
    raise ValueError('Expected learning phase to be 0 or 1.')
  # Snapshot the current eager and graph phases so they can be restored.
  with ops.init_scope():
    if context.executing_eagerly():
      previous_eager_value = _GRAPH_LEARNING_PHASES.get(
          _DUMMY_EAGER_GRAPH, None)
    previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None)
  try:
    set_learning_phase(value)
    yield
  finally:
    # Restore learning phase to initial value.
    with ops.init_scope():
      if context.executing_eagerly():
        if previous_eager_value is not None:
          _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_eager_value
        elif _DUMMY_EAGER_GRAPH in _GRAPH_LEARNING_PHASES:
          # No previous value: the scope set it, so unset it again.
          del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
      graph = get_graph()
      if previous_graph_value is not None:
        _GRAPH_LEARNING_PHASES[graph] = previous_graph_value
      elif graph in _GRAPH_LEARNING_PHASES:
        del _GRAPH_LEARNING_PHASES[graph]
@tf_contextlib.contextmanager
def eager_learning_phase_scope(value):
  """Internal scope that sets the learning phase in eager / tf.function only.

  Arguments:
      value: Learning phase value, either 0 or 1 (integers).

  Yields:
      None.

  Raises:
      ValueError: if `value` is neither `0` nor `1`.
  """
  global _GRAPH_LEARNING_PHASES  # pylint: disable=global-variable-not-assigned
  # NOTE(review): validation is via assert (stripped under -O), although the
  # docstring advertises ValueError — internal API, callers pass literals.
  assert value in {0, 1}
  assert ops.executing_eagerly_outside_functions()
  global_learning_phase_was_set = global_learning_phase_is_set()
  if global_learning_phase_was_set:
    previous_value = learning_phase()
  try:
    _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
    yield
  finally:
    # Restore learning phase to initial value or unset.
    if global_learning_phase_was_set:
      _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_value
    else:
      del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
def _current_graph(op_input_list):
  """Returns the graph inferred from `op_input_list`, or the current graph."""
  return ops._get_graph_from_inputs(op_input_list)  # pylint: disable=protected-access
def _get_session(op_input_list=()):
  """Returns the session object for the current thread.

  Prefers an externally-set default session; otherwise returns the cached
  per-thread Keras session, creating it if missing or if it belongs to a
  different graph than the one implied by `op_input_list`.
  """
  global _SESSION
  default_session = ops.get_default_session()
  if default_session is not None:
    session = default_session
  else:
    if ops.inside_function():
      raise RuntimeError('Cannot get session inside Tensorflow graph function.')
    # If we don't have a session, or that session does not match the current
    # graph, create and cache a new session.
    if (getattr(_SESSION, 'session', None) is None or
        _SESSION.session.graph is not _current_graph(op_input_list)):
      # If we are creating the Session inside a tf.distribute.Strategy scope,
      # we ask the strategy for the right session options to use.
      if distribution_strategy_context.has_strategy():
        configure_and_create_distributed_session(
            distribution_strategy_context.get_strategy())
      else:
        _SESSION.session = session_module.Session(
            config=get_default_session_config())
    session = _SESSION.session
  return session
@keras_export(v1=['keras.backend.get_session'])
def get_session(op_input_list=()):
  """Returns the TF session to be used by the backend.

  If a default TensorFlow session is available, we will return it. Else, we
  will return the global Keras session assuming it matches the current graph.
  If no global Keras session exists at this point, we will create a new
  global session.

  Note that you can manually set the global session via `K.set_session(sess)`.

  Arguments:
      op_input_list: An option sequence of tensors or ops, which will be used
        to determine the current graph. Otherwise the default graph will be
        used.

  Returns:
      A TensorFlow session.
  """
  session = _get_session(op_input_list)
  if _MANUAL_VAR_INIT:
    # The user opted to initialize variables themselves.
    return session
  with session.graph.as_default():
    _initialize_variables(session)
  return session
def get_graph():
  """Returns the graph Keras should build into.

  Under eager execution this is a lazily-created, cached FuncGraph named
  'keras_graph'; otherwise it is whatever graph is currently the default.
  """
  if not context.executing_eagerly():
    return ops.get_default_graph()
  global _GRAPH
  if _GRAPH is None:
    _GRAPH = func_graph.FuncGraph('keras_graph')
  return _GRAPH
@tf_contextlib.contextmanager
def _scratch_graph(graph=None):
  """Retrieve a shared and temporary func graph.

  The eager execution path lifts a subgraph from the keras global graph into
  a scratch graph in order to create a function. DistributionStrategies, in
  turn, constructs multiple functions as well as a final combined function. In
  order for that logic to work correctly, all of the functions need to be
  created on the same scratch FuncGraph.

  Args:
    graph: A graph to be used as the current scratch graph. If not set then
      a scratch graph will either be retrieved or created:

  Yields:
    The current scratch graph.
  """
  global _CURRENT_SCRATCH_GRAPH
  # Reject conflicting explicit graphs while a different scratch is active.
  if (_CURRENT_SCRATCH_GRAPH is not None and graph is not None and
      _CURRENT_SCRATCH_GRAPH is not graph):
    raise ValueError('Multiple scratch graphs specified.')
  # Re-entrant use: hand back the already-active scratch graph.
  if _CURRENT_SCRATCH_GRAPH:
    yield _CURRENT_SCRATCH_GRAPH
    return
  graph = graph or func_graph.FuncGraph('keras_scratch_graph')
  try:
    _CURRENT_SCRATCH_GRAPH = graph
    yield graph
  finally:
    # Outermost scope tears the scratch graph down unconditionally.
    _CURRENT_SCRATCH_GRAPH = None
@keras_export(v1=['keras.backend.set_session'])
def set_session(session):
  """Sets the global TensorFlow session used by the Keras backend.

  Arguments:
      session: A TF Session.
  """
  global _SESSION
  _SESSION.session = session
def get_default_session_config():
  """Returns the eager context's session config with soft placement enabled."""
  if os.environ.get('OMP_NUM_THREADS'):
    # OMP_NUM_THREADS is ignored; point users at the supported API.
    logging.warning(
        'OMP_NUM_THREADS is no longer used by the default Keras config. '
        'To configure the number of threads, use tf.config.threading APIs.')
  session_config = context.context().config
  session_config.allow_soft_placement = True
  return session_config
def get_default_graph_uid_map():
  """Returns (creating if necessary) the name-uid map of the default graph."""
  graph = ops.get_default_graph()
  uid_map = PER_GRAPH_OBJECT_NAME_UIDS.get(graph)
  if uid_map is None:
    uid_map = collections.defaultdict(int)
    PER_GRAPH_OBJECT_NAME_UIDS[graph] = uid_map
  return uid_map
# DEVICE MANIPULATION
class _TfDeviceCaptureOp(object):
"""Class for capturing the TF device scope."""
def __init__(self):
self.device = None
def _set_device(self, device):
"""This method captures TF's explicit device scope setting."""
if tfdev.is_device_spec(device):
device = device.to_string()
self.device = device
def _set_device_from_string(self, device_str):
self.device = device_str
def _get_current_tf_device():
  """Return explicit device of current context, otherwise returns `None`.

  Returns:
      If the current device scope is explicitly set, it returns a string with
      the device (`CPU` or `GPU`). If the scope is not explicitly set, it will
      return `None`.
  """
  capture = _TfDeviceCaptureOp()
  # Run the graph's device functions against the capture op; they record the
  # innermost explicit device via the capture hooks.
  get_graph()._apply_device_functions(capture)  # pylint: disable=protected-access
  return tfdev.DeviceSpec.from_string(capture.device)
def _is_current_explicit_device(device_type):
  """Check if the current device is explicitly set on the device type specified.

  Arguments:
      device_type: A string containing `GPU` or `CPU` (case-insensitive).

  Returns:
      A boolean indicating if the current device scope is explicitly set on
      the device type.

  Raises:
      ValueError: If the `device_type` string indicates an unsupported device.
  """
  device_type = device_type.upper()
  if device_type not in ('CPU', 'GPU'):
    raise ValueError('`device_type` should be either "CPU" or "GPU".')
  current = _get_current_tf_device()
  if current is None:
    return False
  return current.device_type == device_type
def _get_available_gpus():
  """Get a list of available gpu devices (formatted as strings).

  Returns:
      A list of available GPU devices.
  """
  if ops.executing_eagerly_outside_functions():
    # TF2 path: device names come straight from the eager context.
    return [name for name in context.list_devices() if 'GPU' in name]
  global _LOCAL_DEVICES
  if _LOCAL_DEVICES is None:
    # Query the session once and cache; we assume devices don't change.
    _LOCAL_DEVICES = get_session().list_devices()
  return [dev.name for dev in _LOCAL_DEVICES if dev.device_type == 'GPU']
def _has_nchw_support():
  """Check whether the current scope supports NCHW ops.

  TensorFlow does not support NCHW on CPU. Therefore we check if we are not
  explicitly put on CPU, and have GPUs available. In this case there will be
  soft-placing on the GPU device.

  Returns:
      bool: if the current scope device placement would support nchw.
  """
  if _is_current_explicit_device('CPU'):
    return False
  return bool(_get_available_gpus())
# VARIABLE MANIPULATION
def _constant_to_tensor(x, dtype):
  """Convert the input `x` to a tensor of type `dtype` via `constant`.

  This is slightly faster than the `_to_tensor` function, at the cost of
  handling fewer cases.

  Arguments:
      x: An object to be converted (numpy arrays, floats, ints and lists of
        them).
      dtype: The destination type.

  Returns:
      A tensor.
  """
  return constant_op.constant(x, dtype=dtype)
def _to_tensor(x, dtype):
  """Convert the input `x` to a tensor of type `dtype`.

  Arguments:
      x: An object to be converted (numpy array, list, tensors).
      dtype: The destination type.

  Returns:
      A tensor.
  """
  return ops.convert_to_tensor(x, dtype=dtype)
@keras_export('keras.backend.is_sparse')
def is_sparse(tensor):
  """Returns whether a tensor is a sparse tensor.

  Arguments:
      tensor: A tensor instance.

  Returns:
      A boolean: True iff `tensor` is a `SparseTensor`.

  Example:
  ```python
      >>> a = K.placeholder((2, 2), sparse=False)
      >>> print(K.is_sparse(a))
      False
      >>> b = K.placeholder((2, 2), sparse=True)
      >>> print(K.is_sparse(b))
      True
  ```
  """
  return isinstance(tensor, sparse_tensor.SparseTensor)
@keras_export('keras.backend.to_dense')
def to_dense(tensor):
  """Converts a sparse tensor into a dense tensor and returns it.

  Dense inputs are returned unchanged.

  Arguments:
      tensor: A tensor instance (potentially sparse).

  Returns:
      A dense tensor.

  Examples:
  ```python
      >>> b = K.placeholder((2, 2), sparse=True)
      >>> c = K.to_dense(b)
      >>> print(K.is_sparse(c))
      False
  ```
  """
  if not is_sparse(tensor):
    return tensor
  return sparse_ops.sparse_tensor_to_dense(tensor)
@keras_export('keras.backend.name_scope', v1=[])
def name_scope(name):
  """A context manager for use when defining a Python op.

  This context manager pushes a name scope, which will make the name of all
  operations added within it have a prefix.

  For example, to define a new Python op called `my_op`:

  ```python
  def my_op(a):
    with tf.name_scope("MyOp") as scope:
      a = tf.convert_to_tensor(a, name="a")
      # Define some computation that uses `a`.
      return foo_op(..., name=scope)
  ```

  When executed, the Tensor `a` will have the name `MyOp/a`.

  Args:
    name: The prefix to use on all names created within the name scope.

  Returns:
    Name scope context manager.
  """
  return ops.name_scope_v2(name)
@keras_export('keras.backend.variable')
def variable(value, dtype=None, name=None, constraint=None):
  """Instantiates a variable and returns it.

  Arguments:
      value: Numpy array, initial value of the tensor.
      dtype: Tensor type.
      name: Optional name string for the tensor.
      constraint: Optional projection function to be
          applied to the variable after an optimizer update.

  Returns:
      A variable instance (with Keras metadata included). For scipy-style
      sparse inputs (anything with a `tocoo` method) a `SparseTensor` is
      returned instead of a `Variable`.
  """
  if dtype is None:
    dtype = floatx()
  if hasattr(value, 'tocoo'):
    # Scipy-sparse-like input: convert to COO and build a SparseTensor
    # directly; note `dtype` and `constraint` are not applied on this path.
    sparse_coo = value.tocoo()
    indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(
        sparse_coo.col, 1)), 1)
    v = sparse_tensor.SparseTensor(
        indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
    v._keras_shape = sparse_coo.shape
    return v
  v = variables_module.Variable(
      value,
      dtype=dtypes_module.as_dtype(dtype),
      name=name,
      constraint=constraint)
  # Attach the static shape as Keras metadata when it can be determined.
  if isinstance(value, np.ndarray):
    v._keras_shape = value.shape
  elif hasattr(value, 'shape'):
    v._keras_shape = int_shape(value)
  # Register for lazy initialization in graph mode.
  track_variable(v)
  return v
def track_tf_optimizer(tf_optimizer):
  """Tracks the given TF optimizer for initialization of its variables."""
  if context.executing_eagerly():
    # Eager variables are initialized on creation; nothing to track.
    return
  graph = get_graph()
  _GRAPH_TF_OPTIMIZERS.setdefault(graph, weakref.WeakSet()).add(tf_optimizer)
def track_variable(v):
  """Tracks the given variable for initialization."""
  if context.executing_eagerly():
    # Eager variables are initialized immediately; nothing to track.
    return
  graph = v.graph if hasattr(v, 'graph') else get_graph()
  bucket = _GRAPH_VARIABLES.setdefault(
      graph, object_identity.ObjectIdentityWeakSet())
  bucket.add(v)
def unique_object_name(name,
                       name_uid_map=None,
                       avoid_names=None,
                       namespace='',
                       zero_based=False):
  """Makes a object name (or arbitrary string) unique within a TensorFlow graph.

  Arguments:
    name: String name to make unique.
    name_uid_map: An optional defaultdict(int) to use when creating unique
      names. If None (default), uses a per-Graph dictionary.
    avoid_names: An optional set or dict with names which should not be used.
      If None (default) does not avoid any names.
    namespace: Gets a name which is unique within the (graph, namespace).
      Layers which are not Networks use a blank namespace and so get
      graph-global names.
    zero_based: If True, name sequences start with no suffix (e.g. "dense",
      "dense_1"). If False, naming is one-based ("dense_1", "dense_2").

  Returns:
    Unique string name.

  Example:

  ```python
  unique_object_name('dense')  # dense_1
  unique_object_name('dense')  # dense_2
  ```
  """
  if name_uid_map is None:
    name_uid_map = get_default_graph_uid_map()
  if avoid_names is None:
    avoid_names = set()
  name_key = (namespace, name)
  while True:
    if zero_based:
      # First occurrence keeps the bare name; later ones get "_<n>".
      number = name_uid_map[name_key]
      proposed_name = (name + '_' + str(number)) if number else name
      name_uid_map[name_key] += 1
    else:
      name_uid_map[name_key] += 1
      proposed_name = name + '_' + str(name_uid_map[name_key])
    if proposed_name not in avoid_names:
      return proposed_name
def _get_variables(graph=None):
  """Returns variables corresponding to the given graph for initialization."""
  assert not context.executing_eagerly()
  tracked = _GRAPH_VARIABLES.setdefault(graph, weakref.WeakSet())
  # Optimizer slot/iteration variables are tracked separately; fold them in.
  for optimizer in _GRAPH_TF_OPTIMIZERS.get(graph, set()):
    tracked.update(optimizer.optimizer.variables())
  return tracked
def _initialize_variables(session):
  """Utility to initialize uninitialized variables on the fly."""
  variables = _get_variables(get_graph())
  candidate_vars = [
      v for v in variables if not getattr(v, '_keras_initialized', False)
  ]
  if not candidate_vars:
    return
  # This step is expensive, so we only run it on variables not already
  # marked as initialized.
  is_initialized = session.run(
      [variables_module.is_variable_initialized(v) for v in candidate_vars])
  uninitialized_vars = []
  for flag, v in zip(is_initialized, candidate_vars):
    if not flag:
      uninitialized_vars.append(v)
    # Mark every candidate so it is skipped on subsequent calls.
    v._keras_initialized = True
  if uninitialized_vars:
    session.run(variables_module.variables_initializer(uninitialized_vars))
@keras_export('keras.backend.constant')
def constant(value, dtype=None, shape=None, name=None):
  """Creates a constant tensor.

  Arguments:
      value: A constant value (or list)
      dtype: The type of the elements of the resulting tensor. Defaults to
        `floatx()` when None.
      shape: Optional dimensions of resulting tensor.
      name: Optional name for the tensor.

  Returns:
      A Constant Tensor.
  """
  resolved_dtype = floatx() if dtype is None else dtype
  return constant_op.constant(
      value, dtype=resolved_dtype, shape=shape, name=name)
def is_keras_tensor(x):
  """Returns whether `x` is a Keras tensor.

  A "Keras tensor" is a tensor that was returned by a Keras layer
  (`Layer` class) or by `Input`.

  Arguments:
      x: A candidate tensor.

  Returns:
      A boolean: Whether the argument is a Keras tensor.

  Raises:
      ValueError: In case `x` is not a symbolic tensor (e.g. a numpy array).

  Examples:
  ```python
      >>> K.is_keras_tensor(numpy.array([1, 2]))  # not symbolic
      ValueError
      >>> K.is_keras_tensor(K.placeholder(shape=(2, 4, 5)))
      False
      >>> K.is_keras_tensor(Input([10]))  # Input is a Keras tensor.
      True
      >>> K.is_keras_tensor(Dense(10)(Input([10])))  # layer output
      True
  ```
  """
  if isinstance(x, (ops.Tensor,
                    variables_module.Variable,
                    sparse_tensor.SparseTensor)):
    # Keras tensors are distinguished by the metadata layers attach to them.
    return hasattr(x, '_keras_history')
  raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) +
                   '`. Expected a symbolic tensor instance.')
@keras_export('keras.backend.placeholder')
def placeholder(shape=None,
                ndim=None,
                dtype=None,
                sparse=False,
                name=None,
                ragged=False):
  """Instantiates a placeholder tensor and returns it.

  Arguments:
      shape: Shape of the placeholder
          (integer tuple, may include `None` entries).
      ndim: Number of axes of the tensor.
          At least one of {`shape`, `ndim`} must be specified.
          If both are specified, `shape` is used.
      dtype: Placeholder type. Defaults to `floatx()` when None.
      sparse: Boolean, whether the placeholder should have a sparse type.
      name: Optional name string for the placeholder.
      ragged: Boolean, whether the placeholder should have a ragged type.
          In this case, values of 'None' in the 'shape' argument represent
          ragged dimensions. For more information about RaggedTensors, see
          this [guide](https://www.tensorflow.org/guide/ragged_tensors).

  Raises:
      ValueError: If called with sparse = True and ragged = True.

  Returns:
      Tensor instance (with Keras metadata included).
  """
  if sparse and ragged:
    raise ValueError(
        'Cannot set both sparse and ragged to True when creating a placeholder.'
    )
  if dtype is None:
    dtype = floatx()
  if not shape:
    if ndim:
      shape = tuple([None for _ in range(ndim)])
  with get_graph().as_default():
    if sparse:
      x = array_ops.sparse_placeholder(dtype, shape=shape, name=name)
    elif ragged:
      # The ragged rank is the run of leading `None` dims after the batch
      # dim; the remaining trailing dims form the dense value shape.
      ragged_rank = 0
      for i in range(1, len(shape)):
        if shape[i] is None:
          ragged_rank += 1
        else:
          break
      value_shape = shape[(ragged_rank + 1):]
      x = ragged_factory_ops.placeholder(
          dtype=dtype,
          ragged_rank=ragged_rank,
          value_shape=value_shape,
          name=name)
    else:
      x = array_ops.placeholder(dtype, shape=shape, name=name)
  return x
def is_placeholder(x):
  """Returns whether `x` is a placeholder.

  Arguments:
      x: A candidate placeholder.

  Returns:
      Boolean.
  """
  try:
    if isinstance(x, composite_tensor.CompositeTensor):
      # A composite counts as a placeholder if any flattened component does.
      components = nest.flatten(x, expand_composites=True)
      return py_any(is_placeholder(c) for c in components)
    return x.op.type == 'Placeholder'
  except AttributeError:
    # Objects without a `.op` (e.g. eager tensors) are not placeholders.
    return False
def freezable_variable(value, shape=None, name=None):
  """A tensor-like object whose value can be updated only up until execution.

  After creating the freezable variable, you can update its value by calling
  `var.update_value(new_value)` (similar to a regular variable).

  Unlike an actual variable, the value used during execution is the current
  value at the time the execution function (`backend.function()`) was created.

  This is an internal API, expected to be temporary. It is used to implement a
  mutable `trainable` property for `BatchNormalization` layers, with a frozen
  value after model compilation.

  We don't use a plain variable in this case because we need the value used
  in a specific model to be frozen after `compile` has been called
  (e.g. GAN use case).

  Arguments:
      value: The initial value for the tensor-like object.
      shape: The shape for the tensor-like object (cannot be changed).
      name: The name for the tensor-like object.

  Returns:
      A tensor-like object with a static value that can be updated via
      `x.update_value(new_value)`, up until creating an execution function
      (afterwards the value is fixed).
  """
  graph = get_graph()
  with graph.as_default():
    # The "freeze" works because the placeholder default captured when an
    # execution function is built is whatever `_current_value` feeds it then.
    x = array_ops.placeholder_with_default(
        value, shape=shape, name=name)
    x._initial_value = value
    x._current_value = value

    def update_value(new_value):
      # Mutates the Python-side value only; existing functions keep the
      # value they captured at construction time.
      x._current_value = new_value

    def get_value():
      return x._current_value

    x.update_value = update_value
    x.get_value = get_value

    global _FREEZABLE_VARS
    # Track per graph (weakly) so backend.function() can find these.
    if graph not in _FREEZABLE_VARS:
      _FREEZABLE_VARS[graph] = object_identity.ObjectIdentityWeakSet()
    _FREEZABLE_VARS[graph].add(x)
  return x
@keras_export('keras.backend.shape')
def shape(x):
  """Returns the symbolic shape of a tensor or variable.

  Arguments:
      x: A tensor or variable.

  Returns:
      A symbolic shape (which is itself a tensor). To get the static integer
      shape instead, use `int_shape(x)`.

  Example:
  ```python
      >>> kvar = K.variable(value=np.array([[1, 2], [3, 4]]))
      >>> K.shape(kvar)
      <tf.Tensor 'Shape_8:0' shape=(2,) dtype=int32>
  ```
  """
  return array_ops.shape(x)
@keras_export('keras.backend.int_shape')
def int_shape(x):
  """Returns the shape of tensor or variable as a tuple of int or None entries.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tuple of integers (or None entries), or None if the shape cannot be
      determined.

  Example:
  ```python
      >>> K.int_shape(K.placeholder(shape=(2, 4, 5)))
      (2, 4, 5)
  ```
  """
  try:
    raw_shape = x.shape
    if isinstance(raw_shape, tuple):
      return raw_shape
    # TensorShape -> plain tuple of int/None.
    return tuple(raw_shape.as_list())
  except ValueError:
    # Unknown-rank shapes raise on as_list().
    return None
@keras_export('keras.backend.ndim')
def ndim(x):
  """Returns the number of axes in a tensor, as an integer.

  Arguments:
      x: Tensor or variable.

  Returns:
      Integer (scalar), number of axes, or None when the rank is unknown.

  Example:
  ```python
      >>> K.ndim(K.placeholder(shape=(2, 4, 5)))
      3
  ```
  """
  dims = x.shape._dims  # pylint: disable=protected-access
  return len(dims) if dims is not None else None
@keras_export('keras.backend.dtype')
def dtype(x):
  """Returns the dtype of a Keras tensor or variable, as a string.

  Arguments:
      x: Tensor or variable.

  Returns:
      String, dtype of `x` (e.g. `'float32'`). Reference dtypes are reduced
      to their base dtype.

  Example:
  ```python
      >>> K.dtype(K.placeholder(shape=(2, 4, 5), dtype='float64'))
      'float64'
  ```
  """
  return x.dtype.base_dtype.name
@keras_export('keras.backend.eval')
def eval(x):
  """Evaluates the value of a variable.

  Arguments:
      x: A variable.

  Returns:
      A Numpy array.

  Example:
  ```python
      >>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
      >>> K.eval(kvar)
      array([[ 1.,  2.],
             [ 3.,  4.]], dtype=float32)
  ```
  """
  # Densify first so sparse variables evaluate to ordinary arrays.
  dense = to_dense(x)
  return get_value(dense)
@keras_export('keras.backend.zeros')
def zeros(shape, dtype=None, name=None):
  """Instantiates an all-zeros variable and returns it.

  Arguments:
      shape: Tuple or list of integers, shape of returned Keras variable.
      dtype: data type of returned Keras variable. Defaults to `floatx()`.
      name: name of returned Keras variable.

  Returns:
      A variable (including Keras metadata), filled with `0.0`.
      Note that if `shape` was symbolic, we cannot return a variable,
      and will return a dynamically-shaped tensor instead.

  Example:
  ```python
      kvar = K.zeros((3, 4))
      K.eval(kvar)
      # array([[ 0., 0., 0., 0.], ...], dtype=float32)
  ```
  """
  with ops.init_scope():
    if dtype is None:
      dtype = floatx()
    tf_dtype = dtypes_module.as_dtype(dtype)
    v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name)
    if py_all(v.shape.as_list()):
      # Fully-static shape: promote to a Keras variable.
      return variable(v, dtype=dtype, name=name)
    # Symbolic shape: return the tracked tensor directly.
    track_variable(v)
    return v
@keras_export('keras.backend.ones')
def ones(shape, dtype=None, name=None):
  """Instantiates an all-ones variable and returns it.

  Arguments:
      shape: Tuple of integers, shape of returned Keras variable.
      dtype: String, data type of returned Keras variable. Defaults to
        `floatx()`.
      name: String, name of returned Keras variable.

  Returns:
      A Keras variable, filled with `1.0`.
      Note that if `shape` was symbolic, we cannot return a variable,
      and will return a dynamically-shaped tensor instead.

  Example:
  ```python
      >>> kvar = K.ones((3, 4))
      >>> K.eval(kvar)
      array([[ 1., 1., 1., 1.], ...], dtype=float32)
  ```
  """
  with ops.init_scope():
    if dtype is None:
      dtype = floatx()
    tf_dtype = dtypes_module.as_dtype(dtype)
    v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)
    if py_all(v.shape.as_list()):
      # Fully-static shape: promote to a Keras variable.
      return variable(v, dtype=dtype, name=name)
    # Symbolic shape: return the tracked tensor directly.
    track_variable(v)
    return v
@keras_export('keras.backend.eye')
def eye(size, dtype=None, name=None):
  """Instantiate an identity matrix and returns it.

  Arguments:
      size: Integer, number of rows/columns.
      dtype: String, data type of returned Keras variable. Defaults to
        `floatx()`.
      name: String, name of returned Keras variable.

  Returns:
      A Keras variable, an identity matrix.

  Example:
  ```python
      >>> K.eval(K.eye(3))
      array([[ 1., 0., 0.],
             [ 0., 1., 0.],
             [ 0., 0., 1.]], dtype=float32)
  ```
  """
  resolved_dtype = floatx() if dtype is None else dtype
  identity_matrix = linalg_ops.eye(
      size, dtype=dtypes_module.as_dtype(resolved_dtype))
  return variable(identity_matrix, resolved_dtype, name)
@keras_export('keras.backend.zeros_like')
def zeros_like(x, dtype=None, name=None):
  """Instantiates an all-zeros variable of the same shape as another tensor.

  Arguments:
      x: Keras variable or Keras tensor.
      dtype: dtype of returned Keras variable. `None` uses the dtype of `x`.
      name: name for the variable to create.

  Returns:
      A Keras variable with the shape of `x` filled with zeros.

  Example:
  ```python
      kvar = K.variable(np.random.random((2, 3)))
      K.eval(K.zeros_like(kvar))
      # array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32)
  ```
  """
  return array_ops.zeros_like(x, dtype=dtype, name=name)
@keras_export('keras.backend.ones_like')
def ones_like(x, dtype=None, name=None):
  """Instantiates an all-ones variable of the same shape as another tensor.

  Arguments:
      x: Keras variable or tensor.
      dtype: String, dtype of returned Keras variable. None uses the dtype
        of x.
      name: String, name for the variable to create.

  Returns:
      A Keras variable with the shape of x filled with ones.

  Example:
  ```python
      kvar = K.variable(np.random.random((2, 3)))
      K.eval(K.ones_like(kvar))
      # array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32)
  ```
  """
  return array_ops.ones_like(x, dtype=dtype, name=name)
def identity(x, name=None):
  """Returns a tensor with the same content as the input tensor.

  Arguments:
      x: The input tensor.
      name: String, optional name for the returned tensor.

  Returns:
      A tensor of the same shape, type and content as `x`.
  """
  return array_ops.identity(x, name=name)
@keras_export('keras.backend.random_uniform_variable')
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
  """Instantiates a variable with values drawn from a uniform distribution.

  Arguments:
      shape: Tuple of integers, shape of the returned Keras variable.
      low: Float, lower boundary of the output interval.
      high: Float, upper boundary of the output interval.
      dtype: String, dtype of the returned Keras variable. Defaults to
          the Keras floatx dtype when `None`.
      name: String, name of the returned Keras variable.
      seed: Integer, random seed.

  Returns:
      A Keras variable, filled with drawn samples.
  """
  dtype = floatx() if dtype is None else dtype
  tf_dtype = dtypes_module.as_dtype(dtype)
  if seed is None:
    # Condition TF's randomness on the NumPy RNG so that seeding NumPy
    # seeds this initializer too.
    seed = np.random.randint(10e8)
  initializer = init_ops.random_uniform_initializer(
      low, high, dtype=tf_dtype, seed=seed)
  return variable(initializer(shape), dtype=dtype, name=name)
@keras_export('keras.backend.random_normal_variable')
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
                           seed=None):
  """Instantiates a variable with values drawn from a normal distribution.

  Arguments:
      shape: Tuple of integers, shape of the returned Keras variable.
      mean: Float, mean of the normal distribution.
      scale: Float, standard deviation of the normal distribution.
      dtype: String, dtype of the returned Keras variable. Defaults to
          the Keras floatx dtype when `None`.
      name: String, name of the returned Keras variable.
      seed: Integer, random seed.

  Returns:
      A Keras variable, filled with drawn samples.
  """
  dtype = floatx() if dtype is None else dtype
  tf_dtype = dtypes_module.as_dtype(dtype)
  if seed is None:
    # Condition TF's randomness on the NumPy RNG so that seeding NumPy
    # seeds this initializer too.
    seed = np.random.randint(10e8)
  initializer = init_ops.random_normal_initializer(
      mean, scale, dtype=tf_dtype, seed=seed)
  return variable(initializer(shape), dtype=dtype, name=name)
@keras_export('keras.backend.count_params')
def count_params(x):
  """Returns the static number of elements in a variable or tensor.

  Arguments:
      x: Variable or tensor.

  Returns:
      Integer, the number of scalars in `x`.
  """
  # Product of all static dimensions of the shape.
  static_shape = x.shape.as_list()
  return np.prod(static_shape)
@keras_export('keras.backend.cast')
def cast(x, dtype):
  """Casts a tensor to a different dtype and returns it.

  You can cast a Keras variable, but it still returns a Keras tensor.

  Arguments:
      x: Keras tensor (or variable).
      dtype: String, e.g. `'float16'`, `'float32'`, or `'float64'`.

  Returns:
      Keras tensor with dtype `dtype`.
  """
  return math_ops.cast(x, dtype)
# UPDATES OPS
@keras_export('keras.backend.update')
def update(x, new_x):
  """Update the value of `x` to `new_x`.

  Arguments:
      x: A `Variable`.
      new_x: A tensor of same shape as `x`.

  Returns:
      The variable `x` updated.
  """
  return state_ops.assign(x, new_x)
@keras_export('keras.backend.update_add')
def update_add(x, increment):
  """Update the value of `x` by adding `increment` to it.

  Arguments:
      x: A `Variable`.
      increment: A tensor of same shape as `x`.

  Returns:
      The variable `x` updated.
  """
  return state_ops.assign_add(x, increment)
@keras_export('keras.backend.update_sub')
def update_sub(x, decrement):
  """Update the value of `x` by subtracting `decrement` from it.

  Arguments:
      x: A `Variable`.
      decrement: A tensor of same shape as `x`.

  Returns:
      The variable `x` updated.
  """
  return state_ops.assign_sub(x, decrement)
@keras_export('keras.backend.moving_average_update')
def moving_average_update(x, value, momentum):
  """Compute the moving average of a variable.

  Arguments:
      x: A Variable.
      value: A tensor with the same shape as `x`.
      momentum: The moving average momentum.

  Returns:
      An Operation to update the variable.
  """
  # `training` sits above the Keras backend in the abstraction hierarchy
  # (it depends on layers, and thus on Keras), and moving_averages are
  # low-level ops, so import locally to avoid a dependency cycle.
  from tensorflow.python.training import moving_averages  # pylint: disable=g-import-not-at-top
  # Zero-debiasing is only applied under TF1 behavior.
  return moving_averages.assign_moving_average(
      x, value, momentum, zero_debias=not tf2.enabled())
# LINEAR ALGEBRA
@keras_export('keras.backend.dot')
def dot(x, y):
  """Multiplies 2 tensors (and/or variables) and returns a *tensor*.

  When attempting to multiply a nD tensor
  with a nD tensor, it reproduces the Theano behavior.
  (e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`)

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A tensor, dot product of `x` and `y`.

  Examples:
  ```python
  # dot product between tensors
  >>> x = K.placeholder(shape=(2, 3))
  >>> y = K.placeholder(shape=(3, 4))
  >>> xy = K.dot(x, y)
  >>> xy
  <tf.Tensor 'MatMul_9:0' shape=(2, 4) dtype=float32>
  ```

  ```python
  # Theano-like behavior example
  >>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
  >>> y = K.ones((4, 3, 5))
  >>> xy = K.dot(x, y)
  >>> K.int_shape(xy)
  (2, 4, 5)
  ```
  """
  # Higher-rank case: emulate Theano's generalized dot by flattening both
  # operands to 2-D matrices, multiplying, then restoring the full shape.
  if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
    # Build shape tuples that keep static dims as Python ints and fall back
    # to dynamic (tensor) dims where the static size is unknown, so the
    # final reshape can mix both.
    x_shape = []
    for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))):
      if i is not None:
        x_shape.append(i)
      else:
        x_shape.append(s)
    x_shape = tuple(x_shape)
    y_shape = []
    for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))):
      if i is not None:
        y_shape.append(i)
      else:
        y_shape.append(s)
    y_shape = tuple(y_shape)
    # Move y's contraction axis (second-to-last) to the front so the
    # flattened y has that axis as its rows.
    y_permute_dim = list(range(ndim(y)))
    y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
    xt = array_ops.reshape(x, [-1, x_shape[-1]])
    yt = array_ops.reshape(
        array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
    # Output shape: x's leading dims + y's dims minus the contracted axis.
    return array_ops.reshape(
        math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
  # Rank <= 2: plain matmul, with a sparse fast path for sparse x.
  if is_sparse(x):
    out = sparse_ops.sparse_tensor_dense_matmul(x, y)
  else:
    out = math_ops.matmul(x, y)
  return out
@keras_export('keras.backend.batch_dot')
def batch_dot(x, y, axes=None):
  """Batchwise dot product.

  `batch_dot` is used to compute dot product of `x` and `y` when
  `x` and `y` are data in batch, i.e. in a shape of
  `(batch_size, :)`.
  `batch_dot` results in a tensor or variable with less dimensions
  than the input. If the number of dimensions is reduced to 1,
  we use `expand_dims` to make sure that ndim is at least 2.

  Arguments:
      x: Keras tensor or variable with `ndim >= 2`.
      y: Keras tensor or variable with `ndim >= 2`.
      axes: list of (or single) int with target dimensions.
          The lengths of `axes[0]` and `axes[1]` should be the same.

  Returns:
      A tensor with shape equal to the concatenation of `x`'s shape
      (less the dimension that was summed over) and `y`'s shape
      (less the batch dimension and the dimension that was summed over).
      If the final rank is 1, we reshape it to `(batch_size, 1)`.

  Examples:
      Assume `x = [[1, 2], [3, 4]]` and `y = [[5, 6], [7, 8]]`
      `batch_dot(x, y, axes=1) = [[17, 53]]` which is the main diagonal
      of `x.dot(y.T)`, although we never have to calculate the off-diagonal
      elements.

      Shape inference:
      Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
      If `axes` is (1, 2), to find the output shape of resultant tensor,
      loop through each dimension in `x`'s shape and `y`'s shape:
      * `x.shape[0]` : 100 : append to output shape
      * `x.shape[1]` : 20 : do not append to output shape,
          dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1)
      * `y.shape[0]` : 100 : do not append to output shape,
          always ignore first dimension of `y`
      * `y.shape[1]` : 30 : append to output shape
      * `y.shape[2]` : 20 : do not append to output shape,
          dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2)
      `output_shape` = `(100, 30)`

  ```python
  >>> x_batch = K.ones(shape=(32, 20, 1))
  >>> y_batch = K.ones(shape=(32, 30, 20))
  >>> xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=[1, 2])
  >>> K.int_shape(xy_batch_dot)
  (32, 1, 30)
  ```
  """
  # A single int means: contract the same axis index on both operands.
  if isinstance(axes, int):
    axes = (axes, axes)
  x_ndim = ndim(x)
  y_ndim = ndim(y)
  if axes is None:
    # behaves like tf.batch_matmul as default
    axes = [x_ndim - 1, y_ndim - 2]
  # Pad the lower-rank operand with trailing singleton dims so both inputs
  # have the same rank before matmul; `diff` records how many were added so
  # they can be squeezed out of the result below.
  if x_ndim > y_ndim:
    diff = x_ndim - y_ndim
    y = array_ops.reshape(y,
                          array_ops.concat(
                              [array_ops.shape(y), [1] * (diff)], axis=0))
  elif y_ndim > x_ndim:
    diff = y_ndim - x_ndim
    x = array_ops.reshape(x,
                          array_ops.concat(
                              [array_ops.shape(x), [1] * (diff)], axis=0))
  else:
    diff = 0
  if ndim(x) == 2 and ndim(y) == 2:
    # Rank-2 case reduces to an elementwise multiply followed by a sum over
    # the requested axis (axis 0 is the batch dimension).
    if axes[0] == axes[1]:
      out = math_ops.reduce_sum(math_ops.multiply(x, y), axes[0])
    else:
      out = math_ops.reduce_sum(
          math_ops.multiply(array_ops.transpose(x, [1, 0]), y), axes[1])
  else:
    # Use matmul adjoint flags to pick which axis of each operand gets
    # contracted: adjoint when the contraction axis is NOT the last one.
    adj_x = None if axes[0] == ndim(x) - 1 else True
    adj_y = True if axes[1] == ndim(y) - 1 else None
    out = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
  if diff:
    # Squeeze out the singleton dims introduced by the rank padding above.
    if x_ndim > y_ndim:
      idx = x_ndim + y_ndim - 3
    else:
      idx = x_ndim - 1
    out = array_ops.squeeze(out, list(range(idx, idx + diff)))
  # Guarantee the documented minimum output rank of 2.
  if ndim(out) == 1:
    out = expand_dims(out, 1)
  return out
@keras_export('keras.backend.transpose')
def transpose(x):
  """Transposes a tensor and returns it.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor with the axes of `x` reversed.
  """
  return array_ops.transpose(x)
@keras_export('keras.backend.gather')
def gather(reference, indices):
  """Retrieves the elements of indices `indices` in the tensor `reference`.

  Arguments:
      reference: A tensor.
      indices: An integer tensor of indices.

  Returns:
      A tensor of same type as `reference`, gathered along axis 0.
  """
  return array_ops.gather(reference, indices)
# ELEMENT-WISE OPERATIONS
@keras_export('keras.backend.max')
def max(x, axis=None, keepdims=False):
  """Maximum value in a tensor.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to find maximum values.
      keepdims: A boolean. When `False`, the rank of the tensor is
          reduced by 1; when `True`, the reduced dimension is retained
          with length 1.

  Returns:
      A tensor with maximum values of `x`.
  """
  return math_ops.reduce_max(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.min')
def min(x, axis=None, keepdims=False):
  """Minimum value in a tensor.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to find minimum values.
      keepdims: A boolean. When `False`, the rank of the tensor is
          reduced by 1; when `True`, the reduced dimension is retained
          with length 1.

  Returns:
      A tensor with minimum values of `x`.
  """
  return math_ops.reduce_min(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.sum')
def sum(x, axis=None, keepdims=False):
  """Sum of the values in a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to sum over.
      keepdims: A boolean. When `False`, the rank of the tensor is
          reduced by 1; when `True`, the reduced dimension is retained
          with length 1.

  Returns:
      A tensor with the sum of `x`.
  """
  return math_ops.reduce_sum(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.prod')
def prod(x, axis=None, keepdims=False):
  """Multiplies the values in a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to compute the product.
      keepdims: A boolean. When `False`, the rank of the tensor is
          reduced by 1; when `True`, the reduced dimension is retained
          with length 1.

  Returns:
      A tensor with the product of elements of `x`.
  """
  return math_ops.reduce_prod(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.cumsum')
def cumsum(x, axis=0):
  """Cumulative sum of the values in a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to compute the sum.

  Returns:
      A tensor of the cumulative sum of values of `x` along `axis`.
  """
  return math_ops.cumsum(x, axis=axis)
@keras_export('keras.backend.cumprod')
def cumprod(x, axis=0):
  """Cumulative product of the values in a tensor along the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to compute the product.

  Returns:
      A tensor of the cumulative product of values of `x` along `axis`.
  """
  return math_ops.cumprod(x, axis=axis)
@keras_export('keras.backend.var')
def var(x, axis=None, keepdims=False):
  """Variance of a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to compute the variance.
      keepdims: A boolean. When `False`, the rank of the tensor is
          reduced by 1; when `True`, the reduced dimension is retained
          with length 1.

  Returns:
      A tensor with the variance of elements of `x`.
  """
  # Boolean tensors have no variance; promote them to floatx first.
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  return math_ops.reduce_variance(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.std')
def std(x, axis=None, keepdims=False):
  """Standard deviation of a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to compute the standard deviation.
      keepdims: A boolean. When `False`, the rank of the tensor is
          reduced by 1; when `True`, the reduced dimension is retained
          with length 1.

  Returns:
      A tensor with the standard deviation of elements of `x`.
  """
  # Boolean tensors have no standard deviation; promote to floatx first.
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  return math_ops.reduce_std(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.mean')
def mean(x, axis=None, keepdims=False):
  """Mean of a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: A list of integers. Axes to compute the mean over.
      keepdims: A boolean. When `False`, the rank of the tensor is
          reduced by 1 for each entry in `axis`; when `True`, the reduced
          dimensions are retained with length 1.

  Returns:
      A tensor with the mean of elements of `x`.
  """
  # Boolean tensors have no mean; promote them to floatx first.
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  return math_ops.reduce_mean(x, axis, keepdims)
@keras_export('keras.backend.any')
def any(x, axis=None, keepdims=False):
  """Bitwise reduction (logical OR).

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.
      keepdims: whether to drop or broadcast the reduction axes.

  Returns:
      A bool tensor.
  """
  x = math_ops.cast(x, dtypes_module.bool)
  return math_ops.reduce_any(x, axis, keepdims)
@keras_export('keras.backend.all')
def all(x, axis=None, keepdims=False):
  """Bitwise reduction (logical AND).

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.
      keepdims: whether to drop or broadcast the reduction axes.

  Returns:
      A bool tensor.
  """
  x = math_ops.cast(x, dtypes_module.bool)
  return math_ops.reduce_all(x, axis, keepdims)
@keras_export('keras.backend.argmax')
def argmax(x, axis=-1):
  """Returns the index of the maximum value along an axis.

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.

  Returns:
      A tensor of indices.
  """
  return math_ops.argmax(x, axis)
@keras_export('keras.backend.argmin')
def argmin(x, axis=-1):
  """Returns the index of the minimum value along an axis.

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.

  Returns:
      A tensor of indices.
  """
  return math_ops.argmin(x, axis)
@keras_export('keras.backend.square')
def square(x):
  """Element-wise square.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of the same shape as `x`.
  """
  return math_ops.square(x)
@keras_export('keras.backend.abs')
def abs(x):
  """Element-wise absolute value.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of the same shape as `x`.
  """
  return math_ops.abs(x)
@keras_export('keras.backend.sqrt')
def sqrt(x):
  """Element-wise square root.

  Negative entries are clipped to zero before taking the root, so the
  result never contains NaNs from negative inputs.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of the same shape as `x`.
  """
  lower = _constant_to_tensor(0., x.dtype.base_dtype)
  upper = _constant_to_tensor(np.inf, x.dtype.base_dtype)
  clipped = clip_ops.clip_by_value(x, lower, upper)
  return math_ops.sqrt(clipped)
@keras_export('keras.backend.exp')
def exp(x):
  """Element-wise exponential.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of the same shape as `x`.
  """
  return math_ops.exp(x)
@keras_export('keras.backend.log')
def log(x):
  """Element-wise natural logarithm.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of the same shape as `x`.
  """
  return math_ops.log(x)
def logsumexp(x, axis=None, keepdims=False):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  This function is more numerically stable than log(sum(exp(x))): it
  avoids overflows from exponentiating large inputs and underflows from
  taking the log of small inputs.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to reduce over.
      keepdims: A boolean. When `False`, the rank of the tensor is
          reduced by 1; when `True`, the reduced dimension is retained
          with length 1.

  Returns:
      The reduced tensor.
  """
  return math_ops.reduce_logsumexp(x, axis, keepdims)
@keras_export('keras.backend.round')
def round(x):
  """Element-wise rounding to the closest integer.

  In case of tie, the rounding mode used is "half to even".

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of the same shape as `x`.
  """
  return math_ops.round(x)
@keras_export('keras.backend.sign')
def sign(x):
  """Element-wise sign.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of the same shape as `x`.
  """
  return math_ops.sign(x)
@keras_export('keras.backend.pow')
def pow(x, a):
  """Element-wise exponentiation.

  Arguments:
      x: Tensor or variable.
      a: Python integer exponent.

  Returns:
      A tensor of the same shape as `x`.
  """
  return math_ops.pow(x, a)
@keras_export('keras.backend.clip')
def clip(x, min_value, max_value):
  """Element-wise value clipping.

  Arguments:
      x: Tensor or variable.
      min_value: Python float or integer, or `None` for no lower bound.
      max_value: Python float or integer, or `None` for no upper bound.

  Returns:
      A tensor with values of `x` clipped to `[min_value, max_value]`.
  """
  # An inverted interval collapses to the lower bound (historic behavior).
  if (max_value is not None and min_value is not None and
      max_value < min_value):
    max_value = min_value
  # `None` bounds mean "unbounded" on that side. Previously a `None`
  # min_value raised a TypeError; treat it symmetrically to max_value.
  if min_value is None:
    min_value = -np.inf
  if max_value is None:
    max_value = np.inf
  min_value = _constant_to_tensor(min_value, x.dtype.base_dtype)
  max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
  return clip_ops.clip_by_value(x, min_value, max_value)
@keras_export('keras.backend.equal')
def equal(x, y):
  """Element-wise equality between two tensors.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.equal(x, y)
@keras_export('keras.backend.not_equal')
def not_equal(x, y):
  """Element-wise inequality between two tensors.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.not_equal(x, y)
@keras_export('keras.backend.greater')
def greater(x, y):
  """Element-wise truth value of (x > y).

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.greater(x, y)
@keras_export('keras.backend.greater_equal')
def greater_equal(x, y):
  """Element-wise truth value of (x >= y).

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.greater_equal(x, y)
@keras_export('keras.backend.less')
def less(x, y):
  """Element-wise truth value of (x < y).

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.less(x, y)
@keras_export('keras.backend.less_equal')
def less_equal(x, y):
  """Element-wise truth value of (x <= y).

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.less_equal(x, y)
@keras_export('keras.backend.maximum')
def maximum(x, y):
  """Element-wise maximum of two tensors.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A tensor with the element-wise maximum value(s) of `x` and `y`.
  """
  return math_ops.maximum(x, y)
@keras_export('keras.backend.minimum')
def minimum(x, y):
  """Element-wise minimum of two tensors.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A tensor with the element-wise minimum value(s) of `x` and `y`.
  """
  return math_ops.minimum(x, y)
@keras_export('keras.backend.sin')
def sin(x):
  """Computes the sine of `x`, element-wise.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of the same shape as `x`.
  """
  return math_ops.sin(x)
@keras_export('keras.backend.cos')
def cos(x):
  """Computes the cosine of `x`, element-wise.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of the same shape as `x`.
  """
  return math_ops.cos(x)
def _regular_normalize_batch_in_training(x,
                                         gamma,
                                         beta,
                                         reduction_axes,
                                         epsilon=1e-3):
  """Non-fused version of `normalize_batch_in_training`.

  Arguments:
      x: Input tensor or variable.
      gamma: Tensor by which to scale the input.
      beta: Tensor with which to center the input.
      reduction_axes: iterable of integers, axes over which to normalize.
      epsilon: Fuzz factor.

  Returns:
      A tuple of length 3: `(normalized_tensor, mean, variance)`.
  """
  # Batch statistics over the reduction axes, then a plain (non-fused)
  # batch-normalization op.
  mean, var = nn.moments(x, reduction_axes, None, None, False)
  normalized = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
  return normalized, mean, var
def _broadcast_normalize_batch_in_training(x,
                                           gamma,
                                           beta,
                                           reduction_axes,
                                           epsilon=1e-3):
  """Non-fused, broadcast version of `normalize_batch_in_training`.

  Arguments:
      x: Input tensor or variable.
      gamma: Tensor by which to scale the input.
      beta: Tensor with which to center the input.
      reduction_axes: iterable of integers, axes over which to normalize.
      epsilon: Fuzz factor.

  Returns:
      A tuple of length 3: `(normalized_tensor, mean, variance)`.
  """
  mean, var = nn.moments(x, reduction_axes, None, None, False)
  # Build a shape that is 1 on every reduced axis and the dynamic size of
  # `x` elsewhere, so the statistics (and gamma/beta) broadcast against x.
  target_shape = array_ops.stack([
      1 if axis in reduction_axes else array_ops.shape(x)[axis]
      for axis in range(ndim(x))
  ])

  def _broadcastable(t):
    # gamma/beta may be None (no scale / no center); pass None through.
    return None if t is None else array_ops.reshape(t, target_shape)

  normalized = nn.batch_normalization(x,
                                      array_ops.reshape(mean, target_shape),
                                      array_ops.reshape(var, target_shape),
                                      _broadcastable(beta),
                                      _broadcastable(gamma), epsilon)
  return normalized, mean, var
def _fused_normalize_batch_in_training(x,
                                       gamma,
                                       beta,
                                       reduction_axes,
                                       epsilon=1e-3):
  """Fused version of `normalize_batch_in_training`.

  Arguments:
      x: Input tensor or variable.
      gamma: Tensor by which to scale the input.
      beta: Tensor with which to center the input.
      reduction_axes: iterable of integers, axes over which to normalize.
      epsilon: Fuzz factor.

  Returns:
      A tuple of length 3: `(normalized_tensor, mean, variance)`.
  """
  # [0, 1, 2] means the channel axis is last (NHWC); otherwise assume the
  # channel axis is 1 (NCHW).
  if list(reduction_axes) == [0, 1, 2]:
    normalization_axis, tf_data_format = 3, 'NHWC'
  else:
    normalization_axis, tf_data_format = 1, 'NCHW'
  # The fused op requires concrete scale/offset tensors, so substitute
  # identity scale (1.0) and zero offset when they are not provided.
  if gamma is None:
    gamma = constant_op.constant(
        1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
  if beta is None:
    beta = constant_op.constant(
        0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
  return nn.fused_batch_norm(
      x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)
@keras_export('keras.backend.normalize_batch_in_training')
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
  """Computes mean and std for batch then apply batch_normalization on batch.

  Arguments:
      x: Input tensor or variable.
      gamma: Tensor by which to scale the input.
      beta: Tensor with which to center the input.
      reduction_axes: iterable of integers, axes over which to normalize.
      epsilon: Fuzz factor.

  Returns:
      A tuple of length 3: `(normalized_tensor, mean, variance)`.
  """
  axes = list(reduction_axes)
  # 4-D inputs reduced over all but the channel axis can use the fused
  # kernel -- except NCHW on platforms without NCHW support.
  if ndim(x) == 4 and axes in [[0, 1, 2], [0, 2, 3]]:
    if axes == [0, 2, 3] and not _has_nchw_support():
      return _broadcast_normalize_batch_in_training(
          x, gamma, beta, reduction_axes, epsilon=epsilon)
    return _fused_normalize_batch_in_training(
        x, gamma, beta, reduction_axes, epsilon=epsilon)
  # Non-fused path: the statistics broadcast directly only when all axes
  # except the last are reduced; otherwise reshape them for broadcasting.
  if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
    return _regular_normalize_batch_in_training(
        x, gamma, beta, reduction_axes, epsilon=epsilon)
  return _broadcast_normalize_batch_in_training(
      x, gamma, beta, reduction_axes, epsilon=epsilon)
@keras_export('keras.backend.batch_normalization')
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
  """Applies batch normalization on x given mean, var, beta and gamma.

  I.e. returns:
  `output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta`

  Arguments:
      x: Input tensor or variable.
      mean: Mean of batch.
      var: Variance of batch.
      beta: Tensor with which to center the input.
      gamma: Tensor by which to scale the input.
      axis: Integer, the axis that should be normalized.
          (typically the features axis).
      epsilon: Fuzz factor.

  Returns:
      A tensor.
  """
  # Try the fused kernel for 4-D inputs normalized over a recognizable
  # channel axis; otherwise fall through to the generic op at the bottom.
  if ndim(x) == 4:
    # The CPU implementation of `fused_batch_norm` only supports NHWC
    if axis == 1 or axis == -3:
      tf_data_format = 'NCHW'
    elif axis == 3 or axis == -1:
      tf_data_format = 'NHWC'
    else:
      tf_data_format = None
    # NOTE: `and` binds tighter than `or` here -- NHWC is always accepted,
    # NCHW only when the platform supports it.
    if (tf_data_format == 'NHWC' or
        tf_data_format == 'NCHW' and _has_nchw_support()):
      # The mean / var / beta / gamma tensors may be broadcasted
      # so they may have extra axes of size 1, which should be squeezed.
      if ndim(mean) > 1:
        mean = array_ops.reshape(mean, [-1])
      if ndim(var) > 1:
        var = array_ops.reshape(var, [-1])
      # The fused op requires concrete offset/scale vectors; default to
      # zero offset and identity scale when they are not provided.
      if beta is None:
        beta = zeros_like(mean)
      elif ndim(beta) > 1:
        beta = array_ops.reshape(beta, [-1])
      if gamma is None:
        gamma = ones_like(mean)
      elif ndim(gamma) > 1:
        gamma = array_ops.reshape(gamma, [-1])
      # Inference mode: use the provided statistics rather than computing
      # them from the batch.
      y, _, _ = nn.fused_batch_norm(
          x,
          gamma,
          beta,
          epsilon=epsilon,
          mean=mean,
          variance=var,
          data_format=tf_data_format,
          is_training=False
      )
      return y
  return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
# SHAPE OPERATIONS
@keras_export('keras.backend.concatenate')
def concatenate(tensors, axis=-1):
  """Concatenates a list of tensors alongside the specified axis.

  Arguments:
      tensors: list of tensors to concatenate.
      axis: concatenation axis.

  Returns:
      A tensor.
  """
  # Normalize a negative axis against the rank of the first tensor; an
  # unknown rank falls back to axis 0.
  if axis < 0:
    rank = ndim(tensors[0])
    axis = axis % rank if rank else 0
  # Only an all-sparse input list can use the sparse concat kernel.
  if py_all(is_sparse(x) for x in tensors):
    return sparse_ops.sparse_concat(axis, tensors)
  return array_ops.concat([to_dense(x) for x in tensors], axis)
@keras_export('keras.backend.reshape')
def reshape(x, shape):
  """Reshapes a tensor to the specified shape.

  Arguments:
      x: Tensor or variable.
      shape: Target shape tuple.

  Returns:
      A tensor with shape `shape` and the same contents as `x`.
  """
  return array_ops.reshape(x, shape)
@keras_export('keras.backend.permute_dimensions')
def permute_dimensions(x, pattern):
  """Permutes axes in a tensor.

  Arguments:
      x: Tensor or variable.
      pattern: A tuple of dimension indices, e.g. `(0, 2, 1)`.

  Returns:
      A tensor with the axes of `x` reordered according to `pattern`.
  """
  return array_ops.transpose(x, perm=pattern)
@keras_export('keras.backend.resize_images')
def resize_images(x, height_factor, width_factor, data_format,
                  interpolation='nearest'):
  """Resizes the images contained in a 4D tensor.

  Arguments:
      x: Tensor or variable to resize.
      height_factor: Positive integer.
      width_factor: Positive integer.
      data_format: One of `"channels_first"`, `"channels_last"`.
      interpolation: A string, one of `nearest` or `bilinear`.

  Returns:
      A tensor.

  Raises:
      ValueError: in case of incorrect value for
        `data_format` or `interpolation`.
  """
  # Locate the spatial (height, width) axes for the given layout.
  if data_format == 'channels_first':
    rows, cols = 2, 3
  elif data_format == 'channels_last':
    rows, cols = 1, 2
  else:
    raise ValueError('Invalid `data_format` argument: %s' % (data_format,))
  original_shape = int_shape(x)
  # Dynamic (height, width) of the input, scaled by the resize factors.
  new_shape = array_ops.shape(x)[rows:cols + 1]
  new_shape *= constant_op.constant(
      np.array([height_factor, width_factor], dtype='int32'))
  # The resize ops operate on channels-last data, so transpose around them.
  if data_format == 'channels_first':
    x = permute_dimensions(x, [0, 2, 3, 1])
  if interpolation == 'nearest':
    x = image_ops.resize_images_v2(
        x, new_shape, method=image_ops.ResizeMethod.NEAREST_NEIGHBOR)
  elif interpolation == 'bilinear':
    x = image_ops.resize_images_v2(x, new_shape,
                                   method=image_ops.ResizeMethod.BILINEAR)
  else:
    raise ValueError('interpolation should be one '
                     'of "nearest" or "bilinear".')
  if data_format == 'channels_first':
    x = permute_dimensions(x, [0, 3, 1, 2])
  # Recompute the static shape from the original static dims (when known)
  # so downstream shape inference still works after the dynamic resize.
  if original_shape[rows] is None:
    new_height = None
  else:
    new_height = original_shape[rows] * height_factor
  if original_shape[cols] is None:
    new_width = None
  else:
    new_width = original_shape[cols] * width_factor
  if data_format == 'channels_first':
    output_shape = (None, None, new_height, new_width)
  else:
    output_shape = (None, new_height, new_width, None)
  x.set_shape(output_shape)
  return x
@keras_export('keras.backend.resize_volumes')
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
  """Resizes the volume contained in a 5D tensor.

  Arguments:
      x: Tensor or variable to resize.
      depth_factor: Positive integer.
      height_factor: Positive integer.
      width_factor: Positive integer.
      data_format: One of `"channels_first"`, `"channels_last"`.

  Returns:
      A tensor.

  Raises:
      ValueError: if `data_format` is neither
        `channels_last` or `channels_first`.
  """
  # The three spatial axes start right after the channel axis (axis 2 for
  # channels_first) or right after the batch axis (axis 1 for
  # channels_last).
  if data_format == 'channels_first':
    first_spatial_axis = 2
  elif data_format == 'channels_last':
    first_spatial_axis = 1
  else:
    raise ValueError('Invalid data_format: ' + str(data_format))
  output = repeat_elements(x, depth_factor, axis=first_spatial_axis)
  output = repeat_elements(output, height_factor, axis=first_spatial_axis + 1)
  return repeat_elements(output, width_factor, axis=first_spatial_axis + 2)
@keras_export('keras.backend.repeat_elements')
def repeat_elements(x, rep, axis):
  """Repeats the elements of a tensor along an axis, like `np.repeat`.

  If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
  will have shape `(s1, s2 * rep, s3)`.

  Arguments:
      x: Tensor or variable.
      rep: Python integer, number of times to repeat.
      axis: Axis along which to repeat.

  Returns:
      A tensor.

  Example:
  ```python
      >>> b = tf.constant([1, 2, 3])
      >>> tf.keras.backend.repeat_elements(b, rep=2, axis=0)
      <tf.Tensor: id=70, shape=(6,), dtype=int32,
          numpy=array([1, 1, 2, 2, 3, 3], dtype=int32)>
  ```
  """
  x_shape = x.shape.as_list()
  # For static axis
  if x_shape[axis] is not None:
    # slices along the repeat axis
    splits = array_ops.split(value=x,
                             num_or_size_splits=x_shape[axis],
                             axis=axis)
    # repeat each slice the given number of reps
    x_rep = [s for s in splits for _ in range(rep)]
    return concatenate(x_rep, axis)
  # Here we use tf.tile to mimic behavior of np.repeat so that
  # we can handle dynamic shapes (that include None).
  # To do that, we need an auxiliary axis to repeat elements along
  # it and then merge them along the desired axis.
  # Repeating
  auxiliary_axis = axis + 1
  x_shape = array_ops.shape(x)
  x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)
  reps = np.ones(len(x.shape) + 1)
  reps[auxiliary_axis] = rep
  x_rep = array_ops.tile(x_rep, reps)
  # Merging
  reps = np.delete(reps, auxiliary_axis)
  reps[axis] = rep
  reps = array_ops.constant(reps, dtype='int32')
  # Scale the dynamic shape along `axis` by `rep`, then collapse the
  # auxiliary axis into it.
  x_shape *= reps
  x_rep = array_ops.reshape(x_rep, x_shape)
  # Fix shape representation
  x_shape = x.shape.as_list()
  x_rep.set_shape(x_shape)
  x_rep._keras_shape = tuple(x_shape)
  return x_rep
@keras_export('keras.backend.repeat')
def repeat(x, n):
  """Repeats a 2D tensor.

  If `x` has shape `(samples, dim)` and `n` is `2`,
  the output will have shape `(samples, 2, dim)`.

  Arguments:
      x: Tensor or variable.
      n: Python integer, number of times to repeat.

  Returns:
      A tensor.

  Example:
  ```python
      >>> b = tf.constant([[1, 2], [3, 4]])
      >>> tf.keras.backend.repeat(b, n=2)
      <tf.Tensor: id=82, shape=(2, 2, 2), dtype=int32, numpy=
      array([[[1, 2],
              [1, 2]],
             [[3, 4],
              [3, 4]]], dtype=int32)>
  ```
  """
  assert ndim(x) == 2
  # Insert a new middle axis, then tile `n` times along it.
  expanded = array_ops.expand_dims(x, 1)
  tiling_pattern = array_ops.stack([1, n, 1])
  return array_ops.tile(expanded, tiling_pattern)
@keras_export('keras.backend.arange')
def arange(start, stop=None, step=1, dtype='int32'):
  """Creates a 1D tensor containing a sequence of integers.

  The function arguments use the same convention as
  Theano's arange: if only one argument is provided,
  it is in fact the "stop" argument and "start" is 0.

  The default type of the returned tensor is `'int32'` to
  match TensorFlow's default.

  Arguments:
      start: Start value.
      stop: Stop value.
      step: Difference between two successive values.
      dtype: Integer dtype to use.

  Returns:
      An integer tensor.
  """
  # A single negative argument yields an empty sequence, matching the
  # behavior of numpy and Theano.
  if stop is None and start < 0:
    start = 0
  result = math_ops.range(start, limit=stop, delta=step, name='arange')
  if dtype == 'int32':
    return result
  return cast(result, dtype)
@keras_export('keras.backend.tile')
def tile(x, n):
  """Creates a tensor by tiling `x` by `n`.

  Arguments:
      x: A tensor or variable.
      n: A list of integers. The length must be the same as the number of
          dimensions in `x`.

  Returns:
      A tiled tensor.
  """
  # A bare integer is treated as a 1-element multiples list.
  multiples = [n] if isinstance(n, int) else n
  return array_ops.tile(x, multiples)
@keras_export('keras.backend.flatten')
def flatten(x):
  """Flatten a tensor.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor, reshaped into 1-D.

  Example:
  ```python
      >>> b = tf.constant([[1, 2], [3, 4]])
      >>> tf.keras.backend.flatten(b)
      <tf.Tensor: id=105, shape=(4,), dtype=int32,
          numpy=array([1, 2, 3, 4], dtype=int32)>
  ```
  """
  # A single -1 dimension lets reshape infer the full flattened length.
  flat = array_ops.reshape(x, [-1])
  return flat
@keras_export('keras.backend.batch_flatten')
def batch_flatten(x):
  """Turn a nD tensor into a 2D tensor with same 0th dimension.

  In other words, it flattens each data samples of a batch.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor.

  Examples:
    Flattening a 3D tensor to 2D by collapsing the last dimension.

  ```python
      >>> from tensorflow.keras import backend as K
      >>> x_batch = K.ones(shape=(2, 3, 4, 5))
      >>> x_batch_flatten = K.batch_flatten(x_batch)
      >>> K.int_shape(x_batch_flatten)
      (2, 60)
  ```
  """
  # Collapse every non-batch dimension into one; -1 infers the batch size.
  flattened_dim = prod(shape(x)[1:])
  return array_ops.reshape(x, array_ops.stack([-1, flattened_dim]))
@keras_export('keras.backend.expand_dims')
def expand_dims(x, axis=-1):
  """Adds a 1-sized dimension at index "axis".

  Arguments:
      x: A tensor or variable.
      axis: Position where to add a new axis.

  Returns:
      A tensor with expanded dimensions.
  """
  expanded = array_ops.expand_dims(x, axis)
  return expanded
@keras_export('keras.backend.squeeze')
def squeeze(x, axis):
  """Removes a 1-dimension from the tensor at index "axis".

  Arguments:
      x: A tensor or variable.
      axis: Axis to drop.

  Returns:
      A tensor with the same data as `x` but reduced dimensions.
  """
  # Only the requested axis is squeezed, never every size-1 dimension.
  squeezed = array_ops.squeeze(x, [axis])
  return squeezed
@keras_export('keras.backend.temporal_padding')
def temporal_padding(x, padding=(1, 1)):
  """Pads the middle dimension of a 3D tensor.

  Arguments:
      x: Tensor or variable.
      padding: Tuple of 2 integers, how many zeros to
          add at the start and end of dim 1.

  Returns:
      A padded 3D tensor.
  """
  assert len(padding) == 2
  # Only the time axis (dim 1) is padded; batch and feature axes are not.
  pad_pattern = [[0, 0], list(padding), [0, 0]]
  return array_ops.pad(x, pad_pattern)
@keras_export('keras.backend.spatial_2d_padding')
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
  """Pads the 2nd and 3rd dimensions of a 4D tensor.

  Arguments:
      x: Tensor or variable.
      padding: Tuple of 2 tuples, padding pattern.
      data_format: One of `channels_last` or `channels_first`.

  Returns:
      A padded 4D tensor.

  Raises:
      ValueError: if `data_format` is neither
          `channels_last` or `channels_first`.
  """
  assert len(padding) == 2
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  # Batch and channel axes are never padded; their position depends on the
  # data format.
  spatial_pads = [list(padding[0]), list(padding[1])]
  if data_format == 'channels_first':
    pad_pattern = [[0, 0], [0, 0]] + spatial_pads
  else:
    pad_pattern = [[0, 0]] + spatial_pads + [[0, 0]]
  return array_ops.pad(x, pad_pattern)
@keras_export('keras.backend.spatial_3d_padding')
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
  """Pads 5D tensor with zeros along the depth, height, width dimensions.

  Pads these dimensions with respectively
  "padding[0]", "padding[1]" and "padding[2]" zeros left and right.

  For 'channels_last' data_format,
  the 2nd, 3rd and 4th dimension will be padded.
  For 'channels_first' data_format,
  the 3rd, 4th and 5th dimension will be padded.

  Arguments:
      x: Tensor or variable.
      padding: Tuple of 3 tuples, padding pattern.
      data_format: One of `channels_last` or `channels_first`.

  Returns:
      A padded 5D tensor.

  Raises:
      ValueError: if `data_format` is neither
          `channels_last` or `channels_first`.
  """
  assert len(padding) == 3
  for dim_padding in padding:
    assert len(dim_padding) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  # Batch and channel axes are never padded; their position depends on the
  # data format.
  spatial_pads = [[before, after] for before, after in padding]
  if data_format == 'channels_first':
    pad_pattern = [[0, 0], [0, 0]] + spatial_pads
  else:
    pad_pattern = [[0, 0]] + spatial_pads + [[0, 0]]
  return array_ops.pad(x, pad_pattern)
@keras_export('keras.backend.stack')
def stack(x, axis=0):
  """Stacks a list of rank `R` tensors into a rank `R+1` tensor.

  Arguments:
      x: List of tensors.
      axis: Axis along which to perform stacking.

  Returns:
      A tensor.

  Example:
  ```python
      >>> a = tf.constant([[1, 2], [3, 4]])
      >>> b = tf.constant([[10, 20], [30, 40]])
      >>> tf.keras.backend.stack((a, b))
      <tf.Tensor: id=146, shape=(2, 2, 2), dtype=int32, numpy=
      array([[[ 1,  2],
              [ 3,  4]],
             [[10, 20],
              [30, 40]]], dtype=int32)>
  ```
  """
  stacked = array_ops.stack(x, axis=axis)
  return stacked
@keras_export('keras.backend.one_hot')
def one_hot(indices, num_classes):
  """Computes the one-hot representation of an integer tensor.

  Arguments:
      indices: nD integer tensor of shape
          `(batch_size, dim1, dim2, ... dim(n-1))`
      num_classes: Integer, number of classes to consider.

  Returns:
      (n + 1)D one hot representation of the input
      with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`
  """
  # The new one-hot axis is appended as the innermost dimension.
  return array_ops.one_hot(indices, depth=num_classes, axis=-1)
@keras_export('keras.backend.reverse')
def reverse(x, axes):
  """Reverse a tensor along the specified axes.

  Arguments:
      x: Tensor to reverse.
      axes: Integer or iterable of integers.
          Axes to reverse.

  Returns:
      A tensor.
  """
  # A bare integer is treated as a single-axis list.
  axis_list = [axes] if isinstance(axes, int) else axes
  return array_ops.reverse(x, axis_list)
# VALUE MANIPULATION
@keras_export('keras.backend.get_value')
def get_value(x):
  """Returns the value of a variable.

  Arguments:
      x: input variable.

  Returns:
      A Numpy array.
  """
  # Non-tensor inputs (plain Python / numpy values) are returned as-is.
  if not tensor_util.is_tensor(x):
    return x
  if context.executing_eagerly():
    return x.numpy()
  if not getattr(x, '_in_graph_mode', True):
    # This is a variable which was created in an eager context, but is being
    # evaluated from a Graph.
    with context.eager_mode():
      return x.numpy()
  if ops.executing_eagerly_outside_functions():
    # This method of evaluating works inside the Keras FuncGraph.
    return function([], x)(x)
  # Plain graph mode: evaluate through a session.
  return x.eval(session=get_session((x,)))
@keras_export('keras.backend.batch_get_value')
def batch_get_value(tensors):
  """Returns the value of more than one tensor variable.

  Arguments:
      tensors: list of ops to run.

  Returns:
      A list of Numpy arrays.

  Raises:
      RuntimeError: If this method is called inside defun.
  """
  if context.executing_eagerly():
    return [t.numpy() for t in tensors]
  if ops.inside_function():
    raise RuntimeError('Cannot get value inside Tensorflow graph function.')
  if not tensors:
    return []
  # Graph mode: evaluate everything in a single session run.
  return get_session(tensors).run(tensors)
@keras_export('keras.backend.set_value')
def set_value(x, value):
  """Sets the value of a variable, from a Numpy array.

  Arguments:
      x: Tensor to set to a new value.
      value: Value to set the tensor to, as a Numpy array
          (of the same shape).
  """
  value = np.asarray(value, dtype=dtype(x))
  if ops.executing_eagerly_outside_functions():
    # Eager variables can be assigned directly.
    with ops.init_scope():
      x.assign(value)
  else:
    with get_graph().as_default():
      # NOTE(review): the split on '_' presumably strips a '_ref' suffix
      # from reference dtypes (e.g. 'float32_ref' -> 'float32') — confirm.
      tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
      if hasattr(x, '_assign_placeholder'):
        # Reuse the assign op/placeholder cached on `x` by a previous call.
        assign_placeholder = x._assign_placeholder
        assign_op = x._assign_op
      else:
        # In order to support assigning weights to resizable variables in
        # Keras, we make a placeholder with the correct number of dimensions
        # but with None in each dimension. This way, we can assign weights
        # of any size (as long as they have the correct dimensionality).
        placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)
        assign_placeholder = array_ops.placeholder(
            tf_dtype, shape=placeholder_shape)
        assign_op = x.assign(assign_placeholder)
        # Cache on the variable so future set_value calls skip graph building.
        x._assign_placeholder = assign_placeholder
        x._assign_op = assign_op
      get_session().run(assign_op, feed_dict={assign_placeholder: value})
@keras_export('keras.backend.batch_set_value')
def batch_set_value(tuples):
  """Sets the values of many tensor variables at once.

  Arguments:
      tuples: a list of tuples `(tensor, value)`.
          `value` should be a Numpy array.
  """
  if ops.executing_eagerly_outside_functions():
    # Eager variables can be assigned directly, one by one.
    with ops.init_scope():
      for x, value in tuples:
        x.assign(np.asarray(value, dtype=dtype(x)))
  else:
    # Graph mode: build (or reuse cached) placeholder-fed assign ops, then
    # run them all in one Session.run call.
    with get_graph().as_default():
      if tuples:
        assign_ops = []
        feed_dict = {}
        for x, value in tuples:
          value = np.asarray(value, dtype=dtype(x))
          # NOTE(review): the split on '_' presumably strips a '_ref'
          # suffix from reference dtypes — confirm.
          tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
          if hasattr(x, '_assign_placeholder'):
            # Reuse the assign op/placeholder cached on `x` previously.
            assign_placeholder = x._assign_placeholder
            assign_op = x._assign_op
          else:
            # In order to support assigning weights to resizable variables in
            # Keras, we make a placeholder with the correct number of dimensions
            # but with None in each dimension. This way, we can assign weights
            # of any size (as long as they have the correct dimensionality).
            placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)
            assign_placeholder = array_ops.placeholder(
                tf_dtype, shape=placeholder_shape)
            assign_op = x.assign(assign_placeholder)
            x._assign_placeholder = assign_placeholder
            x._assign_op = assign_op
          assign_ops.append(assign_op)
          feed_dict[assign_placeholder] = value
        get_session().run(assign_ops, feed_dict=feed_dict)
@keras_export('keras.backend.print_tensor')
def print_tensor(x, message=''):
  """Prints `message` and the tensor value when evaluated.

  Note that `print_tensor` returns a new tensor identical to `x`
  which should be used in the following code. Otherwise the
  print operation is not taken into account during evaluation.

  Example:
  ```python
      >>> x = K.print_tensor(x, message="x is: ")
  ```

  Arguments:
      x: Tensor to print.
      message: Message to print jointly with the tensor.

  Returns:
      The same tensor `x`, unchanged.
  """
  if isinstance(x, ops.Tensor) and hasattr(x, 'graph'):
    # Symbolic tensor: attach the print op as a control dependency of an
    # identity, so it runs whenever the returned tensor is evaluated.
    with get_graph().as_default():
      op = logging_ops.print_v2(message, x, output_stream=sys.stdout)
      with ops.control_dependencies([op]):
        return array_ops.identity(x)
  else:
    # Eager tensor (or non-tensor value): print immediately.
    logging_ops.print_v2(message, x, output_stream=sys.stdout)
    return x
# GRAPH MANIPULATION
class GraphExecutionFunction(object):
  """Runs a computation graph.

  It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`.
  In particular additional operations via `fetches` argument and additional
  tensor substitutions via `feed_dict` arguments. Note that given
  substitutions are merged with substitutions from `inputs`. Even though
  `feed_dict` is passed once in the constructor (called in `model.compile()`)
  we can modify the values in the dictionary. Through this feed_dict we can
  provide additional substitutions besides Keras inputs.

  Arguments:
      inputs: Feed placeholders to the computation graph.
      outputs: Output tensors to fetch.
      updates: Additional update ops to be run at function call.
      name: A name to help users identify what this function does.
      session_kwargs: Arguments to `tf.Session.run()`:
          `fetches`, `feed_dict`, `options`, `run_metadata`.
  """
  def __init__(self, inputs, outputs, updates=None, name=None,
               **session_kwargs):
    updates = updates or []
    if not isinstance(updates, (list, tuple)):
      raise TypeError('`updates` in a Keras backend function '
                      'should be a list or tuple.')
    # Keep the original (possibly nested) structures so outputs can be
    # re-packed in __call__; the flat lists are what gets fed/fetched.
    self._inputs_structure = inputs
    self.inputs = nest.flatten(inputs, expand_composites=True)
    self._outputs_structure = outputs
    self.outputs = cast_variables_to_tensor(
        nest.flatten(outputs, expand_composites=True))
    # TODO(b/127668432): Consider using autograph to generate these
    # dependencies in call.
    # Index 0 = total loss or model output for `predict`.
    with ops.control_dependencies([self.outputs[0]]):
      updates_ops = []
      for update in updates:
        if isinstance(update, tuple):
          # A `(variable, new_value)` pair maps to an assign op.
          p, new_p = update
          updates_ops.append(state_ops.assign(p, new_p))
        else:
          # assumed already an op
          updates_ops.append(update)
      self.updates_op = control_flow_ops.group(*updates_ops)
    self.name = name
    # additional tensor substitutions
    self.feed_dict = session_kwargs.pop('feed_dict', None)
    # additional operations
    self.fetches = session_kwargs.pop('fetches', [])
    if not isinstance(self.fetches, list):
      self.fetches = [self.fetches]
    self.run_options = session_kwargs.pop('options', None)
    self.run_metadata = session_kwargs.pop('run_metadata', None)
    # The main use case of `fetches` being passed to a model is the ability
    # to run custom updates
    # This requires us to wrap fetches in `identity` ops.
    self.fetches = [array_ops.identity(x) for x in self.fetches]
    self.session_kwargs = session_kwargs
    # This mapping keeps track of the function that should receive the
    # output from a fetch in `fetches`: { fetch: function(fetch_output) }
    # A Callback can use this to register a function with access to the
    # output values for a fetch it added.
    self.fetch_callbacks = {}
    if session_kwargs:
      raise ValueError('Some keys in session_kwargs are not supported at this '
                       'time: %s' % (session_kwargs.keys(),))
    # Cached callable and the parameters it was built for; refreshed lazily
    # in __call__ when any of them change.
    self._callable_fn = None
    self._feed_arrays = None
    self._feed_symbols = None
    self._symbol_vals = None
    self._fetches = None
    self._session = None
  def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):
    """Generates a callable that runs the graph.

    Arguments:
      feed_arrays: List of input tensors to be fed Numpy arrays at runtime.
      feed_symbols: List of input tensors to be fed symbolic tensors at runtime.
      symbol_vals: List of symbolic tensors to be fed to `feed_symbols`.
      session: Session to use to generate the callable.

    Returns:
      Function that runs the graph according to the above options.
    """
    # Prepare callable options.
    callable_opts = config_pb2.CallableOptions()
    # Handle external-data feed.
    for x in feed_arrays:
      callable_opts.feed.append(x.name)
    if self.feed_dict:
      # Sorted keys give a deterministic feed order (matched in __call__).
      for key in sorted(self.feed_dict.keys()):
        callable_opts.feed.append(key.name)
    # Handle symbolic feed.
    for x, y in zip(feed_symbols, symbol_vals):
      connection = callable_opts.tensor_connection.add()
      if x.dtype != y.dtype:
        y = math_ops.cast(y, dtype=x.dtype)
      from_tensor = ops._as_graph_element(y)
      if from_tensor is None:
        from_tensor = y
      connection.from_tensor = from_tensor.name  # Data tensor
      connection.to_tensor = x.name  # Placeholder
    # Handle fetches.
    for x in self.outputs + self.fetches:
      callable_opts.fetch.append(x.name)
    # Handle updates.
    callable_opts.target.append(self.updates_op.name)
    # Handle run_options.
    if self.run_options:
      callable_opts.run_options.CopyFrom(self.run_options)
    # Create callable.
    callable_fn = session._make_callable_from_options(callable_opts)
    # Cache parameters corresponding to the generated callable, so that
    # we can detect future mismatches and refresh the callable.
    self._callable_fn = callable_fn
    self._feed_arrays = feed_arrays
    self._feed_symbols = feed_symbols
    self._symbol_vals = symbol_vals
    self._fetches = list(self.fetches)
    self._session = session
  def _call_fetch_callbacks(self, fetches_output):
    """Invokes each registered callback with the output of its fetch."""
    for fetch, output in zip(self._fetches, fetches_output):
      if fetch in self.fetch_callbacks:
        self.fetch_callbacks[fetch](output)
  def _eval_if_composite(self, tensor):
    """Helper method which evaluates any CompositeTensors passed to it."""
    # We need to evaluate any composite tensor objects that have been
    # reconstructed in 'pack_sequence_as', since otherwise they'll be output as
    # actual CompositeTensor objects instead of the value(s) contained in the
    # CompositeTensors. E.g., if output_structure contains a SparseTensor, then
    # this ensures that we return its value as a SparseTensorValue rather than
    # a SparseTensor.
    if isinstance(tensor, composite_tensor.CompositeTensor):
      return self._session.run(tensor)
    else:
      return tensor
  def __call__(self, inputs):
    """Runs the graph and returns outputs packed like the constructor's."""
    inputs = nest.flatten(inputs, expand_composites=True)
    session = get_session(inputs)
    feed_arrays = []
    array_vals = []
    feed_symbols = []
    symbol_vals = []
    # Partition inputs into numpy-fed and symbolically-connected tensors.
    for tensor, value in zip(self.inputs, inputs):
      if value is None:
        continue
      if tensor_util.is_tensor(value):
        # Case: feeding symbolic tensor.
        feed_symbols.append(tensor)
        symbol_vals.append(value)
      else:
        # Case: feeding Numpy array.
        feed_arrays.append(tensor)
        # We need to do array conversion and type casting at this level, since
        # `callable_fn` only supports exact matches.
        tensor_type = dtypes_module.as_dtype(tensor.dtype)
        array_vals.append(np.asarray(value,
                                     dtype=tensor_type.as_numpy_dtype))
    if self.feed_dict:
      # Same sorted order as in _make_callable.
      for key in sorted(self.feed_dict.keys()):
        array_vals.append(
            np.asarray(self.feed_dict[key], dtype=key.dtype.base_dtype.name))
    # Refresh callable if anything has changed.
    if (self._callable_fn is None or feed_arrays != self._feed_arrays or
        symbol_vals != self._symbol_vals or
        feed_symbols != self._feed_symbols or self.fetches != self._fetches or
        session != self._session):
      self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)
    fetched = self._callable_fn(*array_vals,
                                run_metadata=self.run_metadata)
    # The extra fetches come last; hand them to their callbacks.
    self._call_fetch_callbacks(fetched[-len(self._fetches):])
    output_structure = nest.pack_sequence_as(
        self._outputs_structure,
        fetched[:len(self.outputs)],
        expand_composites=True)
    # We need to evaluate any composite tensor objects that have been
    # reconstructed in 'pack_sequence_as', since otherwise they'll be output as
    # actual CompositeTensor objects instead of the value(s) contained in the
    # CompositeTensors. E.g., if output_structure contains a SparseTensor, then
    # this ensures that we return its value as a SparseTensorValue rather than
    # a SparseTensor.
    return nest.map_structure(self._eval_if_composite, output_structure)
class EagerExecutionFunction(object):
  """Helper class for constructing a TF graph function from the Keras graph.

  Arguments:
      inputs: Feed placeholders to the computation graph.
      outputs: Output tensors to fetch.
      updates: Additional update ops to be run at function call.
      name: A name to help users identify what this function does.
      session_kwargs: Unsupported.
  """
  def __init__(self, inputs, outputs, updates=None, name=None):
    self.name = name
    # Keep the original (possibly nested) structures for re-packing later.
    self._inputs_structure = inputs
    inputs = nest.flatten(inputs, expand_composites=True)
    self._outputs_structure = outputs
    outputs = nest.flatten(outputs, expand_composites=True)
    updates = updates or []
    if not isinstance(updates, (list, tuple)):
      raise TypeError('`updates` in a Keras backend function '
                      'should be a list or tuple.')
    if updates and not outputs:
      # Edge case; never happens in practice
      raise ValueError('Cannot create a Keras backend function with updates'
                       ' but no outputs during eager execution.')
    # All inputs/outputs/updates must live in at most one graph.
    graphs = {
        i.graph
        for i in nest.flatten([inputs, outputs, updates])
        if hasattr(i, 'graph')
    }
    if len(graphs) > 1:
      raise ValueError('Cannot create an execution function which is comprised '
                       'of elements from multiple graphs.')
    source_graph = graphs.pop()
    global_graph = get_graph()
    updates_ops = []
    legacy_update_ops = []
    for update in updates:
      # For legacy reasons it is allowed to pass an update as a tuple
      # `(variable, new_value)` (this maps to an assign op). Otherwise it
      # is assumed to already be an op -- we cannot control its execution
      # order.
      if isinstance(update, tuple):
        legacy_update_ops.append(update)
      else:
        if hasattr(update, 'op'):
          update = update.op
        if update is not None:
          # `update.op` may have been None in certain cases.
          updates_ops.append(update)
    self._freezable_vars_to_feed = []
    self._freezable_vars_values = []
    freezable_vars_from_keras_graph = object_identity.ObjectIdentitySet(
        _FREEZABLE_VARS.get(global_graph, {}))
    with _scratch_graph() as exec_graph:
      global_graph = get_graph()
      if source_graph not in (exec_graph, global_graph):
        raise ValueError('Unknown graph. Aborting.')
      if source_graph is global_graph and exec_graph is not global_graph:
        # Lift everything reachable from the outputs/updates out of the
        # Keras graph and into the scratch execution graph.
        init_tensors = (
            outputs + updates_ops + [p for [p, _] in legacy_update_ops] +
            [p_new for [_, p_new] in legacy_update_ops
             if isinstance(p_new, ops.Tensor)])
        lifted_map = lift_to_graph.lift_to_graph(
            tensors=init_tensors,
            graph=exec_graph,
            sources=inputs,
            add_sources=True,
            handle_captures=True,
            base_graph=source_graph)
        # Remap everything to its lifted counterpart.
        inputs = [lifted_map[i] for i in inputs]
        outputs = [lifted_map[i] for i in outputs]
        updates_ops = [lifted_map[i] for i in updates_ops]
        legacy_update_ops = [(lifted_map[p], lifted_map.get(p_new, p_new))
                             for p, p_new in legacy_update_ops]
        # Keep track of the value to feed to any "freezable variables"
        # created in this graph.
        for old_op, new_op in lifted_map.items():
          if old_op in freezable_vars_from_keras_graph:
            frozen_var = old_op
            if frozen_var._initial_value != frozen_var._current_value:
              # We only feed a frozen_variable if its value has changed;
              # otherwise it can rely on the default value of the
              # underlying placeholder_with_default.
              self._freezable_vars_to_feed.append(new_op)
              self._freezable_vars_values.append(frozen_var._current_value)
      # Consolidate updates
      with exec_graph.as_default():
        outputs = cast_variables_to_tensor(outputs)
        with ops.control_dependencies(outputs):
          for p, p_new in legacy_update_ops:
            updates_ops.append(state_ops.assign(p, p_new))
        self.inputs, self.outputs = inputs, outputs
        self._input_references = self.inputs + self._freezable_vars_to_feed
        # Make output 0 depend on every update op so updates always run.
        with ops.control_dependencies(updates_ops):
          self.outputs[0] = array_ops.identity(self.outputs[0])
        exec_graph.inputs = self._input_references + exec_graph.internal_captures
        exec_graph.outputs = self.outputs
        graph_fn = eager_function.ConcreteFunction(exec_graph)
    graph_fn._num_positional_args = len(self._input_references)
    graph_fn._arg_keywords = []
    self._graph_fn = graph_fn
    # Handle placeholders with default
    # (treated as required placeholder by graph functions)
    self._placeholder_default_values = {}
    with exec_graph.as_default():
      for x in self.inputs:
        if x.op.type == 'PlaceholderWithDefault':
          self._placeholder_default_values[ops.tensor_id(
              x)] = tensor_util.constant_value(x.op.inputs[0])
  def __call__(self, inputs):
    """Executes the concrete function on `inputs`; returns numpy outputs."""
    input_values = nest.flatten(inputs, expand_composites=True)
    if self._freezable_vars_values:
      input_values = input_values + self._freezable_vars_values
    converted_inputs = []
    for tensor, value in zip(self._input_references, input_values):
      if value is None:
        # Assume `value` is a placeholder with default
        value = self._placeholder_default_values.get(
            ops.tensor_id(tensor), None)
        if value is None:
          raise ValueError(
              'You must feed a value for placeholder %s' % (tensor,))
      if not isinstance(value, ops.Tensor):
        value = ops.convert_to_tensor(value, dtype=tensor.dtype)
      if value.dtype != tensor.dtype:
        # Temporary workaround due to `convert_to_tensor` not casting floats.
        # See b/119637405
        value = math_ops.cast(value, tensor.dtype)
      converted_inputs.append(value)
    outputs = self._graph_fn(*converted_inputs)
    # EagerTensor.numpy() will often make a copy to ensure memory safety.
    # However in this case `outputs` is not directly returned, so it is always
    # safe to reuse the underlying buffer without checking. In such a case the
    # private numpy conversion method is preferred to guarantee performance.
    return nest.pack_sequence_as(
        self._outputs_structure,
        [x._numpy() for x in outputs],  # pylint: disable=protected-access
        expand_composites=True)
@keras_export('keras.backend.function')
def function(inputs, outputs, updates=None, name=None, **kwargs):
  """Instantiates a Keras function.

  Arguments:
      inputs: List of placeholder tensors.
      outputs: List of output tensors.
      updates: List of update ops.
      name: String, name of function.
      **kwargs: Passed to `tf.Session.run`.

  Returns:
      Output values as Numpy arrays.

  Raises:
      ValueError: if invalid kwargs are passed in or if in eager execution.
  """
  if ops.executing_eagerly_outside_functions():
    # Session kwargs are meaningless without a tf.Session.
    if kwargs:
      raise ValueError('Session keyword arguments are not support during '
                       'eager execution. You passed: %s' % (kwargs,))
    return EagerExecutionFunction(inputs, outputs, updates=updates, name=name)
  if kwargs:
    session_run_args = tf_inspect.getfullargspec(session_module.Session.run)[0]
    for key in kwargs:
      if (key not in session_run_args
          and key not in ['inputs', 'outputs', 'updates', 'name']):
        raise ValueError('Invalid argument "%s" passed to K.function with '
                         'TensorFlow backend' % key)
  return GraphExecutionFunction(inputs, outputs, updates=updates, **kwargs)
@keras_export('keras.backend.gradients')
def gradients(loss, variables):
  """Returns the gradients of `loss` w.r.t. `variables`.

  Arguments:
      loss: Scalar tensor to minimize.
      variables: List of variables.

  Returns:
      A gradients tensor.
  """
  # Gradients are colocated with their forward ops.
  return gradients_module.gradients(
      loss, variables, colocate_gradients_with_ops=True)
@keras_export('keras.backend.stop_gradient')
def stop_gradient(variables):
  """Returns `variables` but with zero gradient w.r.t. every other variable.

  Arguments:
      variables: Tensor or list of tensors to consider constant with respect
        to any other variable.

  Returns:
      A single tensor or a list of tensors (depending on the passed argument)
      that has no gradient with respect to any other variable.
  """
  if isinstance(variables, (list, tuple)):
    # Return an actual list, not a lazy `map` object: the documented return
    # type is a list, and a Python 3 map iterator is single-pass and does
    # not support len() or indexing.
    return [array_ops.stop_gradient(v) for v in variables]
  return array_ops.stop_gradient(variables)
# CONTROL FLOW
@keras_export('keras.backend.rnn')
def rnn(step_function,
inputs,
initial_states,
go_backwards=False,
mask=None,
constants=None,
unroll=False,
input_length=None,
time_major=False,
zero_output_for_mask=False):
"""Iterates over the time dimension of a tensor.
Arguments:
step_function: RNN step function.
Args;
input; Tensor with shape `(samples, ...)` (no time dimension),
representing input for the batch of samples at a certain
time step.
states; List of tensors.
Returns;
output; Tensor with shape `(samples, output_dim)`
(no time dimension).
new_states; List of tensors, same length and shapes
as 'states'. The first state in the list must be the
output tensor at the previous timestep.
inputs: Tensor of temporal data of shape `(samples, time, ...)`
(at least 3D), or nested tensors, and each of which has shape
`(samples, time, ...)`.
initial_states: Tensor with shape `(samples, state_size)`
(no time dimension), containing the initial values for the states used
in the step function. In the case that state_size is in a nested
shape, the shape of initial_states will also follow the nested
structure.
go_backwards: Boolean. If True, do the iteration over the time
dimension in reverse order and return the reversed sequence.
mask: Binary tensor with shape `(samples, time, 1)`,
with a zero for every element that is masked.
constants: List of constant values passed at each step.
unroll: Whether to unroll the RNN or to use a symbolic `while_loop`.
input_length: If specified, assume time dimension is of this length.
time_major: Boolean. If true, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
zero_output_for_mask: Boolean. If True, the output for masked timestep
will be zeros, whereas in the False case, output from previous
timestep is returned.
Returns:
A tuple, `(last_output, outputs, new_states)`.
last_output: the latest output of the rnn, of shape `(samples, ...)`
outputs: tensor with shape `(samples, time, ...)` where each
entry `outputs[s, t]` is the output of the step function
at time `t` for sample `s`.
new_states: list of tensors, latest states returned by
the step function, of shape `(samples, ...)`.
Raises:
ValueError: if input dimension is less than 3.
ValueError: if `unroll` is `True` but input timestep is not a fixed
number.
ValueError: if `mask` is provided (not `None`) but states is not provided
(`len(states)` == 0).
"""
def swap_batch_timestep(input_t):
# Swap the batch and timestep dim for the incoming tensor.
axes = list(range(len(input_t.shape)))
axes[0], axes[1] = 1, 0
return array_ops.transpose(input_t, axes)
if not time_major:
inputs = nest.map_structure(swap_batch_timestep, inputs)
flatted_inputs = nest.flatten(inputs)
time_steps = flatted_inputs[0].shape[0]
batch = flatted_inputs[0].shape[1]
time_steps_t = array_ops.shape(flatted_inputs[0])[0]
for input_ in flatted_inputs:
input_.shape.with_rank_at_least(3)
if mask is not None:
if mask.dtype != dtypes_module.bool:
mask = math_ops.cast(mask, dtypes_module.bool)
if len(mask.shape) == 2:
mask = expand_dims(mask)
if not time_major:
mask = swap_batch_timestep(mask)
if constants is None:
constants = []
# tf.where needs its condition tensor to be the same shape as its two
# result tensors, but in our case the condition (mask) tensor is
# (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
# So we need to broadcast the mask to match the shape of inputs.
# That's what the tile call does, it just repeats the mask along its
# second dimension n times.
def _expand_mask(mask_t, input_t, fixed_dim=1):
assert not nest.is_sequence(mask_t)
assert not nest.is_sequence(input_t)
rank_diff = len(input_t.shape) - len(mask_t.shape)
for _ in range(rank_diff):
mask_t = array_ops.expand_dims(mask_t, -1)
multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:]
return array_ops.tile(mask_t, multiples)
if unroll:
if not time_steps:
raise ValueError('Unrolling requires a fixed number of timesteps.')
states = tuple(initial_states)
successive_states = []
successive_outputs = []
# Process the input tensors. The input tensor need to be split on the
# time_step dim, and reverse if go_backwards is True. In the case of nested
# input, the input is flattened and then transformed individually.
# The result of this will be a tuple of lists, each of the item in tuple is
# list of the tensor with shape (batch, feature)
def _process_single_input_t(input_t):
input_t = array_ops.unstack(input_t) # unstack for time_step dim
if go_backwards:
input_t.reverse()
return input_t
if nest.is_sequence(inputs):
processed_input = nest.map_structure(_process_single_input_t, inputs)
else:
processed_input = (_process_single_input_t(inputs),)
def _get_input_tensor(time):
inp = [t_[time] for t_ in processed_input]
return nest.pack_sequence_as(inputs, inp)
if mask is not None:
mask_list = array_ops.unstack(mask)
if go_backwards:
mask_list.reverse()
for i in range(time_steps):
inp = _get_input_tensor(i)
mask_t = mask_list[i]
output, new_states = step_function(inp,
tuple(states) + tuple(constants))
tiled_mask_t = _expand_mask(mask_t, output)
if not successive_outputs:
prev_output = zeros_like(output)
else:
prev_output = successive_outputs[-1]
output = array_ops.where(tiled_mask_t, output, prev_output)
return_states = []
for state, new_state in zip(states, new_states):
# (see earlier comment for tile explanation)
tiled_mask_t = _expand_mask(mask_t, new_state)
return_states.append(array_ops.where(tiled_mask_t, new_state, state))
states = return_states
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
if zero_output_for_mask:
last_output = array_ops.where(
_expand_mask(mask_list[-1], last_output),
last_output,
zeros_like(last_output))
outputs = array_ops.where(
_expand_mask(mask, outputs, fixed_dim=2),
outputs,
zeros_like(outputs))
else:
for i in range(time_steps):
inp = _get_input_tensor(i)
output, states = step_function(inp, tuple(states) + tuple(constants))
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
else:
states = tuple(initial_states)
# Create input tensor array, if the inputs is nested tensors, then it will
# be flattened first, and tensor array will be created one per flattened
# tensor.
input_ta = tuple(
tensor_array_ops.TensorArray(
dtype=inp.dtype,
size=time_steps_t,
tensor_array_name='input_ta_%s' % i)
for i, inp in enumerate(flatted_inputs))
input_ta = tuple(
ta.unstack(input_) if not go_backwards else ta
.unstack(reverse(input_, 0))
for ta, input_ in zip(input_ta, flatted_inputs))
# Get the time(0) input and compute the output for that, the output will be
# used to determine the dtype of output tensor array. Don't read from
# input_ta due to TensorArray clear_after_read default to True.
input_time_zero = nest.pack_sequence_as(inputs,
[inp[0] for inp in flatted_inputs])
# output_time_zero is used to determine the cell output shape and its dtype.
# the value is discarded.
output_time_zero, _ = step_function(
input_time_zero, tuple(initial_states) + tuple(constants))
output_ta = tuple(
tensor_array_ops.TensorArray(
dtype=out.dtype,
size=time_steps_t,
element_shape=out.shape,
tensor_array_name='output_ta_%s' % i)
for i, out in enumerate(nest.flatten(output_time_zero)))
time = constant_op.constant(0, dtype='int32', name='time')
while_loop_kwargs = {
'cond': lambda time, *_: time < time_steps_t,
'maximum_iterations': input_length,
'parallel_iterations': 32,
'swap_memory': True,
}
if mask is not None:
if not states:
raise ValueError('No initial states provided! '
'When using masking in an RNN, you should '
'provide initial states '
'(and your step function should return '
'as its first state at time `t` '
'the output at time `t-1`).')
if go_backwards:
mask = reverse(mask, 0)
mask_ta = tensor_array_ops.TensorArray(
dtype=dtypes_module.bool,
size=time_steps_t,
tensor_array_name='mask_ta')
mask_ta = mask_ta.unstack(mask)
# Mask for the T output will be base on the output of T - 1. In the case
# T = 0, a zero filled tensor will be used.
flat_zero_output = tuple(array_ops.zeros_like(o)
for o in nest.flatten(output_time_zero))
def _step(time, output_ta_t, prev_output, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
prev_output: tuple of outputs from time - 1.
*states: List of states.
Returns:
Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
# maybe set shape.
current_input = nest.pack_sequence_as(inputs, current_input)
mask_t = mask_ta.read(time)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
# mask output
flat_output = nest.flatten(output)
flat_mask_output = (flat_zero_output if zero_output_for_mask
else nest.flatten(prev_output))
tiled_mask_t = tuple(_expand_mask(mask_t, o) for o in flat_output)
flat_new_output = tuple(
array_ops.where(m, o, zo) for m, o, zo in zip(
tiled_mask_t, flat_output, flat_mask_output))
# mask states
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if isinstance(new_state, ops.Tensor):
new_state.set_shape(state.shape)
tiled_mask_t = tuple(_expand_mask(mask_t, s) for s in flat_state)
flat_final_state = tuple(
array_ops.where(m, s, ps)
for m, s, ps in zip(tiled_mask_t, flat_new_state, flat_state))
new_states = nest.pack_sequence_as(new_states, flat_final_state)
output_ta_t = tuple(
ta.write(time, out)
for ta, out in zip(output_ta_t, flat_new_output))
return (time + 1, output_ta_t,
tuple(flat_new_output)) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta, flat_zero_output) + states,
**while_loop_kwargs)
# Skip final_outputs[2] which is the output for final timestep.
new_states = final_outputs[3:]
else:
def _step(time, output_ta_t, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
*states: List of states.
Returns:
Tuple: `(time + 1,output_ta_t) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
current_input = nest.pack_sequence_as(inputs, current_input)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if isinstance(new_state, ops.Tensor):
new_state.set_shape(state.shape)
flat_output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, flat_output))
new_states = nest.pack_sequence_as(initial_states, flat_new_state)
return (time + 1, output_ta_t) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta) + states,
**while_loop_kwargs)
new_states = final_outputs[2:]
output_ta = final_outputs[1]
outputs = tuple(o.stack() for o in output_ta)
last_output = tuple(o[-1] for o in outputs)
outputs = nest.pack_sequence_as(output_time_zero, outputs)
last_output = nest.pack_sequence_as(output_time_zero, last_output)
# static shape inference
def set_shape(output_):
if isinstance(output_, ops.Tensor):
shape = output_.shape.as_list()
shape[0] = time_steps
shape[1] = batch
output_.set_shape(shape)
return output_
outputs = nest.map_structure(set_shape, outputs)
if not time_major:
outputs = nest.map_structure(swap_batch_timestep, outputs)
return last_output, outputs, new_states
@keras_export('keras.backend.switch')
def switch(condition, then_expression, else_expression):
  """Switches between two operations depending on a scalar value.

  Note that both `then_expression` and `else_expression`
  should be symbolic tensors of the *same shape*.

  Arguments:
      condition: tensor (`int` or `bool`).
      then_expression: either a tensor, or a callable that returns a tensor.
      else_expression: either a tensor, or a callable that returns a tensor.

  Returns:
      The selected tensor.

  Raises:
      ValueError: If rank of `condition` is greater than rank of expressions.
  """
  if condition.dtype != dtypes_module.bool:
    condition = math_ops.cast(condition, 'bool')
  cond_ndim = ndim(condition)
  if not cond_ndim:
    # Scalar condition: `tf.cond` requires both branches to be callables.
    def _ensure_callable(expression):
      if callable(expression):
        return expression
      def _wrapped():
        return expression
      return _wrapped
    return control_flow_ops.cond(condition,
                                 _ensure_callable(then_expression),
                                 _ensure_callable(else_expression))
  # Non-scalar condition: `tf.where` needs the condition tensor broadcast
  # to the same shape as the two result tensors.
  if callable(then_expression):
    then_expression = then_expression()
  if callable(else_expression):
    else_expression = else_expression()
  expr_ndim = ndim(then_expression)
  if cond_ndim > expr_ndim:
    raise ValueError('Rank of `condition` should be less than or equal to '
                     'rank of `then_expression` and '
                     '`else_expression`. ndim(condition)=' + str(cond_ndim) +
                     ', ndim(then_expression)=' + str(expr_ndim))
  if cond_ndim > 1:
    # Pad the condition with trailing singleton dims, then tile it along
    # every dimension where the expressions are larger.
    ndim_diff = expr_ndim - cond_ndim
    cond_shape = array_ops.concat(
        [array_ops.shape(condition), [1] * ndim_diff], axis=0)
    condition = array_ops.reshape(condition, cond_shape)
    expr_shape = array_ops.shape(then_expression)
    shape_diff = expr_shape - cond_shape
    tile_shape = array_ops.where(shape_diff > 0, expr_shape,
                                 array_ops.ones_like(expr_shape))
    condition = array_ops.tile(condition, tile_shape)
  return array_ops.where(condition, then_expression, else_expression)
@keras_export('keras.backend.in_train_phase')
def in_train_phase(x, alt, training=None):
  """Selects `x` in train phase, and `alt` otherwise.

  Note that `alt` should have the *same shape* as `x`.

  Arguments:
      x: What to return in train phase
          (tensor or callable that returns a tensor).
      alt: What to return otherwise
          (tensor or callable that returns a tensor).
      training: Optional scalar tensor
          (or Python boolean, or Python integer)
          specifying the learning phase.

  Returns:
      Either `x` or `alt` based on the `training` flag.
      the `training` flag defaults to `K.learning_phase()`.
  """
  if training is None:
    training = learning_phase()
  # TODO(b/138862903): Handle the case when training is tensor.
  if not tensor_util.is_tensor(training):
    # Static Python value: resolve the branch eagerly, calling it if needed.
    if training == 1 or training is True:
      return x() if callable(x) else x
    if training == 0 or training is False:
      return alt() if callable(alt) else alt
  # else: assume learning phase is a placeholder tensor; build both branches
  # and select symbolically.
  return switch(training, x, alt)
@keras_export('keras.backend.in_test_phase')
def in_test_phase(x, alt, training=None):
  """Selects `x` in test phase, and `alt` otherwise.

  Note that `alt` should have the *same shape* as `x`.

  Arguments:
      x: What to return in test phase
          (tensor or callable that returns a tensor).
      alt: What to return otherwise
          (tensor or callable that returns a tensor).
      training: Optional scalar tensor
          (or Python boolean, or Python integer)
          specifying the learning phase.

  Returns:
      Either `x` or `alt` based on `K.learning_phase`.
  """
  # Test phase is the complement of train phase: delegate with the
  # arguments swapped.
  return in_train_phase(alt, x, training=training)
# NN OPERATIONS
@keras_export('keras.backend.relu')
def relu(x, alpha=0., max_value=None, threshold=0):
  """Rectified linear unit.

  With default values, it returns element-wise `max(x, 0)`.

  Otherwise, it follows:
  `f(x) = max_value` for `x >= max_value`,
  `f(x) = x` for `threshold <= x < max_value`,
  `f(x) = alpha * (x - threshold)` otherwise.

  Arguments:
      x: A tensor or variable.
      alpha: A scalar, slope of negative section (default=`0.`).
      max_value: float. Saturation threshold.
      threshold: float. Threshold value for thresholded activation.

  Returns:
      A tensor.
  """
  has_negative_slope = alpha != 0.
  if has_negative_slope:
    if max_value is None and threshold == 0:
      # Fast path: plain leaky ReLU, a single native op.
      return nn.leaky_relu(x, alpha=alpha)
    # Remember the (shifted) negative part; it is re-applied with slope
    # `alpha` after the positive part has been computed.
    negative_part = nn.relu(-x + threshold) if threshold != 0 else nn.relu(-x)
  needs_clipping = max_value is not None
  if threshold != 0:
    # computes x for x > threshold else 0
    x = x * math_ops.cast(math_ops.greater(x, threshold), floatx())
  elif max_value == 6:
    # if no threshold, then can use nn.relu6 native TF op for performance
    x = nn.relu6(x)
    needs_clipping = False
  else:
    x = nn.relu(x)
  if needs_clipping:
    max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
    zero = _constant_to_tensor(0., x.dtype.base_dtype)
    x = clip_ops.clip_by_value(x, zero, max_value)
  if has_negative_slope:
    alpha = _to_tensor(alpha, x.dtype.base_dtype)
    x -= alpha * negative_part
  return x
@keras_export('keras.backend.elu')
def elu(x, alpha=1.):
  """Exponential linear unit.

  Arguments:
      x: A tensor or variable to compute the activation function for.
      alpha: A scalar, slope of negative section.

  Returns:
      A tensor.
  """
  res = nn.elu(x)
  if alpha == 1:
    # Default slope matches the native op exactly.
    return res
  # Rescale only the negative section (where nn.elu already applied exp - 1).
  return array_ops.where(x > 0, res, alpha * res)
@keras_export('keras.backend.softmax')
def softmax(x, axis=-1):
  """Softmax of a tensor.

  Arguments:
      x: A tensor or variable.
      axis: The dimension softmax would be performed on.
          The default is -1 which indicates the last dimension.

  Returns:
      A tensor.
  """
  # Thin wrapper over the native op.
  return nn.softmax(x, axis=axis)
@keras_export('keras.backend.softplus')
def softplus(x):
  """Softplus of a tensor.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  # Thin wrapper over the native op.
  return nn.softplus(x)
@keras_export('keras.backend.softsign')
def softsign(x):
  """Softsign of a tensor.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  # Thin wrapper over the native op.
  return nn.softsign(x)
@keras_export('keras.backend.categorical_crossentropy')
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
  """Categorical crossentropy between an output tensor and a target tensor.

  Arguments:
      target: A tensor of the same shape as `output`.
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.
      axis: Int specifying the channels axis. `axis=-1` corresponds to data
          format `channels_last', and `axis=1` corresponds to data format
          `channels_first`.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `axis` is neither -1 nor one of the axes of `output`.

  Example:
  ```python
  import tensorflow as tf
  from tensorflow.keras import backend as K
  a = tf.constant([1., 0., 0., 0., 1., 0., 0., 0., 1.], shape=[3,3])
  print("a: ", a)
  b = tf.constant([.9, .05, .05, .5, .89, .6, .05, .01, .94], shape=[3,3])
  print("b: ", b)
  loss = K.categorical_crossentropy(a, b)
  print('Loss: ', loss) #Loss: tf.Tensor([0.10536055 0.8046684 0.06187541], shape=(3,), dtype=float32)
  loss = K.categorical_crossentropy(a, a)
  print('Loss: ', loss) #Loss: tf.Tensor([1.1920929e-07 1.1920929e-07 1.1920929e-07], shape=(3,), dtype=float32)
  ```
  """
  if not from_logits:
    # If `output` is not directly the output of a Softmax op (e.g. it is an
    # eager tensor or variable), compute crossentropy from probabilities.
    if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
        output.op.type != 'Softmax'):
      # scale preds so that the class probas of each sample sum to 1
      output = output / math_ops.reduce_sum(output, axis, True)
      # Compute cross entropy from probabilities.
      # Clip to [epsilon, 1 - epsilon] to avoid log(0).
      epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
      output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
      return -math_ops.reduce_sum(target * math_ops.log(output), axis)
    else:
      # When softmax activation function is used for output operation, we
      # use logits from the softmax function directly to compute loss in order
      # to prevent collapsing zero when training.
      # See b/117284466
      assert len(output.op.inputs) == 1
      output = output.op.inputs[0]
  # At this point `output` holds logits (either passed in or recovered from
  # the Softmax op above); use the fused, numerically stable op.
  return nn.softmax_cross_entropy_with_logits_v2(
      labels=target, logits=output, axis=axis)
@keras_export('keras.backend.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
  """Categorical crossentropy with integer targets.

  Arguments:
      target: An integer tensor.
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.
      axis: Int specifying the channels axis. `axis=-1` corresponds to data
          format `channels_last', and `axis=1` corresponds to data format
          `channels_first`.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `axis` is neither -1 nor one of the axes of `output`.
  """
  if not from_logits:
    # If `output` is not directly the output of a Softmax op, convert the
    # probabilities back to log-space (clipping first to avoid log(0)).
    if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
        output.op.type != 'Softmax'):
      epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
      output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
      output = math_ops.log(output)
    else:
      # When softmax activation function is used for output operation, we
      # use logits from the softmax function directly to compute loss in order
      # to prevent collapsing zero when training.
      # See b/117284466
      assert len(output.op.inputs) == 1
      output = output.op.inputs[0]
  if isinstance(output.shape, (tuple, list)):
    output_rank = len(output.shape)
  else:
    output_rank = output.shape.ndims
  if output_rank is not None:
    # Normalize a possibly-negative axis, then move the channels axis to the
    # last position, as required by the fused crossentropy op.
    axis %= output_rank
    if axis != output_rank - 1:
      permutation = list(
          itertools.chain(range(axis), range(axis + 1, output_rank), [axis]))
      output = array_ops.transpose(output, perm=permutation)
  elif axis != -1:
    raise ValueError(
        'Cannot compute sparse categorical crossentropy with `axis={}` on an '
        'output tensor with unknown rank'.format(axis))
  target = cast(target, 'int64')

  # Try to adjust the shape so that rank of labels = 1 - rank of logits.
  output_shape = array_ops.shape_v2(output)
  target_rank = target.shape.ndims

  update_shape = (
      target_rank is not None and output_rank is not None and
      target_rank != output_rank - 1)
  if update_shape:
    # Collapse all leading dimensions so labels are 1-D and logits are 2-D.
    target = flatten(target)
    output = array_ops.reshape(output, [-1, output_shape[-1]])

  if py_any([_is_symbolic_tensor(v) for v in [target, output]]):
    # Symbolic tensors must be handled inside the Keras graph.
    with get_graph().as_default():
      res = nn.sparse_softmax_cross_entropy_with_logits_v2(
          labels=target, logits=output)
  else:
    res = nn.sparse_softmax_cross_entropy_with_logits_v2(
        labels=target, logits=output)

  if update_shape and output_rank >= 3:
    # If our output includes timesteps or spatial dimensions we need to reshape
    return array_ops.reshape(res, output_shape[:-1])
  else:
    return res
@keras_export('keras.backend.binary_crossentropy')
def binary_crossentropy(target, output, from_logits=False):
  """Binary crossentropy between an output tensor and a target tensor.

  Arguments:
      target: A tensor with the same shape as `output`.
      output: A tensor.
      from_logits: Whether `output` is expected to be a logits tensor.
          By default, we consider that `output`
          encodes a probability distribution.

  Returns:
      A tensor.
  """
  if from_logits:
    return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
  if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
      output.op.type != 'Sigmoid'):
    # `output` holds probabilities from an unknown source; clip away from
    # 0 and 1 so the logs below stay finite.
    epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
    output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
    # Compute cross entropy from probabilities.
    bce = target * math_ops.log(output + epsilon())
    bce += (1 - target) * math_ops.log(1 - output + epsilon())
    return -bce
  # When sigmoid activation function is used for output operation, we
  # use logits from the sigmoid function directly to compute loss in order
  # to prevent collapsing zero when training.
  assert len(output.op.inputs) == 1
  output = output.op.inputs[0]
  return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
@keras_export('keras.backend.sigmoid')
def sigmoid(x):
  """Element-wise sigmoid.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  # Thin wrapper over the native op.
  return nn.sigmoid(x)
@keras_export('keras.backend.hard_sigmoid')
def hard_sigmoid(x):
  """Segment-wise linear approximation of sigmoid.

  Faster than sigmoid.
  Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
  In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  # Piecewise-linear approximation: clip(0.2 * x + 0.5, 0, 1).
  slope = _constant_to_tensor(0.2, x.dtype.base_dtype)
  shift = _constant_to_tensor(0.5, x.dtype.base_dtype)
  x = math_ops.add(math_ops.mul(x, slope), shift)
  return clip_ops.clip_by_value(x, 0., 1.)
@keras_export('keras.backend.tanh')
def tanh(x):
  """Element-wise tanh.

  Arguments:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  # Thin wrapper over the native op.
  return nn.tanh(x)
@keras_export('keras.backend.dropout')
def dropout(x, level, noise_shape=None, seed=None):
  """Sets entries in `x` to zero at random, while scaling the entire tensor.

  Arguments:
      x: tensor
      level: fraction of the entries in the tensor
          that will be set to 0.
      noise_shape: shape for randomly generated keep/drop flags,
          must be broadcastable to the shape of `x`
      seed: random seed to ensure determinism.

  Returns:
      A tensor.
  """
  if seed is None:
    # No explicit seed: draw a random op-level seed so calls differ.
    seed = np.random.randint(10e6)
  return nn.dropout_v2(x, rate=level, noise_shape=noise_shape, seed=seed)
@keras_export('keras.backend.l2_normalize')
def l2_normalize(x, axis=None):
  """Normalizes a tensor wrt the L2 norm alongside the specified axis.

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform normalization.

  Returns:
      A tensor.
  """
  # Thin wrapper over the native op.
  return nn.l2_normalize(x, axis=axis)
@keras_export('keras.backend.in_top_k')
def in_top_k(predictions, targets, k):
  """Returns whether the `targets` are in the top `k` `predictions`.

  Arguments:
      predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
      targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
      k: An `int`, number of top elements to consider.

  Returns:
      A 1D tensor of length `batch_size` and type `bool`.
      `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
      values of `predictions[i]`.
  """
  # Thin wrapper over the native op.
  return nn.in_top_k(predictions, targets, k)
# CONVOLUTIONS
def _preprocess_conv1d_input(x, data_format):
  """Transpose and cast the input before the conv1d.

  Arguments:
      x: input tensor.
      data_format: string, `"channels_last"` or `"channels_first"`.

  Returns:
      A tensor.
  """
  tf_data_format = 'NWC'  # to pass TF Conv2dNative operations
  if data_format == 'channels_first':
    if _has_nchw_support():
      # The backend can run NCW natively; only the format string changes.
      tf_data_format = 'NCW'
    else:
      x = array_ops.transpose(x, (0, 2, 1))  # NCW -> NWC
  return x, tf_data_format
def _preprocess_conv2d_input(x, data_format, force_transpose=False):
  """Transpose and cast the input before the conv2d.

  Arguments:
      x: input tensor.
      data_format: string, `"channels_last"` or `"channels_first"`.
      force_transpose: Boolean. If True, the input will always be transposed
          from NCHW to NHWC if `data_format` is `"channels_first"`.
          If False, the transposition only occurs on CPU (GPU ops are
          assumed to support NCHW).

  Returns:
      A tensor.
  """
  tf_data_format = 'NHWC'
  if data_format == 'channels_first':
    if _has_nchw_support() and not force_transpose:
      # The backend can run NCHW natively; only the format string changes.
      tf_data_format = 'NCHW'
    else:
      x = array_ops.transpose(x, (0, 2, 3, 1))  # NCHW -> NHWC
  return x, tf_data_format
def _preprocess_conv3d_input(x, data_format):
  """Transpose and cast the input before the conv3d.

  Arguments:
      x: input tensor.
      data_format: string, `"channels_last"` or `"channels_first"`.

  Returns:
      A tensor.
  """
  tf_data_format = 'NDHWC'
  if data_format == 'channels_first':
    if _has_nchw_support():
      # The backend can run NCDHW natively; only the format string changes.
      tf_data_format = 'NCDHW'
    else:
      x = array_ops.transpose(x, (0, 2, 3, 4, 1))  # NCDHW -> NDHWC
  return x, tf_data_format
def _preprocess_padding(padding):
"""Convert keras' padding to TensorFlow's padding.
Arguments:
padding: string, one of 'same' , 'valid'
Returns:
a string, one of 'SAME', 'VALID'.
Raises:
ValueError: if invalid `padding'`
"""
if padding == 'same':
padding = 'SAME'
elif padding == 'valid':
padding = 'VALID'
else:
raise ValueError('Invalid padding: ' + str(padding))
return padding
@keras_export('keras.backend.conv1d')
def conv1d(x,
           kernel,
           strides=1,
           padding='valid',
           data_format=None,
           dilation_rate=1):
  """1D convolution.

  Arguments:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: stride integer.
      padding: string, `"same"`, `"causal"` or `"valid"`.
      data_format: string, one of "channels_last", "channels_first".
      dilation_rate: integer dilate rate.

  Returns:
      A tensor, result of 1D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  if padding == 'causal':
    # causal (dilated) convolution: left-pad so no output position sees
    # "future" timesteps, then run a plain valid convolution.
    kernel_shape = kernel.shape.as_list()
    left_pad = dilation_rate * (kernel_shape[0] - 1)
    x = temporal_padding(x, (left_pad, 0))
    padding = 'valid'
  x, tf_data_format = _preprocess_conv1d_input(x, data_format)
  x = nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=_preprocess_padding(padding),
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NWC':
    # Convolution ran in NWC; restore the caller's NCW layout.
    x = array_ops.transpose(x, (0, 2, 1))  # NWC -> NCW
  return x
@keras_export('keras.backend.conv2d')
def conv2d(x,
           kernel,
           strides=(1, 1),
           padding='valid',
           data_format=None,
           dilation_rate=(1, 1)):
  """2D convolution.

  Arguments:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of 2 integers.

  Returns:
      A tensor, result of 2D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  x = nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=_preprocess_padding(padding),
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    # Convolution ran in NHWC; restore the caller's NCHW layout.
    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
@keras_export('keras.backend.conv2d_transpose')
def conv2d_transpose(x,
                     kernel,
                     output_shape,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None,
                     dilation_rate=(1, 1)):
  """2D deconvolution (i.e.

  transposed convolution).

  Arguments:
      x: Tensor or variable.
      kernel: kernel tensor.
      output_shape: 1D int tensor for the output shape.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: Tuple of 2 integers.

  Returns:
      A tensor, result of transposed 2D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  # `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
  if data_format == 'channels_first' and dilation_rate != (1, 1):
    force_transpose = True
  else:
    force_transpose = False

  x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)

  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    # Input was transposed NCHW -> NHWC, so the requested output shape must
    # be permuted the same way.
    output_shape = (output_shape[0], output_shape[2], output_shape[3],
                    output_shape[1])
  if output_shape[0] is None:
    # Unknown batch size: take it from the input tensor at runtime.
    output_shape = (shape(x)[0],) + tuple(output_shape[1:])

  if isinstance(output_shape, (tuple, list)):
    # The underlying ops expect a 1-D tensor, not a Python sequence.
    output_shape = array_ops.stack(list(output_shape))

  padding = _preprocess_padding(padding)
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides

  if dilation_rate == (1, 1):
    x = nn.conv2d_transpose(x, kernel, output_shape, strides,
                            padding=padding,
                            data_format=tf_data_format)
  else:
    # Dilated path; the op only supports a single symmetric rate.
    assert dilation_rate[0] == dilation_rate[1]
    x = nn.atrous_conv2d_transpose(
        x,
        kernel,
        output_shape,
        rate=dilation_rate[0],
        padding=padding)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    # Restore the caller's NCHW layout.
    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
def separable_conv1d(x,
                     depthwise_kernel,
                     pointwise_kernel,
                     strides=1,
                     padding='valid',
                     data_format=None,
                     dilation_rate=1):
  """1D convolution with separable filters.

  Implemented by inserting a dummy spatial dimension and delegating to
  `nn.separable_conv2d`, since TF has no native separable 1D op.

  Arguments:
      x: input tensor
      depthwise_kernel: convolution kernel for the depthwise convolution.
      pointwise_kernel: kernel for the 1x1 convolution.
      strides: stride integer.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: integer dilation rate.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  # Normalize scalar arguments to 1-tuples.
  if isinstance(strides, int):
    strides = (strides,)
  if isinstance(dilation_rate, int):
    dilation_rate = (dilation_rate,)

  x, tf_data_format = _preprocess_conv1d_input(x, data_format)
  padding = _preprocess_padding(padding)
  if not isinstance(strides, tuple):
    strides = tuple(strides)
  # Duplicate the single spatial stride for the dummy 2D dimension, and
  # remember where that dummy dimension will be inserted.
  if tf_data_format == 'NWC':
    spatial_start_dim = 1
    strides = (1,) + strides * 2 + (1,)
  else:
    spatial_start_dim = 2
    strides = (1, 1) + strides * 2
  # Lift input and kernels to 2D by adding a size-1 spatial dimension.
  x = array_ops.expand_dims(x, spatial_start_dim)
  depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)
  pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)
  dilation_rate = (1,) + dilation_rate

  x = nn.separable_conv2d(
      x,
      depthwise_kernel,
      pointwise_kernel,
      strides=strides,
      padding=padding,
      rate=dilation_rate,
      data_format=tf_data_format)

  # Drop the dummy spatial dimension again.
  x = array_ops.squeeze(x, [spatial_start_dim])

  if data_format == 'channels_first' and tf_data_format == 'NWC':
    x = array_ops.transpose(x, (0, 2, 1))  # NWC -> NCW

  return x
@keras_export('keras.backend.separable_conv2d')
def separable_conv2d(x,
                     depthwise_kernel,
                     pointwise_kernel,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None,
                     dilation_rate=(1, 1)):
  """2D convolution with separable filters.

  Arguments:
      x: input tensor
      depthwise_kernel: convolution kernel for the depthwise convolution.
      pointwise_kernel: kernel for the 1x1 convolution.
      strides: strides tuple (length 2).
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of integers,
          dilation rates for the separable convolution.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
      ValueError: if `strides` is not a tuple of 2 integers.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  if len(strides) != 2:
    raise ValueError('`strides` must be a tuple of 2 integers.')
  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  # Build the 4-element strides expected by the native op, with the batch
  # and channel positions set to 1.
  strides = tuple(strides)
  strides = ((1,) + strides + (1,) if tf_data_format == 'NHWC'
             else (1, 1) + strides)
  x = nn.separable_conv2d(
      x,
      depthwise_kernel,
      pointwise_kernel,
      strides=strides,
      padding=padding,
      rate=dilation_rate,
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    # Convolution ran in NHWC; restore the caller's NCHW layout.
    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
def depthwise_conv2d(x,
                     depthwise_kernel,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None,
                     dilation_rate=(1, 1)):
  """Depthwise 2D convolution (one filter per input channel).

  Unlike `separable_conv2d`, no pointwise 1x1 convolution is applied; only
  the depthwise stage runs.

  Arguments:
      x: input tensor
      depthwise_kernel: convolution kernel for the depthwise convolution.
      strides: strides tuple (length 2).
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of integers,
          dilation rates for the separable convolution.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  # NOTE(review): unlike the neighboring conv helpers, this function carries
  # no @keras_export decorator — confirm whether that is intentional.
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  # Expand the 2 spatial strides to rank 4 (batch/channel strides of 1).
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides
  x = nn.depthwise_conv2d(
      x,
      depthwise_kernel,
      strides=strides,
      padding=padding,
      rate=dilation_rate,
      data_format=tf_data_format)
  # Convert back to channels_first if the op ran in NHWC.
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
@keras_export('keras.backend.conv3d')
def conv3d(x,
           kernel,
           strides=(1, 1, 1),
           padding='valid',
           data_format=None,
           dilation_rate=(1, 1, 1)):
  """3D convolution.

  Arguments:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of 3 integers.

  Returns:
      A tensor, result of 3D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
  tf_padding = _preprocess_padding(padding)
  out = nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=tf_padding,
      data_format=tf_data_format)
  # If the op ran channels-last on behalf of a channels_first caller,
  # restore the caller's layout.
  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    out = array_ops.transpose(out, (0, 4, 1, 2, 3))  # NDHWC -> NCDHW
  return out
def conv3d_transpose(x,
                     kernel,
                     output_shape,
                     strides=(1, 1, 1),
                     padding='valid',
                     data_format=None):
  """3D deconvolution (i.e.

  transposed convolution).

  Arguments:
      x: input tensor.
      kernel: kernel tensor.
      output_shape: 1D int tensor for the output shape.
      strides: strides tuple.
      padding: string, "same" or "valid".
      data_format: string, `"channels_last"` or `"channels_first"`.

  Returns:
      A tensor, result of transposed 3D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  if isinstance(output_shape, (tuple, list)):
    output_shape = array_ops.stack(output_shape)
  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
  # Caller supplied a channels-first shape but the op will run NDHWC:
  # reorder the requested output shape to match (NCDHW -> NDHWC).
  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    output_shape = (output_shape[0], output_shape[2], output_shape[3],
                    output_shape[4], output_shape[1])
  # Dynamic batch dimension: take it from the input tensor at runtime.
  if output_shape[0] is None:
    output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
    output_shape = array_ops.stack(list(output_shape))
  padding = _preprocess_padding(padding)
  # Expand the 3 spatial strides to rank 5 (batch/channel strides of 1).
  if tf_data_format == 'NDHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides
  x = nn.conv3d_transpose(
      x,
      kernel,
      output_shape,
      strides,
      padding=padding,
      data_format=tf_data_format)
  # Restore the caller's channels_first layout if the op ran NDHWC.
  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    x = array_ops.transpose(x, (0, 4, 1, 2, 3))
  return x
@keras_export('keras.backend.pool2d')
def pool2d(x,
           pool_size,
           strides=(1, 1),
           padding='valid',
           data_format=None,
           pool_mode='max'):
  """2D Pooling.

  Arguments:
      x: Tensor or variable.
      pool_size: tuple of 2 integers.
      strides: tuple of 2 integers.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      pool_mode: string, `"max"` or `"avg"`.

  Returns:
      A tensor, result of 2D pooling.

  Raises:
      ValueError: if `data_format` is neither `"channels_last"` or
          `"channels_first"`.
      ValueError: if `pool_size` is not a tuple of 2 integers.
      ValueError: if `strides` is not a tuple of 2 integers.
      ValueError: if `pool_mode` is neither `"max"` or `"avg"`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  if len(pool_size) != 2:
    raise ValueError('`pool_size` must be a tuple of 2 integers.')
  if len(strides) != 2:
    raise ValueError('`strides` must be a tuple of 2 integers.')

  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  # Expand window and strides to rank 4 (batch/channel entries of 1).
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
    pool_size = (1,) + pool_size + (1,)
  else:
    strides = (1, 1) + strides
    pool_size = (1, 1) + pool_size

  # Select the pooling op first so an invalid mode fails before any work.
  if pool_mode == 'max':
    pool_op = nn.max_pool
  elif pool_mode == 'avg':
    pool_op = nn.avg_pool
  else:
    raise ValueError('Invalid pooling mode: ' + str(pool_mode))
  x = pool_op(
      x, pool_size, strides, padding=padding, data_format=tf_data_format)

  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
@keras_export('keras.backend.pool3d')
def pool3d(x,
           pool_size,
           strides=(1, 1, 1),
           padding='valid',
           data_format=None,
           pool_mode='max'):
  """3D Pooling.

  Arguments:
      x: Tensor or variable.
      pool_size: tuple of 3 integers.
      strides: tuple of 3 integers.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      pool_mode: string, `"max"` or `"avg"`.

  Returns:
      A tensor, result of 3D pooling.

  Raises:
      ValueError: if `data_format` is neither `"channels_last"` or
          `"channels_first"`.
      ValueError: if `pool_mode` is neither `"max"` or `"avg"`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
  padding = _preprocess_padding(padding)
  # Expand window and strides to rank 5 (batch/channel entries of 1).
  if tf_data_format == 'NDHWC':
    strides = (1,) + strides + (1,)
    pool_size = (1,) + pool_size + (1,)
  else:
    strides = (1, 1) + strides
    pool_size = (1, 1) + pool_size

  # Pick the op via dispatch so an invalid mode is rejected up front.
  if pool_mode == 'max':
    pool_op = nn.max_pool3d
  elif pool_mode == 'avg':
    pool_op = nn.avg_pool3d
  else:
    raise ValueError('Invalid pooling mode: ' + str(pool_mode))
  x = pool_op(
      x, pool_size, strides, padding=padding, data_format=tf_data_format)

  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    x = array_ops.transpose(x, (0, 4, 1, 2, 3))
  return x
def local_conv(inputs,
               kernel,
               kernel_size,
               strides,
               output_shape,
               data_format=None):
  """Apply N-D convolution with un-shared weights.

  Arguments:
      inputs: (N+2)-D tensor with shape
          (batch_size, channels_in, d_in1, ..., d_inN)
          if data_format='channels_first', or
          (batch_size, d_in1, ..., d_inN, channels_in)
          if data_format='channels_last'.
      kernel: the unshared weight for N-D convolution,
          with shape (output_items, feature_dim, channels_out), where
          feature_dim = np.prod(kernel_size) * channels_in,
          output_items = np.prod(output_shape).
      kernel_size: a tuple of N integers, specifying the
          spatial dimensions of the N-D convolution window.
      strides: a tuple of N integers, specifying the strides
          of the convolution along the spatial dimensions.
      output_shape: a tuple of (d_out1, ..., d_outN) specifying the spatial
          dimensionality of the output.
      data_format: string, "channels_first" or "channels_last".

  Returns:
      An (N+2)-D tensor with shape:
      (batch_size, channels_out) + output_shape
      if data_format='channels_first', or:
      (batch_size,) + output_shape + (channels_out,)
      if data_format='channels_last'.

  Raises:
      ValueError: if `data_format` is neither
      `channels_last` nor `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  kernel_shape = int_shape(kernel)
  feature_dim = kernel_shape[1]
  channels_out = kernel_shape[-1]
  ndims = len(output_shape)
  spatial_dimensions = list(range(ndims))

  xs = []
  # Enumerate every spatial output position and gather the receptive field
  # feeding it; each patch is flattened to (1, batch, feature_dim).
  output_axes_ticks = [range(axis_max) for axis_max in output_shape]
  for position in itertools.product(*output_axes_ticks):
    slices = [slice(None)]
    if data_format == 'channels_first':
      slices.append(slice(None))
    slices.extend([slice(position[d] * strides[d],
                         position[d] * strides[d] + kernel_size[d])
                   for d in spatial_dimensions])
    if data_format == 'channels_last':
      slices.append(slice(None))
    # Index with a tuple of slices: indexing a tensor with a *list* is
    # deprecated and is interpreted as advanced indexing by some backends.
    xs.append(reshape(inputs[tuple(slices)], (1, -1, feature_dim)))
  x_aggregate = concatenate(xs, axis=0)
  # One independent matmul per output position: (positions, batch, features)
  # x (positions, features, channels_out) -> (positions, batch, channels_out).
  output = batch_dot(x_aggregate, kernel)
  output = reshape(output, output_shape + (-1, channels_out))

  # Move batch (and channels) back to their conventional places.
  if data_format == 'channels_first':
    permutation = [ndims, ndims + 1] + spatial_dimensions
  else:
    permutation = [ndims] + spatial_dimensions + [ndims + 1]
  return permute_dimensions(output, permutation)
@keras_export('keras.backend.local_conv1d')
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
  """Apply 1D conv with un-shared weights.

  Arguments:
      inputs: 3D tensor with shape:
          (batch_size, steps, input_dim)
          if data_format is "channels_last" or
          (batch_size, input_dim, steps)
          if data_format is "channels_first".
      kernel: the unshared weight for convolution,
          with shape (output_length, feature_dim, filters).
      kernel_size: a tuple of a single integer,
          specifying the length of the 1D convolution window.
      strides: a tuple of a single integer,
          specifying the stride length of the convolution.
      data_format: the data format, channels_first or channels_last.

  Returns:
      A 3d tensor with shape:
      (batch_size, filters, output_length)
      if data_format='channels_first'
      or 3D tensor with shape:
      (batch_size, output_length, filters)
      if data_format='channels_last'.
  """
  # The spatial output length is the kernel's first dimension.
  output_shape = (kernel.shape[0],)
  return local_conv(inputs,
                    kernel,
                    kernel_size,
                    strides,
                    output_shape,
                    data_format)
@keras_export('keras.backend.local_conv2d')
def local_conv2d(inputs,
                 kernel,
                 kernel_size,
                 strides,
                 output_shape,
                 data_format=None):
  """Apply 2D conv with un-shared weights.

  Thin wrapper around `local_conv` for the 2D case.

  Arguments:
      inputs: 4D tensor with shape:
          (batch_size, filters, new_rows, new_cols)
          if data_format='channels_first'
          or 4D tensor with shape:
          (batch_size, new_rows, new_cols, filters)
          if data_format='channels_last'.
      kernel: the unshared weight for convolution,
          with shape (output_items, feature_dim, filters).
      kernel_size: a tuple of 2 integers, specifying the
          width and height of the 2D convolution window.
      strides: a tuple of 2 integers, specifying the strides
          of the convolution along the width and height.
      output_shape: a tuple with (output_row, output_col).
      data_format: the data format, channels_first or channels_last.

  Returns:
      A 4D tensor with shape:
      (batch_size, filters, new_rows, new_cols)
      if data_format='channels_first'
      or 4D tensor with shape:
      (batch_size, new_rows, new_cols, filters)
      if data_format='channels_last'.
  """
  # Delegates all validation and computation to the N-D implementation.
  return local_conv(
      inputs, kernel, kernel_size, strides, output_shape, data_format)
@keras_export('keras.backend.bias_add')
def bias_add(x, bias, data_format=None):
  """Adds a bias vector to a tensor.

  Arguments:
      x: Tensor or variable.
      bias: Bias tensor to add.
      data_format: string, `"channels_last"` or `"channels_first"`.

  Returns:
      Output tensor.

  Raises:
      ValueError: In one of the two cases below:
                  1. invalid `data_format` argument.
                  2. invalid bias shape.
                     the bias should be either a vector or
                     a tensor with ndim(x) - 1 dimension
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  bias_shape = int_shape(bias)
  if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
    # The accepted ranks are 1 and ndim(x) - 1; report ndim(x) - 1 (the
    # previous message was off by one, showing ndim(x)).
    raise ValueError(
        'Unexpected bias dimensions %d, expect to be 1 or %d dimensions' %
        (len(bias_shape), ndim(x) - 1))
  # Explicit additions (rather than augmented assignment) keep gradients
  # flowing through a fresh op; reshape makes the bias broadcastable.
  # pylint: disable=g-no-augmented-assignment
  if ndim(x) == 5:
    if data_format == 'channels_first':
      if len(bias_shape) == 1:
        x = x + reshape(bias, (1, bias_shape[0], 1, 1, 1))
      else:
        x = x + reshape(bias, (1, bias_shape[3]) + bias_shape[:3])
    elif data_format == 'channels_last':
      if len(bias_shape) == 1:
        x = x + reshape(bias, (1, 1, 1, bias_shape[0]))
      else:
        x = x + reshape(bias, (1,) + bias_shape)
  elif ndim(x) == 4:
    if data_format == 'channels_first':
      if len(bias_shape) == 1:
        if _has_nchw_support():
          # Fast path: fused bias-add when the device supports NCHW.
          x = nn.bias_add(x, bias, data_format='NCHW')
        else:
          x = x + reshape(bias, (1, bias_shape[0], 1, 1))
      else:
        x = x + reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
    elif data_format == 'channels_last':
      if len(bias_shape) == 1:
        x = nn.bias_add(x, bias, data_format='NHWC')
      else:
        x = x + reshape(bias, (1,) + bias_shape)
  elif ndim(x) == 3:
    if data_format == 'channels_first':
      if len(bias_shape) == 1:
        x = x + reshape(bias, (1, bias_shape[0], 1))
      else:
        x = x + reshape(bias, (1, bias_shape[1], bias_shape[0]))
    elif data_format == 'channels_last':
      if len(bias_shape) == 1:
        x = x + reshape(bias, (1, 1, bias_shape[0]))
      else:
        x = x + reshape(bias, (1,) + bias_shape)
  else:
    x = nn.bias_add(x, bias)
  # pylint: enable=g-no-augmented-assignment
  return x
# RANDOMNESS
@keras_export('keras.backend.random_normal')
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
  """Returns a tensor with normal distribution of values.

  Arguments:
      shape: A tuple of integers, the shape of tensor to create.
      mean: A float, mean of the normal distribution to draw samples.
      stddev: A float, standard deviation of the normal distribution
          to draw samples.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.

  Returns:
      A tensor.
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    # Draw a fresh op-level seed; np.random.randint documents integer
    # bounds, so pass an int (10e6 is a float literal).
    seed = np.random.randint(int(10e6))
  return random_ops.random_normal(
      shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_uniform')
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
  """Returns a tensor with uniform distribution of values.

  Arguments:
      shape: A tuple of integers, the shape of tensor to create.
      minval: A float, lower boundary of the uniform distribution
          to draw samples.
      maxval: A float, upper boundary of the uniform distribution
          to draw samples.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.

  Returns:
      A tensor.
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    # np.random.randint documents integer bounds; 10e6 is a float literal,
    # so convert explicitly.
    seed = np.random.randint(int(10e6))
  return random_ops.random_uniform(
      shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_binomial')
def random_binomial(shape, p=0.0, dtype=None, seed=None):
  """Returns a tensor with random binomial distribution of values.

  The binomial distribution with parameters `n` and `p` is the probability
  distribution of the number of successful Bernoulli process. Only supports
  `n` = 1 for now.

  Arguments:
      shape: A tuple of integers, the shape of tensor to create.
      p: A float, `0. <= p <= 1`, probability of binomial distribution.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.

  Returns:
      A tensor.
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    # np.random.randint documents integer bounds; pass an explicit int.
    seed = np.random.randint(int(10e6))
  # Each element is 1 with probability p, else 0: sample U(0,1) and
  # threshold at p.
  return array_ops.where(
      random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,
      array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))
@keras_export('keras.backend.truncated_normal')
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
  """Returns a tensor with truncated random normal distribution of values.

  The generated values follow a normal distribution
  with specified mean and standard deviation,
  except that values whose magnitude is more than
  two standard deviations from the mean are dropped and re-picked.

  Arguments:
      shape: A tuple of integers, the shape of tensor to create.
      mean: Mean of the values.
      stddev: Standard deviation of the values.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.

  Returns:
      A tensor.
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    # np.random.randint documents integer bounds; pass an explicit int.
    seed = np.random.randint(int(10e6))
  return random_ops.truncated_normal(
      shape, mean, stddev, dtype=dtype, seed=seed)
# CTC
# TensorFlow has a native implementation, but it uses sparse tensors
# and therefore requires a wrapper for Keras. The functions below convert
# dense to sparse tensors and also wraps up the beam search code that is
# in TensorFlow's CTC implementation
@keras_export('keras.backend.ctc_label_dense_to_sparse')
def ctc_label_dense_to_sparse(labels, label_lengths):
  """Converts CTC labels from dense to sparse.

  Arguments:
      labels: dense CTC labels.
      label_lengths: length of the labels.

  Returns:
      A sparse tensor representation of the labels.
  """
  label_shape = array_ops.shape(labels)
  # 1-D tensors [batch] and [max_label_len], used as fill/tile multiples.
  num_batches_tns = array_ops.stack([label_shape[0]])
  max_num_labels_tns = array_ops.stack([label_shape[1]])

  def range_less_than(old_input, current_input):
    # Produces a [1, max_label_len] row that is True at positions strictly
    # below `current_input` (this batch item's label length).
    return array_ops.expand_dims(
        math_ops.range(array_ops.shape(old_input)[1]), 0) < array_ops.fill(
            max_num_labels_tns, current_input)

  init = math_ops.cast(
      array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)
  # Scan over label_lengths to build one validity row per batch item;
  # parallel_iterations=1 keeps the scan sequential/deterministic.
  dense_mask = functional_ops.scan(
      range_less_than, label_lengths, initializer=init, parallel_iterations=1)
  # Drop the initializer-induced middle axis: [batch, 1, len] -> [batch, len].
  dense_mask = dense_mask[:, 0, :]
  # Column indices (0..max_label_len-1) tiled to the full [batch, len] grid.
  label_array = array_ops.reshape(
      array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),
      label_shape)
  label_ind = array_ops.boolean_mask(label_array, dense_mask)
  # Row (batch) indices for the same grid, built transposed then flipped.
  batch_array = array_ops.transpose(
      array_ops.reshape(
          array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns),
          reverse(label_shape, 0)))
  batch_ind = array_ops.boolean_mask(batch_array, dense_mask)
  # Zip (batch, label) index pairs into the [nnz, 2] layout SparseTensor wants.
  indices = array_ops.transpose(
      array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))
  vals_sparse = array_ops.gather_nd(labels, indices)
  return sparse_tensor.SparseTensor(
      math_ops.cast(indices, dtypes_module.int64), vals_sparse,
      math_ops.cast(label_shape, dtypes_module.int64))
@keras_export('keras.backend.ctc_batch_cost')
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
  """Runs CTC loss algorithm on each batch element.

  Arguments:
      y_true: tensor `(samples, max_string_length)`
          containing the truth labels.
      y_pred: tensor `(samples, time_steps, num_categories)`
          containing the prediction, or output of the softmax.
      input_length: tensor `(samples, 1)` containing the sequence length for
          each batch item in `y_pred`.
      label_length: tensor `(samples, 1)` containing the sequence length for
          each batch item in `y_true`.

  Returns:
      Tensor with shape (samples,1) containing the
          CTC loss of each element.
  """
  # Lengths arrive as (samples, 1); squeeze to rank 1 and cast to int32.
  label_length = math_ops.cast(
      array_ops.squeeze(label_length, axis=-1), dtypes_module.int32)
  input_length = math_ops.cast(
      array_ops.squeeze(input_length, axis=-1), dtypes_module.int32)
  sparse_labels = math_ops.cast(
      ctc_label_dense_to_sparse(y_true, label_length), dtypes_module.int32)

  # ctc_loss wants time-major log-probabilities; epsilon guards log(0).
  y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())

  loss = ctc.ctc_loss(
      inputs=y_pred, labels=sparse_labels, sequence_length=input_length)
  # Restore the (samples, 1) shape callers expect.
  return array_ops.expand_dims(loss, 1)
@keras_export('keras.backend.ctc_decode')
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
  """Decodes the output of a softmax.

  Can use either greedy search (also known as best path)
  or a constrained dictionary search.

  Arguments:
      y_pred: tensor `(samples, time_steps, num_categories)`
          containing the prediction, or output of the softmax.
      input_length: tensor `(samples, )` containing the sequence length for
          each batch item in `y_pred`.
      greedy: perform much faster best-path search if `true`.
          This does not use a dictionary.
      beam_width: if `greedy` is `false`: a beam search decoder will be used
          with a beam of this width.
      top_paths: if `greedy` is `false`,
          how many of the most probable paths will be returned.

  Returns:
      Tuple:
          List: if `greedy` is `true`, returns a list of one element that
              contains the decoded sequence.
              If `false`, returns the `top_paths` most probable
              decoded sequences.
              Important: blank labels are returned as `-1`.
          Tensor `(top_paths, )` that contains
              the log probability of each decoded sequence.
  """
  # The decoders expect time-major log-probabilities; epsilon guards log(0).
  y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
  input_length = math_ops.cast(input_length, dtypes_module.int32)

  if greedy:
    decoded, log_prob = ctc.ctc_greedy_decoder(
        inputs=y_pred, sequence_length=input_length)
  else:
    decoded, log_prob = ctc.ctc_beam_search_decoder(
        inputs=y_pred,
        sequence_length=input_length,
        beam_width=beam_width,
        top_paths=top_paths)

  # Densify each decoded path, marking blanks/padding with -1.
  decoded_dense = []
  for st in decoded:
    dense = sparse_ops.sparse_to_dense(
        st.indices, st.dense_shape, st.values, default_value=-1)
    decoded_dense.append(dense)
  return (decoded_dense, log_prob)
# HIGH ORDER FUNCTIONS
@keras_export('keras.backend.map_fn')
def map_fn(fn, elems, name=None, dtype=None):
  """Map the function fn over the elements elems and return the outputs.

  Arguments:
      fn: Callable that will be called upon each element in elems
      elems: tensor
      name: A string name for the map node in the graph
      dtype: Output data type.

  Returns:
      Tensor with dtype `dtype`.
  """
  # Pure delegation to TF's map_fn implementation.
  return map_fn_lib.map_fn(fn, elems, dtype=dtype, name=name)
@keras_export('keras.backend.foldl')
def foldl(fn, elems, initializer=None, name=None):
  """Reduce elems using fn to combine them from left to right.

  Arguments:
      fn: Callable that will be called upon each element in elems and an
          accumulator, for instance `lambda acc, x: acc + x`
      elems: tensor
      initializer: The first value used (`elems[0]` in case of None)
      name: A string name for the foldl node in the graph

  Returns:
      Tensor with same type and shape as `initializer`.
  """
  # Pure delegation to TF's left fold.
  return functional_ops.foldl(fn, elems, name=name, initializer=initializer)
@keras_export('keras.backend.foldr')
def foldr(fn, elems, initializer=None, name=None):
  """Reduce elems using fn to combine them from right to left.

  Arguments:
      fn: Callable that will be called upon each element in elems and an
          accumulator, for instance `lambda acc, x: acc + x`
      elems: tensor
      initializer: The first value used (`elems[-1]` in case of None)
      name: A string name for the foldr node in the graph

  Returns:
      Same type and shape as initializer
  """
  # Pure delegation to TF's right fold.
  return functional_ops.foldr(fn, elems, name=name, initializer=initializer)
# Load Keras default configuration from config file if present.
# Set Keras base dir path given KERAS_HOME env variable, if applicable.
# Otherwise either ~/.keras or /tmp.
if 'KERAS_HOME' in os.environ:
  _keras_dir = os.environ.get('KERAS_HOME')
else:
  _keras_base_dir = os.path.expanduser('~')
  _keras_dir = os.path.join(_keras_base_dir, '.keras')
_config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json'))
if os.path.exists(_config_path):
  try:
    # Use a context manager so the file handle is closed promptly
    # (the previous `json.load(open(...))` leaked the handle).
    with open(_config_path) as _config_file:
      _config = json.load(_config_file)
  except ValueError:
    # Malformed JSON: fall back to the built-in defaults.
    _config = {}
  _floatx = _config.get('floatx', floatx())
  assert _floatx in {'float16', 'float32', 'float64'}
  _epsilon = _config.get('epsilon', epsilon())
  assert isinstance(_epsilon, float)
  _image_data_format = _config.get('image_data_format', image_data_format())
  assert _image_data_format in {'channels_last', 'channels_first'}
  set_floatx(_floatx)
  set_epsilon(_epsilon)
  set_image_data_format(_image_data_format)
# Save config file.
if not os.path.exists(_keras_dir):
  try:
    os.makedirs(_keras_dir)
  except OSError:
    # Except permission denied and potential race conditions
    # in multi-threaded environments.
    pass
if not os.path.exists(_config_path):
  # First run: persist the current defaults for future sessions.
  _config = {
      'floatx': floatx(),
      'epsilon': epsilon(),
      'backend': 'tensorflow',
      'image_data_format': image_data_format()
  }
  try:
    with open(_config_path, 'w') as f:
      f.write(json.dumps(_config, indent=4))
  except IOError:
    # Except permission denied.
    pass
def configure_and_create_distributed_session(distribution_strategy):
  """Configure session config and create a session with it."""

  def _create_session(distribution_strategy):
    """Create the Distributed Strategy session."""
    session_config = get_default_session_config()

    # If a session already exists, merge in its config; in the case there is a
    # conflict, take values of the existing config.
    global _SESSION
    if getattr(_SESSION, 'session', None) and _SESSION.session._config:
      session_config.MergeFrom(_SESSION.session._config)

    if is_tpu_strategy(distribution_strategy):
      # TODO(priyag, yuefengz): Remove this workaround when Distribute
      # Coordinator is integrated with keras and we can create a session from
      # there.
      distribution_strategy.configure(session_config)
      master = distribution_strategy.extended._tpu_cluster_resolver.master()  # pylint: disable=protected-access
      session = session_module.Session(config=session_config, target=master)
    else:
      worker_context = dc_context.get_current_worker_context()
      if worker_context:
        dc_session_config = worker_context.session_config
        # Merge the default session config to the one from distribute
        # coordinator, which is fine for now since they don't have
        # conflicting configurations.
        dc_session_config.MergeFrom(session_config)
        session = session_module.Session(
            config=dc_session_config, target=worker_context.master_target)
      else:
        # Single-worker (or coordinator-less) path: let the strategy adjust
        # the config, then create a plain local session.
        distribution_strategy.configure(session_config)
        session = session_module.Session(config=session_config)

    # Install the session as the Keras backend session (side effect).
    set_session(session)

  if distribution_strategy._in_multi_worker_mode():
    # Multi-worker: the distribute coordinator calls _create_session on each
    # independent worker.
    dc.run_distribute_coordinator(
        _create_session,
        distribution_strategy,
        mode=dc.CoordinatorMode.INDEPENDENT_WORKER)
  else:
    _create_session(distribution_strategy)
def is_tpu_strategy(strategy):
  """Returns True iff `strategy` is a TPUStrategy (any versioned variant)."""
  if strategy is None:
    return False
  # Name-based check so every TPUStrategy subclass/version matches.
  return strategy.__class__.__name__.startswith('TPUStrategy')
def cast_variables_to_tensor(tensors):
  """Snapshots every `Variable` in a (possibly nested) structure as a tensor."""

  def _to_tensor(t):
    # Variables are read via identity; anything else passes through as-is.
    if isinstance(t, variables_module.Variable):
      return array_ops.identity(t)
    return t

  return nest.map_structure(_to_tensor, tensors)
def _is_symbolic_tensor(x):
  """Returns True for graph (non-eager) tensors."""
  if isinstance(x, ops.EagerTensor):
    return False
  return tensor_util.is_tensor(x)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/backend.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Model subclassing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import model_subclassing_test_util as model_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import data_structures
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
@keras_parameterized.run_all_keras_modes
class ModelSubclassingTest(keras_parameterized.TestCase):
  def test_custom_build(self):
    # A user-overridden `build` must run the first time the model is called
    # on concrete data.

    class DummyModel(keras.Model):

      def __init__(self):
        super(DummyModel, self).__init__()
        self.dense1 = keras.layers.Dense(32, activation='relu')
        self.uses_custom_build = False

      def call(self, inputs):
        return self.dense1(inputs)

      def build(self, input_shape):
        # Record that the custom build was invoked.
        self.uses_custom_build = True

    test_model = DummyModel()
    dummy_data = array_ops.ones((32, 50))
    test_model(dummy_data)
    self.assertTrue(test_model.uses_custom_build, 'Model should use user '
                    'defined build when called.')
  def test_attribute_conflict_error(self):
    # Assigning over a read-only `property` on a Model should raise
    # AttributeError instead of silently shadowing it.

    class ModelWithProperty(keras.Model):

      @property
      def read_only(self):
        return 1.

    m = ModelWithProperty()
    with self.assertRaisesRegexp(AttributeError, 'read_only'):
      m.read_only = 2.
  def test_custom_build_with_fit(self):
    # Layers created inside a custom `build` must be tracked: after fit(),
    # both layer1 (from __init__) and layer2 (from build) should show up in
    # `layers` and contribute their kernel+bias to `trainable_variables`.

    class DummyModel(keras.Model):

      def __init__(self):
        super(DummyModel, self).__init__()
        self.layer1 = keras.layers.Dense(10, activation='relu')

      def build(self, input_shape):
        # Deferred layer creation — only exists once the model is built.
        self.layer2 = keras.layers.Dense(1, activation='relu')

      def call(self, inputs):
        return self.layer2(self.layer1(inputs))

    model = DummyModel()
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2, epochs=2)
    self.assertLen(model.layers, 2)
    # Two Dense layers -> 2 kernels + 2 biases.
    self.assertLen(model.trainable_variables, 4)
def test_invalid_input_shape_build(self):
num_classes = 2
input_dim = 50
model = model_util.SimpleTestModel(
num_classes=num_classes, use_dp=True, use_bn=True)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
with self.assertRaisesRegexp(
ValueError, 'input shape is not one of the valid types'):
model.build(input_shape=tensor_shape.Dimension(input_dim))
  def test_embed_dtype_with_subclass_build(self):
    # Building with the default float input must fail for a model whose first
    # layer (an embedding lookup) requires integer ids.

    class Embedding(keras.layers.Layer):
      """An Embedding layer."""

      def __init__(self, vocab_size, embedding_dim, **kwargs):
        super(Embedding, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim

      def build(self, _):
        self.embedding = self.add_variable(
            'embedding_kernel',
            shape=[self.vocab_size, self.embedding_dim],
            dtype=np.float32,
            initializer=init_ops.random_uniform_initializer(-0.1, 0.1),
            trainable=True)

      def call(self, x):
        # Lookup requires integer indices — float inputs are invalid here.
        return embedding_ops.embedding_lookup(self.embedding, x)

    class EmbedModel(keras.Model):

      def __init__(self, vocab_size, embed_size):
        super(EmbedModel, self).__init__()
        self.embed1 = Embedding(vocab_size, embed_size)

      def call(self, inputs):
        return self.embed1(inputs)

    model = EmbedModel(100, 20)
    self.assertFalse(model.built, 'Model should not have been built')
    self.assertFalse(model.weights, ('Model should have no weights since it '
                                     'has not been built.'))
    with self.assertRaisesRegexp(
        ValueError, 'if your layers do not support float type inputs'):
      model.build(input_shape=(35, 20))
  def test_single_time_step_rnn_build(self):
    # An LSTM model must build correctly from a shape with timesteps == 1
    # (a known edge case for RNN shape inference).
    dim = 4
    timesteps = 1
    batch_input_shape = (None, timesteps, dim)
    units = 3

    class SimpleRNNModel(keras.Model):

      def __init__(self):
        super(SimpleRNNModel, self).__init__()
        self.lstm = keras.layers.LSTM(units)

      def call(self, inputs):
        return self.lstm(inputs)

    model = SimpleRNNModel()
    self.assertFalse(model.built, 'Model should not have been built')
    self.assertFalse(model.weights, ('Model should have no weights since it '
                                     'has not been built.'))
    model.build(batch_input_shape)
    self.assertTrue(model.weights, ('Model should have weights now that it '
                                    'has been properly built.'))
    self.assertTrue(model.built, 'Model should be built after calling `build`.')
    # The built model must accept a real batch of the same rank.
    model(array_ops.ones((32, timesteps, dim)))
def test_single_io_subclass_build(self):
num_classes = 2
input_dim = 50
batch_size = None
model = model_util.SimpleTestModel(
num_classes=num_classes, use_dp=True, use_bn=True)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(input_shape=(batch_size, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones((32, input_dim)))
def test_single_io_dimension_subclass_build(self):
num_classes = 2
input_dim = tensor_shape.Dimension(50)
batch_size = tensor_shape.Dimension(None)
model = model_util.SimpleTestModel(
num_classes=num_classes, use_dp=True, use_bn=True)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(input_shape=(batch_size, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones((32, input_dim)))
def test_multidim_io_subclass_build(self):
num_classes = 10
# Input size, e.g. image
batch_size = 32
input_shape = (32, 32, 3)
model = model_util.SimpleConvTestModel(num_classes)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
batch_input_shape = (batch_size,) + input_shape
model.build(input_shape=batch_input_shape)
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones(batch_input_shape))
def test_tensorshape_io_subclass_build(self):
num_classes = 10
# Input size, e.g. image
batch_size = None
input_shape = (32, 32, 3)
model = model_util.SimpleConvTestModel(num_classes)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(
input_shape=tensor_shape.TensorShape((batch_size,) + input_shape))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones((32,) + input_shape))
def test_subclass_save_model(self):
num_classes = 10
# Input size, e.g. image
batch_size = None
input_shape = (32, 32, 3)
model = model_util.SimpleConvTestModel(num_classes)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(
input_shape=tensor_shape.TensorShape((batch_size,) + input_shape))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
weights = model.get_weights()
tf_format_name = os.path.join(self.get_temp_dir(), 'ckpt')
model.save_weights(tf_format_name)
if h5py is not None:
hdf5_format_name = os.path.join(self.get_temp_dir(), 'weights.h5')
model.save_weights(hdf5_format_name)
model = model_util.SimpleConvTestModel(num_classes)
model.build(
input_shape=tensor_shape.TensorShape((batch_size,) + input_shape))
if h5py is not None:
model.load_weights(hdf5_format_name)
self.assertAllClose(weights, model.get_weights())
model.load_weights(tf_format_name)
self.assertAllClose(weights, model.get_weights())
def test_multi_io_subclass_build(self):
batch_size = None
num_samples = 1000
input_dim = 50
model = model_util.MultiIOTestModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
batch_input_shape = tensor_shape.TensorShape((batch_size, input_dim))
model.build(
input_shape=[batch_input_shape, batch_input_shape])
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
x1 = array_ops.ones((num_samples, input_dim))
x2 = array_ops.ones((num_samples, input_dim))
model([x1, x2])
def test_summary(self):
class ToString(object):
def __init__(self):
self.contents = ''
def __call__(self, msg):
self.contents += msg + '\n'
# Single-io
model = model_util.SimpleTestModel(num_classes=4, use_bn=True, use_dp=True)
model._set_inputs(np.ones((3, 4))) # need to build model first
print_fn = ToString()
model.summary(print_fn=print_fn)
self.assertTrue('Trainable params: 356' in print_fn.contents)
# Multi-io
model = model_util.MultiIOTestModel(
num_classes=(5, 6), use_bn=True, use_dp=True)
model._set_inputs([np.ones((3, 4)),
np.ones((3, 4))]) # need to build model first
print_fn = ToString()
model.summary(print_fn=print_fn)
self.assertTrue('Trainable params: 587' in print_fn.contents)
def test_no_dependency(self):
class Foo(keras.Model):
def __init__(self):
super(Foo, self).__init__()
self.isdep = keras.layers.Dense(1)
self.notdep = data_structures.NoDependency(keras.layers.Dense(2))
self.notdep_var = data_structures.NoDependency(
resource_variable_ops.ResourceVariable(1., name='notdep_var'))
m = Foo()
self.assertEqual([m.isdep, m.notdep], m.layers)
self.assertEqual(1, len(m._checkpoint_dependencies))
self.assertIs(m.isdep, m._checkpoint_dependencies[0].ref)
self.assertEqual('notdep_var:0', m.notdep_var.name)
def test_extra_variable(self):
class ExtraVar(keras.Model):
def __init__(self):
super(ExtraVar, self).__init__()
self.dense = keras.layers.Dense(1)
self.var = resource_variable_ops.ResourceVariable(1.)
self.not_trainable_var = resource_variable_ops.ResourceVariable(
2., trainable=False)
def call(self, inputs):
return self.dense(inputs + self.var)
m = ExtraVar()
self.assertTrue(m.trainable)
self.assertEqual([m.dense], m.layers)
self.assertEqual([m.var, m.not_trainable_var], m.variables)
self.assertEqual([m.var], m.trainable_variables)
self.assertEqual([m.not_trainable_var], m.non_trainable_variables)
self.assertLen(m.get_weights(), 2)
m.trainable = False
self.assertEqual([m.var, m.not_trainable_var], m.variables)
self.assertEqual([], m.trainable_variables)
self.assertEqual([m.var, m.not_trainable_var], m.non_trainable_variables)
self.assertLen(m.get_weights(), 2)
m.trainable = True
m(array_ops.ones([1, 1]))
self.assertEqual([m.dense.kernel, m.dense.bias], m.dense.variables)
self.assertEqual([m.dense.kernel, m.dense.bias], m.dense.weights)
self.assertLen(m.get_weights(), 4)
self.assertEqual([m.dense.kernel, m.dense.bias, m.var, m.not_trainable_var],
m.variables)
self.assertEqual([m.dense.kernel, m.dense.bias, m.var],
m.trainable_variables)
self.assertEqual([m.not_trainable_var], m.non_trainable_variables)
m.dense.trainable = False
self.assertEqual(
[m.dense.kernel, m.dense.bias, m.var, m.not_trainable_var],
m.variables)
self.assertEqual([m.var], m.trainable_variables)
self.assertEqual([m.dense.kernel, m.dense.bias, m.not_trainable_var],
m.non_trainable_variables)
self.assertLen(m.get_weights(), 4)
def test_add_weight_in_model(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.b = self.add_weight('bias', (10,))
self.c = self.add_weight('bias2', (10,), trainable=False)
def call(self, inputs):
return inputs + self.b + self.c
x = ops.convert_to_tensor(np.ones((10, 10), 'float32'))
model = MyModel()
model(x)
self.assertEqual(1, len(model.trainable_weights))
self.assertEqual(1, len(model.non_trainable_weights))
self.assertEqual(2, len(model.weights))
class MyModelCustomBuild(keras.Model):
def build(self, input_shape):
self.b = self.add_weight('bias', (10,))
self.c = self.add_weight('bias2', (10,), trainable=False)
def call(self, inputs):
return inputs + self.b + self.c
x = ops.convert_to_tensor(np.ones((10, 10), 'float32'))
model = MyModelCustomBuild()
model(x)
self.assertEqual(1, len(model.trainable_weights))
self.assertEqual(1, len(model.non_trainable_weights))
self.assertEqual(2, len(model.weights))
def test_add_update_in_model(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.b = self.add_weight('bias', (10,))
self.c = self.add_weight('bias2', (10,))
def call(self, inputs):
# Unconditional
self.add_update(self.b.assign(self.b * 2))
# Conditional
self.add_update(self.c.assign(inputs[1, :]), inputs)
return inputs + self.b + self.c
x = ops.convert_to_tensor(np.ones((10, 10), 'float32'))
model = MyModel()
model(x)
if context.executing_eagerly():
self.assertEqual(0, len(model.updates))
else:
self.assertEqual(2, len(model.updates))
self.assertEqual(1, len(model.get_updates_for(None)))
self.assertEqual(1, len(model.get_updates_for(x)))
class GraphSpecificModelSubclassingTests(test.TestCase):
@test_util.run_deprecated_v1
def test_single_io_workflow_with_tensors(self):
num_classes = 2
num_samples = 10
input_dim = 50
with self.cached_session():
model = model_util.SimpleTestModel(
num_classes=num_classes, use_dp=True, use_bn=True)
model.compile(loss='mse', optimizer='rmsprop')
x = array_ops.ones((num_samples, input_dim))
y = array_ops.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(steps=10, verbose=0)
@test_util.run_deprecated_v1
def test_multi_io_workflow_with_tensors(self):
num_classes = (2, 3)
num_samples = 10
input_dim = 50
with self.cached_session():
model = model_util.MultiIOTestModel(
num_classes=num_classes, use_dp=True, use_bn=True)
model.compile(loss='mse', optimizer='rmsprop')
x1 = array_ops.ones((num_samples, input_dim))
x2 = array_ops.ones((num_samples, input_dim))
y1 = array_ops.zeros((num_samples, num_classes[0]))
y2 = array_ops.zeros((num_samples, num_classes[1]))
model.fit([x1, x2], [y1, y2], epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(steps=10, verbose=0)
@test_util.run_deprecated_v1
def test_updates_and_losses_for_nested_models_in_subclassed_model(self):
# Case 1: deferred-build sequential nested in subclass.
class TestModel1(keras.Model):
def __init__(self):
super(TestModel1, self).__init__()
self.fc = keras.layers.Dense(10, input_shape=(784,),
activity_regularizer='l1')
self.bn = keras.Sequential([keras.layers.BatchNormalization(axis=1)])
def call(self, x):
return self.bn(self.fc(x))
with self.cached_session():
model = TestModel1()
x = array_ops.ones(shape=[100, 784], dtype='float32')
model(x)
self.assertEqual(len(model.get_updates_for(x)), 2)
self.assertEqual(len(model.get_losses_for(x)), 1)
# Case 2: placeholder-sequential nested in subclass.
class TestModel2(keras.Model):
def __init__(self):
super(TestModel2, self).__init__()
self.fc = keras.layers.Dense(10, input_shape=(784,),
activity_regularizer='l1')
self.bn = keras.Sequential(
[keras.layers.BatchNormalization(axis=1, input_shape=(10,))])
def call(self, x):
return self.bn(self.fc(x))
with self.cached_session():
model = TestModel2()
x = array_ops.ones(shape=[100, 784], dtype='float32')
model(x)
self.assertEqual(len(model.get_updates_for(x)), 2)
self.assertEqual(len(model.get_losses_for(x)), 1)
# Case 3: functional-API model nested in subclass.
inputs = keras.Input((10,))
outputs = keras.layers.BatchNormalization(axis=1)(inputs)
bn = keras.Model(inputs, outputs)
class TestModel3(keras.Model):
def __init__(self):
super(TestModel3, self).__init__()
self.fc = keras.layers.Dense(10, input_shape=(784,),
activity_regularizer='l1')
self.bn = bn
def call(self, x):
return self.bn(self.fc(x))
with self.cached_session():
model = TestModel3()
x = array_ops.ones(shape=[100, 784], dtype='float32')
model(x)
self.assertEqual(len(model.get_updates_for(x)), 2)
self.assertEqual(len(model.get_losses_for(x)), 1)
@test_util.run_deprecated_v1
def test_multi_io_workflow_with_numpy_arrays_and_custom_placeholders(self):
num_classes = (2, 3)
num_samples = 1000
input_dim = 50
with self.cached_session():
model = model_util.MultiIOTestModel(
num_classes=num_classes, use_dp=True, use_bn=True)
model.compile(loss='mse', optimizer='rmsprop')
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
x2_placeholder = array_ops.placeholder(
dtype='float32', shape=(None, input_dim))
model._set_inputs([x1, x2_placeholder])
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
_ = model.evaluate([x1, x2], [y1, y2], verbose=0)
@test_util.run_all_in_graph_and_eager_modes
class CustomCallSignatureTests(test.TestCase):
def test_no_inputs_in_signature(self):
model = model_util.CustomCallModel()
first = array_ops.ones([2, 3])
second = array_ops.ones([2, 5])
output = model(first, second)
self.evaluate([v.initializer for v in model.variables])
expected_output = self.evaluate(model.dense1(first) + model.dense2(second))
self.assertAllClose(expected_output, self.evaluate(output))
output = model(first, second, fiddle_with_output='yes')
self.assertAllClose(10. * expected_output, self.evaluate(output))
output = model(first, second=second, training=False)
self.assertAllClose(expected_output, self.evaluate(output))
def test_training_args_call_build(self):
input_dim = 2
model = model_util.TrainingNoDefaultModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build((None, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
def test_training_and_mask_args_call_build(self):
input_dim = 2
model = model_util.TrainingMaskingModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build((None, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
def test_custom_call_kwargs_and_build(self):
first_input_shape = (2, 3)
second_input_shape = (2, 5)
model = model_util.CustomCallModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
with self.assertRaisesRegexp(
ValueError, 'cannot build your model if it has positional'):
model.build(input_shape=[first_input_shape, second_input_shape])
def test_kwargs_in_signature(self):
class HasKwargs(keras.Model):
def call(self, x, y=3, **kwargs):
return x
model = HasKwargs()
arg = array_ops.ones([1])
model(arg, a=3)
if not context.executing_eagerly():
self.assertEqual(len(model.inputs), 1)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def test_training_no_default(self):
if not context.executing_eagerly():
self.skipTest('b/138307499')
model = model_util.TrainingNoDefaultModel()
arg = array_ops.ones([1, 1])
model(arg, True)
def test_positional_arg_in_call(self):
class ModelWithPositionalArgs(keras.Model):
def call(self, x, x2, x3=None):
return x + x2
x = np.ones((10, 1))
y = np.ones((10, 1))
m = ModelWithPositionalArgs()
m.compile('sgd', 'mse')
with self.assertRaisesRegexp(ValueError, r'Models passed to `fit`'):
m.fit(x, y, batch_size=2)
with self.assertRaisesRegexp(ValueError, r'Models passed to `evaluate`'):
m.evaluate(x, y, batch_size=2)
with self.assertRaisesRegexp(ValueError, r'Models passed to `predict`'):
m.predict(x, batch_size=2)
with self.assertRaisesRegexp(ValueError,
r'Models passed to `train_on_batch`'):
m.train_on_batch(x, y)
with self.assertRaisesRegexp(ValueError,
r'Models passed to `test_on_batch`'):
m.test_on_batch(x, y)
with self.assertRaisesRegexp(ValueError,
r'Models passed to `predict_on_batch`'):
m.predict_on_batch(x)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/model_subclassing_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras testing_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from absl.testing import parameterized
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import googletest
class KerasParameterizedTest(keras_parameterized.TestCase):
def test_run_with_all_model_types(self):
model_types = []
models = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_with_all_model_types
def testBody(self):
model_types.append(testing_utils.get_model_type())
models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))
e = ExampleTest()
e.testBody_functional()
e.testBody_subclass()
e.testBody_sequential()
self.assertLen(model_types, 3)
self.assertAllEqual(model_types, [
"functional",
"subclass",
"sequential"
])
# Validate that the models are what they should be
self.assertTrue(models[0]._is_graph_network)
self.assertFalse(models[1]._is_graph_network)
self.assertNotIsInstance(models[0], keras.models.Sequential)
self.assertNotIsInstance(models[1], keras.models.Sequential)
self.assertIsInstance(models[2], keras.models.Sequential)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(model_types, 6)
def test_run_with_all_model_types_and_extra_params(self):
model_types = []
models = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_with_all_model_types
@parameterized.named_parameters(
[dict(testcase_name="_0", with_brackets=True),
dict(testcase_name="_1", with_brackets=False)])
def testBody(self, with_brackets):
with_brackets = "with_brackets" if with_brackets else "without_brackets"
model_types.append((with_brackets, testing_utils.get_model_type()))
models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))
e = ExampleTest()
e.testBody_0_functional()
e.testBody_0_subclass()
e.testBody_0_sequential()
e.testBody_1_functional()
e.testBody_1_subclass()
e.testBody_1_sequential()
self.assertLen(model_types, 6)
self.assertAllEqual(model_types, [
("with_brackets", "functional"),
("with_brackets", "subclass"),
("with_brackets", "sequential"),
("without_brackets", "functional"),
("without_brackets", "subclass"),
("without_brackets", "sequential"),
])
# Validate that the models are what they should be
self.assertTrue(models[0]._is_graph_network)
self.assertFalse(models[1]._is_graph_network)
self.assertNotIsInstance(models[0], keras.models.Sequential)
self.assertNotIsInstance(models[1], keras.models.Sequential)
self.assertIsInstance(models[2], keras.models.Sequential)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(model_types, 12)
def test_run_with_all_model_types_exclude_one(self):
model_types = []
models = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_with_all_model_types(exclude_models="sequential")
def testBody(self):
model_types.append(testing_utils.get_model_type())
models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))
e = ExampleTest()
if hasattr(e, "testBody_functional"):
e.testBody_functional()
if hasattr(e, "testBody_subclass"):
e.testBody_subclass()
if hasattr(e, "testBody_sequential"):
e.testBody_sequential()
self.assertLen(model_types, 2)
self.assertAllEqual(model_types, [
"functional",
"subclass"
])
# Validate that the models are what they should be
self.assertTrue(models[0]._is_graph_network)
self.assertFalse(models[1]._is_graph_network)
self.assertNotIsInstance(models[0], keras.models.Sequential)
self.assertNotIsInstance(models[1], keras.models.Sequential)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(model_types, 4)
def test_run_with_all_model_types_exclude_multiple(self):
model_types = []
models = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_with_all_model_types(
exclude_models=["sequential", "functional"])
def testBody(self):
model_types.append(testing_utils.get_model_type())
models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))
e = ExampleTest()
if hasattr(e, "testBody_functional"):
e.testBody_functional()
if hasattr(e, "testBody_subclass"):
e.testBody_subclass()
if hasattr(e, "testBody_sequential"):
e.testBody_sequential()
self.assertLen(model_types, 1)
self.assertAllEqual(model_types, [
"subclass"
])
# Validate that the models are what they should be
self.assertFalse(models[0]._is_graph_network)
self.assertNotIsInstance(models[0], keras.models.Sequential)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(model_types, 2)
def test_run_all_keras_modes(self):
l = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_all_keras_modes
def testBody(self):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
should_run_tf_function = testing_utils.should_run_tf_function()
l.append((mode, should_run_eagerly, should_run_tf_function))
e = ExampleTest()
if not tf2.enabled():
e.testBody_v1_session()
e.testBody_v2_eager()
e.testBody_v2_funcgraph()
e.testBody_v2_function()
if not tf2.enabled():
self.assertLen(l, 4)
self.assertAllEqual(l, [
("graph", False, False),
("eager", True, True),
("eager", False, False),
("eager", False, True),
])
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, 8)
else:
self.assertLen(l, 3)
self.assertAllEqual(l, [
("eager", True, True),
("eager", False, False),
("eager", False, True),
])
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, 6)
def test_run_all_keras_modes_extra_params(self):
l = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
[dict(testcase_name="_0", with_brackets=True),
dict(testcase_name="_1", with_brackets=False)])
def testBody(self, with_brackets):
mode = "eager" if context.executing_eagerly() else "graph"
with_brackets = "with_brackets" if with_brackets else "without_brackets"
should_run_eagerly = testing_utils.should_run_eagerly()
should_run_tf_function = testing_utils.should_run_tf_function()
l.append(
(with_brackets, mode, should_run_eagerly, should_run_tf_function))
e = ExampleTest()
if not tf2.enabled():
e.testBody_0_v1_session()
e.testBody_1_v1_session()
e.testBody_0_v2_eager()
e.testBody_0_v2_funcgraph()
e.testBody_0_v2_function()
e.testBody_1_v2_eager()
e.testBody_1_v2_funcgraph()
e.testBody_1_v2_function()
expected_combinations = {
("with_brackets", "eager", True, True),
("with_brackets", "eager", False, False),
("with_brackets", "eager", False, True),
("without_brackets", "eager", True, True),
("without_brackets", "eager", False, False),
("without_brackets", "eager", False, True),
}
if not tf2.enabled():
expected_combinations = expected_combinations.union({
("with_brackets", "graph", False, False),
("without_brackets", "graph", False, False),
})
self.assertLen(l, len(expected_combinations))
self.assertEqual(set(l), expected_combinations)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, len(expected_combinations) * 2)
def test_run_all_keras_modes_always_skip_v1(self):
l = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def testBody(self):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
should_run_tf_function = testing_utils.should_run_tf_function()
l.append((mode, should_run_eagerly, should_run_tf_function))
e = ExampleTest()
if hasattr(e, "testBody_v1_session"):
e.testBody_v1_session()
if hasattr(e, "testBody_v2_eager"):
e.testBody_v2_eager()
if hasattr(e, "testBody_v2_funcgraph"):
e.testBody_v2_funcgraph()
if hasattr(e, "testBody_v2_function"):
e.testBody_v2_function()
self.assertLen(l, 3)
self.assertEqual(
set(l), {
("eager", True, True),
("eager", False, False),
("eager", False, True),
})
def test_run_all_keras_modes_with_all_model_types(self):
l = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def testBody(self):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
should_run_tf_function = testing_utils.should_run_tf_function()
l.append((mode, should_run_eagerly, should_run_tf_function,
testing_utils.get_model_type()))
e = ExampleTest()
e.testBody_v2_eager_functional()
e.testBody_v2_funcgraph_functional()
e.testBody_v2_function_functional()
e.testBody_v2_eager_sequential()
e.testBody_v2_funcgraph_sequential()
e.testBody_v2_function_sequential()
e.testBody_v2_eager_subclass()
e.testBody_v2_funcgraph_subclass()
e.testBody_v2_function_subclass()
if not tf2.enabled():
e.testBody_v1_session_functional()
e.testBody_v1_session_sequential()
e.testBody_v1_session_subclass()
expected_combinations = {
("eager", True, True, "functional"),
("eager", False, False, "functional"),
("eager", False, True, "functional"),
("eager", True, True, "sequential"),
("eager", False, False, "sequential"),
("eager", False, True, "sequential"),
("eager", True, True, "subclass"),
("eager", False, False, "subclass"),
("eager", False, True, "subclass"),
}
if not tf2.enabled():
expected_combinations = expected_combinations.union({
("graph", False, False, "functional"),
("graph", False, False, "sequential"),
("graph", False, False, "subclass"),
})
self.assertLen(l, len(expected_combinations))
self.assertEqual(set(l), expected_combinations)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, len(expected_combinations) * 2)
def test_run_all_model_types_with_all_keras_modes(self):
l = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def testBody(self):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
should_run_tf_function = testing_utils.should_run_tf_function()
l.append((mode, should_run_eagerly, should_run_tf_function,
testing_utils.get_model_type()))
e = ExampleTest()
e.testBody_functional_v2_eager()
e.testBody_functional_v2_funcgraph()
e.testBody_functional_v2_function()
e.testBody_sequential_v2_eager()
e.testBody_sequential_v2_funcgraph()
e.testBody_sequential_v2_function()
e.testBody_subclass_v2_eager()
e.testBody_subclass_v2_funcgraph()
e.testBody_subclass_v2_function()
if not tf2.enabled():
e.testBody_functional_v1_session()
e.testBody_sequential_v1_session()
e.testBody_subclass_v1_session()
expected_combinations = {
("eager", True, True, "functional"),
("eager", False, False, "functional"),
("eager", False, True, "functional"),
("eager", True, True, "sequential"),
("eager", False, False, "sequential"),
("eager", False, True, "sequential"),
("eager", True, True, "subclass"),
("eager", False, False, "subclass"),
("eager", False, True, "subclass"),
}
if not tf2.enabled():
expected_combinations = expected_combinations.union({
("graph", False, False, "functional"),
("graph", False, False, "sequential"),
("graph", False, False, "subclass"),
})
self.assertLen(l, len(expected_combinations))
self.assertEqual(set(l), expected_combinations)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, len(expected_combinations) * 2)
def test_run_all_keras_modes_with_all_model_types_annotate_class(self):
l = []
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@parameterized.named_parameters(dict(testcase_name="_arg",
arg=True))
def testBody(self, arg):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
should_run_tf_function = testing_utils.should_run_tf_function()
l.append((mode, should_run_eagerly, should_run_tf_function,
testing_utils.get_model_type()))
e = ExampleTest()
e.testBody_arg_v2_eager_functional()
e.testBody_arg_v2_funcgraph_functional()
e.testBody_arg_v2_function_functional()
e.testBody_arg_v2_eager_sequential()
e.testBody_arg_v2_funcgraph_sequential()
e.testBody_arg_v2_function_sequential()
e.testBody_arg_v2_eager_subclass()
e.testBody_arg_v2_funcgraph_subclass()
e.testBody_arg_v2_function_subclass()
if not tf2.enabled():
e.testBody_arg_v1_session_functional()
e.testBody_arg_v1_session_sequential()
e.testBody_arg_v1_session_subclass()
expected_combinations = {
("eager", True, True, "functional"),
("eager", False, False, "functional"),
("eager", False, True, "functional"),
("eager", True, True, "sequential"),
("eager", False, False, "sequential"),
("eager", False, True, "sequential"),
("eager", True, True, "subclass"),
("eager", False, False, "subclass"),
("eager", False, True, "subclass"),
}
if not tf2.enabled():
expected_combinations = expected_combinations.union({
("graph", False, False, "functional"),
("graph", False, False, "sequential"),
("graph", False, False, "subclass"),
})
self.assertLen(l, len(expected_combinations))
self.assertEqual(set(l), expected_combinations)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, len(expected_combinations) * 2)
def test_run_all_keras_modes_with_all_model_types_annotate_class_2(self):
l = []
@keras_parameterized.run_with_all_model_types
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(dict(testcase_name="_arg",
arg=True))
def testBody(self, arg):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
should_run_tf_function = testing_utils.should_run_tf_function()
l.append((mode, should_run_eagerly, should_run_tf_function,
testing_utils.get_model_type()))
e = ExampleTest()
e.testBody_arg_v2_eager_functional()
e.testBody_arg_v2_funcgraph_functional()
e.testBody_arg_v2_function_functional()
e.testBody_arg_v2_eager_sequential()
e.testBody_arg_v2_funcgraph_sequential()
e.testBody_arg_v2_function_sequential()
e.testBody_arg_v2_eager_subclass()
e.testBody_arg_v2_funcgraph_subclass()
e.testBody_arg_v2_function_subclass()
if not tf2.enabled():
e.testBody_arg_v1_session_functional()
e.testBody_arg_v1_session_sequential()
e.testBody_arg_v1_session_subclass()
expected_combinations = {
("eager", True, True, "functional"),
("eager", False, False, "functional"),
("eager", False, True, "functional"),
("eager", True, True, "sequential"),
("eager", False, False, "sequential"),
("eager", False, True, "sequential"),
("eager", True, True, "subclass"),
("eager", False, False, "subclass"),
("eager", False, True, "subclass"),
}
if not tf2.enabled():
expected_combinations = expected_combinations.union({
("graph", False, False, "functional"),
("graph", False, False, "sequential"),
("graph", False, False, "subclass"),
})
self.assertLen(l, len(expected_combinations))
self.assertEqual(set(l), expected_combinations)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, len(expected_combinations) * 2)
  @keras_parameterized.run_all_keras_modes
  @parameterized.named_parameters(dict(testcase_name="argument",
                                       arg=True))
  def test_run_all_keras_modes_extra_params_2(self, arg):
    """`run_all_keras_modes` forwards `named_parameters` kwargs to the body."""
    self.assertEqual(arg, True)
  @keras_parameterized.run_with_all_model_types
  @parameterized.named_parameters(dict(testcase_name="argument",
                                       arg=True))
  def test_run_with_all_model_types_extra_params_2(self, arg):
    """`run_with_all_model_types` forwards `named_parameters` kwargs too."""
    self.assertEqual(arg, True)
# Run the test suite when executed as a script.
if __name__ == "__main__":
  googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/keras_parameterized_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras weights constraints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
def get_test_values():
  """Returns the magnitude values used to parameterize the norm tests."""
  magnitudes = [0.1, 0.5, 3, 8, 1e-7]
  return magnitudes
def get_example_array():
  """Returns a deterministic 100x100 float array with values in [-50, 50).

  The (0, 0) entry is forced to exactly 0 so the constraints are also
  exercised on a zero weight.
  """
  np.random.seed(3537)
  arr = np.random.random((100, 100)) * 100. - 50.
  arr[0, 0] = 0.  # 0 could possibly cause trouble
  return arr
def get_example_kernel(width):
  """Returns a deterministic random kernel of shape (width, width, 2, 2)."""
  np.random.seed(3537)
  return np.random.rand(width, width, 2, 2)
@test_util.run_all_in_graph_and_eager_modes
class KerasConstraintsTest(test.TestCase):
  """Tests for the built-in Keras weight constraints."""
  def test_serialization(self):
    """Constraints round-trip through get/serialize/deserialize."""
    all_activations = ['max_norm', 'non_neg',
                       'unit_norm', 'min_max_norm']
    for name in all_activations:
      fn = keras.constraints.get(name)
      ref_fn = getattr(keras.constraints, name)()
      assert fn.__class__ == ref_fn.__class__
      config = keras.constraints.serialize(fn)
      fn = keras.constraints.deserialize(config)
      assert fn.__class__ == ref_fn.__class__
  def test_max_norm(self):
    """max_norm keeps every axis-0 norm strictly below the given maximum."""
    array = get_example_array()
    for m in get_test_values():
      norm_instance = keras.constraints.max_norm(m)
      normed = norm_instance(keras.backend.variable(array))
      assert np.all(keras.backend.eval(normed) < m)
    # a more explicit example
    norm_instance = keras.constraints.max_norm(2.0)
    x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
    x_normed_target = np.array(
        [[0, 0, 0], [1.0, 0, 0], [2.0, 0, 0],
         [2. / np.sqrt(3), 2. / np.sqrt(3), 2. / np.sqrt(3)]]).T
    x_normed_actual = keras.backend.eval(
        norm_instance(keras.backend.variable(x)))
    self.assertAllClose(x_normed_actual, x_normed_target, rtol=1e-05)
  def test_non_neg(self):
    """non_neg removes all negative entries (each row's min becomes 0)."""
    non_neg_instance = keras.constraints.non_neg()
    normed = non_neg_instance(keras.backend.variable(get_example_array()))
    assert np.all(np.min(keras.backend.eval(normed), axis=1) == 0.)
  def test_unit_norm(self):
    """unit_norm rescales so each axis-0 L2 norm is (numerically) 1."""
    unit_norm_instance = keras.constraints.unit_norm()
    normalized = unit_norm_instance(keras.backend.variable(get_example_array()))
    norm_of_normalized = np.sqrt(
        np.sum(keras.backend.eval(normalized)**2, axis=0))
    # In the unit norm constraint, it should be equal to 1.
    difference = norm_of_normalized - 1.
    largest_difference = np.max(np.abs(difference))
    assert np.abs(largest_difference) < 10e-5
  def test_min_max_norm(self):
    """min_max_norm keeps axis-0 norms within [min_value, max_value]."""
    array = get_example_array()
    for m in get_test_values():
      norm_instance = keras.constraints.min_max_norm(
          min_value=m, max_value=m * 2)
      normed = norm_instance(keras.backend.variable(array))
      value = keras.backend.eval(normed)
      l2 = np.sqrt(np.sum(np.square(value), axis=0))
      # Both boolean-mask selections must be empty (small float slack above).
      assert not l2[l2 < m]
      assert not l2[l2 > m * 2 + 1e-5]
  def test_conv2d_radial_constraint(self):
    """radial_constraint preserves shape and produces symmetric kernels."""
    for width in (3, 4, 5, 6):
      array = get_example_kernel(width)
      norm_instance = keras.constraints.radial_constraint()
      normed = norm_instance(keras.backend.variable(array))
      value = keras.backend.eval(normed)
      assert np.all(value.shape == array.shape)
      assert np.all(value[0:, 0, 0, 0] == value[-1:, 0, 0, 0])
      assert len(set(value[..., 0, 0].flatten())) == math.ceil(float(width) / 2)
# Run the test suite when executed as a script.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/constraints_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Code for model cloning, plus model-related API entries.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras import saving
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.engine.base_layer import AddMetric
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_layer import Input
from tensorflow.python.keras.engine.input_layer import InputLayer
from tensorflow.python.keras.engine.network import Network
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util.tf_export import keras_export
# API entries importable from `keras.models`:
# Model / Sequential classes plus the (de)serialization helpers are re-exported
# here so users can access them via `keras.models.*`.
Model = training.Model  # pylint: disable=invalid-name
Sequential = sequential.Sequential  # pylint: disable=invalid-name
save_model = saving.save_model
load_model = saving.load_model
model_from_config = saving.model_from_config
model_from_yaml = saving.model_from_yaml
model_from_json = saving.model_from_json
# Callable used to clone a layer with weights preserved.
def share_weights(layer):
  """Layer-cloning callable that returns `layer` unchanged (weights shared)."""
  return layer
def _clone_layer(layer):
return layer.__class__.from_config(layer.get_config())
def _insert_ancillary_layers(model, ancillary_layers, metrics_names, new_nodes):
  """Inserts ancillary layers into the model with the proper order.

  `AddMetric` layers are appended after all other ancillary layers, sorted so
  their order matches `metrics_names`; the remaining layers keep their
  original relative order.
  """
  plain_layers = []
  metric_layers = []
  for candidate in ancillary_layers:
    if isinstance(candidate, AddMetric):
      metric_layers.append(candidate)
    else:
      plain_layers.append(candidate)
  # Order metric layers so they agree with metrics_names.
  metric_layers.sort(key=lambda m: metrics_names.index(m.metric_name))
  model._insert_layers(plain_layers + metric_layers,
                       relevant_nodes=list(new_nodes))
def _make_new_nodes(nodes_by_depth, layer_fn, layer_map, tensor_map):
  """Uses the layers in `layer_map` to make new nodes based on `nodes_by_depth`.

  Args:
    nodes_by_depth: Provides structure information to create new nodes.
    layer_fn: Function to clone layers.
    layer_map: Map from layers in `model` to new layers.
    tensor_map: Map from tensors in `model` to newly computed tensors.

  Returns:
    A set of new nodes. `layer_map` and `tensor_map` are updated in place.
  """
  # Iterate over every node in the reference model, in depth order
  # (inputs first, outputs last), so inputs are available before each call.
  new_nodes = set()
  depth_keys = list(nodes_by_depth.keys())
  depth_keys.sort(reverse=True)
  for depth in depth_keys:
    nodes = nodes_by_depth[depth]
    for node in nodes:
      # Recover the corresponding layer.
      layer = node.outbound_layer
      # Get or create layer.
      if layer not in layer_map:
        new_layer = layer_fn(layer)
        layer_map[layer] = new_layer
        layer = new_layer
      else:
        # Reuse previously cloned layer.
        layer = layer_map[layer]
        # Don't call InputLayer multiple times.
        if isinstance(layer, InputLayer):
          continue
      # If all previous input tensors are available in tensor_map,
      # then call node.inbound_layer on them.
      if all(
          tensor in tensor_map for tensor in nest.flatten(node.input_tensors)):
        computed_tensors = nest.map_structure(lambda t: tensor_map[t],
                                              node.input_tensors)
        # Call layer.
        kwargs = node.arguments or {}
        output_tensors = layer(computed_tensors, **kwargs)
        # Thread-safe way to keep track of what node was created.
        first_output_tensor = nest.flatten(output_tensors)[0]
        new_nodes.add(
            layer._inbound_nodes[first_output_tensor._keras_history.node_index])
        # Record the mapping from original to cloned output tensors.
        for x, y in zip(
            nest.flatten(node.output_tensors), nest.flatten(output_tensors)):
          tensor_map[x] = y
  return new_nodes
def _clone_functional_model(model, input_tensors=None, layer_fn=_clone_layer):
  """Clone a functional `Model` instance.

  Model cloning is similar to calling a model on new inputs,
  except that it creates new layers (and thus new weights) instead
  of sharing the weights of the existing layers.

  Input layers are always cloned.

  Arguments:
    model: Instance of `Model`.
    input_tensors: optional list of input tensors
      to build the model upon. If not provided,
      placeholders will be created.
    layer_fn: callable to be applied on non-input layers in the model. By
      default it clones the layer. Another example is to preserve the layer
      to share the weights. This is required when we create a per-replica
      copy of the model with distribution strategy; we want the weights to
      be shared but still feed inputs separately so we create new input
      layers.

  Returns:
    An instance of `Model` reproducing the behavior
    of the original model, on top of new inputs tensors,
    using newly instantiated weights.

  Raises:
    ValueError: in case of invalid `model` argument value or `layer_fn`
      argument value.
  """
  if not isinstance(model, Model):
    raise ValueError('Expected `model` argument '
                     'to be a `Model` instance, got ', model)
  if isinstance(model, Sequential):
    raise ValueError('Expected `model` argument '
                     'to be a functional `Model` instance, '
                     'got a `Sequential` instance instead:', model)
  if not model._is_graph_network:
    raise ValueError('Expected `model` argument '
                     'to be a functional `Model` instance, '
                     'but got a subclass model instead.')
  layer_map = {}  # Cache for created layers.
  tensor_map = object_identity.ObjectIdentityDictionary(
  )  # Map {reference_tensor: corresponding_tensor}
  if input_tensors is None:
    # Create placeholders to build the model on top of.
    input_tensors = []
    for layer in model._input_layers:
      input_tensor = Input(**layer.get_config())
      input_tensors.append(input_tensor)
      # Cache newly created input layer.
      newly_created_input_layer = input_tensor._keras_history.layer
      layer_map[layer] = newly_created_input_layer
  else:
    # Make sure that all input tensors come from a Keras layer.
    # If tensor comes from an input layer: cache the input layer.
    input_tensors = nest.flatten(input_tensors)
    input_tensors_ = []
    for i, input_tensor in enumerate(input_tensors):
      if not K.is_keras_tensor(input_tensor):
        # Wrap non-Keras tensors in an InputLayer so they join the graph.
        original_input_layer = model._input_layers[i]
        name = original_input_layer.name
        input_tensor = Input(tensor=input_tensor,
                             name='input_wrapper_for_' + name)
        input_tensors_.append(input_tensor)
        # Cache newly created input layer.
        newly_created_input_layer = input_tensor._keras_history.layer
        layer_map[original_input_layer] = newly_created_input_layer
      else:
        input_tensors_.append(input_tensor)
    input_tensors = input_tensors_
  # Seed the tensor map with the (original input -> new input) pairs.
  for x, y in zip(model.inputs, input_tensors):
    tensor_map[x] = y
  if not callable(layer_fn):
    raise ValueError('Expected `layer_fn` argument to be a callable.')
  # Has the side effect of filling out `layer_map` and `tensor_map`.
  new_nodes = _make_new_nodes(model._nodes_by_depth, layer_fn, layer_map,
                              tensor_map)
  # Check that we did compute the model outputs,
  # then instantiate a new model from inputs and outputs.
  output_tensors = []
  for x in model.outputs:
    assert x in tensor_map, 'Could not compute output ' + str(x)
    output_tensors.append(tensor_map[x])
  # Restore the original (possibly nested) input/output structure.
  input_tensors = nest.pack_sequence_as(model._nested_inputs, input_tensors)
  output_tensors = nest.pack_sequence_as(model._nested_outputs, output_tensors)
  metrics_names = model.metrics_names
  model = Model(input_tensors, output_tensors, name=model.name)
  # Layers not directly tied to outputs of the Model, such as loss layers
  # created in `add_loss` and `add_metric`.
  ancillary_layers = [
      layer for layer in layer_map.values() if layer not in model.layers
  ]
  if ancillary_layers:
    _insert_ancillary_layers(model, ancillary_layers, metrics_names, new_nodes)
  return model
def _remove_ancillary_layers(model, layer_map, layers):
"""Removes and returns any ancillary layers from `layers` based on `model`.
Ancillary layers are part of the model topology but not used to compute the
model outputs, e.g., layers from `add_loss` and `add_metric`.
Args:
model: A Keras Model.
layer_map: A map to from layers in the `model` to those in `layers`.
layers: A list of all layers.
Returns:
Two lists of layers: (1) `layers` with the ancillary layers removed, and (2)
the ancillary layers.
"""
ancillary_layers = [] # Additional layers for computing losses and metrics.
if not model._is_graph_network:
return layers, ancillary_layers
# Ancillary layers are those with depth < 0.
depths = [depth for depth in model._nodes_by_depth.keys() if depth < 0]
depths.sort(reverse=True) # Order topologically from inputs to outputs.
for depth in depths:
for node in model._nodes_by_depth[depth]:
ancillary_layers.append(layer_map[node.outbound_layer])
return [l for l in layers if l not in ancillary_layers], ancillary_layers
def _clone_sequential_model(model, input_tensors=None, layer_fn=_clone_layer):
  """Clone a `Sequential` model instance.

  Model cloning is similar to calling a model on new inputs,
  except that it creates new layers (and thus new weights) instead
  of sharing the weights of the existing layers.

  Arguments:
    model: Instance of `Sequential`.
    input_tensors: optional list of input tensors
      to build the model upon. If not provided,
      placeholders will be created.
    layer_fn: callable to be applied on non-input layers in the model. By
      default it clones the layer. Another example is to preserve the layer
      to share the weights. This is required when we create a per-replica
      copy of the model with distribution strategy; we want the weights to
      be shared but still feed inputs separately so we create new input
      layers.

  Returns:
    An instance of `Sequential` reproducing the behavior
    of the original model, on top of new inputs tensors,
    using newly instantiated weights.

  Raises:
    ValueError: in case of invalid `model` argument value or `layer_fn`
      argument value.
  """
  if not isinstance(model, Sequential):
    raise ValueError('Expected `model` argument '
                     'to be a `Sequential` model instance, '
                     'but got:', model)
  if not callable(layer_fn):
    raise ValueError('Expected `layer_fn` argument to be a callable.')
  layers = []  # Layers needed to compute the model's outputs.
  layer_map = {}
  # Use model._layers to ensure that all layers are cloned. The model's layers
  # property will exclude the initial InputLayer (if it exists) in the model,
  # resulting in a different Sequential model structure.
  for layer in model._layers:
    if isinstance(layer, InputLayer) and input_tensors is not None:
      # If input tensors are provided, the original model's InputLayer is
      # overwritten with a different InputLayer.
      continue
    # Input layers are always cloned via config; other layers use layer_fn.
    cloned_layer = (
        _clone_layer(layer)
        if isinstance(layer, InputLayer) else layer_fn(layer))
    layers.append(cloned_layer)
    layer_map[layer] = cloned_layer
  layers, ancillary_layers = _remove_ancillary_layers(model, layer_map, layers)
  if input_tensors is None:
    cloned_model = Sequential(layers=layers, name=model.name)
  elif len(generic_utils.to_list(input_tensors)) != 1:
    raise ValueError('To clone a `Sequential` model, we expect '
                     ' at most one tensor '
                     'as part of `input_tensors`.')
  else:
    # Overwrite the original model's input layer.
    if isinstance(input_tensors, tuple):
      input_tensors = list(input_tensors)
    x = generic_utils.to_list(input_tensors)[0]
    if K.is_keras_tensor(x):
      origin_layer = x._keras_history.layer
      if isinstance(origin_layer, InputLayer):
        cloned_model = Sequential(
            layers=[origin_layer] + layers, name=model.name)
      else:
        raise ValueError('Cannot clone a `Sequential` model on top '
                         'of a tensor that comes from a Keras layer '
                         'other than an `InputLayer`. '
                         'Use the functional API instead.')
    else:
      # Non-Keras tensor: wrap it in a fresh InputLayer first.
      input_tensor = Input(tensor=x, name='input_wrapper_for_' + str(x.name))
      input_layer = input_tensor._keras_history.layer
      cloned_model = Sequential(layers=[input_layer] + layers, name=model.name)
  if not ancillary_layers:
    return cloned_model
  tensor_map = {}  # Maps tensors from `model` to those in `cloned_model`.
  for depth, cloned_nodes in cloned_model._nodes_by_depth.items():
    nodes = model._nodes_by_depth[depth]
    # This should be safe in a Sequential model. In an arbitrary network, you
    # need to sort using the outbound layer of the node as a key.
    for cloned_node, node in zip(cloned_nodes, nodes):
      if isinstance(cloned_node.output_tensors, list):
        for j, output_tensor in enumerate(cloned_node.output_tensors):
          tensor_map[node.output_tensors[j]] = output_tensor
      else:
        tensor_map[node.output_tensors] = cloned_node.output_tensors
  # Ancillary nodes have negative depth.
  new_nodes = _make_new_nodes(
      {
          depth: nodes
          for depth, nodes in model._nodes_by_depth.items()
          if depth < 0
      }, layer_fn, layer_map, tensor_map)
  _insert_ancillary_layers(cloned_model, ancillary_layers, model.metrics_names,
                           new_nodes)
  return cloned_model
@keras_export('keras.models.clone_model')
def clone_model(model, input_tensors=None, clone_function=None):
  """Clone any `Model` instance.

  Model cloning is similar to calling a model on new inputs,
  except that it creates new layers (and thus new weights) instead
  of sharing the weights of the existing layers.

  Arguments:
    model: Instance of `Model`
      (could be a functional model or a Sequential model).
    input_tensors: optional list of input tensors or InputLayer objects
      to build the model upon. If not provided,
      placeholders will be created.
    clone_function: Callable to be used to clone each layer in the target
      model (except `InputLayer` instances). It takes as argument the layer
      instance to be cloned, and returns the corresponding layer instance to
      be used in the model copy. If unspecified, this callable defaults to
      the following serialization/deserialization function:
      `lambda layer: layer.__class__.from_config(layer.get_config())`.
      By passing a custom callable, you can customize your copy of the
      model, e.g. by wrapping certain layers of interest (you might want to
      replace all `LSTM` instances with equivalent
      `Bidirectional(LSTM(...))` instances, for example).

  Returns:
    An instance of `Model` reproducing the behavior
    of the original model, on top of new inputs tensors,
    using newly instantiated weights. The cloned model might behave
    differently from the original model if a custom clone_function
    modifies the layer.

  Raises:
    ValueError: in case of invalid `model` argument value.
  """
  layer_fn = _clone_layer if clone_function is None else clone_function
  # Dispatch on the model flavor; both helpers share the same signature.
  if isinstance(model, Sequential):
    clone_impl = _clone_sequential_model
  else:
    clone_impl = _clone_functional_model
  return clone_impl(model, input_tensors=input_tensors, layer_fn=layer_fn)
# "Clone" a subclassed model by reseting all of the attributes.
def _in_place_subclassed_model_reset(model):
"""Substitute for model cloning that works for subclassed models.
Subclassed models cannot be cloned because their topology is not serializable.
To "instantiate" an identical model in a new TF graph, we reuse the original
model object, but we clear its state.
After calling this function on a model instance, you can use the model
instance as if it were a model clone (in particular you can use it in a new
graph).
This method clears the state of the input model. It is thus destructive.
However the original state can be restored fully by calling
`_in_place_subclassed_model_state_restoration`.
Args:
model: Instance of a Keras model created via subclassing.
Raises:
ValueError: In case the model uses a subclassed model as inner layer.
"""
assert not model._is_graph_network # Only makes sense for subclassed networks
# Retrieve all layers tracked by the model as well as their attribute names
attributes_cache = {}
for name in dir(model):
# Skip the check of methods in tf.Module since they basically
# recursively query all the other attributes within same module.
if name == 'submodules':
continue
try:
value = getattr(model, name)
except (AttributeError, ValueError, TypeError):
continue
if isinstance(value, Layer):
attributes_cache[name] = value
assert value in model.layers
if hasattr(value, 'layers') and value.layers:
raise ValueError('We do not support the use of nested layers '
'in `model_to_estimator` at this time. Found nested '
'layer: %s' % value)
elif isinstance(
value, (list, tuple)) and name not in ('layers', '_layers', 'metrics',
'_compile_metric_functions',
'_output_loss_metrics'):
# Handle case: list/tuple of layers (also tracked by the Network API).
if value and all(isinstance(val, Layer) for val in value):
raise ValueError('We do not support the use of list-of-layers '
'attributes in subclassed models used with '
'`model_to_estimator` at this time. Found list '
'model: %s' % name)
# Replace layers on the model with fresh layers
layers_to_names = {value: key for key, value in attributes_cache.items()}
original_layers = model._layers[:]
setattr_tracking = model._setattr_tracking
model._setattr_tracking = False
model._layers = []
for layer in original_layers: # We preserve layer order.
config = layer.get_config()
# This will not work for nested subclassed models used as layers.
# This would be theoretically possible to support, but would add complexity.
# Only do it if users complain.
if isinstance(layer, Network) and not layer._is_graph_network:
raise ValueError('We do not support the use of nested subclassed models '
'in `model_to_estimator` at this time. Found nested '
'model: %s' % layer)
fresh_layer = layer.__class__.from_config(config)
name = layers_to_names[layer]
setattr(model, name, fresh_layer)
model._layers.append(fresh_layer)
# Cache original model build attributes (in addition to layers)
if (not hasattr(model, '_original_attributes_cache') or
model._original_attributes_cache is None):
if model.built:
attributes_to_cache = [
'inputs',
'outputs',
'total_loss',
'optimizer',
'train_function',
'test_function',
'predict_function',
'_training_endpoints',
'_collected_trainable_weights',
'_feed_inputs',
'_feed_input_names',
'_feed_input_shapes',
]
for name in attributes_to_cache:
attributes_cache[name] = getattr(model, name)
model._original_attributes_cache = attributes_cache
_reset_build_compile_trackers(model)
model._setattr_tracking = setattr_tracking
def _reset_build_compile_trackers(model):
"""Reset state trackers for model.
Note that we do not actually zero out attributes such as optimizer,
but instead rely on the expectation that all of the attrs will be
over-written on calling build/compile/etc. This is somewhat fragile,
insofar as we check elsewhere for the presence of these attributes as
evidence of having been built/compiled/etc. Pending a better way to do this,
we reset key attributes here to allow building and compiling.
Args:
model: the model that is being reset
"""
# Reset build state
model.built = False
model.inputs = None
model.outputs = None
# Reset compile state
model._is_compiled = False # pylint:disable=protected-access
model.optimizer = None
def in_place_subclassed_model_state_restoration(model):
  """Restores the original state of a model after it was "reset".

  This undoes this action of `_in_place_subclassed_model_reset`, which is
  called in `clone_and_build_model` if `in_place_reset` is set to True.

  Args:
    model: Instance of a Keras model created via subclassing, on which
      `_in_place_subclassed_model_reset` was previously called.
  """
  assert not model._is_graph_network
  # Restore layers and build attributes
  if (hasattr(model, '_original_attributes_cache') and
      model._original_attributes_cache is not None):
    # Models have sticky attribute assignment, so we want to be careful to add
    # back the previous attributes and track Layers by their original names
    # without adding dependencies on "utility" attributes which Models exempt
    # when they're constructed.
    setattr_tracking = model._setattr_tracking
    model._setattr_tracking = False
    model._layers = []
    for name, value in model._original_attributes_cache.items():
      setattr(model, name, value)
      # Only Layer-valued attributes are re-registered as tracked layers.
      if isinstance(value, Layer):
        model._layers.append(value)
    model._original_attributes_cache = None
    model._setattr_tracking = setattr_tracking
  else:
    # Restore to the state of a never-called model.
    _reset_build_compile_trackers(model)
def clone_and_build_model(
    model, input_tensors=None, target_tensors=None, custom_objects=None,
    compile_clone=True, in_place_reset=False, optimizer_iterations=None,
    optimizer_config=None):
  """Clone a `Model` and build/compile it with the same settings used before.

  This function can be run in the same graph or in a separate graph from the
  model. When using a separate graph, `in_place_reset` must be `False`.

  Note that, currently, the clone produced from this function may not work
  with TPU DistributionStrategy. Try at your own risk.

  Args:
    model: `tf.keras.Model` object. Can be Functional, Sequential, or
      sub-classed.
    input_tensors: Optional list or dictionary of input tensors to build the
      model upon. If not provided, placeholders will be created.
    target_tensors: Optional list of target tensors for compiling the model.
      If not provided, placeholders will be created.
    custom_objects: Optional dictionary mapping string names to custom classes
      or functions.
    compile_clone: Boolean, whether to compile model clone (default `True`).
    in_place_reset: Boolean, whether to reset the model in place. Only used if
      the model is a subclassed model. In the case of a subclassed model,
      this argument must be set to `True` (default `False`). To restore the
      original model, use the function
      `in_place_subclassed_model_state_restoration(model)`.
    optimizer_iterations: An iterations variable that will be incremented by
      the optimizer if the clone is compiled. This argument is used when a
      Keras model is cloned into an Estimator model function, because
      Estimators create their own global step variable.
    optimizer_config: Optimizer config dictionary or list of dictionary
      returned from `get_config()`. This argument should be defined if
      `clone_and_build_model` is called in a different graph or session from
      the original model, and the optimizer is an instance of `OptimizerV2`.

  Returns:
    Clone of the model.

  Raises:
    ValueError: Cloning fails in the following cases
      - cloning a subclassed model with `in_place_reset` set to False.
      - compiling the clone when the original model has not been compiled.
  """
  # Grab optimizer now, as we reset-in-place for subclassed models, but
  # want to maintain access to the original optimizer.
  orig_optimizer = model.optimizer
  if compile_clone and not orig_optimizer:
    raise ValueError(
        'Error when cloning model: compile_clone was set to True, but the '
        'original model has not been compiled.')
  if model._is_graph_network or isinstance(model, Sequential):
    if custom_objects:
      with CustomObjectScope(custom_objects):
        clone = clone_model(model, input_tensors=input_tensors)
    else:
      clone = clone_model(model, input_tensors=input_tensors)
    if all([isinstance(clone, Sequential),
            not clone._is_graph_network,
            getattr(model, '_build_input_shape', None) is not None]):
      # Set model inputs to build the model and add input/output properties.
      # TODO(kathywu): Add multiple placeholders to handle edge case where
      # sequential model has multiple inputs.
      clone._set_inputs(
          K.placeholder(model._build_input_shape, dtype=model.inputs[0].dtype))
  else:
    try:
      # Prefer cloning the model if serialization/deserialization logic is
      # implemented for the subclassed model.
      clone = model.__class__.from_config(model.get_config())
    except NotImplementedError:
      logging.warning('This model is a subclassed model. Please implement '
                      '`get_config` and `from_config` to better support '
                      'cloning the model.')
      if not in_place_reset:
        raise ValueError(
            'This model is a subclassed model. '
            'Such a model cannot be cloned, but there is a workaround where '
            'the model is reset in-place. To use this, please set the argument '
            '`in_place_reset` to `True`. This will reset the attributes in the '
            'original model. To restore the attributes, call '
            '`in_place_subclassed_model_state_restoration(model)`.')
      clone = model
      _in_place_subclassed_model_reset(clone)
      if input_tensors is not None:
        if isinstance(input_tensors, (list, tuple)) and len(input_tensors) == 1:
          input_tensors = input_tensors[0]
        clone._set_inputs(input_tensors)
  if compile_clone:
    if isinstance(orig_optimizer, optimizers.TFOptimizer):
      # v1 TF optimizers are wrapped rather than reconstructed from config.
      optimizer = optimizers.TFOptimizer(
          orig_optimizer.optimizer, optimizer_iterations)
      K.track_tf_optimizer(optimizer)
    else:
      if not isinstance(orig_optimizer, (tuple, list)):
        orig_optimizer = [orig_optimizer]
      if optimizer_config is None:
        optimizer = [
            opt.__class__.from_config(opt.get_config())
            for opt in orig_optimizer
        ]
      elif isinstance(optimizer_config, dict):
        optimizer = [orig_optimizer[0].__class__.from_config(optimizer_config)]
      else:
        # optimizer config is list of dict, same order as orig_optimizer.
        optimizer = [
            opt.__class__.from_config(opt_config)
            for (opt, opt_config) in zip(orig_optimizer, optimizer_config)
        ]
      if optimizer_iterations is not None:
        for opt in optimizer:
          opt.iterations = optimizer_iterations
      if len(optimizer) == 1:
        optimizer = optimizer[0]
    clone.compile(
        optimizer,
        model.loss,
        metrics=metrics_module.clone_metrics(model._compile_metrics),
        loss_weights=model.loss_weights,
        sample_weight_mode=model.sample_weight_mode,
        weighted_metrics=metrics_module.clone_metrics(
            model._compile_weighted_metrics),
        target_tensors=target_tensors)
  return clone
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/models.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import profiler
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as tf_summary
from tensorflow.python.training import saver
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=['keras.callbacks.TensorBoard'])
class TensorBoard(callbacks.Callback):
  # pylint: disable=line-too-long
  """Enable visualizations for TensorBoard.
  TensorBoard is a visualization tool provided with TensorFlow.
  This callback logs events for TensorBoard, including:
  * Metrics summary plots
  * Training graph visualization
  * Activation histograms
  * Sampled profiling
  If you have installed TensorFlow with pip, you should be able
  to launch TensorBoard from the command line:
  ```sh
  tensorboard --logdir=path_to_your_logs
  ```
  You can find more information about TensorBoard
  [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
  Arguments:
    log_dir: the path of the directory where to save the log files to be
      parsed by TensorBoard.
    histogram_freq: frequency (in epochs) at which to compute activation and
      weight histograms for the layers of the model. If set to 0, histograms
      won't be computed. Validation data (or split) must be specified for
      histogram visualizations.
    write_graph: whether to visualize the graph in TensorBoard. The log file
      can become quite large when write_graph is set to True.
    write_grads: whether to visualize gradient histograms in TensorBoard.
      `histogram_freq` must be greater than 0.
    batch_size: size of batch of inputs to feed to the network for histograms
      computation.
    write_images: whether to write model weights to visualize as image in
      TensorBoard.
    embeddings_freq: frequency (in epochs) at which selected embedding layers
      will be saved. If set to 0, embeddings won't be computed. Data to be
      visualized in TensorBoard's Embedding tab must be passed as
      `embeddings_data`.
    embeddings_layer_names: a list of names of layers to keep eye on. If None
      or empty list all the embedding layer will be watched.
    embeddings_metadata: a dictionary which maps layer name to a file name in
      which metadata for this embedding layer is saved. See the
      [details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
      about metadata files format. In case if the same metadata file is
      used for all embedding layers, string can be passed.
    embeddings_data: data to be embedded at layers specified in
      `embeddings_layer_names`. Numpy array (if the model has a single input)
      or list of Numpy arrays (if the model has multiple inputs). Learn [more
      about
      embeddings](https://www.tensorflow.org/programmers_guide/embedding)
    update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
      writes the losses and metrics to TensorBoard after each batch. The same
      applies for `'epoch'`. If using an integer, let's say `1000`, the
      callback will write the metrics and losses to TensorBoard every 1000
      samples. Note that writing too frequently to TensorBoard can slow down
      your training.
    profile_batch: Profile the batch to sample compute characteristics. By
      default, it will profile the second batch. Set profile_batch=0 to
      disable profiling.
  Raises:
    ValueError: If histogram_freq is set and no validation data is provided.
  @compatibility(eager)
  Using the `TensorBoard` callback will work when eager execution is enabled,
  with the restriction that outputting histogram summaries of weights and
  gradients is not supported. Consequently, `histogram_freq` will be ignored.
  @end_compatibility
  """
  # pylint: enable=line-too-long
  def __init__(self,
               log_dir='./logs',
               histogram_freq=0,
               batch_size=32,
               write_graph=True,
               write_grads=False,
               write_images=False,
               embeddings_freq=0,
               embeddings_layer_names=None,
               embeddings_metadata=None,
               embeddings_data=None,
               update_freq='epoch',
               profile_batch=2):
    super(TensorBoard, self).__init__()
    self.log_dir = log_dir
    self.histogram_freq = histogram_freq
    if self.histogram_freq and context.executing_eagerly():
      # Histogram summaries rely on graph-mode fetches; silently degrade in
      # eager mode rather than failing.
      # NOTE: a space was missing between the two implicitly-concatenated
      # string fragments, producing "eagerexecution" in the logged warning.
      logging.warning(
          UserWarning('Weight and gradient histograms not supported for eager '
                      'execution, setting `histogram_freq` to `0`.'))
      self.histogram_freq = 0
    self.merged = None
    self.write_graph = write_graph
    self.write_grads = write_grads
    self.write_images = write_images
    self.batch_size = batch_size
    self._current_batch = 0
    self._total_batches_seen = 0
    self._total_val_batches_seen = 0
    self.embeddings_freq = embeddings_freq
    self.embeddings_layer_names = embeddings_layer_names
    self.embeddings_metadata = embeddings_metadata
    self.embeddings_data = embeddings_data
    # 'batch' is expressed internally as an update interval of 1 sample;
    # 'epoch' and integer sample counts are kept as-is.
    if update_freq == 'batch':
      self.update_freq = 1
    else:
      self.update_freq = update_freq
    self._samples_seen = 0
    self._samples_seen_at_last_write = 0
    # TODO(fishx): Add a link to the full profiler tutorial.
    self._profile_batch = profile_batch
    # One profiler session is running if it is True.
    self._is_profiling = False
    # TensorBoard should only write summaries on the chief when in a
    # Multi-Worker setting.
    self._chief_worker_only = True
  def _init_writer(self, model):
    """Sets file writer."""
    if context.executing_eagerly():
      self.writer = summary_ops_v2.create_file_writer(self.log_dir)
      if not model.run_eagerly and self.write_graph:
        with self.writer.as_default():
          summary_ops_v2.graph(K.get_graph(), step=0)
    elif self.write_graph:
      self.writer = tf_summary.FileWriter(self.log_dir, K.get_graph())
    else:
      self.writer = tf_summary.FileWriter(self.log_dir)
  def _make_histogram_ops(self, model):
    """Defines histogram ops when histogram_freq > 0."""
    # only make histogram summary op if it hasn't already been made
    if self.histogram_freq and self.merged is None:
      for layer in self.model.layers:
        for weight in layer.weights:
          # ':' is not a legal character in summary tags.
          mapped_weight_name = weight.name.replace(':', '_')
          tf_summary.histogram(mapped_weight_name, weight)
          if self.write_images:
            w_img = array_ops.squeeze(weight)
            shape = K.int_shape(w_img)
            if len(shape) == 2:  # dense layer kernel case
              if shape[0] > shape[1]:
                w_img = array_ops.transpose(w_img)
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
            elif len(shape) == 3:  # convnet case
              if K.image_data_format() == 'channels_last':
                # switch to channels_first to display
                # every kernel as a separate image
                w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img,
                                        [shape[0], shape[1], shape[2], 1])
            elif len(shape) == 1:  # bias case
              w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
            else:
              # not possible to handle 3D convnets etc.
              continue
            shape = K.int_shape(w_img)
            # Image summaries require 4-D with 1, 3 or 4 channels.
            assert len(shape) == 4 and shape[-1] in [1, 3, 4]
            tf_summary.image(mapped_weight_name, w_img)
        if self.write_grads:
          for weight in layer.trainable_weights:
            mapped_weight_name = weight.name.replace(':', '_')
            grads = model.optimizer.get_gradients(model.total_loss, weight)
            def is_indexed_slices(grad):
              return type(grad).__name__ == 'IndexedSlices'
            grads = [
                grad.values if is_indexed_slices(grad) else grad
                for grad in grads
            ]
            tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
        if hasattr(layer, 'output'):
          if isinstance(layer.output, list):
            for i, output in enumerate(layer.output):
              tf_summary.histogram('{}_out_{}'.format(layer.name, i), output)
          else:
            tf_summary.histogram('{}_out'.format(layer.name), layer.output)
  def set_model(self, model):
    """Sets Keras model and creates summary ops."""
    self.model = model
    self._init_writer(model)
    # histogram summaries only enabled in graph mode
    if not context.executing_eagerly():
      self._make_histogram_ops(model)
      self.merged = tf_summary.merge_all()
    # If both embedding_freq and embeddings_data are available, we will
    # visualize embeddings.
    if self.embeddings_freq and self.embeddings_data is not None:
      # Avoid circular dependency.
      from tensorflow.python.keras.engine import training_utils  # pylint: disable=g-import-not-at-top
      self.embeddings_data = training_utils.standardize_input_data(
          self.embeddings_data, model.input_names)
      # If embedding_layer_names are not provided, get all of the embedding
      # layers from the model.
      embeddings_layer_names = self.embeddings_layer_names
      if not embeddings_layer_names:
        embeddings_layer_names = [
            layer.name
            for layer in self.model.layers
            if type(layer).__name__ == 'Embedding'
        ]
      self.assign_embeddings = []
      embeddings_vars = {}
      self.batch_id = batch_id = array_ops.placeholder(dtypes.int32)
      self.step = step = array_ops.placeholder(dtypes.int32)
      for layer in self.model.layers:
        if layer.name in embeddings_layer_names:
          embedding_input = self.model.get_layer(layer.name).output
          embedding_size = np.prod(embedding_input.shape[1:])
          embedding_input = array_ops.reshape(embedding_input,
                                              (step, int(embedding_size)))
          shape = (self.embeddings_data[0].shape[0], int(embedding_size))
          embedding = variables.Variable(
              array_ops.zeros(shape), name=layer.name + '_embedding')
          embeddings_vars[layer.name] = embedding
          # Writes each batch of embedding outputs into its slice of the
          # full-dataset embedding variable.
          batch = state_ops.assign(embedding[batch_id:batch_id + step],
                                   embedding_input)
          self.assign_embeddings.append(batch)
      self.saver = saver.Saver(list(embeddings_vars.values()))
      # Create embeddings_metadata dictionary
      if isinstance(self.embeddings_metadata, str):
        # A single metadata file is shared by all embedding layers.
        embeddings_metadata = {
            layer_name: self.embeddings_metadata
            for layer_name in embeddings_vars.keys()
        }
      else:
        # If embedding_metadata is already a dictionary
        embeddings_metadata = self.embeddings_metadata
      try:
        from tensorboard.plugins import projector
      except ImportError:
        # NOTE: the original message contained a stray trailing double-quote
        # ('complete."'); fixed here.
        raise ImportError('Failed to import TensorBoard. Please make sure that '
                          'TensorBoard integration is complete.')
      # TODO(psv): Add integration tests to test embedding visualization
      # with TensorBoard callback. We are unable to write a unit test for this
      # because TensorBoard dependency assumes TensorFlow package is installed.
      config = projector.ProjectorConfig()
      for layer_name, tensor in embeddings_vars.items():
        embedding = config.embeddings.add()
        embedding.tensor_name = tensor.name
        if (embeddings_metadata is not None and
            layer_name in embeddings_metadata):
          embedding.metadata_path = embeddings_metadata[layer_name]
      projector.visualize_embeddings(self.writer, config)
  def _fetch_callback(self, summary):
    """Writes a fetched histogram summary, stepped by validation batch."""
    self.writer.add_summary(summary, self._total_val_batches_seen)
    self._total_val_batches_seen += 1
  def _write_custom_summaries(self, step, logs=None):
    """Writes metrics out as custom scalar summaries.
    Arguments:
      step: the global step to use for TensorBoard.
      logs: dict. Keys are scalar summary names, values are
        NumPy scalars.
    """
    logs = logs or {}
    if context.executing_eagerly():
      # use v2 summary ops
      with self.writer.as_default(), summary_ops_v2.always_record_summaries():
        for name, value in logs.items():
          if isinstance(value, np.ndarray):
            value = value.item()
          summary_ops_v2.scalar(name, value, step=step)
    else:
      # use FileWriter from v1 summary
      for name, value in logs.items():
        if isinstance(value, np.ndarray):
          value = value.item()
        summary = tf_summary.Summary()
        summary_value = summary.value.add()
        summary_value.simple_value = value
        summary_value.tag = name
        self.writer.add_summary(summary, step)
    self.writer.flush()
  def on_batch_end(self, batch, logs=None):
    """Writes scalar summaries for metrics on every training batch.
    Performs profiling if current batch is in profiler_batches.
    """
    # Don't output batch_size and batch number as TensorBoard summaries
    logs = logs or {}
    self._samples_seen += logs.get('size', 1)
    samples_seen_since = self._samples_seen - self._samples_seen_at_last_write
    if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq:
      batch_logs = {('batch_' + k): v
                    for k, v in logs.items()
                    if k not in ['batch', 'size', 'num_steps']}
      self._write_custom_summaries(self._total_batches_seen, batch_logs)
      self._samples_seen_at_last_write = self._samples_seen
    self._total_batches_seen += 1
    if self._is_profiling:
      # A profiler session started on a previous batch ends here, so exactly
      # one batch is profiled.
      profiler.save(self.log_dir, profiler.stop())
      self._is_profiling = False
    elif (not self._is_profiling and
          self._total_batches_seen == self._profile_batch - 1):
      profiler.start()
      self._is_profiling = True
  def on_train_begin(self, logs=None):
    """Starts profiling immediately when the first batch is the target."""
    if self._profile_batch == 1:
      profiler.start()
      self._is_profiling = True
  def on_epoch_begin(self, epoch, logs=None):
    """Add histogram op to Model eval_function callbacks, reset batch count."""
    # check if histogram summary should be run for this epoch
    if self.histogram_freq and epoch % self.histogram_freq == 0:
      self._epoch = epoch
      # pylint: disable=protected-access
      # add the histogram summary op if it should run this epoch
      self.model._make_test_function()
      if self.merged not in self.model.test_function.fetches:
        self.model.test_function.fetches.append(self.merged)
        self.model.test_function.fetch_callbacks[
            self.merged] = self._fetch_callback
      # pylint: enable=protected-access
  def on_epoch_end(self, epoch, logs=None):
    """Checks if summary ops should run next epoch, logs scalar summaries."""
    # don't output batch_size and
    # batch number as TensorBoard summaries
    logs = {('epoch_' + k): v
            for k, v in logs.items()
            if k not in ['batch', 'size', 'num_steps']}
    if self.update_freq == 'epoch':
      step = epoch
    else:
      step = self._samples_seen
    self._write_custom_summaries(step, logs)
    # pop the histogram summary op after each epoch
    if self.histogram_freq:
      # pylint: disable=protected-access
      if self.merged in self.model.test_function.fetches:
        self.model.test_function.fetches.remove(self.merged)
      if self.merged in self.model.test_function.fetch_callbacks:
        self.model.test_function.fetch_callbacks.pop(self.merged)
      # pylint: enable=protected-access
    if self.embeddings_data is None and self.embeddings_freq:
      raise ValueError('To visualize embeddings, embeddings_data must '
                       'be provided.')
    if self.embeddings_freq and self.embeddings_data is not None:
      if epoch % self.embeddings_freq == 0:
        # We need a second forward-pass here because we're passing
        # the `embeddings_data` explicitly. This design allows to pass
        # arbitrary data as `embeddings_data` and results from the fact
        # that we need to know the size of the `tf.Variable`s which
        # hold the embeddings in `set_model`. At this point, however,
        # the `validation_data` is not yet set.
        embeddings_data = self.embeddings_data
        n_samples = embeddings_data[0].shape[0]
        i = 0
        sess = K.get_session()
        while i < n_samples:
          step = min(self.batch_size, n_samples - i)
          batch = slice(i, i + step)
          if isinstance(self.model.input, list):
            feed_dict = {
                model_input: embeddings_data[idx][batch]
                for idx, model_input in enumerate(self.model.input)
            }
          else:
            feed_dict = {self.model.input: embeddings_data[0][batch]}
          feed_dict.update({self.batch_id: i, self.step: step})
          if not isinstance(K.learning_phase(), int):
            # Run the forward pass in inference mode.
            feed_dict[K.learning_phase()] = False
          sess.run(self.assign_embeddings, feed_dict=feed_dict)
          self.saver.save(sess,
                          os.path.join(self.log_dir, 'keras_embedding.ckpt'),
                          epoch)
          i += self.batch_size
  def on_train_end(self, logs=None):
    """Stops any in-flight profiler session and closes the file writer."""
    if self._is_profiling:
      profiler.save(self.log_dir, profiler.stop())
      self._is_profiling = False
    self.writer.close()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/callbacks_v1.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.keras import callbacks_v1
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training import adam
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class TestTensorBoardV1(test.TestCase):
  """Integration and unit tests for the v1 (graph-mode) TensorBoard callback.

  The first tests drive full `fit`/`fit_generator` runs and only assert that
  the log directory was written; the later tests stub out the file writer to
  inspect exactly which summaries are emitted at which steps.
  """
  @test_util.run_deprecated_v1
  def test_TensorBoard(self):
    # End-to-end smoke test: fit a small Sequential model through every
    # fit/fit_generator variant with histograms, images and grads enabled.
    np.random.seed(1337)
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    def data_generator(train):
      # Infinite generator cycling through train or test batches.
      if train:
        max_batch_index = len(x_train) // BATCH_SIZE
      else:
        max_batch_index = len(x_test) // BATCH_SIZE
      i = 0
      while 1:
        if train:
          yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                 y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
        else:
          yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                 y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
        i += 1
        i %= max_batch_index
    # case: Sequential
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      # non_trainable_weights: moving_variance, moving_mean
      model.add(keras.layers.BatchNormalization())
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      tsb = callbacks_v1.TensorBoard(
          log_dir=temp_dir,
          histogram_freq=1,
          write_images=True,
          write_grads=True,
          batch_size=5)
      cbks = [tsb]
      # fit with validation data
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=3,
          verbose=0)
      # fit with validation data and accuracy
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)
      # fit generator with validation data
      model.fit_generator(
          data_generator(True),
          len(x_train),
          epochs=2,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          verbose=0)
      # fit generator without validation data
      # histogram_freq must be zero
      tsb.histogram_freq = 0
      model.fit_generator(
          data_generator(True),
          len(x_train),
          epochs=2,
          callbacks=cbks,
          verbose=0)
      # fit generator with validation data and accuracy
      tsb.histogram_freq = 1
      model.fit_generator(
          data_generator(True),
          len(x_train),
          epochs=2,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          verbose=0)
      # fit generator without validation data and accuracy
      tsb.histogram_freq = 0
      model.fit_generator(
          data_generator(True), len(x_train), epochs=2, callbacks=cbks)
      assert os.path.exists(temp_dir)
  @test_util.run_deprecated_v1
  def test_TensorBoard_multi_input_output(self):
    # Same smoke test for a functional model with two inputs and two outputs.
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
    with self.cached_session():
      filepath = os.path.join(tmpdir, 'logs')
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      def data_generator(train):
        if train:
          max_batch_index = len(x_train) // BATCH_SIZE
        else:
          max_batch_index = len(x_test) // BATCH_SIZE
        i = 0
        while 1:
          if train:
            # simulate multi-input/output models
            yield ([x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
                   [y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
          else:
            yield ([x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
                   [y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
          i += 1
          i %= max_batch_index
      inp1 = keras.Input((INPUT_DIM,))
      inp2 = keras.Input((INPUT_DIM,))
      inp = keras.layers.add([inp1, inp2])
      hidden = keras.layers.Dense(2, activation='relu')(inp)
      hidden = keras.layers.Dropout(0.1)(hidden)
      output1 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
      output2 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
      model = keras.models.Model([inp1, inp2], [output1, output2])
      model.compile(loss='categorical_crossentropy',
                    optimizer='sgd',
                    metrics=['accuracy'])
      # we must generate new callbacks for each test, as they aren't stateless
      def callbacks_factory(histogram_freq):
        return [
            callbacks_v1.TensorBoard(
                log_dir=filepath,
                histogram_freq=histogram_freq,
                write_images=True,
                write_grads=True,
                batch_size=5)
        ]
      # fit without validation data
      model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
                callbacks=callbacks_factory(histogram_freq=0), epochs=3)
      # fit with validation data and accuracy
      model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
                validation_data=([x_test] * 2, [y_test] * 2),
                callbacks=callbacks_factory(histogram_freq=1), epochs=2)
      # fit generator without validation data
      model.fit_generator(data_generator(True), len(x_train), epochs=2,
                          callbacks=callbacks_factory(histogram_freq=0))
      # fit generator with validation data and accuracy
      model.fit_generator(data_generator(True), len(x_train), epochs=2,
                          validation_data=([x_test] * 2, [y_test] * 2),
                          callbacks=callbacks_factory(histogram_freq=1))
      assert os.path.isdir(filepath)
  @test_util.run_deprecated_v1
  def test_Tensorboard_histogram_summaries_in_test_function(self):
    # Uses a stub writer to verify histogram (merged) summaries are emitted
    # once per validation step of every epoch.
    class FileWriterStub(object):
      # Minimal stand-in for tf.summary.FileWriter that records the global
      # steps at which multi-value (histogram) summaries arrive.
      def __init__(self, logdir, graph=None):
        self.logdir = logdir
        self.graph = graph
        self.steps_seen = []
      def add_summary(self, summary, global_step):
        summary_obj = summary_pb2.Summary()
        # ensure a valid Summary proto is being sent
        if isinstance(summary, bytes):
          summary_obj.ParseFromString(summary)
        else:
          assert isinstance(summary, summary_pb2.Summary)
          summary_obj = summary
        # keep track of steps seen for the merged_summary op,
        # which contains the histogram summaries
        if len(summary_obj.value) > 1:
          self.steps_seen.append(global_step)
      def flush(self):
        pass
      def close(self):
        pass
    def _init_writer(obj, _):
      # Patched over TensorBoard._init_writer to install the stub.
      obj.writer = FileWriterStub(obj.log_dir)
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      # non_trainable_weights: moving_variance, moving_mean
      model.add(keras.layers.BatchNormalization())
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      callbacks_v1.TensorBoard._init_writer = _init_writer
      tsb = callbacks_v1.TensorBoard(
          log_dir=tmpdir,
          histogram_freq=1,
          write_images=True,
          write_grads=True,
          batch_size=5)
      cbks = [tsb]
      # fit with validation data
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=3,
          verbose=0)
      # 3 epochs x 2 validation batches -> histogram steps 0..5.
      self.assertAllEqual(tsb.writer.steps_seen, [0, 1, 2, 3, 4, 5])
  @test_util.run_deprecated_v1
  def test_Tensorboard_histogram_summaries_with_generator(self):
    # histogram_freq > 0 requires validation data; a validation generator
    # works only when validation_steps is given.
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
    def generator():
      x = np.random.randn(10, 100).astype(np.float32)
      y = np.random.randn(10, 10).astype(np.float32)
      while True:
        yield x, y
    with self.cached_session():
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=10, num_classes=10, input_dim=100)
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      tsb = callbacks_v1.TensorBoard(
          log_dir=tmpdir,
          histogram_freq=1,
          write_images=True,
          write_grads=True,
          batch_size=5)
      cbks = [tsb]
      # fit with validation generator
      model.fit_generator(
          generator(),
          steps_per_epoch=2,
          epochs=2,
          validation_data=generator(),
          validation_steps=2,
          callbacks=cbks,
          verbose=0)
      with self.assertRaises(ValueError):
        # fit with validation generator but no
        # validation_steps
        model.fit_generator(
            generator(),
            steps_per_epoch=2,
            epochs=2,
            validation_data=generator(),
            callbacks=cbks,
            verbose=0)
      self.assertTrue(os.path.exists(tmpdir))
  def test_TensorBoard_with_ReduceLROnPlateau(self):
    # TensorBoard must coexist with other callbacks that mutate training
    # state (here: learning-rate reduction on plateau).
    with self.cached_session():
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss', factor=0.5, patience=4, verbose=1),
          callbacks_v1.TensorBoard(log_dir=temp_dir)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)
      assert os.path.exists(temp_dir)
  @test_util.run_deprecated_v1
  def test_Tensorboard_batch_logging(self):
    # update_freq='batch' must log one scalar per batch, tagged 'batch_*'.
    class FileWriterStub(object):
      # Records every scalar summary (value, tag, step) it receives.
      def __init__(self, logdir, graph=None):
        self.logdir = logdir
        self.graph = graph
        self.batches_logged = []
        self.summary_values = []
        self.summary_tags = []
      def add_summary(self, summary, step):
        self.summary_values.append(summary.value[0].simple_value)
        self.summary_tags.append(summary.value[0].tag)
        self.batches_logged.append(step)
      def flush(self):
        pass
      def close(self):
        pass
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='batch')
    tb_cbk.writer = FileWriterStub(temp_dir)
    for batch in range(5):
      tb_cbk.on_batch_end(batch, {'acc': batch})
    self.assertEqual(tb_cbk.writer.batches_logged, [0, 1, 2, 3, 4])
    self.assertEqual(tb_cbk.writer.summary_values, [0., 1., 2., 3., 4.])
    self.assertEqual(tb_cbk.writer.summary_tags, ['batch_acc'] * 5)
  @test_util.run_deprecated_v1
  def test_Tensorboard_epoch_and_batch_logging(self):
    # Batch-level and epoch-level logs must carry distinct tag prefixes.
    class FileWriterStub(object):
      # Splits incoming summaries into batch vs. epoch buckets by tag prefix.
      def __init__(self, logdir, graph=None):
        self.logdir = logdir
        self.graph = graph
      def add_summary(self, summary, step):
        if 'batch_' in summary.value[0].tag:
          self.batch_summary = (step, summary)
        elif 'epoch_' in summary.value[0].tag:
          self.epoch_summary = (step, summary)
      def flush(self):
        pass
      def close(self):
        pass
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='batch')
    tb_cbk.writer = FileWriterStub(temp_dir)
    tb_cbk.on_batch_end(0, {'acc': 5.0})
    tb_cbk.on_train_end()
    batch_step, batch_summary = tb_cbk.writer.batch_summary
    self.assertEqual(batch_step, 0)
    self.assertEqual(batch_summary.value[0].simple_value, 5.0)
    tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='epoch')
    tb_cbk.writer = FileWriterStub(temp_dir)
    tb_cbk.on_epoch_end(0, {'acc': 10.0})
    tb_cbk.on_train_end()
    epoch_step, epoch_summary = tb_cbk.writer.epoch_summary
    self.assertEqual(epoch_step, 0)
    self.assertEqual(epoch_summary.value[0].simple_value, 10.0)
  @test_util.run_in_graph_and_eager_modes
  def test_Tensorboard_eager(self):
    # Eager-mode smoke test using the v1 optimizer wrapper.
    temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
    model.compile(
        loss='binary_crossentropy',
        optimizer=adam.AdamOptimizer(0.01),
        metrics=['accuracy'])
    cbks = [callbacks_v1.TensorBoard(log_dir=temp_dir)]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=0)
    self.assertTrue(os.path.exists(temp_dir))
  @test_util.run_deprecated_v1
  def test_TensorBoard_update_freq(self):
    # Verifies 'epoch', 'batch' and integer (per-N-samples) update modes.
    class FileWriterStub(object):
      # Collects (step, summary) pairs, bucketed by tag prefix.
      def __init__(self, logdir, graph=None):
        self.logdir = logdir
        self.graph = graph
        self.batch_summaries = []
        self.epoch_summaries = []
      def add_summary(self, summary, step):
        if 'batch_' in summary.value[0].tag:
          self.batch_summaries.append((step, summary))
        elif 'epoch_' in summary.value[0].tag:
          self.epoch_summaries.append((step, summary))
      def flush(self):
        pass
      def close(self):
        pass
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    # Epoch mode
    tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='epoch')
    tb_cbk.writer = FileWriterStub(temp_dir)
    tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
    self.assertEqual(tb_cbk.writer.batch_summaries, [])
    tb_cbk.on_epoch_end(0, {'acc': 10.0, 'size': 1})
    self.assertEqual(len(tb_cbk.writer.epoch_summaries), 1)
    tb_cbk.on_train_end()
    # Batch mode
    tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='batch')
    tb_cbk.writer = FileWriterStub(temp_dir)
    tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
    self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
    tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
    self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
    self.assertFalse(tb_cbk.writer.epoch_summaries)
    tb_cbk.on_train_end()
    # Integer mode
    # With update_freq=20 and batches of 10 samples, a summary is written
    # every second batch.
    tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq=20)
    tb_cbk.writer = FileWriterStub(temp_dir)
    tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
    self.assertFalse(tb_cbk.writer.batch_summaries)
    tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
    self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
    tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
    self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
    tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
    self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
    tb_cbk.on_batch_end(0, {'acc': 10.0, 'size': 10})
    self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
    self.assertFalse(tb_cbk.writer.epoch_summaries)
    tb_cbk.on_train_end()
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/callbacks_v1_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for unit-testing Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_v2
from tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
def get_test_data(train_samples,
                  test_samples,
                  input_shape,
                  num_classes,
                  random_seed=None):
  """Generates test data to train a model on.

  Each class gets a fixed random "template" point; every sample is its
  class template plus unit-variance Gaussian noise, so the classes are
  linearly separable in expectation.

  Arguments:
    train_samples: Integer, how many training samples to generate.
    test_samples: Integer, how many test samples to generate.
    input_shape: Tuple of integers, shape of the inputs.
    num_classes: Integer, number of classes for the data and targets.
    random_seed: Integer, random seed used by numpy to generate data.
  Returns:
    A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
  """
  if random_seed is not None:
    np.random.seed(random_seed)
  total = train_samples + test_samples
  # One template point per class, spread out proportionally to num_classes.
  class_templates = 2 * num_classes * np.random.random(
      (num_classes,) + input_shape)
  y = np.random.randint(0, num_classes, size=(total,))
  x = np.zeros((total,) + input_shape, dtype=np.float32)
  for sample_idx, label in enumerate(y):
    noise = np.random.normal(loc=0, scale=1., size=input_shape)
    x[sample_idx] = class_templates[label] + noise
  # First train_samples rows form the training split, the rest are test.
  train_split = (x[:train_samples], y[:train_samples])
  test_split = (x[train_samples:], y[train_samples:])
  return (train_split, test_split)
@test_util.disable_cudnn_autotune
def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None,
               input_data=None, expected_output=None,
               expected_output_dtype=None, expected_output_shape=None,
               validate_training=True, adapt_data=None):
  """Test routine for a layer with a single input and single output.

  Exercises the layer through the functional API and the Sequential API,
  checking output dtype/shape inference, serialization round-trips, weight
  get/set, and (optionally) one training step.

  Arguments:
    layer_cls: Layer class object.
    kwargs: Optional dictionary of keyword arguments for instantiating the
      layer.
    input_shape: Input shape tuple.
    input_dtype: Data type of the input data.
    input_data: Numpy array of input data.
    expected_output: Numpy array of the expected output.
    expected_output_dtype: Data type expected for the output.
    expected_output_shape: Shape tuple for the expected shape of the output.
    validate_training: Whether to attempt to validate training on this layer.
      This might be set to False for non-differentiable layers that output
      string or integer values.
    adapt_data: Optional data for an 'adapt' call. If None, adapt() will not
      be tested for this layer. This is only relevant for PreprocessingLayers.

  Returns:
    The output data (Numpy array) returned by the layer, for additional
    checks to be done by the calling code.

  Raises:
    ValueError: if `input_shape is None`.
  """
  if input_data is None:
    if input_shape is None:
      raise ValueError('input_shape is None')
    if not input_dtype:
      input_dtype = 'float32'
    input_data_shape = list(input_shape)
    # Replace unknown (None) dimensions with a small random concrete size.
    for i, e in enumerate(input_data_shape):
      if e is None:
        input_data_shape[i] = np.random.randint(1, 4)
    input_data = 10 * np.random.random(input_data_shape)
    # Center float data around zero so both signs are exercised.
    if input_dtype[:5] == 'float':
      input_data -= 0.5
    input_data = input_data.astype(input_dtype)
  elif input_shape is None:
    input_shape = input_data.shape
  if input_dtype is None:
    input_dtype = input_data.dtype
  if expected_output_dtype is None:
    expected_output_dtype = input_dtype
  # instantiation
  kwargs = kwargs or {}
  layer = layer_cls(**kwargs)
  # Test adapt, if data was passed.
  if adapt_data is not None:
    layer.adapt(adapt_data)
  # test get_weights , set_weights at layer level
  weights = layer.get_weights()
  layer.set_weights(weights)
  # test and instantiation from weights
  if 'weights' in tf_inspect.getargspec(layer_cls.__init__):
    kwargs['weights'] = weights
    layer = layer_cls(**kwargs)
  # test in functional API
  x = keras.layers.Input(shape=input_shape[1:], dtype=input_dtype)
  y = layer(x)
  if keras.backend.dtype(y) != expected_output_dtype:
    raise AssertionError('When testing layer %s, for input %s, found output '
                         'dtype=%s but expected to find %s.\nFull kwargs: %s' %
                         (layer_cls.__name__,
                          x,
                          keras.backend.dtype(y),
                          expected_output_dtype,
                          kwargs))
  def assert_shapes_equal(expected, actual):
    """Asserts that the output shape from the layer matches the actual shape.

    Treats None dimensions in `expected` as wildcards; unwraps Dimension
    objects so comparisons work in both V1 and V2 shape representations.
    """
    if len(expected) != len(actual):
      raise AssertionError(
          'When testing layer %s, for input %s, found output_shape='
          '%s but expected to find %s.\nFull kwargs: %s' %
          (layer_cls.__name__, x, actual, expected, kwargs))
    for expected_dim, actual_dim in zip(expected, actual):
      if isinstance(expected_dim, tensor_shape.Dimension):
        expected_dim = expected_dim.value
      if isinstance(actual_dim, tensor_shape.Dimension):
        actual_dim = actual_dim.value
      if expected_dim is not None and expected_dim != actual_dim:
        raise AssertionError(
            'When testing layer %s, for input %s, found output_shape='
            '%s but expected to find %s.\nFull kwargs: %s' %
            (layer_cls.__name__, x, actual, expected, kwargs))
  if expected_output_shape is not None:
    assert_shapes_equal(tensor_shape.TensorShape(expected_output_shape),
                        y.shape)
  # check shape inference
  model = keras.models.Model(x, y)
  computed_output_shape = tuple(
      layer.compute_output_shape(
          tensor_shape.TensorShape(input_shape)).as_list())
  computed_output_signature = layer.compute_output_signature(
      tensor_spec.TensorSpec(shape=input_shape, dtype=input_dtype))
  actual_output = model.predict(input_data)
  actual_output_shape = actual_output.shape
  assert_shapes_equal(computed_output_shape, actual_output_shape)
  assert_shapes_equal(computed_output_signature.shape, actual_output_shape)
  if computed_output_signature.dtype != actual_output.dtype:
    raise AssertionError(
        'When testing layer %s, for input %s, found output_dtype='
        '%s but expected to find %s.\nFull kwargs: %s' %
        (layer_cls.__name__, x, actual_output.dtype,
         computed_output_signature.dtype, kwargs))
  if expected_output is not None:
    np.testing.assert_allclose(actual_output, expected_output,
                               rtol=1e-3, atol=1e-6)
  # test serialization, weight setting at model level
  model_config = model.get_config()
  recovered_model = keras.models.Model.from_config(model_config)
  if model.weights:
    weights = model.get_weights()
    recovered_model.set_weights(weights)
    output = recovered_model.predict(input_data)
    np.testing.assert_allclose(output, actual_output, rtol=1e-3, atol=1e-6)
  # test training mode (e.g. useful for dropout tests)
  # Rebuild the model to avoid the graph being reused between predict() and
  # train_on_batch(). See b/120160788 for more details. This should be
  # mitigated after 2.0.
  if validate_training:
    model = keras.models.Model(x, layer(x))
    if _thread_local_data.run_eagerly is not None:
      model.compile(
          'rmsprop',
          'mse',
          weighted_metrics=['acc'],
          run_eagerly=should_run_eagerly())
    else:
      model.compile('rmsprop', 'mse', weighted_metrics=['acc'])
    model.train_on_batch(input_data, actual_output)
  # test as first layer in Sequential API
  layer_config = layer.get_config()
  layer_config['batch_input_shape'] = input_shape
  layer = layer.__class__.from_config(layer_config)
  # Test adapt, if data was passed.
  if adapt_data is not None:
    layer.adapt(adapt_data)
  model = keras.models.Sequential()
  model.add(layer)
  actual_output = model.predict(input_data)
  actual_output_shape = actual_output.shape
  for expected_dim, actual_dim in zip(computed_output_shape,
                                      actual_output_shape):
    if expected_dim is not None:
      if expected_dim != actual_dim:
        raise AssertionError(
            'When testing layer %s **after deserialization**, '
            'for input %s, found output_shape='
            '%s but expected to find inferred shape %s.\nFull kwargs: %s' %
            (layer_cls.__name__,
             x,
             actual_output_shape,
             computed_output_shape,
             kwargs))
  if expected_output is not None:
    np.testing.assert_allclose(actual_output, expected_output,
                               rtol=1e-3, atol=1e-6)
  # test serialization, weight setting at model level
  model_config = model.get_config()
  recovered_model = keras.models.Sequential.from_config(model_config)
  if model.weights:
    weights = model.get_weights()
    recovered_model.set_weights(weights)
    output = recovered_model.predict(input_data)
    np.testing.assert_allclose(output, actual_output, rtol=1e-3, atol=1e-6)
  # for further checks in the caller function
  return actual_output
# Per-thread test configuration (model type, eager-vs-graph execution, and
# tf.function usage) so concurrently running tests don't clobber each other.
# NOTE(review): these defaults are only set on the importing thread; other
# threads see AttributeError until a scope sets the field — confirm intended.
_thread_local_data = threading.local()
_thread_local_data.model_type = None
_thread_local_data.run_eagerly = None
_thread_local_data.experimental_run_tf_function = None
@tf_contextlib.contextmanager
def model_type_scope(value):
  """Provides a scope within which the model type to test is equal to `value`.

  The model type gets restored to its original value upon exiting the scope.

  Arguments:
    value: model type value

  Yields:
    The provided value.
  """
  # Save the previous value so scopes can nest safely.
  previous_value = _thread_local_data.model_type
  try:
    _thread_local_data.model_type = value
    yield value
  finally:
    # Restore model type to initial value.
    _thread_local_data.model_type = previous_value
@tf_contextlib.contextmanager
def run_eagerly_scope(value):
  """Provides a scope within which we compile models to run eagerly or not.

  The boolean gets restored to its original value upon exiting the scope.

  Arguments:
    value: Bool specifying if we should run models eagerly in the active test.
      Should be True or False.

  Yields:
    The provided value.
  """
  # Save the previous value so scopes can nest safely.
  previous_value = _thread_local_data.run_eagerly
  try:
    _thread_local_data.run_eagerly = value
    yield value
  finally:
    # Restore the run-eagerly flag to its initial value.
    _thread_local_data.run_eagerly = previous_value
def should_run_eagerly():
  """Returns whether the models we are testing should be run eagerly."""
  flag = _thread_local_data.run_eagerly
  if flag is None:
    raise ValueError('Cannot call `should_run_eagerly()` outside of a '
                     '`run_eagerly_scope()` or `run_all_keras_modes` '
                     'decorator.')
  # Even when requested, eager execution must actually be enabled.
  return flag and context.executing_eagerly()
@tf_contextlib.contextmanager
def experimental_run_tf_function_scope(value):
  """Provides a scope within which we compile models to run with distribution.

  The boolean gets restored to its original value upon exiting the scope.

  Arguments:
    value: Bool specifying if we should run models with default distribution
      in the active test. Should be True or False.

  Yields:
    The provided value.
  """
  # Save the previous value so scopes can nest safely.
  previous_value = _thread_local_data.experimental_run_tf_function
  try:
    _thread_local_data.experimental_run_tf_function = value
    yield value
  finally:
    # Restore the experimental_run_tf_function flag to its initial value.
    _thread_local_data.experimental_run_tf_function = previous_value
def should_run_tf_function():
  """Returns whether the models we are testing should be run distributed."""
  flag = _thread_local_data.experimental_run_tf_function
  if flag is None:
    raise ValueError(
        'Cannot call `should_run_tf_function()` outside of a '
        '`experimental_run_tf_function_scope()` or `run_all_keras_modes` '
        'decorator.')
  # Only meaningful when eager execution is actually enabled.
  return flag and context.executing_eagerly()
def get_model_type():
  """Gets the model type that should be tested."""
  model_type = _thread_local_data.model_type
  if model_type is None:
    raise ValueError('Cannot call `get_model_type()` outside of a '
                     '`model_type_scope()` or `run_with_all_model_types` '
                     'decorator.')
  return model_type
def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None):
  """Returns a small Sequential MLP: one ReLU hidden layer plus a classifier
  head (sigmoid for binary, softmax otherwise)."""
  model = keras.models.Sequential()
  hidden_kwargs = {'activation': 'relu'}
  if input_dim:
    hidden_kwargs['input_dim'] = input_dim
  model.add(keras.layers.Dense(num_hidden, **hidden_kwargs))
  head_activation = 'sigmoid' if num_classes == 1 else 'softmax'
  model.add(keras.layers.Dense(num_classes, activation=head_activation))
  return model
def get_small_functional_mlp(num_hidden, num_classes, input_dim):
  """Returns a small functional-API MLP: one ReLU hidden layer plus a
  classifier head (sigmoid for binary, softmax otherwise)."""
  inputs = keras.Input(shape=(input_dim,))
  hidden = keras.layers.Dense(num_hidden, activation='relu')(inputs)
  head_activation = 'sigmoid' if num_classes == 1 else 'softmax'
  outputs = keras.layers.Dense(num_classes, activation=head_activation)(hidden)
  return keras.Model(inputs, outputs)
class _SmallSubclassMLP(keras.Model):
  """A subclass model based small MLP.

  Layers are created eagerly in `__init__` (contrast with
  `_SmallSubclassMLPCustomBuild`, which creates them in `build()`).
  """
  def __init__(self, num_hidden, num_classes):
    super(_SmallSubclassMLP, self).__init__()
    self.layer_a = keras.layers.Dense(num_hidden, activation='relu')
    # Binary classification uses a single sigmoid unit; otherwise softmax.
    activation = 'sigmoid' if num_classes == 1 else 'softmax'
    self.layer_b = keras.layers.Dense(num_classes, activation=activation)
  def call(self, inputs, **kwargs):
    # Forward pass: hidden layer followed by the classifier head.
    x = self.layer_a(inputs)
    return self.layer_b(x)
class _SmallSubclassMLPCustomBuild(keras.Model):
  """A subclass model small MLP that uses a custom build method.

  Layer creation is deferred to `build()` so it happens only once the input
  shape is known (contrast with `_SmallSubclassMLP`).
  """
  def __init__(self, num_hidden, num_classes):
    super(_SmallSubclassMLPCustomBuild, self).__init__()
    # Layers are created lazily in build().
    self.layer_a = None
    self.layer_b = None
    self.num_hidden = num_hidden
    self.num_classes = num_classes
  def build(self, input_shape):
    self.layer_a = keras.layers.Dense(self.num_hidden, activation='relu')
    # Binary classification uses a single sigmoid unit; otherwise softmax.
    activation = 'sigmoid' if self.num_classes == 1 else 'softmax'
    self.layer_b = keras.layers.Dense(self.num_classes, activation=activation)
  def call(self, inputs, **kwargs):
    # Forward pass: hidden layer followed by the classifier head.
    x = self.layer_a(inputs)
    return self.layer_b(x)
def get_small_subclass_mlp(num_hidden, num_classes):
  """Returns a small subclassed MLP whose layers are created in `__init__`."""
  return _SmallSubclassMLP(num_hidden, num_classes)
def get_small_subclass_mlp_with_custom_build(num_hidden, num_classes):
  """Returns a small subclassed MLP whose layers are created in `build()`."""
  return _SmallSubclassMLPCustomBuild(num_hidden, num_classes)
def get_small_mlp(num_hidden, num_classes, input_dim):
  """Get a small mlp of the model type specified by `get_model_type`."""
  model_type = get_model_type()
  if model_type == 'sequential':
    return get_small_sequential_mlp(num_hidden, num_classes, input_dim)
  elif model_type == 'functional':
    return get_small_functional_mlp(num_hidden, num_classes, input_dim)
  elif model_type == 'subclass':
    return get_small_subclass_mlp(num_hidden, num_classes)
  elif model_type == 'subclass_custom_build':
    return get_small_subclass_mlp_with_custom_build(num_hidden, num_classes)
  raise ValueError('Unknown model type {}'.format(model_type))
class _SubclassModel(keras.Model):
  """A Keras subclass model.

  Wraps an arbitrary sequence of layers applied one after another.
  """
  def __init__(self, layers, *args, **kwargs):
    """Instantiate a model.

    Args:
      layers: a list of layers to be added to the model.
      *args: Model's args
      **kwargs: Model's keyword args, at most one of
        input_tensor -> the input tensor required for ragged/sparse input.
    """
    inputs = kwargs.pop('input_tensor', None)
    super(_SubclassModel, self).__init__(*args, **kwargs)
    # Note that clone and build doesn't support lists of layers in subclassed
    # models. Adding each layer directly here.
    for i, layer in enumerate(layers):
      setattr(self, self._layer_name_for_i(i), layer)
    self.num_layers = len(layers)
    if inputs is not None:
      self._set_inputs(inputs)
  def _layer_name_for_i(self, i):
    # Attribute name under which the i-th layer is stored (see __init__).
    return 'layer{}'.format(i)
  def call(self, inputs, **kwargs):
    # Apply the stored layers sequentially, in insertion order.
    x = inputs
    for i in range(self.num_layers):
      layer = getattr(self, self._layer_name_for_i(i))
      x = layer(x)
    return x
class _SubclassModelCustomBuild(keras.Model):
  """A Keras subclass model that uses a custom build method.

  Layers are produced by a generating callable invoked lazily in `build()`,
  so layer creation happens only once the input shape is known.
  """
  def __init__(self, layer_generating_func, *args, **kwargs):
    """Instantiate a model.

    Args:
      layer_generating_func: zero-argument callable returning an iterable of
        layers; invoked once, in `build()`.
      *args: Model's args.
      **kwargs: Model's keyword args.
    """
    super(_SubclassModelCustomBuild, self).__init__(*args, **kwargs)
    self.all_layers = None
    self._layer_generating_func = layer_generating_func
  def build(self, input_shape):
    # Materialize the layers (replaces a manual append loop with list()).
    self.all_layers = list(self._layer_generating_func())
  def call(self, inputs, **kwargs):
    # Apply the generated layers sequentially.
    x = inputs
    for layer in self.all_layers:
      x = layer(x)
    return x
def get_model_from_layers(layers,
                          input_shape=None,
                          input_dtype=None,
                          name=None,
                          input_ragged=None,
                          input_sparse=None):
  """Builds a model from a sequence of layers.

  The model flavor (subclass, subclass with custom build, sequential, or
  functional) is taken from the active `model_type_scope`.

  Args:
    layers: The layers used to build the network.
    input_shape: Shape tuple of the input or 'TensorShape' instance.
    input_dtype: Datatype of the input.
    name: Name for the model.
    input_ragged: Boolean, whether the input data is a ragged tensor.
    input_sparse: Boolean, whether the input data is a sparse tensor.

  Returns:
    A Keras model.
  """
  model_type = get_model_type()
  if model_type == 'subclass':
    inputs = None
    # Subclassed models need an explicit Input tensor to accept
    # ragged/sparse input (passed through as `input_tensor`).
    if input_ragged or input_sparse:
      inputs = keras.Input(
          shape=input_shape,
          dtype=input_dtype,
          ragged=input_ragged,
          sparse=input_sparse)
    return _SubclassModel(layers, name=name, input_tensor=inputs)
  if model_type == 'subclass_custom_build':
    # Defer layer creation to build() via a generating closure.
    layer_generating_func = lambda: layers
    return _SubclassModelCustomBuild(layer_generating_func, name=name)
  if model_type == 'sequential':
    model = keras.models.Sequential(name=name)
    if input_shape:
      model.add(
          keras.layers.InputLayer(
              input_shape=input_shape,
              dtype=input_dtype,
              ragged=input_ragged,
              sparse=input_sparse))
    for layer in layers:
      model.add(layer)
    return model
  if model_type == 'functional':
    if not input_shape:
      raise ValueError('Cannot create a functional model from layers with no '
                       'input shape.')
    inputs = keras.Input(
        shape=input_shape,
        dtype=input_dtype,
        ragged=input_ragged,
        sparse=input_sparse)
    # Chain the layers from the input to form the functional graph.
    outputs = inputs
    for layer in layers:
      outputs = layer(outputs)
    return keras.Model(inputs, outputs, name=name)
  raise ValueError('Unknown model type {}'.format(model_type))
class _MultiIOSubclassModel(keras.Model):
  """Multi IO Keras subclass model.

  See `get_multi_io_model` for the semantics of the branch arguments: a
  shared input branch makes the model single-input, a shared output branch
  makes it single-output.
  """
  def __init__(self, branch_a, branch_b, shared_input_branch=None,
               shared_output_branch=None):
    super(_MultiIOSubclassModel, self).__init__()
    self._shared_input_branch = shared_input_branch
    self._branch_a = branch_a
    self._branch_b = branch_b
    self._shared_output_branch = shared_output_branch
  def call(self, inputs, **kwargs):
    if self._shared_input_branch:
      # Single input: run it through the shared stem, then fan out.
      for layer in self._shared_input_branch:
        inputs = layer(inputs)
      a = inputs
      b = inputs
    else:
      # Two inputs: one per branch.
      a, b = inputs
    for layer in self._branch_a:
      a = layer(a)
    for layer in self._branch_b:
      b = layer(b)
    outs = [a, b]
    if self._shared_output_branch:
      # Merge the two branch outputs into a single output.
      for layer in self._shared_output_branch:
        outs = layer(outs)
    return outs
class _MultiIOSubclassModelCustomBuild(keras.Model):
  """Multi IO Keras subclass model that uses a custom build method.

  Branch layers are produced by zero-argument callables invoked in `build()`,
  so layer creation is deferred until the input shape is known. See
  `get_multi_io_model` for the branch semantics.
  """
  def __init__(self, branch_a_func, branch_b_func,
               shared_input_branch_func=None,
               shared_output_branch_func=None):
    """Instantiate the model.

    Args:
      branch_a_func: callable returning the layers for branch a.
      branch_b_func: callable returning the layers for branch b.
      shared_input_branch_func: optional callable returning the layers applied
        to a single shared input (or a falsy value for none).
      shared_output_branch_func: optional callable returning the layers that
        merge the two branch outputs (or a falsy value for none).
    """
    super(_MultiIOSubclassModelCustomBuild, self).__init__()
    self._shared_input_branch_func = shared_input_branch_func
    self._branch_a_func = branch_a_func
    self._branch_b_func = branch_b_func
    self._shared_output_branch_func = shared_output_branch_func
    self._shared_input_branch = None
    self._branch_a = None
    self._branch_b = None
    self._shared_output_branch = None
  def build(self, input_shape):
    # Call each generating function exactly once (the previous code invoked
    # truthy generators twice) and tolerate the documented None default.
    if self._shared_input_branch_func is not None:
      shared_input = self._shared_input_branch_func()
      if shared_input:
        self._shared_input_branch = shared_input
    self._branch_a = self._branch_a_func()
    self._branch_b = self._branch_b_func()
    if self._shared_output_branch_func is not None:
      shared_output = self._shared_output_branch_func()
      if shared_output:
        self._shared_output_branch = shared_output
  def call(self, inputs, **kwargs):
    if self._shared_input_branch:
      # Single input: run it through the shared stem, then fan out.
      for layer in self._shared_input_branch:
        inputs = layer(inputs)
      a = inputs
      b = inputs
    else:
      # Two inputs: one per branch.
      a, b = inputs
    for layer in self._branch_a:
      a = layer(a)
    for layer in self._branch_b:
      b = layer(b)
    outs = a, b
    if self._shared_output_branch:
      # Merge the two branch outputs into a single output.
      for layer in self._shared_output_branch:
        outs = layer(outs)
    return outs
def get_multi_io_model(
    branch_a,
    branch_b,
    shared_input_branch=None,
    shared_output_branch=None):
  """Builds a multi-io model that contains two branches.

  The produced model will be of the type specified by `get_model_type`.

  To build a two-input, two-output model:
    Specify a list of layers for branch a and branch b, but do not specify any
    shared input branch or shared output branch. The resulting model will apply
    each branch to a different input, to produce two outputs.

    The first value in branch_a must be the Keras 'Input' layer for branch a,
    and the first value in branch_b must be the Keras 'Input' layer for
    branch b.

    example usage:
    ```
    branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
    branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
    model = get_multi_io_model(branch_a, branch_b)
    ```

  To build a two-input, one-output model:
    Specify a list of layers for branch a and branch b, and specify a
    shared output branch. The resulting model will apply
    each branch to a different input. It will then apply the shared output
    branch to a tuple containing the intermediate outputs of each branch,
    to produce a single output. The first layer in the shared_output_branch
    must be able to merge a tuple of two tensors.

    The first value in branch_a must be the Keras 'Input' layer for branch a,
    and the first value in branch_b must be the Keras 'Input' layer for
    branch b.

    example usage:
    ```
    input_branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
    input_branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
    shared_output_branch = [Concatenate(), Dense(), Dense()]
    model = get_multi_io_model(input_branch_a, input_branch_b,
                               shared_output_branch=shared_output_branch)
    ```

  To build a one-input, two-output model:
    Specify a list of layers for branch a and branch b, and specify a
    shared input branch. The resulting model will take one input, and apply
    the shared input branch to it. It will then respectively apply each branch
    to that intermediate result in parallel, to produce two outputs.

    The first value in the shared_input_branch must be the Keras 'Input' layer
    for the whole model. Branch a and branch b should not contain any Input
    layers.

    example usage:
    ```
    shared_input_branch = [Input(shape=(2,), name='in'), Dense(), Dense()]
    output_branch_a = [Dense(), Dense()]
    output_branch_b = [Dense(), Dense()]
    model = get_multi_io_model(output_branch_a, output_branch_b,
                               shared_input_branch=shared_input_branch)
    ```

  Args:
    branch_a: A sequence of layers for branch a of the model.
    branch_b: A sequence of layers for branch b of the model.
    shared_input_branch: An optional sequence of layers to apply to a single
      input, before applying both branches to that intermediate result. If set,
      the model will take only one input instead of two. Defaults to None.
    shared_output_branch: An optional sequence of layers to merge the
      intermediate results produced by branch a and branch b. If set,
      the model will produce only one output instead of two. Defaults to None.

  Returns:
    A multi-io model of the type specified by `get_model_type`, specified
    by the different branches.
  """
  # Extract the functional inputs from the layer lists
  if shared_input_branch:
    inputs = shared_input_branch[0]
    shared_input_branch = shared_input_branch[1:]
  else:
    inputs = branch_a[0], branch_b[0]
    branch_a = branch_a[1:]
    branch_b = branch_b[1:]
  model_type = get_model_type()
  if model_type == 'subclass':
    return _MultiIOSubclassModel(branch_a, branch_b, shared_input_branch,
                                 shared_output_branch)
  if model_type == 'subclass_custom_build':
    # Wrap in closures so layer lists are handed over lazily at build() time.
    return _MultiIOSubclassModelCustomBuild((lambda: branch_a),
                                            (lambda: branch_b),
                                            (lambda: shared_input_branch),
                                            (lambda: shared_output_branch))
  if model_type == 'sequential':
    raise ValueError('Cannot use `get_multi_io_model` to construct '
                     'sequential models')
  if model_type == 'functional':
    if shared_input_branch:
      # Single input: run it through the shared stem, then fan out.
      a_and_b = inputs
      for layer in shared_input_branch:
        a_and_b = layer(a_and_b)
      a = a_and_b
      b = a_and_b
    else:
      a, b = inputs
    for layer in branch_a:
      a = layer(a)
    for layer in branch_b:
      b = layer(b)
    outputs = a, b
    if shared_output_branch:
      # Merge the two branch outputs into a single output.
      for layer in shared_output_branch:
        outputs = layer(outputs)
    return keras.Model(inputs, outputs)
  raise ValueError('Unknown model type {}'.format(model_type))
# Maps lowercase optimizer names to their Keras v2 classes; consumed by
# `get_v2_optimizer` below.
_V2_OPTIMIZER_MAP = {
    'adadelta': adadelta_v2.Adadelta,
    'adagrad': adagrad_v2.Adagrad,
    'adam': adam_v2.Adam,
    'adamax': adamax_v2.Adamax,
    'nadam': nadam_v2.Nadam,
    'rmsprop': rmsprop_v2.RMSprop,
    'sgd': gradient_descent_v2.SGD
}
def get_v2_optimizer(name, **kwargs):
  """Get the v2 optimizer requested.

  This is only necessary until v2 are the default, as we are testing in Eager,
  and Eager + v1 optimizers fail tests. When we are in v2, the strings alone
  should be sufficient, and this mapping can theoretically be removed.

  Args:
    name: string name of Keras v2 optimizer.
    **kwargs: any kwargs to pass to the optimizer constructor.

  Returns:
    Initialized Keras v2 optimizer.

  Raises:
    ValueError: if an unknown name was passed.
  """
  try:
    # Look up first so that a KeyError raised inside the optimizer
    # constructor is not misreported as an unknown optimizer name.
    optimizer_cls = _V2_OPTIMIZER_MAP[name]
  except KeyError:
    raise ValueError(
        'Could not find requested v2 optimizer: {}\nValid choices: {}'.format(
            name, list(_V2_OPTIMIZER_MAP.keys())))
  return optimizer_cls(**kwargs)
def get_expected_metric_variable_names(var_names, name_suffix=''):
  """Returns expected metric variable names given names and prefix/suffix."""
  # In V1 eager mode and V2 variable names are not made unique.
  if tf2.enabled() or context.executing_eagerly():
    return ['{}:0'.format(name) for name in var_names]
  # In V1 graph mode variable names are made unique using a suffix.
  return ['{}{}:0'.format(name, name_suffix) for name in var_names]
def enable_v2_dtype_behavior(fn):
  """Decorator for enabling the layer V2 dtype behavior on a test."""
  # Thin wrapper; see _set_v2_dtype_behavior for the flag handling.
  return _set_v2_dtype_behavior(fn, True)
def disable_v2_dtype_behavior(fn):
  """Decorator for disabling the layer V2 dtype behavior on a test."""
  # Thin wrapper; see _set_v2_dtype_behavior for the flag handling.
  return _set_v2_dtype_behavior(fn, False)
def _set_v2_dtype_behavior(fn, enabled):
"""Returns version of 'fn' that runs with v2 dtype behavior on or off."""
def wrapper(*args, **kwargs):
v2_dtype_behavior = base_layer_utils.V2_DTYPE_BEHAVIOR
base_layer_utils.V2_DTYPE_BEHAVIOR = enabled
try:
return fn(*args, **kwargs)
finally:
base_layer_utils.V2_DTYPE_BEHAVIOR = v2_dtype_behavior
return wrapper
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/testing_utils.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in activation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import keras_export
# b/123041942
# In TF 2.x, if the `tf.nn.softmax` is used as an activation function in Keras
# layers, it gets serialized as 'softmax_v2' instead of 'softmax' as the
# internal method name is returned in serialization. This results in errors in
# model exporting and loading as Keras can't find any activation function with
# the name of `softmax_v2`.
# This dict maps the activation function name from its v2 version to its
# canonical name. Applied in `serialize()` below.
_TF_ACTIVATIONS_V2 = {
    'softmax_v2': 'softmax',
}
@keras_export('keras.activations.softmax')
def softmax(x, axis=-1):
  """Softmax activation function.

  The softmax activation function transforms the outputs so that all values
  are in range (0, 1) and sum to 1. It is often used as the activation for
  the last layer of a classification network because the result could be
  interpreted as a probability distribution. The softmax of x is calculated
  by exp(x)/tf.reduce_sum(exp(x)).

  Arguments:
    x : Input tensor.
    axis: Integer, axis along which the softmax normalization is applied.

  Returns:
    Tensor, output of softmax transformation (all values are non-negative
    and sum to 1).

  Raises:
    ValueError: In case `dim(x) == 1`.
  """
  ndim = K.ndim(x)
  if ndim == 2:
    # Pass `axis` through: the previous code called nn.softmax(x), which
    # always normalized the last axis and silently ignored a caller-provided
    # `axis` for rank-2 inputs.
    return nn.softmax(x, axis=axis)
  elif ndim > 2:
    # Subtract the per-axis max for numerical stability before exponentiating.
    e = math_ops.exp(x - math_ops.reduce_max(x, axis=axis, keepdims=True))
    s = math_ops.reduce_sum(e, axis=axis, keepdims=True)
    return e / s
  else:
    raise ValueError('Cannot apply softmax to a tensor that is 1D. '
                     'Received input: %s' % (x,))
@keras_export('keras.activations.elu')
def elu(x, alpha=1.0):
  """Exponential linear unit.

  Arguments:
    x: Input tensor.
    alpha: A scalar, slope of negative section.

  Returns:
    The exponential linear activation: `x` if `x > 0` and
    `alpha * (exp(x)-1)` if `x < 0`.

  Reference:
    - [Fast and Accurate Deep Network Learning by Exponential
      Linear Units (ELUs)](https://arxiv.org/abs/1511.07289)
  """
  return K.elu(x, alpha)
@keras_export('keras.activations.selu')
def selu(x):
  """Scaled Exponential Linear Unit (SELU).

  The Scaled Exponential Linear Unit (SELU) activation function is:
  `scale * x` if `x > 0` and `scale * alpha * (exp(x) - 1)` if `x < 0`
  where `alpha` and `scale` are pre-defined constants
  (`alpha = 1.67326324`
  and `scale = 1.05070098`).
  The SELU activation function multiplies `scale` > 1 with the
  `[elu](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/activations/elu)`
  (Exponential Linear Unit (ELU)) to ensure a slope larger than one
  for positive net inputs.

  The values of `alpha` and `scale` are
  chosen so that the mean and variance of the inputs are preserved
  between two consecutive layers as long as the weights are initialized
  correctly (see [`lecun_normal` initialization]
  (https://www.tensorflow.org/api_docs/python/tf/keras/initializers/lecun_normal))
  and the number of inputs is "large enough"
  (see references for more information).

  (A plot of the function is available in the blog post below. Courtesy:
  Blog on Towards DataScience at
  https://towardsdatascience.com/selu-make-fnns-great-again-snn-8d61526802a9)

  Example Usage:
  ```python3
  n_classes = 10  # 10-class problem
  model = models.Sequential()
  model.add(Dense(64, kernel_initializer='lecun_normal', activation='selu',
                  input_shape=(28, 28, 1)))
  model.add(Dense(32, kernel_initializer='lecun_normal', activation='selu'))
  model.add(Dense(16, kernel_initializer='lecun_normal', activation='selu'))
  model.add(Dense(n_classes, activation='softmax'))
  ```

  Arguments:
    x: A tensor or variable to compute the activation function for.

  Returns:
    The scaled exponential unit activation: `scale * elu(x, alpha)`.

  # Note
    - To be used together with the initialization "[lecun_normal]
    (https://www.tensorflow.org/api_docs/python/tf/keras/initializers/lecun_normal)".
    - To be used together with the dropout variant "[AlphaDropout]
    (https://www.tensorflow.org/api_docs/python/tf/keras/layers/AlphaDropout)".

  References:
    [Self-Normalizing Neural Networks (Klambauer et al, 2017)]
    (https://arxiv.org/abs/1706.02515)
  """
  # Fixed constants from the SELU paper (Klambauer et al., 2017).
  alpha = 1.6732632423543772848170429916717
  scale = 1.0507009873554804934193349852946
  return scale * K.elu(x, alpha)
@keras_export('keras.activations.softplus')
def softplus(x):
  """Softplus activation function.

  Arguments:
    x: Input tensor.

  Returns:
    The softplus activation: `log(exp(x) + 1)`.
  """
  return nn.softplus(x)
@keras_export('keras.activations.softsign')
def softsign(x):
  """Softsign activation function.

  Arguments:
    x: Input tensor.

  Returns:
    The softsign activation: `x / (abs(x) + 1)`.
  """
  return nn.softsign(x)
@keras_export('keras.activations.relu')
def relu(x, alpha=0., max_value=None, threshold=0):
  """Rectified Linear Unit.

  With default values, it returns element-wise `max(x, 0)`.

  Otherwise, it follows:
  `f(x) = max_value` for `x >= max_value`,
  `f(x) = x` for `threshold <= x < max_value`,
  `f(x) = alpha * (x - threshold)` otherwise.

  Arguments:
    x: A tensor or variable.
    alpha: A scalar, slope of negative section (default=`0.`).
    max_value: float. Saturation threshold.
    threshold: float. Threshold value for thresholded activation.

  Returns:
    A tensor.
  """
  return K.relu(x, alpha=alpha, max_value=max_value, threshold=threshold)
@keras_export('keras.activations.tanh')
def tanh(x):
  """Hyperbolic Tangent (tanh) activation function.

  For example:

  ```python
  # Constant 1-D tensor populated with value list.
  a = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32)
  b = tf.keras.activations.tanh(a)
  # b = [-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547]
  ```

  Arguments:
    x: Input tensor.

  Returns:
    A tensor of same shape and dtype of input `x`.
    The tanh activation: `tanh(x) = sinh(x)/cosh(x) = ((exp(x) -
    exp(-x))/(exp(x) + exp(-x)))`.
  """
  return nn.tanh(x)
@keras_export('keras.activations.sigmoid')
def sigmoid(x):
  """Sigmoid activation function.

  Applies the sigmoid activation, defined as `1 / (1 + exp(-x))`. Its curve
  is like an "S" and is like a smoothed version of the Heaviside (Unit Step
  Function) function. For small values (<-5) the sigmoid returns a value
  close to zero and for larger values (>5) the result of the function gets
  close to 1.

  Arguments:
    x: Input tensor.

  Returns:
    The sigmoid activation: `(1.0 / (1.0 + exp(-x)))`.
  """
  return nn.sigmoid(x)
@keras_export('keras.activations.exponential')
def exponential(x):
  """Exponential activation function.

  Arguments:
    x: Input tensor.

  Returns:
    The exponential activation: `exp(x)`.
  """
  return math_ops.exp(x)
@keras_export('keras.activations.hard_sigmoid')
def hard_sigmoid(x):
  """Hard sigmoid activation function.

  A piecewise-linear approximation that is faster to compute than the exact
  sigmoid activation.

  Arguments:
    x: Input tensor.

  Returns:
    Hard sigmoid activation:
    - `0` if `x < -2.5`
    - `1` if `x > 2.5`
    - `0.2 * x + 0.5` if `-2.5 <= x <= 2.5`.
  """
  return K.hard_sigmoid(x)
@keras_export('keras.activations.linear')
def linear(x):
  """Linear activation function (identity).

  Arguments:
    x: Input tensor.

  Returns:
    The linear activation: `x` (unchanged).
  """
  return x
@keras_export('keras.activations.serialize')
def serialize(activation):
  """Returns the string identifier (or config) for an activation function."""
  fn_name = getattr(activation, '__name__', None)
  # Map v2-suffixed TF activation names (e.g. 'softmax_v2') back to their
  # canonical Keras names so serialized models remain loadable.
  if fn_name in _TF_ACTIVATIONS_V2:
    return _TF_ACTIVATIONS_V2[fn_name]
  return serialize_keras_object(activation)
@keras_export('keras.activations.deserialize')
def deserialize(name, custom_objects=None):
  """Returns the activation function named `name`.

  Looks the name up in this module's globals (so every activation
  defined in this file is resolvable) plus any `custom_objects`.

  Arguments:
    name: String name of the activation function, or serialized config.
    custom_objects: Optional dict mapping names to custom callables.

  Returns:
    The corresponding activation callable.
  """
  return deserialize_keras_object(
      name,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='activation function')
@keras_export('keras.activations.get')
def get(identifier):
  """Resolves `identifier` to an activation function.

  Arguments:
    identifier: `None` (maps to the linear activation), a string name,
      a callable (returned as-is), or a serialized config dict.

  Returns:
    An activation callable.

  Raises:
    TypeError: If `identifier` is none of the supported kinds.
  """
  if identifier is None:
    return linear
  if isinstance(identifier, six.string_types):
    # Normalize unicode/bytes aliases before lookup.
    return deserialize(str(identifier))
  if callable(identifier):
    return identifier
  if isinstance(identifier, dict):
    return deserialize_keras_object(
        identifier, printable_module_name='activation')
  raise TypeError(
      'Could not interpret activation function identifier: {}'.format(
          repr(identifier)))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/activations.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import weakref
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training.adam import AdamOptimizer
def _get_model(input_dim, num_hidden, output_dim):
  """Builds a two-layer (relu + softmax) classifier for the optimizer tests."""
  layers = [
      keras.layers.Dense(
          num_hidden, activation='relu', input_shape=(input_dim,)),
      keras.layers.Dense(output_dim, activation='softmax'),
  ]
  return keras.models.Sequential(layers)
@keras_parameterized.run_all_keras_modes
class KerasOptimizersTest(keras_parameterized.TestCase):
  """Integration tests for the Keras v1 optimizers and the TFOptimizer wrapper."""

  # After experimental_run_tf_function is turned on, optimizer v1 can no longer
  # work in eager mode, skipping the test if so.
  def _test_optimizer(self, optimizer, target=0.75):
    """Trains a small MLP with `optimizer`, then checks accuracy, iteration
    counting, config round-tripping and weight constraints.

    Args:
      optimizer: a Keras v1 optimizer instance under test.
      target: float, minimum training accuracy expected after 2 epochs.
    """
    if testing_utils.should_run_tf_function() or context.executing_eagerly():
      self.skipTest(
          'v1 optimizer does not run in experimental_run_tf_function mode or '
          'eager mode')
    np.random.seed(1337)  # deterministic synthetic data
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=1000, test_samples=200, input_shape=(10,), num_classes=2)
    y_train = keras.utils.to_categorical(y_train)
    model = _get_model(x_train.shape[1], 20, y_train.shape[1])
    model.compile(
        loss='categorical_crossentropy',
        optimizer=optimizer,
        metrics=['acc'],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    # A freshly compiled optimizer must start with zero iterations.
    np.testing.assert_equal(
        keras.backend.get_value(model.optimizer.iterations), 0)
    history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
    np.testing.assert_equal(
        keras.backend.get_value(model.optimizer.iterations),
        126)  # 63 steps per epoch
    self.assertGreaterEqual(history.history['acc'][-1], target)
    # Round-trip through serialize/deserialize and normalize the config keys
    # that legitimately differ (class-name casing, auto-added defaults) before
    # comparing.
    config = keras.optimizers.serialize(optimizer)
    optim = keras.optimizers.deserialize(config)
    new_config = keras.optimizers.serialize(optim)
    new_config['class_name'] = new_config['class_name'].lower()
    new_config['config'].pop('name', None)
    if 'amsgrad' not in config['config']:
      new_config['config'].pop('amsgrad', None)
    if 'decay' in new_config['config'] and 'schedule_decay' in config['config']:
      new_config['config']['schedule_decay'] = new_config['config'].pop('decay')
    if 'momentum' not in config['config']:
      new_config['config'].pop('momentum', None)
    if 'centered' not in config['config']:
      new_config['config'].pop('centered', None)
    self.assertDictEqual(config, new_config)
    # Test constraints.
    model = keras.models.Sequential()
    dense = keras.layers.Dense(
        10,
        input_shape=(x_train.shape[1],),
        kernel_constraint=lambda x: 0. * x + 1.,
        bias_constraint=lambda x: 0. * x + 2.,
        activation='relu')
    model.add(dense)
    model.add(keras.layers.Dense(y_train.shape[1], activation='softmax'))
    model.compile(
        loss='categorical_crossentropy',
        optimizer=optimizer,
        metrics=['accuracy'],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    np.testing.assert_equal(
        keras.backend.get_value(model.optimizer.iterations),
        126)  # Using same optimizer from before
    model.train_on_batch(x_train[:10], y_train[:10])
    np.testing.assert_equal(
        keras.backend.get_value(model.optimizer.iterations), 127)
    kernel, bias = dense.get_weights()
    # The constant constraints above force the kernel to all-ones and the
    # bias to all-twos after a single training step.
    np.testing.assert_allclose(kernel, 1., atol=1e-3)
    np.testing.assert_allclose(bias, 2., atol=1e-3)

  def test_sgd(self):
    with self.cached_session():
      self._test_optimizer(keras.optimizers.SGD())

  def test_momentum(self):
    with self.cached_session():
      self._test_optimizer(
          keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True))

  def test_rmsprop(self):
    with self.cached_session():
      self._test_optimizer(keras.optimizers.RMSprop())
      self._test_optimizer(keras.optimizers.RMSprop(decay=1e-3))

  def test_adagrad(self):
    with self.cached_session():
      self._test_optimizer(keras.optimizers.Adagrad())
      self._test_optimizer(keras.optimizers.Adagrad(decay=1e-3))

  def test_adadelta(self):
    with self.cached_session():
      self._test_optimizer(keras.optimizers.Adadelta(), target=0.6)
      # Accuracy seems dependent on the initialization. Even adding
      # tf.compat.v1.Print nodes in the graph seemed to affect the
      # initialization seed, and hence the accuracy.
      self._test_optimizer(keras.optimizers.Adadelta(decay=1e-3), target=0.4)

  def test_adam(self):
    with self.cached_session():
      self._test_optimizer(keras.optimizers.Adam())
      # Accuracy seems dependent on the seed initialization.
      # TODO(b/121051441): fix test flakiness.
      self._test_optimizer(keras.optimizers.Adam(decay=1e-3), target=0.73)
      self._test_optimizer(keras.optimizers.Adam(amsgrad=True))

  def test_adamax(self):
    with self.cached_session():
      self._test_optimizer(keras.optimizers.Adamax())
      self._test_optimizer(keras.optimizers.Adamax(decay=1e-3))

  def test_nadam(self):
    with self.cached_session():
      self._test_optimizer(keras.optimizers.Nadam())

  def test_clipnorm(self):
    with self.cached_session():
      self._test_optimizer(
          keras.optimizers.SGD(lr=0.01, momentum=0.9, clipnorm=0.5))

  def test_clipvalue(self):
    with self.cached_session():
      self._test_optimizer(
          keras.optimizers.SGD(lr=0.01, momentum=0.9, clipvalue=0.5))

  def test_tf_optimizer(self):
    """Wrapping a raw tf.train optimizer works for fit but not config APIs."""
    if testing_utils.should_run_tf_function() or context.executing_eagerly():
      self.skipTest(
          'v1 optimizer does not run in experimental_run_tf_function mode or '
          'eager mode')
    optimizer = keras.optimizers.TFOptimizer(AdamOptimizer(0.01))
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(
        2, input_shape=(3,), kernel_constraint=keras.constraints.MaxNorm(1)))
    # This is possible
    model.compile(
        loss='mean_squared_error',
        optimizer=optimizer,
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    keras.backend.track_tf_optimizer(optimizer)
    model.fit(np.random.random((5, 3)),
              np.random.random((5, 2)),
              epochs=1,
              batch_size=5,
              verbose=0)
    # not supported
    with self.assertRaises(NotImplementedError):
      _ = optimizer.weights
    with self.assertRaises(NotImplementedError):
      optimizer.get_config()
    with self.assertRaises(NotImplementedError):
      optimizer.from_config(None)

  def test_optimizer_garbage_collection(self):
    """Tracking a TFOptimizer must not keep the graph or optimizer alive."""
    if testing_utils.should_run_tf_function() or context.executing_eagerly():
      self.skipTest(
          'v1 optimizer does not run in experimental_run_tf_function mode or '
          'eager mode')
    graph = ops.Graph()
    with graph.as_default():
      optimizer = keras.optimizers.TFOptimizer(AdamOptimizer(0.01))
      keras.backend.track_tf_optimizer(optimizer)
      optimizer_weak = weakref.ref(optimizer)
    graph_weak = weakref.ref(graph)
    del graph, optimizer
    gc.collect()
    # Check that the weak references are dead now.
    self.assertIs(graph_weak(), None)
    self.assertIs(optimizer_weak(), None)

  def test_tf_optimizer_iterations(self):
    """`iterations` counts batches for a wrapped tf.train optimizer too."""
    if testing_utils.should_run_tf_function() or context.executing_eagerly():
      self.skipTest(
          'v1 optimizer does not run in experimental_run_tf_function mode or '
          'eager mode')
    with self.cached_session():
      optimizer = keras.optimizers.TFOptimizer(AdamOptimizer(0.01))
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(
          2, input_shape=(3,), kernel_constraint=keras.constraints.MaxNorm(1)))
      model.compile(
          loss='mean_squared_error',
          optimizer=optimizer,
          run_eagerly=testing_utils.should_run_eagerly(),
          experimental_run_tf_function=testing_utils.should_run_tf_function())
      keras.backend.track_tf_optimizer(optimizer)
      self.assertEqual(keras.backend.get_value(model.optimizer.iterations), 0)
      model.fit(np.random.random((55, 3)),
                np.random.random((55, 2)),
                epochs=1,
                batch_size=5,
                verbose=0)
      # 55 samples / batch_size 5 = 11 steps.
      self.assertEqual(keras.backend.get_value(model.optimizer.iterations), 11)

  def test_negative_clipvalue_or_clipnorm(self):
    with self.assertRaises(ValueError):
      _ = keras.optimizers.SGD(lr=0.01, clipvalue=-0.5)
    with self.assertRaises(ValueError):
      _ = keras.optimizers.Adam(clipnorm=-2.0)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/optimizers_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import json
import os
import re
import shutil
import sys
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import adam
from tensorflow.python.training import checkpoint_management
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
# Shared fixture dimensions for the synthetic classification data used by the
# callback tests below.
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
  """Counts the number of times each callback method was run.

  Attributes:
    method_counts: dict. Contains the counts of time each callback method was
      run.
  """

  # Every hook method whose invocations we track.
  _COUNTED_METHODS = (
      'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
      'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
      'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
      'on_test_begin', 'on_test_end', 'on_train_batch_begin',
      'on_train_batch_end', 'on_train_begin', 'on_train_end')

  def __init__(self):
    self.method_counts = collections.defaultdict(int)
    # Replace each hook with a counting wrapper around the original.
    for name in self._COUNTED_METHODS:
      original = getattr(self, name)
      setattr(self, name, self.wrap_with_counts(name, original))

  def wrap_with_counts(self, method_name, method):
    """Returns `method` wrapped so each call increments its counter."""

    def _call_and_count(*args, **kwargs):
      self.method_counts[method_name] += 1
      return method(*args, **kwargs)

    return _call_and_count
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
  """Returns a (Sequence, None) pair yielding 5 identical all-ones batches."""

  class MySequence(keras.utils.data_utils.Sequence):
    """Fixed-length Sequence: every index returns the same (2, 10)/(2, 1) batch."""

    def __len__(self):
      return 5

    def __getitem__(self, _):
      return np.ones((2, 10)), np.ones((2, 1))

  # The second element mirrors _get_numpy's (x, y) shape: no separate labels.
  return MySequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
  """Verifies how many times each callback hook fires in fit/evaluate/predict."""

  def _check_counts(self, counter, expected_counts):
    """Checks that the counts registered by `counter` are those expected."""
    for method_name, expected_count in expected_counts.items():
      self.assertEqual(
          counter.method_counts[method_name],
          expected_count,
          msg='For method {}: expected {}, got: {}'.format(
              method_name, expected_count, counter.method_counts[method_name]))

  def _get_model(self):
    """Builds a compiled two-layer binary classifier used for hook counting."""
    layers = [
        keras.layers.Dense(10, activation='relu'),
        keras.layers.Dense(1, activation='sigmoid')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
    model.compile(
        adam.AdamOptimizer(0.001),
        'binary_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_fit(self, data):
    """5 epochs x 5 train batches + validation: counts must match exactly."""
    x, y = data
    val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
    model = self._get_model()
    counter = Counter()
    model.fit(
        x,
        y,
        validation_data=(val_x, val_y),
        batch_size=2 if not is_sequence else None,
        steps_per_epoch=5 if is_sequence else None,
        epochs=5,
        callbacks=[counter])
    self._check_counts(
        counter, {
            'on_batch_begin': 25,
            'on_batch_end': 25,
            'on_epoch_begin': 5,
            'on_epoch_end': 5,
            'on_predict_batch_begin': 0,
            'on_predict_batch_end': 0,
            'on_predict_begin': 0,
            'on_predict_end': 0,
            'on_test_batch_begin': 10,
            'on_test_batch_end': 10,
            'on_test_begin': 5,
            'on_test_end': 5,
            'on_train_batch_begin': 25,
            'on_train_batch_end': 25,
            'on_train_begin': 1,
            'on_train_end': 1
        })

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_evaluate(self, data):
    """Only the test-phase hooks may fire during evaluate()."""
    x, y = data
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
    model = self._get_model()
    counter = Counter()
    model.evaluate(
        x,
        y,
        batch_size=2 if not is_sequence else None,
        steps=5 if is_sequence else None,
        callbacks=[counter])
    self._check_counts(
        counter, {
            'on_test_batch_begin': 5,
            'on_test_batch_end': 5,
            'on_test_begin': 1,
            'on_test_end': 1
        })

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_predict(self, data):
    """Only the predict-phase hooks may fire during predict()."""
    x = data[0]
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
    model = self._get_model()
    counter = Counter()
    model.predict(
        x,
        batch_size=2 if not is_sequence else None,
        steps=5 if is_sequence else None,
        callbacks=[counter])
    self._check_counts(
        counter, {
            'on_predict_batch_begin': 5,
            'on_predict_batch_end': 5,
            'on_predict_begin': 1,
            'on_predict_end': 1
        })

  def test_callback_list_methods(self):
    """CallbackList forwards each hook call to its wrapped callbacks once."""
    counter = Counter()
    callback_list = keras.callbacks.CallbackList([counter])
    batch = 0
    callback_list.on_test_batch_begin(batch)
    callback_list.on_test_batch_end(batch)
    callback_list.on_predict_batch_begin(batch)
    callback_list.on_predict_batch_end(batch)
    self._check_counts(
        counter, {
            'on_test_batch_begin': 1,
            'on_test_batch_end': 1,
            'on_predict_batch_begin': 1,
            'on_predict_batch_end': 1
        })
class KerasCallbacksTest(keras_parameterized.TestCase):
  def _get_model(self, input_shape=None):
    """Builds a small compiled softmax model with a named accuracy metric.

    Args:
      input_shape: Optional input shape; when None the model is deferred-built.

    Returns:
      A compiled Keras model tracking the `my_acc` metric.
    """
    layers = [
        keras.layers.Dense(3, activation='relu'),
        keras.layers.Dense(2, activation='softmax')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_progbar_logging(self):
    """The progress bar must print loss and the custom metric every epoch."""
    model = self._get_model(input_shape=(3,))
    x = array_ops.ones((50, 3))
    y = array_ops.zeros((50, 2))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
    expected_log = r'(.*- loss:.*- my_acc:.*)+'
    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(dataset, epochs=2, steps_per_epoch=10)
      self.assertRegexpMatches(printed.contents(), expected_log)
  @keras_parameterized.run_with_all_model_types(exclude_models='functional')
  @keras_parameterized.run_all_keras_modes
  def test_progbar_logging_deferred_model_build(self):
    """Progbar logging also works when the model is built on first fit call."""
    model = self._get_model()
    self.assertFalse(model.built)  # no input_shape given -> deferred build
    x = array_ops.ones((50, 3))
    y = array_ops.zeros((50, 2))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
    expected_log = r'(.*- loss:.*- my_acc:.*)+'
    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(dataset, epochs=2, steps_per_epoch=10)
      self.assertRegexpMatches(printed.contents(), expected_log)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_progbar_logging_validation_data(self):
    """Validation metrics (val_loss/val_my_acc) must appear in the progbar."""
    model = self._get_model(input_shape=(3,))
    x = array_ops.ones((50, 3))
    y = array_ops.zeros((50, 2))
    training_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
    val_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
    expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+'
    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(training_dataset, epochs=2, validation_data=val_dataset)
      self.assertRegexpMatches(printed.contents(), expected_log)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_progbar_logging_validation_split(self):
    """With validation_split=0.2, 80/100 samples train and val metrics print."""
    model = self._get_model(input_shape=(3,))
    x = np.ones((100, 3))
    y = np.zeros((100, 2))
    expected_log = (
        r'.*1/2\n'
        r'.*80/80.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*\n'
        r'.*2/2\n'
        r'.*80/80.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)
      self.assertRegexpMatches(printed.contents(), expected_log)
  @keras_parameterized.run_with_all_model_types
  def test_ModelCheckpoint(self):
    """Exercises ModelCheckpoint across monitor/mode/save_best_only/save_freq.

    Each numbered case fits briefly with a differently-configured callback and
    then asserts which checkpoint files exist on disk.
    """
    if h5py is None:
      return  # Skip test if models cannot be saved.
    layers = [
        keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
        keras.layers.Dense(NUM_CLASSES, activation='softmax')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
    model.compile(
        loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    filepath = os.path.join(temp_dir, 'checkpoint.h5')
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    # case 1: monitor val_loss, save every epoch, auto mode.
    monitor = 'val_loss'
    save_best_only = False
    mode = 'auto'
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # case 2: explicit 'min' mode.
    mode = 'min'
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # case 3: 'max' mode monitoring an accuracy metric.
    mode = 'max'
    monitor = 'val_acc'
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # case 4: save_best_only=True still writes on the first (best) epoch.
    save_best_only = True
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # Case: metric not available.
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor='unknown',
            save_best_only=True)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    # File won't be written.
    assert not os.path.exists(filepath)
    # case 5: period=2 writes checkpoints only on even epochs.
    save_best_only = False
    period = 2
    mode = 'auto'
    filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode,
            period=period)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=4,
        verbose=1)
    assert os.path.exists(filepath.format(epoch=2))
    assert os.path.exists(filepath.format(epoch=4))
    os.remove(filepath.format(epoch=2))
    os.remove(filepath.format(epoch=4))
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=3))
    # Invalid use: this will raise a warning but not an Exception.
    keras.callbacks.ModelCheckpoint(
        filepath,
        monitor=monitor,
        save_best_only=save_best_only,
        mode='unknown')
    # Case 6: `ModelCheckpoint` with a combination of `save_freq` and `period`.
    # Though `period` is deprecated, we're testing it for
    # backward-compatibility.
    filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5)
    ]
    assert not os.path.exists(filepath.format(epoch=0))
    assert not os.path.exists(filepath.format(epoch=5))
    model.fit(
        x_train,
        y_train,
        batch_size=2,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=10,
        verbose=1)
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=2))
    assert not os.path.exists(filepath.format(epoch=3))
    assert not os.path.exists(filepath.format(epoch=4))
    assert os.path.exists(filepath.format(epoch=5))
    assert not os.path.exists(filepath.format(epoch=6))
    assert os.path.exists(filepath.format(epoch=10))
    os.remove(filepath.format(epoch=5))
    os.remove(filepath.format(epoch=10))
    # Case 7: `ModelCheckpoint` with an integer `save_freq` (in samples; with
    # 10 train samples, save_freq=30 fires every 3rd epoch).
    filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode,
            save_freq=30,
            period=100)  # The period should be ignored (this test tests this).
    ]
    assert not os.path.exists(filepath.format(epoch=3))
    model.fit(
        x_train,
        y_train,
        batch_size=2,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=10,
        verbose=1)
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=2))
    assert os.path.exists(filepath.format(epoch=3))
    assert not os.path.exists(filepath.format(epoch=4))
    assert not os.path.exists(filepath.format(epoch=5))
    assert os.path.exists(filepath.format(epoch=6))
    assert not os.path.exists(filepath.format(epoch=7))
    assert not os.path.exists(filepath.format(epoch=8))
    assert os.path.exists(filepath.format(epoch=9))
    os.remove(filepath.format(epoch=3))
    os.remove(filepath.format(epoch=6))
    os.remove(filepath.format(epoch=9))
    # Case 8: `ModelCheckpoint` with valid and invalid save_freq argument.
    with self.assertRaisesRegexp(ValueError, 'Unrecognized save_freq'):
      keras.callbacks.ModelCheckpoint(
          filepath,
          monitor=monitor,
          save_best_only=save_best_only,
          mode=mode,
          save_freq='invalid_save_freq')
    # The following should not raise ValueError.
    keras.callbacks.ModelCheckpoint(
        filepath,
        monitor=monitor,
        save_best_only=save_best_only,
        mode=mode,
        save_freq='epoch')
    keras.callbacks.ModelCheckpoint(
        filepath,
        monitor=monitor,
        save_best_only=save_best_only,
        mode=mode,
        save_freq=3)
  def _get_dummy_resource_for_model_checkpoint_testing(self):
    """Builds a deterministic bias-only model, dataset and checkpoint callback.

    Returns:
      Tuple of (compiled model, training dataset, ModelCheckpoint callback,
      epoch-templated checkpoint filepath).
    """

    def get_input_datasets():
      # Simple training input.
      train_input = [[1]] * 16
      train_label = [[0]] * 16
      ds = dataset_ops.Dataset.from_tensor_slices((train_input, train_label))
      return ds.batch(8, drop_remainder=True)

    class Bias(base_layer.Layer):
      """Layer adding a single scalar bias; no random kernel weights."""

      def build(self, input_shape):
        self.bias = self.add_variable('bias', (1,), initializer='zeros')

      def call(self, inputs):
        return inputs + self.bias

    # Very simple bias model to eliminate randomness.
    optimizer = gradient_descent.SGD(0.1)
    model = sequential.Sequential()
    model.add(Bias(input_shape=(1,)))
    model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
    train_ds = get_input_datasets()
    temp_dir = self.get_temp_dir()
    filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
    # The filepath shouldn't exist at the beginning.
    self.assertFalse(os.path.exists(filepath))
    callback = keras.callbacks.ModelCheckpoint(
        filepath=filepath, save_weights_only=True)
    return model, train_ds, callback, filepath
  def _run_load_weights_on_restart_test_common_iterations(self):
    """Fits 3 checkpointed epochs plus 1 un-checkpointed epoch.

    Returns:
      Tuple of (model, dataset, checkpoint filepath template, model weights
      after the extra un-checkpointed epoch).
    """
    (model, train_ds, callback,
     filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
    initial_epochs = 3
    model.fit(train_ds, epochs=initial_epochs, callbacks=[callback])
    # The files should exist after fitting with callback.
    for epoch in range(initial_epochs):
      self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
    self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1)))
    self.assertEqual(
        callback._get_most_recently_modified_file_matching_pattern(filepath),
        filepath.format(epoch=initial_epochs))
    model.fit(train_ds, epochs=1)
    weights_after_one_more_epoch = model.get_weights()
    # The filepath should continue to exist after fitting without callback.
    for epoch in range(initial_epochs):
      self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
    return model, train_ds, filepath, weights_after_one_more_epoch
  @staticmethod
  def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
    """Returns a test fn asserting weights ARE restored on restart.

    Args:
      save_weights_only: bool forwarded to ModelCheckpoint.
    """

    def func(self):
      (model, train_ds, filepath, weights_after_one_more_epoch
      ) = self._run_load_weights_on_restart_test_common_iterations()
      # Sleep for some short time period ensuring the files are created with
      # a different time (in MacOS OSS the granularity is only 1 second).
      time.sleep(2)
      callback = keras.callbacks.ModelCheckpoint(
          filepath=filepath,
          save_weights_only=save_weights_only,
          load_weights_on_restart=True)
      model.fit(train_ds, epochs=1, callbacks=[callback])
      weights_after_model_restoring_and_one_more_epoch = model.get_weights()
      self.assertEqual(
          callback._get_most_recently_modified_file_matching_pattern(filepath),
          filepath.format(epoch=1))
      model.fit(
          train_ds,
          epochs=1,
          callbacks=[
              keras.callbacks.ModelCheckpoint(
                  filepath=filepath,
                  save_weights_only=save_weights_only,
                  load_weights_on_restart=True)
          ])
      weights_with_one_final_extra_epoch = model.get_weights()
      # Asserting the weights one epoch after initial fitting and another epoch
      # after that are closed, if a ModelCheckpoint with
      # load_weights_on_restart=True is given (so the model is restored at the
      # beginning of training).
      self.assertAllClose(weights_after_one_more_epoch,
                          weights_after_model_restoring_and_one_more_epoch)
      self.assertNotAllClose(weights_after_one_more_epoch,
                             weights_with_one_final_extra_epoch)

    return func
  @staticmethod
  def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
    """Returns a test fn asserting weights are NOT restored on restart.

    Args:
      save_weights_only: bool forwarded to ModelCheckpoint.
    """

    def func(self):
      (model, train_ds, filepath, weights_after_one_more_epoch
      ) = self._run_load_weights_on_restart_test_common_iterations()
      model.fit(
          train_ds,
          epochs=1,
          callbacks=[
              keras.callbacks.ModelCheckpoint(
                  filepath=filepath, save_weights_only=save_weights_only)
          ])
      weights_after_model_restoring_and_one_more_epoch = model.get_weights()
      # Asserting the weights one epoch after initial fitting and another epoch
      # after that are different, if a ModelCheckpoint with
      # load_weights_on_restart=False is given (so the model is not restored at
      # the beginning of training).
      self.assertNotAllClose(weights_after_one_more_epoch,
                             weights_after_model_restoring_and_one_more_epoch)

    return func
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
= get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
  def test_ModelCheckpoint_override_if_file_exist(self):
    """A later fit must overwrite an existing checkpoint file."""
    (model, train_ds, filepath,
     _) = self._run_load_weights_on_restart_test_common_iterations()
    # Sleep for some short time period to ensure the files are created with
    # a different time (in MacOS OSS the granularity is only 1 second).
    time.sleep(2)
    callback = keras.callbacks.ModelCheckpoint(
        filepath=filepath, save_weights_only=True)
    model.load_weights(
        callback._get_most_recently_modified_file_matching_pattern(filepath))
    weights_before_additional_fit = model.get_weights()
    model.fit(train_ds, epochs=1, callbacks=[callback])
    model.load_weights(
        callback._get_most_recently_modified_file_matching_pattern(filepath))
    weights_after_additional_fit = model.get_weights()
    # Weights differ only if the newest checkpoint was actually overwritten.
    self.assertNotAllClose(weights_before_additional_fit,
                           weights_after_additional_fit)
  def test_fit_with_ModelCheckpoint_with_tf_config(self):
    """fit + ModelCheckpoint must not break when TF_CONFIG is set."""
    (model, train_ds, callback,
     _) = self._get_dummy_resource_for_model_checkpoint_testing()
    os.environ['TF_CONFIG'] = json.dumps({
        'cluster': {
            'worker': ['localhost:23333']
        },
        'task': {
            'type': 'worker',
            'index': 0
        }
    })
    # `model.fit()` should work regardless of the presence of `TF_CONFIG`.
    model.fit(train_ds, epochs=1, callbacks=[callback])
  def test_EarlyStopping(self):
    """EarlyStopping runs without error for every mode/monitor combination,
    including an unknown mode and an unavailable monitored metric."""
    with self.cached_session():
      np.random.seed(123)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
      cases = [
          ('max', 'val_acc'),
          ('min', 'val_loss'),
          ('auto', 'val_acc'),
          ('auto', 'loss'),
          ('unknown', 'unknown')
      ]
      for mode, monitor in cases:
        patience = 0
        cbks = [
            keras.callbacks.EarlyStopping(
                patience=patience, monitor=monitor, mode=mode)
        ]
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=5,
            verbose=0)
  def test_EarlyStopping_reuse(self):
    """A single EarlyStopping instance can be reused across `fit` calls."""
    with self.cached_session():
      np.random.seed(1337)
      patience = 3
      data = np.random.random((100, 1))
      labels = np.where(data > 0.5, 1, 0)
      model = keras.models.Sequential((keras.layers.Dense(
          1, input_dim=1, activation='relu'), keras.layers.Dense(
              1, activation='sigmoid'),))
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
      weights = model.get_weights()
      # NOTE: monitor='acc' relies on Keras resolving it against the
      # 'accuracy' metric via its metric-name aliasing.
      stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
      # This should allow training to go for at least `patience` epochs
      model.set_weights(weights)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
  def test_EarlyStopping_with_baseline(self):
    """EarlyStopping with `baseline` stops immediately unless given patience."""
    with self.cached_session():
      np.random.seed(1337)
      baseline = 0.5
      (data, labels), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=50,
          input_shape=(1,),
          num_classes=NUM_CLASSES)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=1, num_classes=1, input_dim=1)
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
      # Without patience, training stops right after the first epoch that
      # fails to beat the baseline.
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) == 1
      patience = 3
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              patience=patience,
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
  def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
    """With `restore_best_weights=True`, the best epoch's weights are kept."""
    class DummyModel(object):
      # Minimal model stand-in: `weights` records the epoch number so the
      # restored value identifies which epoch's weights were kept.
      def __init__(self):
        self.stop_training = False
        self.weights = -1
      def get_weights(self):
        return self.weights
      def set_weights(self, weights):
        self.weights = weights
      def set_weight_to_epoch(self, epoch):
        self.weights = epoch
    early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                               patience=2,
                                               restore_best_weights=True)
    early_stop.model = DummyModel()
    losses = [0.2, 0.15, 0.1, 0.11, 0.12]
    # The best configuration is in the epoch 2 (loss = 0.1000).
    epochs_trained = 0
    early_stop.on_train_begin()
    for epoch in range(len(losses)):
      epochs_trained += 1
      early_stop.model.set_weight_to_epoch(epoch=epoch)
      early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
      if early_stop.model.stop_training:
        break
    # The best configuration is in epoch 2 (loss = 0.1000),
    # and while patience = 2, we're restoring the best weights,
    # so we end up at the epoch with the best weights, i.e. epoch 2
    self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
# This will raise a warning since the default address in unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
  def test_ReduceLROnPlateau(self):
    """ReduceLROnPlateau reduces lr only when `min_delta` is not achieved."""
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      def make_model():
        # Freshly seeded model so both halves of the test start from
        # identical weights.
        random_seed.set_random_seed(1234)
        np.random.seed(1337)
        model = testing_utils.get_small_sequential_mlp(
            num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
        model.compile(
            loss='categorical_crossentropy',
            optimizer=gradient_descent.SGD(lr=0.1))
        return model
      # TODO(psv): Make sure the callback works correctly when min_delta is
      # set as 0. Test fails when the order of this callback and assertion is
      # interchanged.
      model = make_model()
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=0,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)
      # min_delta=0: any improvement counts, so lr should stay at 0.1.
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
      model = make_model()
      # This should reduce the LR after the first epoch (due to high epsilon).
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=10,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=2)
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
  def test_ReduceLROnPlateau_patience(self):
    """lr is reduced only after `patience` epochs without improvement."""
    class DummyOptimizer(object):
      # Exposes only the `lr` variable the callback manipulates.
      def __init__(self):
        self.lr = keras.backend.variable(1.0)
    class DummyModel(object):
      def __init__(self):
        self.optimizer = DummyOptimizer()
    reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
        monitor='val_loss', patience=2)
    reduce_on_plateau.model = DummyModel()
    # Loss improves once, then worsens for two epochs (= patience).
    losses = [0.0860, 0.1096, 0.1040]
    lrs = []
    for epoch in range(len(losses)):
      reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
      lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
    # The learning rates should be 1.0 except the last one
    for lr in lrs[:-1]:
      self.assertEqual(lr, 1.0)
    self.assertLess(lrs[-1], 1.0)
  def test_ReduceLROnPlateau_backwards_compatibility(self):
    """Deprecated `epsilon` kwarg maps onto `min_delta` with a warning."""
    with test.mock.patch.object(logging, 'warning') as mock_log:
      reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
      self.assertRegexpMatches(
          str(mock_log.call_args), '`epsilon` argument is deprecated')
    self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
    self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
    self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
  def test_CSVLogger(self):
    """CSVLogger creates, appends to, and can be reused on a log file."""
    with self.cached_session():
      np.random.seed(1337)
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
      filepath = os.path.join(temp_dir, 'log.tsv')
      sep = '\t'
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      def make_model():
        np.random.seed(1337)
        model = testing_utils.get_small_sequential_mlp(
            num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
        model.compile(
            loss='categorical_crossentropy',
            optimizer=gradient_descent.SGD(lr=0.1),
            metrics=['accuracy'])
        return model
      # case 1, create new file with defined separator
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      with open(filepath) as csvfile:
        dialect = csv.Sniffer().sniff(csvfile.read())
      assert dialect.delimiter == sep
      del model
      del cbks
      # case 2, append data to existing file, skip header
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      # case 3, reuse of CSVLogger object
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)
      with open(filepath) as csvfile:
        list_lines = csvfile.readlines()
        for line in list_lines:
          assert line.count(sep) == 4
        assert len(list_lines) == 5
        output = ' '.join(list_lines)
        # The header must appear exactly once even after appending.
        assert len(re.findall('epoch', output)) == 1
      os.remove(filepath)
  def test_stop_training_csv(self):
    # Test that using the CSVLogger callback with the TerminateOnNaN callback
    # does not result in invalid CSVs.
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
    with self.cached_session():
      fp = os.path.join(tmpdir, 'test.csv')
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
      model = keras.models.Sequential()
      for _ in range(5):
        model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
      model.compile(loss='mean_squared_error',
                    optimizer='rmsprop')
      def data_generator():
        # Yields real batches for roughly 3 passes over the data, then
        # switches to all-NaN batches to trigger TerminateOnNaN.
        i = 0
        max_batch_index = len(x_train) // BATCH_SIZE
        tot = 0
        while 1:
          if tot > 3 * len(x_train):
            yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
                   np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
          else:
            yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                   y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
            i += 1
            tot += 1
            i %= max_batch_index
      history = model.fit_generator(data_generator(),
                                    len(x_train) // BATCH_SIZE,
                                    validation_data=(x_test, y_test),
                                    callbacks=cbks,
                                    epochs=20)
      loss = history.history['loss']
      assert len(loss) > 1
      assert loss[-1] == np.inf or np.isnan(loss[-1])
      values = []
      with open(fp) as f:
        for x in csv.reader(f):
          # In windows, due to \r\n line ends we may end up reading empty lines
          # after each line. Skip empty lines.
          if x:
            values.append(x)
      assert 'nan' in values[-1], 'The last epoch was not logged.'
  def test_TerminateOnNaN(self):
    """Training halts after the first epoch once the loss becomes inf."""
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN()]
      model = keras.models.Sequential()
      # Huge constant initializer drives the loss to inf immediately.
      initializer = keras.initializers.Constant(value=1e5)
      for _ in range(5):
        model.add(
            keras.layers.Dense(
                2,
                input_dim=INPUT_DIM,
                activation='relu',
                kernel_initializer=initializer))
      model.add(keras.layers.Dense(NUM_CLASSES))
      model.compile(loss='mean_squared_error', optimizer='rmsprop')
      history = model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=20)
      loss = history.history['loss']
      self.assertEqual(len(loss), 1)
      self.assertEqual(loss[0], np.inf)
  @unittest.skipIf(
      os.name == 'nt',
      'use_multiprocessing=True does not work on windows properly.')
  def test_LambdaCallback(self):
    """LambdaCallback's `on_train_end` hook fires after training."""
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      # Start an arbitrary process that should run during model
      # training and be terminated after training has completed.
      e = threading.Event()
      def target():
        e.wait()
      t = threading.Thread(target=target)
      t.start()
      # The callback releases the thread by setting the event at train end.
      cleanup_callback = keras.callbacks.LambdaCallback(
          on_train_end=lambda logs: e.set())
      cbks = [cleanup_callback]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=5,
          verbose=0)
      t.join()
      assert not t.is_alive()
  def test_RemoteMonitorWithJsonPayload(self):
    """RemoteMonitor with `send_as_json=True` posts during training."""
    if requests is None:
      self.skipTest('`requests` required to run this test')
    with self.cached_session():
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.np_utils.to_categorical(y_test)
      y_train = keras.utils.np_utils.to_categorical(y_train)
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='rmsprop',
          metrics=['accuracy'])
      cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
      # Patch `requests.post` so no real network traffic is attempted.
      with test.mock.patch.object(requests, 'post'):
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=1)
# A summary that was emitted during a test. Fields:
#   logdir: str. The logdir of the FileWriter to which the summary was
#     written.
#   tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile(object):
  """A record of summary tags and the files to which they were written.

  Fields `scalars`, `images`, `histograms`, and `tensors` are sets
  containing `_ObservedSummary` values.
  """

  def __init__(self):
    # One empty set per summary kind, filled in by `list_summaries`.
    for kind in ('scalars', 'images', 'histograms', 'tensors'):
      setattr(self, kind, set())
def list_summaries(logdir):
  """Read all summaries under the logdir into a `_SummaryFile`.

  Args:
    logdir: A path to a directory that contains zero or more event
      files, either as direct children or in transitive subdirectories.
      Summaries in these events must only contain old-style scalars,
      images, and histograms. Non-summary events, like `graph_def`s, are
      ignored.

  Returns:
    A `_SummaryFile` object reflecting all summaries written to any
    event files in the logdir or any of its descendant directories.

  Raises:
    ValueError: If an event file contains an summary of unexpected kind.
  """
  result = _SummaryFile()
  for (dirpath, dirnames, filenames) in os.walk(logdir):
    del dirnames  # unused
    for filename in filenames:
      if not filename.startswith('events.out.'):
        continue
      path = os.path.join(dirpath, filename)
      for event in summary_iterator.summary_iterator(path):
        if not event.summary:  # (e.g., it's a `graph_def` event)
          continue
        for value in event.summary.value:
          tag = value.tag
          # Case on the `value` rather than the summary metadata because
          # the Keras callback uses `summary_ops_v2` to emit old-style
          # summaries. See b/124535134.
          kind = value.WhichOneof('value')
          # Route each summary kind to the corresponding result set.
          container = {
              'simple_value': result.scalars,
              'image': result.images,
              'histo': result.histograms,
              'tensor': result.tensors,
          }.get(kind)
          if container is None:
            raise ValueError(
                'Unexpected summary kind %r in event file %s:\n%r'
                % (kind, path, event))
          elif kind == 'tensor' and tag != 'keras':
            # Check for V2 scalar summaries, which have a different PB
            # structure.
            if event.summary.value[
                0].metadata.plugin_data.plugin_name == 'scalars':
              container = result.scalars
          container.add(_ObservedSummary(logdir=dirpath, tag=tag))
  return result
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
  """Tests for the V2 `keras.callbacks.TensorBoard` summary output."""
  def setUp(self):
    super(TestTensorBoardV2, self).setUp()
    # Expected layout: <logdir>/train and <logdir>/validation run dirs.
    self.logdir = os.path.join(self.get_temp_dir(), 'tb')
    self.train_dir = os.path.join(self.logdir, 'train')
    self.validation_dir = os.path.join(self.logdir, 'validation')
  def _get_model(self):
    """Builds a small compiled conv model for (10, 10, 1) inputs."""
    layers = [
        keras.layers.Conv2D(8, (3, 3)),
        keras.layers.Flatten(),
        keras.layers.Dense(1)
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
    opt = gradient_descent.SGD(learning_rate=0.001)
    model.compile(
        opt,
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model
  def test_TensorBoard_default_logdir(self):
    """Regression test for cross-platform pathsep in default logdir."""
    os.chdir(self.get_temp_dir())
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard()  # no logdir specified
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(logdir='.')
    # The default logdir is './logs', built with os.path.join.
    train_dir = os.path.join('.', 'logs', 'train')
    validation_dir = os.path.join('.', 'logs', 'validation')
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
        })
  def test_TensorBoard_basic(self):
    """Epoch-level loss scalars are written for train and validation."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        })
  def test_TensorBoard_across_invocations(self):
    """Regression test for summary writer resource use-after-free.

    See: <https://github.com/tensorflow/tensorflow/issues/25707>
    """
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir)
    # The same callback instance is reused across two `fit` invocations.
    for _ in (1, 2):
      model.fit(
          x,
          y,
          batch_size=2,
          epochs=2,
          validation_data=(x, y),
          callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        })
  def test_TensorBoard_no_spurious_event_files(self):
    """Without validation data, only the 'train' run dir gets event files."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        callbacks=[tb_cbk])
    events_file_run_basenames = set()
    for (dirpath, dirnames, filenames) in os.walk(self.logdir):
      del dirnames  # unused
      if any(fn.startswith('events.out.') for fn in filenames):
        events_file_run_basenames.add(os.path.basename(dirpath))
    self.assertEqual(events_file_run_basenames, {'train'})
  def test_TensorBoard_batch_metrics(self):
    """With `update_freq=1`, per-batch loss scalars are written as well."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )
  def test_TensorBoard_weight_histograms(self):
    """`histogram_freq=1` writes bias/kernel histograms for the train run."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
    model_type = testing_utils.get_model_type()
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )
    self.assertEqual(
        self._strip_layer_names(summary_file.histograms, model_type),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
        },
    )
  def test_TensorBoard_weight_images(self):
    """`write_images=True` additionally writes image summaries of weights."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, write_images=True)
    model_type = testing_utils.get_model_type()
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )
    self.assertEqual(
        self._strip_layer_names(summary_file.histograms, model_type),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
        },
    )
    self.assertEqual(
        self._strip_layer_names(summary_file.images, model_type),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
        },
    )
  def test_custom_summary(self):
    """Summaries emitted from inside a layer's `call` are recorded."""
    if not testing_utils.should_run_tf_function():
      self.skipTest('Custom summaries only supported in V2 code path.')
    def scalar_v2_mock(name, data, step=None):
      """A reimplementation of the scalar plugin to avoid circular deps."""
      metadata = summary_pb2.SummaryMetadata()
      # Should match value in tensorboard/plugins/scalar/metadata.py.
      metadata.plugin_data.plugin_name = 'scalars'
      with summary_ops_v2.summary_scope(
          name, 'scalar_summary', values=[data, step]) as (tag, _):
        return summary_ops_v2.write(
            tag=tag,
            tensor=math_ops.cast(data, 'float32'),
            step=step,
            metadata=metadata)
    class LayerWithSummary(keras.layers.Layer):
      def call(self, x):
        # Emits a summary of the layer input on every forward pass.
        scalar_v2_mock('custom_summary', math_ops.reduce_sum(x))
        return x
    model = testing_utils.get_model_from_layers([LayerWithSummary()],
                                                input_shape=(5,),
                                                name='model')
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
    x, y = np.ones((10, 5)), np.ones((10, 5))
    model.fit(x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
            _ObservedSummary(
                logdir=self.train_dir,
                tag='model/layer_with_summary/custom_summary'),
            _ObservedSummary(
                logdir=self.validation_dir,
                tag='model/layer_with_summary/custom_summary')
        },
    )
  def _strip_layer_names(self, summaries, model_type):
    """Deduplicate summary names modulo layer prefix.

    This removes the first slash-component of each tag name: for
    instance, "foo/bar/baz" becomes "bar/baz".

    Args:
      summaries: A `set` of `_ObservedSummary` values.
      model_type: The model type currently being tested.

    Returns:
      A new `set` of `_ObservedSummary` values with layer prefixes
      removed.
    """
    result = set()
    for summary in summaries:
      if '/' not in summary.tag:
        raise ValueError('tag has no layer name: %r' % summary.tag)
      # Subclassed models nest tags one level deeper, so drop one more
      # component for them.
      start_from = 2 if 'subclass' in model_type else 1
      new_tag = '/'.join(summary.tag.split('/')[start_from:])
      result.add(summary._replace(tag=new_tag))
    return result
  def test_TensorBoard_invalid_argument(self):
    """Unknown keyword arguments raise a ValueError."""
    with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
      # `wwrite_images` is a deliberate misspelling of `write_images`.
      keras.callbacks.TensorBoard(wwrite_images=True)
# Note that this test specifies model_type explicitly.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
  """TensorBoard graph-writing and profiler tests with explicit model types."""

  def setUp(self):
    super(TestTensorBoardV2NonParameterizedTest, self).setUp()
    # Expected run-directory layout under the temp logdir.
    self.logdir = os.path.join(self.get_temp_dir(), 'tb')
    self.train_dir = os.path.join(self.logdir, 'train')
    self.validation_dir = os.path.join(self.logdir, 'validation')

  def _get_seq_model(self):
    """Builds and compiles a small sequential conv model."""
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    opt = gradient_descent.SGD(learning_rate=0.001)
    model.compile(
        opt,
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model

  def fitModelAndAssertKerasModelWritten(self, model):
    """Fits `model` with `write_graph=True` and checks the 'keras' tag."""
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir,
                                         write_graph=True,
                                         profile_batch=0)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag='keras'),
        },
    )

  def test_TensorBoard_writeSequentialModel_noInputShape(self):
    """Graph summary is written for a deferred-build sequential model."""
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  def test_TensorBoard_writeSequentialModel_withInputShape(self):
    """Graph summary is written for a sequential model with input_shape."""
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  def test_TensorBoard_writeModel(self):
    """Graph summary is written for a functional model.

    NOTE: renamed from `test_TensoriBoard_writeModel` to fix the typo in
    the test name; discovery is unaffected since the `test_` prefix is kept.
    """
    inputs = keras.layers.Input([10, 10, 1])
    x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
    x = keras.layers.Flatten()(x)
    x = keras.layers.Dense(1)(x)
    model = keras.models.Model(inputs=inputs, outputs=[x])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  def test_TensorBoard_autoTrace(self):
    """Profiling batch 1 emits a 'batch_1' trace tensor summary."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
        },
    )

  def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
    """The trace tag name tracks the profiled batch number."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
        },
    )

  def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
    """No trace is written when profile_batch exceeds the batch count."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    # Enabled trace only on the 10000th batch, thus it should be empty.
    self.assertEmpty(summary_file.tensors)
class MostRecentlyModifiedFileMatchingPatternTest(test.TestCase):
  """Tests for ModelCheckpoint's most-recently-modified-file lookup."""
  def test_get_most_recently_modified_file_matching_pattern(self):
    """The newest file matching the formatted pattern is returned."""
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    file_paths = [
        os.path.join(test_dir, file_name) for file_name in
        ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
    ]
    for file_path in file_paths:
      with open(file_path, 'w') as f:
        # Ensure there are some intervals between file creation.
        time.sleep(2)
        f.write('foo bar')
    # Ensure the files have been actually written.
    self.assertEqual(
        set([
            os.path.join(test_dir, file_name)
            for file_name in os.listdir(test_dir)
        ]), set(file_paths))
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(None)
        ._get_most_recently_modified_file_matching_pattern(path_pattern),
        file_paths[-1])
  def test_some_file_not_matching_pattern(self):
    """Files whose names do not match the pattern are ignored."""
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    # 'f.baatch01epoch01.h5' deliberately does not match the pattern.
    file_paths = [
        os.path.join(test_dir, file_name) for file_name in
        ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.baatch01epoch01.h5']
    ]
    for file_path in file_paths:
      with open(file_path, 'w') as f:
        # Ensure there are some intervals between file creation.
        time.sleep(2)
        f.write('foo bar')
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(None)
        ._get_most_recently_modified_file_matching_pattern(path_pattern),
        file_paths[-2])
  def test_get_same_file_if_file_name_equals_pattern(self):
    """A literal file name (no format fields) matches itself."""
    file_name = 'f.batch02.h5'
    test_dir = self.get_temp_dir()
    file_path = os.path.join(test_dir, file_name)
    with open(file_path, 'w') as f:
      f.write('foo bar')
    self.assertEqual(os.path.join(test_dir, os.listdir(test_dir)[0]), file_path)
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(
            None)._get_most_recently_modified_file_matching_pattern(file_path),
        file_path)
  def test_get_none_if_file_does_not_exist(self):
    """Returns None when nothing on disk matches."""
    file_name = 'f.batch02.h5'
    test_dir = self.get_temp_dir()
    file_path = os.path.join(test_dir, file_name)
    self.assertLen(os.listdir(test_dir), 0)
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(
            None)._get_most_recently_modified_file_matching_pattern(file_path),
        None)
  def test_using_checkpoint_management_latest_checkpoint(self):
    """`checkpoint_management.latest_checkpoint` takes priority over mtime."""
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}'
    ckpt_file_name = 'f.batchXepochY'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    ckpt_file_path = os.path.join(test_dir, ckpt_file_name)
    with open(ckpt_file_path, 'w') as f:
      f.write('dummy ckpt')
    checkpoint_management.update_checkpoint_state_internal(
        test_dir, ckpt_file_path)
    file_paths = [
        os.path.join(test_dir, file_name)
        for file_name in ['f.batch03epoch02', 'f.batch02epoch02']
    ]
    for file_path in file_paths:
      with open(file_path, 'w') as f:
        f.write('foo bar')
    # The result returned from checkpoint_management.latest_checkpoint takes
    # priority, so even if it was written earlier, we should still return that.
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(None)
        ._get_most_recently_modified_file_matching_pattern(path_pattern),
        ckpt_file_path)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/callbacks_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class KerasInitializersTest(test.TestCase):
  def _runner(self, init, shape, target_mean=None, target_std=None,
              target_max=None, target_min=None):
    """Checks that `init` survives a get_config/from_config round trip.

    NOTE(review): the `target_*` arguments are accepted for call-site
    compatibility but are currently unused -- no statistical properties
    of the drawn values are verified here, only that a reconstructed
    initializer reproduces the same values (assumes determinism).
    """
    variable = keras.backend.variable(init(shape))
    output = keras.backend.get_value(variable)
    # Test serialization (assumes deterministic behavior).
    config = init.get_config()
    reconstructed_init = init.__class__.from_config(config)
    variable = keras.backend.variable(reconstructed_init(shape))
    output_2 = keras.backend.get_value(variable)
    self.assertAllClose(output, output_2, atol=1e-4)
def test_uniform(self):
tensor_shape = (9, 6, 7)
with self.cached_session():
self._runner(
keras.initializers.RandomUniformV2(minval=-1, maxval=1, seed=124),
tensor_shape,
target_mean=0.,
target_max=1,
target_min=-1)
def test_normal(self):
tensor_shape = (8, 12, 99)
with self.cached_session():
self._runner(
keras.initializers.RandomNormalV2(mean=0, stddev=1, seed=153),
tensor_shape,
target_mean=0.,
target_std=1)
def test_truncated_normal(self):
tensor_shape = (12, 99, 7)
with self.cached_session():
self._runner(
keras.initializers.TruncatedNormalV2(mean=0, stddev=1, seed=126),
tensor_shape,
target_mean=0.,
target_max=2,
target_min=-2)
def test_constant(self):
tensor_shape = (5, 6, 4)
with self.cached_session():
self._runner(
keras.initializers.ConstantV2(2.),
tensor_shape,
target_mean=2,
target_max=2,
target_min=2)
def test_lecun_uniform(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, _ = init_ops._compute_fans(tensor_shape)
std = np.sqrt(1. / fan_in)
self._runner(
keras.initializers.lecun_uniformV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_glorot_uniform(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, fan_out = init_ops._compute_fans(tensor_shape)
std = np.sqrt(2. / (fan_in + fan_out))
self._runner(
keras.initializers.GlorotUniformV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_he_uniform(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, _ = init_ops._compute_fans(tensor_shape)
std = np.sqrt(2. / fan_in)
self._runner(
keras.initializers.he_uniformV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_lecun_normal(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, _ = init_ops._compute_fans(tensor_shape)
std = np.sqrt(1. / fan_in)
self._runner(
keras.initializers.lecun_normalV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_glorot_normal(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, fan_out = init_ops._compute_fans(tensor_shape)
std = np.sqrt(2. / (fan_in + fan_out))
self._runner(
keras.initializers.GlorotNormalV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_he_normal(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, _ = init_ops._compute_fans(tensor_shape)
std = np.sqrt(2. / fan_in)
self._runner(
keras.initializers.he_normalV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_orthogonal(self):
tensor_shape = (20, 20)
with self.cached_session():
self._runner(
keras.initializers.OrthogonalV2(seed=123),
tensor_shape,
target_mean=0.)
def test_identity(self):
with self.cached_session():
tensor_shape = (3, 4, 5)
with self.assertRaises(ValueError):
self._runner(
keras.initializers.IdentityV2(),
tensor_shape,
target_mean=1. / tensor_shape[0],
target_max=1.)
tensor_shape = (3, 3)
self._runner(
keras.initializers.IdentityV2(),
tensor_shape,
target_mean=1. / tensor_shape[0],
target_max=1.)
def test_zero(self):
tensor_shape = (4, 5)
with self.cached_session():
self._runner(
keras.initializers.ZerosV2(),
tensor_shape,
target_mean=0.,
target_max=0.)
def test_one(self):
tensor_shape = (4, 5)
with self.cached_session():
self._runner(
keras.initializers.OnesV2(),
tensor_shape,
target_mean=1.,
target_max=1.)
def test_default_random_uniform(self):
ru = keras.initializers.get('uniform')
self.assertEqual(ru.minval, -0.05)
self.assertEqual(ru.maxval, 0.05)
def test_default_random_normal(self):
rn = keras.initializers.get('normal')
self.assertEqual(rn.mean, 0.0)
self.assertEqual(rn.stddev, 0.05)
def test_default_truncated_normal(self):
tn = keras.initializers.get('truncated_normal')
self.assertEqual(tn.mean, 0.0)
self.assertEqual(tn.stddev, 0.05)
def test_initializer_v2_get(self):
tf2_force_enabled = tf2._force_enable # pylint: disable=protected-access
try:
tf2.enable()
rn = keras.initializers.get('random_normal')
self.assertIn('init_ops_v2', rn.__class__.__module__)
finally:
tf2._force_enable = tf2_force_enabled # pylint: disable=protected-access
def test_custom_initializer_saving(self):
def my_initializer(shape, dtype=None):
return array_ops.ones(shape, dtype=dtype)
inputs = keras.Input((10,))
outputs = keras.layers.Dense(1, kernel_initializer=my_initializer)(inputs)
model = keras.Model(inputs, outputs)
model2 = model.from_config(
model.get_config(), custom_objects={'my_initializer': my_initializer})
self.assertEqual(model2.layers[1].kernel_initializer, my_initializer)
@test_util.run_v2_only
def test_load_external_variance_scaling_v2(self):
external_serialized_json = {
'class_name': 'VarianceScaling',
'config': {
'distribution': 'normal',
'mode': 'fan_avg',
'scale': 1.0,
'seed': None
}
}
initializer = keras.initializers.deserialize(external_serialized_json)
self.assertEqual(initializer.distribution, 'truncated_normal')
# Standard test entry point: runs every test case in this module.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/initializers_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
# Input feature dimensionality and number of output classes shared by every
# model built in these tests.
DATA_DIM = 5
NUM_CLASSES = 2
class KerasRegularizersTest(keras_parameterized.TestCase,
                            parameterized.TestCase):
  """Tests for Keras kernel and activity regularizers.

  Verifies that regularizers contribute the expected number of entries to
  `model.losses` (including shared-layer and shared-model topologies), that
  models with regularizers train, and that custom regularizer functions
  survive config serialization.
  """

  def create_model(self, kernel_regularizer=None, activity_regularizer=None):
    """Builds a one-Dense-layer Sequential model with the given regularizers."""
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(NUM_CLASSES,
                                 kernel_regularizer=kernel_regularizer,
                                 activity_regularizer=activity_regularizer,
                                 input_shape=(DATA_DIM,)))
    return model

  def get_data(self):
    """Returns small one-hot-encoded train/test splits for fitting."""
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=10,
        test_samples=10,
        input_shape=(DATA_DIM,),
        num_classes=NUM_CLASSES)
    y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)
    return (x_train, y_train), (x_test, y_test)

  def create_multi_input_model_from(self, layer1, layer2):
    """Builds a two-input model averaging `layer1`/`layer2` outputs.

    Also registers two explicit `add_loss` terms, so the returned model
    carries those in addition to any regularization losses from the layers.
    """
    input_1 = keras.layers.Input(shape=(DATA_DIM,))
    input_2 = keras.layers.Input(shape=(DATA_DIM,))
    out1 = layer1(input_1)
    out2 = layer2(input_2)
    out = keras.layers.Average()([out1, out2])
    model = keras.models.Model([input_1, input_2], out)
    model.add_loss(keras.backend.mean(out2))
    model.add_loss(math_ops.reduce_sum(input_1))
    return model

  @keras_parameterized.run_all_keras_modes
  @parameterized.named_parameters([
      ('l1', regularizers.l1()),
      ('l2', regularizers.l2()),
      ('l1_l2', regularizers.l1_l2()),
  ])
  def test_kernel_regularization(self, regularizer):
    """A kernel regularizer adds exactly one loss and the model still fits."""
    (x_train, y_train), _ = self.get_data()
    model = self.create_model(kernel_regularizer=regularizer)
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    self.assertEqual(len(model.losses), 1)
    model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)

  @keras_parameterized.run_all_keras_modes
  @parameterized.named_parameters([
      ('l1', regularizers.l1()),
      ('l2', regularizers.l2()),
      ('l1_l2', regularizers.l1_l2()),
      ('l2_zero', keras.regularizers.l2(0.)),
  ])
  def test_activity_regularization(self, regularizer):
    """An activity regularizer adds exactly one loss and the model still fits."""
    (x_train, y_train), _ = self.get_data()
    model = self.create_model(activity_regularizer=regularizer)
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    # One activity-regularization loss regardless of execution mode.
    # (Previously written as `1 if context.executing_eagerly() else 1`,
    # a dead conditional with identical branches.)
    self.assertEqual(len(model.losses), 1)
    model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)

  @keras_parameterized.run_all_keras_modes
  @keras_parameterized.run_with_all_model_types
  def test_zero_regularization(self):
    # Verifies that training with zero regularization works.
    x, y = np.ones((10, 10)), np.ones((10, 3))
    model = testing_utils.get_model_from_layers(
        [keras.layers.Dense(3, kernel_regularizer=keras.regularizers.l2(0))],
        input_shape=(10,))
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.fit(x, y, batch_size=5, epochs=1)

  def test_custom_regularizer_saving(self):
    """A custom regularizer function survives a get_config/from_config trip."""

    def my_regularizer(weights):
      return math_ops.reduce_sum(math_ops.abs(weights))

    inputs = keras.Input((10,))
    outputs = keras.layers.Dense(1, kernel_regularizer=my_regularizer)(inputs)
    model = keras.Model(inputs, outputs)
    model2 = model.from_config(
        model.get_config(), custom_objects={'my_regularizer': my_regularizer})
    self.assertEqual(model2.layers[1].kernel_regularizer, my_regularizer)

  @keras_parameterized.run_all_keras_modes
  @parameterized.named_parameters([
      ('l1', regularizers.l1()),
      ('l2', regularizers.l2()),
      ('l1_l2', regularizers.l1_l2()),
  ])
  def test_regularization_shared_layer(self, regularizer):
    """Sharing one regularized layer across two inputs yields 5 losses.

    1 kernel loss + 2 activity losses (one per call of the shared layer)
    + the 2 explicit `add_loss` terms from the helper.
    """
    dense_layer = keras.layers.Dense(
        NUM_CLASSES,
        kernel_regularizer=regularizer,
        activity_regularizer=regularizer)
    model = self.create_multi_input_model_from(dense_layer, dense_layer)
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    self.assertEqual(len(model.losses), 5)

  @keras_parameterized.run_all_keras_modes
  @parameterized.named_parameters([
      ('l1', regularizers.l1()),
      ('l2', regularizers.l2()),
      ('l1_l2', regularizers.l1_l2()),
  ])
  def test_regularization_shared_model(self, regularizer):
    """Sharing a whole inner model across two inputs yields 6 losses."""
    dense_layer = keras.layers.Dense(
        NUM_CLASSES,
        kernel_regularizer=regularizer,
        activity_regularizer=regularizer)
    input_tensor = keras.layers.Input(shape=(DATA_DIM,))
    dummy_model = keras.models.Model(input_tensor, dense_layer(input_tensor))
    model = self.create_multi_input_model_from(dummy_model, dummy_model)
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    self.assertEqual(len(model.losses), 6)

  @keras_parameterized.run_all_keras_modes
  @parameterized.named_parameters([
      ('l1', regularizers.l1()),
      ('l2', regularizers.l2()),
      ('l1_l2', regularizers.l1_l2()),
  ])
  def test_regularization_shared_layer_in_different_models(self, regularizer):
    """Sharing one regularized layer across two separate models yields 14
    losses on the combined model."""
    shared_dense = keras.layers.Dense(
        NUM_CLASSES,
        kernel_regularizer=regularizer,
        activity_regularizer=regularizer)
    models = []
    for _ in range(2):
      input_tensor = keras.layers.Input(shape=(DATA_DIM,))
      unshared_dense = keras.layers.Dense(
          NUM_CLASSES, kernel_regularizer=regularizer)
      out = unshared_dense(shared_dense(input_tensor))
      models.append(keras.models.Model(input_tensor, out))
    model = self.create_multi_input_model_from(
        layer1=models[0], layer2=models[1])
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    self.assertEqual(len(model.losses), 14)
# Standard test entry point: runs every test case in this module.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/regularizers_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the Keras API meant to be a high-level API for TensorFlow.
Detailed documentation and user guides are available at
[keras.io](https://keras.io).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import tf2
from tensorflow.python.keras import activations
from tensorflow.python.keras import applications
from tensorflow.python.keras import backend
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import callbacks_v1
from tensorflow.python.keras import constraints
from tensorflow.python.keras import datasets
from tensorflow.python.keras import estimator
from tensorflow.python.keras import initializers
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics
from tensorflow.python.keras import models
from tensorflow.python.keras import ops
from tensorflow.python.keras import optimizers
from tensorflow.python.keras import premade
from tensorflow.python.keras import preprocessing
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import utils
from tensorflow.python.keras import wrappers
from tensorflow.python.keras.layers import Input
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.models import Sequential
from tensorflow.python.util.tf_export import keras_export
# The reported Keras version string depends on whether TF 2.x behavior is
# enabled at import time.
if tf2.enabled():
  __version__ = '2.3.0-tf'
else:
  __version__ = '2.2.4-tf'

# Expose the version string as `tf.keras.__version__`.
keras_export('keras.__version__').export_constant(__name__, '__version__')

# Remove the __future__ import names from the module namespace so they are
# not exposed as part of the public `tf.keras` API surface.
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import math
import os
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import metrics
from tensorflow.python.keras import Model
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import util as trackable_utils
@test_util.run_all_in_graph_and_eager_modes
class KerasSumTest(test.TestCase):
  """Tests for the stateful `metrics.Sum` metric.

  Assertions here depend on state accumulating across successive calls, so
  statement order within each test is significant.
  """

  def test_sum(self):
    """Config, state accumulation, and reset of an unweighted Sum."""
    m = metrics.Sum(name='my_sum')

    # check config
    self.assertEqual(m.name, 'my_sum')
    self.assertTrue(m.stateful)
    self.assertEqual(m.dtype, dtypes.float32)
    self.assertEqual(len(m.variables), 1)
    self.evaluate(variables.variables_initializer(m.variables))

    # check initial state
    self.assertEqual(self.evaluate(m.total), 0)

    # check __call__()
    self.assertEqual(self.evaluate(m(100)), 100)
    self.assertEqual(self.evaluate(m.total), 100)

    # check update_state() and result() + state accumulation + tensor input
    update_op = m.update_state(ops.convert_n_to_tensor([1, 5]))
    self.evaluate(update_op)
    self.assertAlmostEqual(self.evaluate(m.result()), 106)
    self.assertEqual(self.evaluate(m.total), 106)  # 100 + 1 + 5

    # check reset_states()
    m.reset_states()
    self.assertEqual(self.evaluate(m.total), 0)

  def test_sum_with_sample_weight(self):
    """Sample-weight broadcasting/squeezing/expanding and value reduction."""
    m = metrics.Sum(dtype=dtypes.float64)
    self.assertEqual(m.dtype, dtypes.float64)
    self.evaluate(variables.variables_initializer(m.variables))

    # check scalar weight
    result_t = m(100, sample_weight=0.5)
    self.assertEqual(self.evaluate(result_t), 50)
    self.assertEqual(self.evaluate(m.total), 50)

    # check weights not scalar and weights rank matches values rank
    result_t = m([1, 5], sample_weight=[1, 0.2])
    result = self.evaluate(result_t)
    self.assertAlmostEqual(result, 52., 4)  # 50 + 1 + 5 * 0.2
    self.assertAlmostEqual(self.evaluate(m.total), 52., 4)

    # check weights broadcast
    result_t = m([1, 2], sample_weight=0.5)
    self.assertAlmostEqual(self.evaluate(result_t), 53.5, 1)  # 52 + 0.5 + 1
    self.assertAlmostEqual(self.evaluate(m.total), 53.5, 1)

    # check weights squeeze
    result_t = m([1, 5], sample_weight=[[1], [0.2]])
    self.assertAlmostEqual(self.evaluate(result_t), 55.5, 1)  # 53.5 + 1 + 1
    self.assertAlmostEqual(self.evaluate(m.total), 55.5, 1)

    # check weights expand
    result_t = m([[1], [5]], sample_weight=[1, 0.2])
    self.assertAlmostEqual(self.evaluate(result_t), 57.5, 2)  # 55.5 + 1 + 1
    self.assertAlmostEqual(self.evaluate(m.total), 57.5, 1)

    # check values reduced to the dimensions of weight
    result_t = m([[[1., 2.], [3., 2.], [0.5, 4.]]], sample_weight=[0.5])
    result = np.round(self.evaluate(result_t), decimals=2)
    # result = (prev: 57.5) + 0.5 + 1 + 1.5 + 1 + 0.25 + 2
    self.assertAlmostEqual(result, 63.75, 2)
    self.assertAlmostEqual(self.evaluate(m.total), 63.75, 2)

  def test_sum_graph_with_placeholder(self):
    """Graph-mode Sum fed through placeholders keeps accumulating."""
    with context.graph_mode(), self.cached_session() as sess:
      m = metrics.Sum()
      v = array_ops.placeholder(dtypes.float32)
      w = array_ops.placeholder(dtypes.float32)
      self.evaluate(variables.variables_initializer(m.variables))

      # check __call__()
      result_t = m(v, sample_weight=w)
      result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))
      self.assertEqual(result, 50)
      self.assertEqual(self.evaluate(m.total), 50)

      # check update_state() and result()
      result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))
      self.assertAlmostEqual(result, 52., 2)  # 50 + 1 + 5 * 0.2
      self.assertAlmostEqual(self.evaluate(m.total), 52., 2)

  def test_save_restore(self):
    """Checkpointing a Sum restores its accumulated total."""
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
    m = metrics.Sum()
    checkpoint = trackable_utils.Checkpoint(sum=m)
    self.evaluate(variables.variables_initializer(m.variables))

    # update state
    self.evaluate(m(100.))
    self.evaluate(m(200.))

    # save checkpoint and then add an update
    save_path = checkpoint.save(checkpoint_prefix)
    self.evaluate(m(1000.))

    # restore to the same checkpoint sum object (= 300)
    checkpoint.restore(save_path).assert_consumed().run_restore_ops()
    self.evaluate(m(300.))
    self.assertEqual(600., self.evaluate(m.result()))

    # restore to a different checkpoint sum object
    restore_sum = metrics.Sum()
    restore_checkpoint = trackable_utils.Checkpoint(sum=restore_sum)
    status = restore_checkpoint.restore(save_path)
    restore_update = restore_sum(300.)
    status.assert_consumed().run_restore_ops()
    self.evaluate(restore_update)
    self.assertEqual(600., self.evaluate(restore_sum.result()))
@keras_parameterized.run_all_keras_modes
class KerasMeanTest(keras_parameterized.TestCase):
  """Tests for the stateful `metrics.Mean` metric.

  Assertions depend on `total` and `count` accumulating across successive
  calls, so statement order within each test is significant.
  """

  # TODO(b/120949004): Re-enable garbage collection check
  # @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
  def test_mean(self):
    """Config, state accumulation, reset, and config round-trip of Mean."""
    m = metrics.Mean(name='my_mean')

    # check config
    self.assertEqual(m.name, 'my_mean')
    self.assertTrue(m.stateful)
    self.assertEqual(m.dtype, dtypes.float32)
    self.assertEqual(len(m.variables), 2)
    self.evaluate(variables.variables_initializer(m.variables))

    # check initial state
    self.assertEqual(self.evaluate(m.total), 0)
    self.assertEqual(self.evaluate(m.count), 0)

    # check __call__()
    self.assertEqual(self.evaluate(m(100)), 100)
    self.assertEqual(self.evaluate(m.total), 100)
    self.assertEqual(self.evaluate(m.count), 1)

    # check update_state() and result() + state accumulation + tensor input
    update_op = m.update_state(ops.convert_n_to_tensor([1, 5]))
    self.evaluate(update_op)
    self.assertAlmostEqual(self.evaluate(m.result()), 106 / 3, 2)
    self.assertEqual(self.evaluate(m.total), 106)  # 100 + 1 + 5
    self.assertEqual(self.evaluate(m.count), 3)

    # check reset_states()
    m.reset_states()
    self.assertEqual(self.evaluate(m.total), 0)
    self.assertEqual(self.evaluate(m.count), 0)

    # Check save and restore config
    m2 = metrics.Mean.from_config(m.get_config())
    self.assertEqual(m2.name, 'my_mean')
    self.assertTrue(m2.stateful)
    self.assertEqual(m2.dtype, dtypes.float32)
    self.assertEqual(len(m2.variables), 2)

  def test_mean_with_sample_weight(self):
    """Weighted mean: result is weighted total divided by weight count."""
    m = metrics.Mean(dtype=dtypes.float64)
    self.assertEqual(m.dtype, dtypes.float64)
    self.evaluate(variables.variables_initializer(m.variables))

    # check scalar weight
    result_t = m(100, sample_weight=0.5)
    self.assertEqual(self.evaluate(result_t), 50 / 0.5)
    self.assertEqual(self.evaluate(m.total), 50)
    self.assertEqual(self.evaluate(m.count), 0.5)

    # check weights not scalar and weights rank matches values rank
    result_t = m([1, 5], sample_weight=[1, 0.2])
    result = self.evaluate(result_t)
    self.assertAlmostEqual(result, 52 / 1.7, 2)
    self.assertAlmostEqual(self.evaluate(m.total), 52, 2)  # 50 + 1 + 5 * 0.2
    self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2)  # 0.5 + 1.2

    # check weights broadcast
    result_t = m([1, 2], sample_weight=0.5)
    self.assertAlmostEqual(self.evaluate(result_t), 53.5 / 2.7, 2)
    self.assertAlmostEqual(self.evaluate(m.total), 53.5, 2)  # 52 + 0.5 + 1
    self.assertAlmostEqual(self.evaluate(m.count), 2.7, 2)  # 1.7 + 0.5 + 0.5

    # check weights squeeze
    result_t = m([1, 5], sample_weight=[[1], [0.2]])
    self.assertAlmostEqual(self.evaluate(result_t), 55.5 / 3.9, 2)
    self.assertAlmostEqual(self.evaluate(m.total), 55.5, 2)  # 53.5 + 1 + 1
    self.assertAlmostEqual(self.evaluate(m.count), 3.9, 2)  # 2.7 + 1.2

    # check weights expand
    result_t = m([[1], [5]], sample_weight=[1, 0.2])
    self.assertAlmostEqual(self.evaluate(result_t), 57.5 / 5.1, 2)
    self.assertAlmostEqual(self.evaluate(m.total), 57.5, 2)  # 55.5 + 1 + 1
    self.assertAlmostEqual(self.evaluate(m.count), 5.1, 2)  # 3.9 + 1.2

    # check values reduced to the dimensions of weight
    result_t = m([[[1., 2.], [3., 2.], [0.5, 4.]]], sample_weight=[0.5])
    result = np.round(self.evaluate(result_t), decimals=2)  # 58.5 / 5.6
    self.assertEqual(result, 10.45)
    self.assertEqual(np.round(self.evaluate(m.total), decimals=2), 58.54)
    self.assertEqual(np.round(self.evaluate(m.count), decimals=2), 5.6)

  def test_mean_graph_with_placeholder(self):
    """Graph-mode Mean fed through placeholders keeps accumulating."""
    with context.graph_mode(), self.cached_session() as sess:
      m = metrics.Mean()
      v = array_ops.placeholder(dtypes.float32)
      w = array_ops.placeholder(dtypes.float32)
      self.evaluate(variables.variables_initializer(m.variables))

      # check __call__()
      result_t = m(v, sample_weight=w)
      result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))
      self.assertEqual(self.evaluate(m.total), 50)
      self.assertEqual(self.evaluate(m.count), 0.5)
      self.assertEqual(result, 50 / 0.5)

      # check update_state() and result()
      result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))
      self.assertAlmostEqual(self.evaluate(m.total), 52, 2)  # 50 + 1 + 5 * 0.2
      self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2)  # 0.5 + 1.2
      self.assertAlmostEqual(result, 52 / 1.7, 2)

  def test_save_restore(self):
    """Checkpointing a Mean restores both `total` and `count`."""
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
    m = metrics.Mean()
    checkpoint = trackable_utils.Checkpoint(mean=m)
    self.evaluate(variables.variables_initializer(m.variables))

    # update state
    self.evaluate(m(100.))
    self.evaluate(m(200.))

    # save checkpoint and then add an update
    save_path = checkpoint.save(checkpoint_prefix)
    self.evaluate(m(1000.))

    # restore to the same checkpoint mean object
    checkpoint.restore(save_path).assert_consumed().run_restore_ops()
    self.evaluate(m(300.))
    self.assertEqual(200., self.evaluate(m.result()))

    # restore to a different checkpoint mean object
    restore_mean = metrics.Mean()
    restore_checkpoint = trackable_utils.Checkpoint(mean=restore_mean)
    status = restore_checkpoint.restore(save_path)
    restore_update = restore_mean(300.)
    status.assert_consumed().run_restore_ops()
    self.evaluate(restore_update)
    self.assertEqual(200., self.evaluate(restore_mean.result()))
    self.assertEqual(3, self.evaluate(restore_mean.count))

  def test_multiple_instances(self):
    """Two Mean instances keep independent state and uniquely named vars."""
    m = metrics.Mean()
    m2 = metrics.Mean()

    self.assertEqual(m.name, 'mean')
    self.assertEqual(m2.name, 'mean')

    # Variable names get a '_1' suffix on the second instance.
    self.assertEqual([v.name for v in m.variables],
                     testing_utils.get_expected_metric_variable_names(
                         ['total', 'count']))
    self.assertEqual([v.name for v in m2.variables],
                     testing_utils.get_expected_metric_variable_names(
                         ['total', 'count'], name_suffix='_1'))

    self.evaluate(variables.variables_initializer(m.variables))
    self.evaluate(variables.variables_initializer(m2.variables))

    # check initial state
    self.assertEqual(self.evaluate(m.total), 0)
    self.assertEqual(self.evaluate(m.count), 0)
    self.assertEqual(self.evaluate(m2.total), 0)
    self.assertEqual(self.evaluate(m2.count), 0)

    # check __call__() -- updating one instance must not touch the other
    self.assertEqual(self.evaluate(m(100)), 100)
    self.assertEqual(self.evaluate(m.total), 100)
    self.assertEqual(self.evaluate(m.count), 1)
    self.assertEqual(self.evaluate(m2.total), 0)
    self.assertEqual(self.evaluate(m2.count), 0)

    self.assertEqual(self.evaluate(m2([63, 10])), 36.5)
    self.assertEqual(self.evaluate(m2.total), 73)
    self.assertEqual(self.evaluate(m2.count), 2)
    self.assertEqual(self.evaluate(m.result()), 100)
    self.assertEqual(self.evaluate(m.total), 100)
    self.assertEqual(self.evaluate(m.count), 1)
@test_util.run_all_in_graph_and_eager_modes
class KerasAccuracyTest(test.TestCase):
  def test_accuracy(self):
    """Config, exact-match accuracy, config round-trip, and sample weights."""
    acc_obj = metrics.Accuracy(name='my_acc')

    # check config
    self.assertEqual(acc_obj.name, 'my_acc')
    self.assertTrue(acc_obj.stateful)
    self.assertEqual(len(acc_obj.variables), 2)
    self.assertEqual(acc_obj.dtype, dtypes.float32)
    self.evaluate(variables.variables_initializer(acc_obj.variables))

    # verify that correct value is returned
    update_op = acc_obj.update_state([[1], [2], [3], [4]], [[1], [2], [3], [4]])
    self.evaluate(update_op)
    result = self.evaluate(acc_obj.result())
    self.assertEqual(result, 1)  # 4/4 samples match

    # Check save and restore config
    a2 = metrics.Accuracy.from_config(acc_obj.get_config())
    self.assertEqual(a2.name, 'my_acc')
    self.assertTrue(a2.stateful)
    self.assertEqual(len(a2.variables), 2)
    self.assertEqual(a2.dtype, dtypes.float32)

    # check with sample_weight
    result_t = acc_obj([[2], [1]], [[2], [0]], sample_weight=[[0.5], [0.2]])
    result = self.evaluate(result_t)
    self.assertAlmostEqual(result, 0.96, 2)  # 4.5/4.7
  def test_accuracy_ragged(self):
    """Accuracy accepts ragged tensors for values and sample weights."""
    acc_obj = metrics.Accuracy(name='my_acc')
    self.evaluate(variables.variables_initializer(acc_obj.variables))

    # verify that correct value is returned
    rt1 = ragged_factory_ops.constant([[1], [2], [3], [4]])
    rt2 = ragged_factory_ops.constant([[1], [2], [3], [4]])
    update_op = acc_obj.update_state(rt1, rt2)
    self.evaluate(update_op)
    result = self.evaluate(acc_obj.result())
    self.assertEqual(result, 1)  # 4/4 samples match

    # check with sample_weight
    rt1 = ragged_factory_ops.constant([[2], [1]])
    rt2 = ragged_factory_ops.constant([[2], [0]])
    sw_ragged = ragged_factory_ops.constant([[0.5], [0.2]])
    result_t = acc_obj(rt1, rt2, sample_weight=sw_ragged)
    result = self.evaluate(result_t)
    self.assertAlmostEqual(result, 0.96, 2)  # 4.5/4.7
  def test_binary_accuracy(self):
    """BinaryAccuracy config, squeezing of y_pred/y_true, sample weights."""
    acc_obj = metrics.BinaryAccuracy(name='my_acc')

    # check config
    self.assertEqual(acc_obj.name, 'my_acc')
    self.assertTrue(acc_obj.stateful)
    self.assertEqual(len(acc_obj.variables), 2)
    self.assertEqual(acc_obj.dtype, dtypes.float32)
    self.evaluate(variables.variables_initializer(acc_obj.variables))

    # verify that correct value is returned
    update_op = acc_obj.update_state([[1], [0]], [[1], [0]])
    self.evaluate(update_op)
    result = self.evaluate(acc_obj.result())
    self.assertEqual(result, 1)  # 2/2

    # check y_pred squeeze
    update_op = acc_obj.update_state([[1], [1]], [[[1]], [[0]]])
    self.evaluate(update_op)
    result = self.evaluate(acc_obj.result())
    self.assertAlmostEqual(result, 0.75, 2)  # 3/4

    # check y_true squeeze
    result_t = acc_obj([[[1]], [[1]]], [[1], [0]])
    result = self.evaluate(result_t)
    self.assertAlmostEqual(result, 0.67, 2)  # 4/6

    # check with sample_weight
    result_t = acc_obj([[1], [1]], [[1], [0]], [[0.5], [0.2]])
    result = self.evaluate(result_t)
    self.assertAlmostEqual(result, 0.67, 2)  # 4.5/6.7
  def test_binary_accuracy_ragged(self):
    """BinaryAccuracy works on ragged inputs; rank-mismatch squeeze fails."""
    acc_obj = metrics.BinaryAccuracy(name='my_acc')
    self.evaluate(variables.variables_initializer(acc_obj.variables))

    # verify that correct value is returned
    rt1 = ragged_factory_ops.constant([[1], [0]])
    rt2 = ragged_factory_ops.constant([[1], [0]])
    update_op = acc_obj.update_state(rt1, rt2)
    self.evaluate(update_op)
    result = self.evaluate(acc_obj.result())
    self.assertEqual(result, 1)  # 2/2

    # check y_true squeeze only supported for dense tensors and is
    # not supported by ragged tensor (different ranks). --> error
    rt1 = ragged_factory_ops.constant([[[1], [1]]])
    rt2 = ragged_factory_ops.constant([[1], [0]])
    with self.assertRaises(ValueError):
      result_t = acc_obj(rt1, rt2)
      result = self.evaluate(result_t)
  def test_binary_accuracy_threshold(self):
    """A custom threshold of 0.7 classifies 0.9/0.8 as 1 and 0.6/0.4 as 0."""
    acc_obj = metrics.BinaryAccuracy(threshold=0.7)
    self.evaluate(variables.variables_initializer(acc_obj.variables))
    result_t = acc_obj([[1], [1], [0], [0]], [[0.9], [0.6], [0.4], [0.8]])
    result = self.evaluate(result_t)
    self.assertAlmostEqual(result, 0.5, 2)
  def test_binary_accuracy_threshold_ragged(self):
    """Same custom-threshold check as above, with ragged tensor inputs."""
    acc_obj = metrics.BinaryAccuracy(threshold=0.7)
    self.evaluate(variables.variables_initializer(acc_obj.variables))
    rt1 = ragged_factory_ops.constant([[1], [1], [0], [0]])
    rt2 = ragged_factory_ops.constant([[0.9], [0.6], [0.4], [0.8]])
    result_t = acc_obj(rt1, rt2)
    result = self.evaluate(result_t)
    self.assertAlmostEqual(result, 0.5, 2)
  def test_categorical_accuracy(self):
    """CategoricalAccuracy with one-hot labels and sample weights."""
    acc_obj = metrics.CategoricalAccuracy(name='my_acc')

    # check config
    self.assertEqual(acc_obj.name, 'my_acc')
    self.assertTrue(acc_obj.stateful)
    self.assertEqual(len(acc_obj.variables), 2)
    self.assertEqual(acc_obj.dtype, dtypes.float32)
    self.evaluate(variables.variables_initializer(acc_obj.variables))

    # verify that correct value is returned
    update_op = acc_obj.update_state([[0, 0, 1], [0, 1, 0]],
                                     [[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
    self.evaluate(update_op)
    result = self.evaluate(acc_obj.result())
    self.assertEqual(result, 1)  # 2/2

    # check with sample_weight
    result_t = acc_obj([[0, 0, 1], [0, 1, 0]],
                       [[0.1, 0.1, 0.8], [0.05, 0, 0.95]], [[0.5], [0.2]])
    result = self.evaluate(result_t)
    self.assertAlmostEqual(result, 0.93, 2)  # 2.5/2.7
  def test_categorical_accuracy_ragged(self):
    """CategoricalAccuracy on ragged inputs; ragged sample weights fail."""
    acc_obj = metrics.CategoricalAccuracy(name='my_acc')
    self.evaluate(variables.variables_initializer(acc_obj.variables))
    # verify that correct value is returned
    rt1 = ragged_factory_ops.constant([[0, 0, 1], [0, 1, 0]])
    rt2 = ragged_factory_ops.constant([[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
    update_op = acc_obj.update_state(rt1, rt2)
    self.evaluate(update_op)
    result = self.evaluate(acc_obj.result())
    self.assertEqual(result, 1)  # 2/2
    # check with sample_weight
    rt1 = ragged_factory_ops.constant([[0, 0, 1], [0, 1, 0]])
    rt2 = ragged_factory_ops.constant([[0.1, 0.1, 0.8], [0.05, 0, 0.95]])
    sample_weight = ragged_factory_ops.constant([[0.5], [0.2]])
    # The weighted call with a ragged sample_weight is expected to raise
    # rather than produce a result.
    with self.assertRaises(errors_impl.InvalidArgumentError):
      result_t = acc_obj(rt1, rt2, sample_weight)
      result = self.evaluate(result_t)
  def test_sparse_categorical_accuracy(self):
    """SparseCategoricalAccuracy: config checks, then stateful accumulation.

    The final weighted assertion folds in the state left by the earlier
    unweighted update (0.93 = 2.5/2.7), so statement order is significant.
    """
    acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
    # check config
    self.assertEqual(acc_obj.name, 'my_acc')
    self.assertTrue(acc_obj.stateful)
    self.assertEqual(len(acc_obj.variables), 2)
    self.assertEqual(acc_obj.dtype, dtypes.float32)
    self.evaluate(variables.variables_initializer(acc_obj.variables))
    # verify that correct value is returned
    update_op = acc_obj.update_state([[2], [1]],
                                     [[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
    self.evaluate(update_op)
    result = self.evaluate(acc_obj.result())
    self.assertEqual(result, 1)  # 2/2
    # check with sample_weight (accumulates on top of the update above)
    result_t = acc_obj([[2], [1]], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
                       [[0.5], [0.2]])
    result = self.evaluate(result_t)
    self.assertAlmostEqual(result, 0.93, 2)  # 2.5/2.7
def test_sparse_categorical_accuracy_ragged(self):
acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
# verify that correct value is returned
rt1 = ragged_factory_ops.constant([[2], [1]])
rt2 = ragged_factory_ops.constant([[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
with self.assertRaises(errors_impl.InvalidArgumentError):
# sparse_categorical_accuracy is not supported for composite/ragged
# tensors.
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
  def test_sparse_categorical_accuracy_mismatched_dims(self):
    """SparseCategoricalAccuracy when y_true has one fewer dim than y_pred.

    Identical data to test_sparse_categorical_accuracy but with rank-1
    labels; the weighted assertion again accumulates on the prior update.
    """
    acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
    # check config
    self.assertEqual(acc_obj.name, 'my_acc')
    self.assertTrue(acc_obj.stateful)
    self.assertEqual(len(acc_obj.variables), 2)
    self.assertEqual(acc_obj.dtype, dtypes.float32)
    self.evaluate(variables.variables_initializer(acc_obj.variables))
    # verify that correct value is returned
    update_op = acc_obj.update_state([2, 1], [[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
    self.evaluate(update_op)
    result = self.evaluate(acc_obj.result())
    self.assertEqual(result, 1)  # 2/2
    # check with sample_weight
    result_t = acc_obj([2, 1], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
                       [[0.5], [0.2]])
    result = self.evaluate(result_t)
    self.assertAlmostEqual(result, 0.93, 2)  # 2.5/2.7
  def test_sparse_categorical_accuracy_mismatched_dims_dynamic(self):
    """Mismatched-rank inputs with fully dynamic (placeholder) shapes."""
    with context.graph_mode(), self.cached_session() as sess:
      acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
      self.evaluate(variables.variables_initializer(acc_obj.variables))
      t = array_ops.placeholder(dtypes.float32)
      p = array_ops.placeholder(dtypes.float32)
      w = array_ops.placeholder(dtypes.float32)
      result_t = acc_obj(t, p, w)
      result = sess.run(
          result_t,
          feed_dict=({
              t: [2, 1],
              p: [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
              w: [[0.5], [0.2]]
          }))
      # Only the first sample is predicted correctly (argmax of the second
      # row is class 2, not 1), so accuracy = 0.5 / (0.5 + 0.2).
      self.assertAlmostEqual(result, 0.71, 2)  # 0.5/0.7
@test_util.run_all_in_graph_and_eager_modes
class CosineSimilarityTest(test.TestCase):
  """Tests for the CosineSimilarity metric."""

  def l2_norm(self, x, axis):
    """Numpy reference: scale `x` to unit L2 norm along `axis`."""
    eps = 1e-12
    sq_sum = np.square(x).sum(axis=axis, keepdims=True)
    inv_norm = 1.0 / np.sqrt(np.maximum(sq_sum, eps))
    return x * inv_norm

  def setup(self, axis=1):
    """Build fixed inputs and the numpy-computed expected similarity."""
    self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
    self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
    normed_true = self.l2_norm(self.np_y_true, axis)
    normed_pred = self.l2_norm(self.np_y_pred, axis)
    self.expected_loss = np.sum(normed_true * normed_pred, axis=(axis,))
    self.y_true = constant_op.constant(self.np_y_true)
    self.y_pred = constant_op.constant(self.np_y_pred)

  def test_config(self):
    obj = metrics.CosineSimilarity(axis=2, name='my_cos', dtype=dtypes.int32)
    self.assertEqual(obj.name, 'my_cos')
    self.assertEqual(obj._dtype, dtypes.int32)
    # Round-trip through get_config/from_config must preserve the settings.
    restored = metrics.CosineSimilarity.from_config(obj.get_config())
    self.assertEqual(restored.name, 'my_cos')
    self.assertEqual(restored._dtype, dtypes.int32)

  def test_unweighted(self):
    self.setup()
    obj = metrics.CosineSimilarity()
    self.evaluate(variables.variables_initializer(obj.variables))
    loss = obj(self.y_true, self.y_pred)
    self.assertAlmostEqual(self.evaluate(loss), np.mean(self.expected_loss), 3)

  def test_weighted(self):
    self.setup()
    obj = metrics.CosineSimilarity()
    self.evaluate(variables.variables_initializer(obj.variables))
    weights = np.asarray([1.2, 3.4])
    loss = obj(
        self.y_true, self.y_pred, sample_weight=constant_op.constant(weights))
    expected = np.sum(self.expected_loss * weights) / np.sum(weights)
    self.assertAlmostEqual(self.evaluate(loss), expected, 3)

  def test_axis(self):
    self.setup(axis=1)
    obj = metrics.CosineSimilarity(axis=1)
    self.evaluate(variables.variables_initializer(obj.variables))
    loss = obj(self.y_true, self.y_pred)
    self.assertAlmostEqual(self.evaluate(loss), np.mean(self.expected_loss), 3)
@test_util.run_all_in_graph_and_eager_modes
class MeanAbsoluteErrorTest(test.TestCase):
  """Tests for the MeanAbsoluteError metric."""

  def test_config(self):
    obj = metrics.MeanAbsoluteError(name='my_mae', dtype=dtypes.int32)
    self.assertEqual(obj.name, 'my_mae')
    self.assertEqual(obj._dtype, dtypes.int32)
    # Round-trip through get_config/from_config must preserve the settings.
    restored = metrics.MeanAbsoluteError.from_config(obj.get_config())
    self.assertEqual(restored.name, 'my_mae')
    self.assertEqual(restored._dtype, dtypes.int32)

  def test_unweighted(self):
    obj = metrics.MeanAbsoluteError()
    self.evaluate(variables.variables_initializer(obj.variables))
    labels = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                  (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    self.evaluate(obj.update_state(labels, preds))
    self.assertAllClose(0.5, obj.result(), atol=1e-5)

  def test_weighted(self):
    obj = metrics.MeanAbsoluteError()
    self.evaluate(variables.variables_initializer(obj.variables))
    labels = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                  (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    weights = constant_op.constant((1., 1.5, 2., 2.5))
    result = obj(labels, preds, sample_weight=weights)
    self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class MeanAbsolutePercentageErrorTest(test.TestCase):
  """Tests for the MeanAbsolutePercentageError metric."""

  def test_config(self):
    obj = metrics.MeanAbsolutePercentageError(
        name='my_mape', dtype=dtypes.int32)
    self.assertEqual(obj.name, 'my_mape')
    self.assertEqual(obj._dtype, dtypes.int32)
    # Round-trip through get_config/from_config must preserve the settings.
    restored = metrics.MeanAbsolutePercentageError.from_config(
        obj.get_config())
    self.assertEqual(restored.name, 'my_mape')
    self.assertEqual(restored._dtype, dtypes.int32)

  def test_unweighted(self):
    obj = metrics.MeanAbsolutePercentageError()
    self.evaluate(variables.variables_initializer(obj.variables))
    labels = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                  (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    self.evaluate(obj.update_state(labels, preds))
    # NOTE(review): the huge expected value presumably stems from the zero
    # entries in y_true being clamped internally — confirm against the
    # metric's epsilon handling.
    self.assertAllClose(35e7, obj.result(), atol=1e-5)

  def test_weighted(self):
    obj = metrics.MeanAbsolutePercentageError()
    self.evaluate(variables.variables_initializer(obj.variables))
    labels = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                  (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    weights = constant_op.constant((1., 1.5, 2., 2.5))
    result = obj(labels, preds, sample_weight=weights)
    self.assertAllClose(40e7, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredErrorTest(test.TestCase):
  """Tests for the MeanSquaredError metric."""

  def test_config(self):
    obj = metrics.MeanSquaredError(name='my_mse', dtype=dtypes.int32)
    self.assertEqual(obj.name, 'my_mse')
    self.assertEqual(obj._dtype, dtypes.int32)
    # Round-trip through get_config/from_config must preserve the settings.
    restored = metrics.MeanSquaredError.from_config(obj.get_config())
    self.assertEqual(restored.name, 'my_mse')
    self.assertEqual(restored._dtype, dtypes.int32)

  def test_unweighted(self):
    obj = metrics.MeanSquaredError()
    self.evaluate(variables.variables_initializer(obj.variables))
    labels = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                  (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    self.evaluate(obj.update_state(labels, preds))
    self.assertAllClose(0.5, obj.result(), atol=1e-5)

  def test_weighted(self):
    obj = metrics.MeanSquaredError()
    self.evaluate(variables.variables_initializer(obj.variables))
    labels = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                  (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    weights = constant_op.constant((1., 1.5, 2., 2.5))
    result = obj(labels, preds, sample_weight=weights)
    self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredLogarithmicErrorTest(test.TestCase):
  """Tests for the MeanSquaredLogarithmicError metric."""

  def test_config(self):
    obj = metrics.MeanSquaredLogarithmicError(
        name='my_msle', dtype=dtypes.int32)
    self.assertEqual(obj.name, 'my_msle')
    self.assertEqual(obj._dtype, dtypes.int32)
    # Round-trip through get_config/from_config must preserve the settings.
    restored = metrics.MeanSquaredLogarithmicError.from_config(
        obj.get_config())
    self.assertEqual(restored.name, 'my_msle')
    self.assertEqual(restored._dtype, dtypes.int32)

  def test_unweighted(self):
    obj = metrics.MeanSquaredLogarithmicError()
    self.evaluate(variables.variables_initializer(obj.variables))
    labels = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                  (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    self.evaluate(obj.update_state(labels, preds))
    self.assertAllClose(0.24022, obj.result(), atol=1e-5)

  def test_weighted(self):
    obj = metrics.MeanSquaredLogarithmicError()
    self.evaluate(variables.variables_initializer(obj.variables))
    labels = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                  (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    weights = constant_op.constant((1., 1.5, 2., 2.5))
    result = obj(labels, preds, sample_weight=weights)
    self.assertAllClose(0.26082, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class HingeTest(test.TestCase):
  """Tests for the Hinge metric (mean hinge loss)."""

  def test_config(self):
    hinge_obj = metrics.Hinge(name='hinge', dtype=dtypes.int32)
    self.assertEqual(hinge_obj.name, 'hinge')
    self.assertEqual(hinge_obj._dtype, dtypes.int32)
    # Check save and restore config
    hinge_obj2 = metrics.Hinge.from_config(hinge_obj.get_config())
    self.assertEqual(hinge_obj2.name, 'hinge')
    self.assertEqual(hinge_obj2._dtype, dtypes.int32)

  def test_unweighted(self):
    """0/1 labels are treated as -1/1 for the hinge computation (see below)."""
    hinge_obj = metrics.Hinge()
    self.evaluate(variables.variables_initializer(hinge_obj.variables))
    y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
    y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
                                   [-0.25, -1., 0.5, 0.6]])
    # metric = max(0, 1-y_true * y_pred), where y_true is -1/1
    # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
    # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
    # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
    # metric = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
    #        = [0.6, 0.4125]
    # reduced metric = (0.6 + 0.4125) / 2
    update_op = hinge_obj.update_state(y_true, y_pred)
    self.evaluate(update_op)
    result = hinge_obj.result()
    self.assertAllClose(0.506, result, atol=1e-3)

  def test_weighted(self):
    """Per-sample weights scale the per-sample hinge values (see derivation)."""
    hinge_obj = metrics.Hinge()
    self.evaluate(variables.variables_initializer(hinge_obj.variables))
    y_true = constant_op.constant([[-1, 1, -1, 1], [-1, -1, 1, 1]])
    y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
                                   [-0.25, -1., 0.5, 0.6]])
    sample_weight = constant_op.constant([1.5, 2.])
    # metric = max(0, 1-y_true * y_pred), where y_true is -1/1
    # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
    # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
    # metric = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
    #        = [0.6, 0.4125]
    # weighted metric = [0.6 * 1.5, 0.4125 * 2]
    # reduced metric = (0.6 * 1.5 + 0.4125 * 2) / (1.5 + 2)
    result = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose(0.493, self.evaluate(result), atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class SquaredHingeTest(test.TestCase):
  """Tests for the SquaredHinge metric (mean squared hinge loss)."""

  def test_config(self):
    sq_hinge_obj = metrics.SquaredHinge(name='sq_hinge', dtype=dtypes.int32)
    self.assertEqual(sq_hinge_obj.name, 'sq_hinge')
    self.assertEqual(sq_hinge_obj._dtype, dtypes.int32)
    # Check save and restore config
    sq_hinge_obj2 = metrics.SquaredHinge.from_config(sq_hinge_obj.get_config())
    self.assertEqual(sq_hinge_obj2.name, 'sq_hinge')
    self.assertEqual(sq_hinge_obj2._dtype, dtypes.int32)

  def test_unweighted(self):
    """0/1 labels are treated as -1/1; hinge values are squared (see below)."""
    sq_hinge_obj = metrics.SquaredHinge()
    self.evaluate(variables.variables_initializer(sq_hinge_obj.variables))
    y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
    y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
                                   [-0.25, -1., 0.5, 0.6]])
    # metric = max(0, 1-y_true * y_pred), where y_true is -1/1
    # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
    # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
    # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
    # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
    # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
    #                                         [0.5625, 0, 0.25, 0.16]]
    # metric = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
    #        = [0.485, 0.2431]
    # reduced metric = (0.485 + 0.2431) / 2
    update_op = sq_hinge_obj.update_state(y_true, y_pred)
    self.evaluate(update_op)
    result = sq_hinge_obj.result()
    self.assertAllClose(0.364, result, atol=1e-3)

  def test_weighted(self):
    """Per-sample weights scale the per-sample squared-hinge values."""
    sq_hinge_obj = metrics.SquaredHinge()
    self.evaluate(variables.variables_initializer(sq_hinge_obj.variables))
    y_true = constant_op.constant([[-1, 1, -1, 1], [-1, -1, 1, 1]])
    y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
                                   [-0.25, -1., 0.5, 0.6]])
    sample_weight = constant_op.constant([1.5, 2.])
    # metric = max(0, 1-y_true * y_pred), where y_true is -1/1
    # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
    # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
    # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
    # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
    #                                         [0.5625, 0, 0.25, 0.16]]
    # metric = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
    #        = [0.485, 0.2431]
    # weighted metric = [0.485 * 1.5, 0.2431 * 2]
    # reduced metric = (0.485 * 1.5 + 0.2431 * 2) / (1.5 + 2)
    result = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose(0.347, self.evaluate(result), atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class CategoricalHingeTest(test.TestCase):
  """Tests for the CategoricalHinge metric."""

  def test_config(self):
    obj = metrics.CategoricalHinge(name='cat_hinge', dtype=dtypes.int32)
    self.assertEqual(obj.name, 'cat_hinge')
    self.assertEqual(obj._dtype, dtypes.int32)
    # Round-trip through get_config/from_config must preserve the settings.
    restored = metrics.CategoricalHinge.from_config(obj.get_config())
    self.assertEqual(restored.name, 'cat_hinge')
    self.assertEqual(restored._dtype, dtypes.int32)

  def test_unweighted(self):
    obj = metrics.CategoricalHinge()
    self.evaluate(variables.variables_initializer(obj.variables))
    labels = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                  (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    self.evaluate(obj.update_state(labels, preds))
    self.assertAllClose(0.5, obj.result(), atol=1e-5)

  def test_weighted(self):
    obj = metrics.CategoricalHinge()
    self.evaluate(variables.variables_initializer(obj.variables))
    labels = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                  (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    weights = constant_op.constant((1., 1.5, 2., 2.5))
    result = obj(labels, preds, sample_weight=weights)
    self.assertAllClose(0.5, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class RootMeanSquaredErrorTest(test.TestCase):
  """Tests for the RootMeanSquaredError metric."""

  def test_config(self):
    obj = metrics.RootMeanSquaredError(name='rmse', dtype=dtypes.int32)
    self.assertEqual(obj.name, 'rmse')
    self.assertEqual(obj._dtype, dtypes.int32)
    # Round-trip through get_config/from_config must preserve the settings.
    restored = metrics.RootMeanSquaredError.from_config(obj.get_config())
    self.assertEqual(restored.name, 'rmse')
    self.assertEqual(restored._dtype, dtypes.int32)

  def test_unweighted(self):
    obj = metrics.RootMeanSquaredError()
    self.evaluate(variables.variables_initializer(obj.variables))
    labels = constant_op.constant((2, 4, 6))
    preds = constant_op.constant((1, 3, 2))
    self.evaluate(obj.update_state(labels, preds))
    # error = [-1, -1, -4], square(error) = [1, 1, 16], mean = 18/3 = 6
    self.assertAllClose(math.sqrt(6), obj.result(), atol=1e-3)

  def test_weighted(self):
    obj = metrics.RootMeanSquaredError()
    self.evaluate(variables.variables_initializer(obj.variables))
    labels = constant_op.constant((2, 4, 6, 8))
    preds = constant_op.constant((1, 3, 2, 3))
    weights = constant_op.constant((0, 1, 0, 1))
    # Only the 2nd and 4th samples carry weight: errors [-1, -5],
    # squared [1, 25], mean = 26/2 = 13.
    result = obj(labels, preds, sample_weight=weights)
    self.assertAllClose(math.sqrt(13), self.evaluate(result), atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class TopKCategoricalAccuracyTest(test.TestCase):
  """Tests for TopKCategoricalAccuracy (one-hot labels)."""

  def test_config(self):
    a_obj = metrics.TopKCategoricalAccuracy(name='topkca', dtype=dtypes.int32)
    self.assertEqual(a_obj.name, 'topkca')
    self.assertEqual(a_obj._dtype, dtypes.int32)
    # Config round-trip must preserve name and dtype.
    a_obj2 = metrics.TopKCategoricalAccuracy.from_config(a_obj.get_config())
    self.assertEqual(a_obj2.name, 'topkca')
    self.assertEqual(a_obj2._dtype, dtypes.int32)

  def test_correctness(self):
    """Accuracy for the default k and for k smaller/larger than the default.

    y_true/y_pred are reassigned partway through, so the sub-cases below
    are order-sensitive.
    """
    a_obj = metrics.TopKCategoricalAccuracy()
    self.evaluate(variables.variables_initializer(a_obj.variables))
    y_true = constant_op.constant([[0, 0, 1], [0, 1, 0]])
    y_pred = constant_op.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    result = a_obj(y_true, y_pred)
    self.assertEqual(1, self.evaluate(result))  # both the samples match
    # With `k` < 5.
    a_obj = metrics.TopKCategoricalAccuracy(k=1)
    self.evaluate(variables.variables_initializer(a_obj.variables))
    result = a_obj(y_true, y_pred)
    self.assertEqual(0.5, self.evaluate(result))  # only sample #2 matches
    # With `k` > 5.
    y_true = constant_op.constant([[0, 0, 1, 0, 0, 0, 0],
                                   [0, 1, 0, 0, 0, 0, 0]])
    y_pred = constant_op.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4],
                                   [0.05, 0.95, 0, 0, 0, 0, 0]])
    a_obj = metrics.TopKCategoricalAccuracy(k=6)
    self.evaluate(variables.variables_initializer(a_obj.variables))
    result = a_obj(y_true, y_pred)
    self.assertEqual(0.5, self.evaluate(result))  # only 1 sample matches.

  def test_weighted(self):
    a_obj = metrics.TopKCategoricalAccuracy(k=2)
    self.evaluate(variables.variables_initializer(a_obj.variables))
    y_true = constant_op.constant([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    y_pred = constant_op.constant([[0, 0.9, 0.1], [0, 0.9, 0.1], [0, 0.9, 0.1]])
    sample_weight = constant_op.constant((1.0, 0.0, 1.0))
    # The only mismatching sample (the 2nd) carries zero weight, so the
    # weighted accuracy is 1.
    result = a_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose(1.0, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class SparseTopKCategoricalAccuracyTest(test.TestCase):
  """Tests for SparseTopKCategoricalAccuracy (integer class-id labels)."""

  def test_config(self):
    a_obj = metrics.SparseTopKCategoricalAccuracy(
        name='stopkca', dtype=dtypes.int32)
    self.assertEqual(a_obj.name, 'stopkca')
    self.assertEqual(a_obj._dtype, dtypes.int32)
    # Config round-trip must preserve name and dtype.
    a_obj2 = metrics.SparseTopKCategoricalAccuracy.from_config(
        a_obj.get_config())
    self.assertEqual(a_obj2.name, 'stopkca')
    self.assertEqual(a_obj2._dtype, dtypes.int32)

  def test_correctness(self):
    """Accuracy for the default k and for k smaller/larger than the default.

    y_pred is reassigned partway through, so the sub-cases below are
    order-sensitive.
    """
    a_obj = metrics.SparseTopKCategoricalAccuracy()
    self.evaluate(variables.variables_initializer(a_obj.variables))
    y_true = constant_op.constant([2, 1])
    y_pred = constant_op.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    result = a_obj(y_true, y_pred)
    self.assertEqual(1, self.evaluate(result))  # both the samples match
    # With `k` < 5.
    a_obj = metrics.SparseTopKCategoricalAccuracy(k=1)
    self.evaluate(variables.variables_initializer(a_obj.variables))
    result = a_obj(y_true, y_pred)
    self.assertEqual(0.5, self.evaluate(result))  # only sample #2 matches
    # With `k` > 5.
    y_pred = constant_op.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4],
                                   [0.05, 0.95, 0, 0, 0, 0, 0]])
    a_obj = metrics.SparseTopKCategoricalAccuracy(k=6)
    self.evaluate(variables.variables_initializer(a_obj.variables))
    result = a_obj(y_true, y_pred)
    self.assertEqual(0.5, self.evaluate(result))  # only 1 sample matches.

  def test_weighted(self):
    a_obj = metrics.SparseTopKCategoricalAccuracy(k=2)
    self.evaluate(variables.variables_initializer(a_obj.variables))
    y_true = constant_op.constant([1, 0, 2])
    y_pred = constant_op.constant([[0, 0.9, 0.1], [0, 0.9, 0.1], [0, 0.9, 0.1]])
    sample_weight = constant_op.constant((1.0, 0.0, 1.0))
    # The only mismatching sample (the 2nd) carries zero weight, so the
    # weighted accuracy is 1.
    result = a_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose(1.0, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class LogCoshErrorTest(test.TestCase):
  """Tests for the LogCoshError metric."""

  def setup(self):
    """Precompute fixed inputs and elementwise log(cosh(y_pred - y_true))."""
    preds = np.asarray([1, 9, 2, -5, -2, 6]).reshape((2, 3))
    labels = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
    self.batch_size = 6
    diff = preds - labels
    # log(cosh(x)) spelled out via exponentials.
    self.expected_results = np.log((np.exp(diff) + np.exp(-diff)) / 2)
    self.y_pred = constant_op.constant(preds, dtype=dtypes.float32)
    self.y_true = constant_op.constant(labels)

  def test_config(self):
    obj = metrics.LogCoshError(name='logcosh', dtype=dtypes.int32)
    self.assertEqual(obj.name, 'logcosh')
    self.assertEqual(obj._dtype, dtypes.int32)

  def test_unweighted(self):
    self.setup()
    obj = metrics.LogCoshError()
    self.evaluate(variables.variables_initializer(obj.variables))
    self.evaluate(obj.update_state(self.y_true, self.y_pred))
    expected = np.sum(self.expected_results) / self.batch_size
    self.assertAllClose(obj.result(), expected, atol=1e-3)

  def test_weighted(self):
    self.setup()
    obj = metrics.LogCoshError()
    self.evaluate(variables.variables_initializer(obj.variables))
    weights_t = constant_op.constant([1.2, 3.4], shape=(2, 1))
    result = obj(self.y_true, self.y_pred, sample_weight=weights_t)
    # Broadcast the per-sample weights across the feature axis for the
    # numpy reference computation.
    weights = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
    expected = np.sum(self.expected_results * weights) / np.sum(weights)
    self.assertAllClose(self.evaluate(result), expected, atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class PoissonTest(test.TestCase):
  """Tests for the Poisson metric."""

  def setup(self):
    """Precompute inputs and elementwise y_pred - y_true * log(y_pred)."""
    preds = np.asarray([1, 9, 2, 5, 2, 6]).reshape((2, 3))
    labels = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
    self.batch_size = 6
    self.expected_results = preds - labels * np.log(preds)
    self.y_pred = constant_op.constant(preds, dtype=dtypes.float32)
    self.y_true = constant_op.constant(labels)

  def test_config(self):
    obj = metrics.Poisson(name='poisson', dtype=dtypes.int32)
    self.assertEqual(obj.name, 'poisson')
    self.assertEqual(obj._dtype, dtypes.int32)
    # Round-trip through get_config/from_config must preserve the settings.
    restored = metrics.Poisson.from_config(obj.get_config())
    self.assertEqual(restored.name, 'poisson')
    self.assertEqual(restored._dtype, dtypes.int32)

  def test_unweighted(self):
    self.setup()
    obj = metrics.Poisson()
    self.evaluate(variables.variables_initializer(obj.variables))
    self.evaluate(obj.update_state(self.y_true, self.y_pred))
    expected = np.sum(self.expected_results) / self.batch_size
    self.assertAllClose(obj.result(), expected, atol=1e-3)

  def test_weighted(self):
    self.setup()
    obj = metrics.Poisson()
    self.evaluate(variables.variables_initializer(obj.variables))
    weights_t = constant_op.constant([1.2, 3.4], shape=(2, 1))
    result = obj(self.y_true, self.y_pred, sample_weight=weights_t)
    # Broadcast the per-sample weights across the feature axis for the
    # numpy reference computation.
    weights = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
    expected = np.sum(self.expected_results * weights) / np.sum(weights)
    self.assertAllClose(self.evaluate(result), expected, atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class KLDivergenceTest(test.TestCase):
  """Tests for the KLDivergence metric."""

  def setup(self):
    """Precompute inputs and elementwise y_true * log(y_true / y_pred)."""
    preds = np.asarray([.4, .9, .12, .36, .3, .4]).reshape((2, 3))
    labels = np.asarray([.5, .8, .12, .7, .43, .8]).reshape((2, 3))
    self.batch_size = 2
    self.expected_results = labels * np.log(labels / preds)
    self.y_pred = constant_op.constant(preds, dtype=dtypes.float32)
    self.y_true = constant_op.constant(labels)

  def test_config(self):
    obj = metrics.KLDivergence(name='kld', dtype=dtypes.int32)
    self.assertEqual(obj.name, 'kld')
    self.assertEqual(obj._dtype, dtypes.int32)
    # Round-trip through get_config/from_config must preserve the settings.
    restored = metrics.KLDivergence.from_config(obj.get_config())
    self.assertEqual(restored.name, 'kld')
    self.assertEqual(restored._dtype, dtypes.int32)

  def test_unweighted(self):
    self.setup()
    obj = metrics.KLDivergence()
    self.evaluate(variables.variables_initializer(obj.variables))
    self.evaluate(obj.update_state(self.y_true, self.y_pred))
    expected = np.sum(self.expected_results) / self.batch_size
    self.assertAllClose(obj.result(), expected, atol=1e-3)

  def test_weighted(self):
    self.setup()
    obj = metrics.KLDivergence()
    self.evaluate(variables.variables_initializer(obj.variables))
    result = obj(
        self.y_true,
        self.y_pred,
        sample_weight=constant_op.constant([1.2, 3.4], shape=(2, 1)))
    weights = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
    # NOTE(review): unlike the LogCosh/Poisson weighted tests, the
    # denominator here is the per-sample weight total (1.2 + 3.4), not the
    # sum of the broadcast elementwise weights — presumably because KLD
    # reduces over the feature axis before weighting; confirm against the
    # metric implementation before "fixing" this.
    expected = np.sum(self.expected_results * weights) / (1.2 + 3.4)
    self.assertAllClose(self.evaluate(result), expected, atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class MeanRelativeErrorTest(test.TestCase):
  """Tests for the MeanRelativeError metric."""

  def test_config(self):
    normalizer = constant_op.constant([1, 3], dtype=dtypes.float32)
    obj = metrics.MeanRelativeError(normalizer=normalizer, name='mre')
    self.assertEqual(obj.name, 'mre')
    self.assertArrayNear(self.evaluate(obj.normalizer), [1, 3], 1e-1)
    # The normalizer must survive a config round-trip as well.
    restored = metrics.MeanRelativeError.from_config(obj.get_config())
    self.assertEqual(restored.name, 'mre')
    self.assertArrayNear(self.evaluate(restored.normalizer), [1, 3], 1e-1)

  def test_unweighted(self):
    preds_np = np.asarray([2, 4, 6, 8], dtype=np.float32)
    labels_np = np.asarray([1, 3, 2, 3], dtype=np.float32)
    expected = np.mean(np.abs(preds_np - labels_np) / labels_np)
    preds = constant_op.constant(preds_np, shape=(1, 4), dtype=dtypes.float32)
    labels = constant_op.constant(labels_np, shape=(1, 4))
    obj = metrics.MeanRelativeError(normalizer=labels)
    self.evaluate(variables.variables_initializer(obj.variables))
    result = obj(labels, preds)
    self.assertAllClose(self.evaluate(result), expected, atol=1e-3)

  def test_weighted(self):
    preds_np = np.asarray([2, 4, 6, 8], dtype=np.float32)
    labels_np = np.asarray([1, 3, 2, 3], dtype=np.float32)
    weights_np = np.asarray([0.2, 0.3, 0.5, 0], dtype=np.float32)
    rel_errors = np.abs(preds_np - labels_np) / labels_np
    expected = np.sum(rel_errors * weights_np)
    preds = constant_op.constant(preds_np, dtype=dtypes.float32)
    labels = constant_op.constant(labels_np)
    obj = metrics.MeanRelativeError(normalizer=labels)
    self.evaluate(variables.variables_initializer(obj.variables))
    result = obj(labels, preds, sample_weight=constant_op.constant(weights_np))
    self.assertAllClose(self.evaluate(result), expected, atol=1e-3)

  def test_zero_normalizer(self):
    """An all-zero normalizer yields a 0 result rather than a division error."""
    preds = constant_op.constant([2, 4], dtype=dtypes.float32)
    labels = constant_op.constant([1, 3])
    obj = metrics.MeanRelativeError(normalizer=array_ops.zeros_like(labels))
    self.evaluate(variables.variables_initializer(obj.variables))
    result = obj(labels, preds)
    self.assertEqual(self.evaluate(result), 0)
@test_util.run_all_in_graph_and_eager_modes
class MeanIoUTest(test.TestCase):
def test_config(self):
m_obj = metrics.MeanIoU(num_classes=2, name='mean_iou')
self.assertEqual(m_obj.name, 'mean_iou')
self.assertEqual(m_obj.num_classes, 2)
m_obj2 = metrics.MeanIoU.from_config(m_obj.get_config())
self.assertEqual(m_obj2.name, 'mean_iou')
self.assertEqual(m_obj2.num_classes, 2)
def test_unweighted(self):
y_pred = [0, 1, 0, 1]
y_true = [0, 0, 1, 1]
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(variables.variables_initializer(m_obj.variables))
result = m_obj(y_true, y_pred)
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_weighted(self):
y_pred = constant_op.constant([0, 1, 0, 1], dtype=dtypes.float32)
y_true = constant_op.constant([0, 0, 1, 1])
sample_weight = constant_op.constant([0.2, 0.3, 0.4, 0.1])
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(variables.variables_initializer(m_obj.variables))
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_multi_dim_input(self):
y_pred = constant_op.constant([[0, 1], [0, 1]], dtype=dtypes.float32)
y_true = constant_op.constant([[0, 0], [1, 1]])
sample_weight = constant_op.constant([[0.2, 0.3], [0.4, 0.1]])
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(variables.variables_initializer(m_obj.variables))
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_zero_valid_entries(self):
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(variables.variables_initializer(m_obj.variables))
self.assertAllClose(self.evaluate(m_obj.result()), 0, atol=1e-3)
def test_zero_and_non_zero_entries(self):
  """Classes with an empty denominator are excluded from the IoU average."""
  y_pred = constant_op.constant([1], dtype=dtypes.float32)
  y_true = constant_op.constant([1])
  m_obj = metrics.MeanIoU(num_classes=2)
  self.evaluate(variables.variables_initializer(m_obj.variables))
  result = m_obj(y_true, y_pred)
  # cm = [[0, 0],
  #       [0, 1]]
  # sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
  # iou = true_positives / (sum_row + sum_col - true_positives))
  # Only class 1 has valid entries, so the mean divides by 1, not 2.
  expected_result = (0 + 1 / (1 + 1 - 1)) / 1
  self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
class MeanTensorTest(test.TestCase):
  """Tests `metrics.MeanTensor`, which tracks an element-wise running mean."""

  @test_util.run_in_graph_and_eager_modes
  def test_config(self):
    """Config round-trip; `result()` raises before the first update."""
    m = metrics.MeanTensor(name='mean_by_element')

    # check config
    self.assertEqual(m.name, 'mean_by_element')
    self.assertTrue(m.stateful)
    self.assertEqual(m.dtype, dtypes.float32)
    self.assertEqual(len(m.variables), 0)

    with self.assertRaisesRegexp(ValueError, 'does not have any result yet'):
      m.result()
    self.evaluate(m([[3], [5], [3]]))
    # Shape is fixed lazily by the first update.
    self.assertAllEqual(m._shape, [3, 1])

    m2 = metrics.MeanTensor.from_config(m.get_config())
    self.assertEqual(m2.name, 'mean_by_element')
    self.assertTrue(m2.stateful)
    self.assertEqual(m2.dtype, dtypes.float32)
    self.assertEqual(len(m2.variables), 0)

  @test_util.run_in_graph_and_eager_modes
  def test_unweighted(self):
    """Per-element totals/counts accumulate across calls; reset zeroes them."""
    m = metrics.MeanTensor(dtype=dtypes.float64)

    # check __call__()
    self.assertAllClose(self.evaluate(m([100, 40])), [100, 40])
    self.assertAllClose(self.evaluate(m.total), [100, 40])
    self.assertAllClose(self.evaluate(m.count), [1, 1])

    # check update_state() and result() + state accumulation + tensor input
    update_op = m.update_state(ops.convert_n_to_tensor([1, 5]))
    self.evaluate(update_op)
    self.assertAllClose(self.evaluate(m.result()), [50.5, 22.5])
    self.assertAllClose(self.evaluate(m.total), [101, 45])
    self.assertAllClose(self.evaluate(m.count), [2, 2])

    # check reset_states()
    m.reset_states()
    self.assertAllClose(self.evaluate(m.total), [0, 0])
    self.assertAllClose(self.evaluate(m.count), [0, 0])

  @test_util.run_in_graph_and_eager_modes
  def test_weighted(self):
    """Scalar, matching-rank, broadcast, squeeze and expand weight shapes."""
    m = metrics.MeanTensor(dtype=dtypes.float64)
    self.assertEqual(m.dtype, dtypes.float64)

    # check scalar weight
    result_t = m([100, 30], sample_weight=0.5)
    self.assertAllClose(self.evaluate(result_t), [100, 30])
    self.assertAllClose(self.evaluate(m.total), [50, 15])
    self.assertAllClose(self.evaluate(m.count), [0.5, 0.5])

    # check weights not scalar and weights rank matches values rank
    result_t = m([1, 5], sample_weight=[1, 0.2])
    result = self.evaluate(result_t)
    self.assertAllClose(result, [51 / 1.5, 16 / 0.7], 2)
    self.assertAllClose(self.evaluate(m.total), [51, 16])
    self.assertAllClose(self.evaluate(m.count), [1.5, 0.7])

    # check weights broadcast
    result_t = m([1, 2], sample_weight=0.5)
    self.assertAllClose(self.evaluate(result_t), [51.5 / 2, 17 / 1.2])
    self.assertAllClose(self.evaluate(m.total), [51.5, 17])
    self.assertAllClose(self.evaluate(m.count), [2, 1.2])

    # check weights squeeze
    result_t = m([1, 5], sample_weight=[[1], [0.2]])
    self.assertAllClose(self.evaluate(result_t), [52.5 / 3, 18 / 1.4])
    self.assertAllClose(self.evaluate(m.total), [52.5, 18])
    self.assertAllClose(self.evaluate(m.count), [3, 1.4])

    # check weights expand
    m = metrics.MeanTensor(dtype=dtypes.float64)
    self.evaluate(variables.variables_initializer(m.variables))
    result_t = m([[1], [5]], sample_weight=[1, 0.2])
    self.assertAllClose(self.evaluate(result_t), [[1], [5]])
    self.assertAllClose(self.evaluate(m.total), [[1], [1]])
    self.assertAllClose(self.evaluate(m.count), [[1], [0.2]])

  @test_util.run_in_graph_and_eager_modes
  def test_invalid_value_shape(self):
    """The input shape is locked in by the first update; mismatches raise."""
    m = metrics.MeanTensor(dtype=dtypes.float64)
    m([1])
    with self.assertRaisesRegexp(
        ValueError, 'MeanTensor input values must always have the same shape'):
      m([1, 5])

  @test_util.run_in_graph_and_eager_modes
  def test_build_in_tf_function(self):
    """Ensure that variables are created correctly in a tf function."""
    m = metrics.MeanTensor(dtype=dtypes.float64)

    @eager_function.defun
    def call_metric(x):
      return m(x)

    self.assertAllClose(self.evaluate(call_metric([100, 40])), [100, 40])
    self.assertAllClose(self.evaluate(m.total), [100, 40])
    self.assertAllClose(self.evaluate(m.count), [1, 1])
    self.assertAllClose(self.evaluate(call_metric([20, 2])), [60, 21])

  def test_in_keras_model(self):
    """MeanTensor used inside a subclassed model's call() accumulates state
    across `model.evaluate` runs (states are not reset between runs here)."""
    with context.eager_mode():

      class ModelWithMetric(Model):

        def __init__(self):
          super(ModelWithMetric, self).__init__()
          self.dense1 = layers.Dense(
              3, activation='relu', kernel_initializer='ones')
          self.dense2 = layers.Dense(
              1, activation='sigmoid', kernel_initializer='ones')
          self.mean_tensor = metrics.MeanTensor()

        def call(self, x):
          x = self.dense1(x)
          x = self.dense2(x)
          # Tracks the mean of the (constant) kernel as a side effect.
          self.mean_tensor(self.dense1.kernel)
          return x

      model = ModelWithMetric()
      model.compile(
          loss='mae',
          optimizer='rmsprop',
          run_eagerly=True)

      x = np.ones((100, 4))
      y = np.zeros((100, 1))
      model.evaluate(x, y, batch_size=50)
      self.assertAllClose(self.evaluate(model.mean_tensor.result()),
                          np.ones((4, 3)))
      self.assertAllClose(self.evaluate(model.mean_tensor.total),
                          np.full((4, 3), 2))
      self.assertAllClose(self.evaluate(model.mean_tensor.count),
                          np.full((4, 3), 2))

      model.evaluate(x, y, batch_size=25)
      self.assertAllClose(self.evaluate(model.mean_tensor.result()),
                          np.ones((4, 3)))
      self.assertAllClose(self.evaluate(model.mean_tensor.total),
                          np.full((4, 3), 4))
      self.assertAllClose(self.evaluate(model.mean_tensor.count),
                          np.full((4, 3), 4))
@test_util.run_all_in_graph_and_eager_modes
class BinaryCrossentropyTest(test.TestCase):
  """Tests `metrics.BinaryCrossentropy` against hand-computed values."""

  def test_config(self):
    """Config round-trips through get_config/from_config."""
    bce_obj = metrics.BinaryCrossentropy(
        name='bce', dtype=dtypes.int32, label_smoothing=0.2)
    self.assertEqual(bce_obj.name, 'bce')
    self.assertEqual(bce_obj._dtype, dtypes.int32)

    old_config = bce_obj.get_config()
    self.assertAllClose(old_config['label_smoothing'], 0.2, 1e-3)

    # Check save and restore config
    bce_obj2 = metrics.BinaryCrossentropy.from_config(old_config)
    self.assertEqual(bce_obj2.name, 'bce')
    self.assertEqual(bce_obj2._dtype, dtypes.int32)
    new_config = bce_obj2.get_config()
    self.assertDictEqual(old_config, new_config)

  def test_unweighted(self):
    bce_obj = metrics.BinaryCrossentropy()
    self.evaluate(variables.variables_initializer(bce_obj.variables))
    y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
    y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
    result = bce_obj(y_true, y_pred)

    # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
    # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]

    # Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
    #        = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
    #           -log(Y_MAX + EPSILON), -log(1)]
    #        = [(0 + 15.33) / 2, (0 + 0) / 2]
    # Reduced metric = 7.665 / 2
    self.assertAllClose(self.evaluate(result), 3.833, atol=1e-3)

  def test_unweighted_with_logits(self):
    bce_obj = metrics.BinaryCrossentropy(from_logits=True)
    self.evaluate(variables.variables_initializer(bce_obj.variables))

    y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
    y_pred = constant_op.constant([[100.0, -100.0, 100.0],
                                   [100.0, 100.0, -100.0]])
    result = bce_obj(y_true, y_pred)

    # Metric = max(x, 0) - x * z + log(1 + exp(-abs(x)))
    #              (where x = logits and z = y_true)
    #        = [((100 - 100 * 1 + log(1 + exp(-100))) +
    #            (0 + 100 * 0 + log(1 + exp(-100))) +
    #            (100 - 100 * 1 + log(1 + exp(-100))),
    #           ((100 - 100 * 0 + log(1 + exp(-100))) +
    #            (100 - 100 * 1 + log(1 + exp(-100))) +
    #            (0 + 100 * 1 + log(1 + exp(-100))))]
    #        = [(0 + 0 + 0) / 3, 200 / 3]
    # Reduced metric = (0 + 66.666) / 2
    self.assertAllClose(self.evaluate(result), 33.333, atol=1e-3)

  def test_weighted(self):
    bce_obj = metrics.BinaryCrossentropy()
    self.evaluate(variables.variables_initializer(bce_obj.variables))
    y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
    y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
    sample_weight = constant_op.constant([1.5, 2.])
    result = bce_obj(y_true, y_pred, sample_weight=sample_weight)

    # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
    # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]

    # Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
    #        = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
    #           -log(Y_MAX + EPSILON), -log(1)]
    #        = [(0 + 15.33) / 2, (0 + 0) / 2]
    # Weighted metric = [7.665 * 1.5, 0]
    # Reduced metric = 7.665 * 1.5 / (1.5 + 2)
    self.assertAllClose(self.evaluate(result), 3.285, atol=1e-3)

  def test_weighted_from_logits(self):
    bce_obj = metrics.BinaryCrossentropy(from_logits=True)
    self.evaluate(variables.variables_initializer(bce_obj.variables))
    y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
    y_pred = constant_op.constant([[100.0, -100.0, 100.0],
                                   [100.0, 100.0, -100.0]])
    sample_weight = constant_op.constant([2., 2.5])
    result = bce_obj(y_true, y_pred, sample_weight=sample_weight)

    # Metric = max(x, 0) - x * z + log(1 + exp(-abs(x)))
    #              (where x = logits and z = y_true)
    #        = [(0 + 0 + 0) / 3, 200 / 3]
    # Weighted metric = [0, 66.666 * 2.5]
    # Reduced metric = 66.666 * 2.5 / (2 + 2.5)
    self.assertAllClose(self.evaluate(result), 37.037, atol=1e-3)

  def test_label_smoothing(self):
    logits = constant_op.constant(((100., -100., -100.)))
    y_true = constant_op.constant(((1, 0, 1)))
    label_smoothing = 0.1
    # Metric: max(x, 0) - x * z + log(1 + exp(-abs(x)))
    #             (where x = logits and z = y_true)
    # Label smoothing: z' = z * (1 - L) + 0.5L
    # After label smoothing, label 1 becomes 1 - 0.5L
    #                        label 0 becomes 0.5L
    # Applying the above two fns to the given input:
    # (100 - 100 * (1 - 0.5 L)  + 0 +
    #  0   + 100 * (0.5 L)      + 0 +
    #  0   + 100 * (1 - 0.5 L)  + 0) * (1/3)
    #  = (100 + 50L) * 1/3
    bce_obj = metrics.BinaryCrossentropy(
        from_logits=True, label_smoothing=label_smoothing)
    self.evaluate(variables.variables_initializer(bce_obj.variables))
    result = bce_obj(y_true, logits)
    expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
    self.assertAllClose(expected_value, self.evaluate(result), atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class CategoricalCrossentropyTest(test.TestCase):
  """Tests `metrics.CategoricalCrossentropy` (one-hot labels)."""

  def test_config(self):
    """Config round-trips through get_config/from_config."""
    cce_obj = metrics.CategoricalCrossentropy(
        name='cce', dtype=dtypes.int32, label_smoothing=0.2)
    self.assertEqual(cce_obj.name, 'cce')
    self.assertEqual(cce_obj._dtype, dtypes.int32)

    old_config = cce_obj.get_config()
    self.assertAllClose(old_config['label_smoothing'], 0.2, 1e-3)

    # Check save and restore config
    cce_obj2 = metrics.CategoricalCrossentropy.from_config(old_config)
    self.assertEqual(cce_obj2.name, 'cce')
    self.assertEqual(cce_obj2._dtype, dtypes.int32)
    new_config = cce_obj2.get_config()
    self.assertDictEqual(old_config, new_config)

  def test_unweighted(self):
    cce_obj = metrics.CategoricalCrossentropy()
    self.evaluate(variables.variables_initializer(cce_obj.variables))

    y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
    y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    result = cce_obj(y_true, y_pred)

    # EPSILON = 1e-7, y = y_true, y` = y_pred
    # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]

    # Metric = -sum(y * log(y'), axis = -1)
    #        = -((log 0.95), (log 0.1))
    #        = [0.051, 2.302]
    # Reduced metric = (0.051 + 2.302) / 2
    self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)

  def test_unweighted_from_logits(self):
    cce_obj = metrics.CategoricalCrossentropy(from_logits=True)
    self.evaluate(variables.variables_initializer(cce_obj.variables))

    y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
    logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
    result = cce_obj(y_true, logits)

    # softmax = exp(logits) / sum(exp(logits), axis=-1)
    # xent = -sum(labels * log(softmax), 1)

    # exp(logits) = [[2.718, 8103.084, 1], [2.718, 2980.958, 2.718]]
    # sum(exp(logits), axis=-1) = [8106.802, 2986.394]
    # softmax = [[0.00033, 0.99954, 0.00012], [0.00091, 0.99817, 0.00091]]
    # log(softmax) = [[-8.00045, -0.00045, -9.00045],
    #                 [-7.00182, -0.00182, -7.00182]]
    # labels * log(softmax) = [[0, -0.00045, 0], [0, 0, -7.00182]]
    # xent = [0.00045, 7.00182]
    # Reduced xent = (0.00045 + 7.00182) / 2
    self.assertAllClose(self.evaluate(result), 3.5011, atol=1e-3)

  def test_weighted(self):
    cce_obj = metrics.CategoricalCrossentropy()
    self.evaluate(variables.variables_initializer(cce_obj.variables))

    y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
    y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    sample_weight = constant_op.constant([1.5, 2.])
    result = cce_obj(y_true, y_pred, sample_weight=sample_weight)

    # EPSILON = 1e-7, y = y_true, y` = y_pred
    # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]

    # Metric = -sum(y * log(y'), axis = -1)
    #        = -((log 0.95), (log 0.1))
    #        = [0.051, 2.302]
    # Weighted metric = [0.051 * 1.5, 2.302 * 2.]
    # Reduced metric = (0.051 * 1.5 + 2.302 * 2.) / 3.5
    self.assertAllClose(self.evaluate(result), 1.338, atol=1e-3)

  def test_weighted_from_logits(self):
    cce_obj = metrics.CategoricalCrossentropy(from_logits=True)
    self.evaluate(variables.variables_initializer(cce_obj.variables))

    y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
    logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
    sample_weight = constant_op.constant([1.5, 2.])
    result = cce_obj(y_true, logits, sample_weight=sample_weight)

    # softmax = exp(logits) / sum(exp(logits), axis=-1)
    # xent = -sum(labels * log(softmax), 1)
    # xent = [0.00045, 7.00182]
    # weighted xent = [0.000675, 14.00364]
    # Reduced xent = (0.000675 + 14.00364) / (1.5 + 2)
    self.assertAllClose(self.evaluate(result), 4.0012, atol=1e-3)

  def test_label_smoothing(self):
    y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
    logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
    label_smoothing = 0.1

    # Label smoothing: z' = z * (1 - L) + L/n,
    #     where L = label smoothing value and n = num classes
    # Label value 1 becomes: 1 - L + L/n
    # Label value 0 becomes: L/n
    # y_true with label_smoothing = [[0.0333, 0.9333, 0.0333],
    #                                [0.0333, 0.0333, 0.9333]]

    # softmax = exp(logits) / sum(exp(logits), axis=-1)
    # xent = -sum(labels * log(softmax), 1)
    # log(softmax) = [[-8.00045, -0.00045, -9.00045],
    #                 [-7.00182, -0.00182, -7.00182]]
    # labels * log(softmax) = [[-0.26641, -0.00042, -0.29971],
    #                          [-0.23316, -0.00006, -6.53479]]
    # xent = [0.56654, 6.76801]
    # Reduced xent = (0.56654 + 6.76801) / 2
    cce_obj = metrics.CategoricalCrossentropy(
        from_logits=True, label_smoothing=label_smoothing)
    self.evaluate(variables.variables_initializer(cce_obj.variables))
    loss = cce_obj(y_true, logits)
    self.assertAllClose(self.evaluate(loss), 3.667, atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class SparseCategoricalCrossentropyTest(test.TestCase):
  """Tests `metrics.SparseCategoricalCrossentropy` (integer labels)."""

  def test_config(self):
    """Config round-trips and is JSON-serializable."""
    scce_obj = metrics.SparseCategoricalCrossentropy(
        name='scce', dtype=dtypes.int32)
    self.assertEqual(scce_obj.name, 'scce')
    self.assertEqual(scce_obj.dtype, dtypes.int32)
    old_config = scce_obj.get_config()
    self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))

    # Check save and restore config
    scce_obj2 = metrics.SparseCategoricalCrossentropy.from_config(old_config)
    self.assertEqual(scce_obj2.name, 'scce')
    self.assertEqual(scce_obj2.dtype, dtypes.int32)
    new_config = scce_obj2.get_config()
    self.assertDictEqual(old_config, new_config)

  def test_unweighted(self):
    scce_obj = metrics.SparseCategoricalCrossentropy()
    self.evaluate(variables.variables_initializer(scce_obj.variables))

    y_true = np.asarray([1, 2])
    y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    result = scce_obj(y_true, y_pred)

    # EPSILON = 1e-7, y = y_true, y` = y_pred
    # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
    # logits = log(y`) =  [[-2.9957, -0.0513, -16.1181],
    #                      [-2.3026, -0.2231, -2.3026]]

    # softmax = exp(logits) / sum(exp(logits), axis=-1)
    # y = one_hot(y) = [[0, 1, 0], [0, 0, 1]]
    # xent = -sum(y * log(softmax), 1)

    # exp(logits) = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
    # sum(exp(logits), axis=-1) = [1, 1]
    # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
    # log(softmax) = [[-2.9957, -0.0513, -16.1181],
    #                 [-2.3026, -0.2231, -2.3026]]
    # y * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
    # xent = [0.0513, 2.3026]
    # Reduced xent = (0.0513 + 2.3026) / 2
    self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)

  def test_unweighted_from_logits(self):
    scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)
    self.evaluate(variables.variables_initializer(scce_obj.variables))

    y_true = np.asarray([1, 2])
    logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
    result = scce_obj(y_true, logits)

    # softmax = exp(logits) / sum(exp(logits), axis=-1)
    # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
    # xent = -sum(y_true * log(softmax), 1)

    # exp(logits) = [[2.718, 8103.084, 1], [2.718, 2980.958, 2.718]]
    # sum(exp(logits), axis=-1) = [8106.802, 2986.394]
    # softmax = [[0.00033, 0.99954, 0.00012], [0.00091, 0.99817, 0.00091]]
    # log(softmax) = [[-8.00045, -0.00045, -9.00045],
    #                 [-7.00182, -0.00182, -7.00182]]
    # y_true * log(softmax) = [[0, -0.00045, 0], [0, 0, -7.00182]]
    # xent = [0.00045, 7.00182]
    # Reduced xent = (0.00045 + 7.00182) / 2
    self.assertAllClose(self.evaluate(result), 3.5011, atol=1e-3)

  def test_weighted(self):
    scce_obj = metrics.SparseCategoricalCrossentropy()
    self.evaluate(variables.variables_initializer(scce_obj.variables))

    y_true = np.asarray([1, 2])
    y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    sample_weight = constant_op.constant([1.5, 2.])
    result = scce_obj(y_true, y_pred, sample_weight=sample_weight)

    # EPSILON = 1e-7, y = y_true, y` = y_pred
    # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
    # logits = log(y`) =  [[-2.9957, -0.0513, -16.1181],
    #                      [-2.3026, -0.2231, -2.3026]]

    # softmax = exp(logits) / sum(exp(logits), axis=-1)
    # y = one_hot(y) = [[0, 1, 0], [0, 0, 1]]
    # xent = -sum(y * log(softmax), 1)

    # exp(logits) = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
    # sum(exp(logits), axis=-1) = [1, 1]
    # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
    # log(softmax) = [[-2.9957, -0.0513, -16.1181],
    #                 [-2.3026, -0.2231, -2.3026]]
    # y * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
    # xent = [0.0513, 2.3026]
    # Weighted xent = [0.051 * 1.5, 2.302 * 2.]
    # Reduced xent = (0.051 * 1.5 + 2.302 * 2.) / 3.5
    self.assertAllClose(self.evaluate(result), 1.338, atol=1e-3)

  def test_weighted_from_logits(self):
    scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)
    self.evaluate(variables.variables_initializer(scce_obj.variables))

    y_true = np.asarray([1, 2])
    logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
    sample_weight = constant_op.constant([1.5, 2.])
    result = scce_obj(y_true, logits, sample_weight=sample_weight)

    # softmax = exp(logits) / sum(exp(logits), axis=-1)
    # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
    # xent = -sum(y_true * log(softmax), 1)
    # xent = [0.00045, 7.00182]
    # weighted xent = [0.000675, 14.00364]
    # Reduced xent = (0.000675 + 14.00364) / (1.5 + 2)
    self.assertAllClose(self.evaluate(result), 4.0012, atol=1e-3)

  def test_axis(self):
    """Class axis 0: predictions are laid out classes-first."""
    scce_obj = metrics.SparseCategoricalCrossentropy(axis=0)
    self.evaluate(variables.variables_initializer(scce_obj.variables))

    y_true = np.asarray([1, 2])
    y_pred = np.asarray([[0.05, 0.1], [0.95, 0.8], [0, 0.1]])
    result = scce_obj(y_true, y_pred)

    # EPSILON = 1e-7, y = y_true, y` = y_pred
    # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    # y` = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]
    # logits = log(y`) =  [[-2.9957, -2.3026],
    #                      [-0.0513, -0.2231],
    #                      [-16.1181, -2.3026]]

    # softmax = exp(logits) / sum(exp(logits), axis=-1)
    # y = one_hot(y) = [[0, 0], [1, 0], [0, 1]]
    # xent = -sum(y * log(softmax), 1)

    # exp(logits) = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]
    # sum(exp(logits)) = [1, 1]
    # softmax = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]
    # log(softmax) = [[-2.9957, -2.3026],
    #                 [-0.0513, -0.2231],
    #                 [-16.1181, -2.3026]]
    # y * log(softmax) = [[0, 0], [-0.0513, 0], [0, -2.3026]]
    # xent = [0.0513, 2.3026]
    # Reduced xent = (0.0513 + 2.3026) / 2
    self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)
class BinaryTruePositives(metrics.Metric):
  """Minimal custom streaming metric: running (weighted) true-positive count.

  Used below to exercise the `metrics.Metric` subclassing API.
  """

  def __init__(self, name='binary_true_positives', **kwargs):
    super(BinaryTruePositives, self).__init__(name=name, **kwargs)
    # Single scalar accumulator holding the running true-positive total.
    self.true_positives = self.add_weight(name='tp', initializer='zeros')

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Adds the (optionally weighted) count of positions where both
    `y_true` and `y_pred` are truthy to the accumulator.

    Non-zero values (e.g. 0.9, 1.5, 5, 10) cast to True under `bool`.
    """
    y_true = math_ops.cast(y_true, dtypes.bool)
    y_pred = math_ops.cast(y_pred, dtypes.bool)
    values = math_ops.logical_and(
        math_ops.equal(y_true, True), math_ops.equal(y_pred, True))
    values = math_ops.cast(values, self.dtype)
    if sample_weight is not None:
      # Broadcast weights (e.g. per-row weights) to the element shape
      # before masking.
      sample_weight = math_ops.cast(sample_weight, dtype=self.dtype)
      sample_weight = weights_broadcast_ops.broadcast_weights(
          sample_weight, values)
      values = math_ops.multiply(values, sample_weight)
    self.true_positives.assign_add(math_ops.reduce_sum(values))

  def result(self):
    """Returns the accumulated true-positive count."""
    return self.true_positives
@test_util.run_all_in_graph_and_eager_modes
class CustomMetricsTest(test.TestCase):
  """Tests the `metrics.Metric` subclassing API via `BinaryTruePositives`."""

  def test_config(self):
    """Custom metrics inherit config round-trip from the base class."""
    btp_obj = BinaryTruePositives(name='btp', dtype=dtypes.int32)
    self.assertEqual(btp_obj.name, 'btp')
    self.assertEqual(btp_obj.dtype, dtypes.int32)

    # Check save and restore config
    btp_obj2 = BinaryTruePositives.from_config(btp_obj.get_config())
    self.assertEqual(btp_obj2.name, 'btp')
    self.assertEqual(btp_obj2.dtype, dtypes.int32)

  def test_unweighted(self):
    btp_obj = BinaryTruePositives()
    self.evaluate(variables.variables_initializer(btp_obj.variables))
    y_true = constant_op.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1],
                                   [1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]])
    y_pred = constant_op.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1],
                                   [0, 1, 0, 1, 0], [1, 10, 1, 1, 1]])

    update_op = btp_obj.update_state(y_true, y_pred)
    self.evaluate(update_op)
    result = btp_obj.result()
    self.assertEqual(7, self.evaluate(result))

  def test_weighted(self):
    """Per-row weights scale each row's true-positive contribution."""
    btp_obj = BinaryTruePositives()
    self.evaluate(variables.variables_initializer(btp_obj.variables))
    y_true = constant_op.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1],
                                   [1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]])
    y_pred = constant_op.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1],
                                   [0, 1, 0, 1, 0], [1, 10, 1, 1, 1]])
    sample_weight = constant_op.constant([[1.], [1.5], [2.], [2.5]])
    result = btp_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertEqual(12, self.evaluate(result))
def _get_model(compile_metrics):
  """Builds and compiles a small 4->3->1 model reporting `compile_metrics`.

  Kernel initializers are all-ones so the metric values asserted in
  `ResetStatesTest` below are deterministic.
  """
  model_layers = [
      layers.Dense(3, activation='relu', kernel_initializer='ones'),
      layers.Dense(1, activation='sigmoid', kernel_initializer='ones')]

  model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))
  model.compile(
      loss='mae',
      metrics=compile_metrics,
      optimizer='rmsprop',
      run_eagerly=testing_utils.should_run_eagerly(),
      experimental_run_tf_function=testing_utils.should_run_tf_function())
  return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class ResetStatesTest(keras_parameterized.TestCase):
  """Verifies metric state is reset between `model.evaluate` calls.

  Each test evaluates twice and asserts identical accumulator values the
  second time — if states were not reset, the counts would double.
  """

  def test_reset_states_false_positives(self):
    fp_obj = metrics.FalsePositives()
    model = _get_model([fp_obj])
    x = np.ones((100, 4))
    y = np.zeros((100, 1))
    model.evaluate(x, y)
    self.assertEqual(self.evaluate(fp_obj.accumulator), 100.)
    model.evaluate(x, y)
    self.assertEqual(self.evaluate(fp_obj.accumulator), 100.)

  def test_reset_states_false_negatives(self):
    fn_obj = metrics.FalseNegatives()
    model = _get_model([fn_obj])
    x = np.zeros((100, 4))
    y = np.ones((100, 1))
    model.evaluate(x, y)
    self.assertEqual(self.evaluate(fn_obj.accumulator), 100.)
    model.evaluate(x, y)
    self.assertEqual(self.evaluate(fn_obj.accumulator), 100.)

  def test_reset_states_true_negatives(self):
    tn_obj = metrics.TrueNegatives()
    model = _get_model([tn_obj])
    x = np.zeros((100, 4))
    y = np.zeros((100, 1))
    model.evaluate(x, y)
    self.assertEqual(self.evaluate(tn_obj.accumulator), 100.)
    model.evaluate(x, y)
    self.assertEqual(self.evaluate(tn_obj.accumulator), 100.)

  def test_reset_states_true_positives(self):
    tp_obj = metrics.TruePositives()
    model = _get_model([tp_obj])
    x = np.ones((100, 4))
    y = np.ones((100, 1))
    model.evaluate(x, y)
    self.assertEqual(self.evaluate(tp_obj.accumulator), 100.)
    model.evaluate(x, y)
    self.assertEqual(self.evaluate(tp_obj.accumulator), 100.)

  def test_reset_states_precision(self):
    p_obj = metrics.Precision()
    model = _get_model([p_obj])
    x = np.concatenate((np.ones((50, 4)), np.ones((50, 4))))
    y = np.concatenate((np.ones((50, 1)), np.zeros((50, 1))))
    model.evaluate(x, y)
    self.assertEqual(self.evaluate(p_obj.true_positives), 50.)
    self.assertEqual(self.evaluate(p_obj.false_positives), 50.)
    model.evaluate(x, y)
    self.assertEqual(self.evaluate(p_obj.true_positives), 50.)
    self.assertEqual(self.evaluate(p_obj.false_positives), 50.)

  def test_reset_states_recall(self):
    r_obj = metrics.Recall()
    model = _get_model([r_obj])
    x = np.concatenate((np.ones((50, 4)), np.zeros((50, 4))))
    y = np.concatenate((np.ones((50, 1)), np.ones((50, 1))))
    model.evaluate(x, y)
    self.assertEqual(self.evaluate(r_obj.true_positives), 50.)
    self.assertEqual(self.evaluate(r_obj.false_negatives), 50.)
    model.evaluate(x, y)
    self.assertEqual(self.evaluate(r_obj.true_positives), 50.)
    self.assertEqual(self.evaluate(r_obj.false_negatives), 50.)

  def test_reset_states_sensitivity_at_specificity(self):
    s_obj = metrics.SensitivityAtSpecificity(0.5, num_thresholds=1)
    model = _get_model([s_obj])
    x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
                        np.ones((25, 4))))
    y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
                        np.zeros((25, 1))))

    for _ in range(2):
      model.evaluate(x, y)
      self.assertEqual(self.evaluate(s_obj.true_positives), 25.)
      self.assertEqual(self.evaluate(s_obj.false_positives), 25.)
      self.assertEqual(self.evaluate(s_obj.false_negatives), 25.)
      self.assertEqual(self.evaluate(s_obj.true_negatives), 25.)

  def test_reset_states_specificity_at_sensitivity(self):
    s_obj = metrics.SpecificityAtSensitivity(0.5, num_thresholds=1)
    model = _get_model([s_obj])
    x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
                        np.ones((25, 4))))
    y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
                        np.zeros((25, 1))))

    for _ in range(2):
      model.evaluate(x, y)
      self.assertEqual(self.evaluate(s_obj.true_positives), 25.)
      self.assertEqual(self.evaluate(s_obj.false_positives), 25.)
      self.assertEqual(self.evaluate(s_obj.false_negatives), 25.)
      self.assertEqual(self.evaluate(s_obj.true_negatives), 25.)

  def test_reset_states_auc(self):
    auc_obj = metrics.AUC(num_thresholds=3)
    model = _get_model([auc_obj])
    x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
                        np.ones((25, 4))))
    y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
                        np.zeros((25, 1))))

    for _ in range(2):
      model.evaluate(x, y)
      # Index 1 is the middle (0.5) threshold bucket.
      self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 25.)
      self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 25.)
      self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 25.)
      self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 25.)

  def test_reset_states_auc_manual_thresholds(self):
    auc_obj = metrics.AUC(thresholds=[0.5])
    model = _get_model([auc_obj])
    x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
                        np.ones((25, 4))))
    y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
                        np.zeros((25, 1))))

    for _ in range(2):
      model.evaluate(x, y)
      self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 25.)
      self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 25.)
      self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 25.)
      self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 25.)

  def test_reset_states_mean_iou(self):
    m_obj = metrics.MeanIoU(num_classes=2)
    model = _get_model([m_obj])
    x = np.asarray([[0, 0, 0, 0], [1, 1, 1, 1], [1, 0, 1, 0], [0, 1, 0, 1]],
                   dtype=np.float32)
    y = np.asarray([[0], [1], [1], [1]], dtype=np.float32)
    model.evaluate(x, y)
    self.assertArrayNear(self.evaluate(m_obj.total_cm)[0], [1, 0], 1e-1)
    self.assertArrayNear(self.evaluate(m_obj.total_cm)[1], [3, 0], 1e-1)
    model.evaluate(x, y)
    self.assertArrayNear(self.evaluate(m_obj.total_cm)[0], [1, 0], 1e-1)
    self.assertArrayNear(self.evaluate(m_obj.total_cm)[1], [3, 0], 1e-1)
# Run all test cases in this module when executed as a script.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/metrics_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in regularizers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.regularizers.Regularizer')
class Regularizer(object):
  """Regularizer base class.

  Subclasses override `__call__` to map a weight tensor `x` to a scalar
  penalty added to the loss; the base class applies no penalty.
  """

  def __call__(self, x):
    # No-op penalty; note this returns a Python float, while subclasses
    # typically return a tensor.
    return 0.

  @classmethod
  def from_config(cls, config):
    """Recreates a regularizer from its `get_config` dictionary."""
    return cls(**config)
@keras_export('keras.regularizers.L1L2')
class L1L2(Regularizer):
  """Applies L1 and/or L2 weight penalties.

  Arguments:
    l1: Float; L1 regularization factor.
    l2: Float; L2 regularization factor.
  """

  def __init__(self, l1=0., l2=0.):  # pylint: disable=redefined-outer-name
    self.l1 = K.cast_to_floatx(l1)
    self.l2 = K.cast_to_floatx(l2)

  def __call__(self, x):
    """Returns `l1 * sum(|x|) + l2 * sum(x**2)` as a scalar tensor."""
    if not self.l1 and not self.l2:
      # Both factors zero: return a constant tensor rather than a float.
      return K.constant(0.)
    penalty = 0.
    if self.l1:
      penalty = penalty + self.l1 * math_ops.reduce_sum(math_ops.abs(x))
    if self.l2:
      penalty = penalty + self.l2 * math_ops.reduce_sum(math_ops.square(x))
    return penalty

  def get_config(self):
    """Returns the factors as plain floats for JSON serialization."""
    return {'l1': float(self.l1), 'l2': float(self.l2)}
# Aliases.


@keras_export('keras.regularizers.l1')
def l1(l=0.01):
  """Returns an L1-only regularizer with factor `l`."""
  return L1L2(l1=l)
@keras_export('keras.regularizers.l2')
def l2(l=0.01):
  """Returns an L2-only regularizer with factor `l`."""
  return L1L2(l2=l)
@keras_export('keras.regularizers.l1_l2')
def l1_l2(l1=0.01, l2=0.01):  # pylint: disable=redefined-outer-name
  """Returns a combined L1+L2 regularizer with the given factors."""
  return L1L2(l1=l1, l2=l2)
@keras_export('keras.regularizers.serialize')
def serialize(regularizer):
  """Serializes a regularizer instance to a Keras config structure."""
  return serialize_keras_object(regularizer)
@keras_export('keras.regularizers.deserialize')
def deserialize(config, custom_objects=None):
  """Instantiates a regularizer from a config dict or identifier string.

  Lookup is against this module's globals plus `custom_objects`.
  """
  return deserialize_keras_object(
      config,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='regularizer')
@keras_export('keras.regularizers.get')
def get(identifier):
  """Retrieves a regularizer instance from an identifier.

  Args:
    identifier: `None` (returns `None`), a config dict, a string name such
      as `'l1'`, or a callable (returned unchanged).

  Returns:
    A regularizer instance, the callable itself, or `None`.

  Raises:
    ValueError: if `identifier` is of an unsupported type.
  """
  if identifier is None:
    return None
  if isinstance(identifier, dict):
    return deserialize(identifier)
  elif isinstance(identifier, six.string_types):
    identifier = str(identifier)
    # We have to special-case functions that return classes.
    # TODO(omalleyt): Turn these into classes or class aliases.
    special_cases = ['l1', 'l2', 'l1_l2']
    if identifier in special_cases:
      # Treat like a class.
      return deserialize({'class_name': identifier, 'config': {}})
    # Fix: `identifier` is already a str here; the original re-wrapped it
    # in a redundant str() call.
    return deserialize(identifier)
  elif callable(identifier):
    return identifier
  else:
    raise ValueError('Could not interpret regularizer identifier:', identifier)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/regularizers.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests metrics correctness using Keras model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops.losses import loss_reduction
from tensorflow.python.platform import test
def get_multi_io_model():
  """Builds a 2-input/2-output functional model with frozen all-ones layers.

  Both branches share one hidden Dense layer; each branch ends in its own
  frozen Dense(1) head named 'output_1' / 'output_2'.
  """
  input_a = layers.Input(shape=(1,), name='input_1')
  input_b = layers.Input(shape=(1,), name='input_2')
  shared = layers.Dense(3, kernel_initializer='ones', trainable=False)
  head_a = layers.Dense(
      1, kernel_initializer='ones', name='output_1', trainable=False)
  head_b = layers.Dense(
      1, kernel_initializer='ones', name='output_2', trainable=False)
  return testing_utils.get_multi_io_model([input_a, shared, head_a],
                                          [input_b, shared, head_b])
def custom_generator_multi_io(sample_weights=None):
  """Yields endless `(x, y, w)` batches for the two-input/two-output model.

  Cycles through 4 fixed samples in batches of 2.

  Arguments:
    sample_weights: Optional sequence of exactly two per-sample weight
      arrays (one per output); entries may be None. If falsy, the yielded
      weights are None.

  Yields:
    Tuples `(x, y, w)` where `x` and `y` are two-element lists of batch
    slices and `w` is either None or a two-element list of weight slices.
  """
  batch_size = 2
  num_samples = 4
  inputs = np.asarray([[1.], [2.], [3.], [4.]])
  targets_1 = np.asarray([[2.], [4.], [6.], [8.]])
  targets_2 = np.asarray([[1.], [2.], [3.], [4.]])
  if sample_weights:
    assert len(sample_weights) == 2
    w1, w2 = sample_weights
  else:
    w1 = None
    w2 = None
  step = 0
  while True:
    start = (step * batch_size) % num_samples
    end = start + batch_size
    step += 1
    x = [inputs[start:end], inputs[start:end]]
    y = [targets_1[start:end], targets_2[start:end]]
    if sample_weights:
      w = [weight if weight is None else weight[start:end]
           for weight in (w1, w2)]
    else:
      w = None
    yield x, y, w
@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
@keras_parameterized.run_all_keras_modes
class TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):
  """Checks reported metric/loss values for a multi-input/output model.

  All expected values are hand-computed in `setUp` from the fixed data and
  the frozen all-ones model, so the model output is deterministic.
  """
  def _get_compiled_multi_io_model(self):
    """Returns the shared multi-IO model compiled with MSE loss/metrics."""
    model = get_multi_io_model()
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        metrics=[metrics.MeanSquaredError(name='mean_squared_error')],
        weighted_metrics=[
            metrics.MeanSquaredError(name='mean_squared_error_2')
        ],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model
  def setUp(self):
    """Precomputes fixture data and the hand-derived expected results."""
    super(TestMetricsCorrectnessMultiIO, self).setUp()
    self.x = np.asarray([[1.], [2.], [3.], [4.]])
    self.y1 = np.asarray([[2.], [4.], [6.], [8.]])
    self.y2 = np.asarray([[1.], [2.], [3.], [4.]])
    self.sample_weight_1 = np.asarray([2., 3., 4., 5.])
    self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])
    self.class_weight_1 = {2: 2, 4: 3, 6: 4, 8: 5}
    self.class_weight_2 = {1: 3.5, 2: 2.5, 3: 1.5, 4: 0.5}
    # y_true_1 = [[2.], [4.], [6.], [8.]], y_pred = [[3.], [6.], [9.], [12.]]
    # y_true_2 = [[1.], [2.], [3.], [4.]], y_pred = [[3.], [6.], [9.], [12.]]
    # Weighted metric `output_1`:
    #   Total = ((3 - 2)^2 * 2  + (6 - 4)^2 * 3) +
    #           ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)
    #         = 130
    #   Count = (2 + 3) + (4 + 5)
    #   Result = 9.2857141
    # Weighted metric `output_2`:
    #   Total = ((3 - 1)^2 * 3.5 + (6 - 2)^2 * 2.5) +
    #           ((9 - 3)^2 * 1.5 + (12 - 4)^2 * 0.5)
    #         = 140
    #   Count = (3.5 + 2.5) + (1.5 + 0.5)
    #   Result = 17.5
    # Loss `output_1` with weights:
    #   Total = ((3 - 2)^2 * 2  + (6 - 4)^2 * 3) +
    #           ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)
    #         = 130
    #   Count = 2 + 2
    #   Result = 32.5
    # Loss `output_1` without weights/Metric `output_1`:
    #   Total = ((3 - 2)^2 + (6 - 4)^2) + ((9 - 6)^2 + (12 - 8)^2) = 30
    #   Count = 2 + 2
    #   Result = 7.5
    # Loss `output_2` with weights:
    #   Total = ((3 - 1)^2 * 3.5 + (6 - 2)^2 * 2.5) +
    #           ((9 - 3)^2 * 1.5 + (12 - 4)^2 * 0.5)
    #         = 140
    #   Count = 2 + 2
    #   Result = 35
    # Loss `output_2` without weights/Metric `output_2`:
    #   Total = ((3 - 1)^2 + (6 - 2)^2) + ((9 - 3)^2 + (12 - 4)^2) = 120
    #   Count = 2 + 2
    #   Result = 30
    # Total loss with weights = 32.5 + 35 = 67.5
    # Total loss without weights = 7.5 + 30 = 37.5
    self.wmse = 'mean_squared_error_2'
    if not tf2.enabled():
      # TF1 prefixes weighted-metric names with 'weighted_'.
      self.wmse = 'weighted_' + self.wmse
    self.expected_fit_result_with_weights = {
        'output_1_mean_squared_error': [7.5, 7.5],
        'output_2_mean_squared_error': [30, 30],
        'output_1_' + self.wmse: [9.286, 9.286],
        'output_2_' + self.wmse: [17.5, 17.5],
        'loss': [67.5, 67.5],
        'output_1_loss': [32.5, 32.5],
        'output_2_loss': [35, 35],
    }
    self.expected_fit_result_with_weights_output_2 = {
        'output_1_mean_squared_error': [7.5, 7.5],
        'output_2_mean_squared_error': [30, 30],
        'output_1_' + self.wmse: [7.5, 7.5],
        'output_2_' + self.wmse: [17.5, 17.5],
        'loss': [42.5, 42.5],
        'output_1_loss': [7.5, 7.5],
        'output_2_loss': [35, 35],
    }
    self.expected_fit_result = {
        'output_1_mean_squared_error': [7.5, 7.5],
        'output_2_mean_squared_error': [30, 30],
        'output_1_' + self.wmse: [7.5, 7.5],
        'output_2_' + self.wmse: [30, 30],
        'loss': [37.5, 37.5],
        'output_1_loss': [7.5, 7.5],
        'output_2_loss': [30, 30],
    }
    # In the order: 'loss', 'output_1_loss', 'output_2_loss',
    # 'output_1_mean_squared_error', 'output_1_mean_squared_error_2',
    # 'output_2_mean_squared_error', 'output_2_mean_squared_error_2'
    self.expected_batch_result_with_weights = [
        67.5, 32.5, 35, 7.5, 9.286, 30, 17.5
    ]
    self.expected_batch_result_with_weights_output_2 = [
        42.5, 7.5, 35, 7.5, 7.5, 30, 17.5
    ]
    self.expected_batch_result = [37.5, 7.5, 30, 7.5, 7.5, 30, 30]
  def test_fit(self):
    """`fit` history matches the unweighted expected values."""
    model = self._get_compiled_multi_io_model()
    history = model.fit([self.x, self.x], [self.y1, self.y2],
                        batch_size=2,
                        epochs=2,
                        shuffle=False)
    for key, value in self.expected_fit_result.items():
      self.assertAllClose(history.history[key], value, 1e-3)
  def test_fit_with_sample_weight(self):
    """`fit` with per-output sample weights matches expected values."""
    model = self._get_compiled_multi_io_model()
    history = model.fit([self.x, self.x], [self.y1, self.y2],
                        sample_weight={
                            'output_1': self.sample_weight_1,
                            'output_2': self.sample_weight_2,
                        },
                        batch_size=2,
                        epochs=2,
                        shuffle=False)
    for key, value in self.expected_fit_result_with_weights.items():
      self.assertAllClose(history.history[key], value, 1e-3)
    # Set weights for one output (use batch size).
    history = model.fit([self.x, self.x], [self.y1, self.y2],
                        sample_weight={'output_2': self.sample_weight_2},
                        batch_size=2,
                        epochs=2,
                        shuffle=False)
    for key, value in self.expected_fit_result_with_weights_output_2.items():
      self.assertAllClose(history.history[key], value, 1e-3)
  def test_fit_with_class_weight(self):
    """`fit` with per-output class weights matches expected values."""
    model = self._get_compiled_multi_io_model()
    history = model.fit([self.x, self.x], [self.y1, self.y2],
                        class_weight={
                            'output_1': self.class_weight_1,
                            'output_2': self.class_weight_2,
                        },
                        batch_size=2,
                        epochs=2,
                        shuffle=False)
    for key, value in self.expected_fit_result_with_weights.items():
      self.assertAllClose(history.history[key], value, 1e-3)
    # Set weights for one output.
    history = model.fit([self.x, self.x], [self.y1, self.y2],
                        class_weight={'output_2': self.class_weight_2},
                        batch_size=2,
                        epochs=2,
                        shuffle=False)
    for key, value in self.expected_fit_result_with_weights_output_2.items():
      self.assertAllClose(history.history[key], value, 1e-3)
  def test_eval(self):
    """`evaluate` results match the unweighted expected values."""
    model = self._get_compiled_multi_io_model()
    eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],
                                 batch_size=2)
    self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)
  def test_eval_with_sample_weight(self):
    """`evaluate` with sample weights matches, and is batch-size invariant."""
    model = self._get_compiled_multi_io_model()
    eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],
                                 batch_size=2,
                                 sample_weight={
                                     'output_1': self.sample_weight_1,
                                     'output_2': self.sample_weight_2,
                                 })
    self.assertAllClose(eval_result, self.expected_batch_result_with_weights,
                        1e-3)
    # Set weights for one output.
    model = self._get_compiled_multi_io_model()
    eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],
                                 batch_size=2,
                                 sample_weight={
                                     'output_2': self.sample_weight_2,
                                 })
    self.assertAllClose(eval_result,
                        self.expected_batch_result_with_weights_output_2, 1e-3)
    # Verify that metric value is same with arbitrary weights and batch size.
    x = np.random.random((50, 1))
    y = np.random.random((50, 1))
    w = np.random.random((50,))
    mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w], batch_size=5)[3]
    mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],
                          batch_size=10)[3]
    self.assertAllClose(mse1, mse2, 1e-3)
  def test_train_on_batch(self):
    """`train_on_batch` results match the unweighted expected values."""
    model = self._get_compiled_multi_io_model()
    result = model.train_on_batch([self.x, self.x], [self.y1, self.y2])
    self.assertAllClose(result, self.expected_batch_result, 1e-3)
  def test_train_on_batch_with_sample_weight(self):
    """`train_on_batch` with sample weights matches expected values."""
    model = self._get_compiled_multi_io_model()
    result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],
                                  sample_weight={
                                      'output_1': self.sample_weight_1,
                                      'output_2': self.sample_weight_2,
                                  })
    self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)
    # Set weights for one output.
    result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],
                                  sample_weight={
                                      'output_2': self.sample_weight_2,
                                  })
    self.assertAllClose(result,
                        self.expected_batch_result_with_weights_output_2, 1e-3)
  def test_train_on_batch_with_class_weight(self):
    """`train_on_batch` with class weights matches expected values."""
    model = self._get_compiled_multi_io_model()
    result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],
                                  class_weight={
                                      'output_1': self.class_weight_1,
                                      'output_2': self.class_weight_2,
                                  })
    self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)
    # Set weights for one output.
    result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],
                                  class_weight={
                                      'output_2': self.class_weight_2,
                                  })
    self.assertAllClose(result,
                        self.expected_batch_result_with_weights_output_2, 1e-3)
  def test_test_on_batch(self):
    """`test_on_batch` results match the unweighted expected values."""
    model = self._get_compiled_multi_io_model()
    result = model.test_on_batch([self.x, self.x], [self.y1, self.y2])
    self.assertAllClose(result, self.expected_batch_result, 1e-3)
  def test_test_on_batch_with_sample_weight(self):
    """`test_on_batch` with sample weights matches expected values."""
    model = self._get_compiled_multi_io_model()
    result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],
                                 sample_weight={
                                     'output_1': self.sample_weight_1,
                                     'output_2': self.sample_weight_2,
                                 })
    self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)
    # Set weights for one output.
    result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],
                                 sample_weight={
                                     'output_2': self.sample_weight_2,
                                 })
    self.assertAllClose(result,
                        self.expected_batch_result_with_weights_output_2, 1e-3)
  def test_fit_generator(self):
    """`fit_generator` history matches the unweighted expected values."""
    model = self._get_compiled_multi_io_model()
    history = model.fit_generator(
        custom_generator_multi_io(), steps_per_epoch=2, epochs=2)
    for key, value in self.expected_fit_result.items():
      self.assertAllClose(history.history[key], value, 1e-3)
  def test_fit_generator_with_sample_weight(self):
    """`fit_generator` with generator-provided sample weights matches."""
    model = self._get_compiled_multi_io_model()
    history = model.fit_generator(
        custom_generator_multi_io(
            sample_weights=[self.sample_weight_1, self.sample_weight_2]),
        steps_per_epoch=2,
        epochs=2)
    for key, value in self.expected_fit_result_with_weights.items():
      self.assertAllClose(history.history[key], value, 1e-3)
    # Set weights for one output.
    history = model.fit_generator(
        custom_generator_multi_io(sample_weights=[None, self.sample_weight_2]),
        steps_per_epoch=2,
        epochs=2)
    for key, value in self.expected_fit_result_with_weights_output_2.items():
      self.assertAllClose(history.history[key], value, 1e-3)
  def test_fit_generator_with_class_weight(self):
    """`fit_generator` with class weights matches expected values."""
    model = self._get_compiled_multi_io_model()
    history = model.fit_generator(
        custom_generator_multi_io(),
        class_weight={
            'output_1': self.class_weight_1,
            'output_2': self.class_weight_2,
        },
        steps_per_epoch=2,
        epochs=2)
    for key, value in self.expected_fit_result_with_weights.items():
      self.assertAllClose(history.history[key], value, 1e-3)
    # Set weights for one output.
    history = model.fit_generator(
        custom_generator_multi_io(),
        class_weight={'output_2': self.class_weight_2},
        steps_per_epoch=2,
        epochs=2)
    for key, value in self.expected_fit_result_with_weights_output_2.items():
      self.assertAllClose(history.history[key], value, 1e-3)
  def test_eval_generator(self):
    """`evaluate_generator` results match the unweighted expected values."""
    model = self._get_compiled_multi_io_model()
    eval_result = model.evaluate_generator(custom_generator_multi_io(), steps=2)
    self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)
  def test_eval_generator_with_sample_weight(self):
    """`evaluate_generator` with sample weights matches expected values."""
    model = self._get_compiled_multi_io_model()
    eval_result = model.evaluate_generator(
        custom_generator_multi_io(
            sample_weights=[self.sample_weight_1, self.sample_weight_2]),
        steps=2)
    self.assertAllClose(eval_result, self.expected_batch_result_with_weights,
                        1e-3)
    # Set weights for one output.
    eval_result = model.evaluate_generator(
        custom_generator_multi_io(sample_weights=[None, self.sample_weight_2]),
        steps=2)
    self.assertAllClose(eval_result,
                        self.expected_batch_result_with_weights_output_2, 1e-3)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):
  """Checks reported metric/loss values for a single-input/output model.

  Expected values are hand-computed in `setUp`; the model's layers are
  frozen with all-ones kernels so predictions are deterministic.
  """
  def _get_model(self):
    """Builds and compiles the frozen single-IO model with MSE metrics."""
    x = layers.Dense(3, kernel_initializer='ones', trainable=False)
    out = layers.Dense(
        1, kernel_initializer='ones', name='output', trainable=False)
    model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        metrics=[metrics.MeanSquaredError(name='mean_squared_error')],
        weighted_metrics=[
            metrics.MeanSquaredError(name='mean_squared_error_2')
        ],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model
  def _custom_generator(self, sample_weight=None):
    """Yields endless `(x, y, w)` batches of size 2 over 4 fixed samples."""
    batch_size = 2
    num_samples = 4
    x = np.asarray([[1.], [2.], [3.], [4.]])
    y = np.asarray([[2.], [4.], [6.], [8.]])
    w = sample_weight
    i = 0
    while True:
      batch_index = i * batch_size % num_samples
      i += 1
      start = batch_index
      end = start + batch_size
      yield x[start:end], y[start:end], None if w is None else w[start:end]
  def setUp(self):
    """Precomputes fixture data and the hand-derived expected results."""
    super(TestMetricsCorrectnessSingleIO, self).setUp()
    self.x = np.asarray([[1.], [2.], [3.], [4.]])
    self.y = np.asarray([[2.], [4.], [6.], [8.]])
    self.sample_weight = np.asarray([2., 3., 4., 5.])
    self.class_weight = {2: 2, 4: 3, 6: 4, 8: 5}
    # y_true = [[2.], [4.], [6.], [8.]], y_pred = [[3.], [6.], [9.], [12.]]
    # Metric:
    #   Total = ((3 - 2)^2 + (6 - 4)^2) + ((9 - 6)^2 + (12 - 8)^2) = 30,
    #   Count = 2 + 2
    #   Result = 7.5
    # Weighted metric:
    #   Total = ((3 - 2)^2 * 2  + (6 - 4)^2 * 3) +
    #           ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)
    #         = 130
    #   Count = (2 + 3) + (4 + 5)
    #   Result = 9.2857141
    # Total loss with weights:
    #   Total = ((3 - 2)^2 * 2  + (6 - 4)^2 * 3) +
    #           ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)
    #         = 130,
    #   Count = 2 + 2
    #   Result = 32.5
    # Total loss without weights:
    #   Total = ((3 - 2)^2 + (6 - 4)^2) +
    #           ((9 - 6)^2 + (12 - 8)^2)
    #         = 30,
    #   Count = 2 + 2
    #   Result = 7.5
    wmse = 'mean_squared_error_2'
    if not tf2.enabled():
      # TF1 prefixes weighted-metric names with 'weighted_'.
      wmse = 'weighted_' + wmse
    self.expected_fit_result_with_weights = {
        'mean_squared_error': [7.5, 7.5],
        wmse: [9.286, 9.286],
        'loss': [32.5, 32.5]
    }
    self.expected_fit_result = {
        'mean_squared_error': [7.5, 7.5],
        wmse: [7.5, 7.5],
        'loss': [7.5, 7.5]
    }
    # In the order: 'loss', 'mean_squared_error', 'mean_squared_error_2'
    self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]
    self.expected_batch_result = [7.5, 7.5, 7.5]
  def test_fit(self):
    """`fit` history matches the unweighted expected values."""
    model = self._get_model()
    history = model.fit(
        self.x,
        self.y,
        batch_size=2,
        epochs=2,
        shuffle=False)
    for key, value in self.expected_fit_result.items():
      self.assertAllClose(history.history[key], value, 1e-3)
  def test_fit_with_sample_weight(self):
    """`fit` with sample weights matches the weighted expected values."""
    model = self._get_model()
    history = model.fit(
        self.x,
        self.y,
        sample_weight=self.sample_weight,
        batch_size=2,
        epochs=2,
        shuffle=False)
    for key, value in self.expected_fit_result_with_weights.items():
      self.assertAllClose(history.history[key], value, 1e-3)
  def test_fit_with_class_weight(self):
    """`fit` with class weights matches the weighted expected values."""
    model = self._get_model()
    history = model.fit(
        self.x,
        self.y,
        class_weight=self.class_weight,
        batch_size=2,
        epochs=2,
        shuffle=False)
    for key, value in self.expected_fit_result_with_weights.items():
      self.assertAllClose(history.history[key], value, 1e-3)
  def test_eval(self):
    """`evaluate` results match the unweighted expected values."""
    model = self._get_model()
    eval_result = model.evaluate(self.x, self.y, batch_size=2)
    self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)
  def test_eval_with_sample_weight(self):
    """`evaluate` with sample weights matches, and is batch-size invariant."""
    model = self._get_model()
    eval_result = model.evaluate(
        self.x, self.y, batch_size=2, sample_weight=self.sample_weight)
    self.assertAllClose(eval_result, self.expected_batch_result_with_weights,
                        1e-3)
    # Verify that metric value is same with arbitrary weights and batch size.
    x = np.random.random((50, 1))
    y = np.random.random((50, 1))
    w = np.random.random((50,))
    mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]
    mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]
    self.assertAllClose(mse1, mse2, 1e-3)
  def test_train_on_batch(self):
    """`train_on_batch` results match the unweighted expected values."""
    model = self._get_model()
    result = model.train_on_batch(self.x, self.y)
    self.assertAllClose(result, self.expected_batch_result, 1e-3)
  def test_train_on_batch_with_sample_weight(self):
    """`train_on_batch` with sample weights matches expected values."""
    model = self._get_model()
    result = model.train_on_batch(
        self.x, self.y, sample_weight=self.sample_weight)
    self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)
  def test_train_on_batch_with_class_weight(self):
    """`train_on_batch` with class weights matches expected values."""
    model = self._get_model()
    result = model.train_on_batch(
        self.x, self.y, class_weight=self.class_weight)
    self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)
  def test_test_on_batch(self):
    """`test_on_batch` results match the unweighted expected values."""
    model = self._get_model()
    result = model.test_on_batch(self.x, self.y)
    self.assertAllClose(result, self.expected_batch_result, 1e-3)
  def test_test_on_batch_with_sample_weight(self):
    """`test_on_batch` with sample weights matches expected values."""
    model = self._get_model()
    result = model.test_on_batch(
        self.x, self.y, sample_weight=self.sample_weight)
    self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)
  def test_fit_generator(self):
    """`fit_generator` history matches the unweighted expected values."""
    model = self._get_model()
    history = model.fit_generator(
        self._custom_generator(), steps_per_epoch=2, epochs=2)
    for key, value in self.expected_fit_result.items():
      self.assertAllClose(history.history[key], value, 1e-3)
  def test_fit_generator_with_sample_weight(self):
    """`fit_generator` with generator-provided sample weights matches."""
    model = self._get_model()
    history = model.fit_generator(
        self._custom_generator(sample_weight=self.sample_weight),
        steps_per_epoch=2,
        epochs=2)
    for key, value in self.expected_fit_result_with_weights.items():
      self.assertAllClose(history.history[key], value, 1e-3)
  def test_fit_generator_with_class_weight(self):
    """`fit_generator` with class weights matches expected values."""
    model = self._get_model()
    history = model.fit_generator(
        self._custom_generator(),
        steps_per_epoch=2,
        epochs=2,
        class_weight=self.class_weight)
    for key, value in self.expected_fit_result_with_weights.items():
      self.assertAllClose(history.history[key], value, 1e-3)
  def test_eval_generator(self):
    """`evaluate_generator` results match the unweighted expected values."""
    model = self._get_model()
    eval_result = model.evaluate_generator(self._custom_generator(), steps=2)
    self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)
  def test_eval_generator_with_sample_weight(self):
    """`evaluate_generator` with sample weights matches expected values."""
    model = self._get_model()
    eval_result = model.evaluate_generator(
        self._custom_generator(sample_weight=self.sample_weight), steps=2)
    self.assertAllClose(eval_result, self.expected_batch_result_with_weights,
                        1e-3)
@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
@keras_parameterized.run_all_keras_modes
@parameterized.parameters([
    loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,
    loss_reduction.ReductionV2.AUTO,
    loss_reduction.ReductionV2.SUM
])
class TestOutputLossMetrics(keras_parameterized.TestCase):
  """Checks per-output loss values under each loss-reduction mode.

  Each test is parameterized over the `reduction` argument supplied by the
  `parameterized.parameters` decorator above; expected values per reduction
  are precomputed in `setUp`.
  """
  def _get_compiled_multi_io_model(self, loss):
    """Returns the shared multi-IO model compiled with the given loss."""
    model = get_multi_io_model()
    model.compile(
        optimizer='rmsprop',
        loss=loss,
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model
  def setUp(self):
    """Precomputes fixture data and expected results per reduction mode."""
    super(TestOutputLossMetrics, self).setUp()
    self.x = np.asarray([[1.], [2.], [3.], [4.]])
    self.y1 = np.asarray([[2.], [4.], [6.], [8.]])
    self.y2 = np.asarray([[1.], [2.], [3.], [4.]])
    self.sample_weight_1 = np.asarray([2., 3., 4., 5.])
    self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])
    # y_true = [[2.], [4.], [6.], [8.]], y_pred = [[3.], [6.], [9.], [12.]]
    # Loss `output_1`:
    #   Per-sample weighted losses
    #   Batch 1 = [(3 - 2)^2 * 2, (6 - 4)^2 * 3)] = [2, 12]
    #   Batch 2 = [((9 - 6)^2 * 4, (12 - 8)^2 * 5)] = [36, 80]
    #   Result (reduction=SUM) = ((2 + 12) + (36 + 80))/2 = 65
    #   Result (reduction=SUM_OVER_BATCH_SIZE/AUTO/NONE) = 130 / 4 = 32.5
    # Loss `output_2`:
    #   Per-sample weighted losses
    #   Batch 1 = [(3 - 1)^2 * 3.5, (6 - 2)^2 * 2.5)] = [14, 40]
    #   Batch 2 = [(9 - 3)^2 * 1.5, (12 - 4)^2 * 0.5)] = [54, 32]
    #   Result (reduction=SUM) = ((14 + 40) + (54 + 32))/2 = 70
    #   Result (reduction=SUM_OVER_BATCH_SIZE/AUTO/NONE) = 140 / 4 = 35
    # When reduction is 'NONE' loss value that is passed to the optimizer will
    # be vector loss but what is reported is a scalar, which is an average of
    # all the values in all the batch vectors.
    # Total loss = Output_loss_1 + Output_loss_2
    sum_over_batch_size_fit_result = {
        'loss': [67.5, 67.5],
        'output_1_loss': [32.5, 32.5],
        'output_2_loss': [35, 35],
    }
    self.expected_fit_result = {
        loss_reduction.ReductionV2.NONE:
            sum_over_batch_size_fit_result,
        loss_reduction.ReductionV2.SUM: {
            'loss': [135, 135],
            'output_1_loss': [65, 65],
            'output_2_loss': [70, 70],
        },
        loss_reduction.ReductionV2.AUTO:
            sum_over_batch_size_fit_result,
        loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE:
            sum_over_batch_size_fit_result,
    }
    # In the order: 'loss', 'output_1_loss', 'output_2_loss',
    self.expected_batch_result = {
        loss_reduction.ReductionV2.NONE: [67.5, 32.5, 35],
        loss_reduction.ReductionV2.SUM: [135, 65, 70],
        loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],
        loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35],
    }
  def test_fit(self, reduction):
    """`fit` losses match expected values for the given reduction."""
    model = self._get_compiled_multi_io_model(
        loss=losses.MeanSquaredError(reduction=reduction))
    history = model.fit([self.x, self.x], [self.y1, self.y2],
                        sample_weight={
                            'output_1': self.sample_weight_1,
                            'output_2': self.sample_weight_2,
                        },
                        batch_size=2,
                        epochs=2,
                        shuffle=False)
    for key, value in self.expected_fit_result[reduction].items():
      self.assertAllClose(history.history[key], value)
  def test_eval(self, reduction):
    """`evaluate` losses match expected values for the given reduction."""
    model = self._get_compiled_multi_io_model(
        loss=losses.MeanSquaredError(reduction=reduction))
    eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],
                                 batch_size=2,
                                 sample_weight={
                                     'output_1': self.sample_weight_1,
                                     'output_2': self.sample_weight_2,
                                 })
    self.assertAllClose(eval_result, self.expected_batch_result[reduction])
  def test_train_on_batch(self, reduction):
    """`train_on_batch` losses match; SUM values double for one big batch."""
    model = self._get_compiled_multi_io_model(
        loss=losses.MeanSquaredError(reduction=reduction))
    result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],
                                  sample_weight={
                                      'output_1': self.sample_weight_1,
                                      'output_2': self.sample_weight_2,
                                  })
    expected_values = self.expected_batch_result[reduction]
    if reduction == loss_reduction.ReductionV2.SUM:
      # We are taking all the data as one batch, so undo the averaging here.
      expected_values = [x * 2 for x in self.expected_batch_result[reduction]]
    self.assertAllClose(result, expected_values)
  def test_test_on_batch(self, reduction):
    """`test_on_batch` losses match; SUM values double for one big batch."""
    model = self._get_compiled_multi_io_model(
        loss=losses.MeanSquaredError(reduction=reduction))
    result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],
                                 sample_weight={
                                     'output_1': self.sample_weight_1,
                                     'output_2': self.sample_weight_2,
                                 })
    expected_values = self.expected_batch_result[reduction]
    if reduction == loss_reduction.ReductionV2.SUM:
      # We are taking all the data as one batch, so undo the averaging here.
      expected_values = [x * 2 for x in self.expected_batch_result[reduction]]
    self.assertAllClose(result, expected_values)
  def test_fit_generator(self, reduction):
    """`fit_generator` losses match expected values for the reduction."""
    model = self._get_compiled_multi_io_model(
        loss=losses.MeanSquaredError(reduction=reduction))
    history = model.fit_generator(
        custom_generator_multi_io(
            sample_weights=[self.sample_weight_1, self.sample_weight_2]),
        steps_per_epoch=2,
        epochs=2)
    for key, value in self.expected_fit_result[reduction].items():
      self.assertAllClose(history.history[key], value)
  def test_eval_generator(self, reduction):
    """`evaluate_generator` losses match expected values for the reduction."""
    model = self._get_compiled_multi_io_model(
        loss=losses.MeanSquaredError(reduction=reduction))
    eval_result = model.evaluate_generator(
        custom_generator_multi_io(
            sample_weights=[self.sample_weight_1, self.sample_weight_2]),
        steps=2)
    self.assertAllClose(eval_result, self.expected_batch_result[reduction])
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/metrics_correctness_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Built-in optimizer classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_v2
from tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2
from tensorflow.python.keras.optimizer_v2 import ftrl
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util.tf_export import keras_export
class Optimizer(object):
  """Abstract optimizer base class.
  Note: this is the parent class of all optimizers, not an actual optimizer
  that can be used for training models.
  All Keras optimizers support the following keyword arguments:
      clipnorm: float >= 0. Gradients will be clipped
          when their L2 norm exceeds this value.
      clipvalue: float >= 0. Gradients will be clipped
          when their absolute value exceeds this value.
  """
  def __init__(self, **kwargs):
    """Validates and stores optional `clipnorm`/`clipvalue` kwargs.

    Raises:
      TypeError: If an unknown keyword argument is passed.
      ValueError: If `clipnorm` or `clipvalue` is negative.
    """
    allowed_kwargs = {'clipnorm', 'clipvalue'}
    for k in kwargs:
      if k not in allowed_kwargs:
        raise TypeError('Unexpected keyword argument '
                        'passed to optimizer: ' + str(k))
      # checks that clipnorm >= 0 and clipvalue >= 0
      if kwargs[k] < 0:
        raise ValueError('Expected {} >= 0, received: {}'.format(k, kwargs[k]))
    # Accepted kwargs become instance attributes (checked via hasattr later).
    self.__dict__.update(kwargs)
    self.updates = []
    self.weights = []
  def get_updates(self, loss, params):
    """Returns the list of update ops for one step; subclasses override."""
    raise NotImplementedError
  def get_gradients(self, loss, params):
    """Returns gradients of `loss` with respect to `params`.
    Arguments:
        loss: Loss tensor.
        params: List of variables.
    Returns:
        List of gradient tensors.
    Raises:
        ValueError: In case any gradient cannot be computed (e.g. if gradient
          function not implemented).
    """
    grads = K.gradients(loss, params)
    if None in grads:
      raise ValueError('An operation has `None` for gradient. '
                       'Please make sure that all of your ops have a '
                       'gradient defined (i.e. are differentiable). '
                       'Common ops without gradient: '
                       'K.argmax, K.round, K.eval.')
    # Optional gradient clipping, enabled by the corresponding kwargs.
    if hasattr(self, 'clipnorm'):
      grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
    if hasattr(self, 'clipvalue'):
      grads = [
          clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
          for g in grads
      ]
    return grads
  def set_weights(self, weights):
    """Sets the weights of the optimizer, from Numpy arrays.
    Should only be called after computing the gradients
    (otherwise the optimizer has no weights).
    Arguments:
        weights: a list of Numpy arrays. The number of arrays and their shape
          must match number of the dimensions of the weights of the optimizer
          (i.e. it should match the output of `get_weights`).
    Raises:
        ValueError: in case of incompatible weight shapes.
    """
    params = self.weights
    if len(params) != len(weights):
      raise ValueError('Length of the specified weight list (' +
                       str(len(weights)) +
                       ') does not match the number of weights '
                       'of the optimizer (' + str(len(params)) + ')')
    weight_value_tuples = []
    param_values = K.batch_get_value(params)
    for pv, p, w in zip(param_values, params, weights):
      if pv.shape != w.shape:
        raise ValueError('Optimizer weight shape ' + str(pv.shape) +
                         ' not compatible with '
                         'provided weight shape ' + str(w.shape))
      weight_value_tuples.append((p, w))
    # Single batched assignment is cheaper than per-variable assigns.
    K.batch_set_value(weight_value_tuples)
  def get_weights(self):
    """Returns the current value of the weights of the optimizer.
    Returns:
        A list of numpy arrays.
    """
    return K.batch_get_value(self.weights)
  def get_config(self):
    """Returns the optimizer configuration as a JSON-serializable dict."""
    config = {}
    if hasattr(self, 'clipnorm'):
      config['clipnorm'] = self.clipnorm
    if hasattr(self, 'clipvalue'):
      config['clipvalue'] = self.clipvalue
    return config
  @classmethod
  def from_config(cls, config):
    """Creates an optimizer instance from its config dictionary."""
    return cls(**config)
class SGD(Optimizer):
  """Stochastic gradient descent optimizer.
  Includes support for momentum,
  learning rate decay, and Nesterov momentum.
  Arguments:
      lr: float >= 0. Learning rate.
      momentum: float >= 0. Parameter that accelerates SGD in the relevant
        direction and dampens oscillations.
      decay: float >= 0. Learning rate decay over each update.
      nesterov: boolean. Whether to apply Nesterov momentum.
  """
  def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, **kwargs):
    """Creates SGD hyperparameters as backend variables (graph-mode safe)."""
    super(SGD, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      self.lr = K.variable(lr, name='lr')
      self.momentum = K.variable(momentum, name='momentum')
      self.decay = K.variable(decay, name='decay')
    # Kept as a plain float so get_updates can test `> 0` without a session.
    self.initial_decay = decay
    self.nesterov = nesterov
  def get_updates(self, loss, params):
    """Builds the momentum-SGD update ops for one optimization step."""
    grads = self.get_gradients(loss, params)
    self.updates = [state_ops.assign_add(self.iterations, 1)]
    lr = self.lr
    if self.initial_decay > 0:
      # Time-based decay: lr / (1 + decay * iterations).
      lr = lr * (  # pylint: disable=g-no-augmented-assignment
          1. /
          (1. +
           self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
    # momentum
    shapes = [K.int_shape(p) for p in params]
    moments = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations] + moments
    for p, g, m in zip(params, grads, moments):
      v = self.momentum * m - lr * g  # velocity
      self.updates.append(state_ops.assign(m, v))
      if self.nesterov:
        # Nesterov: look ahead by applying momentum to the updated velocity.
        new_p = p + self.momentum * v - lr * g
      else:
        new_p = p + v
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
    return self.updates
  def get_config(self):
    """Returns the SGD hyperparameters merged with the base config."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'momentum': float(K.get_value(self.momentum)),
        'decay': float(K.get_value(self.decay)),
        'nesterov': self.nesterov
    }
    base_config = super(SGD, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class RMSprop(Optimizer):
  """RMSProp optimizer.
  It is recommended to leave the parameters of this optimizer
  at their default values
  (except the learning rate, which can be freely tuned).
  Arguments:
      lr: float >= 0. Learning rate.
      rho: float >= 0.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Learning rate decay over each update.
  """
  def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0., **kwargs):
    """Creates RMSprop hyperparameters as backend variables."""
    super(RMSprop, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.lr = K.variable(lr, name='lr')
      self.rho = K.variable(rho, name='rho')
      self.decay = K.variable(decay, name='decay')
      self.iterations = K.variable(0, dtype='int64', name='iterations')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    # Kept as a plain float so get_updates can test `> 0` without a session.
    self.initial_decay = decay
  def get_updates(self, loss, params):
    """Builds the RMSprop update ops for one optimization step."""
    grads = self.get_gradients(loss, params)
    accumulators = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    self.weights = accumulators
    self.updates = [state_ops.assign_add(self.iterations, 1)]
    lr = self.lr
    if self.initial_decay > 0:
      # Time-based decay: lr / (1 + decay * iterations).
      lr = lr * (  # pylint: disable=g-no-augmented-assignment
          1. /
          (1. +
           self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
    for p, g, a in zip(params, grads, accumulators):
      # update accumulator
      new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
      self.updates.append(state_ops.assign(a, new_a))
      new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
    return self.updates
  def get_config(self):
    """Returns the RMSprop hyperparameters merged with the base config."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'rho': float(K.get_value(self.rho)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(RMSprop, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adagrad(Optimizer):
  """Adagrad optimizer.

  Adagrad is an optimizer with parameter-specific learning rates,
  which are adapted relative to how frequently a parameter gets
  updated during training. The more updates a parameter receives,
  the smaller the updates.

  It is recommended to leave the parameters of this optimizer
  at their default values.

  # Arguments
    lr: float >= 0. Initial learning rate.
    epsilon: float >= 0. If `None`, defaults to `K.epsilon()`.
    decay: float >= 0. Learning rate decay over each update.

  # References
    - [Adaptive Subgradient Methods for Online Learning and Stochastic
      Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
  """

  def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs):
    super(Adagrad, self).__init__(**kwargs)
    # Hyperparameter variables grouped under the class name scope.
    with K.name_scope(self.__class__.__name__):
      self.lr = K.variable(lr, name='lr')
      self.decay = K.variable(decay, name='decay')
      self.iterations = K.variable(0, dtype='int64', name='iterations')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    # Python float copy of `decay` for the cheap `> 0` test in get_updates.
    self.initial_decay = decay

  def get_updates(self, loss, params):
    """Builds the Adagrad update ops for one training step.

    Arguments:
      loss: Scalar loss tensor to minimize.
      params: List of parameter variables to update.

    Returns:
      The list of update ops (also stored in `self.updates`).
    """
    grads = self.get_gradients(loss, params)
    shapes = [K.int_shape(p) for p in params]
    accumulators = [K.zeros(shape) for shape in shapes]
    self.weights = accumulators
    self.updates = [state_ops.assign_add(self.iterations, 1)]
    lr = self.lr
    if self.initial_decay > 0:
      # Inverse-time decay: effective lr = lr / (1 + decay * iterations).
      lr = lr * (  # pylint: disable=g-no-augmented-assignment
          1. /
          (1. +
           self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
    for p, g, a in zip(params, grads, accumulators):
      # Monotonically growing sum of squared gradients — this is what makes
      # per-parameter effective learning rates shrink over time.
      new_a = a + math_ops.square(g)  # update accumulator
      self.updates.append(state_ops.assign(a, new_a))
      new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
    return self.updates

  def get_config(self):
    """Returns the optimizer configuration as a JSON-serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(Adagrad, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adadelta(Optimizer):
  """Adadelta optimizer.

  Adadelta is a more robust extension of Adagrad
  that adapts learning rates based on a moving window of gradient updates,
  instead of accumulating all past gradients. This way, Adadelta continues
  learning even when many updates have been done. Compared to Adagrad, in the
  original version of Adadelta you don't have to set an initial learning
  rate. In this version, initial learning rate and decay factor can
  be set, as in most other Keras optimizers.

  It is recommended to leave the parameters of this optimizer
  at their default values.

  # Arguments
    lr: float >= 0. Initial learning rate, defaults to 1.
      It is recommended to leave it at the default value.
    rho: float >= 0. Adadelta decay factor, corresponding to fraction of
      gradient to keep at each time step.
    epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
    decay: float >= 0. Initial learning rate decay.

  # References
    - [Adadelta - an adaptive learning rate
      method](http://arxiv.org/abs/1212.5701)
  """

  def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0., **kwargs):
    super(Adadelta, self).__init__(**kwargs)
    # Hyperparameter variables grouped under the class name scope. Note that
    # `rho` is kept as a plain Python float here (unlike RMSprop).
    with K.name_scope(self.__class__.__name__):
      self.lr = K.variable(lr, name='lr')
      self.decay = K.variable(decay, name='decay')
      self.iterations = K.variable(0, dtype='int64', name='iterations')
    if epsilon is None:
      epsilon = K.epsilon()
    self.rho = rho
    self.epsilon = epsilon
    # Python float copy of `decay` for the cheap `> 0` test in get_updates.
    self.initial_decay = decay

  def get_updates(self, loss, params):
    """Builds the Adadelta update ops for one training step.

    Arguments:
      loss: Scalar loss tensor to minimize.
      params: List of parameter variables to update.

    Returns:
      The list of update ops (also stored in `self.updates`).
    """
    grads = self.get_gradients(loss, params)
    shapes = [K.int_shape(p) for p in params]
    # Two state slots per parameter: squared-gradient accumulator and
    # squared-update (delta) accumulator.
    accumulators = [K.zeros(shape) for shape in shapes]
    delta_accumulators = [K.zeros(shape) for shape in shapes]
    self.weights = accumulators + delta_accumulators
    self.updates = [state_ops.assign_add(self.iterations, 1)]
    lr = self.lr
    if self.initial_decay > 0:
      # Inverse-time decay: effective lr = lr / (1 + decay * iterations).
      lr = lr * (  # pylint: disable=g-no-augmented-assignment
          1. /
          (1. +
           self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
    for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
      # update accumulator
      new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
      self.updates.append(state_ops.assign(a, new_a))
      # use the new accumulator and the *old* delta_accumulator — the
      # delta accumulator must only be refreshed after `update` is computed.
      update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)
      new_p = p - lr * update
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
      # update delta_accumulator
      new_d_a = self.rho * d_a + (1 - self.rho) * math_ops.square(update)
      self.updates.append(state_ops.assign(d_a, new_d_a))
    return self.updates

  def get_config(self):
    """Returns the optimizer configuration as a JSON-serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'rho': self.rho,
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(Adadelta, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adam(Optimizer):
  """Adam optimizer.

  Default parameters follow those provided in the original paper.

  Arguments:
    lr: float >= 0. Learning rate.
    beta_1: float, 0 < beta < 1. Generally close to 1.
    beta_2: float, 0 < beta < 1. Generally close to 1.
    epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
    decay: float >= 0. Learning rate decay over each update.
    amsgrad: boolean. Whether to apply the AMSGrad variant of this algorithm
      from the paper "On the Convergence of Adam and Beyond".
  """

  def __init__(self,
               lr=0.001,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=None,
               decay=0.,
               amsgrad=False,
               **kwargs):
    super(Adam, self).__init__(**kwargs)
    # Hyperparameter variables grouped under the class name scope.
    with K.name_scope(self.__class__.__name__):
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      self.lr = K.variable(lr, name='lr')
      self.beta_1 = K.variable(beta_1, name='beta_1')
      self.beta_2 = K.variable(beta_2, name='beta_2')
      self.decay = K.variable(decay, name='decay')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    # Python float copy of `decay` for the cheap `> 0` test in get_updates.
    self.initial_decay = decay
    self.amsgrad = amsgrad

  def get_updates(self, loss, params):
    """Builds the Adam update ops for one training step.

    Arguments:
      loss: Scalar loss tensor to minimize.
      params: List of parameter variables to update.

    Returns:
      The list of update ops (also stored in `self.updates`).
    """
    grads = self.get_gradients(loss, params)
    self.updates = []
    lr = self.lr
    if self.initial_decay > 0:
      # Inverse-time decay: effective lr = lr / (1 + decay * iterations).
      lr = lr * (  # pylint: disable=g-no-augmented-assignment
          1. /
          (1. +
           self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
    # The increment runs before `t` is read, so `t` starts at 1 on the
    # first step, as required by the bias-correction formulas below.
    with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
      t = math_ops.cast(self.iterations, K.floatx())
    # Bias-corrected step size for the current iteration.
    lr_t = lr * (
        K.sqrt(1. - math_ops.pow(self.beta_2, t)) /
        (1. - math_ops.pow(self.beta_1, t)))
    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    if self.amsgrad:
      vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    else:
      # Dummy single-element slots so `self.weights` has a fixed layout
      # regardless of the amsgrad flag.
      vhats = [K.zeros(1) for _ in params]
    self.weights = [self.iterations] + ms + vs + vhats
    for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
      # First and second moment estimates.
      m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
      v_t = (self.beta_2 * v) + (1. - self.beta_2) * math_ops.square(g)
      if self.amsgrad:
        # AMSGrad: use the running maximum of v_t in the denominator.
        vhat_t = math_ops.maximum(vhat, v_t)
        p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
        self.updates.append(state_ops.assign(vhat, vhat_t))
      else:
        p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
      self.updates.append(state_ops.assign(m, m_t))
      self.updates.append(state_ops.assign(v, v_t))
      new_p = p_t
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
    return self.updates

  def get_config(self):
    """Returns the optimizer configuration as a JSON-serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'beta_1': float(K.get_value(self.beta_1)),
        'beta_2': float(K.get_value(self.beta_2)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon,
        'amsgrad': self.amsgrad
    }
    base_config = super(Adam, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adamax(Optimizer):
  """Adamax optimizer from Adam paper's Section 7.

  It is a variant of Adam based on the infinity norm.
  Default parameters follow those provided in the paper.

  Arguments:
    lr: float >= 0. Learning rate.
    beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
    epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
    decay: float >= 0. Learning rate decay over each update.
  """

  def __init__(self,
               lr=0.002,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=None,
               decay=0.,
               **kwargs):
    super(Adamax, self).__init__(**kwargs)
    # Hyperparameter variables grouped under the class name scope.
    with K.name_scope(self.__class__.__name__):
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      self.lr = K.variable(lr, name='lr')
      self.beta_1 = K.variable(beta_1, name='beta_1')
      self.beta_2 = K.variable(beta_2, name='beta_2')
      self.decay = K.variable(decay, name='decay')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    # Python float copy of `decay` for the cheap `> 0` test in get_updates.
    self.initial_decay = decay

  def get_updates(self, loss, params):
    """Builds the Adamax update ops for one training step.

    Arguments:
      loss: Scalar loss tensor to minimize.
      params: List of parameter variables to update.

    Returns:
      The list of update ops (also stored in `self.updates`).
    """
    grads = self.get_gradients(loss, params)
    self.updates = []
    lr = self.lr
    if self.initial_decay > 0:
      # Inverse-time decay: effective lr = lr / (1 + decay * iterations).
      lr = lr * (  # pylint: disable=g-no-augmented-assignment
          1. /
          (1. +
           self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
    # The increment runs before `t` is read, so `t` starts at 1 on the
    # first step, as required by the bias correction below.
    with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
      t = math_ops.cast(self.iterations, K.floatx())
    # Bias correction for the first moment only; the infinity norm needs none.
    lr_t = lr / (1. - math_ops.pow(self.beta_1, t))
    shapes = [K.int_shape(p) for p in params]
    # zero init of 1st moment
    ms = [K.zeros(shape) for shape in shapes]
    # zero init of exponentially weighted infinity norm
    us = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations] + ms + us
    for p, g, m, u in zip(params, grads, ms, us):
      m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
      # Infinity-norm update: elementwise max instead of a squared average.
      u_t = math_ops.maximum(self.beta_2 * u, math_ops.abs(g))
      p_t = p - lr_t * m_t / (u_t + self.epsilon)
      self.updates.append(state_ops.assign(m, m_t))
      self.updates.append(state_ops.assign(u, u_t))
      new_p = p_t
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
    return self.updates

  def get_config(self):
    """Returns the optimizer configuration as a JSON-serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'beta_1': float(K.get_value(self.beta_1)),
        'beta_2': float(K.get_value(self.beta_2)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(Adamax, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Nadam(Optimizer):
  """Nesterov Adam optimizer.

  Much like Adam is essentially RMSprop with momentum,
  Nadam is Adam RMSprop with Nesterov momentum.

  Default parameters follow those provided in the paper.
  It is recommended to leave the parameters of this optimizer
  at their default values.

  Arguments:
    lr: float >= 0. Learning rate.
    beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
    epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
  """

  def __init__(self,
               lr=0.002,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=None,
               schedule_decay=0.004,
               **kwargs):
    super(Nadam, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      # Running product of the per-step momentum schedule values.
      self.m_schedule = K.variable(1., name='m_schedule')
      self.lr = K.variable(lr, name='lr')
      self.beta_1 = K.variable(beta_1, name='beta_1')
      self.beta_2 = K.variable(beta_2, name='beta_2')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    self.schedule_decay = schedule_decay

  def get_updates(self, loss, params):
    """Builds the Nadam update ops for one training step.

    Arguments:
      loss: Scalar loss tensor to minimize.
      params: List of parameter variables to update.

    Returns:
      The list of update ops (also stored in `self.updates`).
    """
    grads = self.get_gradients(loss, params)
    self.updates = []
    # The increment runs before `t` is read, so `t` starts at 1 on the
    # first step.
    with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
      t = math_ops.cast(self.iterations, K.floatx())
    # Due to the recommendations in [2], i.e. warming momentum schedule
    momentum_cache_t = self.beta_1 * (
        1. - 0.5 *
        (math_ops.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
    momentum_cache_t_1 = self.beta_1 * (
        1. - 0.5 *
        (math_ops.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
    m_schedule_new = self.m_schedule * momentum_cache_t
    m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
    # NOTE(review): tuple-form (variable, new_value) entry rather than an
    # explicit assign op — presumably handled by the backend's update
    # mechanism; confirm against K.function's update handling.
    self.updates.append((self.m_schedule, m_schedule_new))
    shapes = [K.int_shape(p) for p in params]
    ms = [K.zeros(shape) for shape in shapes]
    vs = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations, self.m_schedule] + ms + vs
    for p, g, m, v in zip(params, grads, ms, vs):
      # the following equations given in [1]
      g_prime = g / (1. - m_schedule_new)
      m_t = self.beta_1 * m + (1. - self.beta_1) * g
      m_t_prime = m_t / (1. - m_schedule_next)
      v_t = self.beta_2 * v + (1. - self.beta_2) * math_ops.square(g)
      v_t_prime = v_t / (1. - math_ops.pow(self.beta_2, t))
      # Blend of the corrected gradient and the looked-ahead first moment.
      m_t_bar = (1. -
                 momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime
      self.updates.append(state_ops.assign(m, m_t))
      self.updates.append(state_ops.assign(v, v_t))
      p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
      new_p = p_t
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
    return self.updates

  def get_config(self):
    """Returns the optimizer configuration as a JSON-serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'beta_1': float(K.get_value(self.beta_1)),
        'beta_2': float(K.get_value(self.beta_2)),
        'epsilon': self.epsilon,
        'schedule_decay': self.schedule_decay
    }
    base_config = super(Nadam, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class TFOptimizer(Optimizer, trackable.Trackable):
  """Wrapper class for native TensorFlow optimizers.

  Adapts a `tf.train`-style optimizer to the Keras `Optimizer` interface by
  delegating gradient computation and application to the wrapped instance.
  `weights`, `get_config` and `from_config` are intentionally unsupported.
  """

  def __init__(self, optimizer, iterations=None):  # pylint: disable=super-init-not-called
    """Wraps `optimizer`; creates an iterations counter unless one is given.

    Arguments:
      optimizer: The native TensorFlow optimizer to wrap.
      iterations: Optional existing step-counter variable to reuse.
    """
    self.optimizer = optimizer
    self._track_trackable(optimizer, name='optimizer')
    if iterations is None:
      with K.name_scope(self.__class__.__name__):
        self.iterations = K.variable(0, dtype='int64', name='iterations')
    else:
      self.iterations = iterations
    self._track_trackable(self.iterations, name='global_step')

  def apply_gradients(self, grads):
    # Delegates to the wrapped optimizer, using our counter as global_step.
    self.optimizer.apply_gradients(grads, global_step=self.iterations)

  def get_grads(self, loss, params):
    # Delegates gradient computation to the wrapped optimizer.
    return self.optimizer.compute_gradients(loss, params)

  def get_updates(self, loss, params):
    """Builds the update ops, branching on distribution-strategy presence."""
    if distribution_strategy_context.has_strategy():
      self.updates = []
      if not params:
        # After the model vars have been created, the second call to get_updates
        # is called with params as an empty list. This ensures that we call
        # compute_gradients with params=None.
        grads = self.optimizer.compute_gradients(loss)
      else:
        grads = self.optimizer.compute_gradients(loss, params)
      global_step = training_util.get_global_step()
      opt_update = self.optimizer.apply_gradients(grads, global_step)
    else:
      if not params:
        # No parameters: only advance the step counter.
        self.updates = [state_ops.assign_add(self.iterations, 1)]
        return self.updates
      # Updates list starts out empty because the iterations variable is
      # incremented in optimizer.apply_gradients()
      self.updates = []
      grads = self.optimizer.compute_gradients(loss, params)
      opt_update = self.optimizer.apply_gradients(
          grads, global_step=self.iterations)
    self.updates.append(opt_update)
    return self.updates

  @property
  def weights(self):
    # Native optimizer slots are not exposed through the Keras interface.
    raise NotImplementedError

  def get_config(self):
    # Native optimizers are not serializable through the Keras interface.
    raise NotImplementedError

  def from_config(self, config):
    raise NotImplementedError
# Aliases.
# Lowercase module-level aliases for the optimizer classes (legacy Keras
# naming convention).
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
adamax = Adamax
nadam = Nadam
@keras_export('keras.optimizers.serialize')
def serialize(optimizer):
  """Serializes `optimizer` into a JSON-compatible configuration structure."""
  return serialize_keras_object(optimizer)
@keras_export('keras.optimizers.deserialize')
def deserialize(config, custom_objects=None):
  """Inverse of the `serialize` function.

  Arguments:
    config: Optimizer configuration dictionary.
    custom_objects: Optional dictionary mapping names (strings) to custom
      objects (classes and functions) to be considered during deserialization.

  Returns:
    A Keras Optimizer instance.
  """
  # Built-in names map to the V2 optimizer implementations.
  all_classes = {
      'adadelta': adadelta_v2.Adadelta,
      'adagrad': adagrad_v2.Adagrad,
      'adam': adam_v2.Adam,
      'adamax': adamax_v2.Adamax,
      'nadam': nadam_v2.Nadam,
      'rmsprop': rmsprop_v2.RMSprop,
      'sgd': gradient_descent_v2.SGD,
      'ftrl': ftrl.Ftrl
  }
  # Make deserialization case-insensitive for built-in optimizers.
  # NOTE(review): this rewrites `config['class_name']` in place, mutating the
  # caller's dict — confirm no caller relies on the original casing.
  if config['class_name'].lower() in all_classes:
    config['class_name'] = config['class_name'].lower()
  return deserialize_keras_object(
      config,
      module_objects=all_classes,
      custom_objects=custom_objects,
      printable_module_name='optimizer')
@keras_export('keras.optimizers.get')
def get(identifier):
  """Retrieves a Keras Optimizer instance.

  Arguments:
    identifier: Optimizer identifier, one of
        - String: name of an optimizer
        - Dictionary: configuration dictionary.
        - Keras Optimizer instance (it will be returned unchanged).
        - TensorFlow Optimizer instance (it will be wrapped as a Keras
          Optimizer).

  Returns:
    A Keras Optimizer instance.

  Raises:
    ValueError: If `identifier` cannot be interpreted.
  """
  # Already a Keras optimizer (V1 or V2): pass through unchanged.
  if isinstance(identifier, (Optimizer, optimizer_v2.OptimizerV2)):
    return identifier
  # Wrap TF optimizer instances
  elif isinstance(identifier, tf_optimizer_module.Optimizer):
    opt = TFOptimizer(identifier)
    K.track_tf_optimizer(opt)
    return opt
  elif isinstance(identifier, dict):
    return deserialize(identifier)
  elif isinstance(identifier, six.string_types):
    # String name: treat as a config with empty constructor arguments.
    config = {'class_name': str(identifier), 'config': {}}
    return deserialize(config)
  else:
    raise ValueError('Could not interpret optimizer identifier:', identifier)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/optimizers.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for backend_config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class BackendConfigTest(test.TestCase):
  """Tests for the module-level getters/setters in keras.backend_config.

  Fixes: the epsilon test method was misspelled (`test_espilon`), and the
  tests restored the default values only on the success path, so a failing
  assertion could leak a modified global setting into other tests. The
  defaults are now restored via `addCleanup` as well.
  """

  def test_backend(self):
    # Keras in this tree is backed by TensorFlow.
    self.assertEqual(keras.backend.backend(), 'tensorflow')

  def test_epsilon(self):
    """`set_epsilon` round-trips through `epsilon()`."""
    # Restore the default even if an assertion below fails.
    self.addCleanup(keras.backend_config.set_epsilon, 1e-7)
    epsilon = 1e-2
    keras.backend_config.set_epsilon(epsilon)
    self.assertEqual(keras.backend_config.epsilon(), epsilon)
    keras.backend_config.set_epsilon(1e-7)
    self.assertEqual(keras.backend_config.epsilon(), 1e-7)

  def test_floatx(self):
    """`set_floatx` round-trips through `floatx()`."""
    # Restore the default even if an assertion below fails.
    self.addCleanup(keras.backend_config.set_floatx, 'float32')
    floatx = 'float64'
    keras.backend_config.set_floatx(floatx)
    self.assertEqual(keras.backend_config.floatx(), floatx)
    keras.backend_config.set_floatx('float32')
    self.assertEqual(keras.backend_config.floatx(), 'float32')

  def test_image_data_format(self):
    """`set_image_data_format` round-trips through `image_data_format()`."""
    # Restore the default even if an assertion below fails.
    self.addCleanup(keras.backend_config.set_image_data_format,
                    'channels_last')
    image_data_format = 'channels_first'
    keras.backend_config.set_image_data_format(image_data_format)
    self.assertEqual(keras.backend_config.image_data_format(),
                     image_data_format)
    keras.backend_config.set_image_data_format('channels_last')
    self.assertEqual(keras.backend_config.image_data_format(), 'channels_last')
# Standard test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/backend_config_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for exporting TensorFlow ops under tf.keras.*."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.util.tf_export import keras_export
# pylint: disable=bad-continuation

# V1-only export aliases: register the `init_ops` (TF1) initializer
# implementations under `keras.initializers.*` names for the 1.x API.
keras_export(v1=["keras.initializers.Initializer"])(
    init_ops.Initializer)
keras_export(v1=["keras.initializers.Zeros", "keras.initializers.zeros"])(
    init_ops.Zeros)
keras_export(v1=["keras.initializers.Ones", "keras.initializers.ones"])(
    init_ops.Ones)
keras_export(v1=["keras.initializers.Constant", "keras.initializers.constant"])(
    init_ops.Constant)
keras_export(v1=["keras.initializers.VarianceScaling"])(
    init_ops.VarianceScaling)
keras_export(v1=["keras.initializers.Orthogonal",
                 "keras.initializers.orthogonal"])(
                     init_ops.Orthogonal)
keras_export(v1=["keras.initializers.Identity",
                 "keras.initializers.identity"])(
                     init_ops.Identity)
keras_export(v1=["keras.initializers.glorot_uniform"])(
    init_ops.GlorotUniform)
keras_export(v1=["keras.initializers.glorot_normal"])(
    init_ops.GlorotNormal)
keras_export(v1=["keras.initializers.lecun_normal"])(
    init_ops.lecun_normal)
keras_export(v1=["keras.initializers.lecun_uniform"])(
    init_ops.lecun_uniform)
keras_export(v1=["keras.initializers.he_normal"])(
    init_ops.he_normal)
keras_export(v1=["keras.initializers.he_uniform"])(
    init_ops.he_uniform)

# V2-only exports (`v1=[]`): register the `init_ops_v2` implementations
# under the same public names for the 2.x API.
keras_export("keras.initializers.Initializer", v1=[])(
    init_ops_v2.Initializer)
keras_export(
    "keras.initializers.Zeros", "keras.initializers.zeros", v1=[])(
        init_ops_v2.Zeros)
keras_export(
    "keras.initializers.Ones", "keras.initializers.ones", v1=[])(
        init_ops_v2.Ones)
keras_export(
    "keras.initializers.Constant", "keras.initializers.constant", v1=[])(
        init_ops_v2.Constant)
keras_export("keras.initializers.VarianceScaling", v1=[])(
    init_ops_v2.VarianceScaling)
keras_export(
    "keras.initializers.Orthogonal", "keras.initializers.orthogonal", v1=[])(
        init_ops_v2.Orthogonal)
keras_export(
    "keras.initializers.Identity", "keras.initializers.identity", v1=[])(
        init_ops_v2.Identity)
keras_export(
    "keras.initializers.GlorotUniform",
    "keras.initializers.glorot_uniform",
    v1=[])(
        init_ops_v2.GlorotUniform)
keras_export(
    "keras.initializers.GlorotNormal",
    "keras.initializers.glorot_normal",
    v1=[])(
        init_ops_v2.GlorotNormal)
keras_export("keras.initializers.lecun_normal", v1=[])(
    init_ops_v2.lecun_normal)
keras_export("keras.initializers.lecun_uniform", v1=[])(
    init_ops_v2.lecun_uniform)
keras_export("keras.initializers.he_normal", v1=[])(
    init_ops_v2.he_normal)
keras_export("keras.initializers.he_uniform", v1=[])(
    init_ops_v2.he_uniform)
keras_export("keras.initializers.RandomNormal", v1=[])(
    init_ops_v2.RandomNormal)
keras_export("keras.initializers.RandomUniform", v1=[])(
    init_ops_v2.RandomUniform)
keras_export("keras.initializers.TruncatedNormal", v1=[])(
    init_ops_v2.TruncatedNormal)
# pylint: enable=bad-continuation

keras_export(v1=["keras.backend.name_scope"])(ops.name_scope)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras initializer serialization / deserialization.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python import tf2
from tensorflow.python.framework import dtypes
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import init_ops_v2
# These imports are brought in so that keras.initializers.deserialize
# has them available in module_objects.
from tensorflow.python.ops.init_ops import Constant
from tensorflow.python.ops.init_ops import GlorotNormal
from tensorflow.python.ops.init_ops import GlorotUniform
from tensorflow.python.ops.init_ops import he_normal # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import he_uniform # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import Identity
from tensorflow.python.ops.init_ops import Initializer # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import lecun_normal # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import lecun_uniform # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import Ones
from tensorflow.python.ops.init_ops import Orthogonal
from tensorflow.python.ops.init_ops import RandomNormal as TFRandomNormal
from tensorflow.python.ops.init_ops import RandomUniform as TFRandomUniform
from tensorflow.python.ops.init_ops import TruncatedNormal as TFTruncatedNormal
from tensorflow.python.ops.init_ops import VarianceScaling # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import Zeros
# pylint: disable=unused-import, disable=line-too-long
from tensorflow.python.ops.init_ops_v2 import Constant as ConstantV2
from tensorflow.python.ops.init_ops_v2 import GlorotNormal as GlorotNormalV2
from tensorflow.python.ops.init_ops_v2 import GlorotUniform as GlorotUniformV2
from tensorflow.python.ops.init_ops_v2 import he_normal as he_normalV2
from tensorflow.python.ops.init_ops_v2 import he_uniform as he_uniformV2
from tensorflow.python.ops.init_ops_v2 import Identity as IdentityV2
from tensorflow.python.ops.init_ops_v2 import Initializer as InitializerV2
from tensorflow.python.ops.init_ops_v2 import lecun_normal as lecun_normalV2
from tensorflow.python.ops.init_ops_v2 import lecun_uniform as lecun_uniformV2
from tensorflow.python.ops.init_ops_v2 import Ones as OnesV2
from tensorflow.python.ops.init_ops_v2 import Orthogonal as OrthogonalV2
from tensorflow.python.ops.init_ops_v2 import RandomNormal as RandomNormalV2
from tensorflow.python.ops.init_ops_v2 import RandomUniform as RandomUniformV2
from tensorflow.python.ops.init_ops_v2 import TruncatedNormal as TruncatedNormalV2
from tensorflow.python.ops.init_ops_v2 import VarianceScaling as VarianceScalingV2
from tensorflow.python.ops.init_ops_v2 import Zeros as ZerosV2
# pylint: enable=unused-import, enable=line-too-long
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=['keras.initializers.TruncatedNormal',
                  'keras.initializers.truncated_normal'])
class TruncatedNormal(TFTruncatedNormal):
  """Initializer that generates a truncated normal distribution.

  These values are similar to values from a `random_normal_initializer`
  except that values more than two standard deviations from the mean
  are discarded and re-drawn. This is the recommended initializer for
  neural network weights and filters.

  Args:
    mean: a python scalar or a scalar tensor. Mean of the random values to
      generate. Defaults to 0.
    stddev: a python scalar or a scalar tensor. Standard deviation of the random
      values to generate. Defaults to 0.05.
    seed: A Python integer. Used to create random seeds. See
      `tf.compat.v1.set_random_seed` for behavior.
    dtype: The data type. Only floating point types are supported.

  Returns:
    A TruncatedNormal instance.
  """

  def __init__(self, mean=0.0, stddev=0.05, seed=None, dtype=dtypes.float32):
    # Thin wrapper: forwards everything to the TF initializer; this class
    # exists to carry the v1 `keras.initializers` export names above.
    super(TruncatedNormal, self).__init__(
        mean=mean, stddev=stddev, seed=seed, dtype=dtype)
@keras_export(v1=['keras.initializers.RandomUniform',
                  'keras.initializers.uniform',
                  'keras.initializers.random_uniform'])
class RandomUniform(TFRandomUniform):
  """Initializer that generates tensors with a uniform distribution.

  Args:
    minval: A python scalar or a scalar tensor. Lower bound of the range of
      random values to generate. Defaults to -0.05.
    maxval: A python scalar or a scalar tensor. Upper bound of the range of
      random values to generate. Defaults to 0.05.
    seed: A Python integer. Used to create random seeds. See
      `tf.compat.v1.set_random_seed` for behavior.
    dtype: The data type.

  Returns:
    A RandomUniform instance.
  """

  def __init__(self, minval=-0.05, maxval=0.05, seed=None,
               dtype=dtypes.float32):
    # Thin wrapper: forwards everything to the TF initializer; this class
    # exists to carry the v1 `keras.initializers` export names above.
    super(RandomUniform, self).__init__(
        minval=minval, maxval=maxval, seed=seed, dtype=dtype)
@keras_export(v1=['keras.initializers.RandomNormal',
                  'keras.initializers.normal',
                  'keras.initializers.random_normal'])
class RandomNormal(TFRandomNormal):
  """Initializer that generates tensors with a normal distribution.

  Args:
    mean: a python scalar or a scalar tensor. Mean of the random values to
      generate. Defaults to 0.
    stddev: a python scalar or a scalar tensor. Standard deviation of the random
      values to generate. Defaults to 0.05.
    seed: A Python integer. Used to create random seeds. See
      `tf.compat.v1.set_random_seed` for behavior.
    dtype: The data type. Only floating point types are supported.

  Returns:
    RandomNormal instance.
  """

  def __init__(self, mean=0.0, stddev=0.05, seed=None, dtype=dtypes.float32):
    # Thin wrapper: forwards everything to the TF initializer; this class
    # exists to carry the v1 `keras.initializers` export names above.
    super(RandomNormal, self).__init__(
        mean=mean, stddev=stddev, seed=seed, dtype=dtype)
# Compatibility aliases
# pylint: disable=invalid-name
# Lowercase aliases mirror the classic Keras names so that string
# identifiers such as 'glorot_uniform' resolve through this module's
# globals() in `deserialize` below (non-TF2 path).
zero = zeros = Zeros
one = ones = Ones
constant = Constant
uniform = random_uniform = RandomUniform
normal = random_normal = RandomNormal
truncated_normal = TruncatedNormal
identity = Identity
orthogonal = Orthogonal
glorot_normal = GlorotNormal
glorot_uniform = GlorotUniform
# Utility functions
@keras_export('keras.initializers.serialize')
def serialize(initializer):
  """Serializes `initializer` into a JSON-compatible configuration structure."""
  return serialize_keras_object(initializer)
@keras_export('keras.initializers.deserialize')
def deserialize(config, custom_objects=None):
  """Return an `Initializer` object from its config.

  Arguments:
    config: Initializer configuration dictionary (or string identifier, as
      forwarded by `get` below).
    custom_objects: Optional dictionary mapping names (strings) to custom
      objects to be considered during deserialization.

  Returns:
    An initializer instance; V2 (`init_ops_v2`) classes when TF2 behavior
    is enabled, otherwise the classes/aliases defined in this module.
  """
  if tf2.enabled():
    # Class names are the same for V1 and V2 but the V2 classes
    # are aliased in this file so we need to grab them directly
    # from `init_ops_v2`.
    module_objects = {
        obj_name: getattr(init_ops_v2, obj_name)
        for obj_name in dir(init_ops_v2)
    }
  else:
    module_objects = globals()
  return deserialize_keras_object(
      config,
      module_objects=module_objects,
      custom_objects=custom_objects,
      printable_module_name='initializer')
@keras_export('keras.initializers.get')
def get(identifier):
  """Retrieve an initializer from an identifier.

  Args:
    identifier: `None`, a config dict, a string name, or a callable.

  Returns:
    An initializer object, or `None` when `identifier` is `None`.

  Raises:
    ValueError: If `identifier` cannot be interpreted.
  """
  if identifier is None:
    return None
  if isinstance(identifier, dict):
    return deserialize(identifier)
  if isinstance(identifier, six.string_types):
    name = str(identifier)
    # We have to special-case functions that return classes.
    # TODO(omalleyt): Turn these into classes or class aliases.
    if name in ('he_normal', 'he_uniform', 'lecun_normal', 'lecun_uniform'):
      # Treat like a class.
      return deserialize({'class_name': name, 'config': {}})
    return deserialize(name)
  if callable(identifier):
    return identifier
  raise ValueError('Could not interpret initializer identifier: ' +
                   str(identifier))
# pylint: enable=invalid-name
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/initializers.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import metrics
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FalsePositivesTest(test.TestCase):
  """Tests for the `metrics.FalsePositives` confusion-matrix metric."""
  def test_config(self):
    # Constructor args must round-trip through get_config/from_config.
    fp_obj = metrics.FalsePositives(name='my_fp', thresholds=[0.4, 0.9])
    self.assertEqual(fp_obj.name, 'my_fp')
    self.assertEqual(len(fp_obj.variables), 1)
    self.assertEqual(fp_obj.thresholds, [0.4, 0.9])
    # Check save and restore config
    fp_obj2 = metrics.FalsePositives.from_config(fp_obj.get_config())
    self.assertEqual(fp_obj2.name, 'my_fp')
    self.assertEqual(len(fp_obj2.variables), 1)
    self.assertEqual(fp_obj2.thresholds, [0.4, 0.9])
  def test_unweighted(self):
    fp_obj = metrics.FalsePositives()
    self.evaluate(variables.variables_initializer(fp_obj.variables))
    y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                   (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    update_op = fp_obj.update_state(y_true, y_pred)
    self.evaluate(update_op)
    result = fp_obj.result()
    self.assertAllClose(7., result)
  def test_weighted(self):
    # One weight per row; each false positive contributes its row weight.
    fp_obj = metrics.FalsePositives()
    self.evaluate(variables.variables_initializer(fp_obj.variables))
    y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                   (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
    result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose(14., self.evaluate(result))
  def test_unweighted_with_thresholds(self):
    # With a threshold list, the metric reports one count per threshold.
    fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(variables.variables_initializer(fp_obj.variables))
    y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                                   (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                                   (1, 1, 1, 1)))
    update_op = fp_obj.update_state(y_true, y_pred)
    self.evaluate(update_op)
    result = fp_obj.result()
    self.assertAllClose([7., 4., 2.], result)
  def test_weighted_with_thresholds(self):
    fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(variables.variables_initializer(fp_obj.variables))
    y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                                   (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                                   (1, 1, 1, 1)))
    sample_weight = ((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0),
                     (19.0, 23.0, 29.0, 31.0), (5.0, 15.0, 10.0, 0))
    result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose([125., 42., 12.], self.evaluate(result))
  def test_threshold_limit(self):
    # Out-of-range or None thresholds must be rejected at construction time.
    with self.assertRaisesRegexp(
        ValueError,
        r'Threshold values must be in \[0, 1\]. Invalid values: \[-1, 2\]'):
      metrics.FalsePositives(thresholds=[-1, 0.5, 2])
    with self.assertRaisesRegexp(
        ValueError,
        r'Threshold values must be in \[0, 1\]. Invalid values: \[None\]'):
      metrics.FalsePositives(thresholds=[None])
@test_util.run_all_in_graph_and_eager_modes
class FalseNegativesTest(test.TestCase):
  """Tests for the `metrics.FalseNegatives` confusion-matrix metric."""
  def test_config(self):
    # Constructor args must round-trip through get_config/from_config.
    fn_obj = metrics.FalseNegatives(name='my_fn', thresholds=[0.4, 0.9])
    self.assertEqual(fn_obj.name, 'my_fn')
    self.assertEqual(len(fn_obj.variables), 1)
    self.assertEqual(fn_obj.thresholds, [0.4, 0.9])
    # Check save and restore config
    fn_obj2 = metrics.FalseNegatives.from_config(fn_obj.get_config())
    self.assertEqual(fn_obj2.name, 'my_fn')
    self.assertEqual(len(fn_obj2.variables), 1)
    self.assertEqual(fn_obj2.thresholds, [0.4, 0.9])
  def test_unweighted(self):
    fn_obj = metrics.FalseNegatives()
    self.evaluate(variables.variables_initializer(fn_obj.variables))
    y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                   (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    update_op = fn_obj.update_state(y_true, y_pred)
    self.evaluate(update_op)
    result = fn_obj.result()
    self.assertAllClose(3., result)
  def test_weighted(self):
    # One weight per row; each false negative contributes its row weight.
    fn_obj = metrics.FalseNegatives()
    self.evaluate(variables.variables_initializer(fn_obj.variables))
    y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                   (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
    result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose(5., self.evaluate(result))
  def test_unweighted_with_thresholds(self):
    # With a threshold list, the metric reports one count per threshold.
    fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(variables.variables_initializer(fn_obj.variables))
    y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                                   (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                                   (1, 1, 1, 1)))
    update_op = fn_obj.update_state(y_true, y_pred)
    self.evaluate(update_op)
    result = fn_obj.result()
    self.assertAllClose([1., 4., 6.], result)
  def test_weighted_with_thresholds(self):
    # Per-row scalar weights broadcast across each row's predictions.
    fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(variables.variables_initializer(fn_obj.variables))
    y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                                   (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                                   (1, 1, 1, 1)))
    sample_weight = ((3.0,), (5.0,), (7.0,), (4.0,))
    result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose([4., 16., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TrueNegativesTest(test.TestCase):
  """Tests for the `metrics.TrueNegatives` confusion-matrix metric."""
  def test_config(self):
    # Constructor args must round-trip through get_config/from_config.
    tn_obj = metrics.TrueNegatives(name='my_tn', thresholds=[0.4, 0.9])
    self.assertEqual(tn_obj.name, 'my_tn')
    self.assertEqual(len(tn_obj.variables), 1)
    self.assertEqual(tn_obj.thresholds, [0.4, 0.9])
    # Check save and restore config
    tn_obj2 = metrics.TrueNegatives.from_config(tn_obj.get_config())
    self.assertEqual(tn_obj2.name, 'my_tn')
    self.assertEqual(len(tn_obj2.variables), 1)
    self.assertEqual(tn_obj2.thresholds, [0.4, 0.9])
  def test_unweighted(self):
    tn_obj = metrics.TrueNegatives()
    self.evaluate(variables.variables_initializer(tn_obj.variables))
    y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                   (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    update_op = tn_obj.update_state(y_true, y_pred)
    self.evaluate(update_op)
    result = tn_obj.result()
    self.assertAllClose(3., result)
  def test_weighted(self):
    # One weight per row; each true negative contributes its row weight.
    tn_obj = metrics.TrueNegatives()
    self.evaluate(variables.variables_initializer(tn_obj.variables))
    y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                   (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
    result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose(4., self.evaluate(result))
  def test_unweighted_with_thresholds(self):
    # With a threshold list, the metric reports one count per threshold.
    tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(variables.variables_initializer(tn_obj.variables))
    y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                                   (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                                   (1, 1, 1, 1)))
    update_op = tn_obj.update_state(y_true, y_pred)
    self.evaluate(update_op)
    result = tn_obj.result()
    self.assertAllClose([2., 5., 7.], result)
  def test_weighted_with_thresholds(self):
    # A single weight row broadcasts across all sample rows.
    tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(variables.variables_initializer(tn_obj.variables))
    y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                                   (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                                   (1, 1, 1, 1)))
    sample_weight = ((0.0, 2.0, 3.0, 5.0),)
    result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose([5., 15., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TruePositivesTest(test.TestCase):
  """Tests for the `metrics.TruePositives` confusion-matrix metric."""
  def test_config(self):
    # Constructor args must round-trip through get_config/from_config.
    tp_obj = metrics.TruePositives(name='my_tp', thresholds=[0.4, 0.9])
    self.assertEqual(tp_obj.name, 'my_tp')
    self.assertEqual(len(tp_obj.variables), 1)
    self.assertEqual(tp_obj.thresholds, [0.4, 0.9])
    # Check save and restore config
    tp_obj2 = metrics.TruePositives.from_config(tp_obj.get_config())
    self.assertEqual(tp_obj2.name, 'my_tp')
    self.assertEqual(len(tp_obj2.variables), 1)
    self.assertEqual(tp_obj2.thresholds, [0.4, 0.9])
  def test_unweighted(self):
    tp_obj = metrics.TruePositives()
    self.evaluate(variables.variables_initializer(tp_obj.variables))
    y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                   (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    update_op = tp_obj.update_state(y_true, y_pred)
    self.evaluate(update_op)
    result = tp_obj.result()
    self.assertAllClose(7., result)
  def test_weighted(self):
    # One weight per row; each true positive contributes its row weight.
    tp_obj = metrics.TruePositives()
    self.evaluate(variables.variables_initializer(tp_obj.variables))
    y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                                   (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                                   (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
    result = tp_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose(12., self.evaluate(result))
  def test_unweighted_with_thresholds(self):
    # With a threshold list, the metric reports one count per threshold.
    tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(variables.variables_initializer(tp_obj.variables))
    y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                                   (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                                   (1, 1, 1, 1)))
    update_op = tp_obj.update_state(y_true, y_pred)
    self.evaluate(update_op)
    result = tp_obj.result()
    self.assertAllClose([6., 3., 1.], result)
  def test_weighted_with_thresholds(self):
    # A scalar weight scales every counted true positive uniformly.
    tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(variables.variables_initializer(tp_obj.variables))
    y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                                   (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                                   (1, 1, 1, 1)))
    result = tp_obj(y_true, y_pred, sample_weight=37.)
    self.assertAllClose([222., 111., 37.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class PrecisionTest(test.TestCase):
  """Tests for `metrics.Precision` (thresholds, top_k, class_id, weights)."""
  def test_config(self):
    # Constructor args must round-trip through get_config/from_config.
    p_obj = metrics.Precision(
        name='my_precision', thresholds=[0.4, 0.9], top_k=15, class_id=12)
    self.assertEqual(p_obj.name, 'my_precision')
    self.assertEqual(len(p_obj.variables), 2)
    self.assertEqual([v.name for v in p_obj.variables],
                     ['true_positives:0', 'false_positives:0'])
    self.assertEqual(p_obj.thresholds, [0.4, 0.9])
    self.assertEqual(p_obj.top_k, 15)
    self.assertEqual(p_obj.class_id, 12)
    # Check save and restore config
    p_obj2 = metrics.Precision.from_config(p_obj.get_config())
    self.assertEqual(p_obj2.name, 'my_precision')
    self.assertEqual(len(p_obj2.variables), 2)
    self.assertEqual(p_obj2.thresholds, [0.4, 0.9])
    self.assertEqual(p_obj2.top_k, 15)
    self.assertEqual(p_obj2.class_id, 12)
  def test_value_is_idempotent(self):
    # result() must not mutate state; repeated calls return the same value.
    p_obj = metrics.Precision(thresholds=[0.3, 0.72])
    y_pred = random_ops.random_uniform(shape=(10, 3))
    y_true = random_ops.random_uniform(shape=(10, 3))
    update_op = p_obj.update_state(y_true, y_pred)
    self.evaluate(variables.variables_initializer(p_obj.variables))
    # Run several updates.
    for _ in range(10):
      self.evaluate(update_op)
    # Then verify idempotency.
    initial_precision = self.evaluate(p_obj.result())
    for _ in range(10):
      self.assertArrayNear(initial_precision, self.evaluate(p_obj.result()),
                           1e-3)
  def test_unweighted(self):
    p_obj = metrics.Precision()
    y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
    y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
    self.evaluate(variables.variables_initializer(p_obj.variables))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(0.5, self.evaluate(result))
  def test_unweighted_all_incorrect(self):
    # Labels are the inverse of predictions, so precision must be 0.
    p_obj = metrics.Precision(thresholds=[0.5])
    inputs = np.random.randint(0, 2, size=(100, 1))
    y_pred = constant_op.constant(inputs)
    y_true = constant_op.constant(1 - inputs)
    self.evaluate(variables.variables_initializer(p_obj.variables))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(0, self.evaluate(result))
  def test_weighted(self):
    p_obj = metrics.Precision()
    y_pred = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
    y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    self.evaluate(variables.variables_initializer(p_obj.variables))
    result = p_obj(
        y_true,
        y_pred,
        sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
    # Precision = weighted TP / weighted predicted positives.
    weighted_tp = 3.0 + 4.0
    weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
    expected_precision = weighted_tp / weighted_positives
    self.assertAlmostEqual(expected_precision, self.evaluate(result))
  def test_div_by_zero(self):
    # No predicted positives: precision is defined as 0, not NaN.
    p_obj = metrics.Precision()
    y_pred = constant_op.constant([0, 0, 0, 0])
    y_true = constant_op.constant([0, 0, 0, 0])
    self.evaluate(variables.variables_initializer(p_obj.variables))
    result = p_obj(y_true, y_pred)
    self.assertEqual(0, self.evaluate(result))
  def test_unweighted_with_threshold(self):
    p_obj = metrics.Precision(thresholds=[0.5, 0.7])
    y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
    y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
    self.evaluate(variables.variables_initializer(p_obj.variables))
    result = p_obj(y_true, y_pred)
    self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
  def test_weighted_with_threshold(self):
    p_obj = metrics.Precision(thresholds=[0.5, 1.])
    y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
    y_pred = constant_op.constant([[1, 0], [0.6, 0]],
                                  shape=(2, 2),
                                  dtype=dtypes.float32)
    weights = constant_op.constant([[4, 0], [3, 1]],
                                   shape=(2, 2),
                                   dtype=dtypes.float32)
    self.evaluate(variables.variables_initializer(p_obj.variables))
    result = p_obj(y_true, y_pred, sample_weight=weights)
    weighted_tp = 0 + 3.
    weighted_positives = (0 + 3.) + (4. + 0.)
    expected_precision = weighted_tp / weighted_positives
    self.assertArrayNear([expected_precision, 0], self.evaluate(result), 1e-3)
  def test_multiple_updates(self):
    # Running the same update op twice accumulates into both variables.
    p_obj = metrics.Precision(thresholds=[0.5, 1.])
    y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
    y_pred = constant_op.constant([[1, 0], [0.6, 0]],
                                  shape=(2, 2),
                                  dtype=dtypes.float32)
    weights = constant_op.constant([[4, 0], [3, 1]],
                                   shape=(2, 2),
                                   dtype=dtypes.float32)
    self.evaluate(variables.variables_initializer(p_obj.variables))
    update_op = p_obj.update_state(y_true, y_pred, sample_weight=weights)
    for _ in range(2):
      self.evaluate(update_op)
    weighted_tp = (0 + 3.) + (0 + 3.)
    weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
    expected_precision = weighted_tp / weighted_positives
    self.assertArrayNear([expected_precision, 0], self.evaluate(p_obj.result()),
                         1e-3)
  def test_unweighted_top_k(self):
    # Only the top-3 scoring predictions count as predicted positives.
    p_obj = metrics.Precision(top_k=3)
    y_pred = constant_op.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
    y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
    self.evaluate(variables.variables_initializer(p_obj.variables))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(1. / 3, self.evaluate(result))
  def test_weighted_top_k(self):
    p_obj = metrics.Precision(top_k=3)
    y_pred1 = constant_op.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
    y_true1 = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
    self.evaluate(variables.variables_initializer(p_obj.variables))
    self.evaluate(
        p_obj(
            y_true1,
            y_pred1,
            sample_weight=constant_op.constant([[1, 4, 2, 3, 5]])))
    y_pred2 = constant_op.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
    y_true2 = constant_op.constant([1, 0, 1, 1, 1], shape=(1, 5))
    result = p_obj(y_true2, y_pred2, sample_weight=constant_op.constant(3))
    # Expected counts combine both update calls (per-element, then scalar
    # weights).
    tp = (2 + 5) + (3 + 3)
    predicted_positives = (1 + 2 + 5) + (3 + 3 + 3)
    expected_precision = tp / predicted_positives
    self.assertAlmostEqual(expected_precision, self.evaluate(result))
  def test_unweighted_class_id(self):
    # Only column `class_id` contributes to the confusion counts.
    p_obj = metrics.Precision(class_id=2)
    self.evaluate(variables.variables_initializer(p_obj.variables))
    y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
    y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
    self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
    y_pred = constant_op.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
    y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
    self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
    y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
    y_true = constant_op.constant([0, 1, 0, 0, 0], shape=(1, 5))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(0.5, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
    self.assertAlmostEqual(1, self.evaluate(p_obj.false_positives))
  def test_unweighted_top_k_and_class_id(self):
    # class_id filtering composes with top_k filtering.
    p_obj = metrics.Precision(class_id=2, top_k=2)
    self.evaluate(variables.variables_initializer(p_obj.variables))
    y_pred = constant_op.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
    y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
    self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
    y_pred = constant_op.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
    y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
    self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
  def test_unweighted_top_k_and_threshold(self):
    # An explicit threshold composes with top_k filtering.
    p_obj = metrics.Precision(thresholds=.7, top_k=2)
    self.evaluate(variables.variables_initializer(p_obj.variables))
    y_pred = constant_op.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
    y_true = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
    self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
@test_util.run_all_in_graph_and_eager_modes
class RecallTest(test.TestCase):
  """Tests for `metrics.Recall` (thresholds, top_k, class_id, weights)."""
  def test_config(self):
    # Constructor args must round-trip through get_config/from_config.
    r_obj = metrics.Recall(
        name='my_recall', thresholds=[0.4, 0.9], top_k=15, class_id=12)
    self.assertEqual(r_obj.name, 'my_recall')
    self.assertEqual(len(r_obj.variables), 2)
    self.assertEqual([v.name for v in r_obj.variables],
                     ['true_positives:0', 'false_negatives:0'])
    self.assertEqual(r_obj.thresholds, [0.4, 0.9])
    self.assertEqual(r_obj.top_k, 15)
    self.assertEqual(r_obj.class_id, 12)
    # Check save and restore config
    r_obj2 = metrics.Recall.from_config(r_obj.get_config())
    self.assertEqual(r_obj2.name, 'my_recall')
    self.assertEqual(len(r_obj2.variables), 2)
    self.assertEqual(r_obj2.thresholds, [0.4, 0.9])
    self.assertEqual(r_obj2.top_k, 15)
    self.assertEqual(r_obj2.class_id, 12)
  def test_value_is_idempotent(self):
    # result() must not mutate state; repeated calls return the same value.
    r_obj = metrics.Recall(thresholds=[0.3, 0.72])
    y_pred = random_ops.random_uniform(shape=(10, 3))
    y_true = random_ops.random_uniform(shape=(10, 3))
    update_op = r_obj.update_state(y_true, y_pred)
    self.evaluate(variables.variables_initializer(r_obj.variables))
    # Run several updates.
    for _ in range(10):
      self.evaluate(update_op)
    # Then verify idempotency.
    initial_recall = self.evaluate(r_obj.result())
    for _ in range(10):
      self.assertArrayNear(initial_recall, self.evaluate(r_obj.result()), 1e-3)
  def test_unweighted(self):
    r_obj = metrics.Recall()
    y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
    y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
    self.evaluate(variables.variables_initializer(r_obj.variables))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(0.5, self.evaluate(result))
  def test_unweighted_all_incorrect(self):
    # Labels are the inverse of predictions, so recall must be 0.
    r_obj = metrics.Recall(thresholds=[0.5])
    inputs = np.random.randint(0, 2, size=(100, 1))
    y_pred = constant_op.constant(inputs)
    y_true = constant_op.constant(1 - inputs)
    self.evaluate(variables.variables_initializer(r_obj.variables))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(0, self.evaluate(result))
  def test_weighted(self):
    r_obj = metrics.Recall()
    y_pred = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
    y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    self.evaluate(variables.variables_initializer(r_obj.variables))
    result = r_obj(
        y_true,
        y_pred,
        sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
    # Recall = weighted TP / weighted actual positives.
    weighted_tp = 3.0 + 1.0
    weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
    expected_recall = weighted_tp / weighted_t
    self.assertAlmostEqual(expected_recall, self.evaluate(result))
  def test_div_by_zero(self):
    # No actual positives: recall is defined as 0, not NaN.
    r_obj = metrics.Recall()
    y_pred = constant_op.constant([0, 0, 0, 0])
    y_true = constant_op.constant([0, 0, 0, 0])
    self.evaluate(variables.variables_initializer(r_obj.variables))
    result = r_obj(y_true, y_pred)
    self.assertEqual(0, self.evaluate(result))
  def test_unweighted_with_threshold(self):
    r_obj = metrics.Recall(thresholds=[0.5, 0.7])
    y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
    y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
    self.evaluate(variables.variables_initializer(r_obj.variables))
    result = r_obj(y_true, y_pred)
    self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
  def test_weighted_with_threshold(self):
    r_obj = metrics.Recall(thresholds=[0.5, 1.])
    y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
    y_pred = constant_op.constant([[1, 0], [0.6, 0]],
                                  shape=(2, 2),
                                  dtype=dtypes.float32)
    weights = constant_op.constant([[1, 4], [3, 2]],
                                   shape=(2, 2),
                                   dtype=dtypes.float32)
    self.evaluate(variables.variables_initializer(r_obj.variables))
    result = r_obj(y_true, y_pred, sample_weight=weights)
    weighted_tp = 0 + 3.
    weighted_positives = (0 + 3.) + (4. + 0.)
    expected_recall = weighted_tp / weighted_positives
    self.assertArrayNear([expected_recall, 0], self.evaluate(result), 1e-3)
  def test_multiple_updates(self):
    # Running the same update op twice accumulates into both variables.
    r_obj = metrics.Recall(thresholds=[0.5, 1.])
    y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
    y_pred = constant_op.constant([[1, 0], [0.6, 0]],
                                  shape=(2, 2),
                                  dtype=dtypes.float32)
    weights = constant_op.constant([[1, 4], [3, 2]],
                                   shape=(2, 2),
                                   dtype=dtypes.float32)
    self.evaluate(variables.variables_initializer(r_obj.variables))
    update_op = r_obj.update_state(y_true, y_pred, sample_weight=weights)
    for _ in range(2):
      self.evaluate(update_op)
    weighted_tp = (0 + 3.) + (0 + 3.)
    weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
    expected_recall = weighted_tp / weighted_positives
    self.assertArrayNear([expected_recall, 0], self.evaluate(r_obj.result()),
                         1e-3)
  def test_unweighted_top_k(self):
    # Only the top-3 scoring predictions count as predicted positives.
    r_obj = metrics.Recall(top_k=3)
    y_pred = constant_op.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
    y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
    self.evaluate(variables.variables_initializer(r_obj.variables))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(0.5, self.evaluate(result))
  def test_weighted_top_k(self):
    r_obj = metrics.Recall(top_k=3)
    y_pred1 = constant_op.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
    y_true1 = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
    self.evaluate(variables.variables_initializer(r_obj.variables))
    self.evaluate(
        r_obj(
            y_true1,
            y_pred1,
            sample_weight=constant_op.constant([[1, 4, 2, 3, 5]])))
    y_pred2 = constant_op.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
    y_true2 = constant_op.constant([1, 0, 1, 1, 1], shape=(1, 5))
    result = r_obj(y_true2, y_pred2, sample_weight=constant_op.constant(3))
    # Expected counts combine both update calls (per-element, then scalar
    # weights).
    tp = (2 + 5) + (3 + 3)
    positives = (4 + 2 + 5) + (3 + 3 + 3 + 3)
    expected_recall = tp / positives
    self.assertAlmostEqual(expected_recall, self.evaluate(result))
  def test_unweighted_class_id(self):
    # Only column `class_id` contributes to the confusion counts.
    r_obj = metrics.Recall(class_id=2)
    self.evaluate(variables.variables_initializer(r_obj.variables))
    y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
    y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
    self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))
    y_pred = constant_op.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
    y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(0.5, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
    self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
    y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
    y_true = constant_op.constant([0, 1, 0, 0, 0], shape=(1, 5))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(0.5, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
    self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
  def test_unweighted_top_k_and_class_id(self):
    # class_id filtering composes with top_k filtering.
    r_obj = metrics.Recall(class_id=2, top_k=2)
    self.evaluate(variables.variables_initializer(r_obj.variables))
    y_pred = constant_op.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
    y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
    self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))
    y_pred = constant_op.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
    y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(0.5, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
    self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
  def test_unweighted_top_k_and_threshold(self):
    # An explicit threshold composes with top_k filtering.
    r_obj = metrics.Recall(thresholds=.7, top_k=2)
    self.evaluate(variables.variables_initializer(r_obj.variables))
    y_pred = constant_op.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
    y_true = constant_op.constant([1, 1, 1, 0, 1], shape=(1, 5))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(0.25, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
    self.assertAlmostEqual(3, self.evaluate(r_obj.false_negatives))
@test_util.run_all_in_graph_and_eager_modes
class SensitivityAtSpecificityTest(test.TestCase, parameterized.TestCase):
  """Tests for the `metrics.SensitivityAtSpecificity` metric."""
  def test_config(self):
    # Constructor args must round-trip through get_config/from_config.
    s_obj = metrics.SensitivityAtSpecificity(
        0.4, num_thresholds=100, name='sensitivity_at_specificity_1')
    self.assertEqual(s_obj.name, 'sensitivity_at_specificity_1')
    self.assertLen(s_obj.variables, 4)
    self.assertEqual(s_obj.specificity, 0.4)
    self.assertEqual(s_obj.num_thresholds, 100)
    # Check save and restore config
    s_obj2 = metrics.SensitivityAtSpecificity.from_config(s_obj.get_config())
    self.assertEqual(s_obj2.name, 'sensitivity_at_specificity_1')
    self.assertLen(s_obj2.variables, 4)
    self.assertEqual(s_obj2.specificity, 0.4)
    self.assertEqual(s_obj2.num_thresholds, 100)
  def test_value_is_idempotent(self):
    # result() must not mutate state; repeated calls return the same value.
    s_obj = metrics.SensitivityAtSpecificity(0.7)
    y_pred = random_ops.random_uniform((10, 3),
                                       maxval=1,
                                       dtype=dtypes.float32,
                                       seed=1)
    y_true = random_ops.random_uniform((10, 3),
                                       maxval=2,
                                       dtype=dtypes.int64,
                                       seed=1)
    update_op = s_obj.update_state(y_true, y_pred)
    self.evaluate(variables.variables_initializer(s_obj.variables))
    # Run several updates.
    for _ in range(10):
      self.evaluate(update_op)
    # Then verify idempotency.
    initial_sensitivity = self.evaluate(s_obj.result())
    for _ in range(10):
      self.assertAlmostEqual(initial_sensitivity, self.evaluate(s_obj.result()),
                             1e-3)
  def test_unweighted_all_correct(self):
    # Perfect predictions: sensitivity is 1 at any specificity target.
    s_obj = metrics.SensitivityAtSpecificity(0.7)
    inputs = np.random.randint(0, 2, size=(100, 1))
    y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
    y_true = constant_op.constant(inputs)
    self.evaluate(variables.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
  def test_unweighted_high_specificity(self):
    s_obj = metrics.SensitivityAtSpecificity(0.8)
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
    y_true = constant_op.constant(label_values)
    self.evaluate(variables.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    self.assertAlmostEqual(0.8, self.evaluate(result))
  def test_unweighted_low_specificity(self):
    s_obj = metrics.SensitivityAtSpecificity(0.4)
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
    y_true = constant_op.constant(label_values)
    self.evaluate(variables.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    self.assertAlmostEqual(0.6, self.evaluate(result))
  @parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
  def test_weighted(self, label_dtype):
    # The metric must accept labels of bool, int, and float dtypes alike.
    s_obj = metrics.SensitivityAtSpecificity(0.4)
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
    y_true = math_ops.cast(label_values, dtype=label_dtype)
    weights = constant_op.constant(weight_values)
    self.evaluate(variables.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred, sample_weight=weights)
    self.assertAlmostEqual(0.675, self.evaluate(result))
  def test_invalid_specificity(self):
    with self.assertRaisesRegexp(
        ValueError, r'`specificity` must be in the range \[0, 1\].'):
      metrics.SensitivityAtSpecificity(-1)
  def test_invalid_num_thresholds(self):
    with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
      metrics.SensitivityAtSpecificity(0.4, num_thresholds=-1)
@test_util.run_all_in_graph_and_eager_modes
class SpecificityAtSensitivityTest(test.TestCase, parameterized.TestCase):
  """Tests for the `metrics.SpecificityAtSensitivity` metric."""

  def test_config(self):
    """Constructor arguments round-trip through `get_config`/`from_config`."""
    s_obj = metrics.SpecificityAtSensitivity(
        0.4, num_thresholds=100, name='specificity_at_sensitivity_1')
    self.assertEqual(s_obj.name, 'specificity_at_sensitivity_1')
    self.assertLen(s_obj.variables, 4)
    self.assertEqual(s_obj.sensitivity, 0.4)
    self.assertEqual(s_obj.num_thresholds, 100)

    # Check save and restore config
    s_obj2 = metrics.SpecificityAtSensitivity.from_config(s_obj.get_config())
    self.assertEqual(s_obj2.name, 'specificity_at_sensitivity_1')
    self.assertLen(s_obj2.variables, 4)
    self.assertEqual(s_obj2.sensitivity, 0.4)
    self.assertEqual(s_obj2.num_thresholds, 100)

  def test_value_is_idempotent(self):
    """`result()` is read-only: repeated reads after updates agree."""
    s_obj = metrics.SpecificityAtSensitivity(0.7)
    y_pred = random_ops.random_uniform((10, 3),
                                       maxval=1,
                                       dtype=dtypes.float32,
                                       seed=1)
    y_true = random_ops.random_uniform((10, 3),
                                       maxval=2,
                                       dtype=dtypes.int64,
                                       seed=1)
    update_op = s_obj.update_state(y_true, y_pred)
    self.evaluate(variables.variables_initializer(s_obj.variables))

    # Run several updates.
    for _ in range(10):
      self.evaluate(update_op)

    # Then verify idempotency.
    initial_specificity = self.evaluate(s_obj.result())
    for _ in range(10):
      self.assertAlmostEqual(initial_specificity, self.evaluate(s_obj.result()),
                             1e-3)

  def test_unweighted_all_correct(self):
    """Specificity is 1 when the predictions exactly match the labels."""
    s_obj = metrics.SpecificityAtSensitivity(0.7)
    inputs = np.random.randint(0, 2, size=(100, 1))
    y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
    y_true = constant_op.constant(inputs)
    self.evaluate(variables.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))

  def test_unweighted_high_sensitivity(self):
    """At sensitivity 0.8 the achievable specificity for this data is 0.4."""
    s_obj = metrics.SpecificityAtSensitivity(0.8)
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
    y_true = constant_op.constant(label_values)
    self.evaluate(variables.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    self.assertAlmostEqual(0.4, self.evaluate(result))

  def test_unweighted_low_sensitivity(self):
    """At sensitivity 0.4 the achievable specificity for this data is 0.6."""
    s_obj = metrics.SpecificityAtSensitivity(0.4)
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
    y_true = constant_op.constant(label_values)
    self.evaluate(variables.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    self.assertAlmostEqual(0.6, self.evaluate(result))

  @parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
  def test_weighted(self, label_dtype):
    """Weighted specificity at sensitivity 0.4, across label dtypes."""
    s_obj = metrics.SpecificityAtSensitivity(0.4)
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
    y_true = math_ops.cast(label_values, dtype=label_dtype)
    weights = constant_op.constant(weight_values)
    self.evaluate(variables.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred, sample_weight=weights)
    self.assertAlmostEqual(0.4, self.evaluate(result))

  def test_invalid_sensitivity(self):
    """A sensitivity outside [0, 1] is rejected at construction time."""
    with self.assertRaisesRegexp(
        ValueError, r'`sensitivity` must be in the range \[0, 1\].'):
      metrics.SpecificityAtSensitivity(-1)

  def test_invalid_num_thresholds(self):
    """A non-positive `num_thresholds` is rejected at construction time."""
    with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
      metrics.SpecificityAtSensitivity(0.4, num_thresholds=-1)
@test_util.run_all_in_graph_and_eager_modes
class AUCTest(test.TestCase):
  """Tests for the `metrics.AUC` metric.

  NOTE(review): lowercase `setup` is deliberately not unittest's `setUp`;
  the tests that need the shared fixtures call `self.setup()` explicitly.
  """

  def setup(self):
    """Builds shared fixtures and documents the expected confusion counts."""
    self.num_thresholds = 3
    self.y_pred = constant_op.constant([0, 0.5, 0.3, 0.9], dtype=dtypes.float32)
    self.y_true = constant_op.constant([0, 0, 1, 1])
    self.sample_weight = [1, 2, 3, 4]

    # threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
    # y_pred when threshold = 0 - 1e-7 : [1, 1, 1, 1]
    # y_pred when threshold = 0.5 : [0, 0, 0, 1]
    # y_pred when threshold = 1 + 1e-7 : [0, 0, 0, 0]

    # without sample_weight:
    # tp = np.sum([[0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]], axis=1)
    # fp = np.sum([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
    # fn = np.sum([[0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 1]], axis=1)
    # tn = np.sum([[0, 0, 0, 0], [1, 1, 0, 0], [1, 1, 0, 0]], axis=1)

    # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]

    # with sample_weight:
    # tp = np.sum([[0, 0, 3, 4], [0, 0, 0, 4], [0, 0, 0, 0]], axis=1)
    # fp = np.sum([[1, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
    # fn = np.sum([[0, 0, 0, 0], [0, 0, 3, 0], [0, 0, 3, 4]], axis=1)
    # tn = np.sum([[0, 0, 0, 0], [1, 2, 0, 0], [1, 2, 0, 0]], axis=1)

    # tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]

  def test_config(self):
    """Config is JSON-serializable and round-trips through `from_config`."""
    auc_obj = metrics.AUC(
        num_thresholds=100,
        curve='PR',
        summation_method='majoring',
        name='auc_1')
    self.assertEqual(auc_obj.name, 'auc_1')
    self.assertEqual(len(auc_obj.variables), 4)
    self.assertEqual(auc_obj.num_thresholds, 100)
    self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
    self.assertEqual(auc_obj.summation_method,
                     metrics_utils.AUCSummationMethod.MAJORING)
    old_config = auc_obj.get_config()
    self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))

    # Check save and restore config.
    auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
    self.assertEqual(auc_obj2.name, 'auc_1')
    self.assertEqual(len(auc_obj2.variables), 4)
    self.assertEqual(auc_obj2.num_thresholds, 100)
    self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
    self.assertEqual(auc_obj2.summation_method,
                     metrics_utils.AUCSummationMethod.MAJORING)
    new_config = auc_obj2.get_config()
    self.assertDictEqual(old_config, new_config)
    self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)

  def test_config_manual_thresholds(self):
    """Explicit thresholds get 0/1 endpoints added and survive round-trip."""
    auc_obj = metrics.AUC(
        num_thresholds=None,
        curve='PR',
        summation_method='majoring',
        name='auc_1',
        thresholds=[0.3, 0.5])
    self.assertEqual(auc_obj.name, 'auc_1')
    self.assertEqual(len(auc_obj.variables), 4)
    self.assertEqual(auc_obj.num_thresholds, 4)
    self.assertAllClose(auc_obj.thresholds, [0.0, 0.3, 0.5, 1.0])
    self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
    self.assertEqual(auc_obj.summation_method,
                     metrics_utils.AUCSummationMethod.MAJORING)
    old_config = auc_obj.get_config()
    self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))

    # Check save and restore config.
    auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
    self.assertEqual(auc_obj2.name, 'auc_1')
    self.assertEqual(len(auc_obj2.variables), 4)
    self.assertEqual(auc_obj2.num_thresholds, 4)
    self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
    self.assertEqual(auc_obj2.summation_method,
                     metrics_utils.AUCSummationMethod.MAJORING)
    new_config = auc_obj2.get_config()
    self.assertDictEqual(old_config, new_config)
    self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)

  def test_value_is_idempotent(self):
    """`result()` is read-only: repeated reads after updates agree."""
    self.setup()
    auc_obj = metrics.AUC(num_thresholds=3)
    self.evaluate(variables.variables_initializer(auc_obj.variables))

    # Run several updates.
    update_op = auc_obj.update_state(self.y_true, self.y_pred)
    for _ in range(10):
      self.evaluate(update_op)

    # Then verify idempotency.
    initial_auc = self.evaluate(auc_obj.result())
    for _ in range(10):
      self.assertAllClose(initial_auc, self.evaluate(auc_obj.result()), 1e-3)

  def test_unweighted_all_correct(self):
    """AUC is 1 when predictions exactly match the labels."""
    self.setup()
    auc_obj = metrics.AUC()
    self.evaluate(variables.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_true)
    self.assertEqual(self.evaluate(result), 1)

  def test_unweighted(self):
    self.setup()
    auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
    self.evaluate(variables.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred)

    # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
    # recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
    # fp_rate = [2/2, 0, 0] = [1, 0, 0]
    # heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
    # widths = [(1 - 0), (0 - 0)] = [1, 0]
    expected_result = (0.75 * 1 + 0.25 * 0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_manual_thresholds(self):
    self.setup()
    # Verify that when specified, thresholds are used instead of num_thresholds.
    auc_obj = metrics.AUC(num_thresholds=2, thresholds=[0.5])
    self.assertEqual(auc_obj.num_thresholds, 3)
    self.assertAllClose(auc_obj.thresholds, [0.0, 0.5, 1.0])
    self.evaluate(variables.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred)

    # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
    # recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
    # fp_rate = [2/2, 0, 0] = [1, 0, 0]
    # heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
    # widths = [(1 - 0), (0 - 0)] = [1, 0]
    expected_result = (0.75 * 1 + 0.25 * 0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_weighted_roc_interpolation(self):
    self.setup()
    auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
    self.evaluate(variables.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)

    # tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
    # recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
    # fp_rate = [3/3, 0, 0] = [1, 0, 0]
    # heights = [(1 + 0.571)/2, (0.571 + 0)/2] = [0.7855, 0.2855]
    # widths = [(1 - 0), (0 - 0)] = [1, 0]
    expected_result = (0.7855 * 1 + 0.2855 * 0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_weighted_roc_majoring(self):
    self.setup()
    auc_obj = metrics.AUC(
        num_thresholds=self.num_thresholds, summation_method='majoring')
    self.evaluate(variables.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)

    # tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
    # recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
    # fp_rate = [3/3, 0, 0] = [1, 0, 0]
    # heights = [max(1, 0.571), max(0.571, 0)] = [1, 0.571]
    # widths = [(1 - 0), (0 - 0)] = [1, 0]
    expected_result = (1 * 1 + 0.571 * 0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_weighted_roc_minoring(self):
    self.setup()
    auc_obj = metrics.AUC(
        num_thresholds=self.num_thresholds, summation_method='minoring')
    self.evaluate(variables.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)

    # tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
    # recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
    # fp_rate = [3/3, 0, 0] = [1, 0, 0]
    # heights = [min(1, 0.571), min(0.571, 0)] = [0.571, 0]
    # widths = [(1 - 0), (0 - 0)] = [1, 0]
    expected_result = (0.571 * 1 + 0 * 0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_weighted_pr_majoring(self):
    self.setup()
    auc_obj = metrics.AUC(
        num_thresholds=self.num_thresholds,
        curve='PR',
        summation_method='majoring')
    self.evaluate(variables.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)

    # tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
    # precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
    # recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
    # heights = [max(0.7, 1), max(1, 0)] = [1, 1]
    # widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
    expected_result = (1 * 0.429 + 1 * 0.571)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_weighted_pr_minoring(self):
    self.setup()
    auc_obj = metrics.AUC(
        num_thresholds=self.num_thresholds,
        curve='PR',
        summation_method='minoring')
    self.evaluate(variables.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)

    # tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
    # precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
    # recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
    # heights = [min(0.7, 1), min(1, 0)] = [0.7, 0]
    # widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
    expected_result = (0.7 * 0.429 + 0 * 0.571)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_weighted_pr_interpolation(self):
    self.setup()
    auc_obj = metrics.AUC(num_thresholds=self.num_thresholds, curve='PR')
    self.evaluate(variables.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)

    # auc = (slope / Total Pos) * [dTP - intercept * log(Pb/Pa)]

    # tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
    # P = tp + fp = [10, 4, 0]
    # dTP = [7-4, 4-0] = [3, 4]
    # dP = [10-4, 4-0] = [6, 4]
    # slope = dTP/dP = [0.5, 1]
    # intercept = (TPa+(slope*Pa) = [(4 - 0.5*4), (0 - 1*0)] = [2, 0]
    # (Pb/Pa) = (Pb/Pa) if Pb > 0 AND Pa > 0 else 1 = [10/4, 4/0] = [2.5, 1]
    # auc * TotalPos = [(0.5 * (3 + 2 * log(2.5))), (1 * (4 + 0))]
    #                = [2.416, 4]
    # auc = [2.416, 4]/(tp[1:]+fn[1:])
    expected_result = (2.416/7 + 4/7)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_invalid_num_thresholds(self):
    """`num_thresholds` below 2 is rejected at construction time."""
    with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 1.'):
      metrics.AUC(num_thresholds=-1)

    with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 1.'):
      metrics.AUC(num_thresholds=1)

  def test_invalid_curve(self):
    """An unknown curve name is rejected at construction time."""
    with self.assertRaisesRegexp(ValueError,
                                 'Invalid AUC curve value "Invalid".'):
      metrics.AUC(curve='Invalid')

  def test_invalid_summation_method(self):
    """An unknown summation method is rejected at construction time."""
    with self.assertRaisesRegexp(
        ValueError, 'Invalid AUC summation method value "Invalid".'):
      metrics.AUC(summation_method='Invalid')
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/metrics_confusion_matrix_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import csv
import io
import json
import os
import re
import tempfile
import time
import numpy as np
import six
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.distribute import multi_worker_training_state as training_state
from tensorflow.python.keras.utils.data_utils import Sequence
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.compat import collections_abc
# `requests` is an optional dependency: degrade to None rather than failing
# the whole module import. Code paths that use it are presumably expected to
# check for None first — confirm at the call sites.
try:
  import requests
except ImportError:
  requests = None
def configure_callbacks(callbacks,
                        model,
                        do_validation=False,
                        batch_size=None,
                        epochs=None,
                        steps_per_epoch=None,
                        samples=None,
                        verbose=1,
                        count_mode='steps',
                        mode=ModeKeys.TRAIN):
  """Configures callbacks for use in various training loops.

  Arguments:
      callbacks: List of Callbacks.
      model: Model being trained.
      do_validation: Whether or not validation loop will be run.
      batch_size: Number of samples per batch.
      epochs: Number of epoch to train.
      steps_per_epoch: Number of batches to run per training epoch.
      samples: Number of training samples.
      verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
      count_mode: One of 'steps' or 'samples'. Per-batch or per-sample count.
      mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
        Which loop mode to configure callbacks for.

  Returns:
      Instance of CallbackList used to control all Callbacks.
  """
  # Nothing to do if the callbacks were already wrapped in a CallbackList.
  if isinstance(callbacks, CallbackList):
    return callbacks

  if not callbacks:
    callbacks = []

  # Training additionally gets the bookkeeping callbacks: metric averaging
  # (BaseLogger), the model's History, and optionally a progress bar.
  if mode == ModeKeys.TRAIN:
    model.history = History()
    callbacks = [BaseLogger()] + callbacks + [model.history]
    if verbose:
      callbacks.append(ProgbarLogger(count_mode))
  callback_list = CallbackList(callbacks)

  # Give every callback a reference to the (possibly wrapped) model.
  callback_model = model._get_callback_model()  # pylint: disable=protected-access
  callback_list.set_model(callback_model)

  set_callback_parameters(
      callback_list,
      model,
      do_validation=do_validation,
      batch_size=batch_size,
      epochs=epochs,
      steps_per_epoch=steps_per_epoch,
      samples=samples,
      verbose=verbose,
      mode=mode)

  callback_list.model.stop_training = False
  return callback_list
def set_callback_parameters(callback_list,
                            model,
                            do_validation=False,
                            batch_size=None,
                            epochs=None,
                            steps_per_epoch=None,
                            samples=None,
                            verbose=1,
                            mode=ModeKeys.TRAIN):
  """Sets callback parameters.

  Arguments:
      callback_list: CallbackList instance.
      model: Model being trained.
      do_validation: Whether or not validation loop will be run.
      batch_size: Number of samples per batch.
      epochs: Number of epoch to train.
      steps_per_epoch: Number of batches to run per training epoch.
      samples: Number of training samples.
      verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
      mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
        Which loop mode to configure callbacks for.
  """
  for callback in callback_list:
    if isinstance(callback, (BaseLogger, ProgbarLogger)):
      # The first entry of `metrics_names` is the loss; exclude it.
      callback.stateful_metrics = model.metrics_names[1:]

  # Build the list of metric names the callbacks will report on. In the
  # deferred-build scenario with iterator input, the model compiles only when
  # the first batch of data is standardized, so `metrics_names` may not exist.
  callback_metrics = []
  if mode != ModeKeys.PREDICT and hasattr(model, 'metrics_names'):
    callback_metrics = list(model.metrics_names)
    if do_validation:
      callback_metrics.extend('val_' + name for name in model.metrics_names)

  callback_list.set_params({
      'batch_size': batch_size,
      'epochs': epochs,
      'steps': steps_per_epoch,
      'samples': samples,
      'verbose': verbose,
      'do_validation': do_validation,
      'metrics': callback_metrics,
  })
def _is_generator_like(data):
"""Checks if data is a generator, Sequence, or Iterator."""
return (hasattr(data, 'next') or hasattr(data, '__next__') or isinstance(
data, (Sequence, iterator_ops.Iterator, iterator_ops.IteratorV2)))
def make_logs(model, logs, outputs, mode, prefix=''):
  """Computes logs for sending to `on_batch_end` methods."""
  if mode not in {ModeKeys.TRAIN, ModeKeys.TEST}:
    # Prediction has no named metrics; forward the raw outputs instead.
    logs['outputs'] = outputs
    return logs
  # Pair each metric name with its value. When the model has not been
  # compiled yet there are no metric names, so `logs` is left untouched.
  if hasattr(model, 'metrics_names'):
    for name, value in zip(model.metrics_names, outputs):
      logs[prefix + name] = value
  return logs
class CallbackList(object):
  """Container abstracting a list of callbacks.

  Fans every hook invocation out to each contained `Callback`, and keeps
  running timing statistics for the batch-level hooks so that callbacks that
  are slow relative to the batch itself can be reported.

  Arguments:
      callbacks: List of `Callback` instances.
      queue_length: Queue length for keeping
          running statistics over callback execution time.
  """

  def __init__(self, callbacks=None, queue_length=10):
    callbacks = callbacks or []
    self.callbacks = [c for c in callbacks]
    self.queue_length = queue_length
    self.params = {}
    self.model = None
    self._reset_batch_timing()

  def _reset_batch_timing(self):
    """Clears the per-hook timing statistics."""
    self._delta_t_batch = 0.
    self._delta_ts = collections.defaultdict(
        lambda: collections.deque([], maxlen=self.queue_length))

  def append(self, callback):
    """Adds a callback to the container."""
    self.callbacks.append(callback)

  def set_params(self, params):
    """Stores `params` and forwards them to every callback."""
    self.params = params
    for callback in self.callbacks:
      callback.set_params(params)

  def set_model(self, model):
    """Stores `model` and forwards it to every callback."""
    self.model = model
    for callback in self.callbacks:
      callback.set_model(model)

  def _call_batch_hook(self, mode, hook, batch, logs=None):
    """Helper function for all batch_{begin | end} methods."""
    if not self.callbacks:
      return
    hook_name = 'on_{mode}_batch_{hook}'.format(mode=mode, hook=hook)
    if hook == 'begin':
      self._t_enter_batch = time.time()
    if hook == 'end':
      # BUG FIX: an 'end' hook may fire without a matching 'begin' (e.g. a
      # caller that only issues end-of-batch hooks). `_t_enter_batch` is only
      # assigned in the 'begin' branch, so fall back to "now" instead of
      # raising AttributeError; the batch duration then reads as ~zero.
      if not hasattr(self, '_t_enter_batch'):
        self._t_enter_batch = time.time()
      # Batch is ending, calculate batch time.
      self._delta_t_batch = time.time() - self._t_enter_batch

    logs = logs or {}
    t_before_callbacks = time.time()
    for callback in self.callbacks:
      batch_hook = getattr(callback, hook_name)
      batch_hook(batch, logs)
    self._delta_ts[hook_name].append(time.time() - t_before_callbacks)

    # Warn when the median callback time dominates the batch time itself.
    delta_t_median = np.median(self._delta_ts[hook_name])
    if (self._delta_t_batch > 0. and
        delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1):
      logging.warning(
          'Method (%s) is slow compared '
          'to the batch update (%f). Check your callbacks.', hook_name,
          delta_t_median)

  def _call_begin_hook(self, mode):
    """Helper function for on_{train|test|predict}_begin methods."""
    if mode == ModeKeys.TRAIN:
      self.on_train_begin()
    elif mode == ModeKeys.TEST:
      self.on_test_begin()
    else:
      self.on_predict_begin()

  def _call_end_hook(self, mode):
    """Helper function for on_{train|test|predict}_end methods."""
    if mode == ModeKeys.TRAIN:
      self.on_train_end()
    elif mode == ModeKeys.TEST:
      self.on_test_end()
    else:
      self.on_predict_end()

  def on_batch_begin(self, batch, logs=None):
    """A backwards compatibility alias for `on_train_batch_begin`."""
    self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)

  def on_batch_end(self, batch, logs=None):
    """A backwards compatibility alias for `on_train_batch_end`."""
    self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)

  def on_epoch_begin(self, epoch, logs=None):
    """Calls the `on_epoch_begin` methods of its callbacks.

    This function should only be called during TRAIN mode.

    Arguments:
        epoch: integer, index of epoch.
        logs: dict. Currently no data is passed to this argument for this
          method but that may change in the future.
    """
    logs = logs or {}
    for callback in self.callbacks:
      callback.on_epoch_begin(epoch, logs)
    self._reset_batch_timing()

  def on_epoch_end(self, epoch, logs=None):
    """Calls the `on_epoch_end` methods of its callbacks.

    This function should only be called during TRAIN mode.

    Arguments:
        epoch: integer, index of epoch.
        logs: dict, metric results for this training epoch, and for the
          validation epoch if validation is performed. Validation result keys
          are prefixed with `val_`.
    """
    logs = logs or {}
    for callback in self.callbacks:
      callback.on_epoch_end(epoch, logs)

  def on_train_batch_begin(self, batch, logs=None):
    """Calls the `on_train_batch_begin` methods of its callbacks.

    Arguments:
        batch: integer, index of batch within the current epoch.
        logs: dict. Has keys `batch` and `size` representing the current batch
          number and the size of the batch.
    """
    self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)

  def on_train_batch_end(self, batch, logs=None):
    """Calls the `on_train_batch_end` methods of its callbacks.

    Arguments:
        batch: integer, index of batch within the current epoch.
        logs: dict. Metric results for this batch.
    """
    self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)

  def on_test_batch_begin(self, batch, logs=None):
    """Calls the `on_test_batch_begin` methods of its callbacks.

    Arguments:
        batch: integer, index of batch within the current epoch.
        logs: dict. Has keys `batch` and `size` representing the current batch
          number and the size of the batch.
    """
    self._call_batch_hook(ModeKeys.TEST, 'begin', batch, logs=logs)

  def on_test_batch_end(self, batch, logs=None):
    """Calls the `on_test_batch_end` methods of its callbacks.

    Arguments:
        batch: integer, index of batch within the current epoch.
        logs: dict. Metric results for this batch.
    """
    self._call_batch_hook(ModeKeys.TEST, 'end', batch, logs=logs)

  def on_predict_batch_begin(self, batch, logs=None):
    """Calls the `on_predict_batch_begin` methods of its callbacks.

    Arguments:
        batch: integer, index of batch within the current epoch.
        logs: dict. Has keys `batch` and `size` representing the current batch
          number and the size of the batch.
    """
    self._call_batch_hook(ModeKeys.PREDICT, 'begin', batch, logs=logs)

  def on_predict_batch_end(self, batch, logs=None):
    """Calls the `on_predict_batch_end` methods of its callbacks.

    Arguments:
        batch: integer, index of batch within the current epoch.
        logs: dict. Metric results for this batch.
    """
    self._call_batch_hook(ModeKeys.PREDICT, 'end', batch, logs=logs)

  def on_train_begin(self, logs=None):
    """Calls the `on_train_begin` methods of its callbacks.

    Arguments:
        logs: dict. Currently no data is passed to this argument for this
          method but that may change in the future.
    """
    for callback in self.callbacks:
      callback.on_train_begin(logs)

  def on_train_end(self, logs=None):
    """Calls the `on_train_end` methods of its callbacks.

    Arguments:
        logs: dict. Currently no data is passed to this argument for this
          method but that may change in the future.
    """
    for callback in self.callbacks:
      callback.on_train_end(logs)

  def on_test_begin(self, logs=None):
    """Calls the `on_test_begin` methods of its callbacks.

    Arguments:
        logs: dict. Currently no data is passed to this argument for this
          method but that may change in the future.
    """
    for callback in self.callbacks:
      callback.on_test_begin(logs)

  def on_test_end(self, logs=None):
    """Calls the `on_test_end` methods of its callbacks.

    Arguments:
        logs: dict. Currently no data is passed to this argument for this
          method but that may change in the future.
    """
    for callback in self.callbacks:
      callback.on_test_end(logs)

  def on_predict_begin(self, logs=None):
    """Calls the 'on_predict_begin` methods of its callbacks.

    Arguments:
        logs: dict. Currently no data is passed to this argument for this
          method but that may change in the future.
    """
    for callback in self.callbacks:
      callback.on_predict_begin(logs)

  def on_predict_end(self, logs=None):
    """Calls the `on_predict_end` methods of its callbacks.

    Arguments:
        logs: dict. Currently no data is passed to this argument for this
          method but that may change in the future.
    """
    for callback in self.callbacks:
      callback.on_predict_end(logs)

  def __iter__(self):
    return iter(self.callbacks)
@keras_export('keras.callbacks.Callback')
class Callback(object):
"""Abstract base class used to build new callbacks.
Attributes:
params: dict. Training parameters
(eg. verbosity, batch size, number of epochs...).
model: instance of `keras.models.Model`.
Reference of the model being trained.
validation_data: Deprecated. Do not use.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch.
Currently, the `.fit()` method of the `Model` class
will include the following quantities in the `logs` that
it passes to its callbacks:
on_epoch_end: logs include `acc` and `loss`, and
optionally include `val_loss`
(if validation is enabled in `fit`), and `val_acc`
(if validation and accuracy monitoring are enabled).
on_batch_begin: logs include `size`,
the number of samples in the current batch.
on_batch_end: logs include `loss`, and optionally `acc`
(if accuracy monitoring is enabled).
"""
def __init__(self):
self.validation_data = None
self.model = None
# Whether this Callback should only run on the chief worker in a
# Multi-Worker setting.
# TODO(omalleyt): Make this attr public once solution is stable.
self._chief_worker_only = None
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
def on_batch_begin(self, batch, logs=None):
"""A backwards compatibility alias for `on_train_batch_begin`."""
def on_batch_end(self, batch, logs=None):
"""A backwards compatibility alias for `on_train_batch_end`."""
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
Subclasses should override for any actions to run. This function should only
be called during TRAIN mode.
Arguments:
epoch: integer, index of epoch.
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
Subclasses should override for any actions to run. This function should only
be called during TRAIN mode.
Arguments:
epoch: integer, index of epoch.
logs: dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result keys
are prefixed with `val_`.
"""
def on_train_batch_begin(self, batch, logs=None):
"""Called at the beginning of a training batch in `fit` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
# For backwards compatibility.
self.on_batch_begin(batch, logs=logs)
def on_train_batch_end(self, batch, logs=None):
"""Called at the end of a training batch in `fit` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
# For backwards compatibility.
self.on_batch_end(batch, logs=logs)
def on_test_batch_begin(self, batch, logs=None):
"""Called at the beginning of a batch in `evaluate` methods.
Also called at the beginning of a validation batch in the `fit`
methods, if validation data is provided.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
def on_test_batch_end(self, batch, logs=None):
"""Called at the end of a batch in `evaluate` methods.
Also called at the end of a validation batch in the `fit`
methods, if validation data is provided.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
def on_predict_batch_begin(self, batch, logs=None):
"""Called at the beginning of a batch in `predict` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
def on_predict_batch_end(self, batch, logs=None):
"""Called at the end of a batch in `predict` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_train_end(self, logs=None):
"""Called at the end of training.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_test_begin(self, logs=None):
"""Called at the beginning of evaluation or validation.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_test_end(self, logs=None):
"""Called at the end of evaluation or validation.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_predict_begin(self, logs=None):
  """Called at the beginning of prediction.

  Subclasses should override for any actions to run. The base
  implementation is intentionally a no-op.

  Arguments:
      logs: Dict. Currently no data is passed to this argument for this
          method but that may change in the future.
  """
def on_predict_end(self, logs=None):
  """Called at the end of prediction.

  Subclasses should override for any actions to run. The base
  implementation is intentionally a no-op.

  Arguments:
      logs: Dict. Currently no data is passed to this argument for this
          method but that may change in the future.
  """
@keras_export('keras.callbacks.BaseLogger')
class BaseLogger(Callback):
  """Callback that accumulates epoch averages of metrics.

  This callback is automatically applied to every Keras model.

  Arguments:
      stateful_metrics: Iterable of string names of metrics that
          should *not* be averaged over an epoch.
          Metrics in this list will be logged as-is in `on_epoch_end`.
          All others will be averaged in `on_epoch_end`.
  """

  def __init__(self, stateful_metrics=None):
    super(BaseLogger, self).__init__()
    self.stateful_metrics = set(stateful_metrics or [])

  def on_epoch_begin(self, epoch, logs=None):
    # Reset running totals at the start of every epoch.
    self.seen = 0
    self.totals = {}

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    batch_size = logs.get('size', 0)
    # A distribution strategy can run several steps at once; weight the
    # number of samples seen accordingly.
    num_steps = logs.get('num_steps', 1)
    self.seen += batch_size * num_steps

    for name, value in logs.items():
      if name in self.stateful_metrics:
        # Stateful metrics aggregate internally; keep the latest value.
        self.totals[name] = value
      elif name in self.totals:
        self.totals[name] += value * batch_size
      else:
        self.totals[name] = value * batch_size

  def on_epoch_end(self, epoch, logs=None):
    if logs is None:
      return
    for name in self.params['metrics']:
      if name not in self.totals:
        continue
      # Make value available to next callbacks.
      if name in self.stateful_metrics:
        logs[name] = self.totals[name]
      else:
        logs[name] = self.totals[name] / self.seen
@keras_export('keras.callbacks.TerminateOnNaN')
class TerminateOnNaN(Callback):
  """Callback that terminates training when a NaN loss is encountered."""

  def on_batch_end(self, batch, logs=None):
    # Stop training as soon as the loss becomes NaN or infinite.
    loss = (logs or {}).get('loss')
    if loss is None:
      return
    if np.isnan(loss) or np.isinf(loss):
      print('Batch %d: Invalid loss, terminating training' % (batch))
      self.model.stop_training = True
@keras_export('keras.callbacks.ProgbarLogger')
class ProgbarLogger(Callback):
  """Callback that prints metrics to stdout.

  Arguments:
      count_mode: One of "steps" or "samples".
          Whether the progress bar should
          count samples seen or steps (batches) seen.
      stateful_metrics: Iterable of string names of metrics that
          should *not* be averaged over an epoch.
          Metrics in this list will be logged as-is.
          All others will be averaged over time (e.g. loss, etc).

  Raises:
      ValueError: In case of invalid `count_mode`.
  """

  def __init__(self, count_mode='samples', stateful_metrics=None):
    super(ProgbarLogger, self).__init__()
    if count_mode not in ('samples', 'steps'):
      raise ValueError('Unknown `count_mode`: ' + str(count_mode))
    self.use_steps = count_mode == 'steps'
    self.stateful_metrics = set(stateful_metrics or [])

  def on_train_begin(self, logs=None):
    self.verbose = self.params['verbose']
    self.epochs = self.params['epochs']

  def on_epoch_begin(self, epoch, logs=None):
    self.seen = 0
    self.target = (
        self.params['steps'] if self.use_steps else self.params['samples'])
    if self.verbose:
      if self.epochs > 1:
        print('Epoch %d/%d' % (epoch + 1, self.epochs))
    self.progbar = Progbar(
        target=self.target,
        verbose=self.verbose,
        stateful_metrics=self.stateful_metrics,
        unit_name='step' if self.use_steps else 'sample')

  def on_batch_begin(self, batch, logs=None):
    self.log_values = []

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    batch_size = logs.get('size', 0)
    # A distribution strategy can run several steps at once; account for
    # that in the `seen` counter.
    num_steps = logs.get('num_steps', 1)
    self.seen += num_steps if self.use_steps else batch_size * num_steps

    for metric in self.params['metrics']:
      if metric in logs:
        self.log_values.append((metric, logs[metric]))

    # Skip progbar update for the last batch;
    # will be handled by on_epoch_end.
    if self.verbose and (self.target is None or self.seen < self.target):
      self.progbar.update(self.seen, self.log_values)

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    for metric in self.params['metrics']:
      if metric in logs:
        self.log_values.append((metric, logs[metric]))
    if self.verbose:
      self.progbar.update(self.seen, self.log_values)
@keras_export('keras.callbacks.History')
class History(Callback):
  """Callback that records events into a `History` object.

  This callback is automatically applied to
  every Keras model. The `History` object
  gets returned by the `fit` method of models.
  """

  def on_train_begin(self, logs=None):
    self.epoch = []
    self.history = {}

  def on_epoch_end(self, epoch, logs=None):
    self.epoch.append(epoch)
    # Append every reported metric to its per-key series.
    for key, value in (logs or {}).items():
      self.history.setdefault(key, []).append(value)
@keras_export('keras.callbacks.ModelCheckpoint')
class ModelCheckpoint(Callback):
  """Save the model after every epoch.

  `filepath` can contain named formatting options,
  which will be filled the value of `epoch` and
  keys in `logs` (passed in `on_epoch_end`).

  For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
  then the model checkpoints will be saved with the epoch number and
  the validation loss in the filename.

  Arguments:
      filepath: string, path to save the model file.
      monitor: quantity to monitor.
      verbose: verbosity mode, 0 or 1.
      save_best_only: if `save_best_only=True`, the latest best model
          according to the quantity monitored will not be overwritten.
      mode: one of {auto, min, max}. If `save_best_only=True`, the decision
          to overwrite the current save file is made based on either the
          maximization or the minimization of the monitored quantity. For
          `val_acc`, this should be `max`, for `val_loss` this should be
          `min`, etc. In `auto` mode, the direction is automatically inferred
          from the name of the monitored quantity.
      save_weights_only: if True, then only the model's weights will be saved
          (`model.save_weights(filepath)`), else the full model is saved
          (`model.save(filepath)`).
      save_freq: `'epoch'` or integer. When using `'epoch'`, the callback
          saves the model after each epoch. When using integer, the callback
          saves the model at end of a batch at which this many samples have
          been seen since last saving. Note that if the saving isn't aligned
          to epochs, the monitored metric may potentially be less reliable
          (it could reflect as little as 1 batch, since the metrics get reset
          every epoch). Defaults to `'epoch'`
      **kwargs: Additional arguments for backwards compatibility. Possible
          key is `period`.
  """

  def __init__(self,
               filepath,
               monitor='val_loss',
               verbose=0,
               save_best_only=False,
               save_weights_only=False,
               mode='auto',
               save_freq='epoch',
               **kwargs):
    super(ModelCheckpoint, self).__init__()
    self.monitor = monitor
    self.verbose = verbose
    self.filepath = filepath
    self.save_best_only = save_best_only
    self.save_weights_only = save_weights_only
    self.save_freq = save_freq
    # Counter used when `period` (deprecated) is in effect.
    self.epochs_since_last_save = 0
    # Counter used when `save_freq` is an integer number of samples.
    self._samples_seen_since_last_saving = 0

    # Deprecated field `load_weights_on_restart` is for loading the checkpoint
    # file from `filepath` at the start of `model.fit()`
    # TODO(rchao): Remove the arg during next breaking release.
    if 'load_weights_on_restart' in kwargs:
      self.load_weights_on_restart = kwargs['load_weights_on_restart']
      logging.warning('`load_weights_on_restart` argument is deprecated. '
                      'Please use `model.load_weights()` for loading weights '
                      'before the start of `model.fit()`.')
    else:
      self.load_weights_on_restart = False

    # Deprecated field `period` is for the number of epochs between which
    # the model is saved.
    if 'period' in kwargs:
      self.period = kwargs['period']
      logging.warning('`period` argument is deprecated. Please use `save_freq` '
                      'to specify the frequency in number of samples seen.')
    else:
      self.period = 1

    if mode not in ['auto', 'min', 'max']:
      logging.warning('ModelCheckpoint mode %s is unknown, '
                      'fallback to auto mode.', mode)
      mode = 'auto'

    # `monitor_op` compares a new metric value against the best so far;
    # `best` is initialized so the first comparison always succeeds.
    if mode == 'min':
      self.monitor_op = np.less
      self.best = np.Inf
    elif mode == 'max':
      self.monitor_op = np.greater
      self.best = -np.Inf
    else:
      # 'auto' mode: infer the direction from the metric name.
      if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
        self.monitor_op = np.greater
        self.best = -np.Inf
      else:
        self.monitor_op = np.less
        self.best = np.Inf

    if self.save_freq != 'epoch' and not isinstance(self.save_freq, int):
      raise ValueError('Unrecognized save_freq: {}'.format(self.save_freq))

    # Only the chief worker writes model checkpoints, but all workers
    # restore checkpoint at on_train_begin().
    self._chief_worker_only = False

  def set_model(self, model):
    self.model = model
    # Use name matching rather than `isinstance` to avoid circular dependencies.
    # Subclassed (non-graph, non-Sequential) models cannot be saved whole;
    # force weights-only saving for them.
    if (not self.save_weights_only and
        not model._is_graph_network and  # pylint: disable=protected-access
        model.__class__.__name__ != 'Sequential'):
      self.save_weights_only = True

  def on_train_begin(self, logs=None):
    """Optionally restores weights from a prior run before training starts."""
    # pylint: disable=protected-access
    if self.model._in_multi_worker_mode():
      # MultiWorkerTrainingState is used to manage the training state needed
      # for preemption-recovery of a worker in multi-worker training.
      self.model._training_state = (
          training_state.MultiWorkerTrainingState(self.model, self.filepath))
      self._training_state = self.model._training_state
      if self._training_state.restore():
        # If the training state needs to be and is successfully restored,
        # it is recovering from a previous failure (or preemption). In such
        # case, do not load the weights from user specified file path.
        return

    # If this is not multi worker training, restoring is not needed, or
    # restoring failed, check if it should load weights on restart.
    if self.load_weights_on_restart:
      if (not self.model._in_multi_worker_mode() or
          multi_worker_util.should_load_checkpoint()):
        filepath_to_load = (
            self._get_most_recently_modified_file_matching_pattern(
                self.filepath))
        if (filepath_to_load is not None and
            training_state.checkpoint_exists(filepath_to_load)):
          try:
            # `filepath` may contain placeholders such as `{epoch:02d}`, and
            # thus it attempts to load the most recently modified file with file
            # name matching the pattern.
            self.model.load_weights(filepath_to_load)
          except (IOError, ValueError) as e:
            raise ValueError('Error loading file from {}. Reason: {}'.format(
                filepath_to_load, e))

  def on_train_end(self, logs=None):
    # pylint: disable=protected-access
    if self.model._in_multi_worker_mode():
      # In multi-worker training, on successful exit of training, delete the
      # training state backup file that was saved for the purpose of worker
      # recovery.
      self._training_state.delete_backup()
      # Restore the training state so the model is ready for next (possible)
      # multi worker training.
      del self._training_state
      del self.model._training_state

  def on_batch_end(self, batch, logs=None):
    # Sample-count-based saving: only active when `save_freq` is an integer.
    logs = logs or {}
    if isinstance(self.save_freq, int):
      self._samples_seen_since_last_saving += logs.get('size', 1)
      if self._samples_seen_since_last_saving >= self.save_freq:
        self._save_model(epoch=self._current_epoch, logs=logs)
        self._samples_seen_since_last_saving = 0

  def on_epoch_begin(self, epoch, logs=None):
    # Remembered so batch-level saving can report the epoch it happened in.
    self._current_epoch = epoch

  def on_epoch_end(self, epoch, logs=None):
    self.epochs_since_last_save += 1
    # pylint: disable=protected-access
    if self.save_freq == 'epoch':
      if self.model._in_multi_worker_mode():
        # Exclude training state variables in user-requested checkpoint file.
        with self._training_state.untrack_vars():
          self._save_model(epoch=epoch, logs=logs)
      else:
        self._save_model(epoch=epoch, logs=logs)
    if self.model._in_multi_worker_mode():
      # For multi-worker training, back up the weights and current training
      # state for possible future recovery.
      # TODO(rchao): Call `back_up` at finer period such as N steps.
      self._training_state.back_up(epoch)

  def _save_model(self, epoch, logs):
    """Saves the model.

    Arguments:
        epoch: the epoch this iteration is in.
        logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
    """
    logs = logs or {}

    if isinstance(self.save_freq,
                  int) or self.epochs_since_last_save >= self.period:
      self.epochs_since_last_save = 0
      filepath = self._get_file_path(epoch, logs)

      if self.save_best_only:
        current = logs.get(self.monitor)
        if current is None:
          logging.warning('Can save best model only with %s available, '
                          'skipping.', self.monitor)
        else:
          if self.monitor_op(current, self.best):
            if self.verbose > 0:
              print('\nEpoch %05d: %s improved from %0.5f to %0.5f,'
                    ' saving model to %s' % (epoch + 1, self.monitor, self.best,
                                             current, filepath))
            self.best = current
            if self.save_weights_only:
              self.model.save_weights(filepath, overwrite=True)
            else:
              self.model.save(filepath, overwrite=True)
          else:
            if self.verbose > 0:
              print('\nEpoch %05d: %s did not improve from %0.5f' %
                    (epoch + 1, self.monitor, self.best))
      else:
        if self.verbose > 0:
          print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
        if self.save_weights_only:
          self.model.save_weights(filepath, overwrite=True)
        else:
          self.model.save(filepath, overwrite=True)

      self._maybe_remove_file()

  def _get_file_path(self, epoch, logs):
    """Returns the file path for checkpoint."""
    # pylint: disable=protected-access
    if not self.model._in_multi_worker_mode(
    ) or multi_worker_util.should_save_checkpoint():
      return self.filepath.format(epoch=epoch + 1, **logs)
    else:
      # If this is multi-worker training, and this worker should not
      # save checkpoint, we use a temp filepath to store a dummy checkpoint, so
      # it writes to a file that will be removed at the end of `_save_model()`
      # call. This is because the SyncOnReadVariable needs to be synced across
      # all the workers in order to be read, and all workers need to initiate
      # that.
      self._temp_file_dir = tempfile.mkdtemp()
      extension = os.path.splitext(self.filepath)[1]
      return os.path.join(self._temp_file_dir, 'temp' + extension)

  def _maybe_remove_file(self):
    # Remove the checkpoint directory in multi-worker training where this worker
    # should not checkpoint. It is a dummy directory previously saved for sync
    # distributed training.
    if (self.model._in_multi_worker_mode() and  # pylint: disable=protected-access
        not multi_worker_util.should_save_checkpoint()):
      file_io.delete_recursively(self._temp_file_dir)
      del self._temp_file_dir

  def _get_most_recently_modified_file_matching_pattern(self, pattern):
    """Returns the most recently modified filepath matching pattern.

    Pattern may contain python formatting placeholder. If
    `tf.train.latest_checkpoint()` does not return None, use that; otherwise,
    check for most recently modified one that matches the pattern.

    In the rare case where there are more than one pattern-matching file having
    the same modified time that is most recent among all, return the filepath
    that is largest (by `>` operator, lexicographically using the numeric
    equivalents). This provides a tie-breaker when multiple files are most
    recent. Note that a larger `filepath` can sometimes indicate a later time
    of modification (for instance, when epoch/batch is used as formatting
    option), but not necessarily (when accuracy or loss is used). The
    tie-breaker is put in the logic as best effort to return the most recent,
    and to avoid undeterministic result.

    Modified time of a file is obtained with `os.path.getmtime()`.

    This utility function is best demonstrated via an example:

    ```python
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    file_paths = [
        os.path.join(test_dir, file_name) for file_name in
        ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
    ]
    for file_path in file_paths:
      # Write something to each of the files
    self.assertEqual(
        _get_most_recently_modified_file_matching_pattern(path_pattern),
        file_paths[-1])
    ```

    Arguments:
        pattern: The file pattern that may optionally contain python
            placeholder such as `{epoch:02d}`.

    Returns:
        The most recently modified file's full filepath matching `pattern`. If
        `pattern` does not contain any placeholder, this returns the filepath
        that exactly matches `pattern`. Returns `None` if no match is found.
    """
    dir_name = os.path.dirname(pattern)
    base_name = os.path.basename(pattern)
    # Turn format placeholders like `{epoch:02d}` into a `.*` wildcard.
    base_name_regex = '^' + re.sub(r'{.*}', r'.*', base_name) + '$'

    # If tf.train.latest_checkpoint tells us there exists a latest checkpoint,
    # use that as it is more robust than `os.path.getmtime()`.
    latest_tf_checkpoint = checkpoint_management.latest_checkpoint(dir_name)
    if latest_tf_checkpoint is not None and re.match(
        base_name_regex, os.path.basename(latest_tf_checkpoint)):
      return latest_tf_checkpoint

    latest_mod_time = 0
    file_path_with_latest_mod_time = None
    n_file_with_latest_mod_time = 0
    file_path_with_largest_file_name = None

    if file_io.file_exists(dir_name):
      for file_name in os.listdir(dir_name):
        # Only consider if `file_name` matches the pattern.
        if re.match(base_name_regex, file_name):
          file_path = os.path.join(dir_name, file_name)
          mod_time = os.path.getmtime(file_path)
          if (file_path_with_largest_file_name is None or
              file_path > file_path_with_largest_file_name):
            file_path_with_largest_file_name = file_path
          if mod_time > latest_mod_time:
            latest_mod_time = mod_time
            file_path_with_latest_mod_time = file_path
            # In the case a file with later modified time is found, reset
            # the counter for the number of files with latest modified time.
            n_file_with_latest_mod_time = 1
          elif mod_time == latest_mod_time:
            # In the case a file has modified time tied with the most recent,
            # increment the counter for the number of files with latest
            # modified time by 1.
            n_file_with_latest_mod_time += 1

    if n_file_with_latest_mod_time == 1:
      # Return the sole file that has most recent modified time.
      return file_path_with_latest_mod_time
    else:
      # If there are more than one file having latest modified time, return
      # the file path with the largest file name.
      return file_path_with_largest_file_name
@keras_export('keras.callbacks.EarlyStopping')
class EarlyStopping(Callback):
  """Stop training when a monitored quantity has stopped improving.

  Arguments:
      monitor: Quantity to be monitored.
      min_delta: Minimum change in the monitored quantity
          to qualify as an improvement, i.e. an absolute
          change of less than min_delta, will count as no
          improvement.
      patience: Number of epochs with no improvement
          after which training will be stopped.
      verbose: verbosity mode.
      mode: One of `{"auto", "min", "max"}`. In `min` mode,
          training will stop when the quantity
          monitored has stopped decreasing; in `max`
          mode it will stop when the quantity
          monitored has stopped increasing; in `auto`
          mode, the direction is automatically inferred
          from the name of the monitored quantity.
      baseline: Baseline value for the monitored quantity.
          Training will stop if the model doesn't show improvement over the
          baseline.
      restore_best_weights: Whether to restore model weights from
          the epoch with the best value of the monitored quantity.
          If False, the model weights obtained at the last step of
          training are used.

  Example:

  ```python
  callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
  # This callback will stop the training when there is no improvement in
  # the validation loss for three consecutive epochs.
  model.fit(data, labels, epochs=100, callbacks=[callback],
      validation_data=(val_data, val_labels))
  ```
  """

  def __init__(self,
               monitor='val_loss',
               min_delta=0,
               patience=0,
               verbose=0,
               mode='auto',
               baseline=None,
               restore_best_weights=False):
    super(EarlyStopping, self).__init__()
    self.monitor = monitor
    self.patience = patience
    self.verbose = verbose
    self.baseline = baseline
    self.min_delta = abs(min_delta)
    self.wait = 0
    self.stopped_epoch = 0
    self.restore_best_weights = restore_best_weights
    self.best_weights = None

    if mode not in ['auto', 'min', 'max']:
      logging.warning('EarlyStopping mode %s is unknown, '
                      'fallback to auto mode.', mode)
      mode = 'auto'

    if mode == 'min':
      self.monitor_op = np.less
    elif mode == 'max':
      self.monitor_op = np.greater
    else:
      # 'auto' mode: infer the direction from the metric name.
      self.monitor_op = np.greater if 'acc' in self.monitor else np.less

    # Sign `min_delta` so that `monitor_op(current - min_delta, best)` means
    # "improved by at least min_delta" in either direction.
    if self.monitor_op == np.greater:
      self.min_delta *= 1
    else:
      self.min_delta *= -1

  def on_train_begin(self, logs=None):
    # Allow instances to be re-used across multiple `fit` calls.
    self.wait = 0
    self.stopped_epoch = 0
    if self.baseline is not None:
      self.best = self.baseline
    else:
      self.best = np.Inf if self.monitor_op == np.less else -np.Inf

  def on_epoch_end(self, epoch, logs=None):
    current = self.get_monitor_value(logs)
    if current is None:
      return
    if self.monitor_op(current - self.min_delta, self.best):
      # Improvement: remember the best value and reset the patience counter.
      self.best = current
      self.wait = 0
      if self.restore_best_weights:
        self.best_weights = self.model.get_weights()
      return
    # No improvement this epoch.
    self.wait += 1
    if self.wait >= self.patience:
      self.stopped_epoch = epoch
      self.model.stop_training = True
      if self.restore_best_weights:
        if self.verbose > 0:
          print('Restoring model weights from the end of the best epoch.')
        self.model.set_weights(self.best_weights)

  def on_train_end(self, logs=None):
    if self.stopped_epoch > 0 and self.verbose > 0:
      print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))

  def get_monitor_value(self, logs):
    logs = logs or {}
    monitor_value = logs.get(self.monitor)
    if monitor_value is None:
      logging.warning('Early stopping conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s',
                      self.monitor, ','.join(list(logs.keys())))
    return monitor_value
@keras_export('keras.callbacks.RemoteMonitor')
class RemoteMonitor(Callback):
  """Callback used to stream events to a server.

  Requires the `requests` library.
  Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
  HTTP POST, with a `data` argument which is a
  JSON-encoded dictionary of event data.
  If send_as_json is set to True, the content type of the request will be
  application/json. Otherwise the serialized JSON will be sent within a form.

  Arguments:
      root: String; root url of the target server.
      path: String; path relative to `root` to which the events will be sent.
      field: String; JSON field under which the data will be stored.
          The field is used only if the payload is sent within a form
          (i.e. send_as_json is set to False).
      headers: Dictionary; optional custom HTTP headers.
      send_as_json: Boolean; whether the request should be
          sent as application/json.
  """

  def __init__(self,
               root='http://localhost:9000',
               path='/publish/epoch/end/',
               field='data',
               headers=None,
               send_as_json=False):
    super(RemoteMonitor, self).__init__()
    self.root = root
    self.path = path
    self.field = field
    self.headers = headers
    self.send_as_json = send_as_json

  def on_epoch_end(self, epoch, logs=None):
    if requests is None:
      raise ImportError('RemoteMonitor requires the `requests` library.')
    # Build the payload: epoch number plus every reported metric.
    send = {'epoch': epoch}
    send.update(logs or {})
    url = self.root + self.path
    try:
      if self.send_as_json:
        requests.post(url, json=send, headers=self.headers)
      else:
        requests.post(url, {self.field: json.dumps(send)},
                      headers=self.headers)
    except requests.exceptions.RequestException:
      logging.warning('Warning: could not reach RemoteMonitor '
                      'root server at ' + str(self.root))
@keras_export('keras.callbacks.LearningRateScheduler')
class LearningRateScheduler(Callback):
  """Learning rate scheduler.

  Arguments:
      schedule: a function that takes an epoch index as input
          (integer, indexed from 0) and returns a new
          learning rate as output (float).
      verbose: int. 0: quiet, 1: update messages.

  ```python
  # This function keeps the learning rate at 0.001 for the first ten epochs
  # and decreases it exponentially after that.
  def scheduler(epoch):
    if epoch < 10:
      return 0.001
    else:
      return 0.001 * tf.math.exp(0.1 * (10 - epoch))

  callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
  model.fit(data, labels, epochs=100, callbacks=[callback],
      validation_data=(val_data, val_labels))
  ```
  """

  def __init__(self, schedule, verbose=0):
    super(LearningRateScheduler, self).__init__()
    self.schedule = schedule
    self.verbose = verbose

  def on_epoch_begin(self, epoch, logs=None):
    optimizer = self.model.optimizer
    if not hasattr(optimizer, 'lr'):
      raise ValueError('Optimizer must have a "lr" attribute.')
    try:
      # New API: schedule(epoch, current_lr).
      current = float(K.get_value(optimizer.lr))
      lr = self.schedule(epoch, current)
    except TypeError:
      # Support for old API for backward compatibility: schedule(epoch).
      lr = self.schedule(epoch)
    if not isinstance(lr, (float, np.float32, np.float64)):
      raise ValueError('The output of the "schedule" function '
                       'should be float.')
    K.set_value(optimizer.lr, lr)
    if self.verbose > 0:
      print('\nEpoch %05d: LearningRateScheduler reducing learning '
            'rate to %s.' % (epoch + 1, lr))

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    # Expose the (possibly updated) learning rate to downstream callbacks.
    logs['lr'] = K.get_value(self.model.optimizer.lr)
@keras_export('keras.callbacks.TensorBoard', v1=[])
class TensorBoard(Callback):
# pylint: disable=line-too-long
"""Enable visualizations for TensorBoard.
TensorBoard is a visualization tool provided with TensorFlow.
This callback logs events for TensorBoard, including:
* Metrics summary plots
* Training graph visualization
* Activation histograms
* Sampled profiling
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```sh
tensorboard --logdir=path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Arguments:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation and
weight histograms for the layers of the model. If set to 0, histograms
won't be computed. Validation data (or split) must be specified for
histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard. The log file
can become quite large when write_graph is set to True.
write_images: whether to write model weights to visualize as image in
TensorBoard.
update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
writes the losses and metrics to TensorBoard after each batch. The same
applies for `'epoch'`. If using an integer, let's say `1000`, the
callback will write the metrics and losses to TensorBoard every 1000
batches. Note that writing too frequently to TensorBoard can slow down
your training.
profile_batch: Profile the batch to sample compute characteristics. By
default, it will profile the second batch. Set profile_batch=0 to
disable profiling. Must run in TensorFlow eager mode.
embeddings_freq: frequency (in epochs) at which embedding layers will
be visualized. If set to 0, embeddings won't be visualized.
embeddings_metadata: a dictionary which maps layer name to a file name in
which metadata for this embedding layer is saved. See the
[details](
https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about metadata files format. In case if the same metadata file is
used for all embedding layers, string can be passed.
Raises:
ValueError: If histogram_freq is set and no validation data is provided.
"""
# pylint: enable=line-too-long
def __init__(self,
             log_dir='logs',
             histogram_freq=0,
             write_graph=True,
             write_images=False,
             update_freq='epoch',
             profile_batch=2,
             embeddings_freq=0,
             embeddings_metadata=None,
             **kwargs):
  super(TensorBoard, self).__init__()
  self._validate_kwargs(kwargs)

  self.log_dir = log_dir
  self.histogram_freq = histogram_freq
  self.write_graph = write_graph
  self.write_images = write_images
  # Normalize 'batch' to an update interval of one batch.
  self.update_freq = 1 if update_freq == 'batch' else update_freq
  self.embeddings_freq = embeddings_freq
  self.embeddings_metadata = embeddings_metadata

  self._samples_seen = 0
  self._samples_seen_at_last_write = 0
  self._current_batch = 0

  # A collection of file writers currently in use, to be closed when
  # training ends for this callback. Writers are keyed by the directory
  # name under the root logdir: e.g., "train" or "validation".
  self._train_run_name = 'train'
  self._validation_run_name = 'validation'
  self._writers = {}

  self._profile_batch = profile_batch
  # True while a profiler trace is running.
  self._is_tracing = False

  # TensorBoard should only write summaries on the chief when in a
  # Multi-Worker setting.
  self._chief_worker_only = True
def _validate_kwargs(self, kwargs):
"""Handle arguments were supported in V1."""
if kwargs.get('write_grads', False):
logging.warning('`write_grads` will be ignored in TensorFlow 2.0 '
'for the `TensorBoard` Callback.')
if kwargs.get('batch_size', False):
logging.warning('`batch_size` is no longer needed in the '
'`TensorBoard` Callback and will be ignored '
'in TensorFlow 2.0.')
if kwargs.get('embeddings_layer_names', False):
logging.warning('`embeddings_layer_names` is not supported in '
'TensorFlow 2.0. Instead, all `Embedding` layers '
'will be visualized.')
if kwargs.get('embeddings_data', False):
logging.warning('`embeddings_data` is not supported in TensorFlow '
'2.0. Instead, all `Embedding` variables will be '
'visualized.')
unrecognized_kwargs = set(kwargs.keys()) - {
'write_grads', 'embeddings_layer_names', 'embeddings_data', 'batch_size'
}
# Only allow kwargs that were supported in V1.
if unrecognized_kwargs:
raise ValueError('Unrecognized arguments in `TensorBoard` '
'Callback: ' + str(unrecognized_kwargs))
def set_model(self, model):
  """Sets Keras model and writes graph if specified."""
  self.model = model
  with context.eager_mode():
    # Drop writers from any previous model before creating new ones.
    self._close_writers()
    if self.write_graph:
      with self._get_writer(self._train_run_name).as_default():
        with summary_ops_v2.always_record_summaries():
          if not model.run_eagerly:
            # Write the backend graph so TensorBoard can render it.
            summary_ops_v2.graph(K.get_graph(), step=0)

          # Only graph networks and Sequential models can be serialized as
          # a full Keras model summary.
          summary_writable = (
              self.model._is_graph_network or  # pylint: disable=protected-access
              self.model.__class__.__name__ == 'Sequential')  # pylint: disable=protected-access
          if summary_writable:
            summary_ops_v2.keras_model('keras', self.model, step=0)

  if self.embeddings_freq:
    self._configure_embeddings()

  # Save the eager context's current summary state; presumably restored
  # later by this callback — the restore site is outside this view (TODO
  # confirm).
  self._prev_summary_writer = context.context().summary_writer
  self._prev_summary_recording = context.context().summary_recording
  self._prev_summary_step = context.context().summary_step
def _configure_embeddings(self):
  """Configure the Projector for embeddings.

  Builds a `ProjectorConfig` with one entry per `Embedding` layer of the
  model, attaching user-supplied metadata files when provided, and hands
  the config to the TensorBoard projector plugin.

  Raises:
      ImportError: If the TensorBoard projector plugin cannot be imported.
      ValueError: If `embeddings_metadata` is a dict containing layer names
          that do not match any `Embedding` layer of the model.
  """
  # TODO(omalleyt): Add integration tests.
  from tensorflow.python.keras.layers import embeddings
  try:
    from tensorboard.plugins import projector
  except ImportError:
    raise ImportError('Failed to import TensorBoard. Please make sure that '
                      'TensorBoard integration is complete."')
  config = projector.ProjectorConfig()
  for layer in self.model.layers:
    if isinstance(layer, embeddings.Embedding):
      embedding = config.embeddings.add()
      embedding.tensor_name = layer.embeddings.name

      if self.embeddings_metadata is not None:
        if isinstance(self.embeddings_metadata, str):
          # A single metadata file shared by all embedding layers.
          embedding.metadata_path = self.embeddings_metadata
        elif layer.name in self.embeddings_metadata:
          # Bug fix: this previously tested `layer.name in
          # embedding.metadata_path` — a freshly-added proto field that is
          # always the empty string — so per-layer metadata was never
          # attached and the leftover-keys check below raised spuriously.
          # Look the layer up in the user-supplied dict instead, and pop it
          # so leftovers can be detected.
          embedding.metadata_path = self.embeddings_metadata.pop(layer.name)

  # Any dict entries left over name layers that do not exist. A shared
  # string is deliberately exempt (it is never consumed by the loop above).
  if self.embeddings_metadata and not isinstance(self.embeddings_metadata,
                                                 str):
    raise ValueError('Unrecognized `Embedding` layer names passed to '
                     '`keras.callbacks.TensorBoard` `embeddings_metadata` '
                     'argument: ' + str(self.embeddings_metadata.keys()))

  class DummyWriter(object):
    """Dummy writer to conform to `Projector` API."""

    def __init__(self, logdir):
      self.logdir = logdir

    def get_logdir(self):
      return self.logdir

  writer = DummyWriter(self.log_dir)
  projector.visualize_embeddings(writer, config)
def _close_writers(self):
  """Close all remaining open file writers owned by this callback.

  If there are no such file writers, this is a no-op.
  """
  with context.eager_mode():
    for open_writer in self._writers.values():
      open_writer.close()
    self._writers.clear()
def _get_writer(self, writer_name):
  """Get a summary writer for the given subdirectory under the logdir.

  A writer will be created if it does not yet exist.

  Arguments:
      writer_name: The name of the directory for which to create or
          retrieve a writer. Should be either `self._train_run_name` or
          `self._validation_run_name`.

  Returns:
      A `SummaryWriter` object.
  """
  if writer_name not in self._writers:
    # Lazily create one writer per run subdirectory.
    run_dir = os.path.join(self.log_dir, writer_name)
    self._writers[writer_name] = summary_ops_v2.create_file_writer_v2(run_dir)
  return self._writers[writer_name]
  def _set_default_writer(self, writer_name):
    """Sets the default writer for custom batch-level summaries."""
    if self.update_freq == 'epoch':
      # Writer is only used for custom summaries, which are written
      # batch-by-batch.
      return
    writer = self._get_writer(writer_name)
    step = self._total_batches_seen[writer_name]
    # Install this writer as the process-wide default so user tf.summary
    # calls made during batches land in the right events file.
    context.context().summary_writer = writer
    def _should_record():
      # Only record on batches aligned with the `update_freq` interval.
      return math_ops.equal(step % self.update_freq, 0)
    context.context().summary_recording = _should_record
    summary_ops_v2.set_step(step)
def _init_batch_steps(self):
"""Create the total batch counters."""
if ops.executing_eagerly_outside_functions():
# Variables are needed for the `step` value of custom tf.summaries
# to be updated inside a tf.function.
self._total_batches_seen = {
self._train_run_name: variables.Variable(0, dtype='int64'),
self._validation_run_name: variables.Variable(0, dtype='int64')
}
else:
# Custom tf.summaries are not supported in legacy graph mode.
self._total_batches_seen = {
self._train_run_name: 0,
self._validation_run_name: 0
}
def _increment_step(self, writer_name):
step = self._total_batches_seen[writer_name]
if isinstance(step, variables.Variable):
step.assign_add(1)
else:
self._total_batches_seen[writer_name] += 1
def on_train_begin(self, logs=None):
self._init_batch_steps()
if self._profile_batch == 1:
summary_ops_v2.trace_on(graph=True, profiler=True)
self._is_tracing = True
  def on_test_begin(self, logs=None):
    """Routes batch-level custom summaries to the validation writer."""
    self._set_default_writer(self._validation_run_name)
  def on_train_batch_end(self, batch, logs=None):
    """Writes scalar summaries for metrics on every training batch.

    Performs profiling if current batch is in profiler_batches.

    Arguments:
      batch: Integer, index of batch within the current epoch.
      logs: Dict. Metric results for this batch.
    """
    # Nothing to do when logging is per-epoch and no profiling is requested.
    if self.update_freq == 'epoch' and self._profile_batch is None:
      return
    # Don't output batch_size and batch number as TensorBoard summaries
    logs = logs or {}
    train_batches = self._total_batches_seen[self._train_run_name]
    if self.update_freq != 'epoch' and batch % self.update_freq == 0:
      self._log_metrics(logs, prefix='batch_', step=train_batches)
    self._increment_step(self._train_run_name)
    if context.executing_eagerly():
      if self._is_tracing:
        # A trace was started on the previous batch; export it now.
        self._log_trace()
      elif (not self._is_tracing and
            math_ops.equal(train_batches, self._profile_batch - 1)):
        # Start tracing one batch early so the requested profile batch is
        # fully captured.
        self._enable_trace()
def on_test_batch_end(self, batch, logs=None):
if self.update_freq == 'epoch':
return
self._increment_step(self._validation_run_name)
  def on_epoch_begin(self, epoch, logs=None):
    """Routes batch-level custom summaries to the training writer."""
    self._set_default_writer(self._train_run_name)
def on_epoch_end(self, epoch, logs=None):
"""Runs metrics and histogram summaries at epoch end."""
self._log_metrics(logs, prefix='epoch_', step=epoch)
if self.histogram_freq and epoch % self.histogram_freq == 0:
self._log_weights(epoch)
if self.embeddings_freq and epoch % self.embeddings_freq == 0:
self._log_embeddings(epoch)
def on_train_end(self, logs=None):
if self._is_tracing:
self._log_trace()
self._close_writers()
context.context().summary_writer = self._prev_summary_writer
context.context().summary_recording = self._prev_summary_recording
context.context().summary_step = self._prev_summary_step
def _enable_trace(self):
if context.executing_eagerly():
summary_ops_v2.trace_on(graph=True, profiler=True)
self._is_tracing = True
  def _log_trace(self):
    """Logs the trace graph to TensorBoard."""
    if context.executing_eagerly():
      # Export under the training writer regardless of which phase we are in.
      with self._get_writer(self._train_run_name).as_default(), \
          summary_ops_v2.always_record_summaries():
        # TODO(b/126388999): Remove step info in the summary name.
        step = K.get_value(self._total_batches_seen[self._train_run_name])
        summary_ops_v2.trace_export(
            name='batch_%d' % step,
            step=step,
            profiler_outdir=os.path.join(self.log_dir, 'train'))
      self._is_tracing = False
def _log_metrics(self, logs, prefix, step):
"""Writes metrics out as custom scalar summaries.
Arguments:
logs: Dict. Keys are scalar summary names, values are NumPy scalars.
prefix: String. The prefix to apply to the scalar summary names.
step: Int. The global step to use for TensorBoard.
"""
if logs is None:
logs = {}
# Group metrics by the name of their associated file writer. Values
# are lists of metrics, as (name, scalar_value) pairs.
logs_by_writer = {
self._train_run_name: [],
self._validation_run_name: [],
}
validation_prefix = 'val_'
for (name, value) in logs.items():
if name in ('batch', 'size', 'num_steps'):
# Scrub non-metric items.
continue
if name.startswith(validation_prefix):
name = name[len(validation_prefix):]
writer_name = self._validation_run_name
else:
writer_name = self._train_run_name
name = prefix + name # assign batch or epoch prefix
logs_by_writer[writer_name].append((name, value))
with context.eager_mode():
with summary_ops_v2.always_record_summaries():
for writer_name in logs_by_writer:
these_logs = logs_by_writer[writer_name]
if not these_logs:
# Don't create a "validation" events file if we don't
# actually have any validation data.
continue
writer = self._get_writer(writer_name)
with writer.as_default():
for (name, value) in these_logs:
summary_ops_v2.scalar(name, value, step=step)
  def _log_weights(self, epoch):
    """Logs the weights of the Model to TensorBoard."""
    writer = self._get_writer(self._train_run_name)
    with context.eager_mode(), \
        writer.as_default(), \
        summary_ops_v2.always_record_summaries():
      for layer in self.model.layers:
        for weight in layer.weights:
          # ':' is not a valid character in summary names.
          weight_name = weight.name.replace(':', '_')
          with ops.init_scope():
            # Read the concrete value outside any function-building scope.
            weight = K.get_value(weight)
          summary_ops_v2.histogram(weight_name, weight, step=epoch)
          if self.write_images:
            self._log_weight_as_image(weight, weight_name, epoch)
      writer.flush()
def _log_weight_as_image(self, weight, weight_name, epoch):
"""Logs a weight as a TensorBoard image."""
w_img = array_ops.squeeze(weight)
shape = K.int_shape(w_img)
if len(shape) == 1: # Bias case
w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
elif len(shape) == 2: # Dense layer kernel case
if shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
elif len(shape) == 3: # ConvNet case
if K.image_data_format() == 'channels_last':
# Switch to channels_first to display every kernel as a separate
# image.
w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [shape[0], shape[1], shape[2], 1])
shape = K.int_shape(w_img)
# Not possible to handle 3D convnets etc.
if len(shape) == 4 and shape[-1] in [1, 3, 4]:
summary_ops_v2.image(weight_name, w_img, step=epoch)
def _log_embeddings(self, epoch):
embeddings_ckpt = os.path.join(self.log_dir, 'train',
'keras_embedding.ckpt-{}'.format(epoch))
self.model.save_weights(embeddings_ckpt)
@keras_export('keras.callbacks.ReduceLROnPlateau')
class ReduceLROnPlateau(Callback):
  """Reduce learning rate when a metric has stopped improving.

  Models often benefit from reducing the learning rate by a factor
  of 2-10 once learning stagnates. This callback monitors a
  quantity and if no improvement is seen for a 'patience' number
  of epochs, the learning rate is reduced.

  Example:

  ```python
  reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                patience=5, min_lr=0.001)
  model.fit(X_train, Y_train, callbacks=[reduce_lr])
  ```

  Arguments:
    monitor: quantity to be monitored.
    factor: factor by which the learning rate will be reduced. new_lr = lr *
      factor
    patience: number of epochs with no improvement after which learning rate
      will be reduced.
    verbose: int. 0: quiet, 1: update messages.
    mode: one of {auto, min, max}. In `min` mode, lr will be reduced when the
      quantity monitored has stopped decreasing; in `max` mode it will be
      reduced when the quantity monitored has stopped increasing; in `auto`
      mode, the direction is automatically inferred from the name of the
      monitored quantity.
    min_delta: threshold for measuring the new optimum, to only focus on
      significant changes.
    cooldown: number of epochs to wait before resuming normal operation after
      lr has been reduced.
    min_lr: lower bound on the learning rate.
  """
  def __init__(self,
               monitor='val_loss',
               factor=0.1,
               patience=10,
               verbose=0,
               mode='auto',
               min_delta=1e-4,
               cooldown=0,
               min_lr=0,
               **kwargs):
    super(ReduceLROnPlateau, self).__init__()
    self.monitor = monitor
    if factor >= 1.0:
      raise ValueError('ReduceLROnPlateau ' 'does not support a factor >= 1.0.')
    if 'epsilon' in kwargs:
      # Backwards compatibility: `epsilon` was renamed to `min_delta`.
      min_delta = kwargs.pop('epsilon')
      logging.warning('`epsilon` argument is deprecated and '
                      'will be removed, use `min_delta` instead.')
    self.factor = factor
    self.min_lr = min_lr
    self.min_delta = min_delta
    self.patience = patience
    self.verbose = verbose
    self.cooldown = cooldown
    self.cooldown_counter = 0  # Cooldown counter.
    self.wait = 0  # Epochs elapsed since the last improvement.
    self.best = 0  # Best monitored value; properly initialized in `_reset`.
    self.mode = mode
    self.monitor_op = None  # Comparison function chosen in `_reset`.
    self._reset()
  def _reset(self):
    """Resets wait counter and cooldown counter.
    """
    if self.mode not in ['auto', 'min', 'max']:
      logging.warning('Learning Rate Plateau Reducing mode %s is unknown, '
                      'fallback to auto mode.', self.mode)
      self.mode = 'auto'
    # 'min' (or 'auto' with a non-accuracy metric): improvement means the
    # monitored value decreased by more than `min_delta`.
    if (self.mode == 'min' or
        (self.mode == 'auto' and 'acc' not in self.monitor)):
      self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
      self.best = np.Inf
    else:
      self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
      self.best = -np.Inf
    self.cooldown_counter = 0
    self.wait = 0
  def on_train_begin(self, logs=None):
    self._reset()
  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    # Expose the current learning rate in the logs for other callbacks
    # (e.g. CSVLogger).
    logs['lr'] = K.get_value(self.model.optimizer.lr)
    current = logs.get(self.monitor)
    if current is None:
      logging.warning('Reduce LR on plateau conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s',
                      self.monitor, ','.join(list(logs.keys())))
    else:
      if self.in_cooldown():
        self.cooldown_counter -= 1
        self.wait = 0
      if self.monitor_op(current, self.best):
        # Improvement: remember the new best and reset the patience clock.
        self.best = current
        self.wait = 0
      elif not self.in_cooldown():
        self.wait += 1
        if self.wait >= self.patience:
          old_lr = float(K.get_value(self.model.optimizer.lr))
          if old_lr > self.min_lr:
            # Reduce the lr (clamped to `min_lr`) and enter cooldown.
            new_lr = old_lr * self.factor
            new_lr = max(new_lr, self.min_lr)
            K.set_value(self.model.optimizer.lr, new_lr)
            if self.verbose > 0:
              print('\nEpoch %05d: ReduceLROnPlateau reducing learning '
                    'rate to %s.' % (epoch + 1, new_lr))
            self.cooldown_counter = self.cooldown
            self.wait = 0
  def in_cooldown(self):
    # True while we are still waiting out the post-reduction cooldown.
    return self.cooldown_counter > 0
@keras_export('keras.callbacks.CSVLogger')
class CSVLogger(Callback):
  """Callback that streams epoch results to a csv file.

  Supports all values that can be represented as a string,
  including 1D iterables such as np.ndarray.

  Example:

  ```python
  csv_logger = CSVLogger('training.log')
  model.fit(X_train, Y_train, callbacks=[csv_logger])
  ```

  Arguments:
    filename: filename of the csv file, e.g. 'run/log.csv'.
    separator: string used to separate elements in the csv file.
    append: True: append if file exists (useful for continuing
      training). False: overwrite existing file,
  """
  def __init__(self, filename, separator=',', append=False):
    self.sep = separator
    self.filename = filename
    self.append = append
    self.writer = None  # csv.DictWriter; created lazily on first epoch end.
    self.keys = None  # Sorted metric names; fixed on first epoch end.
    self.append_header = True
    if six.PY2:
      # PY2 csv wants binary mode; PY3 wants text mode with fixed newlines.
      self.file_flags = 'b'
      self._open_args = {}
    else:
      self.file_flags = ''
      self._open_args = {'newline': '\n'}
    super(CSVLogger, self).__init__()
  def on_train_begin(self, logs=None):
    if self.append:
      if file_io.file_exists(self.filename):
        with open(self.filename, 'r' + self.file_flags) as f:
          # Only write a header if the existing file is empty.
          self.append_header = not bool(len(f.readline()))
      mode = 'a'
    else:
      mode = 'w'
    self.csv_file = io.open(self.filename,
                            mode + self.file_flags,
                            **self._open_args)
  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    def handle_value(k):
      # A 0-d ndarray is technically iterable but must be written as a
      # plain scalar.
      is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
      if isinstance(k, six.string_types):
        return k
      elif isinstance(k, collections_abc.Iterable) and not is_zero_dim_ndarray:
        # Render 1-D iterables as a quoted "[a, b, c]" cell.
        return '"[%s]"' % (', '.join(map(str, k)))
      else:
        return k
    if self.keys is None:
      self.keys = sorted(logs.keys())
    if self.model.stop_training:
      # We set NA so that csv parsers do not fail for this last epoch.
      logs = dict([(k, logs[k]) if k in logs else (k, 'NA') for k in self.keys])
    if not self.writer:
      class CustomDialect(csv.excel):
        delimiter = self.sep
      fieldnames = ['epoch'] + self.keys
      if six.PY2:
        fieldnames = [unicode(x) for x in fieldnames]
      self.writer = csv.DictWriter(
          self.csv_file,
          fieldnames=fieldnames,
          dialect=CustomDialect)
      if self.append_header:
        self.writer.writeheader()
    row_dict = collections.OrderedDict({'epoch': epoch})
    row_dict.update((key, handle_value(logs[key])) for key in self.keys)
    self.writer.writerow(row_dict)
    self.csv_file.flush()
  def on_train_end(self, logs=None):
    self.csv_file.close()
    self.writer = None
@keras_export('keras.callbacks.LambdaCallback')
class LambdaCallback(Callback):
  r"""Callback for creating simple, custom callbacks on-the-fly.

  This callback is constructed with anonymous functions that will be called
  at the appropriate time. Note that the callbacks expects positional
  arguments, as:

  - `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
    `epoch`, `logs`
  - `on_batch_begin` and `on_batch_end` expect two positional arguments:
    `batch`, `logs`
  - `on_train_begin` and `on_train_end` expect one positional argument:
    `logs`

  Arguments:
    on_epoch_begin: called at the beginning of every epoch.
    on_epoch_end: called at the end of every epoch.
    on_batch_begin: called at the beginning of every batch.
    on_batch_end: called at the end of every batch.
    on_train_begin: called at the beginning of model training.
    on_train_end: called at the end of model training.

  Example:

  ```python
  # Print the batch number at the beginning of every batch.
  batch_print_callback = LambdaCallback(
      on_batch_begin=lambda batch,logs: print(batch))

  # Stream the epoch loss to a file in JSON format. The file content
  # is not well-formed JSON but rather has a JSON object per line.
  import json
  json_log = open('loss_log.json', mode='wt', buffering=1)
  json_logging_callback = LambdaCallback(
      on_epoch_end=lambda epoch, logs: json_log.write(
          json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
      on_train_end=lambda logs: json_log.close()
  )

  # Terminate some processes after having finished model training.
  processes = ...
  cleanup_callback = LambdaCallback(
      on_train_end=lambda logs: [
          p.terminate() for p in processes if p.is_alive()])

  model.fit(...,
            callbacks=[batch_print_callback,
                       json_logging_callback,
                       cleanup_callback])
  ```
  """
  def __init__(self,
               on_epoch_begin=None,
               on_epoch_end=None,
               on_batch_begin=None,
               on_batch_end=None,
               on_train_begin=None,
               on_train_end=None,
               **kwargs):
    super(LambdaCallback, self).__init__()
    self.__dict__.update(kwargs)
    # No-op fallbacks with the positional signatures callers expect.
    def _noop_epoch(epoch, logs):
      return None
    def _noop_batch(batch, logs):
      return None
    def _noop_train(logs):
      return None
    self.on_epoch_begin = (
        on_epoch_begin if on_epoch_begin is not None else _noop_epoch)
    self.on_epoch_end = (
        on_epoch_end if on_epoch_end is not None else _noop_epoch)
    self.on_batch_begin = (
        on_batch_begin if on_batch_begin is not None else _noop_batch)
    self.on_batch_end = (
        on_batch_end if on_batch_end is not None else _noop_batch)
    self.on_train_begin = (
        on_train_begin if on_train_begin is not None else _noop_train)
    self.on_train_end = (
        on_train_end if on_train_end is not None else _noop_train)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/callbacks.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import scipy.sparse
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
def compare_single_input_op_to_numpy(keras_op,
                                     np_op,
                                     input_shape,
                                     dtype='float32',
                                     negative_values=True,
                                     keras_args=None,
                                     keras_kwargs=None,
                                     np_args=None,
                                     np_kwargs=None):
  """Asserts that a one-input Keras backend op matches its NumPy reference.

  A random input of `input_shape` is drawn (from [-1, 1) when
  `negative_values`, else from [0, 2)), fed through both `keras_op` and
  `np_op`, and the results compared with an absolute tolerance of 1e-4.
  """
  if keras_args is None:
    keras_args = []
  if keras_kwargs is None:
    keras_kwargs = {}
  if np_args is None:
    np_args = []
  if np_kwargs is None:
    np_kwargs = {}
  inputs = 2. * np.random.random(input_shape)
  if negative_values:
    inputs -= 1.
  keras_input = keras.backend.variable(inputs, dtype=dtype)
  keras_output = keras.backend.eval(
      keras_op(keras_input, *keras_args, **keras_kwargs))
  np_output = np_op(inputs.astype(dtype), *np_args, **np_kwargs)
  try:
    np.testing.assert_allclose(keras_output, np_output, atol=1e-4)
  except AssertionError:
    raise AssertionError('Test for op `' + str(keras_op.__name__) +
                         '` failed; '
                         'Expected ' + str(np_output) + ' but got ' +
                         str(keras_output))
def compare_two_inputs_op_to_numpy(keras_op,
                                   np_op,
                                   input_shape_a,
                                   input_shape_b,
                                   dtype='float32',
                                   keras_args=None,
                                   keras_kwargs=None,
                                   np_args=None,
                                   np_kwargs=None):
  """Asserts that a two-input Keras backend op matches its NumPy reference.

  Random inputs of the given shapes (drawn from [0, 1)) are fed through
  both `keras_op` and `np_op`; results must agree within an absolute
  tolerance of 1e-4.
  """
  if keras_args is None:
    keras_args = []
  if keras_kwargs is None:
    keras_kwargs = {}
  if np_args is None:
    np_args = []
  if np_kwargs is None:
    np_kwargs = {}
  input_a = np.random.random(input_shape_a)
  input_b = np.random.random(input_shape_b)
  keras_output = keras.backend.eval(
      keras_op(keras.backend.variable(input_a, dtype=dtype),
               keras.backend.variable(input_b, dtype=dtype),
               *keras_args, **keras_kwargs))
  np_output = np_op(input_a.astype(dtype), input_b.astype(dtype),
                    *np_args, **np_kwargs)
  try:
    np.testing.assert_allclose(keras_output, np_output, atol=1e-4)
  except AssertionError:
    raise AssertionError('Test for op `' + str(keras_op.__name__) +
                         '` failed; '
                         'Expected ' + str(np_output) + ' but got ' +
                         str(keras_output))
class BackendResetTest(test.TestCase, parameterized.TestCase):
  """Tests that `keras.backend.clear_session` resets session/graph state."""
  @test_util.run_all_in_graph_and_eager_modes
  def test_new_config(self):
    """Sessions created after `clear_session` honor the current jit config."""
    # User defined jit setting
    config.set_optimizer_jit(False)
    sess = keras.backend.get_session()
    default_config = context.context().config
    self.assertEqual(
        sess._config.graph_options.optimizer_options.global_jit_level,
        default_config.graph_options.optimizer_options.global_jit_level)
    keras.backend.clear_session()
    # New session has the same jit setting
    sess = keras.backend.get_session()
    default_config = context.context().config
    self.assertEqual(
        sess._config.graph_options.optimizer_options.global_jit_level,
        default_config.graph_options.optimizer_options.global_jit_level)
    keras.backend.clear_session()
    # Change respected
    config.set_optimizer_jit(True)
    sess = keras.backend.get_session()
    default_config = context.context().config
    self.assertEqual(
        sess._config.graph_options.optimizer_options.global_jit_level,
        default_config.graph_options.optimizer_options.global_jit_level)
    keras.backend.clear_session()
  # We can't use the normal parameterized decorator because the test session
  # will block graph clearing.
  @parameterized.named_parameters(('_v1', context.graph_mode),
                                  ('_v2', context.eager_mode))
  def test_new_graph(self, test_context):
    """`clear_session` must hand back a brand-new default graph."""
    with test_context():
      g_old = keras.backend.get_graph()
      keras.backend.clear_session()
      g = keras.backend.get_graph()
      assert g_old is not g
@test_util.run_all_in_graph_and_eager_modes
class BackendUtilsTest(test.TestCase):
  """Tests for keras.backend utilities: uids, learning phase, tensors."""
  def test_backend(self):
    self.assertEqual(keras.backend.backend(), 'tensorflow')
  def test_get_reset_uids(self):
    # Uids are per-prefix counters that `reset_uids` returns to 1.
    self.assertEqual(keras.backend.get_uid('foo'), 1)
    self.assertEqual(keras.backend.get_uid('foo'), 2)
    keras.backend.reset_uids()
    self.assertEqual(keras.backend.get_uid('foo'), 1)
  def test_learning_phase(self):
    with self.cached_session() as sess:
      # Learning phase must be 0 or 1.
      with self.assertRaises(ValueError):
        keras.backend.set_learning_phase(2)
      # Test running with a learning-phase-consuming layer
      with keras.backend.learning_phase_scope(0):
        x = keras.Input((3,))
        y = keras.layers.BatchNormalization()(x)
        if not context.executing_eagerly():
          self.evaluate(variables.global_variables_initializer())
          sess.run(y, feed_dict={x: np.random.random((2, 3))})
  def test_learning_phase_name(self):
    with ops.name_scope('test_scope'):
      # Test that outer name scopes do not affect the learning phase's name.
      lp = keras.backend.symbolic_learning_phase()
      self.assertEqual(lp.name, 'keras_learning_phase:0')
  def test_learning_phase_scope(self):
    # The scope overrides the phase and restores the prior value on exit.
    initial_learning_phase = keras.backend.learning_phase()
    with keras.backend.learning_phase_scope(1):
      self.assertEqual(keras.backend.learning_phase(), 1)
    self.assertEqual(keras.backend.learning_phase(), initial_learning_phase)
    with keras.backend.learning_phase_scope(0):
      self.assertEqual(keras.backend.learning_phase(), 0)
    self.assertEqual(keras.backend.learning_phase(), initial_learning_phase)
    # An invalid scope value raises but must still restore the prior phase.
    with self.assertRaises(ValueError):
      with keras.backend.learning_phase_scope(None):
        pass
    self.assertEqual(keras.backend.learning_phase(), initial_learning_phase)
    new_learning_phase = 0
    keras.backend.set_learning_phase(new_learning_phase)
    self.assertEqual(keras.backend.learning_phase(), new_learning_phase)
    with keras.backend.learning_phase_scope(1):
      self.assertEqual(keras.backend.learning_phase(), 1)
    self.assertEqual(keras.backend.learning_phase(), new_learning_phase)
  def test_learning_phase_scope_in_graph(self):
    # The learning phase is tracked per graph; a scope set outside a graph
    # must not leak into that graph's own phase tensor.
    initial_learning_phase_outside_graph = keras.backend.learning_phase()
    with keras.backend.get_graph().as_default():
      initial_learning_phase_in_graph = keras.backend.learning_phase()
    self.assertEqual(keras.backend.learning_phase(),
                     initial_learning_phase_outside_graph)
    with keras.backend.learning_phase_scope(1):
      self.assertEqual(keras.backend.learning_phase(), 1)
    self.assertEqual(keras.backend.learning_phase(),
                     initial_learning_phase_outside_graph)
    with keras.backend.get_graph().as_default():
      self.assertIs(keras.backend.learning_phase(),
                    initial_learning_phase_in_graph)
    self.assertEqual(keras.backend.learning_phase(),
                     initial_learning_phase_outside_graph)
  def test_int_shape(self):
    x = keras.backend.ones(shape=(3, 4))
    self.assertEqual(keras.backend.int_shape(x), (3, 4))
    if not context.executing_eagerly():
      # Unknown dimensions come back as None.
      x = keras.backend.placeholder(shape=(None, 4))
      self.assertEqual(keras.backend.int_shape(x), (None, 4))
  def test_in_train_phase(self):
    # `in_train_phase(a, b)` selects `a` when training, `b` otherwise.
    y1 = keras.backend.variable(1)
    y2 = keras.backend.variable(2)
    if context.executing_eagerly():
      with keras.backend.learning_phase_scope(0):
        y_val_test = keras.backend.in_train_phase(y1, y2).numpy()
      with keras.backend.learning_phase_scope(1):
        y_val_train = keras.backend.in_train_phase(y1, y2).numpy()
    else:
      y = keras.backend.in_train_phase(y1, y2)
      f = keras.backend.function([keras.backend.learning_phase()], [y])
      y_val_test = f([0])[0]
      y_val_train = f([1])[0]
    self.assertAllClose(y_val_test, 2)
    self.assertAllClose(y_val_train, 1)
  def test_is_keras_tensor(self):
    x = keras.backend.variable(1)
    self.assertEqual(keras.backend.is_keras_tensor(x), False)
    x = keras.Input(shape=(1,))
    self.assertEqual(keras.backend.is_keras_tensor(x), True)
    # Non-tensor inputs are rejected rather than returning False.
    with self.assertRaises(ValueError):
      keras.backend.is_keras_tensor(0)
  def test_stop_gradient(self):
    # Works on a single tensor and on a list of tensors.
    x = keras.backend.variable(1)
    y = keras.backend.stop_gradient(x)
    if not context.executing_eagerly():
      self.assertEqual(y.op.name[:12], 'StopGradient')
    xs = [keras.backend.variable(1) for _ in range(3)]
    ys = keras.backend.stop_gradient(xs)
    if not context.executing_eagerly():
      for y in ys:
        self.assertEqual(y.op.name[:12], 'StopGradient')
  def test_placeholder(self):
    x = keras.backend.placeholder(shape=(3, 4))
    self.assertEqual(x.shape.as_list(), [3, 4])
    x = keras.backend.placeholder(shape=(3, 4), sparse=True)
    self.assertEqual(x.shape.as_list(), [3, 4])
  def test_is_placeholder(self):
    x = keras.backend.placeholder(shape=(1,))
    self.assertEqual(keras.backend.is_placeholder(x), True)
    x = keras.backend.variable(1)
    self.assertEqual(keras.backend.is_placeholder(x), False)
  def test_print_tensor(self):
    # Unfortunately it seems impossible to use `mock` (or any other method)
    # to capture stdout when used inside a graph or graph function, thus
    # we cannot test correctness.
    # The message gets correctly printed in practice.
    x = keras.backend.placeholder(shape=())
    y = keras.backend.print_tensor(x, 'eager=%s' % context.executing_eagerly())
    f = keras.backend.function(x, y)
    f(0)
@test_util.run_all_in_graph_and_eager_modes
class BackendVariableTest(test.TestCase):
  """Tests for keras.backend variable/constant creation helpers."""
  def test_zeros(self):
    x = keras.backend.zeros((3, 4))
    val = keras.backend.eval(x)
    self.assertAllClose(val, np.zeros((3, 4)))
  def test_ones(self):
    x = keras.backend.ones((3, 4))
    val = keras.backend.eval(x)
    self.assertAllClose(val, np.ones((3, 4)))
  def test_eye(self):
    x = keras.backend.eye(4)
    val = keras.backend.eval(x)
    self.assertAllClose(val, np.eye(4))
  def test_zeros_like(self):
    x = keras.backend.zeros((3, 4))
    y = keras.backend.zeros_like(x)
    val = keras.backend.eval(y)
    self.assertAllClose(val, np.zeros((3, 4)))
  def test_ones_like(self):
    x = keras.backend.zeros((3, 4))
    y = keras.backend.ones_like(x)
    val = keras.backend.eval(y)
    self.assertAllClose(val, np.ones((3, 4)))
  def test_random_uniform_variable(self):
    # Statistical checks with loose tolerances; seeded for determinism.
    x = keras.backend.random_uniform_variable((30, 20), low=1, high=2, seed=0)
    val = keras.backend.eval(x)
    self.assertAllClose(val.mean(), 1.5, atol=1e-1)
    self.assertAllClose(val.max(), 2., atol=1e-1)
    self.assertAllClose(val.min(), 1., atol=1e-1)
  def test_random_normal_variable(self):
    # Statistical checks with loose tolerances; seeded for determinism.
    x = keras.backend.random_normal_variable((30, 20), 1., 0.5, seed=0)
    val = keras.backend.eval(x)
    self.assertAllClose(val.mean(), 1., atol=1e-1)
    self.assertAllClose(val.std(), 0.5, atol=1e-1)
  def test_count_params(self):
    x = keras.backend.zeros((4, 5))
    val = keras.backend.count_params(x)
    self.assertAllClose(val, 20)
  def test_constant(self):
    ref_val = np.random.random((3, 4)).astype('float32')
    x = keras.backend.constant(ref_val)
    val = keras.backend.eval(x)
    self.assertAllClose(val, ref_val)
  def test_sparse_variable(self):
    # A scipy sparse matrix becomes a SparseTensor, convertible to dense.
    val = scipy.sparse.eye(10)
    x = keras.backend.variable(val)
    self.assertTrue(isinstance(x, sparse_tensor.SparseTensor))
    y = keras.backend.to_dense(x)
    self.assertFalse(keras.backend.is_sparse(y))
@test_util.run_all_in_graph_and_eager_modes
class BackendLinearAlgebraTest(test.TestCase):
  """Compares keras.backend math/linear-algebra ops against NumPy."""

  def test_dot(self):
    """2-D x 2-D and 3-D x 2-D dot products produce the expected shapes."""
    x = keras.backend.ones(shape=(2, 3))
    y = keras.backend.ones(shape=(3, 4))
    xy = keras.backend.dot(x, y)
    self.assertEqual(xy.shape.as_list(), [2, 4])
    x = keras.backend.ones(shape=(32, 28, 3))
    y = keras.backend.ones(shape=(3, 4))
    xy = keras.backend.dot(x, y)
    self.assertEqual(xy.shape.as_list(), [32, 28, 4])

  def test_batch_dot(self):
    """Batched dot over the given axes produces the expected shape."""
    x = keras.backend.ones(shape=(32, 20, 1))
    y = keras.backend.ones(shape=(32, 30, 20))
    xy = keras.backend.batch_dot(x, y, axes=[1, 2])
    self.assertEqual(xy.shape.as_list(), [32, 1, 30])
    # TODO(fchollet): insufficiently tested.

  def test_reduction_ops(self):
    """Reduction ops match NumPy for positive/negative axes and keepdims."""
    ops_to_test = [
        (keras.backend.max, np.max),
        (keras.backend.min, np.min),
        (keras.backend.sum, np.sum),
        (keras.backend.prod, np.prod),
        (keras.backend.var, np.var),
        (keras.backend.std, np.std),
        (keras.backend.mean, np.mean),
        (keras.backend.argmin, np.argmin),
        (keras.backend.argmax, np.argmax),
    ]
    for keras_op, np_op in ops_to_test:
      compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7, 5),
                                       keras_kwargs={'axis': 1},
                                       np_kwargs={'axis': 1})
      compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7, 5),
                                       keras_kwargs={'axis': -1},
                                       np_kwargs={'axis': -1})
      # argmin/argmax take no `keepdims`; only test it where supported.
      if 'keepdims' in tf_inspect.getargspec(keras_op).args:
        compare_single_input_op_to_numpy(keras_op, np_op,
                                         input_shape=(4, 7, 5),
                                         keras_kwargs={'axis': 1,
                                                       'keepdims': True},
                                         np_kwargs={'axis': 1,
                                                    'keepdims': True})

  def test_elementwise_ops(self):
    """Element-wise ops match NumPy, including clip and pow."""
    ops_to_test = [
        (keras.backend.square, np.square),
        (keras.backend.abs, np.abs),
        (keras.backend.round, np.round),
        (keras.backend.sign, np.sign),
        (keras.backend.sin, np.sin),
        (keras.backend.cos, np.cos),
        (keras.backend.exp, np.exp),
    ]
    for keras_op, np_op in ops_to_test:
      compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7))
    # sqrt/log require non-negative inputs.
    ops_to_test = [
        (keras.backend.sqrt, np.sqrt),
        (keras.backend.log, np.log),
    ]
    for keras_op, np_op in ops_to_test:
      compare_single_input_op_to_numpy(keras_op, np_op,
                                       input_shape=(4, 7),
                                       negative_values=False)
    # Fixed: the NumPy reference previously clipped at a_max=1.4 while the
    # Keras op clipped at max_value=2.4. The mismatch went unnoticed only
    # because inputs lie in [-1, 1); both bounds must be identical for the
    # comparison to be meaningful.
    compare_single_input_op_to_numpy(
        keras.backend.clip, np.clip,
        input_shape=(6, 4),
        keras_kwargs={'min_value': 0.1, 'max_value': 2.4},
        np_kwargs={'a_min': 0.1, 'a_max': 2.4})
    compare_single_input_op_to_numpy(
        keras.backend.pow, np.power,
        input_shape=(6, 4),
        keras_args=[3],
        np_args=[3])

  def test_two_tensor_ops(self):
    """Binary comparison/extremum ops match their NumPy counterparts."""
    ops_to_test = [
        (keras.backend.equal, np.equal),
        (keras.backend.not_equal, np.not_equal),
        (keras.backend.greater, np.greater),
        (keras.backend.greater_equal, np.greater_equal),
        (keras.backend.less, np.less),
        (keras.backend.less_equal, np.less_equal),
        (keras.backend.maximum, np.maximum),
        (keras.backend.minimum, np.minimum),
    ]
    for keras_op, np_op in ops_to_test:
      compare_two_inputs_op_to_numpy(keras_op, np_op,
                                     input_shape_a=(4, 7),
                                     input_shape_b=(4, 7))

  def test_relu(self):
    """Checks relu across alpha/max_value/threshold combinations."""
    x = ops.convert_to_tensor([[-4, 0], [2, 7]], 'float32')
    # standard relu
    relu_op = keras.backend.relu(x)
    self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 7]])
    # alpha (leaky relu used)
    relu_op = keras.backend.relu(x, alpha=0.5)
    if not context.executing_eagerly():
      self.assertTrue('LeakyRelu' in relu_op.name)
    self.assertAllClose(keras.backend.eval(relu_op), [[-2, 0], [2, 7]])
    # max_value < some elements
    relu_op = keras.backend.relu(x, max_value=5)
    self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 5]])
    # nn.relu6 used
    relu_op = keras.backend.relu(x, max_value=6)
    if not context.executing_eagerly():
      self.assertTrue('Relu6' in relu_op.name)  # uses tf.nn.relu6
    self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 6]])
    # max value > 6
    relu_op = keras.backend.relu(x, max_value=10)
    self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 7]])
    # max value is float
    relu_op = keras.backend.relu(x, max_value=4.3)
    self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 4.3]])
    # max value == 0
    relu_op = keras.backend.relu(x, max_value=0)
    self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [0, 0]])
    # alpha and max_value
    relu_op = keras.backend.relu(x, alpha=0.25, max_value=3)
    self.assertAllClose(keras.backend.eval(relu_op), [[-1, 0], [2, 3]])
    # threshold
    relu_op = keras.backend.relu(x, threshold=3)
    self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [0, 7]])
    # threshold is float
    relu_op = keras.backend.relu(x, threshold=1.5)
    self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 7]])
    # threshold is negative
    relu_op = keras.backend.relu(x, threshold=-5)
    self.assertAllClose(keras.backend.eval(relu_op), [[-4, 0], [2, 7]])
    # threshold and max_value
    relu_op = keras.backend.relu(x, threshold=3, max_value=5)
    self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [0, 5]])
    # threshold and alpha
    relu_op = keras.backend.relu(x, alpha=0.25, threshold=4)
    self.assertAllClose(keras.backend.eval(relu_op), [[-2, -1], [-0.5, 7]])
    # threshold, alpha, and max_value
    relu_op = keras.backend.relu(x, alpha=0.25, threshold=4, max_value=5)
    self.assertAllClose(keras.backend.eval(relu_op), [[-2, -1], [-0.5, 5]])
@test_util.run_all_in_graph_and_eager_modes
class BackendShapeOpsTest(test.TestCase):
  """Tests for shape-manipulation ops in the Keras backend.

  Most cases delegate to the module-level helper
  `compare_single_input_op_to_numpy`, which runs the backend op and a numpy
  reference op on the same random input and compares the results.
  """

  def test_reshape(self):
    """`reshape` must match `np.reshape` for a compatible target shape."""
    compare_single_input_op_to_numpy(keras.backend.reshape, np.reshape,
                                     input_shape=(4, 7),
                                     keras_args=[(2, 14)],
                                     np_args=[(2, 14)])

  def test_concatenate(self):
    """`concatenate` must infer the static output shape along the join axis."""
    a = keras.backend.variable(np.ones((1, 2, 3)))
    b = keras.backend.variable(np.ones((1, 2, 2)))
    y = keras.backend.concatenate([a, b], axis=-1)
    # Last-axis concat: 3 + 2 == 5.
    self.assertEqual(y.shape.as_list(), [1, 2, 5])

  def test_permute_dimensions(self):
    """`permute_dimensions` must match `np.transpose` for a 2-D input."""
    compare_single_input_op_to_numpy(keras.backend.permute_dimensions,
                                     np.transpose,
                                     input_shape=(4, 7),
                                     keras_args=[(1, 0)],
                                     np_args=[(1, 0)])

  def test_resize_images(self):
    """`resize_images` must scale the spatial dims in both data formats."""
    height_factor = 2
    width_factor = 2
    data_format = 'channels_last'
    x = keras.backend.variable(np.ones((1, 2, 2, 3)))
    y = keras.backend.resize_images(x,
                                    height_factor,
                                    width_factor,
                                    data_format)
    self.assertEqual(y.shape.as_list(), [1, 4, 4, 3])
    data_format = 'channels_first'
    x = keras.backend.variable(np.ones((1, 3, 2, 2)))
    y = keras.backend.resize_images(x,
                                    height_factor,
                                    width_factor,
                                    data_format)
    self.assertEqual(y.shape.as_list(), [1, 3, 4, 4])

    # Invalid use:
    with self.assertRaises(ValueError):
      keras.backend.resize_images(x,
                                  height_factor,
                                  width_factor,
                                  data_format='unknown')

  def test_resize_volumes(self):
    """`resize_volumes` must scale all three spatial dims in both formats."""
    height_factor = 2
    width_factor = 2
    depth_factor = 2
    data_format = 'channels_last'
    x = keras.backend.variable(np.ones((1, 2, 2, 2, 3)))
    y = keras.backend.resize_volumes(x,
                                     depth_factor,
                                     height_factor,
                                     width_factor,
                                     data_format)
    self.assertEqual(y.shape.as_list(), [1, 4, 4, 4, 3])
    data_format = 'channels_first'
    x = keras.backend.variable(np.ones((1, 3, 2, 2, 2)))
    y = keras.backend.resize_volumes(x,
                                     depth_factor,
                                     height_factor,
                                     width_factor,
                                     data_format)
    self.assertEqual(y.shape.as_list(), [1, 3, 4, 4, 4])

    # Invalid use:
    with self.assertRaises(ValueError):
      keras.backend.resize_volumes(x,
                                   depth_factor,
                                   height_factor,
                                   width_factor,
                                   data_format='unknown')

  def test_repeat_elements(self):
    """`repeat_elements` must repeat along one axis, incl. a dynamic axis."""
    x = keras.backend.variable(np.ones((1, 3, 2)))
    y = keras.backend.repeat_elements(x, 3, axis=1)
    self.assertEqual(y.shape.as_list(), [1, 9, 2])

    # Use with a dynamic axis:
    if not context.executing_eagerly():
      x = keras.backend.placeholder(shape=(2, None, 2))
      y = keras.backend.repeat_elements(x, 3, axis=1)
      # An unknown dimension stays unknown after repetition.
      self.assertEqual(y.shape.as_list(), [2, None, 2])

  def test_repeat(self):
    """`repeat` must insert a new axis 1 of the requested length."""
    x = keras.backend.variable(np.ones((1, 3)))
    y = keras.backend.repeat(x, 2)
    self.assertEqual(y.shape.as_list(), [1, 2, 3])

  def test_flatten(self):
    """`flatten` must collapse everything into a single dimension."""
    compare_single_input_op_to_numpy(keras.backend.flatten,
                                     np.reshape,
                                     input_shape=(4, 7, 6),
                                     np_args=[(4 * 7 * 6,)])

  def test_batch_flatten(self):
    """`batch_flatten` must keep the batch dim and collapse the rest."""
    compare_single_input_op_to_numpy(keras.backend.batch_flatten,
                                     np.reshape,
                                     input_shape=(4, 7, 6),
                                     np_args=[(4, 7 * 6)])

  def test_temporal_padding(self):
    """`temporal_padding` must zero-pad axis 1 like the numpy reference."""

    def ref_op(x, padding):
      # Zero-pads axis 1 by padding[0] before and padding[1] after.
      # NOTE(review): the `-padding[1]` slice assumes padding[1] > 0; only
      # called with (2, 3) here, so that holds.
      shape = list(x.shape)
      shape[1] += padding[0] + padding[1]
      y = np.zeros(tuple(shape))
      y[:, padding[0]:-padding[1], :] = x
      return y

    compare_single_input_op_to_numpy(keras.backend.temporal_padding,
                                     ref_op,
                                     input_shape=(4, 7, 6),
                                     keras_args=[(2, 3)],
                                     np_args=[(2, 3)])

  def test_spatial_2d_padding(self):
    """`spatial_2d_padding` must zero-pad H/W in both data formats."""

    def ref_op(x, padding, data_format='channels_last'):
      # Numpy reference: pads the two spatial axes; which axes those are
      # depends on data_format.  Assumes all pad amounts are > 0.
      shape = list(x.shape)
      if data_format == 'channels_last':
        shape[1] += padding[0][0] + padding[0][1]
        shape[2] += padding[1][0] + padding[1][1]
        y = np.zeros(tuple(shape))
        y[:, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1], :] = x
      else:
        shape[2] += padding[0][0] + padding[0][1]
        shape[3] += padding[1][0] + padding[1][1]
        y = np.zeros(tuple(shape))
        y[:, :, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1]] = x
      return y

    compare_single_input_op_to_numpy(
        keras.backend.spatial_2d_padding,
        ref_op,
        input_shape=(2, 3, 2, 3),
        keras_args=[((2, 3), (1, 2))],
        keras_kwargs={'data_format': 'channels_last'},
        np_args=[((2, 3), (1, 2))],
        np_kwargs={'data_format': 'channels_last'})
    compare_single_input_op_to_numpy(
        keras.backend.spatial_2d_padding,
        ref_op,
        input_shape=(2, 3, 2, 3),
        keras_args=[((2, 3), (1, 2))],
        keras_kwargs={'data_format': 'channels_first'},
        np_args=[((2, 3), (1, 2))],
        np_kwargs={'data_format': 'channels_first'})

  def test_spatial_3d_padding(self):
    """`spatial_3d_padding` must zero-pad D/H/W in both data formats."""

    def ref_op(x, padding, data_format='channels_last'):
      # Same idea as the 2-D reference, with three spatial axes.
      shape = list(x.shape)
      if data_format == 'channels_last':
        shape[1] += padding[0][0] + padding[0][1]
        shape[2] += padding[1][0] + padding[1][1]
        shape[3] += padding[2][0] + padding[2][1]
        y = np.zeros(tuple(shape))
        y[:,
          padding[0][0]:-padding[0][1],
          padding[1][0]:-padding[1][1],
          padding[2][0]:-padding[2][1],
          :] = x
      else:
        shape[2] += padding[0][0] + padding[0][1]
        shape[3] += padding[1][0] + padding[1][1]
        shape[4] += padding[2][0] + padding[2][1]
        y = np.zeros(tuple(shape))
        y[:, :,
          padding[0][0]:-padding[0][1],
          padding[1][0]:-padding[1][1],
          padding[2][0]:-padding[2][1]] = x
      return y

    compare_single_input_op_to_numpy(
        keras.backend.spatial_3d_padding,
        ref_op,
        input_shape=(2, 3, 2, 3, 2),
        keras_args=[((2, 3), (1, 2), (2, 3))],
        keras_kwargs={'data_format': 'channels_last'},
        np_args=[((2, 3), (1, 2), (2, 3))],
        np_kwargs={'data_format': 'channels_last'})
    compare_single_input_op_to_numpy(
        keras.backend.spatial_3d_padding,
        ref_op,
        input_shape=(2, 3, 2, 3, 2),
        keras_args=[((2, 3), (1, 2), (2, 3))],
        keras_kwargs={'data_format': 'channels_first'},
        np_args=[((2, 3), (1, 2), (2, 3))],
        np_kwargs={'data_format': 'channels_first'})
@test_util.run_all_in_graph_and_eager_modes
class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
def test_bias_add(self):
keras_op = keras.backend.bias_add
np_op = np.add
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 7),
input_shape_b=(7,))
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 3, 7),
input_shape_b=(7,))
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 3, 5, 7),
input_shape_b=(7,))
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 3, 5, 2, 7),
input_shape_b=(7,))
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
x = keras.backend.variable((3, 4))
b = keras.backend.variable((3, 4))
keras.backend.bias_add(x, b)
with self.assertRaises(ValueError):
x = keras.backend.variable((3, 4))
b = keras.backend.variable((4,))
keras.backend.bias_add(x, b, data_format='unknown')
def test_bias_add_channels_first(self):
def keras_op(x, b):
return keras.backend.bias_add(x, b, data_format='channels_first')
def np_op(x, b):
if x.ndim == 3:
b = b.reshape((1, b.shape[0], 1))
if x.ndim == 4:
b = b.reshape((1, b.shape[0], 1, 1))
return x + b
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 3, 7),
input_shape_b=(3,))
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 3, 5, 7),
input_shape_b=(3,))
def test_pool2d(self):
val = np.random.random((10, 3, 10, 10))
x = keras.backend.variable(val)
y = keras.backend.pool2d(x, (2, 2), strides=(1, 1),
padding='valid', data_format='channels_first',
pool_mode='max')
self.assertEqual(y.shape.as_list(), [10, 3, 9, 9])
y = keras.backend.pool2d(x, (2, 2), strides=(1, 1),
padding='valid', data_format='channels_first',
pool_mode='avg')
self.assertEqual(y.shape.as_list(), [10, 3, 9, 9])
val = np.random.random((10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool2d(x, (2, 2), strides=(1, 1),
padding='valid', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 9, 9, 3])
val = np.random.random((10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool2d(x, (2, 2), strides=(1, 1),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 10, 10, 3])
val = np.random.random((10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool2d(x, (2, 2), strides=(2, 2),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 5, 5, 3])
with self.assertRaises(ValueError):
y = keras.backend.pool2d(x, (2, 2), strides=(2, 2),
padding='other', data_format='channels_last')
with self.assertRaises(ValueError):
y = keras.backend.pool2d(x, (2, 2), strides=(2, 2),
data_format='other')
with self.assertRaises(ValueError):
y = keras.backend.pool2d(x, (2, 2, 2), strides=(2, 2))
with self.assertRaises(ValueError):
y = keras.backend.pool2d(x, (2, 2), strides=(2, 2, 2))
with self.assertRaises(ValueError):
y = keras.backend.pool2d(x, (2, 2), strides=(2, 2), pool_mode='other')
def test_pool3d(self):
if test.is_built_with_rocm():
self.skipTest('Pooling with 3D tensors is not supported in ROCm')
val = np.random.random((10, 3, 10, 10, 10))
x = keras.backend.variable(val)
y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1),
padding='valid', data_format='channels_first',
pool_mode='max')
self.assertEqual(y.shape.as_list(), [10, 3, 9, 9, 9])
y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1),
padding='valid', data_format='channels_first',
pool_mode='avg')
self.assertEqual(y.shape.as_list(), [10, 3, 9, 9, 9])
val = np.random.random((10, 10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1),
padding='valid', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 9, 9, 9, 3])
val = np.random.random((10, 10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 10, 10, 10, 3])
val = np.random.random((10, 10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool3d(x, (2, 2, 2), strides=(2, 2, 2),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 5, 5, 5, 3])
def test_conv1d(self):
val = np.random.random((10, 4, 10))
x = keras.backend.variable(val)
kernel_val = np.random.random((3, 4, 5))
k = keras.backend.variable(kernel_val)
y = keras.backend.conv1d(x, k, strides=(1,),
padding='valid', data_format='channels_first')
self.assertEqual(y.shape.as_list(), [10, 5, 8])
val = np.random.random((10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv1d(x, k, strides=(1,),
padding='valid', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 8, 5])
val = np.random.random((10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv1d(x, k, strides=(1,),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 10, 5])
val = np.random.random((10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv1d(x, k, strides=(2,),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 5, 5])
def test_local_conv_channels_dim(self):
filters = 3
batch_size = 2
for input_shape in [(3, 5), (2, 3, 5), (2, 5, 3, 4)]:
channels_in = input_shape[0]
input_spatial_shape = input_shape[1:]
dim = len(input_spatial_shape)
inputs = np.random.normal(0, 1, (batch_size,) + input_shape)
inputs_cf = keras.backend.variable(inputs)
for kernel_size in [1, 2]:
for stride in [1, 2]:
kernel_sizes = (kernel_size,) * dim
strides = (stride,) * dim
output_shape = tuple([(i - kernel_size + stride) // stride
for i in input_spatial_shape])
kernel_shape = (np.prod(output_shape),
np.prod(kernel_sizes) * channels_in,
filters)
kernel = np.random.normal(
0,
1,
output_shape + (channels_in, np.prod(kernel_sizes), filters)
)
kernel_cf = np.reshape(kernel, kernel_shape)
kernel_cf = keras.backend.variable(kernel_cf)
conv_cf = keras.backend.local_conv(inputs_cf,
kernel_cf,
kernel_sizes,
strides,
output_shape,
'channels_first')
inputs_cl = np.transpose(inputs, [0, 2] + list(range(3, dim + 2)) +
[1])
inputs_cl = keras.backend.variable(inputs_cl)
kernel_cl = np.reshape(
np.transpose(kernel, list(range(dim)) + [dim + 1, dim, dim + 2]),
kernel_shape
)
kernel_cl = keras.backend.variable(kernel_cl)
conv_cl = keras.backend.local_conv(inputs_cl,
kernel_cl,
kernel_sizes,
strides,
output_shape,
'channels_last')
conv_cf = keras.backend.eval(conv_cf)
conv_cl = keras.backend.eval(conv_cl)
self.assertAllCloseAccordingToType(
conv_cf,
np.transpose(conv_cl,
[0, dim + 1] + list(range(1, dim + 1))),
atol=1e-5
)
@parameterized.named_parameters(
('local_conv1d', (5, 6), (3,), (1,), (3,)),
('local_conv2d', (4, 5, 6), (3, 3), (1, 1), (2, 3)))
def test_local_conv_1d_and_2d(self,
input_shape,
kernel_sizes,
strides,
output_shape):
filters = 3
batch_size = 2
inputs = np.random.normal(0, 1, (batch_size,) + input_shape)
inputs = keras.backend.variable(inputs)
kernel = np.random.normal(0, 1, (np.prod(output_shape),
np.prod(kernel_sizes) * input_shape[-1],
filters))
kernel = keras.backend.variable(kernel)
local_conv = keras.backend.local_conv(inputs,
kernel,
kernel_sizes,
strides,
output_shape,
'channels_last')
if len(output_shape) == 1:
local_conv_dim = keras.backend.local_conv1d(inputs,
kernel,
kernel_sizes,
strides,
'channels_last')
else:
local_conv_dim = keras.backend.local_conv2d(inputs,
kernel,
kernel_sizes,
strides,
output_shape,
'channels_last')
local_conv = keras.backend.eval(local_conv)
local_conv_dim = keras.backend.eval(local_conv_dim)
self.assertAllCloseAccordingToType(local_conv, local_conv_dim)
def test_conv2d(self):
kernel_val = np.random.random((3, 3, 4, 5))
k = keras.backend.variable(kernel_val)
# Test channels_first
val = np.random.random((10, 4, 10, 10))
x = keras.backend.variable(val)
y = keras.backend.conv2d(x, k,
padding='valid', data_format='channels_first')
self.assertEqual(y.shape.as_list(), [10, 5, 8, 8])
# Test channels_last
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv2d(x, k, strides=(1, 1),
padding='valid', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 8, 8, 5])
# Test same padding
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv2d(x, k,
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 10, 10, 5])
# Test dilation_rate
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv2d(x, k, dilation_rate=(2, 2),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 10, 10, 5])
# Test strides
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv2d(x, k, strides=(2, 2),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 5, 5, 5])
# Test invalid arguments
with self.assertRaises(ValueError):
y = keras.backend.conv2d(x, k, (2, 2),
padding='other', data_format='channels_last')
with self.assertRaises(ValueError):
y = keras.backend.conv2d(x, k, (2, 2),
data_format='other')
with self.assertRaises(ValueError):
y = keras.backend.conv2d(x, k, (2, 2, 2))
def test_conv2d_transpose(self):
input_size = (7, 8)
kernel_size = (3, 3)
input_depth = 6
filters = 6
batch_size = 2
kernel_val = np.random.random(kernel_size + (input_depth, filters))
k = keras.backend.variable(kernel_val)
# Test channels_first
input_val = np.random.random((batch_size, input_depth) + input_size)
x = keras.backend.variable(input_val)
y = keras.backend.conv2d_transpose(x, k, (batch_size, filters) + input_size,
padding='same',
data_format='channels_first')
self.assertEqual(
tuple(y.shape.as_list()), (batch_size, filters) + input_size)
# Test channels_last
input_val = np.random.random((batch_size,) + input_size + (input_depth,))
x = keras.backend.variable(input_val)
y = keras.backend.conv2d_transpose(
x, k, (batch_size,) + input_size + (filters,),
padding='same',
data_format='channels_last')
self.assertEqual(
tuple(y.shape.as_list()), (batch_size,) + input_size + (filters,))
# Test dilation_rate
y = keras.backend.conv2d_transpose(
x, k, (batch_size,) + input_size + (filters,),
padding='same',
data_format='channels_last',
dilation_rate=(2, 2))
self.assertEqual(
tuple(y.shape.as_list()), (batch_size,) + input_size + (filters,))
# Test batch size of None in output_shape
y = keras.backend.conv2d_transpose(x, k, (None,) + input_size + (filters,),
padding='same',
data_format='channels_last')
self.assertEqual(
tuple(y.shape.as_list()), (batch_size,) + input_size + (filters,))
# Test invalid values
with self.assertRaises(ValueError):
y = keras.backend.conv2d_transpose(x, k, (2, 2, 8, 9),
padding='other',
data_format='channels_last')
with self.assertRaises(ValueError):
y = keras.backend.conv2d_transpose(x, k, (2, 2, 8, 9),
data_format='other')
def test_separable_conv2d(self):
val = np.random.random((10, 4, 10, 10))
x = keras.backend.variable(val)
depthwise_kernel_val = np.random.random((3, 3, 4, 1))
pointwise_kernel_val = np.random.random((1, 1, 4, 5))
dk = keras.backend.variable(depthwise_kernel_val)
pk = keras.backend.variable(pointwise_kernel_val)
y = keras.backend.separable_conv2d(
x, dk, pk, padding='valid', data_format='channels_first')
self.assertEqual(y.shape.as_list(), [10, 5, 8, 8])
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.separable_conv2d(
x, dk, pk, strides=(1, 1), padding='valid', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 8, 8, 5])
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.separable_conv2d(
x, dk, pk, strides=(1, 1), padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 10, 10, 5])
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.separable_conv2d(
x, dk, pk, strides=(2, 2), padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 5, 5, 5])
with self.assertRaises(ValueError):
y = keras.backend.separable_conv2d(
x, dk, pk, (2, 2), padding='other', data_format='channels_last')
with self.assertRaises(ValueError):
y = keras.backend.separable_conv2d(
x, dk, pk, (2, 2), data_format='other')
with self.assertRaises(ValueError):
y = keras.backend.separable_conv2d(x, dk, pk, (2, 2, 2))
def test_conv3d(self):
val = np.random.random((10, 4, 10, 10, 10))
x = keras.backend.variable(val)
kernel_val = np.random.random((3, 3, 3, 4, 5))
k = keras.backend.variable(kernel_val)
y = keras.backend.conv3d(x, k,
padding='valid', data_format='channels_first')
self.assertEqual(y.shape.as_list(), [10, 5, 8, 8, 8])
val = np.random.random((10, 10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv3d(x, k, strides=(1, 1, 1),
padding='valid', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 8, 8, 8, 5])
val = np.random.random((10, 10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv3d(x, k, strides=(1, 1, 1),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 10, 10, 10, 5])
val = np.random.random((10, 10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv3d(x, k, strides=(2, 2, 2),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 5, 5, 5, 5])
with self.assertRaises(ValueError):
y = keras.backend.conv3d(x, k, (2, 2, 2),
padding='other', data_format='channels_last')
with self.assertRaises(ValueError):
y = keras.backend.conv3d(x, k, (2, 2, 2),
data_format='other')
with self.assertRaises(ValueError):
y = keras.backend.conv3d(x, k, (2, 2))
def test_rnn(self):
# implement a simple RNN
num_samples = 4
input_dim = 5
output_dim = 3
timesteps = 6
input_val = np.random.random(
(num_samples, timesteps, input_dim)).astype(np.float32)
init_state_val = np.random.random(
(num_samples, output_dim)).astype(np.float32)
w_i_val = np.random.random((input_dim, output_dim)).astype(np.float32)
w_o_val = np.random.random((output_dim, output_dim)).astype(np.float32)
np_mask = np.random.randint(2, size=(num_samples, timesteps))
def rnn_step_fn():
w_i = keras.backend.variable(w_i_val)
w_o = keras.backend.variable(w_o_val)
def step_function(x, states):
assert len(states) == 1
prev_output = states[0]
output = keras.backend.dot(x, w_i) + keras.backend.dot(prev_output, w_o)
return output, [output]
return step_function
# test default setup
last_output_list = [[], [], [], [], [], []]
outputs_list = [[], [], [], [], [], []]
state_list = [[], [], [], [], [], []]
rnn_fn = rnn_step_fn()
inputs = keras.backend.variable(input_val)
initial_states = [keras.backend.variable(init_state_val)]
mask = keras.backend.variable(np_mask)
kwargs_list = [
{'go_backwards': False, 'mask': None},
{'go_backwards': False, 'mask': None, 'unroll': True},
{'go_backwards': True, 'mask': None},
{'go_backwards': True, 'mask': None, 'unroll': True},
{'go_backwards': False, 'mask': mask},
{'go_backwards': False, 'mask': mask, 'unroll': True},
]
for i, kwargs in enumerate(kwargs_list):
last_output, outputs, new_states = keras.backend.rnn(rnn_fn, inputs,
initial_states,
**kwargs)
# check static shape inference
self.assertEqual(last_output.shape.as_list(), [num_samples, output_dim])
self.assertEqual(outputs.shape.as_list(),
[num_samples, timesteps, output_dim])
for state in new_states:
self.assertEqual(state.shape.as_list(), [num_samples, output_dim])
last_output_list[i].append(keras.backend.eval(last_output))
outputs_list[i].append(keras.backend.eval(outputs))
self.assertLen(new_states, 1)
state_list[i].append(keras.backend.eval(new_states[0]))
def assert_list_pairwise(z_list, atol=1e-05):
for (z1, z2) in zip(z_list[1:], z_list[:-1]):
self.assertAllClose(z1, z2, atol=atol)
assert_list_pairwise(last_output_list[0], atol=1e-04)
assert_list_pairwise(outputs_list[0], atol=1e-04)
assert_list_pairwise(state_list[0], atol=1e-04)
assert_list_pairwise(last_output_list[2], atol=1e-04)
assert_list_pairwise(outputs_list[2], atol=1e-04)
assert_list_pairwise(state_list[2], atol=1e-04)
for l, u_l in zip(last_output_list[0], last_output_list[1]):
self.assertAllClose(l, u_l, atol=1e-04)
for o, u_o in zip(outputs_list[0], outputs_list[1]):
self.assertAllClose(o, u_o, atol=1e-04)
for s, u_s in zip(state_list[0], state_list[1]):
self.assertAllClose(s, u_s, atol=1e-04)
for b_l, b_u_l in zip(last_output_list[2], last_output_list[3]):
self.assertAllClose(b_l, b_u_l, atol=1e-04)
for b_o, b_u_o in zip(outputs_list[2], outputs_list[3]):
self.assertAllClose(b_o, b_u_o, atol=1e-04)
for b_s, b_u_s in zip(state_list[2], state_list[3]):
self.assertAllClose(b_s, b_u_s, atol=1e-04)
def test_rnn_additional_states(self):
# implement a simple RNN
num_samples = 4
input_dim = 5
output_dim = 3
timesteps = 6
input_val = np.random.random(
(num_samples, timesteps, input_dim)).astype(np.float32)
init_state_val = np.random.random(
(num_samples, output_dim)).astype(np.float32)
w_i_val = np.random.random((input_dim, output_dim)).astype(np.float32)
w_o_val = np.random.random((output_dim, output_dim)).astype(np.float32)
np_mask = np.random.randint(2, size=(num_samples, timesteps))
def rnn_step_fn():
w_i = keras.backend.variable(w_i_val)
w_o = keras.backend.variable(w_o_val)
def step_function(x, states):
assert len(states) == 2
prev_output = states[0]
output = keras.backend.dot(x, w_i) + keras.backend.dot(prev_output, w_o)
return output, [output,
keras.backend.concatenate([output, output], axis=-1)]
return step_function
# test default setup
last_output_list = [[], [], [], [], [], []]
outputs_list = [[], [], [], [], [], []]
state_list = [[], [], [], [], [], []]
additional_state_list = [[], [], [], [], [], []]
rnn_fn = rnn_step_fn()
inputs = keras.backend.variable(input_val)
initial_states = [
keras.backend.variable(init_state_val),
ops.convert_to_tensor(
np.concatenate([init_state_val, init_state_val], axis=-1))
]
mask = keras.backend.variable(np_mask)
kwargs_list = [
{'go_backwards': False, 'mask': None},
{'go_backwards': False, 'mask': None, 'unroll': True},
{'go_backwards': True, 'mask': None},
{'go_backwards': True, 'mask': None, 'unroll': True},
{'go_backwards': False, 'mask': mask},
{'go_backwards': False, 'mask': mask, 'unroll': True},
]
for i, kwargs in enumerate(kwargs_list):
last_output, outputs, new_states = keras.backend.rnn(rnn_fn, inputs,
initial_states,
**kwargs)
# check static shape inference
self.assertEqual(last_output.shape.as_list(), [num_samples, output_dim])
self.assertEqual(outputs.shape.as_list(),
[num_samples, timesteps, output_dim])
# for state in new_states:
# self.assertEqual(state.shape.as_list(),
# [num_samples, output_dim])
self.assertEqual(new_states[0].shape.as_list(), [num_samples, output_dim])
self.assertEqual(new_states[1].shape.as_list(),
[num_samples, 2 * output_dim])
last_output_list[i].append(keras.backend.eval(last_output))
outputs_list[i].append(keras.backend.eval(outputs))
self.assertLen(new_states, 2)
state_list[i].append(keras.backend.eval(new_states[0]))
additional_state_list[i].append(keras.backend.eval(new_states[1]))
def assert_list_pairwise(z_list, atol=1e-05):
for (z1, z2) in zip(z_list[1:], z_list[:-1]):
self.assertAllClose(z1, z2, atol=atol)
assert_list_pairwise(last_output_list[0], atol=1e-04)
assert_list_pairwise(outputs_list[0], atol=1e-04)
assert_list_pairwise(state_list[0], atol=1e-04)
assert_list_pairwise(additional_state_list[0], atol=1e-04)
assert_list_pairwise(last_output_list[2], atol=1e-04)
assert_list_pairwise(outputs_list[2], atol=1e-04)
assert_list_pairwise(state_list[2], atol=1e-04)
assert_list_pairwise(additional_state_list[2], atol=1e-04)
for l, u_l in zip(last_output_list[0], last_output_list[1]):
self.assertAllClose(l, u_l, atol=1e-04)
for o, u_o in zip(outputs_list[0], outputs_list[1]):
self.assertAllClose(o, u_o, atol=1e-04)
for s, u_s in zip(state_list[0], state_list[1]):
self.assertAllClose(s, u_s, atol=1e-04)
for s, u_s in zip(additional_state_list[0], additional_state_list[1]):
self.assertAllClose(s, u_s, atol=1e-04)
for b_l, b_u_l in zip(last_output_list[2], last_output_list[3]):
self.assertAllClose(b_l, b_u_l, atol=1e-04)
for b_o, b_u_o in zip(outputs_list[2], outputs_list[3]):
self.assertAllClose(b_o, b_u_o, atol=1e-04)
for b_s, b_u_s in zip(state_list[2], state_list[3]):
self.assertAllClose(b_s, b_u_s, atol=1e-04)
for s, u_s in zip(additional_state_list[2], additional_state_list[3]):
self.assertAllClose(s, u_s, atol=1e-04)
def test_rnn_output_and_state_masking_independent(self):
num_samples = 2
num_timesteps = 4
state_and_io_size = 2
mask_last_num_timesteps = 2 # for second sample only
# a step function that just outputs inputs,
# but increments states +1 per timestep
def step_function(inputs, states):
return inputs, [s + 1 for s in states]
inputs_vals = np.random.random((num_samples, num_timesteps,
state_and_io_size))
initial_state_vals = np.random.random((num_samples, state_and_io_size))
# masking of two last timesteps for second sample only
mask_vals = np.ones((num_samples, num_timesteps))
mask_vals[1, -mask_last_num_timesteps:] = 0
# outputs expected to be same as inputs for the first sample
expected_outputs = inputs_vals.copy()
# but for the second sample all outputs in masked region should be the same
# as last output before masked region
expected_outputs[1, -mask_last_num_timesteps:] = \
expected_outputs[1, -(mask_last_num_timesteps + 1)]
expected_last_state = initial_state_vals.copy()
# first state should be incremented for every timestep (no masking)
expected_last_state[0] += num_timesteps
# second state should not be incremented for last two timesteps
expected_last_state[1] += (num_timesteps - mask_last_num_timesteps)
# verify same expected output for `unroll=true/false`
inputs = keras.backend.variable(inputs_vals)
initial_states = [keras.backend.variable(initial_state_vals)]
mask = keras.backend.variable(mask_vals)
for unroll in [True, False]:
_, outputs, last_states = keras.backend.rnn(
step_function,
inputs,
initial_states,
mask=mask,
unroll=unroll,
input_length=num_timesteps if unroll else None)
self.assertAllClose(keras.backend.eval(outputs), expected_outputs)
self.assertAllClose(
keras.backend.eval(last_states[0]), expected_last_state)
def test_rnn_output_num_dim_larger_than_2_masking(self):
num_samples = 3
num_timesteps = 4
num_features = 5
def step_function(inputs, states):
outputs = keras.backend.tile(keras.backend.expand_dims(inputs), [1, 1, 2])
return outputs, [keras.backend.identity(s) for s in states]
# Note: cannot just return states (which can be a problem) ->
# tensorflow/python/ops/resource_variable_ops.py", line 824, in set_shape
# NotImplementedError: ResourceVariable does not implement set_shape()
inputs_vals = np.random.random((num_samples, num_timesteps, num_features))
initial_state_vals = np.random.random((num_samples, 6))
mask_vals = np.ones((num_samples, num_timesteps))
mask_vals[-1, -1] = 0 # final timestep masked for last sample
expected_outputs = np.repeat(inputs_vals[..., None], repeats=2, axis=-1)
# for the last sample, the final timestep (in masked region) should be the
# same as the second to final output (before masked region)
expected_outputs[-1, -1] = expected_outputs[-1, -2]
inputs = keras.backend.variable(inputs_vals)
initial_states = [keras.backend.variable(initial_state_vals)]
mask = keras.backend.variable(mask_vals)
for unroll in [True, False]:
_, outputs, _ = keras.backend.rnn(
step_function,
inputs,
initial_states,
mask=mask,
unroll=unroll,
input_length=num_timesteps if unroll else None)
self.assertAllClose(keras.backend.eval(outputs), expected_outputs)
def test_rnn_state_num_dim_larger_than_2_masking(self):
num_samples = 3
num_timesteps = 4
def step_function(inputs, states):
return inputs, [s + 1 for s in states]
inputs_vals = np.random.random((num_samples, num_timesteps, 5))
initial_state_vals = np.random.random((num_samples, 6, 7))
mask_vals = np.ones((num_samples, num_timesteps))
mask_vals[0, -2:] = 0 # final two timesteps masked for first sample
expected_last_state = initial_state_vals.copy()
expected_last_state[0] += (num_timesteps - 2)
expected_last_state[1:] += num_timesteps
inputs = keras.backend.variable(inputs_vals)
initial_states = [keras.backend.variable(initial_state_vals)]
mask = keras.backend.variable(mask_vals)
for unroll in [True, False]:
_, _, last_states = keras.backend.rnn(
step_function,
inputs,
initial_states,
mask=mask,
unroll=unroll,
input_length=num_timesteps if unroll else None)
self.assertAllClose(
keras.backend.eval(last_states[0]), expected_last_state)
def test_batch_normalization(self):
g_val = np.random.random((3,))
b_val = np.random.random((3,))
gamma = keras.backend.variable(g_val)
beta = keras.backend.variable(b_val)
# 3D NHC case
val = np.random.random((10, 5, 3))
x = keras.backend.variable(val)
mean, var = nn.moments(x, (0, 1), None, None, False)
normed = keras.backend.batch_normalization(
x, mean, var, beta, gamma, axis=-1, epsilon=1e-3)
self.assertEqual(normed.shape.as_list(), [10, 5, 3])
# 4D NHWC case
val = np.random.random((10, 5, 5, 3))
x = keras.backend.variable(val)
mean, var = nn.moments(x, (0, 1, 2), None, None, False)
normed = keras.backend.batch_normalization(
x, mean, var, beta, gamma, axis=-1, epsilon=1e-3)
self.assertEqual(normed.shape.as_list(), [10, 5, 5, 3])
# 4D NCHW case
if not context.executing_eagerly():
# Eager CPU kernel for NCHW does not exist.
val = np.random.random((10, 3, 5, 5))
x = keras.backend.variable(val)
mean, var = nn.moments(x, (0, 2, 3), None, None, False)
normed = keras.backend.batch_normalization(
x, mean, var, beta, gamma, axis=1, epsilon=1e-3)
self.assertEqual(normed.shape.as_list(), [10, 3, 5, 5])
def test_normalize_batch_in_training(self):
val = np.random.random((10, 3, 10, 10))
x = keras.backend.variable(val)
reduction_axes = (0, 2, 3)
g_val = np.random.random((3,))
b_val = np.random.random((3,))
gamma = keras.backend.variable(g_val)
beta = keras.backend.variable(b_val)
normed, mean, var = keras.backend.normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=1e-3)
self.assertEqual(normed.shape.as_list(), [10, 3, 10, 10])
self.assertEqual(mean.shape.as_list(), [
3,
])
self.assertEqual(var.shape.as_list(), [
3,
])
# case: gamma=None
gamma = None
normed, mean, var = keras.backend.normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=1e-3)
self.assertEqual(normed.shape.as_list(), [10, 3, 10, 10])
self.assertEqual(mean.shape.as_list(), [
3,
])
self.assertEqual(var.shape.as_list(), [
3,
])
# case: beta=None
beta = None
normed, mean, var = keras.backend.normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=1e-3)
self.assertEqual(normed.shape.as_list(), [10, 3, 10, 10])
self.assertEqual(mean.shape.as_list(), [
3,
])
self.assertEqual(var.shape.as_list(), [
3,
])
def test_dropout(self):
  """Checks dropout rate statistics and the noise_shape broadcast behavior."""
  inputs = array_ops.ones((200, 200))

  # Rate 0.2 on 40000 ones: expect roughly 32000 surviving (nonzero) entries.
  dropped = keras.backend.dropout(inputs, 0.2)
  dropped_val = keras.backend.eval(dropped)
  self.assertEqual(np.min(dropped_val), 0)
  self.assertAllClose(np.count_nonzero(dropped_val), 32000, atol=1000)

  # With noise_shape=(200, 1) the mask is shared along the singleton axis;
  # the original test asserts rows 2 and 3 end up close.
  dropped = keras.backend.dropout(inputs, 0.2, noise_shape=(200, 1))
  dropped_val = keras.backend.eval(dropped)
  self.assertAllClose(dropped_val[2, :], dropped_val[3, :], atol=1e-5)
class BackendCrossEntropyLossesTest(test.TestCase):
  """Tests for the Keras backend cross-entropy loss primitives."""

  @test_util.run_in_graph_and_eager_modes
  def test_categorical_crossentropy_loss(self):
    t = keras.backend.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])

    p = keras.backend.constant([[.9, .05, .05], [.05, .89, .06],
                                [.05, .01, .94]])
    result = keras.backend.categorical_crossentropy(t, p)
    self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)

    p = keras.backend.constant([[.9, .05, .05], [.05, .89, .01],
                                [.05, .06, .94]])
    result = keras.backend.categorical_crossentropy(t, p, axis=0)
    self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)

    # BUG FIX: the original statements ended with a stray trailing comma,
    # which wrapped `result` in a 1-tuple and forced a compensating `[0]`
    # index in the assertion; both the comma and the index are removed.
    p = keras.backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    result = keras.backend.categorical_crossentropy(t, p, from_logits=True)
    self.assertArrayNear(self.evaluate(result), [.002, 0, .17], 1e-3)

    p = keras.backend.constant([[8., 0., 2.], [1., 9., 3.], [1., 1., 5.]])
    result = keras.backend.categorical_crossentropy(
        t, p, from_logits=True, axis=0)
    self.assertArrayNear(self.evaluate(result), [.002, 0, .17], 1e-3)

  @test_util.run_in_graph_and_eager_modes
  def test_categorical_crossentropy_loss_with_unknown_rank_tensor(self):
    t = keras.backend.placeholder()
    p = keras.backend.placeholder()
    o = keras.backend.categorical_crossentropy(t, p)

    t_val = ops.convert_to_tensor([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
    p_val = ops.convert_to_tensor([[.9, .05, .05], [.05, .89, .06],
                                   [.05, .01, .94]])
    f = keras.backend.function([t, p], o)
    result = f([t_val, p_val])
    self.assertArrayNear(result, [.105, .116, .062], 1e-3)

    # With axis set.
    o = keras.backend.categorical_crossentropy(t, p, axis=0)
    f = keras.backend.function([t, p], o)
    result = f([t_val, p_val])
    self.assertArrayNear(result, [.105, .065, .111], 1e-3)

    # From logits.
    p_val = ops.convert_to_tensor([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    o = keras.backend.categorical_crossentropy(t, p, from_logits=True)
    f = keras.backend.function([t, p], o)
    result = f([t_val, p_val])
    self.assertArrayNear(result, [.002, 0, .17], 1e-3)

    # From logits and axis set.
    o = keras.backend.categorical_crossentropy(t, p, from_logits=True, axis=0)
    f = keras.backend.function([t, p], o)
    result = f([t_val, p_val])
    self.assertArrayNear(result, [.002, .003, .036], 1e-3)

  @test_util.run_in_graph_and_eager_modes
  def test_sparse_categorical_crossentropy_loss(self):
    t = keras.backend.constant([0, 1, 2])

    p = keras.backend.constant([[.9, .05, .05], [.05, .89, .06],
                                [.05, .01, .94]])
    result = keras.backend.sparse_categorical_crossentropy(t, p)
    self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)

    p = keras.backend.constant([[.9, .05, .05], [.05, .89, .01],
                                [.05, .06, .94]])
    result = keras.backend.sparse_categorical_crossentropy(t, p, axis=0)
    self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)

    # BUG FIX: stray trailing commas (1-tuples) and the compensating `[0]`
    # indexes removed here as well.
    p = keras.backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    result = keras.backend.sparse_categorical_crossentropy(
        t, p, from_logits=True)
    self.assertArrayNear(self.evaluate(result), [.002, 0, .17], 1e-3)

    p = keras.backend.constant([[8., 0., 2.], [1., 9., 3.], [1., 1., 5.]])
    result = keras.backend.sparse_categorical_crossentropy(
        t, p, from_logits=True, axis=0)
    self.assertArrayNear(self.evaluate(result), [.002, 0, .17], 1e-3)

  @test_util.run_in_graph_and_eager_modes
  def test_sparse_categorical_crossentropy_loss_with_unknown_rank_tensor(self):
    t = keras.backend.placeholder()
    p = keras.backend.placeholder()
    o = keras.backend.sparse_categorical_crossentropy(t, p)

    t_val = ops.convert_to_tensor([0, 1, 2])
    p_val = ops.convert_to_tensor([[.9, .05, .05], [.05, .89, .06],
                                   [.05, .01, .94]])
    f = keras.backend.function([t, p], o)
    result = f([t_val, p_val])
    self.assertArrayNear(result, [.105, .116, .062], 1e-3)

    # `axis != -1` is rejected for unknown-rank inputs.
    with self.assertRaisesRegex(
        ValueError,
        'Cannot compute sparse categorical crossentropy with `axis=0`'):
      o = keras.backend.sparse_categorical_crossentropy(t, p, axis=0)
      f = keras.backend.function([t, p], o)
      _ = f([t_val, p_val])

    # From logits.
    p_val = ops.convert_to_tensor([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    o = keras.backend.sparse_categorical_crossentropy(t, p, from_logits=True)
    f = keras.backend.function([t, p], o)
    result = f([t_val, p_val])
    self.assertArrayNear(result, [.002, 0, .17], 1e-3)

    # From logits with axis set: also rejected for unknown-rank inputs.
    with self.assertRaisesRegex(
        ValueError,
        'Cannot compute sparse categorical crossentropy with `axis=0`'):
      o = keras.backend.sparse_categorical_crossentropy(
          t, p, from_logits=True, axis=0)
      f = keras.backend.function([t, p], o)
      _ = f([t_val, p_val])
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class TestCTC(test.TestCase):
"""Tests for the Keras backend CTC helpers (beam-search decode, batch cost)."""
def test_ctc_decode(self):
# Single-batch beam search over a hand-built probability matrix; the
# expected beams and their log-probabilities are hard-coded below.
depth = 6
seq_len_0 = 5
input_prob_matrix_0 = np.asarray(
[[0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],
[0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],
[0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],
[0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],
[0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],
# Random entry added in at time=5
[0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]],
dtype=np.float32)
# len max_time_steps array of batch_size x depth matrices
inputs = ([input_prob_matrix_0[t, :][np.newaxis, :]
for t in range(seq_len_0)] + # Pad to max_time_steps = 8
2 * [np.zeros((1, depth), dtype=np.float32)])
# ctc_decode expects (batch, time, depth); the list above is time-major.
inputs = keras.backend.variable(np.asarray(inputs).transpose((1, 0, 2)))
# batch_size length vector of sequence_lengths
input_length = keras.backend.variable(
np.array([seq_len_0], dtype=np.int32))
# batch_size length vector of negative log probabilities
log_prob_truth = np.array([
-3.5821197, # output beam 0
-3.777835 # output beam 1
], np.float32)[np.newaxis, :]
decode_truth = [np.array([1, 0]), np.array([0, 1, 0])]
beam_width = 2
top_paths = 2
decode_pred_tf, log_prob_pred_tf = keras.backend.ctc_decode(
inputs,
input_length,
greedy=False,
beam_width=beam_width,
top_paths=top_paths)
self.assertEqual(len(decode_pred_tf), top_paths)
log_prob_pred = keras.backend.eval(log_prob_pred_tf)
# Each predicted beam must exactly match the expected label sequence.
for i in range(top_paths):
self.assertTrue(
np.alltrue(
decode_truth[i] == keras.backend.eval(decode_pred_tf[i])))
self.assertAllClose(log_prob_truth, log_prob_pred)
@test_util.run_v1_only('b/120545219')
def test_ctc_batch_cost(self):
# Two-sample batch; expected per-sample losses are hard-coded.
with self.cached_session():
label_lens = np.expand_dims(np.asarray([5, 4]), 1)
input_lens = np.expand_dims(np.asarray([5, 5]), 1) # number of timesteps
loss_log_probs = [3.34211, 5.42262]
# dimensions are batch x time x categories
labels = np.asarray([[0, 1, 2, 1, 0], [0, 1, 1, 0, -1]])
inputs = np.asarray(
[[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]],
dtype=np.float32)
labels = keras.backend.variable(labels, dtype='int32')
inputs = keras.backend.variable(inputs, dtype='float32')
input_lens = keras.backend.variable(input_lens, dtype='int32')
label_lens = keras.backend.variable(label_lens, dtype='int32')
res = keras.backend.eval(
keras.backend.ctc_batch_cost(labels, inputs, input_lens, label_lens))
self.assertAllClose(res[:, 0], loss_log_probs, atol=1e-05)
# test when batch_size = 1, that is, one sample only
ref = [3.34211]
input_lens = np.expand_dims(np.asarray([5]), 1)
label_lens = np.expand_dims(np.asarray([5]), 1)
labels = np.asarray([[0, 1, 2, 1, 0]])
inputs = np.asarray(
[[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553], [
0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436
], [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]]
],
dtype=np.float32)
k_labels = keras.backend.variable(labels, dtype='int32')
k_inputs = keras.backend.variable(inputs, dtype='float32')
k_input_lens = keras.backend.variable(input_lens, dtype='int32')
k_label_lens = keras.backend.variable(label_lens, dtype='int32')
res = keras.backend.eval(
keras.backend.ctc_batch_cost(k_labels, k_inputs, k_input_lens,
k_label_lens))
self.assertAllClose(res[:, 0], ref, atol=1e-05)
@test_util.run_all_in_graph_and_eager_modes
class TestRandomOps(test.TestCase):
  """Statistical sanity checks for the Keras backend random ops.

  Note: `np.random.seed` does not seed TensorFlow's RNG, so these tests rely
  on loose tolerances over large samples rather than exact values.
  """

  def test_random_normal(self):
    np.random.seed(123)
    x = keras.backend.random_normal((500, 500))
    val = keras.backend.eval(x)
    self.assertAllClose(np.mean(val), 0., atol=0.01)
    self.assertAllClose(np.std(val), 1., atol=0.01)

  def test_random_uniform(self):
    np.random.seed(123)
    x = keras.backend.random_uniform((500, 500))
    val = keras.backend.eval(x)
    self.assertAllClose(np.mean(val), 0.5, atol=0.01)
    self.assertAllClose(np.max(val), 1., atol=0.01)
    self.assertAllClose(np.min(val), 0., atol=0.01)

  def test_random_binomial(self):
    np.random.seed(123)
    x = keras.backend.random_binomial((500, 500), p=0.5)
    self.assertAllClose(np.mean(keras.backend.eval(x)), 0.5, atol=0.01)

  def test_truncated_normal(self):
    np.random.seed(123)
    # BUG FIX: the original first built a (500, 500) truncated-normal tensor
    # and immediately overwrote it with the (1000, 1000) one; the dead
    # statement has been removed.
    x = keras.backend.truncated_normal((1000, 1000), mean=0.0, stddev=1.0)
    y = keras.backend.eval(x)
    self.assertAllClose(np.mean(y), 0., atol=0.01)
    # Truncating at two standard deviations narrows the sample std dev.
    self.assertAllClose(np.std(y), 0.88, atol=0.01)
    self.assertAllClose(np.max(y), 2., atol=0.01)
    self.assertAllClose(np.min(y), -2., atol=0.01)
@test_util.run_all_in_graph_and_eager_modes
class FunctionTest(test.TestCase):
  """Tests for `keras.backend.function` input/output/update handling."""

  def test_function_basics(self):
    ph_float = keras.backend.placeholder(shape=(), dtype='float32')
    ph_int = keras.backend.placeholder(shape=(), dtype='int32')
    v = keras.backend.variable(10.)

    out_sum = ph_float + keras.backend.cast(ph_int, 'float32') + v
    out_prod = ph_float * keras.backend.cast(ph_int, 'float32')
    with ops.control_dependencies([out_sum]):
      update_op = keras.backend.update(v, ph_float)

    fn = keras.backend.function([ph_float, ph_int], [out_sum, out_prod],
                                updates=[update_op])
    # Outputs use the pre-update value of `v`; the update runs afterwards.
    self.assertEqual(fn([2, 3]), [15., 6.])
    self.assertEqual(keras.backend.eval(v), 2.)

  def test_function_dict_outputs(self):
    x_ph = keras.backend.placeholder(shape=(), name='x')
    y_ph = keras.backend.placeholder(shape=(), name='y')
    fn = keras.backend.function(
        inputs=[x_ph, y_ph],
        outputs={'x*y': y_ph * x_ph, 'x*x': x_ph * x_ph})
    results = fn([2., 5.])
    # Dict outputs come back keyed the same way they were declared.
    self.assertEqual(results['x*y'], 10.)
    self.assertEqual(results['x*x'], 4)

  def test_function_dict_inputs(self):
    placeholders = {
        'x': keras.backend.placeholder(shape=()),
        'y': keras.backend.placeholder(shape=()),
    }
    fn = keras.backend.function(
        inputs=placeholders,
        outputs=[placeholders['x'] * placeholders['y']])
    results = fn({'x': 2., 'y': 3.})
    self.assertEqual(results[0], 6.)

  def test_function_single_input_output(self):
    x_ph = keras.backend.placeholder(shape=(), name='x')
    fn = keras.backend.function(x_ph, x_ph * x_ph)
    # A bare (non-list) input/output pair is accepted as well.
    self.assertEqual(fn(2.), 4.)

  def test_tuple_updates(self):
    x_ph = keras.backend.placeholder(ndim=2)
    v = keras.backend.variable(np.ones((4, 2)))
    output = x_ph ** 2 + v
    new_v = v + x_ph
    fn = keras.backend.function(x_ph, output, updates=[(v, new_v)])
    input_val = np.random.random((4, 2))
    result = fn(input_val)
    # Output is computed against the old `v` (all ones) before the update.
    self.assertAllClose(result, input_val ** 2 + 1)
    self.assertAllClose(keras.backend.get_value(v),
                        np.ones((4, 2)) + input_val)
class BackendGraphTests(test.TestCase):
"""Graph/session-specific tests for `keras.backend.function` and sessions."""
@test_util.run_in_graph_and_eager_modes
def test_function_placeholder_with_default(self):
with keras.backend.get_graph().as_default():
x1 = array_ops.placeholder_with_default(
np.array(2., dtype='float32'), shape=())
x2 = array_ops.placeholder_with_default(
np.array(3, dtype='int32'), shape=())
y1 = x1 + keras.backend.cast(x2, 'float32')
y2 = x1 * keras.backend.cast(x2, 'float32')
f = keras.backend.function([x1, x2], [y1, y2])
output_values = f([4, 5])
self.assertEqual(output_values, [9., 20.])
# Passing None falls back to each placeholder's default value (2. and 3).
output_values = f([None, None])
self.assertEqual(output_values, [5., 6.])
@test_util.run_deprecated_v1
def test_function_tf_feed_symbols(self):
# Test Keras backend functions with TF tensor inputs.
with self.cached_session():
# Test feeding a resource variable to `function`.
x1 = keras.backend.placeholder(shape=())
x2 = keras.backend.placeholder(shape=())
lr = keras.backend.learning_phase() # Include a placeholder_with_default.
y1 = keras.backend.variable(10.)
y2 = 3
f = keras.backend.function(
inputs=[x1, x2, lr],
outputs=[x1 + 1, keras.backend.in_train_phase(x2 + 2, x2 - 1)])
outs = f([y1, y2, None]) # Use default learning_phase value.
self.assertEqual(outs, [11., 2.])
outs = f([y1, y2, 1]) # Set learning phase value.
self.assertEqual(outs, [11., 5.])
# Test triggering a callable refresh by changing the input.
y3 = keras.backend.constant(20.) # Test with tensor
outs = f([y3, y2, None])
self.assertEqual(outs, [21., 2.])
y4 = 4 # Test with non-symbol
outs = f([y4, y2, None])
self.assertEqual(outs, [5., 2.])
# Test with a different dtype
y5 = keras.backend.constant(10., dtype='float64')
outs = f([y5, y2, None])
self.assertEqual(outs, [11., 2.])
@test_util.run_deprecated_v1
def test_function_tf_fetches(self):
# Additional operations can be passed to tf.compat.v1.Session().run() via
# its `fetches` arguments. In contrast to `updates` argument of
# keras.backend.function() these do not have control dependency on `outputs`
# so they can run in parallel. Also they should not contribute to output of
# keras.backend.function().
with self.cached_session():
x = keras.backend.variable(0.)
y = keras.backend.variable(0.)
x_placeholder = keras.backend.placeholder(shape=())
y_placeholder = keras.backend.placeholder(shape=())
f = keras.backend.function(
inputs=[x_placeholder, y_placeholder],
outputs=[x_placeholder + y_placeholder],
updates=[(x, x_placeholder + 1.)],
fetches=[keras.backend.update(y, 5.)])
output = f([10., 20.])
self.assertEqual(output, [30.])
self.assertEqual(keras.backend.get_session().run(fetches=[x, y]),
[11., 5.])
@test_util.run_deprecated_v1
def test_function_tf_feed_dict(self):
# Additional substitutions can be passed to `tf.compat.v1.Session().run()`
# via its `feed_dict` arguments. Note that the feed_dict is passed once in
# the constructor but we can modify the values in the dictionary. Through
# this feed_dict we can provide additional substitutions besides Keras
# inputs.
with self.cached_session():
x = keras.backend.variable(0.)
y = keras.backend.variable(0.)
x_placeholder = keras.backend.placeholder(shape=())
y_placeholder = keras.backend.placeholder(shape=())
feed_dict = {y_placeholder: 3.}
fetches = [keras.backend.update(y, y_placeholder * 10.)]
f = keras.backend.function(
inputs=[x_placeholder],
outputs=[x_placeholder + 1.],
updates=[(x, x_placeholder + 10.)],
feed_dict=feed_dict,
fetches=fetches)
output = f([10.])
self.assertEqual(output, [11.])
self.assertEqual(keras.backend.get_session().run(fetches=[x, y]),
[20., 30.])
# updated value in feed_dict will be modified within the K.function()
feed_dict[y_placeholder] = 4.
output = f([20.])
self.assertEqual(output, [21.])
self.assertEqual(keras.backend.get_session().run(fetches=[x, y]),
[30., 40.])
@test_util.run_deprecated_v1
def test_function_tf_run_options_with_run_metadata(self):
with self.cached_session():
x_placeholder = keras.backend.placeholder(shape=())
y_placeholder = keras.backend.placeholder(shape=())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
# enable run_options.
f = keras.backend.function(
inputs=[x_placeholder, y_placeholder],
outputs=[x_placeholder + y_placeholder],
options=run_options,
run_metadata=run_metadata)
output = f([10., 20.])
self.assertEqual(output, [30.])
self.assertGreater(len(run_metadata.partition_graphs), 0)
# disable run_options.
f1 = keras.backend.function(
inputs=[x_placeholder, y_placeholder],
outputs=[x_placeholder + y_placeholder],
run_metadata=run_metadata)
output1 = f1([10., 20.])
self.assertEqual(output1, [30.])
self.assertEqual(len(run_metadata.partition_graphs), 0)
@test_util.run_deprecated_v1
def test_function_fetch_callbacks(self):
# A fetch callback is invoked once per function call with the fetched value.
class CallbackStub(object):
def __init__(self):
self.times_called = 0
self.callback_result = 0
def _fetch_callback(self, result):
self.times_called += 1
self.callback_result = result
with self.cached_session():
callback = CallbackStub()
x_placeholder = keras.backend.placeholder(shape=())
y_placeholder = keras.backend.placeholder(shape=())
callback_op = x_placeholder * y_placeholder
f = keras.backend.function(
inputs=[x_placeholder, y_placeholder],
outputs=[x_placeholder + y_placeholder])
f.fetches.append(callback_op)
f.fetch_callbacks[callback_op] = callback._fetch_callback
_ = f([10., 20.])
self.assertEqual(callback.times_called, 1)
self.assertEqual(callback.callback_result, 200)
def test_get_session_different_graphs(self):
# get_session() caches per graph; a new default graph yields a new session
# unless the requested ops pin it back to the original graph.
with ops.Graph().as_default():
x = keras.backend.constant(1)
session = keras.backend.get_session()
self.assertIs(session, keras.backend.get_session((x,)))
self.assertIs(session, keras.backend.get_session())
with ops.Graph().as_default():
self.assertIs(session, keras.backend.get_session((x,)))
self.assertIsNot(session, keras.backend.get_session())
@test_util.run_all_in_graph_and_eager_modes
class ControlOpsTests(test.TestCase):
  """Tests for `keras.backend.switch` control flow."""

  def test_function_switch_basics(self):
    x = array_ops.constant(2.0)
    y = array_ops.constant(3.0)

    def xpowy():
      return keras.backend.pow(x, y)

    def ypowx():
      return keras.backend.pow(y, x)

    # x < y is true, so the first branch is taken: 2 ** 3 == 8.
    tensor = keras.backend.switch(keras.backend.less(x, y), xpowy, ypowx)
    self.assertEqual(keras.backend.eval(tensor), [8.0])

    # x > y is false, so the second branch is taken: 3 ** 2 == 9.
    tensor = keras.backend.switch(keras.backend.greater(x, y), xpowy, ypowx)
    self.assertEqual(keras.backend.eval(tensor), [9.0])

  def test_unequal_rank(self):
    x = ops.convert_to_tensor(np.array([[1, 2, 3], [4, 5, 6]]),
                              dtype='float32')
    y = ops.convert_to_tensor(np.array([1, 2, 3]), dtype='float32')

    def true_func():
      return x

    def false_func():
      return y

    # CONSISTENCY FIX: use assertRaisesRegex (as elsewhere in this file)
    # instead of the deprecated assertRaisesRegexp alias.
    with self.assertRaisesRegex(ValueError,
                                'Rank of `condition` should be less than'):
      keras.backend.switch(keras.backend.equal(x, x), false_func, true_func)
# Standard TensorFlow test entry point: run all test cases in this module.
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/backend_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras backend config API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.tf_export import keras_export
# The type of float to use throughout a session.
_FLOATX = 'float32'
# Epsilon fuzz factor used throughout the codebase.
_EPSILON = 1e-7
# Default image data format, one of "channels_last", "channels_first".
_IMAGE_DATA_FORMAT = 'channels_last'
@keras_export('keras.backend.epsilon')
def epsilon():
"""Returns the value of the fuzz factor used in numeric expressions.

Returns:
  A float.

Example:

```python
>>> keras.backend.epsilon()
1e-07
```
"""
return _EPSILON
@keras_export('keras.backend.set_epsilon')
def set_epsilon(value):
"""Sets the value of the fuzz factor used in numeric expressions.

Arguments:
  value: float. New value of epsilon.

Example:

```python
from keras import backend as K
K.epsilon()  # 1e-07
K.set_epsilon(1e-05)
K.epsilon()  # 1e-05
```
"""
global _EPSILON
_EPSILON = value
@keras_export('keras.backend.floatx')
def floatx():
"""Returns the default float type, as a string.

E.g. 'float16', 'float32', 'float64'.

Returns:
  String, the current default float type.

Example:

```python
>>> keras.backend.floatx()
'float32'
```
"""
return _FLOATX
@keras_export('keras.backend.set_floatx')
def set_floatx(value):
"""Sets the default float type.

Arguments:
  value: String; 'float16', 'float32', or 'float64'.

Example:

```python
from keras import backend as K
K.floatx()  # 'float32'
K.set_floatx('float16')
K.floatx()  # 'float16'
```

Raises:
  ValueError: In case of invalid value.
"""
global _FLOATX
if value not in {'float16', 'float32', 'float64'}:
raise ValueError('Unknown floatx type: ' + str(value))
_FLOATX = str(value)
@keras_export('keras.backend.image_data_format')
def image_data_format():
"""Returns the default image data format convention.

Returns:
  A string, either `'channels_first'` or `'channels_last'`.

Example:

```python
>>> keras.backend.image_data_format()
'channels_last'
```
"""
return _IMAGE_DATA_FORMAT
@keras_export('keras.backend.set_image_data_format')
def set_image_data_format(data_format):
"""Sets the value of the image data format convention.

Arguments:
  data_format: string. `'channels_first'` or `'channels_last'`.

Example:

```python
from keras import backend as K
K.image_data_format()  # 'channels_first'
K.set_image_data_format('channels_last')
K.image_data_format()  # 'channels_last'
```

Raises:
  ValueError: In case of invalid `data_format` value.
"""
global _IMAGE_DATA_FORMAT
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('Unknown data_format: ' + str(data_format))
_IMAGE_DATA_FORMAT = str(data_format)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/backend_config.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in loss functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.losses import losses_impl
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export('keras.losses.Loss')
class Loss(object):
  """Loss base class.

  To be implemented by subclasses:
  * `call()`: Contains the logic for loss calculation using `y_true`,
    `y_pred`.

  Example subclass implementation:
  ```
  class MeanSquaredError(Loss):

    def call(self, y_true, y_pred):
      y_pred = ops.convert_to_tensor(y_pred)
      y_true = math_ops.cast(y_true, y_pred.dtype)
      return K.mean(math_ops.square(y_pred - y_true), axis=-1)
  ```

  When used with `tf.distribute.Strategy`, outside of built-in training loops
  such as `tf.keras` `compile` and `fit`, please use 'SUM' or 'NONE' reduction
  types, and reduce losses explicitly in your training loop. Using 'AUTO' or
  'SUM_OVER_BATCH_SIZE' will raise an error.

  Please see
  https://www.tensorflow.org/alpha/tutorials/distribute/training_loops for
  more details on this.

  You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like:
  ```
  with strategy.scope():
    loss_obj = tf.keras.losses.CategoricalCrossentropy(
        reduction=tf.keras.losses.Reduction.NONE)
    ....
    loss = (tf.reduce_sum(loss_obj(labels, predictions)) *
            (1. / global_batch_size))
  ```

  Args:
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
      loss. Default value is `AUTO`. `AUTO` indicates that the reduction
      option will be determined by the usage context. For almost all cases
      this defaults to `SUM_OVER_BATCH_SIZE`. When used with
      `tf.distribute.Strategy`, outside of built-in training loops such as
      `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
      will raise an error. Please see
      https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
      for more details on this.
    name: Optional name for the op.
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):
    # Raises a ValueError for unknown reduction keys.
    losses_utils.ReductionV2.validate(reduction)
    self.reduction = reduction
    self.name = name

  def __call__(self, y_true, y_pred, sample_weight=None):
    """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
      sample_weight: Optional `sample_weight` acts as a coefficient for the
        loss. If a scalar is provided, then the loss is simply scaled by the
        given value. If `sample_weight` is a tensor of size `[batch_size]`,
        then the total loss for each sample of the batch is rescaled by the
        corresponding element in the `sample_weight` vector. If the shape of
        `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted
        to this shape), then each loss element of `y_pred` is scaled by the
        corresponding value of `sample_weight`. (Note on `dN-1`: all loss
        functions reduce by 1 dimension, usually axis=-1.)

    Returns:
      Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
      shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note
      `dN-1` because all loss functions reduce by 1 dimension, usually
      axis=-1.)

    Raises:
      ValueError: If the shape of `sample_weight` is invalid.
    """
    # If we are wrapping a lambda function strip '<>' from the name as it is
    # not accepted in scope name.
    scope_name = 'lambda' if self.name == '<lambda>' else self.name
    graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
        y_true, y_pred, sample_weight)
    with K.name_scope(scope_name or self.__class__.__name__), graph_ctx:
      losses = self.call(y_true, y_pred)
      return losses_utils.compute_weighted_loss(
          losses, sample_weight, reduction=self._get_reduction())

  @classmethod
  def from_config(cls, config):
    """Instantiates a `Loss` from its config (output of `get_config()`).

    Args:
      config: Output of `get_config()`.

    Returns:
      A `Loss` instance.
    """
    return cls(**config)

  def get_config(self):
    """Returns the constructor arguments needed to re-create this `Loss`."""
    return {'reduction': self.reduction, 'name': self.name}

  @abc.abstractmethod
  @doc_controls.for_subclass_implementers
  def call(self, y_true, y_pred):
    """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values, with the same shape as 'y_pred'.
      y_pred: The predicted values.
    """
    # BUG FIX: the original built the NotImplementedError but never raised
    # it, so a subclass missing `call` would silently return None.
    raise NotImplementedError('Must be implemented in subclasses.')

  def _get_reduction(self):
    """Handles `AUTO` reduction cases and returns the reduction value."""
    if distribution_strategy_context.has_strategy() and (
        self.reduction == losses_utils.ReductionV2.AUTO or
        self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE):
      raise ValueError(
          'Please use `tf.keras.losses.Reduction.SUM` or '
          '`tf.keras.losses.Reduction.NONE` for loss reduction when losses are '
          'used with `tf.distribute.Strategy` outside of the built-in training '
          'loops. You can implement '
          '`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '
          'size like:\n```\nwith strategy.scope():\n'
          ' loss_obj = tf.keras.losses.CategoricalCrossentropy('
          'reduction=tf.keras.losses.Reduction.NONE)\n....\n'
          ' loss = tf.reduce_sum(loss_obj(labels, predictions)) * '
          '(1. / global_batch_size)\n```\nPlease see '
          'https://www.tensorflow.org/alpha/tutorials/distribute/training_loops'
          ' for more details.')
    if self.reduction == losses_utils.ReductionV2.AUTO:
      return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
    return self.reduction
class LossFunctionWrapper(Loss):
  """Adapts a plain loss function to the `Loss` class interface.

  Args:
    fn: The loss function to wrap, with signature `fn(y_true, y_pred,
      **kwargs)`.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
      loss. Default value is `AUTO`, meaning the reduction option is
      determined by the usage context (almost always `SUM_OVER_BATCH_SIZE`).
      When used with `tf.distribute.Strategy` outside of built-in training
      loops such as `tf.keras` `compile` and `fit`, `AUTO` or
      `SUM_OVER_BATCH_SIZE` will raise an error. Please see
      https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
      for more details on this.
    name: (Optional) name for the loss.
    **kwargs: Keyword arguments forwarded to `fn` on every call.
  """

  def __init__(self, fn, reduction=losses_utils.ReductionV2.AUTO, name=None,
               **kwargs):
    super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name)
    self.fn = fn
    self._fn_kwargs = kwargs

  def call(self, y_true, y_pred):
    """Computes per-sample losses by delegating to the wrapped function.

    Args:
      y_true: Ground truth values.
      y_pred: The predicted values.

    Returns:
      Loss values per sample.
    """
    return self.fn(y_true, y_pred, **self._fn_kwargs)

  def get_config(self):
    # Tensor-valued kwargs are evaluated so the config stays serializable.
    config = {}
    for key, value in six.iteritems(self._fn_kwargs):
      if tf_utils.is_tensor_or_variable(value):
        config[key] = K.eval(value)
      else:
        config[key] = value
    base_config = super(LossFunctionWrapper, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.losses.MeanSquaredError')
class MeanSquaredError(LossFunctionWrapper):
  """Computes the mean of squares of errors between labels and predictions.

  `loss = square(y_true - y_pred)`

  Usage:

  ```python
  mse = tf.keras.losses.MeanSquaredError()
  loss = mse([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Loss: ', loss.numpy())  # Loss: 0.75
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanSquaredError())
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO,
               name='mean_squared_error'):
    # Delegate the math to the functional `mean_squared_error`.
    super(MeanSquaredError, self).__init__(
        mean_squared_error, reduction=reduction, name=name)
@keras_export('keras.losses.MeanAbsoluteError')
class MeanAbsoluteError(LossFunctionWrapper):
  """Computes the mean of absolute difference between labels and predictions.

  `loss = abs(y_true - y_pred)`

  Usage:

  ```python
  mae = tf.keras.losses.MeanAbsoluteError()
  loss = mae([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Loss: ', loss.numpy())  # Loss: 0.75
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanAbsoluteError())
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO,
               name='mean_absolute_error'):
    # Delegate the math to the functional `mean_absolute_error`.
    super(MeanAbsoluteError, self).__init__(
        mean_absolute_error, reduction=reduction, name=name)
@keras_export('keras.losses.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(LossFunctionWrapper):
  """Computes the mean absolute percentage error between `y_true` and `y_pred`.

  `loss = 100 * abs(y_true - y_pred) / y_true`

  Usage:

  ```python
  mape = tf.keras.losses.MeanAbsolutePercentageError()
  loss = mape([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Loss: ', loss.numpy())  # Loss: 5e+08
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanAbsolutePercentageError())
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO,
               name='mean_absolute_percentage_error'):
    # Delegate the math to the functional `mean_absolute_percentage_error`.
    super(MeanAbsolutePercentageError, self).__init__(
        mean_absolute_percentage_error, reduction=reduction, name=name)
@keras_export('keras.losses.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(LossFunctionWrapper):
  """Computes the mean squared logarithmic error between `y_true` and `y_pred`.

  `loss = square(log(y_true) - log(y_pred))`

  Usage:

  ```python
  msle = tf.keras.losses.MeanSquaredLogarithmicError()
  loss = msle([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Loss: ', loss.numpy())  # Loss: 0.36034
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanSquaredLogarithmicError())
  ```

  Args:
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply.
      Defaults to `AUTO`.
    name: (Optional) Name for the op.
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_squared_logarithmic_error'):
    super(MeanSquaredLogarithmicError, self).__init__(
        mean_squared_logarithmic_error, name=name, reduction=reduction)
@keras_export('keras.losses.BinaryCrossentropy')
class BinaryCrossentropy(LossFunctionWrapper):
  """Computes the cross-entropy loss between true labels and predicted labels.

  Use this cross-entropy loss when there are only two label classes (assumed to
  be 0 and 1). For each example, there should be a single floating-point value
  per prediction.

  In the snippet below, each of the four examples has only a single
  floating-pointing value, and both `y_pred` and `y_true` have the shape
  `[batch_size]`.

  Usage:

  ```python
  bce = tf.keras.losses.BinaryCrossentropy()
  loss = bce([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Loss: ', loss.numpy())  # Loss: 11.522857
  ```

  Usage with the `tf.keras` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.BinaryCrossentropy())
  ```

  Args:
    from_logits: Whether to interpret `y_pred` as a tensor of
      [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we assume
      that `y_pred` contains probabilities (i.e., values in [0, 1]).
      Note: Using from_logits=True may be more numerically stable.
    label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0, we
      compute the loss between the predicted labels and a smoothed version of
      the true labels, where the smoothing squeezes the labels towards 0.5.
      Larger values of `label_smoothing` correspond to heavier smoothing.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
      Default value is `AUTO`. `AUTO` indicates that the reduction option will
      be determined by the usage context. For almost all cases this defaults to
      `SUM_OVER_BATCH_SIZE`.
      When used with `tf.distribute.Strategy`, outside of built-in training
      loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
      `SUM_OVER_BATCH_SIZE` will raise an error. Please see
      https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
      for more details on this.
    name: (Optional) Name for the op.
  """

  def __init__(self,
               from_logits=False,
               label_smoothing=0,
               reduction=losses_utils.ReductionV2.AUTO,
               name='binary_crossentropy'):
    super(BinaryCrossentropy, self).__init__(
        binary_crossentropy,
        name=name,
        reduction=reduction,
        from_logits=from_logits,
        label_smoothing=label_smoothing)
    # Also kept as a public attribute (in addition to being forwarded to the
    # wrapped fn above) so external code can inspect it on the instance.
    self.from_logits = from_logits
@keras_export('keras.losses.CategoricalCrossentropy')
class CategoricalCrossentropy(LossFunctionWrapper):
  """Computes the crossentropy loss between the labels and predictions.

  Use this crossentropy loss function when there are two or more label classes.
  We expect labels to be provided in a `one_hot` representation. If you want to
  provide labels as integers, please use `SparseCategoricalCrossentropy` loss.
  There should be `# classes` floating point values per feature.

  In the snippet below, there is `# classes` floating pointing values per
  example. The shape of both `y_pred` and `y_true` are
  `[batch_size, num_classes]`.

  Usage:

  ```python
  cce = tf.keras.losses.CategoricalCrossentropy()
  loss = cce(
    [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
    [[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]])
  print('Loss: ', loss.numpy())  # Loss: 0.0945
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.CategoricalCrossentropy())
  ```

  Args:
    from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
      we assume that `y_pred` encodes a probability distribution.
      Note: Using from_logits=True may be more numerically stable.
    label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
      meaning the confidence on label values are relaxed. e.g.
      `label_smoothing=0.2` means that we will use a value of `0.1` for label
      `0` and `0.9` for label `1`.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
      Default value is `AUTO`. `AUTO` indicates that the reduction option will
      be determined by the usage context. For almost all cases this defaults to
      `SUM_OVER_BATCH_SIZE`.
      When used with `tf.distribute.Strategy`, outside of built-in training
      loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
      `SUM_OVER_BATCH_SIZE` will raise an error. Please see
      https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
      for more details on this.
    name: Optional name for the op.
  """

  def __init__(self,
               from_logits=False,
               label_smoothing=0,
               reduction=losses_utils.ReductionV2.AUTO,
               name='categorical_crossentropy'):
    super(CategoricalCrossentropy, self).__init__(
        categorical_crossentropy,
        name=name,
        reduction=reduction,
        from_logits=from_logits,
        label_smoothing=label_smoothing)
@keras_export('keras.losses.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(LossFunctionWrapper):
  """Computes the crossentropy loss between the labels and predictions.

  Use this crossentropy loss function when there are two or more label classes.
  We expect labels to be provided as integers. If you want to provide labels
  using `one-hot` representation, please use `CategoricalCrossentropy` loss.
  There should be `# classes` floating point values per feature for `y_pred`
  and a single floating point value per feature for `y_true`.

  In the snippet below, there is a single floating point value per example for
  `y_true` and `# classes` floating pointing values per example for `y_pred`.
  The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
  `[batch_size, num_classes]`.

  Usage:

  ```python
  cce = tf.keras.losses.SparseCategoricalCrossentropy()
  loss = cce(
    tf.convert_to_tensor([0, 1, 2]),
    tf.convert_to_tensor([[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]]))
  print('Loss: ', loss.numpy())  # Loss: 0.3239
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.SparseCategoricalCrossentropy())
  ```

  Args:
    from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
      we assume that `y_pred` encodes a probability distribution.
      Note: Using from_logits=True may be more numerically stable.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
      Default value is `AUTO`. `AUTO` indicates that the reduction option will
      be determined by the usage context. For almost all cases this defaults to
      `SUM_OVER_BATCH_SIZE`.
      When used with `tf.distribute.Strategy`, outside of built-in training
      loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
      `SUM_OVER_BATCH_SIZE` will raise an error. Please see
      https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
      for more details on this.
    name: Optional name for the op.
    axis: (Optional) The class dimension of `y_pred`. Defaults to -1, matching
      the wrapped `sparse_categorical_crossentropy` function.
  """

  def __init__(self,
               from_logits=False,
               reduction=losses_utils.ReductionV2.AUTO,
               name='sparse_categorical_crossentropy',
               axis=-1):
    # `axis` is appended as a trailing keyword so existing positional and
    # keyword callers are unaffected; it is forwarded to the wrapped fn,
    # which already accepts it.
    super(SparseCategoricalCrossentropy, self).__init__(
        sparse_categorical_crossentropy,
        name=name,
        reduction=reduction,
        from_logits=from_logits,
        axis=axis)
@keras_export('keras.losses.Hinge')
class Hinge(LossFunctionWrapper):
  """Computes the hinge loss between `y_true` and `y_pred`.

  `loss = maximum(1 - y_true * y_pred, 0)`

  `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
  provided we will convert them to -1 or 1.

  Usage:

  ```python
  h = tf.keras.losses.Hinge()
  loss = h([-1., 1., 1.], [0.6, -0.7, -0.5])
  # loss = max(0, 1 - y_true * y_pred) = [1.6 + 1.7 + 1.5] / 3
  print('Loss: ', loss.numpy())  # Loss: 1.6
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.Hinge())
  ```

  Args:
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply.
      Defaults to `AUTO`.
    name: (Optional) Name for the op.
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='hinge'):
    super(Hinge, self).__init__(hinge, name=name, reduction=reduction)
@keras_export('keras.losses.SquaredHinge')
class SquaredHinge(LossFunctionWrapper):
  """Computes the squared hinge loss between `y_true` and `y_pred`.

  `loss = square(maximum(1 - y_true * y_pred, 0))`

  `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
  provided we will convert them to -1 or 1.

  Usage:

  ```python
  sh = tf.keras.losses.SquaredHinge()
  loss = sh([-1., 1., 1.], [0.6, -0.7, -0.5])
  # loss = (max(0, 1 - y_true * y_pred))^2 = [1.6^2 + 1.7^2 + 1.5^2] / 3
  print('Loss: ', loss.numpy())  # Loss: 2.566666
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.SquaredHinge())
  ```

  Args:
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply.
      Defaults to `AUTO`.
    name: (Optional) Name for the op.
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='squared_hinge'):
    super(SquaredHinge, self).__init__(
        squared_hinge, name=name, reduction=reduction)
@keras_export('keras.losses.CategoricalHinge')
class CategoricalHinge(LossFunctionWrapper):
  """Computes the categorical hinge loss between `y_true` and `y_pred`.

  `loss = maximum(neg - pos + 1, 0)`
  where `pos = sum(y_true * y_pred)` and `neg = maximum((1 - y_true) * y_pred)`
  (see the wrapped `categorical_hinge` function; the formulas here mirror its
  implementation).

  Usage:

  ```python
  ch = tf.keras.losses.CategoricalHinge()
  loss = ch([0., 1., 1.], [1., 0., 1.])
  print('Loss: ', loss.numpy())  # Loss: 1.0
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.CategoricalHinge())
  ```

  Args:
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply.
      Defaults to `AUTO`.
    name: (Optional) Name for the op.
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='categorical_hinge'):
    super(CategoricalHinge, self).__init__(
        categorical_hinge, name=name, reduction=reduction)
@keras_export('keras.losses.Poisson')
class Poisson(LossFunctionWrapper):
  """Computes the Poisson loss between `y_true` and `y_pred`.

  `loss = y_pred - y_true * log(y_pred)`

  Usage:

  ```python
  p = tf.keras.losses.Poisson()
  loss = p([1., 9., 2.], [4., 8., 12.])
  print('Loss: ', loss.numpy())  # Loss: -0.35702705
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.Poisson())
  ```

  Args:
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply.
      Defaults to `AUTO`.
    name: (Optional) Name for the op.
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='poisson'):
    super(Poisson, self).__init__(poisson, name=name, reduction=reduction)
@keras_export('keras.losses.LogCosh')
class LogCosh(LossFunctionWrapper):
  """Computes the logarithm of the hyperbolic cosine of the prediction error.

  `logcosh = log((exp(x) + exp(-x))/2)`,
  where x is the error `y_pred - y_true`.

  Usage:

  ```python
  l = tf.keras.losses.LogCosh()
  loss = l([0., 1., 1.], [1., 0., 1.])
  print('Loss: ', loss.numpy())  # Loss: 0.289
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.LogCosh())
  ```

  Args:
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply.
      Defaults to `AUTO`.
    name: (Optional) Name for the op.
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='logcosh'):
    super(LogCosh, self).__init__(logcosh, name=name, reduction=reduction)
@keras_export('keras.losses.KLDivergence')
class KLDivergence(LossFunctionWrapper):
  """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.

  `loss = y_true * log(y_true / y_pred)`

  See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence

  Usage:

  ```python
  k = tf.keras.losses.KLDivergence()
  loss = k([.4, .9, .2], [.5, .8, .12])
  print('Loss: ', loss.numpy())  # Loss: 0.11891246
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.KLDivergence())
  ```

  Args:
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply.
      Defaults to `AUTO`.
    name: (Optional) Name for the op.
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='kullback_leibler_divergence'):
    super(KLDivergence, self).__init__(
        kullback_leibler_divergence, name=name, reduction=reduction)
@keras_export('keras.losses.Huber')
class Huber(LossFunctionWrapper):
  """Computes the Huber loss between `y_true` and `y_pred`.

  For each value x in `error = y_true - y_pred`:

  ```
  loss = 0.5 * x^2                  if |x| <= d
  loss = 0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```
  where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss

  Usage:

  ```python
  l = tf.keras.losses.Huber()
  loss = l([0., 1., 1.], [1., 0., 1.])
  print('Loss: ', loss.numpy())  # Loss: 0.333
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.Huber())
  ```

  Args:
    delta: A float, the point where the Huber loss function changes from a
      quadratic to linear.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
      Default value is `AUTO`. `AUTO` indicates that the reduction option will
      be determined by the usage context. For almost all cases this defaults to
      `SUM_OVER_BATCH_SIZE`.
      When used with `tf.distribute.Strategy`, outside of built-in training
      loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
      `SUM_OVER_BATCH_SIZE` will raise an error. Please see
      https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
      for more details on this.
    name: Optional name for the op.
  """

  def __init__(self,
               delta=1.0,
               reduction=losses_utils.ReductionV2.AUTO,
               name='huber_loss'):
    super(Huber, self).__init__(
        huber_loss, name=name, reduction=reduction, delta=delta)
@keras_export('keras.metrics.mean_squared_error',
              'keras.metrics.mse',
              'keras.metrics.MSE',
              'keras.losses.mean_squared_error',
              'keras.losses.mse',
              'keras.losses.MSE')
def mean_squared_error(y_true, y_pred):
  """Computes the mean squared error between labels and predictions.

  `loss = mean(square(y_true - y_pred), axis=-1)`

  Args:
    y_true: Ground truth values.
    y_pred: The predicted values.

  Returns:
    Tensor of per-sample mean squared error (last axis reduced).
  """
  y_pred = ops.convert_to_tensor(y_pred)
  # Labels are cast to match the prediction dtype before differencing.
  y_true = math_ops.cast(y_true, y_pred.dtype)
  return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)
@keras_export('keras.metrics.mean_absolute_error',
              'keras.metrics.mae',
              'keras.metrics.MAE',
              'keras.losses.mean_absolute_error',
              'keras.losses.mae',
              'keras.losses.MAE')
def mean_absolute_error(y_true, y_pred):
  """Computes the mean absolute error between labels and predictions.

  `loss = mean(abs(y_true - y_pred), axis=-1)`

  Args:
    y_true: Ground truth values.
    y_pred: The predicted values.

  Returns:
    Tensor of per-sample mean absolute error (last axis reduced).
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  return K.mean(math_ops.abs(y_pred - y_true), axis=-1)
@keras_export('keras.metrics.mean_absolute_percentage_error',
              'keras.metrics.mape',
              'keras.metrics.MAPE',
              'keras.losses.mean_absolute_percentage_error',
              'keras.losses.mape',
              'keras.losses.MAPE')
def mean_absolute_percentage_error(y_true, y_pred):
  """Computes the mean absolute percentage error between labels and predictions.

  `loss = 100 * mean(abs(y_true - y_pred) / y_true, axis=-1)`

  Args:
    y_true: Ground truth values.
    y_pred: The predicted values.

  Returns:
    Tensor of per-sample mean absolute percentage error.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # Clip |y_true| below at epsilon so the ratio never divides by zero
  # (which is why zero targets produce the huge values seen in the docs).
  diff = math_ops.abs(
      (y_true - y_pred) / K.clip(math_ops.abs(y_true), K.epsilon(), None))
  return 100. * K.mean(diff, axis=-1)
@keras_export('keras.metrics.mean_squared_logarithmic_error',
              'keras.metrics.msle',
              'keras.metrics.MSLE',
              'keras.losses.mean_squared_logarithmic_error',
              'keras.losses.msle',
              'keras.losses.MSLE')
def mean_squared_logarithmic_error(y_true, y_pred):
  """Computes the mean squared logarithmic error between labels and predictions.

  `loss = mean(square(log(y_pred + 1) - log(y_true + 1)), axis=-1)`

  Args:
    y_true: Ground truth values.
    y_pred: The predicted values.

  Returns:
    Tensor of per-sample mean squared logarithmic error.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # Values are clipped below at epsilon before the log so that zero or
  # negative inputs do not produce -inf/NaN.
  first_log = math_ops.log(K.clip(y_pred, K.epsilon(), None) + 1.)
  second_log = math_ops.log(K.clip(y_true, K.epsilon(), None) + 1.)
  return K.mean(math_ops.squared_difference(first_log, second_log), axis=-1)
def _maybe_convert_labels(y_true):
  """Converts binary labels into -1/1.

  If every element of `y_true` is 0 or 1, labels are mapped to -1/1 via
  `2 * y_true - 1`; otherwise `y_true` is returned unchanged. The check is
  performed with `smart_cond` so it works on both static and dynamic values.
  """
  are_zeros = math_ops.equal(y_true, 0)
  are_ones = math_ops.equal(y_true, 1)
  # True only when *all* entries are exactly 0 or 1.
  is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones))

  def _convert_binary_labels():
    # Convert the binary labels to -1 or 1.
    return 2. * y_true - 1.

  updated_y_true = smart_cond.smart_cond(is_binary,
                                         _convert_binary_labels, lambda: y_true)
  return updated_y_true
@keras_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge')
def squared_hinge(y_true, y_pred):
  """Computes the squared hinge loss between `y_true` and `y_pred`.

  `loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)`

  Args:
    y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
      If binary (0 or 1) labels are provided we will convert them to -1 or 1.
    y_pred: The predicted values.

  Returns:
    Tensor with one scalar loss entry per sample.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # Remap 0/1 labels to -1/1 if needed; +/-1 labels pass through unchanged.
  y_true = _maybe_convert_labels(y_true)
  return K.mean(
      math_ops.square(math_ops.maximum(1. - y_true * y_pred, 0.)), axis=-1)
@keras_export('keras.metrics.hinge', 'keras.losses.hinge')
def hinge(y_true, y_pred):
  """Computes the hinge loss between `y_true` and `y_pred`.

  `loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)`

  Args:
    y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
      If binary (0 or 1) labels are provided they will be converted to -1 or 1.
    y_pred: The predicted values.

  Returns:
    Tensor with one scalar loss entry per sample.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # Remap 0/1 labels to -1/1 if needed; +/-1 labels pass through unchanged.
  y_true = _maybe_convert_labels(y_true)
  return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)
@keras_export('keras.losses.categorical_hinge')
def categorical_hinge(y_true, y_pred):
  """Computes the categorical hinge loss between `y_true` and `y_pred`.

  `loss = maximum(neg - pos + 1, 0)` where `pos = sum(y_true * y_pred)` and
  `neg = max((1 - y_true) * y_pred)` along the last axis.

  Args:
    y_true: The ground truth values. NOTE(review): the implementation's use of
      `sum(y_true * y_pred)` suggests 0/1 (e.g. one-hot) targets; the
      original docstring's "-1 or 1" wording appears copy-pasted from
      `hinge` -- confirm against callers.
    y_pred: The predicted values.

  Returns:
    A tensor.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # Score of the positive (true) classes and the best-scoring negative class.
  pos = math_ops.reduce_sum(y_true * y_pred, axis=-1)
  neg = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1)
  return math_ops.maximum(0., neg - pos + 1.)
def huber_loss(y_true, y_pred, delta=1.0):
  """Computes Huber loss value.

  For each value x in `error = y_true - y_pred`:

  ```
  loss = 0.5 * x^2                  if |x| <= d
  loss = 0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```
  where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss

  Args:
    y_true: tensor of true targets.
    y_pred: tensor of predicted targets.
    delta: A float, the point where the Huber loss function changes from a
      quadratic to linear.

  Returns:
    Tensor with one scalar loss entry per sample.
  """
  y_pred = math_ops.cast(y_pred, dtype=K.floatx())
  y_true = math_ops.cast(y_true, dtype=K.floatx())
  error = math_ops.subtract(y_pred, y_true)
  abs_error = math_ops.abs(error)
  # Split |error| into a quadratic part (capped at delta) and a linear
  # remainder; total loss = 0.5 * quadratic^2 + delta * linear, which is
  # an algebraically equivalent, branch-free form of the piecewise
  # definition above.
  quadratic = math_ops.minimum(abs_error, delta)
  linear = math_ops.subtract(abs_error, quadratic)
  return math_ops.add(
      math_ops.multiply(
          ops.convert_to_tensor(0.5, dtype=quadratic.dtype),
          math_ops.multiply(quadratic, quadratic)),
      math_ops.multiply(delta, linear))
@keras_export('keras.losses.logcosh')
def logcosh(y_true, y_pred):
  """Logarithm of the hyperbolic cosine of the prediction error.

  `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
  to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
  like the mean squared error, but will not be so strongly affected by the
  occasional wildly incorrect prediction.

  Arguments:
    y_true: tensor of true targets.
    y_pred: tensor of predicted targets.

  Returns:
    Tensor with one scalar loss entry per sample.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)

  def _logcosh(x):
    # log(cosh(x)) == x + log(1 + exp(-2x)) - log(2); the softplus form
    # avoids overflow in cosh for large |x|.
    return x + nn.softplus(-2. * x) - math_ops.log(2.)

  return K.mean(_logcosh(y_pred - y_true), axis=-1)
@keras_export('keras.metrics.categorical_crossentropy',
              'keras.losses.categorical_crossentropy')
def categorical_crossentropy(y_true,
                             y_pred,
                             from_logits=False,
                             label_smoothing=0):
  """Computes the categorical crossentropy loss.

  Args:
    y_true: tensor of true targets (one-hot along the last axis; the
      smoothing path reads the class count from `shape(y_true)[1]`).
    y_pred: tensor of predicted targets.
    from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
      we assume that `y_pred` encodes a probability distribution.
    label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.

  Returns:
    Categorical crossentropy loss value.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx())

  def _smooth_labels():
    # Pull each label towards the uniform distribution over the classes.
    num_classes = math_ops.cast(array_ops.shape(y_true)[1], y_pred.dtype)
    return y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes)

  # smart_cond skips the smoothing branch entirely when label_smoothing
  # is statically known to be zero.
  y_true = smart_cond.smart_cond(label_smoothing,
                                 _smooth_labels, lambda: y_true)
  return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
@keras_export('keras.metrics.sparse_categorical_crossentropy',
              'keras.losses.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
  """Computes the sparse categorical crossentropy loss.

  Args:
    y_true: tensor of integer class indices.
    y_pred: tensor of predicted targets (one distribution per example).
    from_logits: Whether `y_pred` is expected to be a logits tensor.
    axis: The class dimension of `y_pred`. Defaults to -1.

  Returns:
    Sparse categorical crossentropy loss value.
  """
  # Thin wrapper: all work happens in the backend implementation.
  return K.sparse_categorical_crossentropy(
      y_true, y_pred, from_logits=from_logits, axis=axis)
@keras_export('keras.metrics.binary_crossentropy',
              'keras.losses.binary_crossentropy')
def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):
  """Computes the binary crossentropy loss.

  Args:
    y_true: tensor of true targets.
    y_pred: tensor of predicted targets.
    from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
      we assume that `y_pred` encodes a probability distribution.
    label_smoothing: Float in [0, 1]. If > `0`, labels are squeezed
      towards 0.5 (see `_smooth_labels` below).

  Returns:
    Binary crossentropy loss value, averaged over the last axis.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx())

  def _smooth_labels():
    # Map 0 -> 0.5*smoothing and 1 -> 1 - 0.5*smoothing.
    return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing

  # smart_cond skips the smoothing branch entirely when label_smoothing
  # is statically known to be zero.
  y_true = smart_cond.smart_cond(label_smoothing,
                                 _smooth_labels, lambda: y_true)
  return K.mean(
      K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)
@keras_export('keras.metrics.kullback_leibler_divergence',
              'keras.metrics.kld',
              'keras.metrics.KLD',
              'keras.losses.kullback_leibler_divergence',
              'keras.losses.kld',
              'keras.losses.KLD')
def kullback_leibler_divergence(y_true, y_pred):
  """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.

  `loss = y_true * log(y_true / y_pred)`

  See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence

  Usage:

  ```python
  loss = tf.keras.losses.KLD([.4, .9, .2], [.5, .8, .12])
  print('Loss: ', loss.numpy())  # Loss: 0.11891246
  ```

  Args:
    y_true: Tensor of true targets.
    y_pred: Tensor of predicted targets.

  Returns:
    A `Tensor` with loss.

  Raises:
    TypeError: If `y_true` cannot be cast to the `y_pred.dtype`.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # Clip both distributions into [epsilon, 1] so neither log(0) nor a
  # division by zero can occur below.
  y_true = K.clip(y_true, K.epsilon(), 1)
  y_pred = K.clip(y_pred, K.epsilon(), 1)
  return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)
@keras_export('keras.metrics.poisson', 'keras.losses.poisson')
def poisson(y_true, y_pred):
  """Computes the Poisson loss between y_true and y_pred.

  The Poisson loss is the mean of the elements of the `Tensor`
  `y_pred - y_true * log(y_pred)`.

  Usage:

  ```python
  loss = tf.keras.losses.poisson([1.4, 9.3, 2.2], [4.3, 8.2, 12.2])
  print('Loss: ', loss.numpy())  # Loss: -0.8045559
  ```

  Args:
    y_true: Tensor of true targets.
    y_pred: Tensor of predicted targets.

  Returns:
    A `Tensor` with the mean Poisson loss.

  Raises:
    InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # epsilon guards the log against y_pred == 0.
  return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)
# Retaining the legacy namespaces: 'cosine_proximity' and 'cosine'.
# TODO(psv): Change name of this function to `cosine_similarity` after fixing
# estimator test.
@keras_export(
    'keras.losses.cosine_similarity',
    v1=[
        'keras.metrics.cosine_proximity',
        'keras.metrics.cosine',
        'keras.losses.cosine_proximity',
        'keras.losses.cosine',
        'keras.losses.cosine_similarity',
    ])
def cosine_proximity(y_true, y_pred, axis=-1):
  """Computes the cosine similarity between labels and predictions.

  Both inputs are L2-normalized along `axis` before taking their dot
  product, so each result lies in [-1, 1].

  Args:
    y_true: Tensor of true targets.
    y_pred: Tensor of predicted targets.
    axis: The axis along which to compute similarity. Defaults to -1.

  Returns:
    Cosine similarity per example. NOTE(review): this is a similarity
    (larger = more similar), not a distance; callers minimizing it as a
    loss should be aware of the sign convention.
  """
  y_true = nn.l2_normalize(y_true, axis=axis)
  y_pred = nn.l2_normalize(y_pred, axis=axis)
  return math_ops.reduce_sum(y_true * y_pred, axis=axis)
@keras_export('keras.losses.CosineSimilarity')
class CosineSimilarity(LossFunctionWrapper):
  """Computes the cosine similarity between `y_true` and `y_pred`.

  Usage:

  ```python
  cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
  loss = cosine_loss([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
  # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
  # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
  # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
  # loss = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
  #      = ((0. + 0.) + (0.5 + 0.5)) / 2
  print('Loss: ', loss.numpy())  # Loss: 0.5
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.CosineSimilarity(axis=1))
  ```

  Args:
    axis: (Optional) Defaults to -1. The dimension along which the cosine
      similarity is computed.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
      Default value is `AUTO`. `AUTO` indicates that the reduction option will
      be determined by the usage context. For almost all cases this defaults to
      `SUM_OVER_BATCH_SIZE`.
      When used with `tf.distribute.Strategy`, outside of built-in training
      loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
      `SUM_OVER_BATCH_SIZE` will raise an error. Please see
      https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
      for more details on this.
    name: Optional name for the op.
  """

  def __init__(self,
               axis=-1,
               reduction=losses_utils.ReductionV2.AUTO,
               name='cosine_similarity'):
    super(CosineSimilarity, self).__init__(
        cosine_similarity, reduction=reduction, name=name, axis=axis)
# Aliases.
# Short/uppercase names kept for backwards compatibility with the
# historical Keras functional-loss API (e.g. `loss='mse'`).
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
kld = KLD = kullback_leibler_divergence
# `cosine_similarity` is the forward-looking name for the legacy
# `cosine_proximity` function (see TODO above that function).
cosine_similarity = cosine_proximity
def is_categorical_crossentropy(loss):
  """Returns whether `loss` denotes categorical crossentropy in any form.

  Recognizes the `CategoricalCrossentropy` class, a `LossFunctionWrapper`
  around the `categorical_crossentropy` function, the function itself (by
  `__name__`), and the plain string identifier.
  """
  if isinstance(loss, CategoricalCrossentropy):
    return True
  if isinstance(loss, LossFunctionWrapper) and (
      loss.fn == categorical_crossentropy):
    return True
  if hasattr(loss, '__name__') and (
      loss.__name__ == 'categorical_crossentropy'):
    return True
  return loss == 'categorical_crossentropy'
@keras_export('keras.losses.serialize')
def serialize(loss):
  """Serializes a loss (instance or function) into a Keras config."""
  return serialize_keras_object(loss)
@keras_export('keras.losses.deserialize')
def deserialize(name, custom_objects=None):
  """Deserializes a loss from its config, name, or registered identifier.

  Args:
    name: Loss configuration (string name or serialized config).
    custom_objects: Optional dict mapping names to custom classes/functions
      to consider during deserialization.

  Returns:
    The deserialized loss.
  """
  # `globals()` makes every loss defined in this module resolvable by name.
  return deserialize_keras_object(
      name,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='loss function')
@keras_export('keras.losses.get')
def get(identifier):
if identifier is None:
return None
if isinstance(identifier, six.string_types):
identifier = str(identifier)
return deserialize(identifier)
if isinstance(identifier, dict):
return deserialize(identifier)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret '
'loss function identifier:', identifier)
# Maps losses that consume integer class indices (rather than one-hot
# vectors) to the dtype their labels should be cast to. Presumably consumed
# by input/feature-handling code elsewhere -- confirm against callers.
LABEL_DTYPES_FOR_LOSSES = {
    losses_impl.sparse_softmax_cross_entropy: 'int32',
    sparse_categorical_crossentropy: 'int32'
}
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/keras/losses.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.