# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_example_decoder.py."""

# Third-party libraries.
from absl.testing import parameterized
import numpy as np
import tensorflow as tf, tf_keras

# Project modules under test.
from official.vision.dataloaders import tf_example_decoder
from official.vision.dataloaders import tfexample_utils
class TfExampleDecoderTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for `tf_example_decoder.TfExampleDecoder`.

  Covers decoded-tensor shapes for several example configurations, exact
  decoded values for a hand-built example (including attributes), and
  graceful handling of examples with missing optional fields.
  """

  def _assert_decoded_shapes(self, results, image_height, image_width,
                             num_instances):
    """Asserts the decoded shape/size fields shared by every test.

    Args:
      results: dict of numpy arrays produced by `decoder.decode(...)` after
        `tf.nest.map_structure(lambda x: x.numpy(), ...)`.
      image_height: expected decoded image height.
      image_width: expected decoded image width.
      num_instances: expected number of groundtruth instances.
    """
    self.assertAllEqual(
        (image_height, image_width, 3), results['image'].shape)
    self.assertEqual(image_height, results['height'])
    self.assertEqual(image_width, results['width'])
    self.assertAllEqual(
        (num_instances,), results['groundtruth_classes'].shape)
    self.assertAllEqual(
        (num_instances,), results['groundtruth_is_crowd'].shape)
    self.assertAllEqual(
        (num_instances,), results['groundtruth_area'].shape)
    self.assertAllEqual(
        (num_instances, 4), results['groundtruth_boxes'].shape)
    self.assertAllEqual(
        (num_instances, image_height, image_width),
        results['groundtruth_instance_masks'].shape)
    self.assertAllEqual(
        (num_instances,), results['groundtruth_instance_masks_png'].shape)

  # NOTE(review): this method takes per-case arguments but the
  # parameterization decorator was missing from the source; it is restored
  # here. The exact original case list should be confirmed against history.
  @parameterized.parameters(
      (100, 100, 0, False, True),
      (100, 100, 1, False, True),
      (100, 100, 2, False, True),
      (100, 100, 2, True, True),
      (100, 100, 2, True, False),
  )
  def test_result_shape(self, image_height, image_width, num_instances,
                        regenerate_source_id, fill_image_size):
    """Checks decoded tensor shapes for a synthetic detection example."""
    decoder = tf_example_decoder.TfExampleDecoder(
        include_mask=True, regenerate_source_id=regenerate_source_id)
    serialized_example = tfexample_utils.create_detection_test_example(
        image_height=image_height,
        image_width=image_width,
        image_channel=3,
        num_instances=num_instances,
        fill_image_size=fill_image_size,
    ).SerializeToString()
    decoded_tensors = decoder.decode(
        tf.convert_to_tensor(value=serialized_example))
    results = tf.nest.map_structure(lambda x: x.numpy(), decoded_tensors)

    self._assert_decoded_shapes(
        results, image_height, image_width, num_instances)
    # The original source id is only preserved when it is not regenerated.
    if not regenerate_source_id:
      self.assertEqual(tfexample_utils.DUMP_SOURCE_ID, results['source_id'])

  def test_result_content(self):
    """Verifies decoded values (not just shapes) for a hand-built example."""
    decoder = tf_example_decoder.TfExampleDecoder(
        include_mask=True, attribute_names=['attr1', 'attr2']
    )
    image_content = [[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
                     [[0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0]],
                     [[0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0]],
                     [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]]
    image = tfexample_utils.encode_image(np.uint8(image_content), fmt='PNG')
    image_height = 4
    image_width = 4
    num_instances = 2
    xmins = [0, 0.25]
    xmaxs = [0.5, 1.0]
    ymins = [0, 0]
    ymaxs = [0.5, 1.0]
    labels = [3, 1]
    # Per-instance attributes, one value per instance.
    attr1 = np.array([[0], [2]])
    attr2 = np.array([[1], [3]])
    areas = [
        0.25 * image_height * image_width, 0.75 * image_height * image_width
    ]
    is_crowds = [1, 0]
    mask_content = [[[255, 255, 0, 0],
                     [255, 255, 0, 0],
                     [0, 0, 0, 0],
                     [0, 0, 0, 0]],
                    [[0, 255, 255, 255],
                     [0, 255, 255, 255],
                     [0, 255, 255, 255],
                     [0, 255, 255, 255]]]
    masks = [
        tfexample_utils.encode_image(np.uint8(m), fmt='PNG')
        for m in list(mask_content)
    ]
    serialized_example = tf.train.Example(
        features=tf.train.Features(
            feature={
                'image/encoded': tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[image])
                ),
                'image/source_id': tf.train.Feature(
                    bytes_list=tf.train.BytesList(
                        value=[tfexample_utils.DUMP_SOURCE_ID]
                    )
                ),
                'image/height': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[image_height])
                ),
                'image/width': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[image_width])
                ),
                'image/object/bbox/xmin': tf.train.Feature(
                    float_list=tf.train.FloatList(value=xmins)
                ),
                'image/object/bbox/xmax': tf.train.Feature(
                    float_list=tf.train.FloatList(value=xmaxs)
                ),
                'image/object/bbox/ymin': tf.train.Feature(
                    float_list=tf.train.FloatList(value=ymins)
                ),
                'image/object/bbox/ymax': tf.train.Feature(
                    float_list=tf.train.FloatList(value=ymaxs)
                ),
                'image/object/class/label': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=labels)
                ),
                'image/object/is_crowd': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=is_crowds)
                ),
                'image/object/area': tf.train.Feature(
                    float_list=tf.train.FloatList(value=areas)
                ),
                'image/object/mask': tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=masks)
                ),
                'image/object/attribute/attr1': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=attr1.flatten())
                ),
                'image/object/attribute/attr2': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=attr2.flatten())
                ),
            }
        )
    ).SerializeToString()
    decoded_tensors = decoder.decode(
        tf.convert_to_tensor(value=serialized_example))
    results = tf.nest.map_structure(lambda x: x.numpy(), decoded_tensors)

    self._assert_decoded_shapes(
        results, image_height, image_width, num_instances)
    self.assertAllEqual(image_content, results['image'])
    self.assertEqual(tfexample_utils.DUMP_SOURCE_ID, results['source_id'])
    self.assertAllEqual(
        [3, 1], results['groundtruth_classes'])
    np.testing.assert_array_equal(
        attr1, results['groundtruth_attributes']['attr1']
    )
    np.testing.assert_array_equal(
        attr2, results['groundtruth_attributes']['attr2']
    )
    self.assertAllEqual([True, False], results['groundtruth_is_crowd'])
    self.assertNDArrayNear(
        [0.25 * image_height * image_width, 0.75 * image_height * image_width],
        results['groundtruth_area'], 1e-4)
    self.assertNDArrayNear(
        [[0, 0, 0.5, 0.5], [0, 0.25, 1.0, 1.0]],
        results['groundtruth_boxes'], 1e-4)
    self.assertNDArrayNear(
        mask_content, results['groundtruth_instance_masks'], 1e-4)
    self.assertAllEqual(
        masks, results['groundtruth_instance_masks_png'])

  def test_handling_missing_fields(self):
    """Decodes an example missing `is_crowd` and `area`.

    The decoder is expected to backfill defaults: `is_crowd` becomes all
    False and `area` is computed from the boxes.
    """
    decoder = tf_example_decoder.TfExampleDecoder(include_mask=True)
    image_content = [[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
                     [[0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0]],
                     [[0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0]],
                     [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]]
    image = tfexample_utils.encode_image(np.uint8(image_content), fmt='PNG')
    image_height = 4
    image_width = 4
    num_instances = 2
    xmins = [0, 0.25]
    xmaxs = [0.5, 1.0]
    ymins = [0, 0]
    ymaxs = [0.5, 1.0]
    labels = [3, 1]
    mask_content = [[[255, 255, 0, 0],
                     [255, 255, 0, 0],
                     [0, 0, 0, 0],
                     [0, 0, 0, 0]],
                    [[0, 255, 255, 255],
                     [0, 255, 255, 255],
                     [0, 255, 255, 255],
                     [0, 255, 255, 255]]]
    masks = [
        tfexample_utils.encode_image(np.uint8(m), fmt='PNG')
        for m in list(mask_content)
    ]
    # No 'image/object/is_crowd' or 'image/object/area' features here.
    serialized_example = tf.train.Example(
        features=tf.train.Features(
            feature={
                'image/encoded': (tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[image]))),
                'image/source_id': (tf.train.Feature(
                    bytes_list=tf.train.BytesList(
                        value=[tfexample_utils.DUMP_SOURCE_ID]))),
                'image/height': (tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[image_height]))),
                'image/width': (tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[image_width]))),
                'image/object/bbox/xmin': (tf.train.Feature(
                    float_list=tf.train.FloatList(value=xmins))),
                'image/object/bbox/xmax': (tf.train.Feature(
                    float_list=tf.train.FloatList(value=xmaxs))),
                'image/object/bbox/ymin': (tf.train.Feature(
                    float_list=tf.train.FloatList(value=ymins))),
                'image/object/bbox/ymax': (tf.train.Feature(
                    float_list=tf.train.FloatList(value=ymaxs))),
                'image/object/class/label': (tf.train.Feature(
                    int64_list=tf.train.Int64List(value=labels))),
                'image/object/mask': (tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=masks))),
            })).SerializeToString()
    decoded_tensors = decoder.decode(
        tf.convert_to_tensor(serialized_example))
    results = tf.nest.map_structure(lambda x: x.numpy(), decoded_tensors)

    self._assert_decoded_shapes(
        results, image_height, image_width, num_instances)
    self.assertAllEqual(image_content, results['image'])
    self.assertEqual(tfexample_utils.DUMP_SOURCE_ID, results['source_id'])
    self.assertAllEqual(
        [3, 1], results['groundtruth_classes'])
    self.assertAllEqual(
        [False, False], results['groundtruth_is_crowd'])
    self.assertNDArrayNear(
        [0.25 * image_height * image_width, 0.75 * image_height * image_width],
        results['groundtruth_area'], 1e-4)
    self.assertNDArrayNear(
        [[0, 0, 0.5, 0.5], [0, 0.25, 1.0, 1.0]],
        results['groundtruth_boxes'], 1e-4)
    self.assertNDArrayNear(
        mask_content, results['groundtruth_instance_masks'], 1e-4)
    self.assertAllEqual(
        masks, results['groundtruth_instance_masks_png'])
# Run the tests when executed as a script.
if __name__ == '__main__':
  tf.test.main()