repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_gan.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/gan/gan.py
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/gan/gan.py
class GAN():
    """Minimal GAN (generator stacked with a frozen discriminator).

    Only the `combined` model is consumed by the conversion test: it maps a
    latent vector to the discriminator's validity score.
    """

    def __init__(self):
        # Fixed MNIST-like image geometry and latent size.
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100
        # Build the discriminator first, then the generator.
        self.discriminator = self.build_discriminator()
        self.generator = self.build_generator()
        # Noise in -> generated image.
        z = Input(shape=(self.latent_dim,))
        img = self.generator(z)
        # Freeze the discriminator inside the combined model so only the
        # generator would train.
        self.discriminator.trainable = False
        validity = self.discriminator(img)
        # Stacked generator + discriminator.
        self.combined = Model(z, validity)

    def build_generator(self):
        """Return a Model mapping latent noise to a tanh-activated image."""
        net = Sequential()
        net.add(Dense(256, input_dim=self.latent_dim))
        # Widening MLP trunk: each Dense is preceded by LeakyReLU + BN.
        for width in (512, 1024):
            net.add(LeakyReLU(alpha=0.2))
            net.add(BatchNormalization(momentum=0.8))
            net.add(Dense(width))
        net.add(LeakyReLU(alpha=0.2))
        net.add(BatchNormalization(momentum=0.8))
        net.add(Dense(np.prod(self.img_shape), activation='tanh'))
        net.add(Reshape(self.img_shape))
        noise = Input(shape=(self.latent_dim,))
        return Model(noise, net(noise))

    def build_discriminator(self):
        """Return a Model mapping an image to a validity score in [0, 1]."""
        net = Sequential()
        net.add(Flatten(input_shape=self.img_shape))
        net.add(Dense(512))
        net.add(LeakyReLU(alpha=0.2))
        net.add(Dense(256))
        net.add(LeakyReLU(alpha=0.2))
        net.add(Dense(1, activation='sigmoid'))
        img = Input(shape=self.img_shape)
        return Model(img, net(img))
class TestGAN(unittest.TestCase):
    """Converts the combined GAN model to ONNX and compares runtime output."""

    def setUp(self):
        # Converted model files created during the test; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for path in self.model_files:
            os.remove(path)

    def test_GAN(self):
        keras_model = GAN().combined
        noise = np.random.rand(5, 100).astype(np.float32)
        expected = keras_model.predict(noise)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, noise, expected, self.model_files))


if __name__ == "__main__":
    unittest.main()
| 3,542 | 29.282051 | 107 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_craft.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras, is_tf_keras
from keras.applications.vgg16 import VGG16
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
import tensorflow as tf
from keras.utils import conv_utils
from keras.engine import Layer, InputSpec
import keras.backend as K
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
concatenate = keras.layers.concatenate
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
def _to_list(x):
if isinstance(x, list):
return x
return [x]
def _collect_input_shape(input_tensors):
    """Return the static Keras shape(s) of one tensor or a list of tensors.

    A single tensor yields a single shape tuple; a list yields a list of
    shapes.  A tensor whose shape cannot be inspected contributes ``None``
    (best effort: the error is printed and collection continues).
    """
    shapes = []
    for tensor in _to_list(input_tensors):
        try:
            shape = K.int_shape(tensor)
        except Exception as e:
            print(e)
            shape = None
        shapes.append(shape)
    return shapes[0] if len(shapes) == 1 else shapes
def _permute_dimensions(x, pattern):
    # Thin wrapper over tf.transpose mirroring Keras' K.permute_dimensions;
    # `pattern` is the desired axis order (e.g. [0, 2, 3, 1] for NCHW->NHWC).
    return tf.transpose(x, perm=pattern)
def _resie_image(x, target_layer, target_shape, data_format):
    """Resize `x` (nearest-neighbor) to the spatial size of `target_layer`.

    NOTE(review): the name looks like a typo for `_resize_image`; kept as-is
    because `Interpolate.call` references it by this name.
    """
    if data_format == 'channels_first':
        # Spatial dims are axes 2 and 3; transpose to NHWC because the TF
        # resize op expects channels-last input.
        new_shape = tf.shape(target_layer)[2:]
        x = _permute_dimensions(x, [0, 2, 3, 1])
        x = tf.image.resize_nearest_neighbor(x, new_shape)
        x = _permute_dimensions(x, [0, 3, 1, 2])
        # Re-attach the static spatial shape lost by the dynamic resize.
        x.set_shape((None, None, target_shape[2], target_shape[3]))
        return x
    elif data_format == 'channels_last':
        new_shape = tf.shape(target_layer)[1:3]
        x = tf.image.resize_nearest_neighbor(x, new_shape)
        x.set_shape((None, target_shape[1], target_shape[2], None))
        return x
    else:
        raise ValueError('Unknown data_format: ' + str(data_format))
class Interpolate(Layer):
    """Custom Keras layer resizing its input to `target_layer`'s spatial size.

    Uses nearest-neighbor interpolation (see `_resie_image`); supports both
    'channels_first' and 'channels_last' data formats.
    """

    def __init__(self, target_layer, data_format=None, **kwargs):
        super(Interpolate, self).__init__(**kwargs)
        self.target_layer = target_layer
        # Static shape of the target, captured at graph-construction time.
        self.target_shape = _collect_input_shape(target_layer)
        # self.data_format = conv_utils.normalize_data_format(data_format)
        self.data_format = K.normalize_data_format(data_format)
        self.input_spec = InputSpec(ndim=4)

    def compute_output_shape(self, input_shape):
        # NOTE(review): implicitly returns None for an unrecognized
        # data_format; call() would raise ValueError in that case anyway.
        if self.data_format == 'channels_first':
            height = self.target_shape[2]
            width = self.target_shape[3]
            return (input_shape[0],
                    input_shape[1],
                    height,
                    width)
        elif self.data_format == 'channels_last':
            height = self.target_shape[1]
            width = self.target_shape[2]
            return (input_shape[0],
                    height,
                    width,
                    input_shape[3])

    def call(self, inputs, **kwargs):
        return _resie_image(inputs, self.target_layer, self.target_shape, self.data_format)
def up_conv(input_tensor, filters):
    """Decoder block: 1x1 conv then 3x3 'same' conv, each with BN + ReLU.

    `filters` is a two-element sequence: widths of the 1x1 and 3x3 convs.
    """
    narrow, wide = filters[0], filters[1]
    x = Conv2D(narrow, kernel_size=1)(input_tensor)
    x = Activation('relu')(BatchNormalization()(x))
    x = Conv2D(wide, kernel_size=3, padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    return x
def conv_cls(input_tensor, num_class):
    """Classification head: stacked relu convs ending in a sigmoid score map.

    NOTE(review): the last layer passes `num_class` as `kernel_size` with a
    fixed 16 filters; the upstream CRAFT code uses `Conv2D(num_class,
    kernel_size=1)` here, so these look like transposed arguments — verify.
    Downstream callers only consume channels 0 and 1, so both forms run.
    """
    x = Conv2D(32, kernel_size=3, padding='same', activation='relu')(input_tensor)
    x = Conv2D(32, kernel_size=3, padding='same', activation='relu')(x)
    x = Conv2D(16, kernel_size=3, padding='same', activation='relu')(x)
    x = Conv2D(16, kernel_size=1, padding='same', activation='relu')(x)
    x = Conv2D(16, kernel_size=num_class, padding='same', activation='sigmoid')(x)
    return x
def VGG16_UNet(weights=None, input_tensor=None, pooling=None):
    """CRAFT backbone: VGG16 encoder with a U-Net style decoder.

    Skip connections are taken from VGG16 blocks 5..2; each decoder stage
    resizes to the skip's spatial size (Interpolate), concatenates, and
    applies `up_conv`.  Returns a (region_score, affinity_score) pair of
    per-pixel score maps.
    """
    vgg16 = VGG16(include_top=False, weights=weights, input_tensor=input_tensor, pooling=pooling)
    source = vgg16.get_layer('block5_conv3').output
    # Dilated context head on top of the deepest VGG features.
    x = MaxPooling2D(3, strides=1, padding='same', name='block5_pool')(source)
    x = Conv2D(1024, kernel_size=3, padding='same', dilation_rate=6)(x)
    x = Conv2D(1024, kernel_size=1)(x)
    x = Interpolate(target_layer=source, name='resize_1')(x)
    x = concatenate([x, source])
    x = up_conv(x, [512, 256])
    source = vgg16.get_layer('block4_conv3').output
    x = Interpolate(target_layer=source, name='resize_2')(x)
    x = concatenate([x, source])
    x = up_conv(x, [256, 128])
    source = vgg16.get_layer('block3_conv3').output
    x = Interpolate(target_layer=source, name='resize_3')(x)
    x = concatenate([x, source])
    x = up_conv(x, [128, 64])
    source = vgg16.get_layer('block2_conv2').output
    x = Interpolate(target_layer=source, name='resize_4')(x)
    x = concatenate([x, source])
    feature = up_conv(x, [64, 32])
    x = conv_cls(feature, 2)
    # Channel 0 = character region score, channel 1 = affinity score.
    region_score = Lambda(lambda layer: layer[:, :, :, 0])(x)
    affinity_score = Lambda(lambda layer: layer[:, :, :, 1])(x)
    return region_score, affinity_score
# From https://github.com/RubanSeven/CRAFT_keras/blob/master/net/vgg16.py
class TestCRAFT(unittest.TestCase):
    """Converts the CRAFT text-detection model and checks ONNX runtime parity."""

    def setUp(self):
        # Converted model files created during the test; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for path in self.model_files:
            os.remove(path)

    @unittest.skipIf(get_maximum_opset_supported() < 10,
                     "Need Upsample 10+ support.")
    def test_CRAFT(self):
        # input_image = Input(shape=(None, None, 3)) -- Need fixed input shape
        shape = (512, 512, 3)
        input_image = Input(shape=shape)
        region, affinity = VGG16_UNet(input_tensor=input_image, weights=None)
        keras_model = Model(input_image, [region, affinity], name='vgg16_unet')
        x = np.random.rand(1, *shape).astype(np.float32)
        expected = keras_model.predict(x)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected, self.model_files))


if __name__ == "__main__":
    unittest.main()
| 6,550 | 33.298429 | 107 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_crnn.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
Activation = keras.layers.Activation
add = keras.layers.add
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Convolution2D = keras.layers.Convolution2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Flatten = keras.layers.Flatten
GRU = keras.layers.GRU
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
LSTM = keras.layers.LSTM
MaxPooling2D = keras.layers.MaxPooling2D
Multiply = keras.layers.Multiply
Reshape = keras.layers.Reshape
Sequential = keras.models.Sequential
Model = keras.models.Model
K = keras.backend
# Model from https://github.com/qjadud1994/CRNN-Keras
class TestCRNN(unittest.TestCase):
    """Converts CRNN OCR models (VGG-style CNN + bidirectional RNN head) to
    ONNX and checks onnxruntime output parity against Keras.

    Two variants are covered: LSTM-based and GRU-based recurrent layers.
    """

    def setUp(self):
        # Converted model files created during a test; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(get_maximum_opset_supported() < 10,
                     "CRNN conversion need opset >= 10.")
    def test_CRNN_LSTM(self):
        """CRNN variant using stacked bidirectional-style LSTM pairs."""
        img_w = 128
        img_h = 64
        input_shape = (img_w, img_h, 1)  # (128, 64, 1)
        num_classes = 80
        # Make Networkw
        inputs = Input(name='the_input', shape=input_shape, dtype='float32')  # (None, 128, 64, 1)
        # Convolution layer (VGG)
        inner = Conv2D(64, (3, 3), padding='same', name='conv1', kernel_initializer='he_normal')(
            inputs)  # (None, 128, 64, 64)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(2, 2), name='max1')(inner)  # (None,64, 32, 64)
        inner = Conv2D(128, (3, 3), padding='same', name='conv2', kernel_initializer='he_normal')(
            inner)  # (None, 64, 32, 128)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(2, 2), name='max2')(inner)  # (None, 32, 16, 128)
        inner = Conv2D(256, (3, 3), padding='same', name='conv3', kernel_initializer='he_normal')(
            inner)  # (None, 32, 16, 256)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = Conv2D(256, (3, 3), padding='same', name='conv4', kernel_initializer='he_normal')(
            inner)  # (None, 32, 16, 256)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(1, 2), name='max3')(inner)  # (None, 32, 8, 256)
        inner = Conv2D(512, (3, 3), padding='same', name='conv5', kernel_initializer='he_normal')(
            inner)  # (None, 32, 8, 512)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = Conv2D(512, (3, 3), padding='same', name='conv6')(inner)  # (None, 32, 8, 512)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(1, 2), name='max4')(inner)  # (None, 32, 4, 512)
        inner = Conv2D(512, (2, 2), padding='same', kernel_initializer='he_normal', name='con7')(
            inner)  # (None, 32, 4, 512)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        # CNN to RNN
        inner = Reshape(target_shape=((32, 2048)), name='reshape')(inner)  # (None, 32, 2048)
        inner = Dense(64, activation='relu', kernel_initializer='he_normal', name='dense1')(inner)  # (None, 32, 64)
        # RNN layer: forward LSTM + backward LSTM (output reversed back) merged.
        lstm_1 = LSTM(256, return_sequences=True, kernel_initializer='he_normal', name='lstm1')(
            inner)  # (None, 32, 512)
        lstm_1b = LSTM(256, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='lstm1_b')(
            inner)
        reversed_lstm_1b = Lambda(lambda inputTensor: K.reverse(inputTensor, axes=1))(lstm_1b)
        lstm1_merged = add([lstm_1, reversed_lstm_1b])  # (None, 32, 512)
        lstm1_merged = BatchNormalization()(lstm1_merged)
        lstm_2 = LSTM(256, return_sequences=True, kernel_initializer='he_normal', name='lstm2')(lstm1_merged)
        lstm_2b = LSTM(256, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='lstm2_b')(
            lstm1_merged)
        reversed_lstm_2b = Lambda(lambda inputTensor: K.reverse(inputTensor, axes=1))(lstm_2b)
        lstm2_merged = concatenate([lstm_2, reversed_lstm_2b])  # (None, 32, 1024)
        lstm2_merged = BatchNormalization()(lstm2_merged)
        # transforms RNN output to character activations:
        inner = Dense(num_classes, kernel_initializer='he_normal', name='dense2')(lstm2_merged)  # (None, 32, 63)
        y_pred = Activation('softmax', name='softmax')(inner)
        model = Model(inputs=[inputs], outputs=y_pred)
        data = np.random.rand(1, 128, 64, 1).astype(np.float32)
        expected = model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))

    @unittest.skipIf(get_maximum_opset_supported() < 10,
                     "CRNN conversion need opset >= 10.")
    def test_CRNN_GRU(self):
        """Same topology as test_CRNN_LSTM with GRU recurrent layers."""
        img_w = 128
        img_h = 64
        num_classes = 80
        input_shape = (img_w, img_h, 1)  # (128, 64, 1)
        # Make Networkw
        inputs = Input(name='the_input', shape=input_shape, dtype='float32')  # (None, 128, 64, 1)
        # Convolution layer (VGG)
        inner = Conv2D(64, (3, 3), padding='same', name='conv1', kernel_initializer='he_normal')(
            inputs)  # (None, 128, 64, 64)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(2, 2), name='max1')(inner)  # (None,64, 32, 64)
        inner = Conv2D(128, (3, 3), padding='same', name='conv2', kernel_initializer='he_normal')(
            inner)  # (None, 64, 32, 128)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(2, 2), name='max2')(inner)  # (None, 32, 16, 128)
        inner = Conv2D(256, (3, 3), padding='same', name='conv3', kernel_initializer='he_normal')(
            inner)  # (None, 32, 16, 256)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = Conv2D(256, (3, 3), padding='same', name='conv4', kernel_initializer='he_normal')(
            inner)  # (None, 32, 16, 256)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(1, 2), name='max3')(inner)  # (None, 32, 8, 256)
        inner = Conv2D(512, (3, 3), padding='same', name='conv5', kernel_initializer='he_normal')(
            inner)  # (None, 32, 8, 512)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = Conv2D(512, (3, 3), padding='same', name='conv6')(inner)  # (None, 32, 8, 512)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        inner = MaxPooling2D(pool_size=(1, 2), name='max4')(inner)  # (None, 32, 4, 512)
        inner = Conv2D(512, (2, 2), padding='same', kernel_initializer='he_normal', name='con7')(
            inner)  # (None, 32, 4, 512)
        inner = BatchNormalization()(inner)
        inner = Activation('relu')(inner)
        # CNN to RNN
        inner = Reshape(target_shape=((32, 2048)), name='reshape')(inner)  # (None, 32, 2048)
        inner = Dense(64, activation='relu', kernel_initializer='he_normal', name='dense1')(inner)  # (None, 32, 64)
        # RNN layer: forward GRU + backward GRU (output reversed back) merged.
        gru_1 = GRU(256, return_sequences=True, kernel_initializer='he_normal', name='gru1')(inner)  # (None, 32, 512)
        gru_1b = GRU(256, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru1_b')(
            inner)
        reversed_gru_1b = Lambda(lambda inputTensor: K.reverse(inputTensor, axes=1))(gru_1b)
        gru1_merged = add([gru_1, reversed_gru_1b])  # (None, 32, 512)
        gru1_merged = BatchNormalization()(gru1_merged)
        gru_2 = GRU(256, return_sequences=True, kernel_initializer='he_normal', name='gru2')(gru1_merged)
        gru_2b = GRU(256, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru2_b')(
            gru1_merged)
        reversed_gru_2b = Lambda(lambda inputTensor: K.reverse(inputTensor, axes=1))(gru_2b)
        gru2_merged = concatenate([gru_2, reversed_gru_2b])  # (None, 32, 1024)
        gru2_merged = BatchNormalization()(gru2_merged)
        # transforms RNN output to character activations:
        inner = Dense(num_classes, kernel_initializer='he_normal', name='dense2')(gru2_merged)  # (None, 32, 63)
        y_pred = Activation('softmax', name='softmax')(inner)
        model = Model(inputs=[inputs], outputs=y_pred)
        data = np.random.rand(1, 128, 64, 1).astype(np.float32)
        expected = model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))


# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 9,647 | 44.084112 | 118 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_bigan.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras, is_tf_keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/bigan/bigan.py
class BIGAN():
    """Bidirectional GAN fixture: generator, encoder and joint discriminator.

    Only `bigan_generator` (the stacked generator/encoder model with a
    frozen discriminator) is consumed by the conversion test below.
    """

    def __init__(self):
        # Fixed MNIST-like image geometry and latent size.
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100
        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        # Build the generator
        self.generator = self.build_generator()
        # Build the encoder
        self.encoder = self.build_encoder()
        # The part of the bigan that trains the discriminator and encoder
        self.discriminator.trainable = False
        # Generate image from sampled noise
        z = Input(shape=(self.latent_dim, ))
        img_ = self.generator(z)
        # Encode image
        img = Input(shape=self.img_shape)
        z_ = self.encoder(img)
        # Latent -> img is fake, and img -> latent is valid
        fake = self.discriminator([z, img_])
        valid = self.discriminator([z_, img])
        # Set up and compile the combined model
        # Trains generator to fool the discriminator
        self.bigan_generator = Model([z, img], [fake, valid])

    def build_encoder(self):
        """Return a Model mapping an image to a latent vector."""
        model = Sequential()
        model.add(Flatten(input_shape=self.img_shape))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(self.latent_dim))
        img = Input(shape=self.img_shape)
        z = model(img)
        return Model(img, z)

    def build_generator(self):
        """Return a Model mapping a latent vector to a tanh-activated image."""
        model = Sequential()
        model.add(Dense(512, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))
        z = Input(shape=(self.latent_dim,))
        gen_img = model(z)
        return Model(z, gen_img)

    def build_discriminator(self):
        """Return a Model mapping a (latent, image) pair to a validity score."""
        z = Input(shape=(self.latent_dim, ))
        img = Input(shape=self.img_shape)
        # Joint input: latent vector concatenated with the flattened image.
        d_in = concatenate([z, Flatten()(img)])
        model = Dense(1024)(d_in)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.5)(model)
        model = Dense(1024)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.5)(model)
        model = Dense(1024)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.5)(model)
        validity = Dense(1, activation="sigmoid")(model)
        return Model([z, img], validity)
class TestBIGAN(unittest.TestCase):
    """Converts the stacked BIGAN generator model and checks ORT parity."""

    def setUp(self):
        # Converted model files created during the test; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for path in self.model_files:
            os.remove(path)

    def test_BIGAN(self):
        keras_model = BIGAN().bigan_generator
        batch = 5
        latent = np.random.rand(batch, 100).astype(np.float32)
        image = np.random.rand(batch, 28, 28, 1).astype(np.float32)
        expected = keras_model.predict([latent, image])
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        feeds = {keras_model.input_names[0]: latent,
                 keras_model.input_names[1]: image}
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, feeds, expected, self.model_files))


if __name__ == "__main__":
    unittest.main()
| 4,507 | 30.305556 | 168 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_deep_speech.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
layers = keras.layers
# Model from https://github.com/rolczynski/Automatic-Speech-Recognition
class TestDeepSpeech(unittest.TestCase):
    """Converts DeepSpeech / DeepSpeech2 speech models to ONNX and checks
    onnxruntime output parity against Keras."""

    def setUp(self):
        # Converted model files created during a test; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(get_maximum_opset_supported() < 11,
                     "Deep speech conversion need opset >= 11.")
    def test_deep_speech(self):
        """DeepSpeech 1: context conv + FC stack + bidirectional LSTM."""
        K.clear_session()
        input_dim = 20
        output_dim = 10
        context = 7
        units = 1024
        dropouts = (0.1, 0.1, 0)
        # Define input tensor [batch, time, features]
        input_tensor = layers.Input([None, input_dim], name='X')
        # Add 4th dimension [batch, time, frequency, channel]
        x = layers.Lambda(keras.backend.expand_dims,
                          arguments=dict(axis=-1))(input_tensor)
        # Fill zeros around time dimension
        x = layers.ZeroPadding2D(padding=(context, 0))(x)
        # Convolve signal in time dim
        receptive_field = (2 * context + 1, input_dim)
        x = layers.Conv2D(filters=units, kernel_size=receptive_field)(x)
        # Squeeze into 3rd dim array
        x = layers.Lambda(keras.backend.squeeze, arguments=dict(axis=2))(x)
        # Add non-linearity
        x = layers.ReLU(max_value=20)(x)
        # Use dropout as regularization
        x = layers.Dropout(rate=dropouts[0])(x)
        # 2nd and 3rd FC layers do a feature extraction base on a narrow
        # context of convolutional layer
        x = layers.TimeDistributed(layers.Dense(units))(x)
        x = layers.ReLU(max_value=20)(x)
        x = layers.Dropout(rate=dropouts[1])(x)
        x = layers.TimeDistributed(layers.Dense(units))(x)
        x = layers.ReLU(max_value=20)(x)
        x = layers.Dropout(rate=dropouts[2])(x)
        # Use recurrent layer to have a broader context
        x = layers.Bidirectional(layers.LSTM(units, return_sequences=True),
                                 merge_mode='sum')(x)
        # Return at each time step logits along characters. Then CTC
        # computation is more stable, in contrast to the softmax.
        output_tensor = layers.TimeDistributed(layers.Dense(output_dim))(x)
        model = keras.Model(input_tensor, output_tensor, name='DeepSpeech')
        data = np.random.rand(2, 3, input_dim).astype(np.float32)
        expected = model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))

    @unittest.skipIf(get_maximum_opset_supported() < 11,
                     "Deep speech conversion need opset >= 11.")
    def test_deep_speech_2(self):
        """DeepSpeech 2: strided conv front-end + 5 bidirectional GRU layers."""
        K.clear_session()
        input_dim = 20
        output_dim = 10
        rnn_units = 800
        # Define input tensor [batch, time, features]
        input_tensor = layers.Input([None, input_dim], name='X')
        # Add 4th dimension [batch, time, frequency, channel]
        x = layers.Lambda(keras.backend.expand_dims,
                          arguments=dict(axis=-1))(input_tensor)
        x = layers.Conv2D(filters=32,
                          kernel_size=[11, 41],
                          strides=[2, 2],
                          padding='same',
                          use_bias=False,
                          name='conv_1')(x)
        x = layers.BatchNormalization(name='conv_1_bn')(x)
        x = layers.ReLU(name='conv_1_relu')(x)
        x = layers.Conv2D(filters=32,
                          kernel_size=[11, 21],
                          strides=[1, 2],
                          padding='same',
                          use_bias=False,
                          name='conv_2')(x)
        x = layers.BatchNormalization(name='conv_2_bn')(x)
        x = layers.ReLU(name='conv_2_relu')(x)
        # We need to squeeze to 3D tensor. Thanks to the stride in frequency
        # domain, we reduce the number of features four times for each channel.
        x = layers.Reshape([-1, input_dim//4*32])(x)
        for i in [1, 2, 3, 4, 5]:
            recurrent = layers.GRU(units=rnn_units,
                                   activation='tanh',
                                   recurrent_activation='sigmoid',
                                   use_bias=True,
                                   return_sequences=True,
                                   reset_after=True,
                                   name='gru_'+str(i))
            x = layers.Bidirectional(recurrent,
                                     name='bidirectional'+str(i),
                                     merge_mode='concat')(x)
            x = layers.Dropout(rate=0.5)(x) if i < 5 else x  # Only between
        # Return at each time step logits along characters. Then CTC
        # computation is more stable, in contrast to the softmax.
        x = layers.TimeDistributed(layers.Dense(units=rnn_units*2), name='dense_1')(x)
        x = layers.ReLU(name='dense_1_relu')(x)
        x = layers.Dropout(rate=0.5)(x)
        output_tensor = layers.TimeDistributed(layers.Dense(units=output_dim),
                                               name='dense_2')(x)
        model = keras.Model(input_tensor, output_tensor, name='DeepSpeech2')
        data = np.random.rand(2, 3, input_dim).astype(np.float32)
        expected = model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))


# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 6,881 | 39.964286 | 106 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_pix2pix.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Concatenate = keras.layers.Concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/pix2pix/pix2pix.py
class Pix2Pix():
    """Pix2Pix fixture: U-Net generator + PatchGAN discriminator.

    Only `combined` (generator stacked with the frozen discriminator,
    conditioned on two image inputs) is consumed by the conversion test.
    """

    def __init__(self):
        # Input shape
        self.img_rows = 256
        self.img_cols = 256
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        # Calculate output shape of D (PatchGAN)
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)
        # Number of filters in the first layer of G and D
        self.gf = 64
        self.df = 64
        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        # -------------------------
        #  Construct Computational
        #  Graph of Generator
        # -------------------------
        # Build the generator
        self.generator = self.build_generator()
        # Input images and their conditioning images
        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)
        # By conditioning on B generate a fake version of A
        fake_A = self.generator(img_B)
        # For the combined model we will only train the generator
        self.discriminator.trainable = False
        # Discriminators determines validity of translated images / condition pairs
        valid = self.discriminator([fake_A, img_B])
        self.combined = Model(inputs=[img_A, img_B], outputs=[valid, fake_A])

    def build_generator(self):
        """U-Net Generator"""

        def conv2d(layer_input, filters, f_size=4, bn=True):
            """Layers used during downsampling"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            return d

        def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
            """Layers used during upsampling"""
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = BatchNormalization(momentum=0.8)(u)
            # U-Net skip connection: concatenate with the encoder feature map.
            u = Concatenate()([u, skip_input])
            return u

        # Image input
        d0 = Input(shape=self.img_shape)
        # Downsampling
        d1 = conv2d(d0, self.gf, bn=False)
        d2 = conv2d(d1, self.gf*2)
        d3 = conv2d(d2, self.gf*4)
        d4 = conv2d(d3, self.gf*8)
        d5 = conv2d(d4, self.gf*8)
        d6 = conv2d(d5, self.gf*8)
        d7 = conv2d(d6, self.gf*8)
        # Upsampling
        u1 = deconv2d(d7, d6, self.gf*8)
        u2 = deconv2d(u1, d5, self.gf*8)
        u3 = deconv2d(u2, d4, self.gf*8)
        u4 = deconv2d(u3, d3, self.gf*4)
        u5 = deconv2d(u4, d2, self.gf*2)
        u6 = deconv2d(u5, d1, self.gf)
        u7 = UpSampling2D(size=2)(u6)
        output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u7)
        return Model(d0, output_img)

    def build_discriminator(self):
        """PatchGAN discriminator over an (image, conditioning image) pair."""

        def d_layer(layer_input, filters, f_size=4, bn=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            return d

        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)
        # Concatenate image and conditioning image by channels to produce input
        combined_imgs = Concatenate(axis=-1)([img_A, img_B])
        d1 = d_layer(combined_imgs, self.df, bn=False)
        d2 = d_layer(d1, self.df*2)
        d3 = d_layer(d2, self.df*4)
        d4 = d_layer(d3, self.df*8)
        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)
        return Model([img_A, img_B], validity)
class TestPix2Pix(unittest.TestCase):
    """Converts the combined Pix2Pix model and checks ONNX runtime parity."""

    def setUp(self):
        # Converted model files created during the test; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for path in self.model_files:
            os.remove(path)

    def test_Pix2Pix(self):
        keras_model = Pix2Pix().combined
        batch = 5
        img_a = np.random.rand(batch, 256, 256, 3).astype(np.float32)
        img_b = np.random.rand(batch, 256, 256, 3).astype(np.float32)
        expected = keras_model.predict([img_a, img_b])
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        feeds = {keras_model.input_names[0]: img_a,
                 keras_model.input_names[1]: img_b}
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, feeds, expected, self.model_files))


if __name__ == "__main__":
    unittest.main()
| 5,618 | 32.446429 | 168 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_unet.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import keras_segmentation
import onnxruntime
from os.path import dirname, abspath
from mock_keras2onnx.proto import keras, is_keras_older_than
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from packaging.version import Version
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_image
img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
Input = keras.layers.Input
Concatenate = keras.layers.Concatenate
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Conv2DTranspose = keras.layers.Conv2DTranspose
Dropout = keras.layers.Dropout
MaxPooling2D = keras.layers.MaxPooling2D
UpSampling2D = keras.layers.UpSampling2D
Model = keras.models.Model
def get_unet_model(input_channel_num=3, out_ch=3, start_ch=64, depth=4, inc_rate=2., activation='relu',
                   dropout=0.5, batchnorm=False, maxpool=True, upconv=True, residual=False):
    """Build a U-Net style encoder/decoder (noise2noise variant).

    Args mirror the upstream model: `depth` recursion levels, channel count
    multiplied by `inc_rate` per level, `maxpool`/`upconv` selecting the
    down/up-sampling flavor, optional batch-norm and residual concat.
    Returns a Keras Model mapping (H, W, input_channel_num) -> (H, W, out_ch).
    """
    # BUGFIX: BatchNormalization is never imported at module level in this file,
    # so calling with batchnorm=True raised NameError. Resolve it through the
    # already-imported `keras` module instead of a bare (undefined) name.
    BatchNormalization = keras.layers.BatchNormalization

    def _conv_block(m, dim, acti, bn, res, do=0):
        # Two 3x3 convs with optional batch-norm/dropout; optional residual concat.
        n = Conv2D(dim, 3, activation=acti, padding='same')(m)
        n = BatchNormalization()(n) if bn else n
        n = Dropout(do)(n) if do else n
        n = Conv2D(dim, 3, activation=acti, padding='same')(n)
        n = BatchNormalization()(n) if bn else n
        return Concatenate()([m, n]) if res else n

    def _level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):
        # Recursive U-Net level: encode, recurse, upsample, skip-concat, decode.
        if depth > 0:
            # NOTE(review): the commented-out line shows the upstream reference
            # applying _conv_block before downsampling; this file deliberately
            # skips it (n = m). Kept as-is to preserve behavior.
            #n = _conv_block(m, dim, acti, bn, res)
            n = m
            m = MaxPooling2D()(n) if mp else Conv2D(dim, 3, strides=2, padding='same')(n)
            m = _level_block(m, int(inc * dim), depth - 1, inc, acti, do, bn, mp, up, res)
            if up:
                m = UpSampling2D()(m)
                m = Conv2D(dim, 2, activation=acti, padding='same')(m)
            else:
                m = Conv2DTranspose(dim, 3, strides=2, activation=acti, padding='same')(m)
            n = Concatenate()([n, m])  # skip connection
            m = _conv_block(n, dim, acti, bn, res)
        else:
            # Bottleneck: dropout is only applied here.
            m = _conv_block(m, dim, acti, bn, res, do)
        return m

    i = Input(shape=(None, None, input_channel_num))
    o = _level_block(i, start_ch, depth, inc_rate, activation, dropout, batchnorm, maxpool, upconv, residual)
    o = Conv2D(out_ch, 1)(o)  # 1x1 conv projecting to the output channel count
    model = Model(inputs=i, outputs=o)
    return model
class TestUnet(unittest.TestCase):
    """ONNX conversion tests for three U-Net variants."""
    def setUp(self):
        # Files produced during conversion; deleted in tearDown.
        self.model_files = []
    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)
    def test_unet_1(self):
        # From https://github.com/divamgupta/image-segmentation-keras/models/unet.py
        model = keras_segmentation.models.unet.unet(101)
        res = run_image(model, self.model_files, img_path, target_size=(416, 608))
        self.assertTrue(*res)
    @unittest.skipIf(is_keras_older_than("2.2.3"),
                     "Cannot import normalize_data_format from keras.backend")
    def test_unet_2(self):
        # From https://github.com/jocicmarko/ultrasound-nerve-segmentation
        # Classic hand-wired U-Net: 4 pooling stages down, 4 transpose-conv
        # stages up with skip concatenations, sigmoid 1-channel output.
        img_rows = 96
        img_cols = 96
        inputs = Input((img_rows, img_cols, 1))
        # Encoder path.
        conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
        conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
        conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
        conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
        conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
        conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
        conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
        conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
        conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)
        # Decoder path with skip connections from the encoder.
        up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
        conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
        conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)
        up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
        conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
        conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)
        up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
        conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
        conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)
        up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
        conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
        conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)
        conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)
        model = Model(inputs=[inputs], outputs=[conv10])
        res = run_image(model, self.model_files, img_path, color_mode="grayscale", target_size=(img_rows, img_cols))
        self.assertTrue(*res)
    @unittest.skipIf(Version(onnxruntime.__version__.split('-')[0]) < Version('1.7.0'),
                     "ConvTranspose stride > 1 is fixed in onnxruntime 1.7.0.")
    def test_unet_3(self):
        # From https://github.com/yu4u/noise2noise/blob/master/model.py
        model = get_unet_model(out_ch=3, upconv=False)
        res = run_image(model, self.model_files, img_path, atol=5.e-4, target_size=(256, 256, 3))
        self.assertTrue(*res)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 6,020 | 42.316547 | 116 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_dcgan.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/dcgan/dcgan.py
class DCGAN():
    """DCGAN generator/discriminator pair; `self.combined` stacks the generator
    followed by the (frozen) discriminator, mapping latent noise -> validity."""
    def __init__(self):
        # Input shape
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100
        # Build the discriminator (no compile step in this conversion-test copy)
        self.discriminator = self.build_discriminator()
        # Build the generator
        self.generator = self.build_generator()
        # The generator takes noise as input and generates imgs
        z = Input(shape=(self.latent_dim,))
        img = self.generator(z)
        # For the combined model we will only train the generator
        self.discriminator.trainable = False
        # The discriminator takes generated images as input and determines validity
        valid = self.discriminator(img)
        # The combined model (stacked generator and discriminator)
        # Trains the generator to fool the discriminator
        self.combined = Model(z, valid)
    def build_generator(self):
        """Noise (latent_dim,) -> 28x28x1 image, tanh-scaled to [-1, 1]."""
        model = Sequential()
        # Project and reshape to a 7x7x128 feature map, then upsample twice.
        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))
        noise = Input(shape=(self.latent_dim,))
        img = model(noise)
        return Model(noise, img)
    def build_discriminator(self):
        """28x28x1 image -> scalar validity in (0, 1) (sigmoid)."""
        model = Sequential()
        # Strided convs downsample; dropout regularizes each block.
        model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        # Pad to an even size so the following strided conv divides cleanly.
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))
        img = Input(shape=self.img_shape)
        validity = model(img)
        return Model(img, validity)
class TestDCGAN(unittest.TestCase):
    """Converts the DCGAN combined model to ONNX and checks runtime parity."""

    def setUp(self):
        # Files written by the conversion helper; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for path in self.model_files:
            os.remove(path)

    def test_DCGAN(self):
        keras_model = DCGAN().combined
        noise = np.random.rand(5, 100).astype(np.float32)
        expected = keras_model.predict(noise)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, noise,
                             expected, self.model_files))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 4,335 | 32.353846 | 107 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_music_generation.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
import tensorflow as tf
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
Concatenate = keras.layers.Concatenate
Conv1D = keras.layers.Conv1D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
LSTM = keras.layers.LSTM
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
RepeatVector = keras.layers.RepeatVector
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
TimeDistributed = keras.layers.TimeDistributed
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# ---- Module-level constants for the DeepJ music-generation model ----
# Define the musical styles
genre = [
    'baroque',
    'classical',
    'romantic'
]
# One sub-list of composer data directories per genre (parallel to `genre`).
styles = [
    [
        'data/baroque/bach',
        'data/baroque/handel',
        'data/baroque/pachelbel'
    ],
    [
        'data/classical/burgmueller',
        'data/classical/clementi',
        'data/classical/haydn',
        'data/classical/beethoven',
        'data/classical/brahms',
        'data/classical/mozart'
    ],
    [
        'data/romantic/balakirew',
        'data/romantic/borodin',
        'data/romantic/brahms',
        'data/romantic/chopin',
        'data/romantic/debussy',
        'data/romantic/liszt',
        'data/romantic/mendelssohn',
        'data/romantic/moszkowski',
        'data/romantic/mussorgsky',
        'data/romantic/rachmaninov',
        'data/romantic/schubert',
        'data/romantic/schumann',
        'data/romantic/tchaikovsky',
        'data/romantic/tschai'
    ]
]
# Total composer count across all genres (23); width of the style one-hot.
NUM_STYLES = sum(len(s) for s in styles)
# MIDI Resolution
DEFAULT_RES = 96
MIDI_MAX_NOTES = 128
MAX_VELOCITY = 127
# Number of octaves supported
NUM_OCTAVES = 4
OCTAVE = 12
# Min and max note (in MIDI note number)
MIN_NOTE = 36
MAX_NOTE = MIN_NOTE + NUM_OCTAVES * OCTAVE
NUM_NOTES = MAX_NOTE - MIN_NOTE
# Number of beats in a bar
BEATS_PER_BAR = 4
# Notes per quarter note
NOTES_PER_BEAT = 4
# With 4 subdivisions per beat, the shortest representable note is a sixteenth.
NOTES_PER_BAR = NOTES_PER_BEAT * BEATS_PER_BAR
# Training parameters
BATCH_SIZE = 16
SEQ_LEN = 8 * NOTES_PER_BAR  # 8 bars of temporal context
# Hyper Parameters
OCTAVE_UNITS = 64
STYLE_UNITS = 64
# Channels per note; presumably play/replay/volume (from DeepJ) — confirm.
NOTE_UNITS = 3
TIME_AXIS_UNITS = 256
NOTE_AXIS_UNITS = 128
TIME_AXIS_LAYERS = 2
NOTE_AXIS_LAYERS = 2
def one_hot(i, nb_classes):
    """Return a length-`nb_classes` float vector with a 1.0 at index `i`."""
    return np.eye(nb_classes)[i]
def pitch_pos_in_f(time_steps):
    """
    Returns a constant containing pitch position of each note.
    The returned closure maps x -> tensor of shape
    [batch, time_steps, NUM_NOTES, 1] holding n / NUM_NOTES for note index n;
    x is used only to obtain the dynamic batch size.
    """
    def f(x):
        # Normalized pitch index in [0, 1) for every note slot.
        note_ranges = tf.range(NUM_NOTES, dtype='float32') / NUM_NOTES
        repeated_ranges = tf.tile(note_ranges, [tf.shape(x)[0] * time_steps])
        return tf.reshape(repeated_ranges, [tf.shape(x)[0], time_steps, NUM_NOTES, 1])
    return f
def pitch_class_in_f(time_steps):
    """
    Returns a constant containing pitch class of each note.
    The returned closure maps x -> tensor of shape
    [batch, time_steps, NUM_NOTES, OCTAVE]: a one-hot of (n % OCTAVE) per note,
    tiled over batch and time; x supplies only the dynamic batch size.
    """
    def f(x):
        pitch_class_matrix = np.array([one_hot(n % OCTAVE, OCTAVE) for n in range(NUM_NOTES)])
        pitch_class_matrix = tf.constant(pitch_class_matrix, dtype='float32')
        pitch_class_matrix = tf.reshape(pitch_class_matrix, [1, 1, NUM_NOTES, OCTAVE])
        return tf.tile(pitch_class_matrix, [tf.shape(x)[0], time_steps, 1, 1])
    return f
def pitch_bins_f(time_steps):
    """Return a closure that aggregates channel 0 of x across octave-equivalent
    notes and tiles the result back to [batch, time_steps, NUM_NOTES, 1]."""
    def f(x):
        # Sum the first note channel over notes one octave apart
        # (presumably per-pitch-class activity — confirm against DeepJ docs).
        bins = tf.reduce_sum([x[:, :, i::OCTAVE, 0] for i in range(OCTAVE)], axis=3)
        bins = tf.tile(bins, [NUM_OCTAVES, 1, 1])
        bins = tf.reshape(bins, [tf.shape(x)[0], time_steps, NUM_NOTES, 1])
        return bins
    return f
def time_axis(dropout):
    """Return the time-axis module: f(notes, beat, style) -> per-note features.

    Builds note-wise features (pitch position/class/bins, octave conv, beat),
    then runs TIME_AXIS_LAYERS LSTMs along the time dimension independently
    for each note, mixing in a style projection before each layer.
    Output shape: [batch, time, notes, TIME_AXIS_UNITS].
    """
    def f(notes, beat, style):
        time_steps = int(notes.get_shape()[1])
        # TODO: Experiment with when to apply conv
        note_octave = TimeDistributed(Conv1D(OCTAVE_UNITS, 2 * OCTAVE, padding='same'))(notes)
        note_octave = Activation('tanh')(note_octave)
        note_octave = Dropout(dropout)(note_octave)
        # Create features for every single note.
        note_features = Concatenate()([
            Lambda(pitch_pos_in_f(time_steps))(notes),
            Lambda(pitch_class_in_f(time_steps))(notes),
            Lambda(pitch_bins_f(time_steps))(notes),
            note_octave,
            TimeDistributed(RepeatVector(NUM_NOTES))(beat)
        ])
        x = note_features
        # [batch, notes, time, features]
        x = Permute((2, 1, 3))(x)
        # Apply LSTMs
        for l in range(TIME_AXIS_LAYERS):
            # Integrate style: project, broadcast over notes, and add.
            style_proj = Dense(int(x.get_shape()[3]))(style)
            style_proj = TimeDistributed(RepeatVector(NUM_NOTES))(style_proj)
            style_proj = Activation('tanh')(style_proj)
            style_proj = Dropout(dropout)(style_proj)
            style_proj = Permute((2, 1, 3))(style_proj)
            x = Add()([x, style_proj])
            # TimeDistributed over the notes axis -> LSTM runs along time.
            x = TimeDistributed(LSTM(TIME_AXIS_UNITS, return_sequences=True))(x)
            x = Dropout(dropout)(x)
        # [batch, time, notes, features]
        return Permute((2, 1, 3))(x)
    return f
def note_axis(dropout):
    """Return the note-axis module: f(x, chosen, style) -> note predictions.

    Conditions on the previous note's target (shifted one step along the note
    axis), mixes in style, runs LSTMs along the note axis, and emits
    [play/replay sigmoid pair, volume] per note. The Dense/LSTM layers are
    cached in closures so the training and generation models share weights.
    """
    dense_layer_cache = {}
    lstm_layer_cache = {}
    note_dense = Dense(2, activation='sigmoid', name='note_dense')
    volume_dense = Dense(1, name='volume_dense')
    def f(x, chosen, style):
        time_steps = int(x.get_shape()[1])
        # Shift target one note to the left.
        shift_chosen = Lambda(lambda x: tf.pad(x[:, :, :-1, :], [[0, 0], [0, 0], [1, 0], [0, 0]]))(chosen)
        # [batch, time, notes, 1]
        shift_chosen = Reshape((time_steps, NUM_NOTES, -1))(shift_chosen)
        # [batch, time, notes, features + 1]
        x = Concatenate(axis=3)([x, shift_chosen])
        for l in range(NOTE_AXIS_LAYERS):
            # Integrate style
            '''
            if l not in dense_layer_cache:
                dense_layer_cache[l] = Dense(int(x.get_shape()[3]))
            '''
            # NOTE(review): the hard-coded replacement below overwrites both
            # cache entries on every loop iteration and on every call of f,
            # so the "cache" never persists as the commented code intended —
            # fresh Dense layers (sizes 259/128) are created each time.
            dense_layer_cache[0] = Dense(259)
            dense_layer_cache[1] = Dense(128)
            style_proj = dense_layer_cache[l](style)
            style_proj = TimeDistributed(RepeatVector(NUM_NOTES))(style_proj)
            style_proj = Activation('tanh')(style_proj)
            style_proj = Dropout(dropout)(style_proj)
            x = Add()([x, style_proj])
            # LSTM layers ARE cached, so repeated calls share weights.
            if l not in lstm_layer_cache:
                lstm_layer_cache[l] = LSTM(NOTE_AXIS_UNITS, return_sequences=True)
            x = TimeDistributed(lstm_layer_cache[l])(x)
            x = Dropout(dropout)(x)
        return Concatenate()([note_dense(x), volume_dense(x)])
    return f
def build_models(time_steps=SEQ_LEN, input_dropout=0.2, dropout=0.5):
    """Build the DeepJ networks.

    Returns (model, time_model, note_model):
      * model      — full training model: (notes, chosen, beat, style) -> notes_out
      * time_model — time-axis only: (notes, beat, style) -> per-note features
      * note_model — single-step note-axis model for generation, sharing the
                     note-axis layers (and the style Dense) with `model`.
    """
    notes_in = Input((time_steps, NUM_NOTES, NOTE_UNITS))
    beat_in = Input((time_steps, NOTES_PER_BAR))
    style_in = Input((time_steps, NUM_STYLES))
    # Target input for conditioning
    chosen_in = Input((time_steps, NUM_NOTES, NOTE_UNITS))
    # Dropout inputs
    notes = Dropout(input_dropout)(notes_in)
    beat = Dropout(input_dropout)(beat_in)
    chosen = Dropout(input_dropout)(chosen_in)
    # Distributed representations (layer kept so the generation model reuses it)
    style_l = Dense(STYLE_UNITS, name='style')
    style = style_l(style_in)
    """ Time axis """
    time_out = time_axis(dropout)(notes, beat, style)
    """ Note Axis & Prediction Layer """
    naxis = note_axis(dropout)
    notes_out = naxis(time_out, chosen, style)
    model = Model([notes_in, chosen_in, beat_in, style_in], [notes_out])
    """ Generation Models """
    time_model = Model([notes_in, beat_in, style_in], [time_out])
    note_features = Input((1, NUM_NOTES, TIME_AXIS_UNITS), name='note_features')
    chosen_gen_in = Input((1, NUM_NOTES, NOTE_UNITS), name='chosen_gen_in')
    style_gen_in = Input((1, NUM_STYLES), name='style_in')
    # Dropout inputs
    chosen_gen = Dropout(input_dropout)(chosen_gen_in)
    style_gen = style_l(style_gen_in)
    note_gen_out = naxis(note_features, chosen_gen, style_gen)
    note_model = Model([note_features, chosen_gen_in, style_gen_in], note_gen_out)
    return model, time_model, note_model
# Model from https://github.com/calclavia/DeepJ
class TestMusicGeneration(unittest.TestCase):
    """Converts all three DeepJ models (training, time-axis, note-axis) to
    ONNX and compares ONNX Runtime output against Keras predictions."""
    def setUp(self):
        self.model_files = []
    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)
    @unittest.skipIf(get_maximum_opset_supported() < 10,
                     "ScatterNd support need opset >= 10.")
    def test_music_generation(self):
        K.clear_session()
        model, time_model, note_model = build_models()
        batch_size = 2
        data_notes = np.random.rand(batch_size, SEQ_LEN, NUM_NOTES, NOTE_UNITS).astype(np.float32)
        data_beat = np.random.rand(batch_size, SEQ_LEN, NOTES_PER_BAR).astype(np.float32)
        data_style = np.random.rand(batch_size, SEQ_LEN, NUM_STYLES).astype(np.float32)
        data_chosen = np.random.rand(batch_size, SEQ_LEN, NUM_NOTES, NOTE_UNITS).astype(np.float32)
        # 1) Full training model.
        expected = model.predict([data_notes, data_chosen, data_beat, data_style])
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model,
                              {model.input_names[0]: data_notes,
                               model.input_names[1]: data_chosen,
                               model.input_names[2]: data_beat,
                               model.input_names[3]: data_style}, expected, self.model_files))
        # 2) Time-axis generation model (same inputs minus `chosen`).
        expected = time_model.predict([data_notes, data_beat, data_style])
        onnx_model = mock_keras2onnx.convert_keras(time_model, time_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, time_model,
                              {time_model.input_names[0]: data_notes,
                               time_model.input_names[1]: data_beat,
                               time_model.input_names[2]: data_style}, expected, self.model_files))
        # 3) Single-step note-axis model (time dimension of 1).
        data_notes = np.random.rand(batch_size, 1, NUM_NOTES, TIME_AXIS_UNITS).astype(np.float32)
        data_chosen = np.random.rand(batch_size, 1, NUM_NOTES, NOTE_UNITS).astype(np.float32)
        data_style = np.random.rand(batch_size, 1, NUM_STYLES).astype(np.float32)
        expected = note_model.predict([data_notes, data_chosen, data_style])
        onnx_model = mock_keras2onnx.convert_keras(note_model, note_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, note_model,
                              {note_model.input_names[0]: data_notes,
                               note_model.input_names[1]: data_chosen,
                               note_model.input_names[2]: data_style}, expected, self.model_files))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 11,436 | 33.448795 | 106 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_deep_speaker.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
from keras import layers
from keras import regularizers
# Default input dimensions: 160 time frames x 64 filter banks (mel features —
# presumably; confirm against the upstream deep-speaker preprocessing).
NUM_FRAMES = 160
NUM_FBANKS = 64
class DeepSpeakerModel:
    """Residual CNN ('ResCNN') speaker model.

    Maps a (frames, fbanks, 1) spectrogram batch to either a 512-d
    L2-normalized speaker embedding (default) or, with include_softmax=True,
    a softmax over `num_speakers_softmax` speakers for pre-training.
    """
    def __init__(self, batch_input_shape=(None, NUM_FRAMES, NUM_FBANKS, 1), include_softmax=False,
                 num_speakers_softmax=None):
        self.include_softmax = include_softmax
        if self.include_softmax:
            assert num_speakers_softmax > 0
        # Counter used to give every clipped-ReLU Lambda a unique layer name.
        self.clipped_relu_count = 0
        inputs = Input(batch_shape=batch_input_shape, name='input')
        x = self.cnn_component(inputs)
        # Flatten spatial dims into a 2048-wide feature per time step.
        x = Reshape((-1, 2048))(x)
        # Temporal average layer. axis=1 is time.
        x = Lambda(lambda y: K.mean(y, axis=1), name='average')(x)
        if include_softmax:
            # used for softmax because the dataset we pre-train on might be too small. easy to overfit.
            x = Dropout(0.5)(x)
        x = Dense(512, name='affine')(x)
        if include_softmax:
            # Those weights are just when we train on softmax.
            x = Dense(num_speakers_softmax, activation='softmax')(x)
        else:
            # Does not contain any weights.
            x = Lambda(lambda y: K.l2_normalize(y, axis=1), name='ln')(x)
        self.m = Model(inputs, x, name='ResCNN')
    def keras_model(self):
        """Return the underlying Keras Model."""
        return self.m
    def get_weights(self):
        """Return model weights, dropping the softmax head's W/b if present."""
        w = self.m.get_weights()
        if self.include_softmax:
            w.pop() # last 2 are the W_softmax and b_softmax.
            w.pop()
        return w
    def clipped_relu(self, inputs):
        """ReLU clipped at 20, wrapped in a uniquely named Lambda layer."""
        relu = Lambda(lambda y: K.minimum(K.maximum(y, 0), 20), name='clipped_relu_'+str(self.clipped_relu_count))(inputs)
        self.clipped_relu_count += 1
        return relu
    def identity_block(self, input_tensor, kernel_size, filters, stage, block):
        """Residual block: two convs (same channel count) + skip connection."""
        conv_name_base = 'res' + str(stage) + '_' + str(block) + '_branch'
        x = Conv2D(filters,
                   kernel_size=kernel_size,
                   strides=1,
                   activation=None,
                   padding='same',
                   kernel_initializer='glorot_uniform',
                   kernel_regularizer=regularizers.l2(l=0.0001),
                   name=conv_name_base + '_2a')(input_tensor)
        x = BatchNormalization(name=conv_name_base + '_2a_bn')(x)
        x = self.clipped_relu(x)
        x = Conv2D(filters,
                   kernel_size=kernel_size,
                   strides=1,
                   activation=None,
                   padding='same',
                   kernel_initializer='glorot_uniform',
                   kernel_regularizer=regularizers.l2(l=0.0001),
                   name=conv_name_base + '_2b')(x)
        x = BatchNormalization(name=conv_name_base + '_2b_bn')(x)
        x = self.clipped_relu(x)
        x = layers.add([x, input_tensor])
        x = self.clipped_relu(x)
        return x
    def conv_and_res_block(self, inp, filters, stage):
        """Strided 5x5 conv (halves H and W) followed by 3 identity blocks."""
        conv_name = 'conv{}-s'.format(filters)
        o = Conv2D(filters,
                   kernel_size=5,
                   strides=2,
                   activation=None,
                   padding='same',
                   kernel_initializer='glorot_uniform',
                   kernel_regularizer=regularizers.l2(l=0.0001), name=conv_name)(inp)
        o = BatchNormalization(name=conv_name + '_bn')(o)
        o = self.clipped_relu(o)
        for i in range(3):
            o = self.identity_block(o, kernel_size=3, filters=filters, stage=stage, block=i)
        return o
    def cnn_component(self, inp):
        """Stack four conv+residual stages, doubling channels 64 -> 512."""
        x = self.conv_and_res_block(inp, 64, stage=1)
        x = self.conv_and_res_block(x, 128, stage=2)
        x = self.conv_and_res_block(x, 256, stage=3)
        x = self.conv_and_res_block(x, 512, stage=4)
        return x
# Model from https://github.com/philipperemy/deep-speaker
class TestDeepSpeaker(unittest.TestCase):
    """Converts the ResCNN speaker model to ONNX and checks runtime parity."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for path in self.model_files:
            os.remove(path)

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_DeepSpeaker(self):
        K.clear_session()
        keras_model = DeepSpeakerModel(
            batch_input_shape=(None, 32, 64, 4),
            include_softmax=True,
            num_speakers_softmax=10).keras_model()
        data = np.random.rand(2, 32, 64, 4).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                              data, expected, self.model_files))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 5,827 | 34.754601 | 136 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_segnet_2.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
import tensorflow as tf
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Convolution2D = keras.layers.Convolution2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
class MaxPoolingWithArgmax2D(keras.layers.Layer):
    """Max pooling that also returns the flat argmax indices of each maximum,
    so a matching MaxUnpooling2D layer can later restore values in place.
    Outputs: [pooled, argmax] (argmax cast to the float dtype)."""
    def __init__(self, pool_size=(2, 2), strides=(2, 2), padding="same", **kwargs):
        super(MaxPoolingWithArgmax2D, self).__init__(**kwargs)
        self.padding = padding
        self.pool_size = pool_size
        self.strides = strides
    def call(self, inputs, **kwargs):
        padding = self.padding
        pool_size = self.pool_size
        strides = self.strides
        if K.backend() == "tensorflow":
            # tf.nn API expects 4-element ksize/strides and upper-case padding.
            ksize = [1, pool_size[0], pool_size[1], 1]
            padding = padding.upper()
            strides = [1, strides[0], strides[1], 1]
            output, argmax = tf.nn.max_pool_with_argmax(
                inputs, ksize=ksize, strides=strides, padding=padding
            )
        else:
            errmsg = "{} backend is not supported for layer {}".format(
                K.backend(), type(self).__name__
            )
            raise NotImplementedError(errmsg)
        # Cast so both outputs share the model's float dtype.
        argmax = K.cast(argmax, K.floatx())
        return [output, argmax]
    def compute_output_shape(self, input_shape):
        # Halve H and W (hard-coded 2x2 ratio); batch/channel unchanged.
        ratio = (1, 2, 2, 1)
        output_shape = [
            dim // ratio[idx] if dim is not None else None
            for idx, dim in enumerate(input_shape)
        ]
        output_shape = tuple(output_shape)
        return [output_shape, output_shape]
    def compute_mask(self, inputs, mask=None):
        # Two outputs, neither carries a mask.
        return 2 * [None]
class MaxUnpooling2D(keras.layers.Layer):
    """Inverse of MaxPoolingWithArgmax2D: scatters pooled values back to their
    original positions (given the stored argmax mask), zero elsewhere.
    NOTE(review): uses tf.variable_scope, which exists only in TF1.x graphs —
    confirm the targeted TensorFlow version."""
    def __init__(self, size=(2, 2), **kwargs):
        super(MaxUnpooling2D, self).__init__(**kwargs)
        self.size = size
    def call(self, inputs, output_shape=None):
        # inputs = [pooled values, float-encoded argmax indices].
        updates, mask = inputs[0], inputs[1]
        with tf.variable_scope(self.name):
            mask = K.cast(mask, "int32")
            input_shape = tf.shape(updates, out_type="int32")
            # calculation new shape
            if output_shape is None:
                output_shape = (
                    input_shape[0],
                    input_shape[1] * self.size[0],
                    input_shape[2] * self.size[1],
                    input_shape[3],
                )
            self.output_shape1 = output_shape
            # calculation indices for batch, height, width and feature maps
            one_like_mask = K.ones_like(mask, dtype="int32")
            batch_shape = K.concatenate([[input_shape[0]], [1], [1], [1]], axis=0)
            batch_range = K.reshape(
                tf.range(output_shape[0], dtype="int32"), shape=batch_shape
            )
            b = one_like_mask * batch_range
            # Decode the flat argmax back into (y, x) coordinates.
            y = mask // (output_shape[2] * output_shape[3])
            x = (mask // output_shape[3]) % output_shape[2]
            feature_range = tf.range(output_shape[3], dtype="int32")
            f = one_like_mask * feature_range
            # transpose indices & reshape update values to one dimension
            updates_size = tf.size(updates)
            indices = K.transpose(K.reshape(K.stack([b, y, x, f]), [4, updates_size]))
            values = K.reshape(updates, [updates_size])
            # Scatter each pooled value to its original location; rest stays 0.
            ret = tf.scatter_nd(indices, values, output_shape)
            return ret
    def compute_output_shape(self, input_shape):
        # Upsample H and W of the mask's shape by the unpool factor.
        mask_shape = input_shape[1]
        return (
            mask_shape[0],
            mask_shape[1] * self.size[0],
            mask_shape[2] * self.size[1],
            mask_shape[3],
        )
def segnet(input_shape, n_labels, kernel=3, pool_size=(2, 2), output_mode="softmax"):
    """Build a SegNet model.

    Encoder: 5 conv stages (VGG-like widths 64..512), each ending in
    MaxPoolingWithArgmax2D so the pooling indices are kept. Decoder mirrors the
    encoder, restoring resolution with MaxUnpooling2D using those indices.
    Output: (H*W, n_labels) activations per pixel under `output_mode`.
    """
    # encoder
    inputs = Input(shape=input_shape)
    conv_1 = Convolution2D(64, (kernel, kernel), padding="same")(inputs)
    conv_1 = BatchNormalization()(conv_1)
    conv_1 = Activation("relu")(conv_1)
    conv_2 = Convolution2D(64, (kernel, kernel), padding="same")(conv_1)
    conv_2 = BatchNormalization()(conv_2)
    conv_2 = Activation("relu")(conv_2)
    pool_1, mask_1 = MaxPoolingWithArgmax2D(pool_size)(conv_2)
    conv_3 = Convolution2D(128, (kernel, kernel), padding="same")(pool_1)
    conv_3 = BatchNormalization()(conv_3)
    conv_3 = Activation("relu")(conv_3)
    conv_4 = Convolution2D(128, (kernel, kernel), padding="same")(conv_3)
    conv_4 = BatchNormalization()(conv_4)
    conv_4 = Activation("relu")(conv_4)
    pool_2, mask_2 = MaxPoolingWithArgmax2D(pool_size)(conv_4)
    conv_5 = Convolution2D(256, (kernel, kernel), padding="same")(pool_2)
    conv_5 = BatchNormalization()(conv_5)
    conv_5 = Activation("relu")(conv_5)
    conv_6 = Convolution2D(256, (kernel, kernel), padding="same")(conv_5)
    conv_6 = BatchNormalization()(conv_6)
    conv_6 = Activation("relu")(conv_6)
    conv_7 = Convolution2D(256, (kernel, kernel), padding="same")(conv_6)
    conv_7 = BatchNormalization()(conv_7)
    conv_7 = Activation("relu")(conv_7)
    pool_3, mask_3 = MaxPoolingWithArgmax2D(pool_size)(conv_7)
    conv_8 = Convolution2D(512, (kernel, kernel), padding="same")(pool_3)
    conv_8 = BatchNormalization()(conv_8)
    conv_8 = Activation("relu")(conv_8)
    conv_9 = Convolution2D(512, (kernel, kernel), padding="same")(conv_8)
    conv_9 = BatchNormalization()(conv_9)
    conv_9 = Activation("relu")(conv_9)
    conv_10 = Convolution2D(512, (kernel, kernel), padding="same")(conv_9)
    conv_10 = BatchNormalization()(conv_10)
    conv_10 = Activation("relu")(conv_10)
    pool_4, mask_4 = MaxPoolingWithArgmax2D(pool_size)(conv_10)
    conv_11 = Convolution2D(512, (kernel, kernel), padding="same")(pool_4)
    conv_11 = BatchNormalization()(conv_11)
    conv_11 = Activation("relu")(conv_11)
    conv_12 = Convolution2D(512, (kernel, kernel), padding="same")(conv_11)
    conv_12 = BatchNormalization()(conv_12)
    conv_12 = Activation("relu")(conv_12)
    conv_13 = Convolution2D(512, (kernel, kernel), padding="same")(conv_12)
    conv_13 = BatchNormalization()(conv_13)
    conv_13 = Activation("relu")(conv_13)
    pool_5, mask_5 = MaxPoolingWithArgmax2D(pool_size)(conv_13)
    # decoder (mirrors the encoder; each unpool consumes the matching mask)
    unpool_1 = MaxUnpooling2D(pool_size)([pool_5, mask_5])
    conv_14 = Convolution2D(512, (kernel, kernel), padding="same")(unpool_1)
    conv_14 = BatchNormalization()(conv_14)
    conv_14 = Activation("relu")(conv_14)
    conv_15 = Convolution2D(512, (kernel, kernel), padding="same")(conv_14)
    conv_15 = BatchNormalization()(conv_15)
    conv_15 = Activation("relu")(conv_15)
    conv_16 = Convolution2D(512, (kernel, kernel), padding="same")(conv_15)
    conv_16 = BatchNormalization()(conv_16)
    conv_16 = Activation("relu")(conv_16)
    unpool_2 = MaxUnpooling2D(pool_size)([conv_16, mask_4])
    conv_17 = Convolution2D(512, (kernel, kernel), padding="same")(unpool_2)
    conv_17 = BatchNormalization()(conv_17)
    conv_17 = Activation("relu")(conv_17)
    conv_18 = Convolution2D(512, (kernel, kernel), padding="same")(conv_17)
    conv_18 = BatchNormalization()(conv_18)
    conv_18 = Activation("relu")(conv_18)
    conv_19 = Convolution2D(256, (kernel, kernel), padding="same")(conv_18)
    conv_19 = BatchNormalization()(conv_19)
    conv_19 = Activation("relu")(conv_19)
    unpool_3 = MaxUnpooling2D(pool_size)([conv_19, mask_3])
    conv_20 = Convolution2D(256, (kernel, kernel), padding="same")(unpool_3)
    conv_20 = BatchNormalization()(conv_20)
    conv_20 = Activation("relu")(conv_20)
    conv_21 = Convolution2D(256, (kernel, kernel), padding="same")(conv_20)
    conv_21 = BatchNormalization()(conv_21)
    conv_21 = Activation("relu")(conv_21)
    conv_22 = Convolution2D(128, (kernel, kernel), padding="same")(conv_21)
    conv_22 = BatchNormalization()(conv_22)
    conv_22 = Activation("relu")(conv_22)
    unpool_4 = MaxUnpooling2D(pool_size)([conv_22, mask_2])
    conv_23 = Convolution2D(128, (kernel, kernel), padding="same")(unpool_4)
    conv_23 = BatchNormalization()(conv_23)
    conv_23 = Activation("relu")(conv_23)
    conv_24 = Convolution2D(64, (kernel, kernel), padding="same")(conv_23)
    conv_24 = BatchNormalization()(conv_24)
    conv_24 = Activation("relu")(conv_24)
    unpool_5 = MaxUnpooling2D(pool_size)([conv_24, mask_1])
    conv_25 = Convolution2D(64, (kernel, kernel), padding="same")(unpool_5)
    conv_25 = BatchNormalization()(conv_25)
    conv_25 = Activation("relu")(conv_25)
    # Per-pixel class scores, flattened to (H*W, n_labels) for the activation.
    conv_26 = Convolution2D(n_labels, (1, 1), padding="valid")(conv_25)
    conv_26 = BatchNormalization()(conv_26)
    conv_26 = Reshape(
        (input_shape[0] * input_shape[1], n_labels),
        input_shape=(input_shape[0], input_shape[1], n_labels),
    )(conv_26)
    outputs = Activation(output_mode)(conv_26)
    model = Model(inputs=inputs, outputs=outputs, name="SegNet")
    return model
# Model from https://github.com/ykamikawa/tf-keras-SegNet
class TestSegNet(unittest.TestCase):
    """Converts the SegNet Keras model to ONNX and compares runtime outputs."""

    def setUp(self):
        # ONNX files written by a successful test run; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        while self.model_files:
            os.remove(self.model_files.pop())

    @unittest.skipIf(get_maximum_opset_supported() < 11,
                     "ScatterNd support need opset >= 11.")
    def test_segnet(self):
        K.clear_session()
        net = segnet((128, 128, 3), 80)
        batch = np.random.rand(2, 128, 128, 3).astype(np.float32)
        reference = net.predict(batch)
        converted = mock_keras2onnx.convert_keras(net, net.name)
        outcome = run_keras_and_ort(converted.graph.name, converted, net,
                                    batch, reference, self.model_files)
        self.assertTrue(outcome)
# Allow running this test module directly (outside a pytest session).
if __name__ == "__main__":
    unittest.main()
| 10,721 | 37.430108 | 112 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_fcn.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import keras_segmentation
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_image
img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
class TestFCN(unittest.TestCase):
    """Round-trips the FCN-8 segmentation model through ONNX conversion."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        while self.model_files:
            os.remove(self.model_files.pop())

    def test_fcn(self):
        # From https://github.com/divamgupta/image-segmentation-keras/models/fcn.py
        net = keras_segmentation.models.fcn.fcn_8(101)
        outcome = run_image(net, self.model_files, img_path, target_size=(416, 608))
        self.assertTrue(*outcome)
# Allow running this test module directly (outside a pytest session).
if __name__ == "__main__":
    unittest.main()
| 840 | 25.28125 | 87 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_deep_rl.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from keras.initializers import RandomUniform
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GaussianNoise = keras.layers.GaussianNoise
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
MaxPooling2D = keras.layers.MaxPooling2D
Reshape = keras.layers.Reshape
Sequential = keras.models.Sequential
Model = keras.models.Model
def conv_layer(d, k):
    """Return a ReLU-activated 2-D convolution with `d` filters of size `k`."""
    layer = Conv2D(d, k, activation='relu', padding='same',
                   kernel_initializer='he_normal')
    return layer
def conv_block(inp, d=3, pool_size=(2, 2), k=3):
    """Conv -> max-pool block used by the DDQN image branch."""
    features = conv_layer(d, k)(inp)
    pooled = MaxPooling2D(pool_size=pool_size)(features)
    return pooled
# Model from https://github.com/germain-hug/Deep-RL-Keras
class TestDeepRL(unittest.TestCase):
    """Exercises actor/critic/DDQN network shapes from the Deep-RL-Keras
    project by converting each Keras model to ONNX and comparing runtime
    predictions against Keras."""

    def setUp(self):
        # ONNX files written by a test; deleted again in tearDown.
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_DPPG_actor(self):
        """DDPG actor: Dense/GaussianNoise trunk with a scaled tanh head."""
        K.clear_session()
        env_dim = (2, 3)
        act_dim = 5
        act_range = 4
        inp = Input(shape=env_dim)
        #
        x = Dense(256, activation='relu')(inp)
        x = GaussianNoise(1.0)(x)
        #
        x = Flatten()(x)
        x = Dense(128, activation='relu')(x)
        x = GaussianNoise(1.0)(x)
        #
        # The Lambda rescales the tanh output from [-1, 1] to
        # [-act_range, act_range].
        out = Dense(act_dim, activation='tanh', kernel_initializer=RandomUniform())(x)
        out = Lambda(lambda i: i * act_range)(out)
        #
        keras_model = Model(inp, out)
        data = np.random.rand(1000, 2, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_DPPG_critic(self):
        """DDPG critic: state and action inputs merged into a scalar Q-value."""
        K.clear_session()
        env_dim = (2, 3)
        act_dim = (5,)
        state = Input(shape=env_dim)
        action = Input(shape=act_dim)
        x = Dense(256, activation='relu')(state)
        # The flattened state features are concatenated with the raw action.
        x = concatenate([Flatten()(x), action])
        x = Dense(128, activation='relu')(x)
        out = Dense(1, activation='linear', kernel_initializer=RandomUniform())(x)
        keras_model = Model([state, action], out)
        data1 = np.random.rand(5, 2, 3).astype(np.float32)
        data2 = np.random.rand(5, 5).astype(np.float32)
        expected = keras_model.predict([data1, data2])
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, [data1, data2], expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_DDQN(self):
        """Dueling DQN: a Lambda recombines the value and advantage streams."""
        K.clear_session()
        state_dim = (2, 3)
        action_dim = 5
        inp = Input(shape=(state_dim))
        # Determine whether we are dealing with an image input (Atari) or not
        if (len(state_dim) > 2):
            inp = Input((state_dim[1:]))
            x = conv_block(inp, 32, (2, 2), 8)
            x = conv_block(x, 64, (2, 2), 4)
            x = conv_block(x, 64, (2, 2), 3)
            x = Flatten()(x)
            x = Dense(256, activation='relu')(x)
        else:
            x = Flatten()(inp)
            x = Dense(64, activation='relu')(x)
            x = Dense(64, activation='relu')(x)
        x = Dense(action_dim + 1, activation='linear')(x)
        # Dueling aggregation: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a),
        # where column 0 is the value stream and columns 1: are advantages.
        x = Lambda(lambda i: K.expand_dims(i[:,0],-1) + i[:,1:] - K.mean(i[:,1:], keepdims=True),
                   output_shape=(action_dim,))(x)
        keras_model = Model(inp, x)
        data = np.random.rand(1000, 2, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected,
                              self.model_files, rtol=1e-2, atol=2e-2))
# Allow running this test module directly (outside a pytest session).
if __name__ == "__main__":
    unittest.main()
| 4,790 | 34.488889 | 122 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_nonlocal.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
from keras.regularizers import l2
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
add = keras.layers.add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv1D = keras.layers.Conv1D
Conv2D = keras.layers.Conv2D
Conv3D = keras.layers.Conv3D
Dense = keras.layers.Dense
dot = keras.layers.dot
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GaussianNoise = keras.layers.GaussianNoise
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
GlobalMaxPooling2D = keras.layers.GlobalMaxPooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
MaxPool1D = keras.layers.MaxPool1D
MaxPooling2D = keras.layers.MaxPooling2D
Reshape = keras.layers.Reshape
Sequential = keras.models.Sequential
Model = keras.models.Model
def non_local_block(ip, intermediate_dim=None, compression=2,
                    mode='embedded', add_residual=True):
    """Adds a non-local (self-attention) block over `ip` and returns a tensor
    of the same shape.

    Args:
        ip: input tensor of rank 3 (temporal), 4 (spatial) or 5
            (spatio-temporal), channels_first or channels_last.
        intermediate_dim: channels of the embedding; defaults to half the
            input channels (minimum 1).
        compression: max-pool factor on the phi/g paths that shrinks the
            pairwise attention map; None or 1 disables it.
        mode: pairwise function — 'gaussian', 'embedded', 'dot' or
            'concatenate' ('concatenate' raises NotImplementedError).
        add_residual: if True, add the input back onto the projected output.
    """
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    ip_shape = K.int_shape(ip)
    if mode not in ['gaussian', 'embedded', 'dot', 'concatenate']:
        raise ValueError('`mode` must be one of `gaussian`, `embedded`, `dot` or `concatenate`')
    if compression is None:
        compression = 1  # 1 == no compression of the phi/g paths
    dim1, dim2, dim3 = None, None, None
    # check rank and calculate the input shape
    if len(ip_shape) == 3:  # temporal / time series data
        rank = 3
        batchsize, dim1, channels = ip_shape
    elif len(ip_shape) == 4:  # spatial / image data
        rank = 4
        if channel_dim == 1:
            batchsize, channels, dim1, dim2 = ip_shape
        else:
            batchsize, dim1, dim2, channels = ip_shape
    elif len(ip_shape) == 5:  # spatio-temporal / Video or Voxel data
        rank = 5
        if channel_dim == 1:
            batchsize, channels, dim1, dim2, dim3 = ip_shape
        else:
            batchsize, dim1, dim2, dim3, channels = ip_shape
    else:
        raise ValueError('Input dimension has to be either 3 (temporal), 4 (spatial) or 5 (spatio-temporal)')
    # verify correct intermediate dimension specified
    if intermediate_dim is None:
        intermediate_dim = channels // 2
        if intermediate_dim < 1:
            intermediate_dim = 1
    else:
        intermediate_dim = int(intermediate_dim)
        if intermediate_dim < 1:
            raise ValueError('`intermediate_dim` must be either `None` or positive integer greater than 1.')
    if mode == 'gaussian':  # Gaussian instantiation
        x1 = Reshape((-1, channels))(ip)  # xi
        x2 = Reshape((-1, channels))(ip)  # xj
        f = dot([x1, x2], axes=2)
        f = Activation('softmax')(f)
    elif mode == 'dot':  # Dot instantiation
        # theta path
        theta = _convND(ip, rank, intermediate_dim)
        theta = Reshape((-1, intermediate_dim))(theta)
        # phi path
        phi = _convND(ip, rank, intermediate_dim)
        phi = Reshape((-1, intermediate_dim))(phi)
        f = dot([theta, phi], axes=2)
        size = K.int_shape(f)
        # scale the values to make it size invariant
        f = Lambda(lambda z: (1. / float(size[-1])) * z)(f)
    elif mode == 'concatenate':  # Concatenation instantiation
        raise NotImplementedError('Concatenate model has not been implemented yet')
    else:  # Embedded Gaussian instantiation
        # theta path
        theta = _convND(ip, rank, intermediate_dim)
        theta = Reshape((-1, intermediate_dim))(theta)
        # phi path
        phi = _convND(ip, rank, intermediate_dim)
        phi = Reshape((-1, intermediate_dim))(phi)
        if compression > 1:
            # shielded computation
            phi = MaxPool1D(compression)(phi)
        f = dot([theta, phi], axes=2)
        f = Activation('softmax')(f)
    # g path
    g = _convND(ip, rank, intermediate_dim)
    g = Reshape((-1, intermediate_dim))(g)
    if compression > 1 and mode == 'embedded':
        # shielded computation
        g = MaxPool1D(compression)(g)
    # compute output path
    y = dot([f, g], axes=[2, 1])
    # reshape to input tensor format
    if rank == 3:
        y = Reshape((dim1, intermediate_dim))(y)
    elif rank == 4:
        if channel_dim == -1:
            y = Reshape((dim1, dim2, intermediate_dim))(y)
        else:
            y = Reshape((intermediate_dim, dim1, dim2))(y)
    else:
        if channel_dim == -1:
            y = Reshape((dim1, dim2, dim3, intermediate_dim))(y)
        else:
            y = Reshape((intermediate_dim, dim1, dim2, dim3))(y)
    # project filters (back up to the input channel count)
    y = _convND(y, rank, channels)
    # residual connection
    if add_residual:
        y = add([ip, y])
    return y
def _convND(ip, rank, channels):
    """Pointwise (1x1) convolution matching the rank of `ip` (3, 4 or 5)."""
    assert rank in [3, 4, 5], "Rank of input must be 3, 4 or 5"
    if rank == 3:
        return Conv1D(channels, 1, padding='same', use_bias=False,
                      kernel_initializer='he_normal')(ip)
    if rank == 4:
        return Conv2D(channels, (1, 1), padding='same', use_bias=False,
                      kernel_initializer='he_normal')(ip)
    return Conv3D(channels, (1, 1, 1), padding='same', use_bias=False,
                  kernel_initializer='he_normal')(ip)
def _bn_relu(x, bn_name=None, relu_name=None):
    """Helper to build a BN -> relu block."""
    normalized = BatchNormalization(axis=CHANNEL_AXIS, name=bn_name)(x)
    activated = Activation("relu", name=relu_name)(normalized)
    return activated
def _conv_bn_relu(**conv_params):
    """Helper to build a conv -> BN -> relu residual unit activation function.
    This is the original ResNet v1 scheme in https://arxiv.org/abs/1512.03385
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    # Optional parameters with the same defaults the v2 helper uses.
    strides = conv_params.get("strides", (1, 1))
    dilation_rate = conv_params.get("dilation_rate", (1, 1))
    conv_name = conv_params.get("conv_name")
    bn_name = conv_params.get("bn_name")
    relu_name = conv_params.get("relu_name")
    kernel_initializer = conv_params.get("kernel_initializer", "he_normal")
    padding = conv_params.get("padding", "same")
    kernel_regularizer = conv_params.get("kernel_regularizer", l2(1.e-4))

    def f(x):
        conv = Conv2D(filters=filters, kernel_size=kernel_size,
                      strides=strides, padding=padding,
                      dilation_rate=dilation_rate,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=kernel_regularizer,
                      name=conv_name)(x)
        return _bn_relu(conv, bn_name=bn_name, relu_name=relu_name)
    return f
def _bn_relu_conv(**conv_params):
    """Helper to build a BN -> relu -> conv residual unit with full pre-activation.
    This is the ResNet v2 scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    # Optional parameters with the same defaults the v1 helper uses.
    strides = conv_params.get("strides", (1, 1))
    dilation_rate = conv_params.get("dilation_rate", (1, 1))
    conv_name = conv_params.get("conv_name")
    bn_name = conv_params.get("bn_name")
    relu_name = conv_params.get("relu_name")
    kernel_initializer = conv_params.get("kernel_initializer", "he_normal")
    padding = conv_params.get("padding", "same")
    kernel_regularizer = conv_params.get("kernel_regularizer", l2(1.e-4))

    def f(x):
        pre_activation = _bn_relu(x, bn_name=bn_name, relu_name=relu_name)
        return Conv2D(filters=filters, kernel_size=kernel_size,
                      strides=strides, padding=padding,
                      dilation_rate=dilation_rate,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=kernel_regularizer,
                      name=conv_name)(pre_activation)
    return f
def _shortcut(input_feature, residual, conv_name_base=None, bn_name_base=None):
    """Merge `input_feature` and `residual` with an element-wise sum,
    projecting the shortcut through a 1x1 conv (+ BN) whenever the spatial
    size or channel count differs."""
    # Strides are the (rounded) spatial down-sampling factors of the residual;
    # should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input_feature)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    same_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

    shortcut = input_feature
    # 1x1 conv if shape is different, else identity.
    if stride_width > 1 or stride_height > 1 or not same_channels:
        print('reshaping via a convolution...')
        if conv_name_base is not None:
            conv_name_base = conv_name_base + '1'
        shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_width, stride_height),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(0.0001),
                          name=conv_name_base)(input_feature)
        if bn_name_base is not None:
            bn_name_base = bn_name_base + '1'
        shortcut = BatchNormalization(axis=CHANNEL_AXIS, name=bn_name_base)(shortcut)
    return add([shortcut, residual])
def _residual_block(block_function, filters, blocks, stage,
                    transition_strides=None, transition_dilation_rates=None,
                    dilation_rates=(1, 1), is_first_layer=False, dropout=None,
                    residual_unit=_bn_relu_conv):
    """Builds a residual block with repeating bottleneck blocks.
    stage: integer, current stage label, used for generating layer names
    blocks: number of blocks 'a','b'..., current block label, used for generating layer names
    transition_strides: a list of tuples for the strides of each transition
    transition_dilation_rates: a list of tuples for the dilation rate of each transition
    """
    if transition_dilation_rates is None:
        transition_dilation_rates = [(1, 1)] * blocks
    if transition_strides is None:
        transition_strides = [(1, 1)] * blocks

    def f(x):
        for i in range(blocks):
            x = block_function(filters=filters, stage=stage, block=i,
                               transition_strides=transition_strides[i],
                               dilation_rate=transition_dilation_rates[i],
                               is_first_block_of_first_layer=(is_first_layer and i == 0),
                               dropout=dropout,
                               residual_unit=residual_unit)(x)
        # Non Local Blook
        # Only the deeper stages (>= 256 filters) get a non-local block.
        if filters >= 256:
            print("Filters : ", filters, "Adding Non Local Blocks")
            x = non_local_block(x, mode='embedded', compression=2)
        return x
    return f
def _block_name_base(stage, block):
"""Get the convolution name base and batch normalization name base defined by stage and block.
If there are less than 26 blocks they will be labeled 'a', 'b', 'c' to match the paper and keras
and beyond 26 blocks they will simply be numbered.
"""
if block < 27:
block = '%c' % (block + 97) # 97 is the ascii number for lowercase 'a'
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
return conv_name_base, bn_name_base
def basic_block(filters, stage, block, transition_strides=(1, 1),
                dilation_rate=(1, 1), is_first_block_of_first_layer=False, dropout=None,
                residual_unit=_bn_relu_conv):
    """Basic 3 X 3 convolution blocks for use on resnets with layers <= 34.
    Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
    """
    def f(input_features):
        conv_name_base, bn_name_base = _block_name_base(stage, block)
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            out = Conv2D(filters=filters, kernel_size=(3, 3),
                         strides=transition_strides,
                         dilation_rate=dilation_rate,
                         padding="same",
                         kernel_initializer="he_normal",
                         kernel_regularizer=l2(1e-4),
                         name=conv_name_base + '2a')(input_features)
        else:
            out = residual_unit(filters=filters, kernel_size=(3, 3),
                                strides=transition_strides,
                                dilation_rate=dilation_rate,
                                conv_name_base=conv_name_base + '2a',
                                bn_name_base=bn_name_base + '2a')(input_features)
        if dropout is not None:
            out = Dropout(dropout)(out)
        out = residual_unit(filters=filters, kernel_size=(3, 3),
                            conv_name_base=conv_name_base + '2b',
                            bn_name_base=bn_name_base + '2b')(out)
        return _shortcut(input_features, out)
    return f
def bottleneck(filters, stage, block, transition_strides=(1, 1),
               dilation_rate=(1, 1), is_first_block_of_first_layer=False, dropout=None,
               residual_unit=_bn_relu_conv):
    """Bottleneck architecture for > 34 layer resnet.
    Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
    Returns:
        A final conv layer of filters * 4
    """
    def f(input_feature):
        conv_name_base, bn_name_base = _block_name_base(stage, block)
        # 1x1 reduce -> 3x3 -> 1x1 expand (x4), optional dropout in between.
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            out = Conv2D(filters=filters, kernel_size=(1, 1),
                         strides=transition_strides,
                         dilation_rate=dilation_rate,
                         padding="same",
                         kernel_initializer="he_normal",
                         kernel_regularizer=l2(1e-4),
                         name=conv_name_base + '2a')(input_feature)
        else:
            out = residual_unit(filters=filters, kernel_size=(1, 1),
                                strides=transition_strides,
                                dilation_rate=dilation_rate,
                                conv_name_base=conv_name_base + '2a',
                                bn_name_base=bn_name_base + '2a')(input_feature)
        if dropout is not None:
            out = Dropout(dropout)(out)
        out = residual_unit(filters=filters, kernel_size=(3, 3),
                            conv_name_base=conv_name_base + '2b',
                            bn_name_base=bn_name_base + '2b')(out)
        if dropout is not None:
            out = Dropout(dropout)(out)
        out = residual_unit(filters=filters * 4, kernel_size=(1, 1),
                            conv_name_base=conv_name_base + '2c',
                            bn_name_base=bn_name_base + '2c')(out)
        return _shortcut(input_feature, out)
    return f
def _handle_dim_ordering():
global ROW_AXIS
global COL_AXIS
global CHANNEL_AXIS
ROW_AXIS = 1
COL_AXIS = 2
CHANNEL_AXIS = 3
def NonLocalResNet(input_shape=None, classes=10, block='bottleneck', residual_unit='v2', repetitions=None,
                   initial_filters=64, activation='softmax', include_top=True, input_tensor=None, dropout=None,
                   transition_dilation_rate=(1, 1), initial_strides=(2, 2), initial_kernel_size=(7, 7),
                   initial_pooling='max', final_pooling=None, top='classification'):
    """Build a ResNet augmented with non-local blocks.

    Args:
        input_shape: rank-3 shape; converted automatically if the backend
            is channels_first.
        classes: number of output classes.
        block: 'basic', 'bottleneck', or a custom block function.
        residual_unit: 'v2' (BN->relu->conv), 'v1' (conv->BN->relu), or a callable.
        repetitions: blocks per stage; defaults to [3, 4, 6, 3] (ResNet-50 layout).
        initial_filters: stem filters, doubled after each stage.
        activation: final activation — 'softmax', 'sigmoid' or None.
        include_top / top: attach a 'classification' or 'segmentation' head.
        input_tensor: optional existing tensor to build on.
        dropout: optional dropout rate inside the residual blocks.
        transition_dilation_rate, initial_strides, initial_kernel_size,
        initial_pooling, final_pooling: stem / transition geometry knobs.

    Returns:
        A keras `Model`.

    Raises:
        ValueError: for invalid `activation` combinations.
        Exception: if `input_shape` is not rank 3.
    """
    if activation not in ['softmax', 'sigmoid', None]:
        raise ValueError('activation must be one of "softmax", "sigmoid", or None')
    if activation == 'sigmoid' and classes != 1:
        raise ValueError('sigmoid activation can only be used when classes = 1')
    if repetitions is None:
        repetitions = [3, 4, 6, 3]
    _handle_dim_ordering()
    if len(input_shape) != 3:
        raise Exception("Input shape should be a tuple (nb_channels, nb_rows, nb_cols)")
    # Resolve the block function and the residual-unit flavour.
    if block == 'basic':
        block_fn = basic_block
    elif block == 'bottleneck':
        block_fn = bottleneck
    else:
        block_fn = block
    if residual_unit == 'v2':
        residual_unit = _bn_relu_conv
    elif residual_unit == 'v1':
        residual_unit = _conv_bn_relu
    # Permute dimension order if necessary
    if K.image_data_format() == 'channels_first':
        input_shape = (input_shape[1], input_shape[2], input_shape[0])
    img_input = Input(shape=input_shape, tensor=input_tensor)
    x = _conv_bn_relu(filters=initial_filters, kernel_size=initial_kernel_size, strides=initial_strides)(img_input)
    if initial_pooling == 'max':
        x = MaxPooling2D(pool_size=(3, 3), strides=initial_strides, padding="same")(x)
    block = x
    filters = initial_filters
    for i, r in enumerate(repetitions):
        transition_dilation_rates = [transition_dilation_rate] * r
        transition_strides = [(1, 1)] * r
        if transition_dilation_rate == (1, 1):
            # Down-sample at the start of every stage when not dilating.
            transition_strides[0] = (2, 2)
        block = _residual_block(block_fn, filters=filters,
                                stage=i, blocks=r,
                                is_first_layer=(i == 0),
                                dropout=dropout,
                                transition_dilation_rates=transition_dilation_rates,
                                transition_strides=transition_strides,
                                residual_unit=residual_unit)(block)
        filters *= 2
    # Last activation
    x = _bn_relu(block)
    # Classifier block.
    # BUGFIX: the original compared `top is 'classification'` / `is 'segmentation'`,
    # which tests object identity, only works by CPython string-interning accident,
    # and raises SyntaxWarning on Python 3.8+. Compare string values with `==`.
    if include_top and top == 'classification':
        x = GlobalAveragePooling2D()(x)
        x = Dense(units=classes, activation=activation, kernel_initializer="he_normal")(x)
    elif include_top and top == 'segmentation':
        x = Conv2D(classes, (1, 1), activation='linear', padding='same')(x)
        if K.image_data_format() == 'channels_first':
            channel, row, col = input_shape
        else:
            row, col, channel = input_shape
        # Per-pixel softmax: flatten spatially, activate, restore the grid.
        x = Reshape((row * col, classes))(x)
        x = Activation(activation)(x)
        x = Reshape((row, col, classes))(x)
    elif final_pooling == 'avg':
        x = GlobalAveragePooling2D()(x)
    elif final_pooling == 'max':
        x = GlobalMaxPooling2D()(x)
    model = Model(inputs=img_input, outputs=x)
    return model
def NonLocalResNet18(input_shape, classes):
    """Non-local ResNet with 18 layers (basic blocks, v2 residual units)."""
    reps = [2, 2, 2, 2]
    return NonLocalResNet(input_shape, classes, basic_block, repetitions=reps)
def NonLocalResNet50(input_shape, classes):
    """Non-local ResNet with 50 layers (bottleneck blocks, v2 residual units)."""
    reps = [3, 4, 6, 3]
    return NonLocalResNet(input_shape, classes, bottleneck, repetitions=reps)
# Model from https://github.com/titu1994/keras-non-local-nets
class TestNonLocalNets(unittest.TestCase):
    """Round-trips the non-local ResNet-18/50 variants through ONNX."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        while self.model_files:
            os.remove(self.model_files.pop())

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_non_local_nets(self):
        K.clear_session()
        candidates = [NonLocalResNet18((128, 160, 3), classes=10),
                      NonLocalResNet50((128, 160, 3), classes=10)]
        for net in candidates:
            batch = np.random.rand(2, 128, 160, 3).astype(np.float32)
            reference = net.predict(batch)
            converted = mock_keras2onnx.convert_keras(net, net.name)
            self.assertTrue(
                run_keras_and_ort(converted.graph.name, converted, net, batch, reference, self.model_files))
# Allow running this test module directly (outside a pytest session).
if __name__ == "__main__":
    unittest.main()
| 20,235 | 37.471483 | 116 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_mask_rcnn.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
from mock_keras2onnx import set_converter
from mock_keras2onnx.proto import keras
import onnx
import numpy as np
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime, print_mismatches, convert_tf_crop_and_resize
import urllib.request
MASKRCNN_WEIGHTS_PATH = r'https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5'
model_file_name = 'mask_rcnn_coco.h5'
# Download the pretrained COCO weights once, at import time.
if not os.path.exists(model_file_name):
    urllib.request.urlretrieve(MASKRCNN_WEIGHTS_PATH, model_file_name)
keras.backend.clear_session()
# The mask_rcnn helper module lives outside this package, next to the tests.
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../mask_rcnn/'))
from mask_rcnn import model
from packaging.version import Version
working_path = os.path.abspath(os.path.dirname(__file__))
# Converted ONNX models are written under ./temp.
tmp_path = os.path.join(working_path, 'temp')
# mask rcnn code From https://github.com/matterport/Mask_RCNN
class TestMaskRCNN(unittest.TestCase):
    """Converts the matterport Mask R-CNN Keras model to ONNX and compares
    onnxruntime output against the Keras prediction on a sample image."""

    def setUp(self):
        # Successfully-compared model files, cleaned up in tearDown.
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(Version(onnx.__version__.split('-')[0]) < Version("1.6.0"),
                     "Mask-rcnn conversion needs contrib op for onnx < 1.6.0.")
    def test_mask_rcnn(self):
        # CropAndResize has no direct ONNX op; register a custom converter.
        set_converter('CropAndResize', convert_tf_crop_and_resize)
        onnx_model = mock_keras2onnx.convert_keras(model.keras_model)
        import skimage
        img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
        image = skimage.io.imread(img_path)
        images = [image]
        case_name = 'mask_rcnn'
        if not os.path.exists(tmp_path):
            os.mkdir(tmp_path)
        temp_model_file = os.path.join(tmp_path, 'temp_' + case_name + '.onnx')
        onnx.save_model(onnx_model, temp_model_file)
        try:
            import onnxruntime
            sess = onnxruntime.InferenceSession(temp_model_file)
        except ImportError:
            # onnxruntime not installed: treat as a pass (conversion-only check).
            return True
        # preprocessing
        molded_images, image_metas, windows = model.mold_inputs(images)
        anchors = model.get_anchors(molded_images[0].shape)
        anchors = np.broadcast_to(anchors, (model.config.BATCH_SIZE,) + anchors.shape)
        expected = model.keras_model.predict(
            [molded_images.astype(np.float32), image_metas.astype(np.float32), anchors])
        actual = \
            sess.run(None, {"input_image": molded_images.astype(np.float32),
                            "input_anchors": anchors,
                            "input_image_meta": image_metas.astype(np.float32)})
        rtol = 1.e-3
        atol = 1.e-6
        # Only outputs 0 and 3 are compared; the rest may legitimately differ.
        compare_idx = [0, 3]
        res = all(np.allclose(expected[n_], actual[n_], rtol=rtol, atol=atol) for n_ in compare_idx)
        if res and temp_model_file not in self.model_files:  # still keep the failed case files for the diagnosis.
            self.model_files.append(temp_model_file)
        if not res:
            # Dump element-wise mismatches to aid diagnosis before failing.
            for n_ in compare_idx:
                expected_list = expected[n_].flatten()
                actual_list = actual[n_].flatten()
                print_mismatches(case_name, n_, expected_list, actual_list, atol, rtol)
        self.assertTrue(res)
# Allow running this test module directly (outside a pytest session).
if __name__ == "__main__":
    unittest.main()
| 3,410 | 36.483516 | 114 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_densenet_1.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import keras_segmentation
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_image
img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
from mock_keras2onnx.proto import is_keras_older_than
class TestDenseNet_1(unittest.TestCase):
    """Round-trips the titu1994 DenseNet-121 implementation through ONNX."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        while self.model_files:
            os.remove(self.model_files.pop())

    @unittest.skipIf(is_keras_older_than("2.2.3"),
                     "Cannot import normalize_data_format from keras.backend")
    def test_densenet(self):
        # From https://github.com/titu1994/DenseNet/blob/master/densenet.py
        sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../model_source/densenet_1/'))
        import densenet_1
        net = densenet_1.DenseNetImageNet121(input_shape=(224, 224, 3))
        outcome = run_image(net, self.model_files, img_path, target_size=(224, 224))
        self.assertTrue(*outcome)
# Allow running this test module directly (outside a pytest session).
if __name__ == "__main__":
    unittest.main()
| 1,202 | 29.846154 | 99 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_mlstm_fcn.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv1D = keras.layers.Conv1D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling1D = keras.layers.GlobalAveragePooling1D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
LSTM = keras.layers.LSTM
Masking = keras.layers.Masking
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
def squeeze_excite_block(input):
    """Squeeze-and-excitation gate: reweight the channels of a 1-D feature map
    by a learned per-channel sigmoid factor."""
    # channel_axis = -1 for TF. NOTE(review): `_keras_shape` is a keras
    # internal — assumes the graph-building keras used by these tests.
    n_channels = input._keras_shape[-1]
    gate = GlobalAveragePooling1D()(input)
    gate = Reshape((1, n_channels))(gate)
    gate = Dense(n_channels // 16, activation='relu', kernel_initializer='he_normal', use_bias=False)(gate)
    gate = Dense(n_channels, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(gate)
    return multiply([input, gate])
# Synthetic time-series dimensions used by the tests below.
MAX_NB_VARIABLES = 20
MAX_TIMESTEPS = 5
NB_CLASS = 10
# Model from https://github.com/titu1994/MLSTM-FCN
class TestMLSTM_FCN(unittest.TestCase):
    """Converts the (M)LSTM-FCN time-series classifiers to ONNX and compares
    onnxruntime output with the Keras predictions."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(test_level_0 or get_maximum_opset_supported() < 11,
                     "Test level 0 only.")
    def test_MLSTM_FCN(self):
        """MLSTM-FCN: LSTM branch + conv branch with squeeze-excite gates."""
        K.clear_session()
        ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))
        # Recurrent branch over the raw (variables, timesteps) layout.
        x = Masking()(ip)
        x = LSTM(8)(x)
        x = Dropout(0.8)(x)
        # Convolutional branch works on the transposed (timesteps, variables) layout.
        y = Permute((2, 1))(ip)
        y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = squeeze_excite_block(y)
        y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = squeeze_excite_block(y)
        y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = GlobalAveragePooling1D()(y)
        # Merge both branches and classify.
        x = concatenate([x, y])
        out = Dense(NB_CLASS, activation='softmax')(x)
        keras_model = Model(ip, out)
        data = np.random.rand(2, MAX_NB_VARIABLES, MAX_TIMESTEPS).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    @unittest.skipIf(test_level_0 or get_maximum_opset_supported() < 11,
                     "Test level 0 only.")
    def test_LSTM_FCN(self):
        """LSTM-FCN: same two-branch layout but without squeeze-excite gates."""
        K.clear_session()
        ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))
        x = Masking()(ip)
        x = LSTM(8)(x)
        x = Dropout(0.8)(x)
        y = Permute((2, 1))(ip)
        y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = GlobalAveragePooling1D()(y)
        x = concatenate([x, y])
        out = Dense(NB_CLASS, activation='softmax')(x)
        keras_model = Model(ip, out)
        data = np.random.rand(2, MAX_NB_VARIABLES, MAX_TIMESTEPS).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))
# Allow running this test module directly (outside a pytest session).
if __name__ == "__main__":
    unittest.main()
| 4,770 | 31.903448 | 112 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/run_all.py | # SPDX-License-Identifier: Apache-2.0
import os
from os import listdir
from os.path import isfile, join
import argparse
# Driver script: run every test_*.py in this directory through pytest and
# fail (AssertionError, non-zero exit) if any of them failed.
parser = argparse.ArgumentParser()
parser.add_argument('--exclude')
args = parser.parse_args()
# --exclude takes a whitespace-separated list of test file names to skip.
exclude_set = set(args.exclude.split()) if args.exclude is not None else set()

# Make the shared test utilities and the repo root importable by the children.
os.environ["PYTHONPATH"] = \
    os.environ.get("PYTHONPATH", "") + os.pathsep + "../../keras2onnx_unit_tests" + os.pathsep + "../../../"
os.environ["TF2ONNX_CATCH_ERRORS"] = "FALSE"

mypath = '.'
files = [f for f in listdir(mypath) if isfile(join(mypath, f)) and f.startswith("test_")]
files.sort()

res_final = True
for f_ in files:
    if f_ not in exclude_set:
        # f_[5:-3] strips the "test_" prefix and ".py" suffix for the report name.
        res = os.system("pytest " + f_ + " --no-cov "
                        "--doctest-modules --junitxml=junit/test-results-" + f_[5:-3] + ".xml")
        if res > 0:
            res_final = False

# BUGFIX: replaces the `if res_final: assert(True) else: assert(False)`
# anti-pattern (assert(True) is a no-op) with a single meaningful assert.
assert res_final, "one or more nightly-build test files failed"
| 918 | 26.848485 | 108 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_keras_applications_v2.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
from mock_keras2onnx.proto import keras
from mock_keras2onnx.proto.tfcompat import is_tf2
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_image
img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
@unittest.skipIf(not is_tf2, "Tensorflow 2.x only tests")
class TestKerasApplications(unittest.TestCase):
    """Converts the stock tf.keras application models to ONNX and checks that
    ONNX Runtime reproduces the Keras prediction on a sample image."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def _verify(self, model, **run_kwargs):
        # Convert, run both backends on the shared image and compare outputs.
        res = run_image(model, self.model_files, img_path, **run_kwargs)
        self.assertTrue(*res)

    def test_DenseNet121(self):
        self._verify(keras.applications.densenet.DenseNet121(include_top=True, weights=None))

    def test_MobileNet(self):
        self._verify(keras.applications.mobilenet.MobileNet(weights=None))

    def test_MobileNetV2(self):
        self._verify(keras.applications.mobilenet_v2.MobileNetV2(weights=None))

    def test_NASNetMobile(self):
        self._verify(keras.applications.nasnet.NASNetMobile(weights=None))

    def test_InceptionV3(self):
        # Inference-only graph (disables dropout/batch-norm training branches).
        keras.backend.set_learning_phase(0)
        self._verify(keras.applications.inception_v3.InceptionV3(include_top=True),
                     target_size=299)

    def test_InceptionResNetV2(self):
        self._verify(keras.applications.inception_resnet_v2.InceptionResNetV2(include_top=True),
                     target_size=299)

    def test_ResNet50(self):
        self._verify(keras.applications.resnet_v2.ResNet50V2(include_top=True, weights=None))

    def test_Xception(self):
        # Xception needs a looser tolerance (separable convs accumulate error).
        self._verify(keras.applications.xception.Xception(include_top=True, weights=None),
                     atol=5e-3, target_size=299)
if __name__ == "__main__":
unittest.main()
| 2,691 | 33.512821 | 87 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_ssrnet.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import onnx
import numpy as np
from mock_keras2onnx.proto import keras
from keras.applications import VGG19
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime, test_level_0
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
Multiply = keras.layers.Multiply
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
K = keras.backend
# From https://github.com/shamangary/FSA-Net
class SSR_net_MT:
    """Multi-task SSR-Net (Soft Stagewise Regression) model builder.

    Builds a Keras model mapping a square RGB image to ``num_classes``
    regressed outputs through three coarse-to-fine stages whose per-bin
    predictions are fused by the SSR module at the end.
    From https://github.com/shamangary/FSA-Net.
    """
    def __init__(self, image_size, num_classes, stage_num, lambda_d):
        # image_size: input is (image_size, image_size, 3), channels-last.
        # num_classes: number of regressed outputs per image.
        # stage_num: sequence of 3 ints -- number of bins in each stage.
        # lambda_d: shrinkage factor multiplied with the learned deltas in SSR.
        self._channel_axis = -1
        self._input_shape = (image_size, image_size, 3)
        self.num_classes = num_classes
        self.stage_num = stage_num
        self.lambda_d = lambda_d
    def __call__(self):
        """Assemble and return the Keras Model: image -> 'pred_pose' output."""
        img_inputs = Input(self._input_shape)
        # Stream "x": SeparableConv/BN/ReLU blocks, downsampled by AveragePooling.
        # Intermediate taps x_layer1..x_layer4 feed the per-stage classifiers below.
        x = SeparableConv2D(16, (3, 3), padding='same')(img_inputs)
        x = BatchNormalization(axis=-1)(x)
        x = Activation('relu')(x)
        x_layer1 = AveragePooling2D((2, 2))(x)
        x = SeparableConv2D(32, (3, 3), padding='same')(x_layer1)
        x = BatchNormalization(axis=-1)(x)
        x = Activation('relu')(x)
        x = SeparableConv2D(32, (3, 3), padding='same')(x)
        x = BatchNormalization(axis=-1)(x)
        x = Activation('relu')(x)
        x_layer2 = AveragePooling2D((2, 2))(x)
        x = SeparableConv2D(64, (3, 3), padding='same')(x_layer2)
        x = BatchNormalization(axis=-1)(x)
        x = Activation('relu')(x)
        x = SeparableConv2D(64, (3, 3), padding='same')(x)
        x = BatchNormalization(axis=-1)(x)
        x = Activation('relu')(x)
        x_layer3 = AveragePooling2D((2, 2))(x)
        x = SeparableConv2D(128, (3, 3), padding='same')(x_layer3)
        x = BatchNormalization(axis=-1)(x)
        x = Activation('relu')(x)
        x = SeparableConv2D(128, (3, 3), padding='same')(x)
        x = BatchNormalization(axis=-1)(x)
        x_layer4 = Activation('relu')(x)
        # Stream "s": identical topology but tanh activations and MaxPooling;
        # taps s_layer1..s_layer4 are multiplied element-wise with the x taps.
        s = SeparableConv2D(16, (3, 3), padding='same')(img_inputs)
        s = BatchNormalization(axis=-1)(s)
        s = Activation('tanh')(s)
        s_layer1 = MaxPooling2D((2, 2))(s)
        s = SeparableConv2D(32, (3, 3), padding='same')(s_layer1)
        s = BatchNormalization(axis=-1)(s)
        s = Activation('tanh')(s)
        s = SeparableConv2D(32, (3, 3), padding='same')(s)
        s = BatchNormalization(axis=-1)(s)
        s = Activation('tanh')(s)
        s_layer2 = MaxPooling2D((2, 2))(s)
        s = SeparableConv2D(64, (3, 3), padding='same')(s_layer2)
        s = BatchNormalization(axis=-1)(s)
        s = Activation('tanh')(s)
        s = SeparableConv2D(64, (3, 3), padding='same')(s)
        s = BatchNormalization(axis=-1)(s)
        s = Activation('tanh')(s)
        s_layer3 = MaxPooling2D((2, 2))(s)
        s = SeparableConv2D(128, (3, 3), padding='same')(s_layer3)
        s = BatchNormalization(axis=-1)(s)
        s = Activation('tanh')(s)
        s = SeparableConv2D(128, (3, 3), padding='same')(s)
        s = BatchNormalization(axis=-1)(s)
        s_layer4 = Activation('tanh')(s)
        # Classifier block
        # Stage 1 head: fuse the deepest taps, then predict per-stage
        # bin activations (pred_a_s1), a bin-width delta (delta_s1) and a
        # local offset (local_s1) used by the SSR module.
        s_layer4 = Conv2D(64, (1, 1), activation='tanh')(s_layer4)
        s_layer4 = MaxPooling2D((2, 2))(s_layer4)
        x_layer4 = Conv2D(64, (1, 1), activation='relu')(x_layer4)
        x_layer4 = AveragePooling2D((2, 2))(x_layer4)
        feat_s1_pre = Multiply()([s_layer4, x_layer4])
        feat_s1_pre = Flatten()(feat_s1_pre)
        feat_delta_s1 = Dense(2 * self.num_classes, activation='tanh')(feat_s1_pre)
        delta_s1 = Dense(self.num_classes, activation='tanh', name='delta_s1')(feat_delta_s1)
        feat_local_s1 = Dense(2 * self.num_classes, activation='tanh')(feat_s1_pre)
        local_s1 = Dense(units=self.num_classes, activation='tanh', name='local_delta_stage1')(feat_local_s1)
        feat_pred_s1 = Dense(self.stage_num[0] * self.num_classes, activation='relu')(feat_s1_pre)
        pred_a_s1 = Reshape((self.num_classes, self.stage_num[0]))(feat_pred_s1)
        # Stage 2 head: same structure, built from the layer-3 taps.
        s_layer3 = Conv2D(64, (1, 1), activation='tanh')(s_layer3)
        s_layer3 = MaxPooling2D((2, 2))(s_layer3)
        x_layer3 = Conv2D(64, (1, 1), activation='relu')(x_layer3)
        x_layer3 = AveragePooling2D((2, 2))(x_layer3)
        feat_s2_pre = Multiply()([s_layer3, x_layer3])
        feat_s2_pre = Flatten()(feat_s2_pre)
        feat_delta_s2 = Dense(2 * self.num_classes, activation='tanh')(feat_s2_pre)
        delta_s2 = Dense(self.num_classes, activation='tanh', name='delta_s2')(feat_delta_s2)
        feat_local_s2 = Dense(2 * self.num_classes, activation='tanh')(feat_s2_pre)
        local_s2 = Dense(units=self.num_classes, activation='tanh', name='local_delta_stage2')(feat_local_s2)
        feat_pred_s2 = Dense(self.stage_num[1] * self.num_classes, activation='relu')(feat_s2_pre)
        pred_a_s2 = Reshape((self.num_classes, self.stage_num[1]))(feat_pred_s2)
        # Stage 3 head: same structure, built from the layer-2 taps.
        s_layer2 = Conv2D(64, (1, 1), activation='tanh')(s_layer2)
        s_layer2 = MaxPooling2D((2, 2))(s_layer2)
        x_layer2 = Conv2D(64, (1, 1), activation='relu')(x_layer2)
        x_layer2 = AveragePooling2D((2, 2))(x_layer2)
        feat_s3_pre = Multiply()([s_layer2, x_layer2])
        feat_s3_pre = Flatten()(feat_s3_pre)
        feat_delta_s3 = Dense(2 * self.num_classes, activation='tanh')(feat_s3_pre)
        delta_s3 = Dense(self.num_classes, activation='tanh', name='delta_s3')(feat_delta_s3)
        feat_local_s3 = Dense(2 * self.num_classes, activation='tanh')(feat_s3_pre)
        local_s3 = Dense(units=self.num_classes, activation='tanh', name='local_delta_stage3')(feat_local_s3)
        feat_pred_s3 = Dense(self.stage_num[2] * self.num_classes, activation='relu')(feat_s3_pre)
        pred_a_s3 = Reshape((self.num_classes, self.stage_num[2]))(feat_pred_s3)
        def SSR_module(x, s1, s2, s3, lambda_d):
            # Soft stagewise regression: combine per-stage bin activations
            # (x[0..2]), stage deltas (x[3..5]) and local offsets (x[6..8])
            # into one expectation-style prediction.
            # x: list of 9 tensors in the order passed to the Lambda below.
            a = x[0][:, :, 0] * 0
            b = x[0][:, :, 0] * 0
            c = x[0][:, :, 0] * 0
            di = s1 // 2
            dj = s2 // 2
            dk = s3 // 2
            # Output scale constant; presumably the pose range in degrees
            # (+/- 99) -- TODO confirm against the FSA-Net paper.
            V = 99
            # lambda_d = 0.9
            # Expectation over stage-1 bins, shifted by the local offset x[6].
            for i in range(0, s1):
                a = a + (i - di + x[6]) * x[0][:, :, i]
            # a = K.expand_dims(a,-1)
            a = a / (s1 * (1 + lambda_d * x[3]))
            # Stage 2 refines stage 1: note the divisor also includes stage 1's
            # (delta-adjusted) bin count, making each stage finer-grained.
            for j in range(0, s2):
                b = b + (j - dj + x[7]) * x[1][:, :, j]
            # b = K.expand_dims(b,-1)
            b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
            for k in range(0, s3):
                c = c + (k - dk + x[8]) * x[2][:, :, k]
            # c = K.expand_dims(c,-1)
            c = c / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4])) / (s3 * (1 + lambda_d * x[5]))
            pred = (a + b + c) * V
            return pred
        # Wrap the fusion in a Lambda layer; stage sizes and lambda_d are baked
        # in as constants, the 9 tensors are the runtime inputs.
        pred_pose = Lambda(SSR_module,
                           arguments={'s1': self.stage_num[0], 's2': self.stage_num[1], 's3': self.stage_num[2],
                                      'lambda_d': self.lambda_d}, name='pred_pose')(
            [pred_a_s1, pred_a_s2, pred_a_s3, delta_s1, delta_s2, delta_s3, local_s1, local_s2, local_s3])
        model = Model(inputs=img_inputs, outputs=pred_pose)
        return model
class TestSSRNet(unittest.TestCase):
    """Converts the multi-task SSR-Net model to ONNX and checks runtime parity."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_SSR_Net_MT(self):
        K.clear_session()
        image_size = 64
        stage_bins = [3, 3, 3]
        n_outputs = 3
        keras_model = SSR_net_MT(image_size, n_outputs, stage_bins, 1)()
        batch = np.random.rand(2, image_size, image_size, 3).astype(np.float32)
        expected = keras_model.predict(batch)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, batch, expected, self.model_files))
unittest.main()
| 9,524 | 41.333333 | 131 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_lsgan.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/lsgan/lsgan.py
class LSGAN():
    """Least-squares GAN for 28x28x1 images.

    From https://github.com/eriklindernoren/Keras-GAN/blob/master/lsgan/lsgan.py.
    `combined` stacks the generator into the (frozen) discriminator.
    """

    def __init__(self):
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100

        # Build the two sub-networks (discriminator first, as upstream does).
        self.discriminator = self.build_discriminator()
        self.generator = self.build_generator()

        # Combined model: latent vector -> generated image -> validity score.
        # The discriminator is frozen so only the generator would train here.
        z = Input(shape=(self.latent_dim,))
        img = self.generator(z)
        self.discriminator.trainable = False
        valid = self.discriminator(img)
        self.combined = Model(z, valid)

    def build_generator(self):
        """Map a latent vector to a tanh-activated image of `img_shape`."""
        model = Sequential()
        # Three widening blocks: Dense -> LeakyReLU -> BatchNorm.
        for block_idx, units in enumerate((256, 512, 1024)):
            if block_idx == 0:
                model.add(Dense(units, input_dim=self.latent_dim))
            else:
                model.add(Dense(units))
            model.add(LeakyReLU(alpha=0.2))
            model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))

        noise = Input(shape=(self.latent_dim,))
        return Model(noise, model(noise))

    def build_discriminator(self):
        """Score an image with a small MLP; raw output (LSGAN uses no softmax)."""
        model = Sequential()
        model.add(Flatten(input_shape=self.img_shape))
        for units in (512, 256):
            model.add(Dense(units))
            model.add(LeakyReLU(alpha=0.2))
        # (!!!) No softmax
        model.add(Dense(1))

        img = Input(shape=self.img_shape)
        return Model(img, model(img))
class TestLSGAN(unittest.TestCase):
    """Converts the LSGAN combined model to ONNX and checks runtime parity."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def test_LSGAN(self):
        keras_model = LSGAN().combined
        latent = np.random.rand(5, 100).astype(np.float32)
        expected = keras_model.predict(latent)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, latent, expected, self.model_files))
if __name__ == "__main__":
unittest.main()
| 3,548 | 29.076271 | 107 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_yolov3.py | # SPDX-License-Identifier: Apache-2.0
import os
from os.path import dirname, abspath
import numpy as np
import unittest
import sys
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../yolov3'))
from keras.models import load_model
import onnx
import urllib.request
from yolov3 import YOLO, convert_model
from packaging.version import Version
import mock_keras2onnx
from test_utils import is_bloburl_access
YOLOV3_WEIGHTS_PATH = r'https://lotus.blob.core.windows.net/converter-models/yolov3.h5'
model_file_name = 'yolov3.h5'
YOLOV3_TINY_WEIGHTS_PATH = r'https://lotus.blob.core.windows.net/converter-models/yolov3-tiny.h5'
tiny_model_file_name = 'yolov3-tiny.h5'
working_path = os.path.abspath(os.path.dirname(__file__))
tmp_path = os.path.join(working_path, 'temp')
class TestYoloV3(unittest.TestCase):
    """Downloads yolov3 / yolov3-tiny weights, converts the models to ONNX and
    compares the post-processed detections against the Keras outputs."""
    def setUp(self):
        # ONNX files written during a test; removed in tearDown.
        self.model_files = []
    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)
    def post_compute(self, all_boxes, all_scores, indices):
        """Gather the selected detections.

        `indices` holds (batch, class, box) triples (NonMaxSuppression-style
        output); returns [boxes, scores, classes] for those selections.
        """
        out_boxes, out_scores, out_classes = [], [], []
        for idx_ in indices[0]:
            out_classes.append(idx_[1])
            out_scores.append(all_scores[tuple(idx_)])
            # Score tensor is indexed (batch, class, box); the box tensor has
            # no class axis, so drop the middle index.
            idx_1 = (idx_[0], idx_[2])
            out_boxes.append(all_boxes[idx_1])
        return [out_boxes, out_scores, out_classes]
    @unittest.skipIf(Version(onnx.__version__.split('-')[0]) < Version("1.5.0"),
                     "NonMaxSuppression op is not supported for onnx < 1.5.0.")
    @unittest.skipIf(not is_bloburl_access(YOLOV3_WEIGHTS_PATH) or not is_bloburl_access(YOLOV3_TINY_WEIGHTS_PATH),
                     "Model blob url can't access.")
    def test_yolov3(self):
        """End-to-end check for both the tiny and the full yolov3 variants."""
        img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
        yolo3_yolo3_dir = os.path.join(os.path.dirname(__file__), '../../../keras-yolo3/yolo3')
        try:
            import onnxruntime
        except ImportError:
            # No runtime available: nothing to verify against, skip silently.
            return True
        from PIL import Image
        for is_tiny_yolo in [True, False]:
            # Fetch the weights on first use; pick anchors matching the variant.
            if is_tiny_yolo:
                if not os.path.exists(tiny_model_file_name):
                    urllib.request.urlretrieve(YOLOV3_TINY_WEIGHTS_PATH, tiny_model_file_name)
                yolo_weights = load_model(tiny_model_file_name)
                model_path = tiny_model_file_name  # model path or trained weights path
                anchors_path = 'model_data/tiny_yolo_anchors.txt'
                case_name = 'yolov3-tiny'
            else:
                if not os.path.exists(model_file_name):
                    urllib.request.urlretrieve(YOLOV3_WEIGHTS_PATH, model_file_name)
                yolo_weights = load_model(model_file_name)
                model_path = model_file_name  # model path or trained weights path
                anchors_path = 'model_data/yolo_anchors.txt'
                case_name = 'yolov3'
            my_yolo = YOLO(model_path, anchors_path, yolo3_yolo3_dir)
            my_yolo.load_model(yolo_weights)
            onnx_model = convert_model(my_yolo, is_tiny_yolo)
            if not os.path.exists(tmp_path):
                os.mkdir(tmp_path)
            temp_model_file = os.path.join(tmp_path, 'temp_' + case_name + '.onnx')
            onnx.save_model(onnx_model, temp_model_file)
            sess = onnxruntime.InferenceSession(temp_model_file)
            # Keras reference run on the sample image.
            image = Image.open(img_path)
            image_data = my_yolo.prepare_keras_data(image)
            all_boxes_k, all_scores_k, indices_k = my_yolo.final_model.predict([image_data, np.array([image.size[1], image.size[0]], dtype='float32').reshape(1, 2)])
            # ONNX run: converter produced an NCHW graph, so transpose NHWC input.
            image_data_onnx = np.transpose(image_data, [0, 3, 1, 2])
            feed_f = dict(zip(['input_1', 'image_shape'],
                              (image_data_onnx, np.array([image.size[1], image.size[0]], dtype='float32').reshape(1, 2))))
            all_boxes, all_scores, indices = sess.run(None, input_feed=feed_f)
            # Compare the gathered detections (boxes, scores, classes).
            expected = self.post_compute(all_boxes_k, all_scores_k, indices_k)
            actual = self.post_compute(all_boxes, all_scores, indices)
            res = all(np.allclose(expected[n_], actual[n_]) for n_ in range(3))
            self.assertTrue(res)
if __name__ == "__main__":
unittest.main()
| 4,362 | 39.027523 | 165 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_pixelda.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import keras_contrib
import numpy as np
from mock_keras2onnx import set_converter
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime, convert_InstanceNormalizationLayer
Activation = keras.layers.Activation
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
Concatenate = keras.layers.Concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
InstanceNormalization = keras_contrib.layers.InstanceNormalization
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/pixelda/pixelda.py
class PixelDA():
    """Pixel-level domain adaptation network.

    Generator translates images from domain A to domain B; a PatchGAN
    discriminator judges the translation and a classifier predicts the task
    label on the translated image.
    From https://github.com/eriklindernoren/Keras-GAN/blob/master/pixelda/pixelda.py.
    """
    def __init__(self):
        # Input shape
        self.img_rows = 32
        self.img_cols = 32
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.num_classes = 10
        # Loss weights (kept from upstream; unused here since this port only
        # builds the graphs and never compiles/trains them).
        lambda_adv = 10
        lambda_clf = 1
        # Calculate output shape of D (PatchGAN)
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)
        # Number of residual blocks in the generator
        self.residual_blocks = 6
        # Number of filters in first layer of discriminator and classifier
        self.df = 64
        self.cf = 64
        # Build and compile the discriminators
        self.discriminator = self.build_discriminator()
        # Build the generator
        self.generator = self.build_generator()
        # Build the task (classification) network
        self.clf = self.build_classifier()
        # Input images from both domains
        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)
        # Translate images from domain A to domain B
        fake_B = self.generator(img_A)
        # Classify the translated image
        class_pred = self.clf(fake_B)
        # For the combined model we will only train the generator and classifier
        self.discriminator.trainable = False
        # Discriminator determines validity of translated images
        valid = self.discriminator(fake_B)
        self.combined = Model(img_A, [valid, class_pred])
    def build_generator(self):
        """Resnet Generator"""
        def residual_block(layer_input):
            """Residual block described in paper"""
            d = Conv2D(64, kernel_size=3, strides=1, padding='same')(layer_input)
            d = BatchNormalization(momentum=0.8)(d)
            d = Activation('relu')(d)
            d = Conv2D(64, kernel_size=3, strides=1, padding='same')(d)
            d = BatchNormalization(momentum=0.8)(d)
            d = Add()([d, layer_input])
            return d
        # Image input
        img = Input(shape=self.img_shape)
        l1 = Conv2D(64, kernel_size=3, padding='same', activation='relu')(img)
        # Propogate signal through residual blocks
        r = residual_block(l1)
        for _ in range(self.residual_blocks - 1):
            r = residual_block(r)
        # tanh output keeps translated pixels in [-1, 1].
        output_img = Conv2D(self.channels, kernel_size=3, padding='same', activation='tanh')(r)
        return Model(img, output_img)
    def build_discriminator(self):
        """PatchGAN discriminator: 4 strided conv blocks, 1-channel validity map."""
        def d_layer(layer_input, filters, f_size=4, normalization=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if normalization:
                d = InstanceNormalization()(d)
            return d
        img = Input(shape=self.img_shape)
        # First block skips normalization (common GAN convention in this repo).
        d1 = d_layer(img, self.df, normalization=False)
        d2 = d_layer(d1, self.df*2)
        d3 = d_layer(d2, self.df*4)
        d4 = d_layer(d3, self.df*8)
        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)
        return Model(img, validity)
    def build_classifier(self):
        """Task classifier: 5 strided conv blocks, then softmax over num_classes."""
        def clf_layer(layer_input, filters, f_size=4, normalization=True):
            """Classifier layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if normalization:
                d = InstanceNormalization()(d)
            return d
        img = Input(shape=self.img_shape)
        c1 = clf_layer(img, self.cf, normalization=False)
        c2 = clf_layer(c1, self.cf*2)
        c3 = clf_layer(c2, self.cf*4)
        c4 = clf_layer(c3, self.cf*8)
        c5 = clf_layer(c4, self.cf*8)
        class_pred = Dense(self.num_classes, activation='softmax')(Flatten()(c5))
        return Model(img, class_pred)
set_converter(keras_contrib.layers.InstanceNormalization, convert_InstanceNormalizationLayer)
class TestPixelDA(unittest.TestCase):
    """Converts the PixelDA combined model to ONNX and checks runtime parity."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def test_PixelDA(self):
        keras_model = PixelDA().combined
        batch = np.random.rand(5, 32, 32, 3).astype(np.float32)
        expected = keras_model.predict([batch])
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, batch, expected, self.model_files,
                             atol=1.e-5))
if __name__ == "__main__":
unittest.main()
| 5,796 | 31.027624 | 119 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_image_outpainting.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
from mock_keras2onnx import set_converter
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0, is_keras_older_than, convert_InstanceNormalizationLayer
K = keras.backend
Activation = keras.layers.Activation
AtrousConvolution2D = keras.layers.AtrousConvolution2D
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Conv2DTranspose = keras.layers.Conv2DTranspose
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
if not is_keras_older_than("2.2.4"):
ReLU = keras.layers.ReLU
import keras_contrib
InstanceNormalization = keras_contrib.layers.InstanceNormalization
Sequential = keras.models.Sequential
Model = keras.models.Model
# Full (unmasked) training image shape, channels-last.
INPUT_SHAPE = (256, 256, 3)
EPOCHS = 500
BATCH = 1
# 25% i.e 64 width size will be mask from both side
MASK_PERCENTAGE = .25
EPSILON = 1e-9
ALPHA = 0.0004
# Discriminator input covers the two masked strips: full height, 2 * 25% width.
d_input_shape = (INPUT_SHAPE[0], int(INPUT_SHAPE[1] * (MASK_PERCENTAGE *2)), INPUT_SHAPE[2])
d_dropout = 0.25
def d_build_conv(layer_input, filter_size, kernel_size=4, strides=2, activation='leakyrelu', dropout_rate=d_dropout,
                 norm=True):
    """Discriminator conv block: Conv2D -> LeakyReLU -> Dropout -> InstanceNorm.

    `norm` is treated as a truthy flag; any truthy value (True or 'inst')
    enables instance normalization, False/None disables it.
    """
    c = Conv2D(filter_size, kernel_size=kernel_size, strides=strides, padding='same')(layer_input)
    if activation == 'leakyrelu':
        c = LeakyReLU(alpha=0.2)(c)
    if dropout_rate:
        c = Dropout(dropout_rate)(c)
    # BUG FIX: the original compared `norm == 'inst'`, but every caller passes
    # a boolean (default True; the first discriminator layer explicitly passes
    # norm=False), so the normalization branch was dead code. Use truthiness so
    # both boolean callers and a literal 'inst' enable it.
    if norm:
        c = InstanceNormalization()(c)
    return c
def build_discriminator():
    """Critic over the two masked strips: five strided conv blocks, then two
    dense layers ending in a single sigmoid confidence."""
    d_input = Input(shape=d_input_shape)
    # First block is built without normalization; the rest share one pattern.
    feat = d_build_conv(d_input, 32, 5, strides=2, norm=False)
    for n_filters in (64, 64, 128, 128):
        feat = d_build_conv(feat, n_filters, 5, strides=2)
    flat = Flatten()(feat)
    fc1 = Dense(1024, activation='relu')(flat)
    d_output = Dense(1, activation='sigmoid')(fc1)
    return Model(d_input, d_output)
# Generator input shape matches the discriminator's: the two masked strips.
g_input_shape = (INPUT_SHAPE[0], int(INPUT_SHAPE[1] * (MASK_PERCENTAGE *2)), INPUT_SHAPE[2])
g_dropout = 0.25
def g_build_conv(layer_input, filter_size, kernel_size=4, strides=2, activation='leakyrelu', dropout_rate=g_dropout,
                 norm='inst', dilation=1):
    """Generator conv block: dilated conv -> ReLU -> Dropout -> InstanceNorm.

    Note: despite the flag's default value 'leakyrelu', the activation applied
    is a plain ReLU.
    """
    out = AtrousConvolution2D(filter_size, kernel_size=kernel_size, strides=strides,
                              atrous_rate=(dilation, dilation), padding='same')(layer_input)
    if activation == 'leakyrelu':
        out = ReLU()(out)
    if dropout_rate:
        out = Dropout(dropout_rate)(out)
    if norm == 'inst':
        out = InstanceNormalization()(out)
    return out
def g_build_deconv(layer_input, filter_size, kernel_size=3, strides=2, activation='relu', dropout=0):
    """Generator upsampling block: transposed conv, optionally followed by ReLU.

    The `dropout` parameter is accepted for signature compatibility but is
    never applied by this block.
    """
    out = Conv2DTranspose(filter_size, kernel_size=kernel_size, strides=strides, padding='same')(layer_input)
    if activation == 'relu':
        out = ReLU()(out)
    return out
def build_generator():
    """Out-painting generator.

    Encoder (g1-g5), a dilated-convolution bottleneck with growing receptive
    field (g6-g9, dilation 2/4/8/16), two transposed-conv upsampling stages
    (g12-g13) and a tanh output back at 3 channels.
    """
    g_input = Input(shape=g_input_shape)
    g1 = g_build_conv(g_input, 64, 5, strides=1)
    g2 = g_build_conv(g1, 128, 4, strides=2)
    g3 = g_build_conv(g2, 256, 4, strides=2)
    g4 = g_build_conv(g3, 512, 4, strides=1)
    g5 = g_build_conv(g4, 512, 4, strides=1)
    # Dilated bottleneck: spatial size unchanged, receptive field doubles each step.
    g6 = g_build_conv(g5, 512, 4, strides=1, dilation=2)
    g7 = g_build_conv(g6, 512, 4, strides=1, dilation=4)
    g8 = g_build_conv(g7, 512, 4, strides=1, dilation=8)
    g9 = g_build_conv(g8, 512, 4, strides=1, dilation=16)
    g10 = g_build_conv(g9, 512, 4, strides=1)
    g11 = g_build_conv(g10, 512, 4, strides=1)
    # Decoder: undo the two stride-2 encoder steps.
    g12 = g_build_deconv(g11, 256, 4, strides=2)
    g13 = g_build_deconv(g12, 128, 4, strides=2)
    g14 = g_build_conv(g13, 128, 4, strides=1)
    g15 = g_build_conv(g14, 64, 4, strides=1)
    g_output = AtrousConvolution2D(3, kernel_size=4, strides=(1, 1), activation='tanh', padding='same',
                                   atrous_rate=(1, 1))(g15)
    return Model(g_input, g_output)
set_converter(keras_contrib.layers.InstanceNormalization, convert_InstanceNormalizationLayer)
# Model from https://github.com/bendangnuksung/Image-OutPainting
class TestImageOutPainting(unittest.TestCase):
    """Builds the out-painting GAN stack and checks ONNX runtime parity."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_ImageOutPainting(self):
        K.clear_session()
        discriminator = build_discriminator()
        generator = build_generator()
        image = Input(shape=g_input_shape)
        generated_image = generator(image)
        confidence = discriminator(generated_image)
        keras_model = Model(image, [confidence, generated_image])
        data = np.random.rand(2, *g_input_shape).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files,
                              atol=1e-2, rtol=1e-2))
if __name__ == "__main__":
unittest.main()
| 5,915 | 32.805714 | 116 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_wgan.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/wgan/wgan.py
class WGAN():
    """Wasserstein GAN for 28x28x1 images.

    From https://github.com/eriklindernoren/Keras-GAN/blob/master/wgan/wgan.py.
    `combined` stacks the generator into the frozen critic.
    """
    def __init__(self):
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100
        # Following parameter and optimizer set as recommended in paper
        self.n_critic = 5
        self.clip_value = 0.01
        # Build and compile the critic
        self.critic = self.build_critic()
        # Build the generator
        self.generator = self.build_generator()
        # The generator takes noise as input and generated imgs
        z = Input(shape=(self.latent_dim,))
        img = self.generator(z)
        # For the combined model we will only train the generator
        self.critic.trainable = False
        # The critic takes generated images as input and determines validity
        valid = self.critic(img)
        # The combined model (stacked generator and critic)
        self.combined = Model(z, valid)
    def build_generator(self):
        """Map a latent vector to a 28x28x1 tanh image via Dense + 2x upsampling."""
        model = Sequential()
        # Project and reshape the latent vector to a 7x7x128 feature map.
        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
        model.add(Activation("tanh"))
        noise = Input(shape=(self.latent_dim,))
        img = model(noise)
        return Model(noise, img)
    def build_critic(self):
        """Conv critic; linear output (no sigmoid), per the WGAN formulation."""
        model = Sequential()
        model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        # Pad to even spatial size before further striding.
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1))
        img = Input(shape=self.img_shape)
        validity = model(img)
        return Model(img, validity)
class TestWGAN(unittest.TestCase):
    """Converts the WGAN combined model to ONNX and checks runtime parity."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def test_WGAN(self):
        keras_model = WGAN().combined
        noise = np.random.rand(5, 100).astype(np.float32)
        expected = keras_model.predict(noise)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, noise, expected, self.model_files))
unittest.main()
| 4,302 | 31.353383 | 107 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_pspnet.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import keras
import keras_segmentation
import numpy as np
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_image
img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
BatchNormalization = keras.layers.BatchNormalization
Bidirectional = keras.layers.Bidirectional
Concatenate = keras.layers.Concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
LSTM = keras.layers.LSTM
MaxPooling1D = keras.layers.MaxPooling1D
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
class TestPSPNet(unittest.TestCase):
    """Build a PSPNet segmentation model and compare Keras vs. ONNX output."""

    def setUp(self):
        # ONNX files written by the runtime comparison; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def _pool_block(self, feats, pool_factor, IMAGE_ORDERING):
        """One PSP pyramid-pooling branch.

        Average-pools ``feats`` into roughly a pool_factor x pool_factor
        grid, applies 1x1 Conv(512) + BN + ReLU, then resizes back up so
        the branch can be concatenated with the others.
        """
        import keras.backend as K
        if IMAGE_ORDERING == 'channels_first':
            h = K.int_shape(feats)[2]
            w = K.int_shape(feats)[3]
        elif IMAGE_ORDERING == 'channels_last':
            h = K.int_shape(feats)[1]
            w = K.int_shape(feats)[2]
        # Pool size equals stride, so pooling tiles the feature map.
        pool_size = strides = [int(np.round(float(h) / pool_factor)), int(np.round(float(w) / pool_factor))]
        x = AveragePooling2D(pool_size, data_format=IMAGE_ORDERING, strides=strides, padding='same')(feats)
        x = Conv2D(512, (1, 1), data_format=IMAGE_ORDERING, padding='same', use_bias=False)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        # Upsample by the same factor used for pooling.
        x = keras_segmentation.models.model_utils.resize_image(x, strides, data_format=IMAGE_ORDERING)
        return x

    def test_pspnet(self):
        """Assemble PSPNet on the vanilla encoder and verify on a test image."""
        # From https://github.com/divamgupta/image-segmentation-keras/models/pspnet.py
        from keras_segmentation.models.basic_models import vanilla_encoder
        img_input, levels = vanilla_encoder(input_height=384, input_width=576)
        o = levels[4]
        pool_factors = [1, 2, 3, 6]
        pool_outs = [o]
        IMAGE_ORDERING = 'channels_last'
        if IMAGE_ORDERING == 'channels_first':
            MERGE_AXIS = 1
        elif IMAGE_ORDERING == 'channels_last':
            MERGE_AXIS = -1
        for p in pool_factors:
            pooled = self._pool_block(o, p, IMAGE_ORDERING)
            pool_outs.append(pooled)
        # Fuse the original features with all pyramid branches.
        o = Concatenate(axis=MERGE_AXIS)(pool_outs)
        o = Conv2D(512, (1, 1), data_format=IMAGE_ORDERING, use_bias=False)(o)
        o = BatchNormalization()(o)
        o = Activation('relu')(o)
        # 101 output classes; final (8, 8) resize restores input resolution.
        o = Conv2D(101, (3, 3), data_format=IMAGE_ORDERING, padding='same')(o)
        o = keras_segmentation.models.model_utils.resize_image(o, (8, 8), data_format=IMAGE_ORDERING)
        model = keras_segmentation.models.model_utils.get_segmentation_model(img_input, o)
        model.model_name = "pspnet"
        res = run_image(model, self.model_files, img_path, target_size=(384, 576))
        self.assertTrue(*res)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
# --- extraction artifact: end of test_pspnet.py (dataset metadata: 3,344 bytes) ---
# File: tests/keras2onnx_applications/nightly_build/test_nlp.py
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras, is_tensorflow_older_than
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Bidirectional = keras.layers.Bidirectional
concatenate = keras.layers.concatenate
Conv1D = keras.layers.Conv1D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
LSTM = keras.layers.LSTM
MaxPooling1D = keras.layers.MaxPooling1D
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
Sequential = keras.models.Sequential
Model = keras.models.Model
class TestNLP(unittest.TestCase):
    """ONNX-conversion smoke tests for the classic Keras NLP example models.

    Each test builds an (untrained) example network, converts it with
    mock_keras2onnx, and checks ONNX Runtime output against Keras predict().
    """

    def setUp(self):
        # ONNX artifacts produced by the runtime comparison; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def test_addition_rnn(self):
        """Sequence-to-sequence addition model for LSTM, GRU and SimpleRNN."""
        # An implementation of sequence to sequence learning for performing addition
        # from https://github.com/keras-team/keras/blob/master/examples/addition_rnn.py
        DIGITS = 3
        MAXLEN = DIGITS + 1 + DIGITS
        HIDDEN_SIZE = 128
        BATCH_SIZE = 128
        CHARS_LENGTH = 12

        for RNN in [keras.layers.LSTM, keras.layers.GRU, keras.layers.SimpleRNN]:
            model = keras.models.Sequential()
            model.add(RNN(HIDDEN_SIZE, input_shape=(MAXLEN, CHARS_LENGTH)))
            model.add(keras.layers.RepeatVector(DIGITS + 1))
            model.add(RNN(HIDDEN_SIZE, return_sequences=True))
            model.add(keras.layers.TimeDistributed(keras.layers.Dense(CHARS_LENGTH, activation='softmax')))
            onnx_model = mock_keras2onnx.convert_keras(model, model.name)
            x = np.random.rand(BATCH_SIZE, MAXLEN, CHARS_LENGTH).astype(np.float32)
            expected = model.predict(x)
            self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected, self.model_files))

    def test_babi_rnn(self):
        """Two RNN encoders (story + question) merged for bAbI QA."""
        # two recurrent neural networks based upon a story and a question.
        # from https://github.com/keras-team/keras/blob/master/examples/babi_rnn.py
        # FIX: use the module-level ``LSTM`` alias instead of the private
        # ``keras.layers.recurrent`` module, which newer Keras releases no
        # longer expose; the two names refer to the same layer class.
        RNN = LSTM
        EMBED_HIDDEN_SIZE = 50
        SENT_HIDDEN_SIZE = 100
        QUERY_HIDDEN_SIZE = 100
        BATCH_SIZE = 32
        story_maxlen = 15
        vocab_size = 27
        query_maxlen = 17

        sentence = Input(shape=(story_maxlen,), dtype='int32')
        encoded_sentence = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(sentence)
        encoded_sentence = RNN(SENT_HIDDEN_SIZE)(encoded_sentence)

        question = Input(shape=(query_maxlen,), dtype='int32')
        encoded_question = Embedding(vocab_size, EMBED_HIDDEN_SIZE)(question)
        encoded_question = RNN(QUERY_HIDDEN_SIZE)(encoded_question)

        merged = concatenate([encoded_sentence, encoded_question])
        preds = Dense(vocab_size, activation='softmax')(merged)

        model = Model([sentence, question], preds)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        x = np.random.randint(5, 10, size=(BATCH_SIZE, story_maxlen)).astype(np.int32)
        y = np.random.randint(5, 10, size=(BATCH_SIZE, query_maxlen)).astype(np.int32)
        expected = model.predict([x, y])
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, {model.input_names[0]: x, model.input_names[1]: y}, expected, self.model_files))

    @unittest.skipIf(is_tensorflow_older_than('2.0.0'), "Result is slightly different in tf1")
    @unittest.skipIf(get_maximum_opset_supported() < 9,
                     "None seq_length LSTM is not supported before opset 9.")
    def test_imdb_bidirectional_lstm(self):
        """A Bidirectional LSTM on the IMDB sentiment classification task."""
        # from https://github.com/keras-team/keras/blob/master/examples/imdb_bidirectional_lstm.py
        max_features = 20000
        maxlen = 100
        batch_size = 32
        model = Sequential()
        model.add(Embedding(max_features, 128, input_length=maxlen))
        model.add(Bidirectional(LSTM(64)))
        model.add(Dropout(0.5))
        model.add(Dense(1, activation='sigmoid'))
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        x = np.random.rand(batch_size, maxlen).astype(np.float32)
        expected = model.predict(x)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected, self.model_files))

    def test_imdb_cnn_lstm(self):
        """A recurrent convolutional network on the IMDB sentiment task."""
        # from https://github.com/keras-team/keras/blob/master/examples/imdb_cnn_lstm.py
        max_features = 20000
        maxlen = 100
        embedding_size = 128
        kernel_size = 5
        filters = 64
        pool_size = 4
        lstm_output_size = 70
        batch_size = 30

        model = Sequential()
        model.add(Embedding(max_features, embedding_size, input_length=maxlen))
        model.add(Dropout(0.25))
        model.add(Conv1D(filters,
                         kernel_size,
                         padding='valid',
                         activation='relu',
                         strides=1))
        model.add(MaxPooling1D(pool_size=pool_size))
        model.add(LSTM(lstm_output_size))
        model.add(Dense(1))
        model.add(Activation('sigmoid'))
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        x = np.random.rand(batch_size, maxlen).astype(np.float32)
        expected = model.predict(x)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected, self.model_files))

    @unittest.skipIf(get_maximum_opset_supported() < 9,
                     "None seq_length LSTM is not supported before opset 9.")
    def test_imdb_lstm(self):
        """An LSTM model on the IMDB sentiment classification task."""
        # from https://github.com/keras-team/keras/blob/master/examples/imdb_lstm.py
        max_features = 20000
        maxlen = 80
        batch_size = 32
        model = Sequential()
        model.add(Embedding(max_features, 128))
        model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
        model.add(Dense(1, activation='sigmoid'))
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        x = np.random.rand(batch_size, maxlen).astype(np.float32)
        expected = model.predict(x)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected, self.model_files))

    def test_lstm_text_generation(self):
        """Character-level LSTM (Nietzsche text generation example)."""
        # from https://github.com/keras-team/keras/blob/master/examples/lstm_text_generation.py
        maxlen = 40
        chars_len = 20
        batch_size = 32
        model = Sequential()
        model.add(LSTM(128, input_shape=(maxlen, chars_len)))
        model.add(Dense(chars_len, activation='softmax'))
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        x = np.random.rand(batch_size, maxlen, chars_len).astype(np.float32)
        expected = model.predict(x)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected, self.model_files))

    def test_reuters_mlp(self):
        """An MLP on the Reuters newswire topic classification task."""
        # from https://github.com/keras-team/keras/blob/master/examples/reuters_mlp.py
        max_words = 1000
        batch_size = 32
        num_classes = 20
        model = Sequential()
        model.add(Dense(512, input_shape=(max_words,)))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(num_classes))
        model.add(Activation('softmax'))
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        x = np.random.rand(batch_size, max_words).astype(np.float32)
        expected = model.predict(x)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected, self.model_files))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
# --- extraction artifact: end of test_nlp.py (dataset metadata: 8,385 bytes) ---
# File: tests/keras2onnx_applications/nightly_build/test_dual_path_network.py
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from keras.regularizers import l2
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
add = keras.layers.add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
GlobalMaxPooling2D = keras.layers.GlobalMaxPooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
def DualPathNetwork(input_shape=None,
                    initial_conv_filters=64,
                    depth=(3, 4, 20, 3),
                    filter_increment=(16, 32, 24, 128),
                    cardinality=32,
                    width=3,
                    weight_decay=0,
                    include_top=True,
                    weights=None,
                    input_tensor=None,
                    pooling=None,
                    classes=1000):
    """Build a Dual Path Network as a Keras Model.

    The defaults correspond to DPN-92.  ``depth`` lists the number of
    dual-path blocks per stage; ``filter_increment`` the dense-path channel
    growth per stage.

    FIX: the default ``depth``/``filter_increment`` were mutable list
    literals; they are now tuples (same values, no shared-mutable-default
    hazard; callers passing lists are unaffected).

    NOTE(review): ``weights`` and ``input_tensor`` are accepted for API
    symmetry with other model builders but are not used here -- the model
    is always built on a fresh Input.
    """
    img_input = Input(shape=input_shape)

    x = _create_dpn(classes, img_input, include_top, initial_conv_filters,
                    filter_increment, depth, cardinality, width, weight_decay, pooling)
    inputs = img_input

    # Name kept from the original reference implementation ('resnext').
    model = Model(inputs, x, name='resnext')

    return model
def DPN92(input_shape=None,
          include_top=True,
          weights=None,
          input_tensor=None,
          pooling=None,
          classes=1000):
    """DPN-92 preset: simply the DualPathNetwork defaults."""
    common = dict(include_top=include_top,
                  weights=weights,
                  input_tensor=input_tensor,
                  pooling=pooling,
                  classes=classes)
    return DualPathNetwork(input_shape, **common)
def DPN98(input_shape=None,
          include_top=True,
          weights=None,
          input_tensor=None,
          pooling=None,
          classes=1000):
    """DPN-98 preset: wider stem (96), depth [3, 6, 20, 3], cardinality 40."""
    preset = dict(initial_conv_filters=96,
                  depth=[3, 6, 20, 3],
                  filter_increment=[16, 32, 32, 128],
                  cardinality=40,
                  width=4)
    return DualPathNetwork(input_shape,
                           include_top=include_top,
                           weights=weights,
                           input_tensor=input_tensor,
                           pooling=pooling,
                           classes=classes,
                           **preset)
def _initial_conv_block_inception(input, initial_conv_filters, weight_decay=5e-4):
    """Network stem: strided 7x7 conv -> BN -> ReLU -> 3x3 max-pool (stride 2)."""
    bn_axis = 1 if K.image_data_format() == 'channels_first' else -1

    stem = Conv2D(initial_conv_filters, (7, 7), padding='same', use_bias=False,
                  kernel_initializer='he_normal',
                  kernel_regularizer=l2(weight_decay), strides=(2, 2))(input)
    stem = BatchNormalization(axis=bn_axis)(stem)
    stem = Activation('relu')(stem)
    return MaxPooling2D((3, 3), strides=(2, 2), padding='same')(stem)
def _bn_relu_conv_block(input, filters, kernel=(3, 3), stride=(1, 1), weight_decay=5e-4):
    """Conv -> BN -> ReLU block.

    NOTE(review): despite the name the convolution comes *first*; this
    matches the original reference implementation.
    """
    bn_axis = 1 if K.image_data_format() == 'channels_first' else -1

    out = Conv2D(filters, kernel, padding='same', use_bias=False,
                 kernel_initializer='he_normal',
                 kernel_regularizer=l2(weight_decay), strides=stride)(input)
    out = BatchNormalization(axis=bn_axis)(out)
    return Activation('relu')(out)
def _grouped_convolution_block(input, grouped_channels, cardinality, strides, weight_decay=5e-4):
    """Grouped 3x3 convolution (ResNeXt-style).

    Splits the input into ``cardinality`` channel groups of
    ``grouped_channels`` channels each, convolves every group, concatenates
    the results, then applies BN + ReLU.  With ``cardinality == 1`` this
    degenerates to a single standard convolution.
    """
    init = input
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=strides,
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = Activation('relu')(x)
        return x

    for c in range(cardinality):
        # BUG FIX: the original expression
        #   Lambda(lambda z: <slice> if channels_last else lambda z: <slice>)
        # returned a *lambda object* (never sliced the tensor) in the
        # channels_first branch.  Both branches now slice the tensor.
        # ``c=c`` binds the loop variable eagerly so the closure does not
        # suffer from Python's late-binding pitfall if re-invoked later.
        if K.image_data_format() == 'channels_last':
            x = Lambda(lambda z, c=c:
                       z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels])(input)
        else:
            x = Lambda(lambda z, c=c:
                       z[:, c * grouped_channels:(c + 1) * grouped_channels, :, :])(input)

        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=strides,
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)

        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    group_merge = BatchNormalization(axis=channel_axis)(group_merge)
    group_merge = Activation('relu')(group_merge)
    return group_merge
def _dual_path_block(input, pointwise_filters_a, grouped_conv_filters_b, pointwise_filters_c,
                     filter_increment, cardinality, block_type='normal'):
    """One dual-path block: a residual path plus a densely-connected path.

    ``input`` is either a single tensor (first block) or a
    ``[residual, dense]`` pair produced by the previous block.  Returns a
    new ``[residual_path, dense_path]`` pair.

    ``block_type``: 'projection' (1x1 conv shortcut, stride 1),
    'downsample' (projection with stride 2), or 'normal' (identity shortcut).
    """
    channel_axis = -1
    grouped_channels = int(grouped_conv_filters_b / cardinality)
    # Re-join the two paths of the previous block before transforming.
    init = concatenate(input, axis=channel_axis) if isinstance(input, list) else input

    if block_type == 'projection':
        stride = (1, 1)
        projection = True
    elif block_type == 'downsample':
        stride = (2, 2)
        projection = True
    elif block_type == 'normal':
        stride = (1, 1)
        projection = False
    else:
        raise ValueError('`block_type` must be one of ["projection", "downsample", "normal"]. Given %s' % block_type)

    if projection:
        # Project the input so its channel count matches the block output,
        # then split it: first pointwise_filters_c channels feed the
        # residual path, the remainder feeds the dense path.
        projection_path = _bn_relu_conv_block(init, filters=pointwise_filters_c + 2 * filter_increment,
                                              kernel=(1, 1), stride=stride)
        input_residual_path = Lambda(lambda z: z[:, :, :, :pointwise_filters_c]
                                     if K.image_data_format() == 'channels_last' else
                                     z[:, :pointwise_filters_c, :, :])(projection_path)
        input_dense_path = Lambda(lambda z: z[:, :, :, pointwise_filters_c:]
                                  if K.image_data_format() == 'channels_last' else
                                  z[:, pointwise_filters_c:, :, :])(projection_path)
    else:
        input_residual_path = input[0]
        input_dense_path = input[1]

    # Bottleneck: 1x1 reduce -> grouped 3x3 -> 1x1 expand.
    x = _bn_relu_conv_block(init, filters=pointwise_filters_a, kernel=(1, 1))
    x = _grouped_convolution_block(x, grouped_channels=grouped_channels, cardinality=cardinality, strides=stride)
    x = _bn_relu_conv_block(x, filters=pointwise_filters_c + filter_increment, kernel=(1, 1))

    # Split the block output the same way: residual slice + dense slice.
    output_residual_path = Lambda(lambda z: z[:, :, :, :pointwise_filters_c]
                                  if K.image_data_format() == 'channels_last' else
                                  z[:, :pointwise_filters_c, :, :])(x)
    output_dense_path = Lambda(lambda z: z[:, :, :, pointwise_filters_c:]
                               if K.image_data_format() == 'channels_last' else
                               z[:, pointwise_filters_c:, :, :])(x)

    # DPN's defining mix: the residual path adds, the dense path concatenates.
    residual_path = add([input_residual_path, output_residual_path])
    dense_path = concatenate([input_dense_path, output_dense_path], axis=channel_axis)

    return [residual_path, dense_path]
def _create_dpn(nb_classes, img_input, include_top, initial_conv_filters,
                filter_increment, depth, cardinality=32, width=3, weight_decay=5e-4, pooling=None):
    """Assemble the Dual Path Network graph and return its output tensor.

    Arguments mirror ``DualPathNetwork``; ``depth`` gives the number of
    dual-path blocks per stage and ``filter_increment`` the dense-path
    channel growth per stage.
    """
    channel_axis = -1
    N = list(depth)
    base_filters = 256

    # block 1 (initial conv block)
    x = _initial_conv_block_inception(img_input, initial_conv_filters, weight_decay)

    # block 2 (projection block)
    filter_inc = filter_increment[0]
    filters = int(cardinality * width)

    x = _dual_path_block(x, pointwise_filters_a=filters,
                         grouped_conv_filters_b=filters,
                         pointwise_filters_c=base_filters,
                         filter_increment=filter_inc,
                         cardinality=cardinality,
                         block_type='projection')

    for i in range(N[0] - 1):
        x = _dual_path_block(x, pointwise_filters_a=filters,
                             grouped_conv_filters_b=filters,
                             pointwise_filters_c=base_filters,
                             filter_increment=filter_inc,
                             cardinality=cardinality,
                             block_type='normal')

    # remaining blocks: each stage doubles the filter counts and opens with
    # a strided 'downsample' block.
    for k in range(1, len(N)):
        filter_inc = filter_increment[k]
        filters *= 2
        base_filters *= 2

        x = _dual_path_block(x, pointwise_filters_a=filters,
                             grouped_conv_filters_b=filters,
                             pointwise_filters_c=base_filters,
                             filter_increment=filter_inc,
                             cardinality=cardinality,
                             block_type='downsample')

        for i in range(N[k] - 1):
            x = _dual_path_block(x, pointwise_filters_a=filters,
                                 grouped_conv_filters_b=filters,
                                 pointwise_filters_c=base_filters,
                                 filter_increment=filter_inc,
                                 cardinality=cardinality,
                                 block_type='normal')

    x = concatenate(x, axis=channel_axis)

    if include_top:
        # Head: mean of global-average and global-max pooling, then softmax.
        # (Locals renamed from ``avg``/``max`` -- ``max`` shadowed the builtin.)
        avg_pool = GlobalAveragePooling2D()(x)
        max_pool = GlobalMaxPooling2D()(x)
        x = add([avg_pool, max_pool])
        x = Lambda(lambda z: 0.5 * z)(x)
        x = Dense(nb_classes, use_bias=False, kernel_regularizer=l2(weight_decay),
                  kernel_initializer='he_normal', activation='softmax')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)
        elif pooling == 'max-avg':
            a = GlobalMaxPooling2D()(x)
            b = GlobalAveragePooling2D()(x)
            x = add([a, b])
            x = Lambda(lambda z: 0.5 * z)(x)

    return x
# Model from https://github.com/titu1994/Keras-DualPathNetworks/blob/master/dual_path_network.py
class TestDualPathNetwork(unittest.TestCase):
    """DPN-92 Keras-vs-ONNX-Runtime parity check."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        while self.model_files:
            os.remove(self.model_files.pop())

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_DPN92(self):
        K.clear_session()
        keras_model = DPN92(input_shape=(224, 224, 3))
        batch = np.random.rand(2, 224, 224, 3).astype(np.float32)
        expected = keras_model.predict(batch)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                              batch, expected, self.model_files))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
# --- extraction artifact: end of test_dual_path_network.py (dataset metadata: 11,171 bytes) ---
# File: tests/keras2onnx_applications/nightly_build/test_ocr.py
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
import tensorflow as tf
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
def _repeat(x, num_repeats):
    """Repeat each element of int tensor ``x`` ``num_repeats`` times (flattened)."""
    column = tf.reshape(x, shape=(-1, 1))
    tiled = tf.matmul(column, tf.ones((1, num_repeats), dtype='int32'))
    return tf.reshape(tiled, [-1])
def _meshgrid(height, width):
    """Homogeneous sampling grid over [-1, 1]^2, returned as a (3, H*W) tensor."""
    xs = tf.linspace(-1., 1., width)
    ys = tf.linspace(-1., 1., height)
    grid_x, grid_y = tf.meshgrid(xs, ys)
    row_x = tf.reshape(grid_x, shape=(1, -1))
    row_y = tf.reshape(grid_y, shape=(1, -1))
    # Third row of ones makes the coordinates homogeneous for affine matmul.
    return tf.concat([row_x, row_y, tf.ones_like(row_x)], 0)
# pylint: disable=too-many-statements
def _transform(inputs):
    """Spatial-transformer sampling: warp ``locnet_x`` by affine ``locnet_y``.

    ``inputs`` is ``[feature_map, theta]`` where ``theta`` flattens to
    (batch, 2, 3) affine matrices.  The feature map is bilinearly resampled
    on the transformed grid; the output keeps the input spatial size.
    """
    locnet_x, locnet_y = inputs

    output_size = locnet_x.shape[1:]
    batch_size = tf.shape(locnet_x)[0]
    height = tf.shape(locnet_x)[1]
    width = tf.shape(locnet_x)[2]
    num_channels = tf.shape(locnet_x)[3]

    # 6 affine parameters per sample -> (batch, 2, 3) matrices.
    locnet_y = tf.reshape(locnet_y, shape=(batch_size, 2, 3))

    locnet_y = tf.reshape(locnet_y, (-1, 2, 3))
    locnet_y = tf.cast(locnet_y, 'float32')

    output_height = output_size[0]
    output_width = output_size[1]

    # Normalized [-1, 1] grid in homogeneous coords, tiled per batch sample.
    indices_grid = _meshgrid(output_height, output_width)
    indices_grid = tf.expand_dims(indices_grid, 0)
    indices_grid = tf.reshape(indices_grid, [-1])  # flatten?
    indices_grid = tf.tile(indices_grid, tf.stack([batch_size]))
    indices_grid = tf.reshape(indices_grid, tf.stack([batch_size, 3, -1]))

    # Apply the affine map: (batch, 2, 3) @ (batch, 3, H*W) -> sample coords.
    transformed_grid = tf.matmul(locnet_y, indices_grid)
    x_s = tf.slice(transformed_grid, [0, 0, 0], [-1, 1, -1])
    y_s = tf.slice(transformed_grid, [0, 1, 0], [-1, 1, -1])
    x = tf.reshape(x_s, [-1])
    y = tf.reshape(y_s, [-1])

    # Interpolate
    height_float = tf.cast(height, dtype='float32')
    width_float = tf.cast(width, dtype='float32')

    output_height = output_size[0]
    output_width = output_size[1]

    # Map normalized [-1, 1] coordinates back to pixel coordinates.
    x = tf.cast(x, dtype='float32')
    y = tf.cast(y, dtype='float32')
    x = .5 * (x + 1.0) * width_float
    y = .5 * (y + 1.0) * height_float

    # The four integer corner pixels surrounding each sample point.
    x0 = tf.cast(tf.floor(x), 'int32')
    x1 = x0 + 1
    y0 = tf.cast(tf.floor(y), 'int32')
    y1 = y0 + 1

    max_y = tf.cast(height - 1, dtype='int32')
    max_x = tf.cast(width - 1, dtype='int32')
    zero = tf.zeros([], dtype='int32')

    # Clamp so out-of-image samples read the border pixel.
    x0 = tf.clip_by_value(x0, zero, max_x)
    x1 = tf.clip_by_value(x1, zero, max_x)
    y0 = tf.clip_by_value(y0, zero, max_y)
    y1 = tf.clip_by_value(y1, zero, max_y)

    # Flat indices into the (batch*H*W, C) view of the feature map.
    flat_image_dimensions = width * height
    pixels_batch = tf.range(batch_size) * flat_image_dimensions
    flat_output_dimensions = output_height * output_width
    base = _repeat(pixels_batch, flat_output_dimensions)
    base_y0 = base + y0 * width
    base_y1 = base + y1 * width
    indices_a = base_y0 + x0
    indices_b = base_y1 + x0
    indices_c = base_y0 + x1
    indices_d = base_y1 + x1

    flat_image = tf.reshape(locnet_x, shape=(-1, num_channels))
    flat_image = tf.cast(flat_image, dtype='float32')
    pixel_values_a = tf.gather(flat_image, indices_a)
    pixel_values_b = tf.gather(flat_image, indices_b)
    pixel_values_c = tf.gather(flat_image, indices_c)
    pixel_values_d = tf.gather(flat_image, indices_d)

    x0 = tf.cast(x0, 'float32')
    x1 = tf.cast(x1, 'float32')
    y0 = tf.cast(y0, 'float32')
    y1 = tf.cast(y1, 'float32')

    # Bilinear weights: area of the sub-rectangle opposite each corner.
    area_a = tf.expand_dims(((x1 - x) * (y1 - y)), 1)
    area_b = tf.expand_dims(((x1 - x) * (y - y0)), 1)
    area_c = tf.expand_dims(((x - x0) * (y1 - y)), 1)
    area_d = tf.expand_dims(((x - x0) * (y - y0)), 1)
    transformed_image = tf.add_n([
        area_a * pixel_values_a, area_b * pixel_values_b, area_c * pixel_values_c,
        area_d * pixel_values_d
    ])
    # Finished interpolation

    transformed_image = tf.reshape(transformed_image,
                                   shape=(batch_size, output_height, output_width, num_channels))
    return transformed_image
# Model from https://github.com/faustomorales/keras-ocr
class TestOCR(unittest.TestCase):
    """keras-ocr recognizer (CRNN + spatial transformer) ONNX parity test."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_OCR(self):
        """Build the recognizer backbone and compare Keras vs. ONNX output."""
        K.clear_session()
        height = 31
        width = 200
        # BUG FIX: the original read ``color = False,`` -- the trailing comma
        # made ``color`` the tuple ``(False,)``, which is truthy, silently
        # forcing a 3-channel input.  Grayscale (1 channel) was intended;
        # the random batch below is adjusted to match.
        color = False
        filters = (64, 128, 256, 256, 512, 512, 512)
        rnn_units = (128, 128)
        dropout = 0.25
        rnn_steps_to_discard = 2
        pool_size = 2
        stn = True

        inputs = keras.layers.Input((height, width, 3 if color else 1))
        # Transpose to width-major and flip so the sequence reads left-to-right.
        x = keras.layers.Permute((2, 1, 3))(inputs)
        x = keras.layers.Lambda(lambda x: x[:, :, ::-1])(x)
        x = keras.layers.Conv2D(filters[0], (3, 3), activation='relu', padding='same', name='conv_1')(x)
        x = keras.layers.Conv2D(filters[1], (3, 3), activation='relu', padding='same', name='conv_2')(x)
        x = keras.layers.Conv2D(filters[2], (3, 3), activation='relu', padding='same', name='conv_3')(x)
        x = keras.layers.BatchNormalization(name='bn_3')(x)
        x = keras.layers.MaxPooling2D(pool_size=(pool_size, pool_size), name='maxpool_3')(x)
        x = keras.layers.Conv2D(filters[3], (3, 3), activation='relu', padding='same', name='conv_4')(x)
        x = keras.layers.Conv2D(filters[4], (3, 3), activation='relu', padding='same', name='conv_5')(x)
        x = keras.layers.BatchNormalization(name='bn_5')(x)
        x = keras.layers.MaxPooling2D(pool_size=(pool_size, pool_size), name='maxpool_5')(x)
        x = keras.layers.Conv2D(filters[5], (3, 3), activation='relu', padding='same', name='conv_6')(x)
        x = keras.layers.Conv2D(filters[6], (3, 3), activation='relu', padding='same', name='conv_7')(x)
        x = keras.layers.BatchNormalization(name='bn_7')(x)
        if stn:
            # Spatial transformer: a small localization net predicts a 2x3
            # affine matrix (initialized to identity via the Dense weights);
            # _transform resamples the feature map accordingly.
            stn_input_output_shape = (width // pool_size ** 2, height // pool_size ** 2, filters[6])
            stn_input_layer = keras.layers.Input(shape=stn_input_output_shape)
            locnet_y = keras.layers.Conv2D(16, (5, 5), padding='same',
                                           activation='relu')(stn_input_layer)
            locnet_y = keras.layers.Conv2D(32, (5, 5), padding='same', activation='relu')(locnet_y)
            locnet_y = keras.layers.Flatten()(locnet_y)
            locnet_y = keras.layers.Dense(64, activation='relu')(locnet_y)
            locnet_y = keras.layers.Dense(6,
                                          weights=[
                                              np.zeros((64, 6), dtype='float32'),
                                              np.float32([[1, 0, 0], [0, 1, 0]]).flatten()
                                          ])(locnet_y)
            localization_net = keras.models.Model(inputs=stn_input_layer, outputs=locnet_y)
            x = keras.layers.Lambda(_transform,
                                    output_shape=stn_input_output_shape)([x, localization_net(x)])
        # Collapse the height axis into features; width becomes the time axis.
        x = keras.layers.Reshape(target_shape=(width // pool_size ** 2,
                                               (height // pool_size ** 2) * filters[-1]),
                                 name='reshape')(x)

        x = keras.layers.Dense(rnn_units[0], activation='relu', name='fc_9')(x)

        # Two bidirectional LSTM stages built from explicit fwd/back halves:
        # the first pair is summed, the second concatenated.
        rnn_1_forward = keras.layers.LSTM(rnn_units[0],
                                          kernel_initializer="he_normal",
                                          return_sequences=True,
                                          name='lstm_10')(x)
        rnn_1_back = keras.layers.LSTM(rnn_units[0],
                                       kernel_initializer="he_normal",
                                       go_backwards=True,
                                       return_sequences=True,
                                       name='lstm_10_back')(x)
        rnn_1_add = keras.layers.Add()([rnn_1_forward, rnn_1_back])
        rnn_2_forward = keras.layers.LSTM(rnn_units[1],
                                          kernel_initializer="he_normal",
                                          return_sequences=True,
                                          name='lstm_11')(rnn_1_add)
        rnn_2_back = keras.layers.LSTM(rnn_units[1],
                                       kernel_initializer="he_normal",
                                       go_backwards=True,
                                       return_sequences=True,
                                       name='lstm_11_back')(rnn_1_add)
        x = keras.layers.Concatenate()([rnn_2_forward, rnn_2_back])
        backbone = keras.models.Model(inputs=inputs, outputs=x)
        x = keras.layers.Dropout(dropout, name='dropout')(x)
        # 129 = alphabet size + CTC blank in the keras-ocr recognizer.
        x = keras.layers.Dense(129,
                               kernel_initializer='he_normal',
                               activation='softmax',
                               name='fc_12')(x)
        # Discard the first timesteps, which see mostly padding.
        x = keras.layers.Lambda(lambda x: x[:, rnn_steps_to_discard:])(x)
        keras_model = keras.models.Model(inputs=inputs, outputs=x)
        # One grayscale channel now that ``color`` is genuinely False.
        data = np.random.rand(2, 31, 200, 1).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
# --- extraction artifact: end of test_ocr.py (dataset metadata: 10,561 bytes) ---
# File: tests/keras2onnx_applications/nightly_build/test_sgan.py
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import onnx
import numpy as np
from mock_keras2onnx.proto import keras, is_keras_older_than
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/sgan/sgan.py
class SGAN:
    """Semi-supervised GAN for MNIST-sized (28x28x1) images.

    Builds a generator, a discriminator with both a validity head and a
    class head, and the combined noise->validity model used for generator
    training.  Only ``self.combined`` is exercised by the test below.
    """

    def __init__(self):
        # MNIST-sized grayscale images.
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.num_classes = 10
        self.latent_dim = 100

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()

        # Build the generator
        self.generator = self.build_generator()

        # The generator takes noise as input and generates imgs
        noise = Input(shape=(100,))
        img = self.generator(noise)

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The valid takes generated images as input and determines validity
        valid, _ = self.discriminator(img)

        # The combined model (stacked generator and discriminator)
        # Trains generator to fool discriminator
        self.combined = Model(noise, valid)

    def build_generator(self):
        """Noise (latent_dim,) -> 28x28x1 image in [-1, 1] (tanh output)."""
        model = Sequential()

        # Project to a 7x7x128 tensor, then upsample twice to reach 28x28.
        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(1, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)

        return Model(noise, img)

    def build_discriminator(self):
        """Image -> (validity sigmoid, class softmax over num_classes + fake)."""
        model = Sequential()

        model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        # Pad bottom/right so the spatial size divides evenly downstream.
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())

        img = Input(shape=self.img_shape)
        features = model(img)
        # Two heads: real/fake validity and an (num_classes + fake) classifier.
        valid = Dense(1, activation="sigmoid")(features)
        label = Dense(self.num_classes+1, activation="softmax")(features)

        return Model(img, [valid, label])
class TestSGAN(unittest.TestCase):
    """Convert the combined SGAN model to ONNX and verify runtime parity."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        while self.model_files:
            os.remove(self.model_files.pop())

    @unittest.skipIf(is_keras_older_than("2.2.4"),
                     "keras version older than 2.2.4 not supported for SGAN")
    def test_SGAN(self):
        keras_model = SGAN().combined
        noise = np.random.rand(5, 100).astype(np.float32)
        expected = keras_model.predict(noise)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, noise, expected,
                             self.model_files))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
# --- extraction artifact: end of test_sgan.py (dataset metadata: 4,549 bytes) ---
# File: tests/keras2onnx_applications/nightly_build/test_infogan.py
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/infogan/infogan.py
class INFOGAN():
    """InfoGAN assembled for conversion testing (architecture from
    eriklindernoren/Keras-GAN).  Builds the generator, the discriminator
    and the auxiliary (recognition) network and stacks them into
    ``self.combined``."""

    def __init__(self):
        # MNIST-sized grayscale images.
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.num_classes = 10
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        # Latent vector: noise plus label code.
        self.latent_dim = 72

        # Discriminator and recognition network share a convolutional trunk.
        self.discriminator, self.auxilliary = self.build_disk_and_q_net()
        self.generator = self.build_generator()

        # Noise (+ label code) in, synthesized digit out.
        latent_in = Input(shape=(self.latent_dim,))
        fake_img = self.generator(latent_in)

        # Freeze the discriminator: the combined graph trains only the
        # generator.
        self.discriminator.trainable = False
        validity = self.discriminator(fake_img)
        label_pred = self.auxilliary(fake_img)

        # Stacked generator -> (validity, predicted label).
        self.combined = Model(latent_in, [validity, label_pred])

    def build_generator(self):
        """Latent vector -> 28x28x1 image in [-1, 1] (tanh output)."""
        stack = [
            Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim),
            Reshape((7, 7, 128)),
            BatchNormalization(momentum=0.8),
            UpSampling2D(),
            Conv2D(128, kernel_size=3, padding="same"),
            Activation("relu"),
            BatchNormalization(momentum=0.8),
            UpSampling2D(),
            Conv2D(64, kernel_size=3, padding="same"),
            Activation("relu"),
            BatchNormalization(momentum=0.8),
            Conv2D(self.channels, kernel_size=3, padding='same'),
            Activation("tanh"),
        ]
        net = Sequential()
        for layer in stack:
            net.add(layer)
        latent_in = Input(shape=(self.latent_dim,))
        return Model(latent_in, net(latent_in))

    def build_disk_and_q_net(self):
        """Return (discriminator, recognition) models over a shared trunk."""
        image = Input(shape=self.img_shape)

        trunk = Sequential()
        trunk.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        trunk.add(LeakyReLU(alpha=0.2))
        trunk.add(Dropout(0.25))
        trunk.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        trunk.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        trunk.add(LeakyReLU(alpha=0.2))
        trunk.add(Dropout(0.25))
        trunk.add(BatchNormalization(momentum=0.8))
        trunk.add(Conv2D(256, kernel_size=3, strides=2, padding="same"))
        trunk.add(LeakyReLU(alpha=0.2))
        trunk.add(Dropout(0.25))
        trunk.add(BatchNormalization(momentum=0.8))
        trunk.add(Conv2D(512, kernel_size=3, strides=2, padding="same"))
        trunk.add(LeakyReLU(alpha=0.2))
        trunk.add(Dropout(0.25))
        trunk.add(BatchNormalization(momentum=0.8))
        trunk.add(Flatten())

        embedding = trunk(image)
        # Discriminator head: real/fake probability.
        validity = Dense(1, activation='sigmoid')(embedding)
        # Recognition (Q) head: class distribution.
        hidden = Dense(128, activation='relu')(embedding)
        label = Dense(self.num_classes, activation='softmax')(hidden)
        return Model(image, validity), Model(image, label)
class TestInfoGAN(unittest.TestCase):
def setUp(self):
self.model_files = []
def tearDown(self):
for fl in self.model_files:
os.remove(fl)
def test_InfoGAN(self):
keras_model = INFOGAN().combined
x = np.random.rand(5, 72).astype(np.float32)
expected = keras_model.predict(x)
onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected, self.model_files))
if __name__ == "__main__":
unittest.main()
| 4,941 | 34.049645 | 107 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_aae.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras, is_tf_keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
add = keras.layers.add
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
import keras.backend as K
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/aae/aae.py
class AdversarialAutoencoder():
    """Adversarial autoencoder (eriklindernoren/Keras-GAN): an
    encoder/decoder pair whose latent code is policed by a discriminator.
    The stacked graph is exposed as ``self.adversarial_autoencoder``."""

    def __init__(self):
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 10

        self.discriminator = self.build_discriminator()
        self.encoder = self.build_encoder()
        self.decoder = self.build_decoder()

        image = Input(shape=self.img_shape)
        # Encode the image, then reconstruct it from the latent code.
        code = self.encoder(image)
        reconstruction = self.decoder(code)

        # Only the encoder/decoder train through the stacked graph.
        self.discriminator.trainable = False
        validity = self.discriminator(code)

        self.adversarial_autoencoder = Model(image, [reconstruction, validity])

    def build_encoder(self):
        """Image -> latent code (element-wise sum of mu and log_var heads)."""
        image = Input(shape=self.img_shape)
        h = Flatten()(image)
        h = Dense(512)(h)
        h = LeakyReLU(alpha=0.2)(h)
        h = Dense(512)(h)
        h = LeakyReLU(alpha=0.2)(h)
        mu = Dense(self.latent_dim)(h)
        log_var = Dense(self.latent_dim)(h)
        # merge is deprecated; add() combines the two heads instead.
        code = add([mu, log_var])
        return Model(image, code)

    def build_decoder(self):
        """Latent code -> reconstructed image in [-1, 1] (tanh output)."""
        net = Sequential()
        for layer in (
                Dense(512, input_dim=self.latent_dim),
                LeakyReLU(alpha=0.2),
                Dense(512),
                LeakyReLU(alpha=0.2),
                Dense(np.prod(self.img_shape), activation='tanh'),
                Reshape(self.img_shape)):
            net.add(layer)
        code = Input(shape=(self.latent_dim,))
        return Model(code, net(code))

    def build_discriminator(self):
        """Latent code -> probability that it was drawn from the prior."""
        net = Sequential()
        for layer in (
                Dense(512, input_dim=self.latent_dim),
                LeakyReLU(alpha=0.2),
                Dense(256),
                LeakyReLU(alpha=0.2),
                Dense(1, activation="sigmoid")):
            net.add(layer)
        code = Input(shape=(self.latent_dim,))
        return Model(code, net(code))
class TestAdversarialAutoencoder(unittest.TestCase):
def setUp(self):
self.model_files = []
def tearDown(self):
for fl in self.model_files:
os.remove(fl)
def test_AdversarialAutoencoder(self):
keras_model = AdversarialAutoencoder().adversarial_autoencoder
x = np.random.rand(5, 28, 28, 1).astype(np.float32)
expected = keras_model.predict(x)
onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected, self.model_files))
if __name__ == "__main__":
unittest.main()
| 4,067 | 30.292308 | 107 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_bgan.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras, is_tf_keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/bgan/bgan.py
class BGAN():
    """Reference: https://wiseodd.github.io/techblog/2017/03/07/boundary-seeking-gan/"""

    def __init__(self):
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100

        self.discriminator = self.build_discriminator()
        self.generator = self.build_generator()

        # noise -> image -> validity, with the discriminator frozen so that
        # only the generator trains through the stacked model.
        noise_in = Input(shape=(self.latent_dim,))
        fake_img = self.generator(noise_in)
        self.discriminator.trainable = False
        validity = self.discriminator(fake_img)
        self.combined = Model(noise_in, validity)

    def build_generator(self):
        """Latent noise -> 28x28x1 image in [-1, 1] (tanh output)."""
        net = Sequential()
        for layer in (
                Dense(256, input_dim=self.latent_dim),
                LeakyReLU(alpha=0.2),
                BatchNormalization(momentum=0.8),
                Dense(512),
                LeakyReLU(alpha=0.2),
                BatchNormalization(momentum=0.8),
                Dense(1024),
                LeakyReLU(alpha=0.2),
                BatchNormalization(momentum=0.8),
                Dense(np.prod(self.img_shape), activation='tanh'),
                Reshape(self.img_shape)):
            net.add(layer)
        noise_in = Input(shape=(self.latent_dim,))
        return Model(noise_in, net(noise_in))

    def build_discriminator(self):
        """Image -> probability that it is real."""
        net = Sequential()
        for layer in (
                Flatten(input_shape=self.img_shape),
                Dense(512),
                LeakyReLU(alpha=0.2),
                Dense(256),
                LeakyReLU(alpha=0.2),
                Dense(1, activation='sigmoid')):
            net.add(layer)
        image = Input(shape=self.img_shape)
        return Model(image, net(image))
class TestBGAN(unittest.TestCase):
def setUp(self):
self.model_files = []
def tearDown(self):
for fl in self.model_files:
os.remove(fl)
def test_BGAN(self):
keras_model = BGAN().combined
x = np.random.rand(5, 100).astype(np.float32)
expected = keras_model.predict(x)
onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected, self.model_files))
if __name__ == "__main__":
unittest.main()
| 3,634 | 30.336207 | 107 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_inception_v4.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import onnx
import numpy as np
from mock_keras2onnx.proto import keras
from keras.applications import VGG19
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime, test_level_0
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Convolution2D = keras.layers.Convolution2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
Multiply = keras.layers.Multiply
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
K = keras.backend
# Model from https://github.com/titu1994/Inception-v4
def conv_block(x, nb_filter, nb_row, nb_col, border_mode='same', subsample=(1, 1), bias=False):
    """Convolution -> batch norm -> ReLU building block (channels-last)."""
    out = Convolution2D(nb_filter, (nb_row, nb_col), strides=subsample,
                        padding=border_mode, use_bias=bias)(x)
    out = BatchNormalization(axis=-1)(out)
    return Activation('relu')(out)
def inception_stem(input):
    """Inception-v4 stem: reduce a 299x299x3 input to the 35x35 grid."""
    channel_axis = -1
    x = conv_block(input, 32, 3, 3, 'valid', (2, 2))
    x = conv_block(x, 32, 3, 3, 'valid')
    x = conv_block(x, 64, 3, 3)

    # First split: max-pool vs. strided conv.
    branch_a = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(x)
    branch_b = conv_block(x, 96, 3, 3, 'valid', (2, 2))
    x = concatenate([branch_a, branch_b], axis=channel_axis)

    # Second split: short 3x3 path vs. factorized 7x7 path.
    branch_a = conv_block(x, 64, 1, 1)
    branch_a = conv_block(branch_a, 96, 3, 3, 'valid')
    branch_b = conv_block(x, 64, 1, 1)
    branch_b = conv_block(branch_b, 64, 1, 7)
    branch_b = conv_block(branch_b, 64, 7, 1)
    branch_b = conv_block(branch_b, 96, 3, 3, 'valid')
    x = concatenate([branch_a, branch_b], axis=channel_axis)

    # Third split: strided conv vs. max-pool.
    branch_a = conv_block(x, 192, 3, 3, 'valid', (2, 2))
    branch_b = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(x)
    return concatenate([branch_a, branch_b], axis=channel_axis)
def inception_A(input):
    """35x35-grid Inception-A block: four parallel branches, concatenated."""
    channel_axis = -1
    b_1x1 = conv_block(input, 96, 1, 1)
    b_3x3 = conv_block(input, 64, 1, 1)
    b_3x3 = conv_block(b_3x3, 96, 3, 3)
    b_dbl = conv_block(input, 64, 1, 1)
    b_dbl = conv_block(b_dbl, 96, 3, 3)
    b_dbl = conv_block(b_dbl, 96, 3, 3)
    b_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    b_pool = conv_block(b_pool, 96, 1, 1)
    return concatenate([b_1x1, b_3x3, b_dbl, b_pool], axis=channel_axis)
def inception_B(input):
    """17x17-grid Inception-B block with factorized 7x7 convolutions."""
    channel_axis = -1
    b_1x1 = conv_block(input, 384, 1, 1)
    b_7x7 = conv_block(input, 192, 1, 1)
    b_7x7 = conv_block(b_7x7, 224, 1, 7)
    b_7x7 = conv_block(b_7x7, 256, 7, 1)
    b_dbl = conv_block(input, 192, 1, 1)
    b_dbl = conv_block(b_dbl, 192, 7, 1)
    b_dbl = conv_block(b_dbl, 224, 1, 7)
    b_dbl = conv_block(b_dbl, 224, 7, 1)
    b_dbl = conv_block(b_dbl, 256, 1, 7)
    b_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    b_pool = conv_block(b_pool, 128, 1, 1)
    return concatenate([b_1x1, b_7x7, b_dbl, b_pool], axis=channel_axis)
def inception_C(input):
    """8x8-grid Inception-C block with split 1x3 / 3x1 sub-branches."""
    channel_axis = -1
    b_1x1 = conv_block(input, 256, 1, 1)

    b_split = conv_block(input, 384, 1, 1)
    split_h = conv_block(b_split, 256, 1, 3)
    split_v = conv_block(b_split, 256, 3, 1)
    b_split = concatenate([split_h, split_v], axis=channel_axis)

    b_deep = conv_block(input, 384, 1, 1)
    b_deep = conv_block(b_deep, 448, 3, 1)
    b_deep = conv_block(b_deep, 512, 1, 3)
    deep_h = conv_block(b_deep, 256, 1, 3)
    deep_v = conv_block(b_deep, 256, 3, 1)
    b_deep = concatenate([deep_h, deep_v], axis=channel_axis)

    b_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    b_pool = conv_block(b_pool, 256, 1, 1)
    return concatenate([b_1x1, b_split, b_deep, b_pool], axis=channel_axis)
def reduction_A(input):
    """Reduction-A block: 35x35 grid -> 17x17 grid."""
    channel_axis = -1
    path_conv = conv_block(input, 384, 3, 3, 'valid', (2, 2))
    path_deep = conv_block(input, 192, 1, 1)
    path_deep = conv_block(path_deep, 224, 3, 3)
    path_deep = conv_block(path_deep, 256, 3, 3, 'valid', (2, 2))
    path_pool = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)
    return concatenate([path_conv, path_deep, path_pool], axis=channel_axis)
def reduction_B(input):
    """Reduction-B block: 17x17 grid -> 8x8 grid."""
    channel_axis = -1
    path_conv = conv_block(input, 192, 1, 1)
    path_conv = conv_block(path_conv, 192, 3, 3, 'valid', (2, 2))
    path_deep = conv_block(input, 256, 1, 1)
    path_deep = conv_block(path_deep, 256, 1, 7)
    path_deep = conv_block(path_deep, 320, 7, 1)
    path_deep = conv_block(path_deep, 320, 3, 3, 'valid', (2, 2))
    path_pool = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)
    return concatenate([path_conv, path_deep, path_pool], axis=channel_axis)
def create_inception_v4(nb_classes=1001):
    '''
    Creates a inception v4 network

    :param nb_classes: number of classes.txt
    :return: Keras Model with 1 input and 1 output
    '''
    # Input shape is 299 x 299 x 3 (channels-last).
    image = Input((299, 299, 3))
    x = inception_stem(image)

    # 4 x Inception-A, then Reduction-A.
    for _ in range(4):
        x = inception_A(x)
    x = reduction_A(x)

    # 7 x Inception-B, then Reduction-B.
    for _ in range(7):
        x = inception_B(x)
    x = reduction_B(x)

    # 3 x Inception-C.
    for _ in range(3):
        x = inception_C(x)

    # Global 8x8 average pooling followed by dropout and the classifier.
    x = AveragePooling2D((8, 8))(x)
    # NOTE(review): Keras Dropout takes the *drop* rate -- 0.8 drops 80% of
    # units, whereas the paper keeps 80%; confirm intent.
    x = Dropout(0.8)(x)
    x = Flatten()(x)
    out = Dense(activation='softmax', units=nb_classes)(x)
    return Model(image, out, name='Inception-v4')
# Model from https://github.com/titu1994/Inception-v4
class TestInceptionV4(unittest.TestCase):
    """Convert the Inception-v4 model to ONNX and compare the runtime
    output against the Keras prediction."""

    def setUp(self):
        # Paths of ONNX model files written by run_onnx_runtime; cleaned
        # up in tearDown.
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            # Tolerate files that were never created (e.g. the test failed
            # before conversion) so cleanup cannot mask the real failure.
            if os.path.exists(fl):
                os.remove(fl)

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_inception_v4(self):
        K.clear_session()
        keras_model = create_inception_v4()
        data = np.random.rand(2, 299, 299, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, data, expected, self.model_files))


if __name__ == "__main__":
    unittest.main()
| 6,729 | 26.469388 | 108 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_cyclegan.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import keras_contrib
import numpy as np
from mock_keras2onnx import set_converter
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime, convert_InstanceNormalizationLayer
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Concatenate = keras.layers.Concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
InstanceNormalization = keras_contrib.layers.InstanceNormalization
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/cyclegan/cyclegan.py
class CycleGAN():
    """CycleGAN graph (eriklindernoren/Keras-GAN): two U-Net generators and
    two PatchGAN discriminators stacked into ``self.combined``."""

    def __init__(self):
        # 128x128 RGB images.
        self.img_rows = 128
        self.img_cols = 128
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        # PatchGAN output size: four stride-2 convolutions halve the map
        # 2**4 times.
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)

        # Base filter counts for generators (gf) and discriminators (df).
        self.gf = 32
        self.df = 64

        # Loss weights (kept for parity with the original training script).
        self.lambda_cycle = 10.0                   # cycle-consistency loss
        self.lambda_id = 0.1 * self.lambda_cycle   # identity loss

        self.d_A = self.build_discriminator()
        self.d_B = self.build_discriminator()

        self.g_AB = self.build_generator()
        self.g_BA = self.build_generator()

        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)

        # Cross-domain translations.
        fake_B = self.g_AB(img_A)
        fake_A = self.g_BA(img_B)
        # Cycle back to the source domain.
        reconstr_A = self.g_BA(fake_B)
        reconstr_B = self.g_AB(fake_A)
        # Identity mappings (same-domain pass-through).
        img_A_id = self.g_BA(img_A)
        img_B_id = self.g_AB(img_B)

        # Discriminators stay frozen inside the combined graph.
        self.d_A.trainable = False
        self.d_B.trainable = False
        valid_A = self.d_A(fake_A)
        valid_B = self.d_B(fake_B)

        self.combined = Model(inputs=[img_A, img_B],
                              outputs=[valid_A, valid_B,
                                       reconstr_A, reconstr_B,
                                       img_A_id, img_B_id])

    def build_generator(self):
        """U-Net Generator"""

        def down(tensor, filters, f_size=4):
            # Strided conv halves the spatial resolution.
            y = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(tensor)
            y = LeakyReLU(alpha=0.2)(y)
            return InstanceNormalization()(y)

        def up(tensor, skip, filters, f_size=4, dropout_rate=0):
            # Upsample, convolve, then fuse with the matching encoder map.
            y = UpSampling2D(size=2)(tensor)
            y = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(y)
            if dropout_rate:
                y = Dropout(dropout_rate)(y)
            y = InstanceNormalization()(y)
            return Concatenate()([y, skip])

        d0 = Input(shape=self.img_shape)
        d1 = down(d0, self.gf)
        d2 = down(d1, self.gf * 2)
        d3 = down(d2, self.gf * 4)
        d4 = down(d3, self.gf * 8)

        u1 = up(d4, d3, self.gf * 4)
        u2 = up(u1, d2, self.gf * 2)
        u3 = up(u2, d1, self.gf)
        u4 = UpSampling2D(size=2)(u3)
        output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u4)
        return Model(d0, output_img)

    def build_discriminator(self):
        """PatchGAN discriminator: image -> per-patch validity map."""

        def block(tensor, filters, f_size=4, normalization=True):
            y = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(tensor)
            y = LeakyReLU(alpha=0.2)(y)
            if normalization:
                y = InstanceNormalization()(y)
            return y

        image = Input(shape=self.img_shape)
        y = block(image, self.df, normalization=False)
        y = block(y, self.df * 2)
        y = block(y, self.df * 4)
        y = block(y, self.df * 8)
        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(y)
        return Model(image, validity)
# Register a custom ONNX converter for keras_contrib's InstanceNormalization
# layer, which the converter does not handle out of the box.
set_converter(keras_contrib.layers.InstanceNormalization, convert_InstanceNormalizationLayer)
class TestCycleGAN(unittest.TestCase):
def setUp(self):
self.model_files = []
def tearDown(self):
for fl in self.model_files:
os.remove(fl)
def test_CycleGAN(self):
keras_model = CycleGAN().combined
batch = 5
x = np.random.rand(batch, 128, 128, 3).astype(np.float32)
y = np.random.rand(batch, 128, 128, 3).astype(np.float32)
expected = keras_model.predict([x, y])
onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, {keras_model.input_names[0]: x, keras_model.input_names[1]: y}, expected, self.model_files, rtol=1.e-2, atol=5.e-3))
if __name__ == "__main__":
unittest.main()
| 6,164 | 33.060773 | 192 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_discogan.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import keras_contrib
import numpy as np
from mock_keras2onnx import set_converter
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime, convert_InstanceNormalizationLayer
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Concatenate = keras.layers.Concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
InstanceNormalization = keras_contrib.layers.InstanceNormalization
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/discogan/discogan.py
class DiscoGAN():
    """DiscoGAN graph (eriklindernoren/Keras-GAN): two deep U-Net generators
    and two PatchGAN discriminators stacked into ``self.combined``."""

    def __init__(self):
        # 128x128 RGB images.
        self.img_rows = 128
        self.img_cols = 128
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        # PatchGAN output size after 2**4 total downsampling.
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)

        # Base filter counts for generators (gf) and discriminators (df).
        self.gf = 64
        self.df = 64

        self.d_A = self.build_discriminator()
        self.d_B = self.build_discriminator()

        self.g_AB = self.build_generator()
        self.g_BA = self.build_generator()

        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)

        # Cross-domain translations.
        fake_B = self.g_AB(img_A)
        fake_A = self.g_BA(img_B)
        # Cycle back to the source domain.
        reconstr_A = self.g_BA(fake_B)
        reconstr_B = self.g_AB(fake_A)

        # Discriminators stay frozen inside the combined graph.
        self.d_A.trainable = False
        self.d_B.trainable = False
        valid_A = self.d_A(fake_A)
        valid_B = self.d_B(fake_B)

        # Objectives:
        #   adversarial  -- fool the domain discriminators,
        #   translation  -- match e.g. fake_B against true B,
        #   cycle        -- match reconstructions against the originals.
        self.combined = Model(inputs=[img_A, img_B],
                              outputs=[valid_A, valid_B,
                                       fake_B, fake_A,
                                       reconstr_A, reconstr_B])

    def build_generator(self):
        """U-Net Generator"""

        def down(tensor, filters, f_size=4, normalize=True):
            # Strided conv halves the spatial resolution.
            y = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(tensor)
            y = LeakyReLU(alpha=0.2)(y)
            if normalize:
                y = InstanceNormalization()(y)
            return y

        def up(tensor, skip, filters, f_size=4, dropout_rate=0):
            # Upsample, convolve, then fuse with the matching encoder map.
            y = UpSampling2D(size=2)(tensor)
            y = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(y)
            if dropout_rate:
                y = Dropout(dropout_rate)(y)
            y = InstanceNormalization()(y)
            return Concatenate()([y, skip])

        d0 = Input(shape=self.img_shape)
        d1 = down(d0, self.gf, normalize=False)
        d2 = down(d1, self.gf * 2)
        d3 = down(d2, self.gf * 4)
        d4 = down(d3, self.gf * 8)
        d5 = down(d4, self.gf * 8)
        d6 = down(d5, self.gf * 8)
        d7 = down(d6, self.gf * 8)

        u1 = up(d7, d6, self.gf * 8)
        u2 = up(u1, d5, self.gf * 8)
        u3 = up(u2, d4, self.gf * 8)
        u4 = up(u3, d3, self.gf * 4)
        u5 = up(u4, d2, self.gf * 2)
        u6 = up(u5, d1, self.gf)
        u7 = UpSampling2D(size=2)(u6)
        output_img = Conv2D(self.channels, kernel_size=4, strides=1,
                            padding='same', activation='tanh')(u7)
        return Model(d0, output_img)

    def build_discriminator(self):
        """PatchGAN discriminator: image -> per-patch validity map."""

        def block(tensor, filters, f_size=4, normalization=True):
            y = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(tensor)
            y = LeakyReLU(alpha=0.2)(y)
            if normalization:
                y = InstanceNormalization()(y)
            return y

        image = Input(shape=self.img_shape)
        y = block(image, self.df, normalization=False)
        y = block(y, self.df * 2)
        y = block(y, self.df * 4)
        y = block(y, self.df * 8)
        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(y)
        return Model(image, validity)
# Register a custom ONNX converter for keras_contrib's InstanceNormalization
# layer, which the converter does not handle out of the box.
set_converter(keras_contrib.layers.InstanceNormalization, convert_InstanceNormalizationLayer)
class TestDiscoGAN(unittest.TestCase):
def setUp(self):
self.model_files = []
def tearDown(self):
for fl in self.model_files:
os.remove(fl)
def test_DiscoGAN(self):
keras_model = DiscoGAN().combined
batch = 5
x = np.random.rand(batch, 128, 128, 3).astype(np.float32)
y = np.random.rand(batch, 128, 128, 3).astype(np.float32)
expected = keras_model.predict([x, y])
onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, {keras_model.input_names[0]: x, keras_model.input_names[1]: y}, expected, self.model_files, rtol=1.e-2, atol=1.e-2))
if __name__ == "__main__":
unittest.main()
| 6,363 | 33.4 | 192 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_se_inc_resnet.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from mock_keras2onnx.proto.tfcompat import is_tf2
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
from keras_applications.imagenet_utils import _obtain_input_shape
K = keras.backend
is_keras_tensor = K.is_keras_tensor
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
Concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
GlobalMaxPooling2D = keras.layers.GlobalMaxPooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
def squeeze_excite_block(input_tensor, ratio=16):
    """Squeeze-and-Excitation gate: channel-wise reweighting of input_tensor.

    ``ratio`` sets the bottleneck width of the excitation MLP.
    """
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    n_channels = input_tensor.shape[channel_axis]

    # Squeeze: global average pool, reshaped so Dense acts per channel.
    gate = GlobalAveragePooling2D()(input_tensor)
    gate = Reshape((1, 1, n_channels))(gate)
    # Excite: bottleneck MLP ending in a sigmoid gate.
    gate = Dense(n_channels // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(gate)
    gate = Dense(n_channels, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(gate)

    if K.image_data_format() == 'channels_first':
        # Move channels back in front for channels-first backends.
        gate = Permute((3, 1, 2))(gate)

    return multiply([input_tensor, gate])
def conv2d_bn(x,
              filters,
              kernel_size,
              strides=1,
              padding='same',
              activation='relu',
              use_bias=False,
              name=None):
    """Conv2D optionally followed by batch norm and an activation.

    When ``use_bias`` is False the conv is followed by a scale-free
    BatchNormalization named ``<name>_bn``; the activation layer, if any,
    is named ``<name>_ac``.
    """
    x = Conv2D(filters, kernel_size, strides=strides, padding=padding,
               use_bias=use_bias, name=name)(x)
    if not use_bias:
        bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
        bn_name = None if name is None else name + '_bn'
        x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
    if activation is not None:
        ac_name = None if name is None else name + '_ac'
        x = Activation(activation, name=ac_name)(x)
    return x
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
    """Inception-ResNet residual block followed by a squeeze-excite gate.

    ``scale`` damps the residual branch before it is added back to ``x``.
    Raises ValueError for an unknown ``block_type``.
    """
    if block_type == 'block35':
        tower_a = conv2d_bn(x, 32, 1)
        tower_b = conv2d_bn(x, 32, 1)
        tower_b = conv2d_bn(tower_b, 32, 3)
        tower_c = conv2d_bn(x, 32, 1)
        tower_c = conv2d_bn(tower_c, 48, 3)
        tower_c = conv2d_bn(tower_c, 64, 3)
        towers = [tower_a, tower_b, tower_c]
    elif block_type == 'block17':
        tower_a = conv2d_bn(x, 192, 1)
        tower_b = conv2d_bn(x, 128, 1)
        tower_b = conv2d_bn(tower_b, 160, [1, 7])
        tower_b = conv2d_bn(tower_b, 192, [7, 1])
        towers = [tower_a, tower_b]
    elif block_type == 'block8':
        tower_a = conv2d_bn(x, 192, 1)
        tower_b = conv2d_bn(x, 192, 1)
        tower_b = conv2d_bn(tower_b, 224, [1, 3])
        tower_b = conv2d_bn(tower_b, 256, [3, 1])
        towers = [tower_a, tower_b]
    else:
        raise ValueError('Unknown Inception-ResNet block type. '
                         'Expects "block35", "block17" or "block8", '
                         'but got: {block_type}'.format(block_type=block_type))

    block_name = '{block_type}_{block_idx}'.format(block_type=block_type, block_idx=block_idx)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
    mixed = Concatenate(towers, axis=channel_axis,
                        name='{block_name}_mixed'.format(block_name=block_name))
    # Project the mixed towers back to x's channel count (linear, biased conv).
    residual = conv2d_bn(mixed,
                         K.int_shape(x)[channel_axis],
                         1,
                         activation=None,
                         use_bias=True,
                         name='{block_name}_conv'.format(block_name=block_name))
    # x + scale * residual, expressed as a Lambda so the scale is part of
    # the graph.
    x = Lambda(lambda inputs, scale_: inputs[0] + inputs[1] * scale_,
               output_shape=K.int_shape(x)[1:],
               arguments={'scale_': scale},
               name=block_name)([x, residual])
    if activation is not None:
        x = Activation(activation, name='{block_name}_ac'.format(block_name=block_name))(x)
    # Channel reweighting via squeeze-and-excite.
    return squeeze_excite_block(x)
def SEInceptionResNetV2(include_top=True,
                        weights=None,
                        input_tensor=None,
                        input_shape=None,
                        pooling=None,
                        classes=1000):
    """Build the SE-Inception-ResNet-V2 classifier.

    Args:
        include_top: when True, append global average pooling and a softmax
            classification head of `classes` units.
        weights: forwarded to `_obtain_input_shape` for shape validation.
        input_tensor: optional existing tensor to use as the model input.
        input_shape: optional explicit input shape; defaults to the framework
            conventions (default size 299, minimum 139).
        pooling: when `include_top` is False, 'avg'/'max' selects a global
            pooling layer (None leaves the 4D feature map).
        classes: number of output classes for the classification head.

    Returns:
        A `keras.models.Model` named 'se_inception_resnet_v2'.
    """
    # Determine proper input shape
    input_shape = _obtain_input_shape(
        input_shape,
        default_size=299,
        min_size=139,
        data_format=K.image_data_format(),
        require_flatten=False,
        weights=weights)
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Stem block: 35 x 35 x 192
    x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
    x = conv2d_bn(x, 32, 3, padding='valid')
    x = conv2d_bn(x, 64, 3)
    x = MaxPooling2D(3, strides=2)(x)
    x = conv2d_bn(x, 80, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, padding='valid')
    x = MaxPooling2D(3, strides=2)(x)
    # Mixed 5b (Inception-A block): 35 x 35 x 320
    branch_0 = conv2d_bn(x, 96, 1)
    branch_1 = conv2d_bn(x, 48, 1)
    branch_1 = conv2d_bn(branch_1, 64, 5)
    branch_2 = conv2d_bn(x, 64, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_pool = AveragePooling2D(3, strides=1, padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
    # Bug fix: Concatenate is a layer class -- pass the tensor list to the
    # layer call, not to the constructor (same fix applied to 6a/7a below).
    x = Concatenate(axis=channel_axis, name='mixed_5b')(branches)
    # squeeze and excite block
    x = squeeze_excite_block(x)
    # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
    for block_idx in range(1, 11):
        x = inception_resnet_block(x,
                                   scale=0.17,
                                   block_type='block35',
                                   block_idx=block_idx)
    # Mixed 6a (Reduction-A block): 17 x 17 x 1088
    branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 256, 3)
    branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
    branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_pool]
    x = Concatenate(axis=channel_axis, name='mixed_6a')(branches)
    # squeeze and excite block
    x = squeeze_excite_block(x)
    # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
    for block_idx in range(1, 21):
        x = inception_resnet_block(x,
                                   scale=0.1,
                                   block_type='block17',
                                   block_idx=block_idx)
    # Mixed 7a (Reduction-B block): 8 x 8 x 2080
    branch_0 = conv2d_bn(x, 256, 1)
    branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
    branch_2 = conv2d_bn(x, 256, 1)
    branch_2 = conv2d_bn(branch_2, 288, 3)
    branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
    branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    x = Concatenate(axis=channel_axis, name='mixed_7a')(branches)
    # squeeze and excite block
    x = squeeze_excite_block(x)
    # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
    for block_idx in range(1, 10):
        x = inception_resnet_block(x,
                                   scale=0.2,
                                   block_type='block8',
                                   block_idx=block_idx)
    x = inception_resnet_block(x,
                               scale=1.,
                               activation=None,
                               block_type='block8',
                               block_idx=10)
    # squeeze and excite block
    x = squeeze_excite_block(x)
    # Final convolution block: 8 x 8 x 1536
    x = conv2d_bn(x, 1536, 1, name='conv_7b')
    if include_top:
        # Classification block
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)
    inputs = img_input
    # Create model
    model = Model(inputs, x, name='se_inception_resnet_v2')
    return model
# Model from https://github.com/titu1994/keras-squeeze-excite-network
class TestSEInceptionResNetV2(unittest.TestCase):
    """ONNX conversion / runtime-parity test for SE-Inception-ResNet-V2."""

    def setUp(self):
        # Converted model files created by the test; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for path in self.model_files:
            os.remove(path)

    @unittest.skipIf(test_level_0 or not is_tf2,
                     "Test level 0 only.")
    def test_SE_InceptionResNetV2(self):
        K.clear_session()
        keras_model = SEInceptionResNetV2()
        data = np.random.rand(2, 128, 128, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                              data, expected, self.model_files))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import keras_segmentation
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_image
img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
from mock_keras2onnx.proto import is_keras_older_than
class TestDenseNet_2(unittest.TestCase):
    """Runs a reference DenseNet through ONNX conversion on a sample image."""

    def setUp(self):
        # Converted model files created during the test; cleaned in tearDown.
        self.model_files = []

    def tearDown(self):
        for path in self.model_files:
            os.remove(path)

    def test_densenet(self):
        # From https://github.com/tdeboissiere/DeepLearningImplementations/blob/master/DenseNet/densenet.py
        sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../model_source/densenet_2/'))
        import densenet_2
        model = densenet_2.DenseNet(20, (224, 224, 3), 4, 1, 1, nb_filter=10)
        outcome = run_image(model, self.model_files, img_path, target_size=(224, 224))
        self.assertTrue(*outcome)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras, is_tf_keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
import tensorflow as tf
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
K = keras.backend
# From https://github.com/serengil/deepface/blob/master/deepface/basemodels/OpenFace.py
def loadModel():
    """Build the OpenFace face-embedding network (Keras functional API).

    Input: (96, 96, 3) face crops.  Output: an L2-normalized 128-d embedding
    (Dense(128) followed by K.l2_normalize).  The graph consists of a conv
    stem with local response normalization, inception modules 3a-5b, average
    pooling and the dense projection, as ported in the deepface OpenFace
    source referenced above.

    Returns:
        The uncompiled `keras.models.Model`.
    """
    myInput = Input(shape=(96, 96, 3))
    # Stem: conv1 (7x7/2) -> LRN -> conv2 (1x1) -> conv3 (3x3) -> LRN.
    x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(myInput)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn1')(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D(pool_size=3, strides=2)(x)
    x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name='lrn_1')(x)
    x = Conv2D(64, (1, 1), name='conv2')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn2')(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(192, (3, 3), name='conv3')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn3')(x)
    x = Activation('relu')(x)
    x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name='lrn_2')(x) #x is equal added
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D(pool_size=3, strides=2)(x)
    # Inception3a
    inception_3a_3x3 = Conv2D(96, (1, 1), name='inception_3a_3x3_conv1')(x)
    inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_3x3_bn1')(inception_3a_3x3)
    inception_3a_3x3 = Activation('relu')(inception_3a_3x3)
    inception_3a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3a_3x3)
    inception_3a_3x3 = Conv2D(128, (3, 3), name='inception_3a_3x3_conv2')(inception_3a_3x3)
    inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_3x3_bn2')(inception_3a_3x3)
    inception_3a_3x3 = Activation('relu')(inception_3a_3x3)
    inception_3a_5x5 = Conv2D(16, (1, 1), name='inception_3a_5x5_conv1')(x)
    inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_5x5_bn1')(inception_3a_5x5)
    inception_3a_5x5 = Activation('relu')(inception_3a_5x5)
    inception_3a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3a_5x5)
    inception_3a_5x5 = Conv2D(32, (5, 5), name='inception_3a_5x5_conv2')(inception_3a_5x5)
    inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_5x5_bn2')(inception_3a_5x5)
    inception_3a_5x5 = Activation('relu')(inception_3a_5x5)
    inception_3a_pool = MaxPooling2D(pool_size=3, strides=2)(x)
    inception_3a_pool = Conv2D(32, (1, 1), name='inception_3a_pool_conv')(inception_3a_pool)
    inception_3a_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_pool_bn')(inception_3a_pool)
    inception_3a_pool = Activation('relu')(inception_3a_pool)
    # Asymmetric padding aligns the pooled branch with the conv branches.
    inception_3a_pool = ZeroPadding2D(padding=((3, 4), (3, 4)))(inception_3a_pool)
    inception_3a_1x1 = Conv2D(64, (1, 1), name='inception_3a_1x1_conv')(x)
    inception_3a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_1x1_bn')(inception_3a_1x1)
    inception_3a_1x1 = Activation('relu')(inception_3a_1x1)
    inception_3a = concatenate([inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1], axis=3)
    # Inception3b
    inception_3b_3x3 = Conv2D(96, (1, 1), name='inception_3b_3x3_conv1')(inception_3a)
    inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_3x3_bn1')(inception_3b_3x3)
    inception_3b_3x3 = Activation('relu')(inception_3b_3x3)
    inception_3b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3b_3x3)
    inception_3b_3x3 = Conv2D(128, (3, 3), name='inception_3b_3x3_conv2')(inception_3b_3x3)
    inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_3x3_bn2')(inception_3b_3x3)
    inception_3b_3x3 = Activation('relu')(inception_3b_3x3)
    inception_3b_5x5 = Conv2D(32, (1, 1), name='inception_3b_5x5_conv1')(inception_3a)
    inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_5x5_bn1')(inception_3b_5x5)
    inception_3b_5x5 = Activation('relu')(inception_3b_5x5)
    inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
    inception_3b_5x5 = Conv2D(64, (5, 5), name='inception_3b_5x5_conv2')(inception_3b_5x5)
    inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_5x5_bn2')(inception_3b_5x5)
    inception_3b_5x5 = Activation('relu')(inception_3b_5x5)
    # "L2 pooling" implemented as sqrt(avg_pool(x^2) * 9).
    inception_3b_pool = Lambda(lambda x: x**2, name='power2_3b')(inception_3a)
    inception_3b_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_3b_pool)
    inception_3b_pool = Lambda(lambda x: x*9, name='mult9_3b')(inception_3b_pool)
    inception_3b_pool = Lambda(lambda x: K.sqrt(x), name='sqrt_3b')(inception_3b_pool)
    inception_3b_pool = Conv2D(64, (1, 1), name='inception_3b_pool_conv')(inception_3b_pool)
    inception_3b_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_pool_bn')(inception_3b_pool)
    inception_3b_pool = Activation('relu')(inception_3b_pool)
    inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)
    inception_3b_1x1 = Conv2D(64, (1, 1), name='inception_3b_1x1_conv')(inception_3a)
    inception_3b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_1x1_bn')(inception_3b_1x1)
    inception_3b_1x1 = Activation('relu')(inception_3b_1x1)
    inception_3b = concatenate([inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1], axis=3)
    # Inception3c
    inception_3c_3x3 = Conv2D(128, (1, 1), strides=(1, 1), name='inception_3c_3x3_conv1')(inception_3b)
    inception_3c_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_3x3_bn1')(inception_3c_3x3)
    inception_3c_3x3 = Activation('relu')(inception_3c_3x3)
    inception_3c_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3c_3x3)
    inception_3c_3x3 = Conv2D(256, (3, 3), strides=(2, 2), name='inception_3c_3x3_conv'+'2')(inception_3c_3x3)
    inception_3c_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_3x3_bn'+'2')(inception_3c_3x3)
    inception_3c_3x3 = Activation('relu')(inception_3c_3x3)
    inception_3c_5x5 = Conv2D(32, (1, 1), strides=(1, 1), name='inception_3c_5x5_conv1')(inception_3b)
    inception_3c_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_5x5_bn1')(inception_3c_5x5)
    inception_3c_5x5 = Activation('relu')(inception_3c_5x5)
    inception_3c_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3c_5x5)
    inception_3c_5x5 = Conv2D(64, (5, 5), strides=(2, 2), name='inception_3c_5x5_conv'+'2')(inception_3c_5x5)
    inception_3c_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_5x5_bn'+'2')(inception_3c_5x5)
    inception_3c_5x5 = Activation('relu')(inception_3c_5x5)
    inception_3c_pool = MaxPooling2D(pool_size=3, strides=2)(inception_3b)
    inception_3c_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_3c_pool)
    inception_3c = concatenate([inception_3c_3x3, inception_3c_5x5, inception_3c_pool], axis=3)
    #inception 4a
    inception_4a_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name='inception_4a_3x3_conv'+'1')(inception_3c)
    inception_4a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_3x3_bn'+'1')(inception_4a_3x3)
    inception_4a_3x3 = Activation('relu')(inception_4a_3x3)
    inception_4a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4a_3x3)
    inception_4a_3x3 = Conv2D(192, (3, 3), strides=(1, 1), name='inception_4a_3x3_conv'+'2')(inception_4a_3x3)
    inception_4a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_3x3_bn'+'2')(inception_4a_3x3)
    inception_4a_3x3 = Activation('relu')(inception_4a_3x3)
    inception_4a_5x5 = Conv2D(32, (1,1), strides=(1,1), name='inception_4a_5x5_conv1')(inception_3c)
    inception_4a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_5x5_bn1')(inception_4a_5x5)
    inception_4a_5x5 = Activation('relu')(inception_4a_5x5)
    inception_4a_5x5 = ZeroPadding2D(padding=(2,2))(inception_4a_5x5)
    inception_4a_5x5 = Conv2D(64, (5,5), strides=(1,1), name='inception_4a_5x5_conv'+'2')(inception_4a_5x5)
    inception_4a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_5x5_bn'+'2')(inception_4a_5x5)
    inception_4a_5x5 = Activation('relu')(inception_4a_5x5)
    # "L2 pooling" again: sqrt(avg_pool(x^2) * 9).
    inception_4a_pool = Lambda(lambda x: x**2, name='power2_4a')(inception_3c)
    inception_4a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_4a_pool)
    inception_4a_pool = Lambda(lambda x: x*9, name='mult9_4a')(inception_4a_pool)
    inception_4a_pool = Lambda(lambda x: K.sqrt(x), name='sqrt_4a')(inception_4a_pool)
    inception_4a_pool = Conv2D(128, (1,1), strides=(1,1), name='inception_4a_pool_conv'+'')(inception_4a_pool)
    inception_4a_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_pool_bn'+'')(inception_4a_pool)
    inception_4a_pool = Activation('relu')(inception_4a_pool)
    inception_4a_pool = ZeroPadding2D(padding=(2, 2))(inception_4a_pool)
    inception_4a_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name='inception_4a_1x1_conv'+'')(inception_3c)
    inception_4a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_1x1_bn'+'')(inception_4a_1x1)
    inception_4a_1x1 = Activation('relu')(inception_4a_1x1)
    inception_4a = concatenate([inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1], axis=3)
    #inception4e
    inception_4e_3x3 = Conv2D(160, (1,1), strides=(1,1), name='inception_4e_3x3_conv'+'1')(inception_4a)
    inception_4e_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_3x3_bn'+'1')(inception_4e_3x3)
    inception_4e_3x3 = Activation('relu')(inception_4e_3x3)
    inception_4e_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4e_3x3)
    inception_4e_3x3 = Conv2D(256, (3,3), strides=(2,2), name='inception_4e_3x3_conv'+'2')(inception_4e_3x3)
    inception_4e_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_3x3_bn'+'2')(inception_4e_3x3)
    inception_4e_3x3 = Activation('relu')(inception_4e_3x3)
    inception_4e_5x5 = Conv2D(64, (1,1), strides=(1,1), name='inception_4e_5x5_conv'+'1')(inception_4a)
    inception_4e_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_5x5_bn'+'1')(inception_4e_5x5)
    inception_4e_5x5 = Activation('relu')(inception_4e_5x5)
    inception_4e_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4e_5x5)
    inception_4e_5x5 = Conv2D(128, (5,5), strides=(2,2), name='inception_4e_5x5_conv'+'2')(inception_4e_5x5)
    inception_4e_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_5x5_bn'+'2')(inception_4e_5x5)
    inception_4e_5x5 = Activation('relu')(inception_4e_5x5)
    inception_4e_pool = MaxPooling2D(pool_size=3, strides=2)(inception_4a)
    inception_4e_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_4e_pool)
    inception_4e = concatenate([inception_4e_3x3, inception_4e_5x5, inception_4e_pool], axis=3)
    #inception5a
    inception_5a_3x3 = Conv2D(96, (1,1), strides=(1,1), name='inception_5a_3x3_conv'+'1')(inception_4e)
    inception_5a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_3x3_bn'+'1')(inception_5a_3x3)
    inception_5a_3x3 = Activation('relu')(inception_5a_3x3)
    inception_5a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5a_3x3)
    inception_5a_3x3 = Conv2D(384, (3,3), strides=(1,1), name='inception_5a_3x3_conv'+'2')(inception_5a_3x3)
    inception_5a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_3x3_bn'+'2')(inception_5a_3x3)
    inception_5a_3x3 = Activation('relu')(inception_5a_3x3)
    # "L2 pooling" again: sqrt(avg_pool(x^2) * 9).
    inception_5a_pool = Lambda(lambda x: x**2, name='power2_5a')(inception_4e)
    inception_5a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_5a_pool)
    inception_5a_pool = Lambda(lambda x: x*9, name='mult9_5a')(inception_5a_pool)
    inception_5a_pool = Lambda(lambda x: K.sqrt(x), name='sqrt_5a')(inception_5a_pool)
    inception_5a_pool = Conv2D(96, (1,1), strides=(1,1), name='inception_5a_pool_conv'+'')(inception_5a_pool)
    inception_5a_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_pool_bn'+'')(inception_5a_pool)
    inception_5a_pool = Activation('relu')(inception_5a_pool)
    inception_5a_pool = ZeroPadding2D(padding=(1,1))(inception_5a_pool)
    inception_5a_1x1 = Conv2D(256, (1,1), strides=(1,1), name='inception_5a_1x1_conv'+'')(inception_4e)
    inception_5a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_1x1_bn'+'')(inception_5a_1x1)
    inception_5a_1x1 = Activation('relu')(inception_5a_1x1)
    inception_5a = concatenate([inception_5a_3x3, inception_5a_pool, inception_5a_1x1], axis=3)
    #inception_5b
    inception_5b_3x3 = Conv2D(96, (1,1), strides=(1,1), name='inception_5b_3x3_conv'+'1')(inception_5a)
    inception_5b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_3x3_bn'+'1')(inception_5b_3x3)
    inception_5b_3x3 = Activation('relu')(inception_5b_3x3)
    inception_5b_3x3 = ZeroPadding2D(padding=(1,1))(inception_5b_3x3)
    inception_5b_3x3 = Conv2D(384, (3,3), strides=(1,1), name='inception_5b_3x3_conv'+'2')(inception_5b_3x3)
    inception_5b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_3x3_bn'+'2')(inception_5b_3x3)
    inception_5b_3x3 = Activation('relu')(inception_5b_3x3)
    inception_5b_pool = MaxPooling2D(pool_size=3, strides=2)(inception_5a)
    inception_5b_pool = Conv2D(96, (1,1), strides=(1,1), name='inception_5b_pool_conv'+'')(inception_5b_pool)
    inception_5b_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_pool_bn'+'')(inception_5b_pool)
    inception_5b_pool = Activation('relu')(inception_5b_pool)
    inception_5b_pool = ZeroPadding2D(padding=(1, 1))(inception_5b_pool)
    inception_5b_1x1 = Conv2D(256, (1,1), strides=(1,1), name='inception_5b_1x1_conv'+'')(inception_5a)
    inception_5b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_1x1_bn'+'')(inception_5b_1x1)
    inception_5b_1x1 = Activation('relu')(inception_5b_1x1)
    inception_5b = concatenate([inception_5b_3x3, inception_5b_pool, inception_5b_1x1], axis=3)
    # Head: average pool -> flatten -> 128-d embedding -> L2 normalize.
    av_pool = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(inception_5b)
    reshape_layer = Flatten()(av_pool)
    dense_layer = Dense(128, name='dense_layer')(reshape_layer)
    norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=1), name='norm_layer')(dense_layer)
    # Final Model
    model = Model(inputs=[myInput], outputs=norm_layer)
    return model
class TestOpenFace(unittest.TestCase):
    """Converts the OpenFace model to ONNX and checks runtime parity."""

    def setUp(self):
        # Converted model files created during the test; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for path in self.model_files:
            os.remove(path)

    def test_OpenFace(self):
        keras_model = loadModel()
        data = np.random.rand(2, 96, 96, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, data,
                                         expected, self.model_files,
                                         rtol=5e-3, atol=5e-6))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
import math
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from mock_keras2onnx.proto import keras
from keras.initializers import RandomNormal
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
import tensorflow as tf
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
add = keras.layers.add
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Concatenate = keras.layers.Concatenate
Conv2D = keras.layers.Conv2D
Conv2DTranspose = keras.layers.Conv2DTranspose
DepthwiseConv2D = keras.layers.DepthwiseConv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
Multiply = keras.layers.Multiply
multiply = keras.layers.multiply
Permute = keras.layers.Permute
PReLU = keras.layers.PReLU
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
SpatialDropout2D = keras.layers.SpatialDropout2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
def ConvBNLayer(x, out_channels, kernel_size, stride=1, dilation=1, act=True):
    """Conv2D ('same' padding) + BatchNorm, optionally followed by ReLU."""
    out = Conv2D(out_channels, kernel_size, strides=stride,
                 padding='same', dilation_rate=dilation)(x)
    out = BatchNormalization(epsilon=1e-5, momentum=0.1)(out)
    if not act:
        return out
    return Activation('relu')(out)
def ACBlock(x, out_channels, kernel_size, stride=1, padding=0, dilation=1,
            groups=1, deploy=False):
    """Asymmetric Convolution Block (ACNet).

    Sums three parallel branches: a square k x k conv, a vertical k x 1 conv
    and a horizontal 1 x k conv, each followed by BatchNorm.  In deploy mode
    a single fused conv is used instead.

    NOTE(review): `groups` is accepted for API compatibility but is not used
    by the Keras layers below.
    """
    if deploy:
        return Conv2D(out_channels, (kernel_size, kernel_size), strides=stride,
                      dilation_rate=dilation, use_bias=True, padding='same')(x)
    else:
        # Square k x k branch.
        square_outputs = Conv2D(out_channels, (kernel_size, kernel_size), strides=stride,
                                dilation_rate=dilation, use_bias=False, padding='same')(x)
        square_outputs = BatchNormalization(epsilon=1e-5, momentum=0.1)(square_outputs)
        center_offset_from_origin_border = padding - kernel_size // 2
        ver_pad_or_crop = (padding, center_offset_from_origin_border)
        hor_pad_or_crop = (center_offset_from_origin_border, padding)
        if center_offset_from_origin_border >= 0:
            vertical_outputs = x
            ver_conv_padding = ver_pad_or_crop
            horizontal_outputs = x
            hor_conv_padding = hor_pad_or_crop
        else:
            vertical_outputs = x
            ver_conv_padding = (0, 0)
            horizontal_outputs = x
            hor_conv_padding = (0, 0)
        # Vertical k x 1 branch.
        vertical_outputs = ZeroPadding2D(padding=ver_conv_padding)(vertical_outputs)
        vertical_outputs = Conv2D(out_channels, kernel_size=(kernel_size, 1),
                                  strides=stride, padding='same', use_bias=False,
                                  dilation_rate=dilation)(vertical_outputs)
        vertical_outputs = BatchNormalization(epsilon=1e-5, momentum=0.1)(vertical_outputs)
        # Horizontal 1 x k branch.
        # Bug fix: this branch previously reused the vertical (k, 1) kernel;
        # the horizontal branch of an AC block must use a (1, k) kernel.
        # Output spatial size is unchanged ('same' padding), so the Add below
        # remains valid.
        horizontal_outputs = ZeroPadding2D(padding=hor_conv_padding)(horizontal_outputs)
        horizontal_outputs = Conv2D(out_channels, kernel_size=(1, kernel_size),
                                    strides=stride, padding='same', use_bias=False,
                                    dilation_rate=dilation)(horizontal_outputs)
        horizontal_outputs = BatchNormalization(epsilon=1e-5, momentum=0.1)(horizontal_outputs)
        results = Add()([square_outputs, vertical_outputs, horizontal_outputs])
        return results
def BasicBlock(x, out_channels, stride=1, downsample=False):
    """Residual basic unit built from two 3x3 ACBlocks."""
    identity = x
    out = ACBlock(x, out_channels, kernel_size=3, stride=stride)
    out = BatchNormalization(epsilon=1e-5, momentum=0.1)(out)
    out = Activation('relu')(out)
    out = ACBlock(out, out_channels, kernel_size=3)
    out = BatchNormalization(epsilon=1e-5, momentum=0.1)(out)
    # Project the shortcut when the spatial size / channel count changes.
    shortcut = (ConvBNLayer(identity, out_channels, kernel_size=1, stride=stride)
                if downsample else identity)
    return Activation('relu')(Add()([out, shortcut]))
def BottleNeckBlock(x, out_channels, stride=1, downsample=False):
    """Bottleneck residual unit: 1x1 reduce -> 3x3 ACBlock -> 1x1 expand (x4)."""
    expansion = 4
    identity = x
    out = ConvBNLayer(x, out_channels, kernel_size=1, act=True)
    out = ACBlock(out, out_channels, kernel_size=3, stride=stride)
    out = ConvBNLayer(out, out_channels * expansion, kernel_size=1, act=False)
    # Project the shortcut when the block changes spatial size or channels.
    shortcut_tensor = (ConvBNLayer(identity, out_channels * 4, kernel_size=1, stride=stride)
                       if downsample else identity)
    return Activation('relu')(Add()([out, shortcut_tensor]))
def ResNet(x, block_type, layers_repeat, class_dim=1000):
    """Four residual stages followed by global average pooling and a dense head.

    The first unit of each stage downsamples the shortcut; stages 1-3 also
    use stride 2 on their first unit.
    """
    stage_filters = [64, 128, 256, 512]
    x = ConvBNLayer(x, 64, kernel_size=7, stride=2, act=True)
    x = MaxPooling2D(pool_size=3, strides=2, padding='same')(x)
    for stage, filters in enumerate(stage_filters):
        for unit in range(layers_repeat[stage]):
            first = unit == 0
            x = block_type(x, filters,
                           stride=2 if first and stage != 0 else 1,
                           downsample=first)
    pooled = GlobalAveragePooling2D()(x)
    return Dense(class_dim, activation='relu')(pooled)
def ResACNet(x, class_dim=1000, depth=50):
    """ResNet-style ACNet; `depth` selects the block type and stage repeats."""
    configs = {
        10: (BasicBlock, [1, 1, 1, 1]),
        18: (BasicBlock, [2, 2, 2, 2]),
        34: (BasicBlock, [3, 4, 6, 3]),
        50: (BottleNeckBlock, [3, 4, 6, 3]),
        101: (BottleNeckBlock, [3, 4, 23, 3]),
        152: (BottleNeckBlock, [3, 8, 36, 3]),
        200: (BottleNeckBlock, [3, 24, 36, 3]),
    }
    assert depth in configs
    block, repeats = configs[depth]
    return ResNet(x, block, repeats, class_dim)
def conv_block(input, filters):
    """Two consecutive (Conv 3x3 -> BatchNorm -> ReLU) stages."""
    out = input
    for _ in range(2):
        out = Conv2D(filters, kernel_size=(3, 3), strides=1, padding='same')(out)
        out = BatchNormalization()(out)
        out = Activation('relu')(out)
    return out
def up_conv(input, filters):
    """2x upsample followed by Conv 3x3 -> BatchNorm -> ReLU."""
    up = UpSampling2D()(input)
    up = Conv2D(filters, kernel_size=(3, 3), strides=1, padding='same')(up)
    up = BatchNormalization()(up)
    return Activation('relu')(up)
def Attention_block(input1, input2, filters):
    """Additive attention gate: scales input2 by a sigmoid mask built from both inputs."""
    gate = Conv2D(filters, kernel_size=1, strides=1, padding='same')(input1)
    gate = BatchNormalization()(gate)
    skip = Conv2D(filters, kernel_size=1, strides=1, padding='same')(input2)
    skip = BatchNormalization()(skip)
    mask = Activation('relu')(add([gate, skip]))
    mask = Conv2D(filters, kernel_size=1, strides=1, padding='same')(mask)
    mask = BatchNormalization()(mask)
    mask = Activation('sigmoid')(mask)
    return multiply([input2, mask])
def AttUNet(nClasses, input_height=224, input_width=224):
    """Attention U-Net for semantic segmentation.

    Encoder of five conv_block stages with max pooling, decoder of up_conv +
    attention-gated skip connections.  The output is reshaped to
    (H*W, nClasses) with a softmax over classes.

    Args:
        nClasses: number of segmentation classes.
        input_height, input_width: input spatial size (3-channel input).

    Returns:
        A `Model` with `outputHeight` / `outputWidth` attributes attached.
    """
    inputs = Input(shape=(input_height, input_width, 3))
    n1 = 32
    filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]
    # Encoder.
    e1 = conv_block(inputs, filters[0])
    e2 = MaxPooling2D(strides=2)(e1)
    e2 = conv_block(e2, filters[1])
    e3 = MaxPooling2D(strides=2)(e2)
    e3 = conv_block(e3, filters[2])
    e4 = MaxPooling2D(strides=2)(e3)
    e4 = conv_block(e4, filters[3])
    e5 = MaxPooling2D(strides=2)(e4)
    e5 = conv_block(e5, filters[4])
    # Decoder with attention-gated skips.
    d5 = up_conv(e5, filters[3])
    x4 = Attention_block(d5, e4, filters[3])
    d5 = Concatenate()([x4, d5])
    d5 = conv_block(d5, filters[3])
    d4 = up_conv(d5, filters[2])
    x3 = Attention_block(d4, e3, filters[2])
    d4 = Concatenate()([x3, d4])
    d4 = conv_block(d4, filters[2])
    d3 = up_conv(d4, filters[1])
    x2 = Attention_block(d3, e2, filters[1])
    d3 = Concatenate()([x2, d3])
    d3 = conv_block(d3, filters[1])
    d2 = up_conv(d3, filters[0])
    x1 = Attention_block(d2, e1, filters[0])
    d2 = Concatenate()([x1, d2])
    d2 = conv_block(d2, filters[0])
    o = Conv2D(nClasses, (3, 3), padding='same')(d2)
    # Probe the spatial output size once (the original built two throwaway
    # models to read height and width separately).
    outputHeight, outputWidth = Model(inputs, o).output_shape[1:3]
    out = (Reshape((outputHeight * outputWidth, nClasses)))(o)
    out = Activation('softmax')(out)
    # Bug fix: Keras 2 spells these keywords 'inputs'/'outputs';
    # 'input='/'output=' are Keras-1 names and raise a TypeError.
    model = Model(inputs=inputs, outputs=out)
    model.outputHeight = outputHeight
    model.outputWidth = outputWidth
    return model
class BilinearUpsampling(keras.layers.Layer):
    """Bilinear upsampling layer backed by `tf.image.resize_bilinear`.

    Upsamples an NHWC tensor by the integer factors in `upsampling`.
    """

    def __init__(self, upsampling=(2, 2), data_format=None, **kwargs):
        super(BilinearUpsampling, self).__init__(**kwargs)
        # Bug fix: data_format was never stored, so get_config() raised
        # AttributeError; keep it for serialization round-tripping.
        self.data_format = data_format
        self.upsampling = keras.utils.conv_utils.normalize_tuple(upsampling, 2, 'size')
        self.input_spec = keras.layers.InputSpec(ndim=4)

    def compute_output_shape(self, input_shape):
        """Scale the two spatial dims; batch and channel dims pass through."""
        height = self.upsampling[0] * \
                 input_shape[1] if input_shape[1] is not None else None
        width = self.upsampling[1] * \
                input_shape[2] if input_shape[2] is not None else None
        return (input_shape[0], height, width, input_shape[3])

    def call(self, inputs):
        # Static-shape resize: requires known spatial dims at build time.
        return tf.image.resize_bilinear(inputs, (int(inputs.shape[1] * self.upsampling[0]),
                                                 int(inputs.shape[2] * self.upsampling[1])))

    def get_config(self):
        # Bug fix: serialize under the constructor's keyword name
        # ('upsampling', not 'size') so from_config() can rebuild the layer.
        config = {'upsampling': self.upsampling, 'data_format': self.data_format}
        base_config = super(BilinearUpsampling, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
def xception_downsample_block(x, channels, top_relu=False):
    """Three depthwise-separable convs; the last stage downsamples (stride 2)
    and omits the trailing ReLU."""
    if top_relu:
        x = Activation("relu")(x)
    for stage in range(3):
        last = stage == 2
        x = DepthwiseConv2D((3, 3), strides=(2, 2) if last else (1, 1),
                            padding="same", use_bias=False)(x)
        x = BatchNormalization()(x)
        x = Conv2D(channels, (1, 1), padding="same", use_bias=False)(x)
        x = BatchNormalization()(x)
        if not last:
            x = Activation("relu")(x)
    return x
def res_xception_downsample_block(x, channels):
    """Downsampling xception block with a strided 1x1 projection shortcut."""
    shortcut = Conv2D(channels, (1, 1), strides=(2, 2), padding="same", use_bias=False)(x)
    shortcut = BatchNormalization()(shortcut)
    main = xception_downsample_block(x, channels)
    return add([main, shortcut])
def xception_block(x, channels):
    """Three identical pre-activated separable convs (ReLU -> depthwise -> BN
    -> pointwise -> BN), no residual."""
    for _ in range(3):
        x = Activation("relu")(x)
        x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
        x = BatchNormalization()(x)
        x = Conv2D(channels, (1, 1), padding="same", use_bias=False)(x)
        x = BatchNormalization()(x)
    return x
def res_xception_block(x, channels):
    """Xception block with an identity shortcut."""
    return add([xception_block(x, channels), x])
def aspp(x, input_shape, out_stride):
    """Atrous Spatial Pyramid Pooling head (DeepLabV3+ style).

    Five parallel branches concatenated along channels:
    b0: 1x1 conv; b1-b3: separable atrous convs; b4: image-level pooling
    upsampled back to the feature-map size.

    NOTE(review): b2 and b3 both use dilation_rate=(12, 12); the DeepLabV3+
    reference uses atrous rates 6, 12, 18 — confirm whether (12, 12) on b3
    is intentional.
    """
    # branch 0: plain 1x1 projection
    b0 = Conv2D(256, (1, 1), padding="same", use_bias=False)(x)
    b0 = BatchNormalization()(b0)
    b0 = Activation("relu")(b0)
    # branch 1: separable atrous conv, rate 6
    b1 = DepthwiseConv2D((3, 3), dilation_rate=(6, 6), padding="same", use_bias=False)(x)
    b1 = BatchNormalization()(b1)
    b1 = Activation("relu")(b1)
    b1 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b1)
    b1 = BatchNormalization()(b1)
    b1 = Activation("relu")(b1)
    # branch 2: separable atrous conv, rate 12
    b2 = DepthwiseConv2D((3, 3), dilation_rate=(12, 12), padding="same", use_bias=False)(x)
    b2 = BatchNormalization()(b2)
    b2 = Activation("relu")(b2)
    b2 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b2)
    b2 = BatchNormalization()(b2)
    b2 = Activation("relu")(b2)
    # branch 3: separable atrous conv (same rate 12 as b2 — see NOTE above)
    b3 = DepthwiseConv2D((3, 3), dilation_rate=(12, 12), padding="same", use_bias=False)(x)
    b3 = BatchNormalization()(b3)
    b3 = Activation("relu")(b3)
    b3 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b3)
    b3 = BatchNormalization()(b3)
    b3 = Activation("relu")(b3)
    # branch 4: global average pooling over the full feature map,
    # projected and bilinearly upsampled back to the map size
    out_shape = int(input_shape[0] / out_stride)
    b4 = AveragePooling2D(pool_size=(out_shape, out_shape))(x)
    b4 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b4)
    b4 = BatchNormalization()(b4)
    b4 = Activation("relu")(b4)
    b4 = BilinearUpsampling((out_shape, out_shape))(b4)
    x = Concatenate()([b4, b0, b1, b2, b3])
    return x
def DeeplabV3_plus(nClasses=21, input_height=512, input_width=512, out_stride=16):
    """Build a DeepLabV3+ segmentation model with a modified Xception backbone.

    Returns a Model mapping (H, W, 3) images to per-pixel class probabilities
    flattened to (H*W, nClasses); `outputHeight`/`outputWidth` are attached
    as attributes for callers that need the spatial output size.

    Fixes: `Model(input=..., output=...)` used deprecated keyword names that
    raise TypeError on newer Keras releases — replaced with positional
    arguments; the shape-probing Model is now built once instead of twice.
    """
    img_input = Input(shape=(input_height, input_width, 3))
    # entry flow stem
    x = Conv2D(32, (3, 3), strides=(2, 2), padding="same", use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(64, (3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = res_xception_downsample_block(x, 128)
    # second downsampling block (unrolled to expose the `skip` tensor
    # that feeds the decoder)
    res = Conv2D(256, (1, 1), strides=(2, 2), padding="same", use_bias=False)(x)
    res = BatchNormalization()(res)
    x = Activation("relu")(x)
    x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(256, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(256, (1, 1), padding="same", use_bias=False)(x)
    skip = BatchNormalization()(x)  # low-level features reused by the decoder
    x = Activation("relu")(skip)
    x = DepthwiseConv2D((3, 3), strides=(2, 2), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(256, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = add([x, res])
    x = xception_downsample_block(x, 728, top_relu=True)
    # middle flow: 16 residual Xception blocks
    for i in range(16):
        x = res_xception_block(x, 728)
    # exit flow
    res = Conv2D(1024, (1, 1), padding="same", use_bias=False)(x)
    res = BatchNormalization()(res)
    x = Activation("relu")(x)
    x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(728, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(1024, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(1024, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = add([x, res])
    x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(1536, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(1536, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(2048, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    # aspp
    x = aspp(x, (input_height, input_width, 3), out_stride)
    x = Conv2D(256, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Dropout(0.9)(x)
    ##decoder
    x = BilinearUpsampling((4, 4))(x)
    dec_skip = Conv2D(48, (1, 1), padding="same", use_bias=False)(skip)
    dec_skip = BatchNormalization()(dec_skip)
    dec_skip = Activation("relu")(dec_skip)
    x = Concatenate()([x, dec_skip])
    x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(256, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(256, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(nClasses, (1, 1), padding="same")(x)
    x = BilinearUpsampling((4, 4))(x)
    # probe the spatial output size once, then flatten for per-pixel softmax
    o_shape = Model(img_input, x).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]
    x = Reshape((outputHeight * outputWidth, nClasses))(x)
    x = Activation('softmax')(x)
    model = Model(img_input, x)
    model.outputHeight = outputHeight
    model.outputWidth = outputWidth
    return model
def initial_block(inp, nb_filter=13, nb_row=3, nb_col=3, strides=(2, 2)):
    """ENet initial block: a strided conv branch concatenated (channel axis)
    with a max-pool of the input."""
    conv_branch = Conv2D(nb_filter, (nb_row, nb_col), padding='same', strides=strides)(inp)
    pool_branch = MaxPooling2D()(inp)
    return concatenate([conv_branch, pool_branch], axis=3)
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    """ENet encoder bottleneck.

    inp            -- input tensor
    output         -- output channel count
    internal_scale -- bottleneck compression factor for the internal width
    asymmetric     -- if non-zero, use a 1xN / Nx1 factorized conv of this size
    dilated        -- if non-zero, use a 3x3 conv with this dilation rate
    downsample     -- halve spatial size (strided 2x2 projection + max-pool shortcut)
    dropout_rate   -- spatial dropout applied to the main branch
    Raises Exception when both asymmetric and dilated are set.
    """
    # main branch
    internal = output // internal_scale
    encoder = inp
    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                     # padding='same',
                     strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    # conv: plain, asymmetric (factorized), or dilated — mutually exclusive
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise (Exception('You shouldn\'t be here'))
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    # 1x1 expansion back to `output` channels
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)
    other = inp
    # other branch: when downsampling, max-pool then zero-pad the channel
    # dimension up to `output` (the Permute pair moves channels into the
    # width axis so ZeroPadding2D can pad them)
    if downsample:
        other = MaxPooling2D()(other)
        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)
    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    return encoder
def en_build(inp, dropout_rate=0.01):
    """ENet encoder: initial block, stage 1 (5 bottlenecks), then stages 2-3."""
    net = initial_block(inp)
    net = BatchNormalization(momentum=0.1)(net)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    net = PReLU(shared_axes=[1, 2])(net)
    net = bottleneck(net, 64, downsample=True, dropout_rate=dropout_rate)  # bottleneck 1.0
    for _ in range(4):
        net = bottleneck(net, 64, dropout_rate=dropout_rate)  # bottleneck 1.i
    net = bottleneck(net, 128, downsample=True)  # bottleneck 2.0
    # stages 2 and 3 share the same layout of eight bottlenecks
    for _ in range(2):
        net = bottleneck(net, 128)                # x.1
        net = bottleneck(net, 128, dilated=2)     # x.2
        net = bottleneck(net, 128, asymmetric=5)  # x.3
        net = bottleneck(net, 128, dilated=4)     # x.4
        net = bottleneck(net, 128)                # x.5
        net = bottleneck(net, 128, dilated=8)     # x.6
        net = bottleneck(net, 128, asymmetric=5)  # x.7
        net = bottleneck(net, 128, dilated=16)    # x.8
    return net
# decoder
def de_bottleneck(encoder, output, upsample=False, reverse_module=False):
    """ENet decoder bottleneck.

    encoder        -- input tensor
    output         -- output channel count
    upsample       -- use a strided transposed conv in the main branch
    reverse_module -- when truthy together with `upsample`, the shortcut is
                      upsampled too and the branches are summed; when
                      `upsample` is set but `reverse_module` is False, the
                      main branch is returned without the residual add.
    """
    internal = output // 4
    # main branch: 1x1 reduce -> 3x3 (or transposed) -> 1x1 expand
    x = Conv2D(internal, (1, 1), use_bias=False)(encoder)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)
    if not upsample:
        x = Conv2D(internal, (3, 3), padding='same', use_bias=True)(x)
    else:
        x = Conv2DTranspose(filters=internal, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)
    x = Conv2D(output, (1, 1), padding='same', use_bias=False)(x)
    # shortcut branch: project (and optionally upsample) when the channel
    # count changes or spatial size changes
    other = encoder
    if encoder.get_shape()[-1] != output or upsample:
        other = Conv2D(output, (1, 1), padding='same', use_bias=False)(other)
        other = BatchNormalization(momentum=0.1)(other)
        if upsample and reverse_module is not False:
            other = UpSampling2D(size=(2, 2))(other)
    if upsample and reverse_module is False:
        decoder = x
    else:
        x = BatchNormalization(momentum=0.1)(x)
        decoder = add([x, other])
        decoder = Activation('relu')(decoder)
    return decoder
def de_build(encoder, nc):
    """ENet decoder: two upsampling stages followed by a final transposed
    conv producing `nc` feature maps at input resolution."""
    net = de_bottleneck(encoder, 64, upsample=True, reverse_module=True)  # bottleneck 4.0
    net = de_bottleneck(net, 64)  # bottleneck 4.1
    net = de_bottleneck(net, 64)  # bottleneck 4.2
    net = de_bottleneck(net, 16, upsample=True, reverse_module=True)  # bottleneck 5.0
    net = de_bottleneck(net, 16)  # bottleneck 5.1
    return Conv2DTranspose(filters=nc, kernel_size=(2, 2), strides=(2, 2), padding='same')(net)
def ENet(n_classes, input_height=256, input_width=256):
    """Build a full ENet segmentation model.

    Input spatial dimensions must be multiples of 32. The output is the
    per-pixel softmax flattened to (H*W, n_classes); the spatial size is
    attached as `outputHeight`/`outputWidth` attributes on the model.
    """
    assert input_height % 32 == 0
    assert input_width % 32 == 0
    img_input = Input(shape=(input_height, input_width, 3))
    features = de_build(en_build(img_input), n_classes)
    # probe the decoder's spatial output size via a throwaway Model
    out_h, out_w = Model(img_input, features).output_shape[1:3]
    features = Reshape((out_h * out_w, n_classes))(features)
    features = Activation('softmax')(features)
    model = Model(img_input, features)
    model.outputWidth = out_w
    model.outputHeight = out_h
    return model
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def ConvBnLayer(x, oup, kernel_size, stride, padding='valid'):
    """Convolution followed by batch normalization (no activation)."""
    conv_out = Conv2D(filters=oup, kernel_size=kernel_size, strides=stride, padding=padding)(x)
    return BatchNormalization(epsilon=1e-5, momentum=0.1)(conv_out)
def SELayer(x, reduction=4):
    """Squeeze-and-Excitation gate: global-pool `x`, pass through a
    bottlenecked two-layer MLP (sigmoid output), and rescale `x` channel-wise.

    Fix: the original read `channel.value`, which only works on TF1's
    `Dimension` objects and raises AttributeError under TF2, where shape
    entries are plain ints; `int(channel)` works in both.
    """
    channel = int(x.shape[-1])
    y = GlobalAveragePooling2D()(x)
    y = Dense(units=channel // reduction, activation='relu')(y)
    y = Dense(units=channel, activation='sigmoid')(y)
    y = Reshape([1, 1, channel])(y)
    return Multiply()([x, y])
def DepthWiseConv(x, kernel_size=3, stride=1, depth_multiplier=1, padding='same', relu=False):
    """Depthwise convolution + BN, with an optional trailing ReLU.

    NOTE(review): the layer is constructed with `kernel_size // 2`
    (e.g. 3 -> 1, 5 -> 2), not `kernel_size` itself — this looks
    unintentional; the GhostNet reference uses the full dw kernel size.
    Confirm before changing, as it alters the exported graph.
    """
    y = DepthwiseConv2D(kernel_size=kernel_size // 2, depth_multiplier=depth_multiplier,
                        strides=stride, padding=padding)(x)
    y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y)
    if relu:
        y = Activation('relu')(y)
    return y
def GhostModule(x, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True):
    """Ghost module: a primary convolution plus cheap depthwise-generated
    features, concatenated and truncated to `oup` channels."""
    primary_ch = math.ceil(oup / ratio)
    cheap_ch = primary_ch * (ratio - 1)
    depth_mult = cheap_ch // primary_ch
    primary = ConvBnLayer(x, primary_ch, kernel_size=kernel_size, stride=stride, padding='same')
    if relu:
        primary = Activation('relu')(primary)
    # "cheap" features come from a depthwise conv over the primary output
    cheap = DepthWiseConv(primary, kernel_size=dw_size,
                          depth_multiplier=depth_mult, padding='same', stride=1)
    if relu:
        cheap = Activation('relu')(cheap)
    merged = Concatenate()([primary, cheap])
    # concatenation may overshoot oup (ceil), so slice back down
    return Lambda(lambda t: t[:, :, :, :oup])(merged)
def GhostBottleneck(x, hidden_dim, oup, kernel_size, stride, use_se):
    """GhostNet bottleneck: expand via GhostModule, optional stride-2
    depthwise and SE gate, project via GhostModule, residual add.

    x          -- input tensor
    hidden_dim -- expanded (hidden) channel count
    oup        -- output channel count
    stride     -- 1 or 2 (asserted)
    use_se     -- truthy to insert a squeeze-and-excitation gate
    """
    assert stride in [1, 2]
    inp = x.shape[-1]
    # identity shortcut only when shape is preserved; otherwise project it
    # (NOTE(review): `inp` is a raw shape entry — Dimension in TF1, int in
    # TF2 — the `inp == oup` comparison relies on Dimension.__eq__)
    if stride == 1 and inp == oup:
        shortcut = x
    else:
        shortcut = DepthWiseConv(x, kernel_size=3, stride=stride, relu=False)
        shortcut = ConvBnLayer(shortcut, oup, 1, 1, padding='same')
    x = GhostModule(x, hidden_dim, kernel_size=1, relu=True)
    if stride == 2:
        x = DepthWiseConv(x, kernel_size, stride, relu=False)
    if use_se:
        x = SELayer(x)
    x = GhostModule(x, oup, kernel_size=1, relu=False)
    return Add()([x, shortcut])
def GhostNet(x, num_classes=1000, width_mult=1.):
    """GhostNet classifier body: stem conv, a table-driven stack of
    GhostBottlenecks, then pooling and a dense head of `num_classes` logits.

    Channel counts are scaled by `width_mult` and rounded to multiples of 4.
    """
    cfgs = [
        # k, t, c, SE, s
        [3, 16, 16, 0, 1],
        [3, 48, 24, 0, 2],
        [3, 72, 24, 0, 1],
        [5, 72, 40, 1, 2],
        [5, 120, 40, 1, 1],
        [3, 240, 80, 0, 2],
        [3, 200, 80, 0, 1],
        [3, 184, 80, 0, 1],
        [3, 184, 80, 0, 1],
        [3, 480, 112, 1, 1],
        [3, 672, 112, 1, 1],
        [5, 672, 160, 1, 2],
        [5, 960, 160, 0, 1],
        [5, 960, 160, 1, 1],
        [5, 960, 160, 0, 1],
        [5, 960, 160, 1, 1]
    ]
    # stem: stride-2 3x3 conv
    x = ConvBnLayer(x, _make_divisible(16 * width_mult, 4), 3, 2, padding='same')
    for kernel, exp_size, width, se_flag, stride in cfgs:
        out_ch = _make_divisible(width * width_mult, 4)
        hidden_ch = _make_divisible(exp_size * width_mult, 4)
        x = GhostBottleneck(x, hidden_ch, out_ch, kernel, stride, se_flag)
    # final 1x1 conv widens to the last expansion size
    x = ConvBnLayer(x, _make_divisible(exp_size * width_mult, 4),
                    kernel_size=1, stride=1, padding='same')
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    # classifier head
    x = Dense(1280)(x)
    x = BatchNormalization(epsilon=1e-5, momentum=0.1)(x)
    x = Activation('relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(num_classes)(x)
    return x
def conv(x, outsize, kernel_size, strides_=1, padding_='same', activation=None):
    """Conv2D wrapper with HRNet defaults: RandomNormal(0.001) init, no bias."""
    layer = Conv2D(outsize, kernel_size, strides=strides_, padding=padding_,
                   kernel_initializer=RandomNormal(stddev=0.001),
                   use_bias=False, activation=activation)
    return layer(x)
def Bottleneck(x, size, downsampe=False):
    """HRNet bottleneck: 1x1 -> 3x3 -> 1x1 (4x expansion) with residual add.

    When `downsampe` is set, the shortcut is projected to the expanded width.
    """
    shortcut = x
    y = conv(x, size, 1, padding_='valid')
    y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y)
    y = Activation('relu')(y)
    y = conv(y, size, 3)
    y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y)
    y = Activation('relu')(y)
    y = conv(y, size * 4, 1, padding_='valid')
    y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y)
    if downsampe:
        shortcut = conv(x, size * 4, 1, padding_='valid')
        shortcut = BatchNormalization(epsilon=1e-5, momentum=0.1)(shortcut)
    y = Add()([y, shortcut])
    return Activation('relu')(y)
def BasicBlock(x, size, downsampe=False):
    """HRNet basic block: two 3x3 convs with a residual add.

    When `downsampe` is set, the shortcut is projected with a 1x1 conv.
    """
    shortcut = x
    y = conv(x, size, 3)
    y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y)
    y = Activation('relu')(y)
    y = conv(y, size, 3)
    y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y)
    if downsampe:
        shortcut = conv(x, size, 1, padding_='valid')
        shortcut = BatchNormalization(epsilon=1e-5, momentum=0.1)(shortcut)
    y = Add()([y, shortcut])
    return Activation('relu')(y)
def layer1(x):
    """HRNet stage-1 stem: four bottlenecks; the first projects the shortcut."""
    x = Bottleneck(x, 64, downsampe=True)
    for _ in range(3):
        x = Bottleneck(x, 64)
    return x
def transition_layer(x, in_channels, out_channels):
    """Adapt a list of branch tensors to new channel widths.

    Existing branches are re-projected (conv+BN+ReLU) only when their width
    changes; each extra output branch is created by stride-2 convolving the
    last input branch.
    """
    num_in = len(in_channels)
    out = []
    for i, target in enumerate(out_channels):
        if i < num_in:
            if in_channels[i] != target:
                y = conv(x[i], target, 3)
                y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y)
                y = Activation('relu')(y)
                out.append(y)
            else:
                out.append(x[i])
        else:
            # new, lower-resolution branch derived from the last input branch
            y = conv(x[-1], target, 3, strides_=2)
            y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y)
            y = Activation('relu')(y)
            out.append(y)
    return out
def branches(x, block_num, channels):
    """Apply `block_num` BasicBlocks independently to each resolution branch."""
    out = []
    for i, width in enumerate(channels):
        y = x[i]
        for _ in range(block_num):
            y = BasicBlock(y, width)
        out.append(y)
    return out
def fuse_layers(x, channels, multi_scale_output=True):
    """HRNet cross-resolution fusion.

    For each output branch i, sum contributions from every branch j:
    higher-resolution sources (j > i) are 1x1-projected and upsampled;
    lower-resolution sources (j < i) are downsampled with repeated stride-2
    convs. When `multi_scale_output` is False, only branch 0 is produced.
    """
    out = []
    for i in range(len(channels) if multi_scale_output else 1):
        residual = x[i]
        for j in range(len(channels)):
            if j > i:
                # finer branch: project to channels[i], then upsample 2**(j-i)
                y = conv(x[j], channels[i], 1, padding_='valid')
                y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y)
                y = UpSampling2D(size=2 ** (j - i))(y)
                residual = Add()([residual, y])
            elif j < i:
                # coarser path: chain of stride-2 convs; only the last one
                # switches to channels[i] and omits the ReLU
                y = x[j]
                for k in range(i - j):
                    if k == i - j - 1:
                        y = conv(y, channels[i], 3, strides_=2)
                        y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y)
                    else:
                        # intermediate steps keep channels[j]
                        y = conv(y, channels[j], 3, strides_=2)
                        y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y)
                        y = Activation('relu')(y)
                    residual = Add()([residual, y])
        residual = Activation('relu')(residual)
        out.append(residual)
    return out
def HighResolutionModule(x, channels, multi_scale_output=True):
    """One HRNet module: 4 BasicBlocks per branch, then cross-scale fusion."""
    branch_outputs = branches(x, 4, channels)
    return fuse_layers(branch_outputs, channels, multi_scale_output=multi_scale_output)
def stage(x, num_modules, channels, multi_scale_output=True):
    """Stack `num_modules` HighResolutionModules; only the last module may be
    single-scale (when `multi_scale_output` is False)."""
    out = x
    for i in range(num_modules):
        is_last = (i == num_modules - 1)
        # all modules are multi-scale except possibly the final one
        out = HighResolutionModule(out, channels,
                                   multi_scale_output=multi_scale_output or not is_last)
    return out
def HRNet(nClasses, input_height=224, input_width=224):
    """Build an HRNet segmentation model.

    Returns a Model producing a per-pixel softmax flattened to
    (H*W, nClasses); the spatial output size is attached as
    `outputHeight`/`outputWidth` attributes.

    Fixes: `Model(input=..., output=...)` used deprecated keyword names that
    raise TypeError on newer Keras releases — replaced with positional
    arguments; the shape-probing Model is now built once instead of twice.
    """
    channels_2 = [32, 64]
    channels_3 = [32, 64, 128]
    channels_4 = [32, 64, 128, 256]
    num_modules_2 = 1
    num_modules_3 = 4
    num_modules_4 = 3
    inputs = Input(shape=(input_height, input_width, 3))
    # stem: two stride-2 convs -> 1/4 resolution
    x = conv(inputs, 64, 3, strides_=2)
    x = BatchNormalization(epsilon=1e-5, momentum=0.1)(x)
    x = conv(x, 64, 3, strides_=2)
    x = BatchNormalization(epsilon=1e-5, momentum=0.1)(x)
    x = Activation('relu')(x)
    la1 = layer1(x)
    tr1 = transition_layer([la1], [256], channels_2)
    st2 = stage(tr1, num_modules_2, channels_2)
    tr2 = transition_layer(st2, channels_2, channels_3)
    st3 = stage(tr2, num_modules_3, channels_3)
    tr3 = transition_layer(st3, channels_3, channels_4)
    st4 = stage(tr3, num_modules_4, channels_4, multi_scale_output=False)
    # upsample the highest-resolution branch back toward input resolution
    up1 = UpSampling2D()(st4[0])
    up1 = conv(up1, 32, 3)
    up1 = BatchNormalization(epsilon=1e-5, momentum=0.1)(up1)
    up1 = Activation('relu')(up1)
    up2 = UpSampling2D()(up1)
    up2 = conv(up2, 32, 3)
    up2 = BatchNormalization(epsilon=1e-5, momentum=0.1)(up2)
    up2 = Activation('relu')(up2)
    final = conv(up2, nClasses, 1, padding_='valid')
    # probe the spatial output size once, then flatten for per-pixel softmax
    o_shape = Model(inputs, final).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]
    out = Reshape((outputHeight * outputWidth, nClasses))(final)
    out = Activation('softmax')(out)
    model = Model(inputs, out)
    model.outputHeight = outputHeight
    model.outputWidth = outputWidth
    return model
def ICNet(nClasses, input_height=224, input_width=224):
    """Build an ICNet segmentation model.

    Three resolution branches: a 1/2-scale PSPNet-style trunk (down to 1/4
    via `conv3_1_sub4`), fused with a full-resolution shallow branch, then
    upsampled; the output is a per-pixel softmax flattened to
    (H*W, nClasses). Uses the TF1 op `tf.image.resize_bilinear` in Lambda
    layers, so this model requires a TF1-compatible backend.
    """
    inputs = Input(shape=(input_height, input_width, 3))
    # (1/2)
    y = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1]) // 2, int(x.shape[2]) // 2)),
               name='data_sub2')(inputs)
    y = Conv2D(32, 3, strides=2, padding='same', activation='relu', name='conv1_1_3x3_s2')(y)
    y = BatchNormalization(name='conv1_1_3x3_s2_bn')(y)
    y = Conv2D(32, 3, padding='same', activation='relu', name='conv1_2_3x3')(y)
    y = BatchNormalization(name='conv1_2_3x3_s2_bn')(y)
    y = Conv2D(64, 3, padding='same', activation='relu', name='conv1_3_3x3')(y)
    y = BatchNormalization(name='conv1_3_3x3_bn')(y)
    y_ = MaxPooling2D(pool_size=3, strides=2, name='pool1_3x3_s2')(y)
    # residual stage conv2_x (ResNet-style bottlenecks; `y` is the projected
    # or identity shortcut, `y_` the main branch)
    y = Conv2D(128, 1, name='conv2_1_1x1_proj')(y_)
    y = BatchNormalization(name='conv2_1_1x1_proj_bn')(y)
    y_ = Conv2D(32, 1, activation='relu', name='conv2_1_1x1_reduce')(y_)
    y_ = BatchNormalization(name='conv2_1_1x1_reduce_bn')(y_)
    y_ = ZeroPadding2D(name='padding1')(y_)
    y_ = Conv2D(32, 3, activation='relu', name='conv2_1_3x3')(y_)
    y_ = BatchNormalization(name='conv2_1_3x3_bn')(y_)
    y_ = Conv2D(128, 1, name='conv2_1_1x1_increase')(y_)
    y_ = BatchNormalization(name='conv2_1_1x1_increase_bn')(y_)
    y = Add(name='conv2_1')([y, y_])
    y_ = Activation('relu', name='conv2_1/relu')(y)
    y = Conv2D(32, 1, activation='relu', name='conv2_2_1x1_reduce')(y_)
    y = BatchNormalization(name='conv2_2_1x1_reduce_bn')(y)
    y = ZeroPadding2D(name='padding2')(y)
    y = Conv2D(32, 3, activation='relu', name='conv2_2_3x3')(y)
    y = BatchNormalization(name='conv2_2_3x3_bn')(y)
    y = Conv2D(128, 1, name='conv2_2_1x1_increase')(y)
    y = BatchNormalization(name='conv2_2_1x1_increase_bn')(y)
    y = Add(name='conv2_2')([y, y_])
    y_ = Activation('relu', name='conv2_2/relu')(y)
    y = Conv2D(32, 1, activation='relu', name='conv2_3_1x1_reduce')(y_)
    y = BatchNormalization(name='conv2_3_1x1_reduce_bn')(y)
    y = ZeroPadding2D(name='padding3')(y)
    y = Conv2D(32, 3, activation='relu', name='conv2_3_3x3')(y)
    y = BatchNormalization(name='conv2_3_3x3_bn')(y)
    y = Conv2D(128, 1, name='conv2_3_1x1_increase')(y)
    y = BatchNormalization(name='conv2_3_1x1_increase_bn')(y)
    y = Add(name='conv2_3')([y, y_])
    y_ = Activation('relu', name='conv2_3/relu')(y)
    # stage conv3_1 downsamples to 1/4 of the half-resolution branch
    y = Conv2D(256, 1, strides=2, name='conv3_1_1x1_proj')(y_)
    y = BatchNormalization(name='conv3_1_1x1_proj_bn')(y)
    y_ = Conv2D(64, 1, strides=2, activation='relu', name='conv3_1_1x1_reduce')(y_)
    y_ = BatchNormalization(name='conv3_1_1x1_reduce_bn')(y_)
    y_ = ZeroPadding2D(name='padding4')(y_)
    y_ = Conv2D(64, 3, activation='relu', name='conv3_1_3x3')(y_)
    y_ = BatchNormalization(name='conv3_1_3x3_bn')(y_)
    y_ = Conv2D(256, 1, name='conv3_1_1x1_increase')(y_)
    y_ = BatchNormalization(name='conv3_1_1x1_increase_bn')(y_)
    y = Add(name='conv3_1')([y, y_])
    z = Activation('relu', name='conv3_1/relu')(y)  # kept for the sub2 fusion later
    # (1/4)
    y_ = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1]) // 2, int(x.shape[2]) // 2)),
                name='conv3_1_sub4')(z)
    y = Conv2D(64, 1, activation='relu', name='conv3_2_1x1_reduce')(y_)
    y = BatchNormalization(name='conv3_2_1x1_reduce_bn')(y)
    y = ZeroPadding2D(name='padding5')(y)
    y = Conv2D(64, 3, activation='relu', name='conv3_2_3x3')(y)
    y = BatchNormalization(name='conv3_2_3x3_bn')(y)
    y = Conv2D(256, 1, name='conv3_2_1x1_increase')(y)
    y = BatchNormalization(name='conv3_2_1x1_increase_bn')(y)
    y = Add(name='conv3_2')([y, y_])
    y_ = Activation('relu', name='conv3_2/relu')(y)
    y = Conv2D(64, 1, activation='relu', name='conv3_3_1x1_reduce')(y_)
    y = BatchNormalization(name='conv3_3_1x1_reduce_bn')(y)
    y = ZeroPadding2D(name='padding6')(y)
    y = Conv2D(64, 3, activation='relu', name='conv3_3_3x3')(y)
    y = BatchNormalization(name='conv3_3_3x3_bn')(y)
    y = Conv2D(256, 1, name='conv3_3_1x1_increase')(y)
    y = BatchNormalization(name='conv3_3_1x1_increase_bn')(y)
    y = Add(name='conv3_3')([y, y_])
    y_ = Activation('relu', name='conv3_3/relu')(y)
    y = Conv2D(64, 1, activation='relu', name='conv3_4_1x1_reduce')(y_)
    y = BatchNormalization(name='conv3_4_1x1_reduce_bn')(y)
    y = ZeroPadding2D(name='padding7')(y)
    y = Conv2D(64, 3, activation='relu', name='conv3_4_3x3')(y)
    y = BatchNormalization(name='conv3_4_3x3_bn')(y)
    y = Conv2D(256, 1, name='conv3_4_1x1_increase')(y)
    y = BatchNormalization(name='conv3_4_1x1_increase_bn')(y)
    y = Add(name='conv3_4')([y, y_])
    y_ = Activation('relu', name='conv3_4/relu')(y)
    # stage conv4_x: dilated (rate-2) bottlenecks keep spatial size
    y = Conv2D(512, 1, name='conv4_1_1x1_proj')(y_)
    y = BatchNormalization(name='conv4_1_1x1_proj_bn')(y)
    y_ = Conv2D(128, 1, activation='relu', name='conv4_1_1x1_reduce')(y_)
    y_ = BatchNormalization(name='conv4_1_1x1_reduce_bn')(y_)
    y_ = ZeroPadding2D(padding=2, name='padding8')(y_)
    y_ = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_1_3x3')(y_)
    y_ = BatchNormalization(name='conv4_1_3x3_bn')(y_)
    y_ = Conv2D(512, 1, name='conv4_1_1x1_increase')(y_)
    y_ = BatchNormalization(name='conv4_1_1x1_increase_bn')(y_)
    y = Add(name='conv4_1')([y, y_])
    y_ = Activation('relu', name='conv4_1/relu')(y)
    y = Conv2D(128, 1, activation='relu', name='conv4_2_1x1_reduce')(y_)
    y = BatchNormalization(name='conv4_2_1x1_reduce_bn')(y)
    y = ZeroPadding2D(padding=2, name='padding9')(y)
    y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_2_3x3')(y)
    y = BatchNormalization(name='conv4_2_3x3_bn')(y)
    y = Conv2D(512, 1, name='conv4_2_1x1_increase')(y)
    y = BatchNormalization(name='conv4_2_1x1_increase_bn')(y)
    y = Add(name='conv4_2')([y, y_])
    y_ = Activation('relu', name='conv4_2/relu')(y)
    y = Conv2D(128, 1, activation='relu', name='conv4_3_1x1_reduce')(y_)
    y = BatchNormalization(name='conv4_3_1x1_reduce_bn')(y)
    y = ZeroPadding2D(padding=2, name='padding10')(y)
    y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_3_3x3')(y)
    y = BatchNormalization(name='conv4_3_3x3_bn')(y)
    y = Conv2D(512, 1, name='conv4_3_1x1_increase')(y)
    y = BatchNormalization(name='conv4_3_1x1_increase_bn')(y)
    y = Add(name='conv4_3')([y, y_])
    y_ = Activation('relu', name='conv4_3/relu')(y)
    y = Conv2D(128, 1, activation='relu', name='conv4_4_1x1_reduce')(y_)
    y = BatchNormalization(name='conv4_4_1x1_reduce_bn')(y)
    y = ZeroPadding2D(padding=2, name='padding11')(y)
    y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_4_3x3')(y)
    y = BatchNormalization(name='conv4_4_3x3_bn')(y)
    y = Conv2D(512, 1, name='conv4_4_1x1_increase')(y)
    y = BatchNormalization(name='conv4_4_1x1_increase_bn')(y)
    y = Add(name='conv4_4')([y, y_])
    y_ = Activation('relu', name='conv4_4/relu')(y)
    y = Conv2D(128, 1, activation='relu', name='conv4_5_1x1_reduce')(y_)
    y = BatchNormalization(name='conv4_5_1x1_reduce_bn')(y)
    y = ZeroPadding2D(padding=2, name='padding12')(y)
    y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_5_3x3')(y)
    y = BatchNormalization(name='conv4_5_3x3_bn')(y)
    y = Conv2D(512, 1, name='conv4_5_1x1_increase')(y)
    y = BatchNormalization(name='conv4_5_1x1_increase_bn')(y)
    y = Add(name='conv4_5')([y, y_])
    y_ = Activation('relu', name='conv4_5/relu')(y)
    y = Conv2D(128, 1, activation='relu', name='conv4_6_1x1_reduce')(y_)
    y = BatchNormalization(name='conv4_6_1x1_reduce_bn')(y)
    y = ZeroPadding2D(padding=2, name='padding13')(y)
    y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_6_3x3')(y)
    y = BatchNormalization(name='conv4_6_3x3_bn')(y)
    y = Conv2D(512, 1, name='conv4_6_1x1_increase')(y)
    y = BatchNormalization(name='conv4_6_1x1_increase_bn')(y)
    y = Add(name='conv4_6')([y, y_])
    y = Activation('relu', name='conv4_6/relu')(y)
    # stage conv5_x: dilated (rate-4) bottlenecks
    y_ = Conv2D(1024, 1, name='conv5_1_1x1_proj')(y)
    y_ = BatchNormalization(name='conv5_1_1x1_proj_bn')(y_)
    y = Conv2D(256, 1, activation='relu', name='conv5_1_1x1_reduce')(y)
    y = BatchNormalization(name='conv5_1_1x1_reduce_bn')(y)
    y = ZeroPadding2D(padding=4, name='padding14')(y)
    y = Conv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_1_3x3')(y)
    y = BatchNormalization(name='conv5_1_3x3_bn')(y)
    y = Conv2D(1024, 1, name='conv5_1_1x1_increase')(y)
    y = BatchNormalization(name='conv5_1_1x1_increase_bn')(y)
    y = Add(name='conv5_1')([y, y_])
    y_ = Activation('relu', name='conv5_1/relu')(y)
    y = Conv2D(256, 1, activation='relu', name='conv5_2_1x1_reduce')(y_)
    y = BatchNormalization(name='conv5_2_1x1_reduce_bn')(y)
    y = ZeroPadding2D(padding=4, name='padding15')(y)
    y = Conv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_2_3x3')(y)
    y = BatchNormalization(name='conv5_2_3x3_bn')(y)
    y = Conv2D(1024, 1, name='conv5_2_1x1_increase')(y)
    y = BatchNormalization(name='conv5_2_1x1_increase_bn')(y)
    y = Add(name='conv5_2')([y, y_])
    y_ = Activation('relu', name='conv5_2/relu')(y)
    y = Conv2D(256, 1, activation='relu', name='conv5_3_1x1_reduce')(y_)
    y = BatchNormalization(name='conv5_3_1x1_reduce_bn')(y)
    y = ZeroPadding2D(padding=4, name='padding16')(y)
    y = Conv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_3_3x3')(y)
    y = BatchNormalization(name='conv5_3_3x3_bn')(y)
    y = Conv2D(1024, 1, name='conv5_3_1x1_increase')(y)
    y = BatchNormalization(name='conv5_3_1x1_increase_bn')(y)
    y = Add(name='conv5_3')([y, y_])
    y = Activation('relu', name='conv5_3/relu')(y)
    # pyramid pooling: pooled at 4 scales, upsampled and summed back in
    h, w = y.shape[1:3].as_list()
    pool1 = AveragePooling2D(pool_size=(h, w), strides=(h, w), name='conv5_3_pool1')(y)
    pool1 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(h, w)), name='conv5_3_pool1_interp')(pool1)
    pool2 = AveragePooling2D(pool_size=(h / 2, w / 2), strides=(h // 2, w // 2), name='conv5_3_pool2')(y)
    pool2 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(h, w)), name='conv5_3_pool2_interp')(pool2)
    pool3 = AveragePooling2D(pool_size=(h / 3, w / 3), strides=(h // 3, w // 3), name='conv5_3_pool3')(y)
    pool3 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(h, w)), name='conv5_3_pool3_interp')(pool3)
    pool6 = AveragePooling2D(pool_size=(h / 4, w / 4), strides=(h // 4, w // 4), name='conv5_3_pool6')(y)
    pool6 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(h, w)), name='conv5_3_pool6_interp')(pool6)
    y = Add(name='conv5_3_sum')([y, pool1, pool2, pool3, pool6])
    y = Conv2D(256, 1, activation='relu', name='conv5_4_k1')(y)
    y = BatchNormalization(name='conv5_4_k1_bn')(y)
    # fuse the 1/4 branch with the 1/2 branch (`z`)
    aux_1 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1]) * 2, int(x.shape[2]) * 2)),
                   name='conv5_4_interp')(y)
    y = ZeroPadding2D(padding=2, name='padding17')(aux_1)
    y = Conv2D(128, 3, dilation_rate=2, name='conv_sub4')(y)
    y = BatchNormalization(name='conv_sub4_bn')(y)
    y_ = Conv2D(128, 1, name='conv3_1_sub2_proj')(z)
    y_ = BatchNormalization(name='conv3_1_sub2_proj_bn')(y_)
    y = Add(name='sub24_sum')([y, y_])
    y = Activation('relu', name='sub24_sum/relu')(y)
    aux_2 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1]) * 2, int(x.shape[2]) * 2)),
                   name='sub24_sum_interp')(y)
    y = ZeroPadding2D(padding=2, name='padding18')(aux_2)
    y_ = Conv2D(128, 3, dilation_rate=2, name='conv_sub2')(y)
    y_ = BatchNormalization(name='conv_sub2_bn')(y_)
    # (1)
    y = Conv2D(32, 3, strides=2, padding='same', activation='relu', name='conv1_sub1')(inputs)
    y = BatchNormalization(name='conv1_sub1_bn')(y)
    y = Conv2D(32, 3, strides=2, padding='same', activation='relu', name='conv2_sub1')(y)
    y = BatchNormalization(name='conv2_sub1_bn')(y)
    y = Conv2D(64, 3, strides=2, padding='same', activation='relu', name='conv3_sub1')(y)
    y = BatchNormalization(name='conv3_sub1_bn')(y)
    y = Conv2D(128, 1, name='conv3_sub1_proj')(y)
    y = BatchNormalization(name='conv3_sub1_proj_bn')(y)
    # fuse the full-resolution branch with the combined sub2/sub4 branch
    y = Add(name='sub12_sum')([y, y_])
    y = Activation('relu', name='sub12_sum/relu')(y)
    y = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1]) * 2, int(x.shape[2]) * 2)),
               name='sub12_sum_interp')(y)
    o = Conv2D(nClasses, 1, name='conv6_cls')(y)
    # probe spatial output size, then flatten for per-pixel softmax
    o_shape = Model(inputs, o).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]
    o = (Reshape((outputHeight * outputWidth, nClasses)))(o)
    o = (Activation('softmax'))(o)
    model = Model(inputs, o)
    model.outputWidth = outputWidth
    model.outputHeight = outputHeight
    return model
def relu6(x):
    """ReLU capped at 6 (the MobileNet activation)."""
    return K.relu(x, max_value=6)
# Width Multiplier: Thinner Models
def conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
    """MobileNet stem: pad, conv (width scaled by `alpha`), BN, ReLU6."""
    scaled_filters = int(filters * alpha)
    y = ZeroPadding2D(padding=(1, 1), name='conv1_pad')(inputs)
    y = Conv2D(scaled_filters, kernel, padding='valid', use_bias=False,
               strides=strides, name='conv1')(y)
    y = BatchNormalization(axis=3, name='conv1_bn')(y)
    return Activation(relu6, name='conv1_relu')(y)
def depthwise_conv_block(inputs, pointwise_conv_filters, alpha, depth_multiplier=1, strides=(1, 1), block_id=1):
    """MobileNet depthwise-separable block: pad, 3x3 depthwise, BN, ReLU6,
    then 1x1 pointwise (width scaled by `alpha`), BN, ReLU6."""
    scaled_filters = int(pointwise_conv_filters * alpha)
    y = ZeroPadding2D((1, 1), name='conv_pad_%d' % block_id)(inputs)
    y = DepthwiseConv2D((3, 3), padding='valid', depth_multiplier=depth_multiplier,
                        strides=strides, use_bias=False, name='conv_dw_%d' % block_id)(y)
    y = BatchNormalization(axis=3, name='conv_dw_%d_bn' % block_id)(y)
    y = Activation(relu6, name='conv_dw_%d_relu' % block_id)(y)
    y = Conv2D(scaled_filters, (1, 1), padding='same', use_bias=False, strides=(1, 1),
               name='conv_pw_%d' % block_id)(y)
    y = BatchNormalization(axis=3, name='conv_pw_%d_bn' % block_id)(y)
    return Activation(relu6, name='conv_pw_%d_relu' % block_id)(y)
def MobileNetFCN8 (nClasses, optimizer=None, input_width=512, input_height=512, pretrained='imagenet'):
    """FCN-8s segmentation head on a width-1.0 MobileNet encoder.

    Returns a Model whose output is (H*W, nClasses) softmax scores; the model
    also carries `outputWidth`/`outputHeight` attributes for callers.

    NOTE(review): `optimizer` and `pretrained` are accepted but never
    referenced in this function body.
    """
    input_size = (input_height, input_width, 3)
    img_input = Input(input_size)
    alpha = 1.0
    depth_multiplier = 1
    # MobileNet encoder; f1..f5 snapshot the feature map after each stride.
    x = conv_block(img_input, 16, alpha, strides=(2, 2))
    x = depthwise_conv_block(x, 16, alpha, depth_multiplier, block_id=1)
    f1 = x
    x = depthwise_conv_block(x, 32, alpha, depth_multiplier, strides=(2, 2), block_id=2)
    x = depthwise_conv_block(x, 32, alpha, depth_multiplier, block_id=3)
    f2 = x
    x = depthwise_conv_block(x, 64, alpha, depth_multiplier, strides=(2, 2), block_id=4)
    x = depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=5)
    f3 = x
    x = depthwise_conv_block(x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=6)
    x = depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=7)
    x = depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=8)
    x = depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=9)
    x = depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=10)
    x = depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=11)
    f4 = x
    x = depthwise_conv_block(x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=12)
    x = depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=13)
    f5 = x
    # FCN-8 decoder: score the deepest map, then fuse skip scores from f4/f3.
    o = f5
    o = (Conv2D(256, (7, 7), activation='relu', padding='same'))(o)
    o = BatchNormalization()(o)
    o = (Conv2D(nClasses, (1, 1)))(o)
    # W = (N - 1) * S - 2P + F = 6 * 2 - 0 + 2 = 14
    o = Conv2DTranspose(nClasses, kernel_size=(2, 2), strides=(2, 2), padding="valid")(o)
    # 14 x 14
    o2 = f4
    o2 = (Conv2D(nClasses, (1, 1)))(o2)
    # (14 x 14) (14 x 14)
    o = Add()([o, o2])
    # W = (N - 1) * S - 2P + F = 13 * 2 - 0 + 2 = 28
    o = Conv2DTranspose(nClasses, kernel_size=(2, 2), strides=(2, 2), padding="valid")(o)
    o2 = f3
    o2 = (Conv2D(nClasses, (1, 1)))(o2)
    # (28 x 28) (28 x 28)
    o = Add()([o2, o])
    # 224 x 224
    # W = (N - 1) * S - 2P + F = 27 * 8 + 8 = 224
    o = Conv2DTranspose(nClasses , kernel_size=(8,8), strides=(8,8), padding="valid")(o)
    # Probe the static output size, then flatten spatial dims for the softmax.
    o_shape = Model(img_input, o).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]
    o = (Reshape((outputHeight*outputWidth, nClasses)))(o)
    o = (Activation('softmax'))(o)
    model = Model(img_input, o)
    model.outputWidth = outputWidth
    model.outputHeight = outputHeight
    return model
def conv_block_nested(x, mid_ch, out_ch, kernel_size=3, padding='same'):
    """Two (Conv2D -> BN -> ReLU) stages, the basic unit of NestedUNet.

    Args:
        x: input tensor.
        mid_ch: filters for the first conv.
        out_ch: filters for the second conv.
        kernel_size, padding: conv hyper-parameters.

    Fix: `kernel_size` and `padding` were previously accepted but ignored
    (both convs hard-coded kernel_size=3, padding='same'); they are now
    honoured.  The defaults keep every existing call site unchanged.
    """
    x = Conv2D(mid_ch, kernel_size=kernel_size, padding=padding)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(out_ch, kernel_size=kernel_size, padding=padding)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def NestedUNet(nClasses, input_height=224, input_width=224):
    """UNet++ (nested U-Net) with dense skip connections.

    Returns a Model producing (H*W, nClasses) softmax scores, with
    `outputHeight`/`outputWidth` attributes attached.

    Fixes vs. the original: removed the unused local `t`, replaced the legacy
    `Model(input=..., output=...)` kwargs (removed in modern Keras) with
    `inputs`/`outputs`, and built the shape-probe Model once instead of twice.
    """
    inputs = Input(shape=(input_height, input_width, 3))
    n1 = 32
    filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

    # x{i}_{j}: node at encoder depth i, dense-skip column j.
    x0_0 = conv_block_nested(inputs, filters[0], filters[0])
    x1_0 = conv_block_nested(MaxPooling2D(strides=2)(x0_0), filters[1], filters[1])
    x0_1 = conv_block_nested(Concatenate()([x0_0, UpSampling2D()(x1_0)]), filters[0], filters[0])
    x2_0 = conv_block_nested(MaxPooling2D(strides=2)(x1_0), filters[2], filters[2])
    x1_1 = conv_block_nested(Concatenate()([x1_0, UpSampling2D()(x2_0)]), filters[1], filters[1])
    x0_2 = conv_block_nested(Concatenate()([x0_0, x0_1, UpSampling2D()(x1_1)]), filters[0], filters[0])
    x3_0 = conv_block_nested(MaxPooling2D(strides=2)(x2_0), filters[3], filters[3])
    x2_1 = conv_block_nested(Concatenate()([x2_0, UpSampling2D()(x3_0)]), filters[2], filters[2])
    x1_2 = conv_block_nested(Concatenate()([x1_0, x1_1, UpSampling2D()(x2_1)]), filters[1], filters[1])
    x0_3 = conv_block_nested(Concatenate()([x0_0, x0_1, x0_2, UpSampling2D()(x1_2)]), filters[0], filters[0])
    x4_0 = conv_block_nested(MaxPooling2D(strides=2)(x3_0), filters[4], filters[4])
    x3_1 = conv_block_nested(Concatenate()([x3_0, UpSampling2D()(x4_0)]), filters[3], filters[3])
    x2_2 = conv_block_nested(Concatenate()([x2_0, x2_1, UpSampling2D()(x3_1)]), filters[2], filters[2])
    x1_3 = conv_block_nested(Concatenate()([x1_0, x1_1, x1_2, UpSampling2D()(x2_2)]), filters[1], filters[1])
    x0_4 = conv_block_nested(Concatenate()([x0_0, x0_1, x0_2, x0_3, UpSampling2D()(x1_3)]), filters[0], filters[0])

    o = Conv2D(nClasses, (3, 3), padding='same')(x0_4)
    # Probe the static output shape once (the original built the probe Model twice).
    o_shape = Model(inputs, o).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]
    out = Reshape((outputHeight * outputWidth, nClasses))(o)
    out = Activation('softmax')(out)

    model = Model(inputs=inputs, outputs=out)
    model.outputHeight = outputHeight
    model.outputWidth = outputWidth
    return model
def pool_block(inp, pool_factor):
    """PSPNet-style pyramid-pooling branch.

    Average-pools `inp` down to roughly pool_factor x pool_factor, projects it
    with 1x1 convs, and bilinearly resizes back.  Note that `strides` is
    captured by the Lambda closure below, so the resize target is the pooled
    size multiplied back up.
    """
    h = K.int_shape(inp)[1]
    w = K.int_shape(inp)[2]
    # Same value used for both window and stride -> non-overlapping pooling.
    pool_size = strides = [int(np.round( float(h) / pool_factor)), int(np.round( float(w)/ pool_factor))]
    x = AveragePooling2D(pool_size, strides=strides, padding='same')(inp)
    x = Conv2D(256, (1, 1), padding='same', activation='relu')(x)
    x = BatchNormalization()(x)
    # Bilinear upsample back toward the input resolution.
    x = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1])*strides[0], int(x.shape[2])*strides[1])))(x)
    x = Conv2D(256, (1, 1), padding='same', activation='relu')(x)
    return x
def conv_block(input, filters):
    """Double (3x3 Conv -> BN -> ReLU) block.

    Redefines the MobileNet `conv_block` above; because Python resolves names
    at call time, module order determines which definition callers get.
    """
    y = Conv2D(filters, kernel_size=(3, 3), strides=1, padding='same')(input)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Conv2D(filters, kernel_size=(3, 3), strides=1, padding='same')(y)
    y = BatchNormalization()(y)
    return Activation('relu')(y)
def up_conv(input, filters):
    """2x upsample followed by a single 3x3 Conv -> BN -> ReLU."""
    y = UpSampling2D()(input)
    y = Conv2D(filters, kernel_size=(3, 3), strides=1, padding='same')(y)
    y = BatchNormalization()(y)
    return Activation('relu')(y)
def Attention_block(input1, input2, filters):
    """Additive attention gate (Attention U-Net style).

    Computes a sigmoid mask from the gating signal `input1` and the skip
    tensor `input2`, and returns `input2` scaled by that mask.
    """
    gate = Conv2D(filters, kernel_size=1, strides=1, padding='same')(input1)
    gate = BatchNormalization()(gate)
    skip = Conv2D(filters, kernel_size=1, strides=1, padding='same')(input2)
    skip = BatchNormalization()(skip)
    attn = Activation('relu')(add([gate, skip]))
    attn = Conv2D(filters, kernel_size=1, strides=1, padding='same')(attn)
    attn = BatchNormalization()(attn)
    attn = Activation('sigmoid')(attn)
    return multiply([input2, attn])
def Recurrent_block(input, channel, t=2):
    """Recurrent conv block (R2U-Net): a 3x3 Conv -> BN -> ReLU applied `t` times.

    NOTE(review): `add([x, x])` simply doubles the first conv's activation
    instead of combining it with the previous iteration's output; the upstream
    implementation this mirrors does the same — confirm before "fixing", as
    any change alters the architecture.
    """
    for i in range(t):
        if i == 0:
            # First pass only: project the raw input to `channel` feature maps.
            x = Conv2D(channel, kernel_size=(3, 3), strides=1, padding='same')(input)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
        out = Conv2D(channel, kernel_size=(3, 3), strides=1, padding='same')(add([x, x]))
        out = BatchNormalization()(out)
        out = Activation('relu')(out)
    return out
def RRCNN_block(input, channel, t=2):
    """Residual recurrent block: 1x1 projection plus two stacked recurrent blocks,
    joined by an element-wise add (the residual connection)."""
    shortcut = Conv2D(channel, kernel_size=(1, 1), strides=1, padding='same')(input)
    recurrent = Recurrent_block(shortcut, channel, t=t)
    recurrent = Recurrent_block(recurrent, channel, t=t)
    return add([shortcut, recurrent])
def R2AttUNet(nClasses, input_height=224, input_width=224):
    """Residual Recurrent U-Net with attention gates (R2AttU-Net).

    Implementation reference: https://github.com/LeeJunHyun/Image_Segmentation

    Returns a Model producing (H*W, nClasses) softmax scores, with
    `outputHeight`/`outputWidth` attributes attached.

    Fixes vs. the original: the legacy `Model(input=..., output=...)` kwargs
    (removed in modern Keras) were replaced with `inputs`/`outputs`, and the
    shape-probe Model is built once instead of twice.
    """
    inputs = Input(shape=(input_height, input_width, 3))
    t = 2
    n1 = 32
    filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

    # Encoder: five RRCNN stages separated by 2x max-pooling.
    e1 = RRCNN_block(inputs, filters[0], t=t)
    e2 = RRCNN_block(MaxPooling2D(strides=2)(e1), filters[1], t=t)
    e3 = RRCNN_block(MaxPooling2D(strides=2)(e2), filters[2], t=t)
    e4 = RRCNN_block(MaxPooling2D(strides=2)(e3), filters[3], t=t)
    e5 = RRCNN_block(MaxPooling2D(strides=2)(e4), filters[4], t=t)

    # Decoder: upsample, attention-gate the skip connection, concat, conv.
    d5 = up_conv(e5, filters[3])
    x4 = Attention_block(d5, e4, filters[3])
    d5 = conv_block(Concatenate()([x4, d5]), filters[3])
    d4 = up_conv(d5, filters[2])
    x3 = Attention_block(d4, e3, filters[2])
    d4 = conv_block(Concatenate()([x3, d4]), filters[2])
    d3 = up_conv(d4, filters[1])
    x2 = Attention_block(d3, e2, filters[1])
    d3 = conv_block(Concatenate()([x2, d3]), filters[1])
    d2 = up_conv(d3, filters[0])
    x1 = Attention_block(d2, e1, filters[0])
    d2 = conv_block(Concatenate()([x1, d2]), filters[0])

    o = Conv2D(nClasses, (3, 3), padding='same')(d2)
    # Probe the static output shape once.
    o_shape = Model(inputs, o).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]
    out = Reshape((outputHeight * outputWidth, nClasses))(o)
    out = Activation('softmax')(out)

    model = Model(inputs=inputs, outputs=out)
    model.outputHeight = outputHeight
    model.outputWidth = outputWidth
    return model
def SEModule(input, ratio, out_dim):
    """Squeeze-and-excitation: per-channel gating via global average pooling,
    a bottleneck MLP (out_dim // ratio hidden units) and a sigmoid."""
    squeezed = GlobalAveragePooling2D()(input)
    gate = Dense(units=out_dim // ratio)(squeezed)
    gate = Activation('relu')(gate)
    gate = Dense(units=out_dim)(gate)
    gate = Activation('sigmoid')(gate)
    gate = Reshape((1, 1, out_dim))(gate)
    return multiply([input, gate])
def SEUnet(nClasses, input_height=224, input_width=224):
    """U-Net with a squeeze-and-excitation (SE) module after every conv stage.

    Returns a Model producing (H*W, nClasses) softmax scores, with
    `outputHeight`/`outputWidth` attributes attached.

    Fixes vs. the original: the legacy `Model(input=..., output=...)` kwargs
    (removed in modern Keras) were replaced with `inputs`/`outputs`, and the
    shape-probe Model is built once instead of twice.  The heavily repeated
    conv/BN blocks are factored into local helpers; the layer graph is
    unchanged.
    """
    def _double_conv(x, filters):
        # Two (3x3 Conv [fused ReLU] -> BN) stages, as in the original.
        for _ in range(2):
            x = Conv2D(filters, 3, activation='relu', padding='same',
                       kernel_initializer='he_normal')(x)
            x = BatchNormalization()(x)
        return x

    def _up_block(x, filters):
        # 2x upsample then a 2x2 conv -> BN, the original decoder step.
        x = Conv2D(filters, 2, activation='relu', padding='same',
                   kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(x))
        return BatchNormalization()(x)

    inputs = Input(shape=(input_height, input_width, 3))

    # Encoder: each stage is double-conv + SE, then 2x2 max-pool.
    conv1 = SEModule(_double_conv(inputs, 16), 4, 16)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = SEModule(_double_conv(pool1, 32), 8, 32)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = SEModule(_double_conv(pool2, 64), 8, 64)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = SEModule(_double_conv(pool3, 128), 16, 128)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = SEModule(_double_conv(pool4, 256), 16, 256)

    # Decoder: upsample, concat the encoder skip, double-conv + SE.
    up6 = _up_block(conv5, 128)
    conv6 = SEModule(_double_conv(concatenate([conv4, up6], axis=3), 128), 16, 128)
    up7 = _up_block(conv6, 64)
    conv7 = SEModule(_double_conv(concatenate([conv3, up7], axis=3), 64), 8, 64)
    up8 = _up_block(conv7, 32)
    conv8 = SEModule(_double_conv(concatenate([conv2, up8], axis=3), 32), 4, 32)
    up9 = _up_block(conv8, 16)
    conv9 = SEModule(_double_conv(concatenate([conv1, up9], axis=3), 16), 2, 16)

    conv10 = Conv2D(nClasses, (3, 3), padding='same')(conv9)
    conv10 = BatchNormalization()(conv10)

    # Probe the static output shape once.
    o_shape = Model(inputs, conv10).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]
    out = Reshape((outputHeight * outputWidth, nClasses))(conv10)
    out = Activation('softmax')(out)

    model = Model(inputs=inputs, outputs=out)
    model.outputHeight = outputHeight
    model.outputWidth = outputWidth
    return model
def convolution_block(x, filters, size, strides=(1, 1), padding='same', activation=True):
    """Conv2D -> BN, with an optional LeakyReLU(alpha=0.1).

    Fix: `activation` is now treated as a plain truthy flag.  The original
    compared `activation == True`, which silently skipped the activation for
    truthy non-boolean values (e.g. 2 or "yes").
    """
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    x = BatchNormalization()(x)
    if activation:
        x = LeakyReLU(alpha=0.1)(x)
    return x
def residual_block(blockInput, num_filters=16):
    """Pre-activation residual block: LeakyReLU + BN on the main branch, two
    convolution_blocks, then an element-wise add with the (separately
    batch-normalized) input."""
    branch = LeakyReLU(alpha=0.1)(blockInput)
    branch = BatchNormalization()(branch)
    skip = BatchNormalization()(blockInput)
    branch = convolution_block(branch, num_filters, (3, 3))
    branch = convolution_block(branch, num_filters, (3, 3), activation=False)
    return Add()([branch, skip])
def Unet_Xception_ResNetBlock(nClasses, input_height=224, input_width=224):
    """U-Net decoder with residual blocks on top of an (untrained) Xception encoder.

    Skip connections are taken from fixed Xception layer indices; two of them
    are zero-padded so spatial sizes line up with the transposed convs.

    Returns a Model producing (H*W, nClasses) softmax scores, with
    `outputHeight`/`outputWidth` attributes attached.

    Fixes vs. the original: the legacy `Model(input=..., output=...)` kwargs
    (removed in modern Keras) were replaced with `inputs`/`outputs`, and the
    shape-probe Model is built once instead of twice.
    """
    from keras.applications.xception import Xception
    backbone = Xception(input_shape=(input_height, input_width, 3), weights=None, include_top=False)
    inputs = backbone.input

    conv4 = backbone.layers[121].output
    conv4 = LeakyReLU(alpha=0.1)(conv4)
    pool4 = MaxPooling2D((2, 2))(conv4)
    pool4 = Dropout(0.1)(pool4)

    # Middle (bottleneck)
    convm = Conv2D(16 * 32, (3, 3), activation=None, padding="same")(pool4)
    convm = residual_block(convm, 16 * 32)
    convm = residual_block(convm, 16 * 32)
    convm = LeakyReLU(alpha=0.1)(convm)

    # 8 -> 16
    deconv4 = Conv2DTranspose(16 * 16, (3, 3), strides=(2, 2), padding="same")(convm)
    uconv4 = concatenate([deconv4, conv4])
    uconv4 = Dropout(0.1)(uconv4)
    uconv4 = Conv2D(16 * 16, (3, 3), activation=None, padding="same")(uconv4)
    uconv4 = residual_block(uconv4, 16 * 16)
    uconv4 = residual_block(uconv4, 16 * 16)
    uconv4 = LeakyReLU(alpha=0.1)(uconv4)

    # 16 -> 32
    deconv3 = Conv2DTranspose(16 * 8, (3, 3), strides=(2, 2), padding="same")(uconv4)
    conv3 = backbone.layers[31].output
    uconv3 = concatenate([deconv3, conv3])
    uconv3 = Dropout(0.1)(uconv3)
    uconv3 = Conv2D(16 * 8, (3, 3), activation=None, padding="same")(uconv3)
    uconv3 = residual_block(uconv3, 16 * 8)
    uconv3 = residual_block(uconv3, 16 * 8)
    uconv3 = LeakyReLU(alpha=0.1)(uconv3)

    # 32 -> 64; pad the skip tensor to match the deconv output size.
    deconv2 = Conv2DTranspose(16 * 4, (3, 3), strides=(2, 2), padding="same")(uconv3)
    conv2 = backbone.layers[21].output
    conv2 = ZeroPadding2D(((1, 0), (1, 0)))(conv2)
    uconv2 = concatenate([deconv2, conv2])
    uconv2 = Dropout(0.1)(uconv2)
    uconv2 = Conv2D(16 * 4, (3, 3), activation=None, padding="same")(uconv2)
    uconv2 = residual_block(uconv2, 16 * 4)
    uconv2 = residual_block(uconv2, 16 * 4)
    uconv2 = LeakyReLU(alpha=0.1)(uconv2)

    # 64 -> 128; again pad the skip tensor.
    deconv1 = Conv2DTranspose(16 * 2, (3, 3), strides=(2, 2), padding="same")(uconv2)
    conv1 = backbone.layers[11].output
    conv1 = ZeroPadding2D(((3, 0), (3, 0)))(conv1)
    uconv1 = concatenate([deconv1, conv1])
    uconv1 = Dropout(0.1)(uconv1)
    uconv1 = Conv2D(16 * 2, (3, 3), activation=None, padding="same")(uconv1)
    uconv1 = residual_block(uconv1, 16 * 2)
    uconv1 = residual_block(uconv1, 16 * 2)
    uconv1 = LeakyReLU(alpha=0.1)(uconv1)

    # 128 -> 256 (no skip connection at full resolution)
    uconv0 = Conv2DTranspose(16 * 1, (3, 3), strides=(2, 2), padding="same")(uconv1)
    uconv0 = Dropout(0.1)(uconv0)
    uconv0 = Conv2D(16 * 1, (3, 3), activation=None, padding="same")(uconv0)
    uconv0 = residual_block(uconv0, 16 * 1)
    uconv0 = residual_block(uconv0, 16 * 1)
    uconv0 = LeakyReLU(alpha=0.1)(uconv0)
    uconv0 = Dropout(0.1 / 2)(uconv0)

    out = Conv2D(nClasses, (3, 3), padding='same')(uconv0)
    out = BatchNormalization()(out)
    # Probe the static output shape once.
    o_shape = Model(inputs, out).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]
    out = Reshape((outputHeight * outputWidth, nClasses))(out)
    out = Activation('softmax')(out)

    model = Model(inputs=inputs, outputs=out)
    model.outputHeight = outputHeight
    model.outputWidth = outputWidth
    return model
def conv_block(input, filters):
    """Double (3x3 Conv -> BN -> ReLU) block.

    NOTE(review): this is the third definition of `conv_block` in this module;
    because Python resolves names at call time, functions that call
    `conv_block` after import (e.g. R2AttUNet) get this final definition.
    It is textually identical to the previous one.
    """
    out = Conv2D(filters, kernel_size=(3, 3), strides=1, padding='same')(input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Conv2D(filters, kernel_size=(3, 3), strides=1, padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    return out
def up_conv(input, filters):
    """2x upsample followed by 3x3 Conv -> BN -> ReLU.

    NOTE(review): redefines the earlier `up_conv` with identical logic; this
    final definition is the one callers receive at call time.
    """
    out = UpSampling2D()(input)
    out = Conv2D(filters, kernel_size=(3, 3), strides=1, padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    return out
def cse_block(prevlayer, prefix):
    """Channel squeeze-and-excitation: spatial mean -> two Dense layers ->
    per-channel sigmoid gate multiplied onto the input.  `prefix` feeds the
    Dense layer names."""
    channels = K.int_shape(prevlayer)[3]
    pooled = Lambda(lambda xin: K.mean(xin, axis=[1, 2]))(prevlayer)
    hidden = Dense(channels // 2, name=prefix + 'cse_lin1', activation='relu')(pooled)
    gate = Dense(channels, name=prefix + 'cse_lin2', activation='sigmoid')(hidden)
    return Multiply()([prevlayer, gate])
# channel squeeze and spatial excitation
def sse_block(prevlayer, prefix):
    """Spatial squeeze-and-excitation: a sigmoid conv mask multiplied onto the input.

    NOTE(review): the conv uses K.int_shape(prevlayer)[3] filters; the scSE
    paper's spatial branch uses a single 1x1 filter.  The original author
    flagged this too ("Bug? Should be 1 here?").  Confirm against the
    reference before changing — it alters the architecture.
    """
    conv = Conv2D(K.int_shape(prevlayer)[3], (1, 1), padding="same", kernel_initializer="he_normal",
                  activation='sigmoid', strides=(1, 1),
                  name=prefix + "_conv")(prevlayer)
    conv = Multiply(name=prefix + "_mul")([prevlayer, conv])
    return conv
# concurrent spatial and channel squeeze and channel excitation
def csse_block(x, prefix):
    """Concurrent spatial + channel squeeze-and-excitation: the sum of the
    cse and sse branches.  (The layer name ends in "_mul" although the
    combination is an Add — kept for compatibility with the original.)"""
    return Add(name=prefix + "_csse_mul")([cse_block(x, prefix), sse_block(x, prefix)])
def scSEUnet(nClasses, input_height=224, input_width=224):
    """U-Net with concurrent spatial/channel SE (csse) blocks on the last
    three decoder stages (conv7, conv8, conv9); the encoder is a plain U-Net.

    Returns a Model producing (H*W, nClasses) softmax scores, with
    `outputHeight`/`outputWidth` attributes attached.

    Fixes vs. the original: the legacy `Model(input=..., output=...)` kwargs
    (removed in modern Keras) were replaced with `inputs`/`outputs`, and the
    shape-probe Model is built once instead of twice.  The repeated conv/BN
    blocks are factored into local helpers; the layer graph is unchanged.
    """
    def _double_conv(x, filters):
        # Two (3x3 Conv [fused ReLU] -> BN) stages, as in the original.
        for _ in range(2):
            x = Conv2D(filters, 3, activation='relu', padding='same',
                       kernel_initializer='he_normal')(x)
            x = BatchNormalization()(x)
        return x

    def _up_block(x, filters):
        # 2x upsample then a 2x2 conv -> BN, the original decoder step.
        x = Conv2D(filters, 2, activation='relu', padding='same',
                   kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(x))
        return BatchNormalization()(x)

    inputs = Input(shape=(input_height, input_width, 3))

    # Encoder.
    conv1 = _double_conv(inputs, 16)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = _double_conv(pool1, 32)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = _double_conv(pool2, 64)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = _double_conv(pool3, 128)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = _double_conv(pool4, 256)

    # Decoder; csse blocks only on the three highest-resolution stages.
    up6 = _up_block(conv5, 128)
    conv6 = _double_conv(concatenate([conv4, up6], axis=3), 128)
    up7 = _up_block(conv6, 64)
    conv7 = csse_block(_double_conv(concatenate([conv3, up7], axis=3), 64), prefix="conv7")
    up8 = _up_block(conv7, 32)
    conv8 = csse_block(_double_conv(concatenate([conv2, up8], axis=3), 32), prefix="conv8")
    up9 = _up_block(conv8, 16)
    conv9 = csse_block(_double_conv(concatenate([conv1, up9], axis=3), 16), prefix="conv9")

    conv10 = Conv2D(nClasses, (3, 3), padding='same')(conv9)
    conv10 = BatchNormalization()(conv10)

    # Probe the static output shape once.
    o_shape = Model(inputs, conv10).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]
    out = Reshape((outputHeight * outputWidth, nClasses))(conv10)
    out = Activation('softmax')(out)

    model = Model(inputs=inputs, outputs=out)
    model.outputHeight = outputHeight
    model.outputWidth = outputWidth
    return model
# Model from https://github.com/BBuf/Keras-Semantic-Segmentation
class TestSemanticSegmentation(unittest.TestCase):
    """Builds each segmentation model above, converts it with mock_keras2onnx,
    and checks ONNX Runtime output against Keras `predict` via
    `run_keras_and_ort`.  Tests decorated with `test_level_0` are skipped in
    the quick test tier; `self.model_files` collects ONNX files for cleanup.
    """
    def setUp(self):
        self.model_files = []

    def tearDown(self):
        # Delete any model files written during the test.
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_ResACNet(self):
        K.clear_session()
        input_shape = (224, 224, 3)
        inputs = Input(shape=input_shape, name="inputs")
        y = ResACNet(inputs, depth=50)
        keras_model = keras.models.Model(inputs=inputs, outputs=y)
        data = np.random.rand(2, 224, 224, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_AttUNet(self):
        K.clear_session()
        keras_model = AttUNet(80)
        data = np.random.rand(2, 224, 224, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_DeepLabV3Plus(self):
        K.clear_session()
        keras_model = DeeplabV3_plus(input_height=224, input_width=224)
        data = np.random.rand(1, 224, 224, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    def test_ENet(self):
        K.clear_session()
        # ENet is built for 256x256 inputs, unlike the other 224x224 tests.
        keras_model = ENet(80)
        data = np.random.rand(1, 256, 256, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    def test_GhostNet(self):
        K.clear_session()
        input_shape = (224, 224, 3)
        inputs = Input(shape=input_shape, name="inputs")
        y = GhostNet(inputs, 80)
        keras_model = keras.models.Model(inputs=inputs, outputs=y)
        data = np.random.rand(2, 224, 224, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_HRNet(self):
        K.clear_session()
        keras_model = HRNet(80)
        data = np.random.rand(2, 224, 224, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    @unittest.skipIf(get_maximum_opset_supported() < 10,
                     reason="Upsample op need scale value >= 1.0.")
    def test_ICNet(self):
        K.clear_session()
        keras_model = ICNet(80)
        data = np.random.rand(2, 224, 224, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    @unittest.skip("TODO: perf.")
    def test_MobileNetFCN8(self):
        K.clear_session()
        keras_model = MobileNetFCN8(80)
        data = np.random.rand(2, 512, 512, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_NestedUNet(self):
        K.clear_session()
        keras_model = NestedUNet(80)
        data = np.random.rand(2, 224, 224, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_R2AttUNet(self):
        K.clear_session()
        keras_model = R2AttUNet(80)
        data = np.random.rand(2, 224, 224, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_SEUnet(self):
        K.clear_session()
        keras_model = SEUnet(80)
        data = np.random.rand(2, 224, 224, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_Unet_Xception_ResNetBlock(self):
        K.clear_session()
        keras_model = Unet_Xception_ResNetBlock(80)
        data = np.random.rand(2, 224, 224, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    def test_scSEUnet(self):
        K.clear_session()
        keras_model = scSEUnet(80)
        data = np.random.rand(1, 224, 224, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))
# Allow running this test module directly with `python`.
if __name__ == "__main__":
    unittest.main()
| 82,051 | 36.552403 | 149 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_acgan.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
from packaging.version import Version
# Short module-level aliases for the Keras layer/model classes used below.
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/acgan/acgan.py
class ACGAN():
    """Auxiliary-Classifier GAN (AC-GAN), mirroring
    https://github.com/eriklindernoren/Keras-GAN/blob/master/acgan/acgan.py.

    Only graph construction is kept — no compile/training happens here, since
    the test just needs `self.combined` for ONNX conversion.
    """
    def __init__(self):
        # Input shape: 28x28 grayscale (MNIST-sized) images.
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.num_classes = 10
        self.latent_dim = 100
        # Build the discriminator (the reference code also compiles it;
        # compilation is unnecessary for conversion and is not done here).
        self.discriminator = self.build_discriminator()
        # Build the generator
        self.generator = self.build_generator()
        # The generator takes noise and the target label as input
        # and generates the corresponding digit of that label
        noise = Input(shape=(self.latent_dim,))
        label = Input(shape=(1,))
        img = self.generator([noise, label])
        # For the combined model we will only train the generator
        self.discriminator.trainable = False
        # The discriminator takes generated image as input and determines validity
        # and the label of that image
        valid, target_label = self.discriminator(img)
        # The combined model (stacked generator and discriminator)
        # Trains the generator to fool the discriminator
        self.combined = Model([noise, label], [valid, target_label])

    def build_generator(self):
        """Return a Model mapping (noise, integer label) -> 28x28x1 tanh image."""
        model = Sequential()
        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
        model.add(Activation("tanh"))
        noise = Input(shape=(self.latent_dim,))
        label = Input(shape=(1,), dtype='int32')
        # Condition the noise on the class by multiplying with a label embedding.
        label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))
        model_input = multiply([noise, label_embedding])
        img = model(model_input)
        return Model([noise, label], img)

    def build_discriminator(self):
        """Return a Model mapping an image -> (real/fake sigmoid, class softmax)."""
        model = Sequential()
        model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        img = Input(shape=self.img_shape)
        # Extract feature representation
        features = model(img)
        # Determine validity and label of the image
        validity = Dense(1, activation="sigmoid")(features)
        label = Dense(self.num_classes, activation="softmax")(features)
        return Model(img, [validity, label])
class TestACGAN(unittest.TestCase):
    """Converts the combined AC-GAN model to ONNX and compares ONNX Runtime
    output against Keras `predict`."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        # Delete any model files written during the test.
        for model_file in self.model_files:
            os.remove(model_file)

    @unittest.skipIf(Version(onnx.__version__) < Version("1.5.0"),
                     "Not supported before onnx 1.5.0")
    def test_ACGAN(self):
        keras_model = ACGAN().combined
        batch = 5
        noise = np.random.rand(batch, 100).astype(np.float32)
        labels = np.random.rand(batch, 1).astype(np.float32)
        expected = keras_model.predict([noise, labels])
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        feeds = {keras_model.input_names[0]: noise, keras_model.input_names[1]: labels}
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, feeds, expected, self.model_files))
# Allow running this test module directly with `python`.
if __name__ == "__main__":
    unittest.main()
| 5,263 | 34.093333 | 168 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_cogan.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/cogan/cogan.py
class COGAN():
    """Reference: https://wiseodd.github.io/techblog/2017/02/18/coupled_gan/"""
    def __init__(self):
        # 28x28 single-channel images (MNIST-like geometry).
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100
        # Build and compile the discriminator
        self.d1, self.d2 = self.build_discriminators()
        # Build the generator
        self.g1, self.g2 = self.build_generators()
        # The generator takes noise as input and generated imgs
        z = Input(shape=(self.latent_dim,))
        img1 = self.g1(z)
        img2 = self.g2(z)
        # For the combined model we will only train the generators
        self.d1.trainable = False
        self.d2.trainable = False
        # The valid takes generated images as input and determines validity
        valid1 = self.d1(img1)
        valid2 = self.d2(img2)
        # The combined model (stacked generators and discriminators)
        # Trains generators to fool discriminators
        self.combined = Model(z, [valid1, valid2])

    def build_generators(self):
        """Return two generators that share their first two dense layers.

        Weight sharing in the early layers is the core coupled-GAN idea:
        both domains are decoded from a common latent representation.
        """
        # Shared weights between generators
        model = Sequential()
        model.add(Dense(256, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        noise = Input(shape=(self.latent_dim,))
        feature_repr = model(noise)
        # Generator 1
        g1 = Dense(1024)(feature_repr)
        g1 = LeakyReLU(alpha=0.2)(g1)
        g1 = BatchNormalization(momentum=0.8)(g1)
        g1 = Dense(np.prod(self.img_shape), activation='tanh')(g1)
        img1 = Reshape(self.img_shape)(g1)
        # Generator 2
        g2 = Dense(1024)(feature_repr)
        g2 = LeakyReLU(alpha=0.2)(g2)
        g2 = BatchNormalization(momentum=0.8)(g2)
        g2 = Dense(np.prod(self.img_shape), activation='tanh')(g2)
        img2 = Reshape(self.img_shape)(g2)
        return Model(noise, img1), Model(noise, img2)

    def build_discriminators(self):
        """Return two discriminators sharing a common feature extractor;
        only the final validity head differs per domain."""
        img1 = Input(shape=self.img_shape)
        img2 = Input(shape=self.img_shape)
        # Shared discriminator layers
        model = Sequential()
        model.add(Flatten(input_shape=self.img_shape))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(256))
        model.add(LeakyReLU(alpha=0.2))
        img1_embedding = model(img1)
        img2_embedding = model(img2)
        # Discriminator 1
        validity1 = Dense(1, activation='sigmoid')(img1_embedding)
        # Discriminator 2
        validity2 = Dense(1, activation='sigmoid')(img2_embedding)
        return Model(img1, validity1), Model(img2, validity2)
class TestCOGAN(unittest.TestCase):
    """ONNX conversion / runtime parity test for the coupled-GAN model."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        # Delete every model file produced while the test ran.
        for produced in self.model_files:
            os.remove(produced)

    def test_COGAN(self):
        combined = COGAN().combined
        noise = np.random.rand(5, 100).astype(np.float32)
        expected = combined.predict(noise)
        converted = mock_keras2onnx.convert_keras(combined, combined.name)
        self.assertTrue(run_onnx_runtime(converted.graph.name, converted, noise,
                                         expected, self.model_files))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 4,356 | 30.572464 | 107 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/run_all_v2.py | # SPDX-License-Identifier: Apache-2.0
"""Run the nightly keras2onnx v2 application test modules under pytest.

Each module runs in a child process via ``os.system``; the script raises
AssertionError if any module reports a failure so CI marks the run failed.
"""
import os

# Make the shared unit-test helpers and the repository root importable from
# the child pytest processes.
os.environ["PYTHONPATH"] = \
    os.environ.get("PYTHONPATH", "") + os.pathsep + "../../keras2onnx_unit_tests" + os.pathsep + "../../../"
# Let conversion errors propagate instead of being swallowed by tf2onnx.
os.environ["TF2ONNX_CATCH_ERRORS"] = "FALSE"

files = ['test_keras_applications_v2.py', 'test_transformers.py', 'test_chatbot.py', 'test_efn.py',
         'test_resnext.py']
files.sort()

res_final = True
for f_ in files:
    # f_[5:-3] strips the leading "test_" and trailing ".py" for the junit report name.
    res = os.system("pytest " + f_ + " --no-cov "
                    "--doctest-modules --junitxml=junit/test-results-" + f_[5:-3] + ".xml")
    # os.system returns a non-zero encoded status on any failure (including
    # signal-terminated children), so test != 0 rather than > 0.
    if res != 0:
        res_final = False

# Keep the original failure mode (AssertionError) but drop the redundant
# `if res_final: assert(True) else: assert(False)` branching.
assert res_final, "one or more nightly test modules failed"
| 652 | 27.391304 | 108 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_srgan.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import onnx
import numpy as np
from mock_keras2onnx.proto import keras
from keras.applications import VGG19
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
Activation = keras.layers.Activation
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/srgan/srgan.py
class SRGAN():
    """Super-resolution GAN: upscales 64x64 LR images to 256x256 HR images,
    with a VGG19 feature extractor for the perceptual loss and a
    PatchGAN-style discriminator. Only model construction happens here."""
    def __init__(self):
        # Input shape
        self.channels = 3
        self.lr_height = 64                 # Low resolution height
        self.lr_width = 64                  # Low resolution width
        self.lr_shape = (self.lr_height, self.lr_width, self.channels)
        self.hr_height = self.lr_height*4   # High resolution height
        self.hr_width = self.lr_width*4     # High resolution width
        self.hr_shape = (self.hr_height, self.hr_width, self.channels)
        # Number of residual blocks in the generator
        self.n_residual_blocks = 16
        # We use a pre-trained VGG19 model to extract image features from the high resolution
        # and the generated high resolution images and minimize the mse between them
        self.vgg = self.build_vgg()
        self.vgg.trainable = False
        # Calculate output shape of D (PatchGAN)
        patch = int(self.hr_height / 2**4)
        self.disc_patch = (patch, patch, 1)
        # Number of filters in the first layer of G and D
        self.gf = 64
        self.df = 64
        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        # Build the generator
        self.generator = self.build_generator()
        # High res. and low res. images
        img_hr = Input(shape=self.hr_shape)
        img_lr = Input(shape=self.lr_shape)
        # Generate high res. version from low res.
        fake_hr = self.generator(img_lr)
        # Extract image features of the generated img
        fake_features = self.vgg(fake_hr)
        # For the combined model we will only train the generator
        self.discriminator.trainable = False
        # Discriminator determines validity of generated high res. images
        validity = self.discriminator(fake_hr)
        self.combined = Model([img_lr, img_hr], [validity, fake_features])

    def build_vgg(self):
        """
        Builds a pre-trained VGG19 model that outputs image features extracted at the
        third block of the model
        """
        vgg = VGG19(weights="imagenet")
        # Set outputs to outputs of last conv. layer in block 3
        # See architecture at: https://github.com/keras-team/keras/blob/master/keras/applications/vgg19.py
        vgg.outputs = [vgg.layers[9].output]
        img = Input(shape=self.hr_shape)
        # Extract image features
        img_features = vgg(img)
        return Model(img, img_features)

    def build_generator(self):
        """Build the SRResNet-style generator: pre-residual conv, a chain of
        residual blocks, a post-residual skip connection, then two 2x
        upsampling stages for the 4x total scale factor."""
        def residual_block(layer_input, filters):
            """Residual block described in paper"""
            d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)
            d = Activation('relu')(d)
            d = BatchNormalization(momentum=0.8)(d)
            d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)
            d = BatchNormalization(momentum=0.8)(d)
            d = Add()([d, layer_input])
            return d

        def deconv2d(layer_input):
            """Layers used during upsampling"""
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(256, kernel_size=3, strides=1, padding='same')(u)
            u = Activation('relu')(u)
            return u

        # Low resolution image input
        img_lr = Input(shape=self.lr_shape)
        # Pre-residual block
        c1 = Conv2D(64, kernel_size=9, strides=1, padding='same')(img_lr)
        c1 = Activation('relu')(c1)
        # Propogate through residual blocks
        r = residual_block(c1, self.gf)
        for _ in range(self.n_residual_blocks - 1):
            r = residual_block(r, self.gf)
        # Post-residual block
        c2 = Conv2D(64, kernel_size=3, strides=1, padding='same')(r)
        c2 = BatchNormalization(momentum=0.8)(c2)
        c2 = Add()([c2, c1])
        # Upsampling
        u1 = deconv2d(c2)
        u2 = deconv2d(u1)
        # Generate high resolution output
        gen_hr = Conv2D(self.channels, kernel_size=9, strides=1, padding='same', activation='tanh')(u2)
        return Model(img_lr, gen_hr)

    def build_discriminator(self):
        """Build the discriminator: a stack of strided conv blocks that
        halve the spatial size four times, then a per-patch sigmoid head."""
        def d_block(layer_input, filters, strides=1, bn=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            return d

        # Input img
        d0 = Input(shape=self.hr_shape)
        d1 = d_block(d0, self.df, bn=False)
        d2 = d_block(d1, self.df, strides=2)
        d3 = d_block(d2, self.df*2)
        d4 = d_block(d3, self.df*2, strides=2)
        d5 = d_block(d4, self.df*4)
        d6 = d_block(d5, self.df*4, strides=2)
        d7 = d_block(d6, self.df*8)
        d8 = d_block(d7, self.df*8, strides=2)
        d9 = Dense(self.df*16)(d8)
        d10 = LeakyReLU(alpha=0.2)(d9)
        validity = Dense(1, activation='sigmoid')(d10)
        return Model(d0, validity)
class TestSRGAN(unittest.TestCase):
    """ONNX conversion / runtime parity test for the combined SRGAN model."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        # Delete every model file produced while the test ran.
        for produced in self.model_files:
            os.remove(produced)

    def test_SRGAN(self):
        combined = SRGAN().combined
        low_res = np.random.rand(5, 64, 64, 3).astype(np.float32)
        high_res = np.random.rand(5, 256, 256, 3).astype(np.float32)
        expected = combined.predict([low_res, high_res])
        converted = mock_keras2onnx.convert_keras(combined, combined.name)
        feeds = {combined.input_names[0]: low_res,
                 combined.input_names[1]: high_res}
        self.assertTrue(run_onnx_runtime(converted.graph.name, converted, feeds,
                                         expected, self.model_files,
                                         rtol=1.e-2, atol=1.e-3))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 6,736 | 33.372449 | 192 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_series_net.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from keras.regularizers import l2
from keras.initializers import TruncatedNormal
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv1D = keras.layers.Conv1D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
def DC_CNN_Block(nb_filter, filter_length, dilation, l2_layer_reg):
    """Return a closure that appends one dilated causal-conv residual block.

    The block produces two tensors: the residual path output (input plus a
    1x1 projection of the activated conv) and a 1x1 "skip" output that the
    model later sums across all blocks.
    """
    def _init():
        # One fresh initializer per layer, same configuration as the original.
        return TruncatedNormal(mean=0.0, stddev=0.05, seed=42)

    def f(input_):
        residual = input_
        hidden = Conv1D(filters=nb_filter, kernel_size=filter_length,
                        dilation_rate=dilation, activation='linear',
                        padding='causal', use_bias=False,
                        kernel_initializer=_init(),
                        kernel_regularizer=l2(l2_layer_reg))(input_)
        hidden = Activation('selu')(hidden)
        skip_out = Conv1D(1, 1, activation='linear', use_bias=False,
                          kernel_initializer=_init(),
                          kernel_regularizer=l2(l2_layer_reg))(hidden)
        projected = Conv1D(1, 1, activation='linear', use_bias=False,
                           kernel_initializer=_init(),
                           kernel_regularizer=l2(l2_layer_reg))(hidden)
        return Add()([residual, projected]), skip_out

    return f
def DC_CNN_Model(length):
    """Build the SeriesNet dilated-causal CNN for time-series forecasting.

    Seven residual blocks with exponentially growing dilation (1..64) give
    the network a wide receptive field; their skip outputs are summed and
    projected to a single channel.

    Args:
        length: number of time steps per input window; input shape is (length, 1).

    Returns:
        An uncompiled Keras ``Model`` mapping the input series to the forecast.
    """
    # The original named this local `input`, shadowing the builtin.
    series_in = Input(shape=(length, 1))
    l1a, l1b = DC_CNN_Block(32, 2, 1, 0.001)(series_in)
    l2a, l2b = DC_CNN_Block(32, 2, 2, 0.001)(l1a)
    l3a, l3b = DC_CNN_Block(32, 2, 4, 0.001)(l2a)
    l4a, l4b = DC_CNN_Block(32, 2, 8, 0.001)(l3a)
    l5a, l5b = DC_CNN_Block(32, 2, 16, 0.001)(l4a)
    l6a, l6b = DC_CNN_Block(32, 2, 32, 0.001)(l5a)
    l6b = Dropout(0.8)(l6b)  # dropout used to limit influence of earlier data
    l7a, l7b = DC_CNN_Block(32, 2, 64, 0.001)(l6a)
    l7b = Dropout(0.8)(l7b)  # dropout used to limit influence of earlier data
    l8 = Add()([l1b, l2b, l3b, l4b, l5b, l6b, l7b])
    l9 = Activation('relu')(l8)
    l21 = Conv1D(1, 1, activation='linear', use_bias=False,
                 kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=42),
                 kernel_regularizer=l2(0.001))(l9)
    # `Model(input=..., output=...)` keywords were removed in Keras 2.x; the
    # supported constructor arguments are `inputs`/`outputs`.
    model = Model(inputs=series_in, outputs=l21)
    return model
# Model from https://github.com/kristpapadopoulos/seriesnet
class TestSeriesNet(unittest.TestCase):
    """Converts the SeriesNet dilated-causal CNN to ONNX and compares outputs."""
    def setUp(self):
        # Model files written by the ONNX helpers; removed again in tearDown.
        self.model_files = []
    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)
    # NOTE(review): skipIf(False, ...) never skips anything; the module imports
    # `test_level_0` but never uses it, which suggests the condition was meant
    # to consult that flag — confirm intended gating before changing.
    @unittest.skipIf(False,
                     "Test level 0 only.")
    def test_series_net(self):
        """Round-trip a 20-step SeriesNet through keras2onnx and ONNXRuntime."""
        K.clear_session()
        keras_model = DC_CNN_Model(20)
        data = np.random.rand(2000, 20, 1).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 4,217 | 34.445378 | 120 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_wgan_gp.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
from keras.layers.merge import _Merge
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
K = keras.backend
class RandomWeightedAverage(_Merge):
    """Provides a (random) weighted average between real and generated image samples"""
    def _merge_function(self, inputs):
        # The original hard-coded a batch size of 32 when sampling alpha,
        # which only works when the model is fed batches of exactly 32.
        # Derive the batch dimension from the incoming tensor instead; the
        # per-sample uniform weighting is otherwise unchanged.
        batch_size = K.shape(inputs[0])[0]
        alpha = K.random_uniform((batch_size, 1, 1, 1))
        return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/wgan_gp/wgan_gp.py
class WGANGP():
    """Wasserstein GAN with gradient penalty: builds the generator, the
    critic, and the two training graphs (critic model and generator model).
    Only model construction happens here — no compile/train calls."""
    def __init__(self):
        # 28x28 single-channel images (MNIST-like geometry).
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100
        # Following parameter and optimizer set as recommended in paper
        self.n_critic = 5
        # Build the generator and critic
        self.generator = self.build_generator()
        self.critic = self.build_critic()
        #-------------------------------
        # Construct Computational Graph
        #       for the Critic
        #-------------------------------
        # Freeze generator's layers while training critic
        self.generator.trainable = False
        # Image input (real sample)
        real_img = Input(shape=self.img_shape)
        # Noise input
        z_disc = Input(shape=(self.latent_dim,))
        # Generate image based of noise (fake sample)
        fake_img = self.generator(z_disc)
        # Discriminator determines validity of the real and fake images
        fake = self.critic(fake_img)
        valid = self.critic(real_img)
        # Construct weighted average between real and fake images
        interpolated_img = RandomWeightedAverage()([real_img, fake_img])
        # Determine validity of weighted sample
        validity_interpolated = self.critic(interpolated_img)
        self.critic_model = Model(inputs=[real_img, z_disc],
                                  outputs=[valid, fake, validity_interpolated])
        #-------------------------------
        # Construct Computational Graph
        #         for Generator
        #-------------------------------
        # For the generator we freeze the critic's layers
        self.critic.trainable = False
        self.generator.trainable = True
        # Sampled noise for input to generator
        z_gen = Input(shape=(self.latent_dim,))
        # Generate images based of noise
        img = self.generator(z_gen)
        # Discriminator determines validity
        valid = self.critic(img)
        # Defines generator model
        self.generator_model = Model(z_gen, valid)

    def build_generator(self):
        """Build the generator: dense projection to 7x7x128, then two 2x
        upsampling conv stages to 28x28, finishing with a tanh image."""
        model = Sequential()
        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
        model.add(Activation("tanh"))
        noise = Input(shape=(self.latent_dim,))
        img = model(noise)
        return Model(noise, img)

    def build_critic(self):
        """Build the critic: strided conv stack ending in a single linear
        score (no sigmoid — Wasserstein critics output unbounded values)."""
        model = Sequential()
        model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1))
        img = Input(shape=self.img_shape)
        validity = model(img)
        return Model(img, validity)
class TestWGANGP(unittest.TestCase):
    """ONNX conversion / runtime parity test for the WGAN-GP generator model."""

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        # Delete every model file produced while the test ran.
        for produced in self.model_files:
            os.remove(produced)

    def test_WGANGP(self):
        generator_model = WGANGP().generator_model
        noise = np.random.rand(5, 100).astype(np.float32)
        expected = generator_model.predict(noise)
        converted = mock_keras2onnx.convert_keras(generator_model, generator_model.name)
        self.assertTrue(run_onnx_runtime(converted.graph.name, converted, noise,
                                         expected, self.model_files))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 5,803 | 32.94152 | 107 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_transformers.py | # SPDX-License-Identifier: Apache-2.0
import os
from os.path import dirname, abspath
import unittest
import sys
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
import json
import urllib.request
import pickle
import numpy as np
import tensorflow as tf
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
import mock_keras2onnx
from mock_keras2onnx.proto import keras, is_tensorflow_older_than
from test_utils import is_bloburl_access, run_onnx_runtime
# Full transformer coverage is opt-in (it is slow); it is enabled only when
# the ENABLE_FULL_TRANSFORMER_TEST environment variable is set to a
# non-"0" value.
enable_full_transformer_test = False
if os.environ.get('ENABLE_FULL_TRANSFORMER_TEST', '0') != '0':
    # The original assigned `enable_transformer_test` here (a typo), so the
    # environment variable could never actually enable the extra tests.
    enable_full_transformer_test = True

CONVERTER_TRANSFERMER_PATH = r'https://lotus.blob.core.windows.net/converter-models/transformer_tokenizer/'
@unittest.skipIf(is_tensorflow_older_than('2.1.0'),
"Transformers conversion need tensorflow 2.1.0+")
@unittest.skipIf(not is_bloburl_access(CONVERTER_TRANSFERMER_PATH), "Model blob url can't access.")
class TestTransformers(unittest.TestCase):
text_str = 'The quick brown fox jumps over lazy dog.'
def setUp(self):
    # Collect model files written by the ONNX helpers so tearDown can delete them.
    self.model_files = []
def tearDown(self):
    # Remove every model file recorded during the test run.
    for fl in self.model_files:
        os.remove(fl)
def _get_token_path(self, file_name):
    """Return the full blob-storage URL for a pickled tokenizer file.

    Reuses the module-level ``CONVERTER_TRANSFERMER_PATH`` constant instead
    of duplicating the identical URL literal, so the location is defined in
    exactly one place.
    """
    return CONVERTER_TRANSFERMER_PATH + file_name
def _get_tokenzier(self, tokenizer_file):
    """Load a pickled transformers tokenizer, downloading it on first use.

    NOTE(review): unpickling downloaded content executes arbitrary code if
    the blob is tampered with; acceptable here only because the source is a
    project-controlled storage account.
    """
    token_path = self._get_token_path(tokenizer_file)
    # Cache the download next to the test; re-use it on subsequent runs.
    if not os.path.exists(tokenizer_file):
        urllib.request.urlretrieve(token_path, tokenizer_file)
    with open(tokenizer_file, 'rb') as handle:
        tokenizer = pickle.load(handle)
    return tokenizer
def _prepare_inputs(self, tokenizer, batch_size=3):
    """Tokenize the fixed sample sentence and replicate it into a batch.

    Returns a (text, tf_inputs, numpy_inputs) triple: `tf_inputs` feeds the
    TF/Keras model while `numpy_inputs` feeds ONNXRuntime with the same values.
    """
    raw_data = json.dumps({
        'text': self.text_str
    })
    text = json.loads(raw_data)['text']
    # The tokenizers are generated using transformers 2.5.0, but model_max_length is introduced and needed in 2.9.0.
    if not hasattr(tokenizer, 'model_max_length'):
        tokenizer.model_max_length = 1024
    inputs_raw = tokenizer.encode_plus(text, add_special_tokens=True)
    # Drop positions whose input_ids are None and keep the other fields aligned.
    idx_not_None = [i_ for i_, v_ in enumerate(inputs_raw.data['input_ids']) if v_ is not None]
    input_raw_not_None = inputs_raw if len(idx_not_None) == len(inputs_raw.data['input_ids']) else \
        {k_: [v_[i_] for i_ in idx_not_None] for k_, v_ in inputs_raw.items()}
    # Tile each field along a new batch axis so every row is identical.
    inputs_onnx = {k_: np.repeat(np.expand_dims(v_, axis=0), batch_size, axis=0) for k_, v_ in input_raw_not_None.items()}
    inputs = {k_: tf.constant(v_) for k_, v_ in inputs_onnx.items()}
    return text, inputs, inputs_onnx
@unittest.skip("Output shape mismatch for tf model prediction.")
def test_3layer_gpt2(self):
    """Convert a 3-layer random GPT-2 and check ONNXRuntime parity (currently skipped)."""
    from transformers import GPT2Config, TFGPT2Model, BertTokenizer
    mock_keras2onnx.proto.keras.backend.set_learning_phase(0)
    config = GPT2Config(n_layer=3)
    model = TFGPT2Model(config)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    # Re-encode as TF tensors for the model; inputs_onnx keeps the numpy copies.
    inputs = tokenizer.encode_plus(text, add_special_tokens=True, return_tensors='tf')
    predictions = model.predict(inputs)
    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFBertModel(self):
    """Convert a randomly initialized TFBertModel and check ONNXRuntime parity."""
    from transformers import BertConfig, TFBertModel
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertModel(config)
    predictions = model.predict(inputs)
    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                         atol=1.e-4))
@unittest.skipIf(not enable_full_transformer_test, "Full transfomer test is not enabled")
def test_TFBertForPreTraining(self):
    """Convert TFBertForPreTraining and check ONNXRuntime parity (full-suite only)."""
    from transformers import BertConfig, TFBertForPreTraining
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForPreTraining(config)
    predictions = model.predict(inputs)
    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                         atol=1.e-4))
def test_TFBertForMaskedLM(self):
    """Convert TFBertForMaskedLM and check ONNXRuntime parity."""
    from transformers import BertConfig, TFBertForMaskedLM
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForMaskedLM(config)
    predictions = model.predict(inputs)
    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                         atol=1.e-4))
@unittest.skipIf(not enable_full_transformer_test, "Full transfomer test is not enabled")
def test_TFBertForNextSentencePrediction(self):
    """Convert TFBertForNextSentencePrediction and check ONNXRuntime parity (full-suite only)."""
    from transformers import BertConfig, TFBertForNextSentencePrediction
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForNextSentencePrediction(config)
    predictions = model.predict(inputs)
    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFBertForSequenceClassification(self):
    """Convert TFBertForSequenceClassification and check ONNXRuntime parity."""
    from transformers import BertConfig, TFBertForSequenceClassification
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForSequenceClassification(config)
    predictions = model.predict(inputs)
    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFBertForTokenClassification(self):
    """Convert TFBertForTokenClassification and check ONNXRuntime parity."""
    from transformers import BertConfig, TFBertForTokenClassification
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForTokenClassification(config)
    predictions = model.predict(inputs)
    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFBertForQuestionAnswering(self):
    """Convert TFBertForQuestionAnswering and check ONNXRuntime parity."""
    from transformers import BertConfig, TFBertForQuestionAnswering
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForQuestionAnswering(config)
    predictions = model.predict(inputs)
    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFGPT2(self):
    """Convert the GPT-2 model family and check ONNXRuntime parity.

    The full suite also covers the LM-head and double-heads variants;
    otherwise only the base TFGPT2Model is converted.
    """
    if enable_full_transformer_test:
        from transformers import GPT2Config, TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel
        model_list = [TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel]
    else:
        from transformers import GPT2Config, TFGPT2Model
        model_list = [TFGPT2Model]
    # pretrained_weights = 'gpt2'
    tokenizer_file = 'gpt2_gpt2.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = GPT2Config()
    for model_instance_ in model_list:
        keras.backend.clear_session()
        model = model_instance_(config)
        model._set_inputs(inputs)
        predictions_original = model(inputs)
        # Flatten: first output tensor plus the per-layer past tensors as numpy.
        predictions = [predictions_original[0]] + list(v_.numpy() for v_ in predictions_original[1])
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                             atol=1.e-4))
@unittest.skipIf(get_maximum_opset_supported() < 12, "Einsum is not supported until opset 12.")
def test_TFXLNet(self):
    """Convert the XLNet model family (2 layers) and check ONNXRuntime parity."""
    if enable_full_transformer_test:
        from transformers import XLNetConfig, TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, \
            TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple, XLNetTokenizer
        model_list = [TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, \
            TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple]
    else:
        from transformers import XLNetConfig, TFXLNetModel, XLNetTokenizer
        model_list = [TFXLNetModel]
    # XLNetTokenizer need SentencePiece, so the pickle file does not work here.
    tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
    config = XLNetConfig(n_layer=2)
    # The model with input mask has MatrixDiagV3 which is not a registered function/op
    token = np.asarray(tokenizer.encode(self.text_str, add_special_tokens=True), dtype=np.int32)
    inputs_onnx = {'input_1': np.expand_dims(token, axis=0)}
    inputs = tf.constant(token)[None, :]  # Batch size 1
    for model_instance_ in model_list:
        keras.backend.clear_session()
        model = model_instance_(config)
        predictions = model.predict(inputs)
        onnx_model = mock_keras2onnx.convert_keras(model)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                                         atol=1.e-4))
@unittest.skipIf(not enable_full_transformer_test, "Full transfomer test is not enabled")
def test_TFOpenAIGPTModel(self):
    """Convert TFOpenAIGPTModel and check ONNXRuntime parity (full-suite only)."""
    from transformers import OpenAIGPTConfig, TFOpenAIGPTModel
    keras.backend.clear_session()
    # pretrained_weights = 'openai-gpt'
    tokenizer_file = 'openai_openai-gpt.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = OpenAIGPTConfig()
    model = TFOpenAIGPTModel(config)
    predictions = model.predict(inputs)
    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFOpenAIGPTLMHeadModel(self):
    """Convert TFOpenAIGPTLMHeadModel and check ONNXRuntime parity."""
    from transformers import OpenAIGPTConfig, TFOpenAIGPTLMHeadModel
    keras.backend.clear_session()
    # pretrained_weights = 'openai-gpt'
    tokenizer_file = 'openai_openai-gpt.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = OpenAIGPTConfig()
    model = TFOpenAIGPTLMHeadModel(config)
    predictions = model.predict(inputs)
    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                                     atol=1.e-4))
def test_TFOpenAIGPTDoubleHeadsModel(self):
    """Convert TFOpenAIGPTDoubleHeadsModel (batch size 1) and check ONNXRuntime parity."""
    from transformers import OpenAIGPTConfig, TFOpenAIGPTDoubleHeadsModel
    keras.backend.clear_session()
    # pretrained_weights = 'openai-gpt'
    tokenizer_file = 'openai_openai-gpt.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    # tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2), batch_dims = 1 in this case
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer, batch_size=1)
    config = OpenAIGPTConfig()
    model = TFOpenAIGPTDoubleHeadsModel(config)
    predictions = model.predict(inputs)
    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                         atol=1.e-4))
@unittest.skip('tensorflow.GraphDef exceeds maximum protobuf size of 2GB')
def test_TFXLMModel(self):
    """Convert TFXLMModel and check ONNXRuntime parity (skipped: GraphDef > 2GB)."""
    from transformers import XLMConfig, TFXLMModel
    keras.backend.clear_session()
    # pretrained_weights = 'xlm-mlm-enfr-1024'
    tokenizer_file = 'xlm_xlm-mlm-enfr-1024.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = XLMConfig()
    model = TFXLMModel(config)
    predictions = model.predict(inputs)
    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                         atol=1.e-4))
@unittest.skip('tensorflow.GraphDef exceeds maximum protobuf size of 2GB')
def test_TFXLMWithLMHeadModel(self):
    """Convert TFXLMWithLMHeadModel and check ONNXRuntime parity (skipped: GraphDef > 2GB)."""
    from transformers import XLMConfig, TFXLMWithLMHeadModel
    keras.backend.clear_session()
    # pretrained_weights = 'xlm-mlm-enfr-1024'
    tokenizer_file = 'xlm_xlm-mlm-enfr-1024.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = XLMConfig()
    model = TFXLMWithLMHeadModel(config)
    predictions = model.predict(inputs)
    onnx_model = mock_keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                         atol=1.e-4))
@unittest.skip('tensorflow.GraphDef exceeds maximum protobuf size of 2GB')
def test_TFXLMForSequenceClassification(self):
from transformers import XLMConfig, TFXLMForSequenceClassification
keras.backend.clear_session()
# pretrained_weights = 'xlm-mlm-enfr-1024'
tokenizer_file = 'xlm_xlm-mlm-enfr-1024.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = XLMConfig()
model = TFXLMForSequenceClassification(config)
predictions = model.predict(inputs)
onnx_model = mock_keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
@unittest.skip('tensorflow.GraphDef exceeds maximum protobuf size of 2GB')
def test_TFXLMForQuestionAnsweringSimple(self):
from transformers import XLMConfig, TFXLMForQuestionAnsweringSimple
keras.backend.clear_session()
# pretrained_weights = 'xlm-mlm-enfr-1024'
tokenizer_file = 'xlm_xlm-mlm-enfr-1024.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = XLMConfig()
model = TFXLMForQuestionAnsweringSimple(config)
predictions = model.predict(inputs)
onnx_model = mock_keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFDistilBertModel(self):
from transformers import DistilBertConfig, TFDistilBertModel
keras.backend.clear_session()
# pretrained_weights = 'distilbert-base-uncased'
tokenizer_file = 'distilbert_distilbert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = DistilBertConfig()
model = TFDistilBertModel(config)
predictions = model.predict(inputs)
onnx_model = mock_keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFDistilBertForMaskedLM(self):
from transformers import DistilBertConfig, TFDistilBertForMaskedLM
keras.backend.clear_session()
# pretrained_weights = 'distilbert-base-uncased'
tokenizer_file = 'distilbert_distilbert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = DistilBertConfig()
model = TFDistilBertForMaskedLM(config)
predictions = model.predict(inputs)
onnx_model = mock_keras2onnx.convert_keras(model, model.name)
self.assertTrue(
run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
atol=1.e-4))
@unittest.skipIf(not enable_full_transformer_test, "Full transfomer test is not enabled")
def test_TFDistilBertForSequenceClassification(self):
from transformers import DistilBertConfig, TFDistilBertForSequenceClassification
keras.backend.clear_session()
# pretrained_weights = 'distilbert-base-uncased'
tokenizer_file = 'distilbert_distilbert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = DistilBertConfig()
model = TFDistilBertForSequenceClassification(config)
predictions = model.predict(inputs)
onnx_model = mock_keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFDistilBertForTokenClassification(self):
from transformers import DistilBertConfig, TFDistilBertForTokenClassification
keras.backend.clear_session()
# pretrained_weights = 'distilbert-base-uncased'
tokenizer_file = 'distilbert_distilbert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = DistilBertConfig()
model = TFDistilBertForTokenClassification(config)
predictions = model.predict(inputs)
onnx_model = mock_keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFDistilBertForQuestionAnswering(self):
from transformers import DistilBertConfig, TFDistilBertForQuestionAnswering
keras.backend.clear_session()
# pretrained_weights = 'distilbert-base-uncased'
tokenizer_file = 'distilbert_distilbert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = DistilBertConfig()
model = TFDistilBertForQuestionAnswering(config)
predictions = model.predict(inputs)
onnx_model = mock_keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
@unittest.skipIf(not enable_full_transformer_test, "Full transfomer test is not enabled")
def test_TFRobertaModel(self):
from transformers import RobertaConfig, TFRobertaModel
keras.backend.clear_session()
# pretrained_weights = 'roberta-base'
tokenizer_file = 'roberta_roberta-base.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = RobertaConfig()
model = TFRobertaModel(config)
predictions = model.predict(inputs)
onnx_model = mock_keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFRobertaForMaskedLM(self):
from transformers import RobertaConfig, TFRobertaForMaskedLM
keras.backend.clear_session()
# pretrained_weights = 'roberta-base'
tokenizer_file = 'roberta_roberta-base.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = RobertaConfig()
model = TFRobertaForMaskedLM(config)
predictions = model.predict(inputs)
onnx_model = mock_keras2onnx.convert_keras(model, model.name)
self.assertTrue(
run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
atol=1.e-4))
def test_TFRobertaForSequenceClassification(self):
from transformers import RobertaConfig, TFRobertaForSequenceClassification
keras.backend.clear_session()
# pretrained_weights = 'roberta-base'
tokenizer_file = 'roberta_roberta-base.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = RobertaConfig()
model = TFRobertaForSequenceClassification(config)
predictions = model.predict(inputs)
onnx_model = mock_keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
@unittest.skipIf(not enable_full_transformer_test, "Full transfomer test is not enabled")
def test_TFRobertaForTokenClassification(self):
from transformers import RobertaConfig, TFRobertaForTokenClassification
keras.backend.clear_session()
# pretrained_weights = 'roberta-base'
tokenizer_file = 'roberta_roberta-base.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = RobertaConfig()
model = TFRobertaForTokenClassification(config)
predictions = model.predict(inputs)
onnx_model = mock_keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 24,209 | 51.516269 | 135 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_efn.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
from os.path import dirname, abspath
from mock_keras2onnx.proto import keras, is_tensorflow_older_than
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_image
img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
@unittest.skipIf(is_tensorflow_older_than('2.1.0'), "efficientnet needs tensorflow >= 2.1.0")
class TestEfn(unittest.TestCase):
    """ONNX conversion checks for EfficientNet models (full model and backbone)."""
    def setUp(self):
        # Converted model files created by run_image; removed in tearDown.
        self.model_files = []
    def tearDown(self):
        for path in self.model_files:
            os.remove(path)
    @unittest.skip("TODO: model discrepancy")
    def test_custom(self):
        """Convert only the backbone (up to 'top_activation') of EfficientNetB0."""
        from efficientnet import tfkeras as efn
        keras.backend.set_learning_phase(0)
        full_model = efn.EfficientNetB0(input_shape=(600, 600, 3), weights=None)
        backbone = keras.Model(full_model.input, full_model.get_layer("top_activation").output)
        outcome = run_image(backbone, self.model_files, img_path, target_size=(600, 600),
                            rtol=1e-2, atol=1e-1)
        self.assertTrue(*outcome)
    def test_efn(self):
        """Convert the complete EfficientNetB0 classifier."""
        from efficientnet import tfkeras as efn
        keras.backend.set_learning_phase(0)
        classifier = efn.EfficientNetB0(weights=None)
        outcome = run_image(classifier, self.model_files, img_path, target_size=(224, 224), rtol=1e-2)
        self.assertTrue(*outcome)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 1,491 | 32.155556 | 95 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_chatbot.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from mock_keras2onnx.proto.tfcompat import is_tf2
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0, get_max_opset_supported_for_test
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
Bidirectional = keras.layers.Bidirectional
concatenate = keras.layers.concatenate
Concatenate = keras.layers.Concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
LSTM = keras.layers.LSTM
MaxPooling2D = keras.layers.MaxPooling2D
Multiply = keras.layers.Multiply
Permute = keras.layers.Permute
RepeatVector = keras.layers.RepeatVector
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# Model from https://github.com/Dimsmary/Ossas_ChatBot
@unittest.skipIf(not is_tf2, "Tensorflow 2.x only tests")
class TestChatBot(unittest.TestCase):
    """Builds the Ossas_ChatBot seq2seq-with-attention model and verifies that the
    ONNX conversion produces the same outputs as the keras model."""
    def setUp(self):
        # Converted model files created during the test; removed in tearDown.
        self.model_files = []
    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)
    def test_chatbot(self):
        """Assemble encoder (BiLSTM) + decoder (LSTM) + dot-style attention,
        run two random batches through keras and the converted ONNX model,
        and compare the predictions."""
        K.clear_session()
        vocabulary_size = 1085
        # Heuristic embedding size: 4th root of the vocabulary size.
        embedding_dim = int(pow(vocabulary_size, 1.0 / 4))
        latent_dim = embedding_dim * 40
        # --- Encoder: embedding + bidirectional LSTM; fw/bw states are concatenated
        # so they match the decoder LSTM's doubled latent size.
        encoder_inputs = Input(shape=(None,), name='encoder_input')
        encoder_embedding = Embedding(vocabulary_size,
                                      embedding_dim,
                                      mask_zero=True,
                                      name='encoder_Embedding')(encoder_inputs)
        encoder = Bidirectional(LSTM(latent_dim, return_sequences=True, return_state=True, dropout=0.5),
                                name='encoder_BiLSTM')
        encoder_outputs, fw_state_h, fw_state_c, bw_state_h, bw_state_c = encoder(encoder_embedding)
        state_h = Concatenate(axis=-1, name='encoder_state_h')([fw_state_h, bw_state_h])
        state_c = Concatenate(axis=-1, name='encoder_state_c')([fw_state_c, bw_state_c])
        encoder_states = [state_h, state_c]
        # --- Decoder: embedding + LSTM initialized from the encoder states.
        decoder_inputs = Input(shape=(None,), name='decoder_input')
        decoder_embedding = Embedding(vocabulary_size,
                                      embedding_dim,
                                      mask_zero=True,
                                      name='decoder_embedding')(decoder_inputs)
        decoder_lstm = LSTM(latent_dim * 2,
                            return_sequences=True,
                            return_state=True,
                            name='decoder_LSTM',
                            dropout=0.5)
        decoder_outputs, _, _ = decoder_lstm(decoder_embedding,
                                             initial_state=encoder_states)
        # --- Attention: softmax weights over encoder timesteps, broadcast across the
        # feature dimension and applied multiplicatively to the decoder outputs.
        attention = Dense(1, activation='tanh')(encoder_outputs)
        attention = Flatten()(attention)
        attention = Activation('softmax')(attention)
        attention = RepeatVector(latent_dim * 2)(attention)
        attention = Permute([2, 1])(attention)
        sent_dense = Multiply()([decoder_outputs, attention])
        decoder_dense = Dense(vocabulary_size, activation='softmax', name='dense_layer')
        decoder_outputs = decoder_dense(sent_dense)
        keras_model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
        # Two random token batches (batch=2, seq_len=12) drive both backends.
        data1 = np.random.rand(2, 12).astype(np.float32)
        data2 = np.random.rand(2, 12).astype(np.float32)
        expected = keras_model.predict([data1, data2])
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name, target_opset=get_max_opset_supported_for_test())
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, [data1, data2], expected, self.model_files))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 4,426 | 40.764151 | 130 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_unet_plus_plus.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import numpy as np
import onnxruntime
from os.path import dirname, abspath
from mock_keras2onnx.proto import keras, is_keras_older_than
from keras.applications.vgg16 import VGG16
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from packaging.version import Version
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_image
img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
Input = keras.layers.Input
Activation = keras.layers.Activation
Concatenate = keras.layers.Concatenate
Conv2D = keras.layers.Conv2D
Conv2DTranspose = keras.layers.Conv2DTranspose
MaxPooling2D = keras.layers.MaxPooling2D
BatchNormalization = keras.layers.BatchNormalization
Model = keras.models.Model
def handle_block_names(stage):
    """Return the (conv, bn, relu, upsample) layer-name prefixes for a decoder stage."""
    template = 'decoder_stage{}_{}'
    return tuple(template.format(stage, suffix)
                 for suffix in ('conv', 'bn', 'relu', 'upsample'))
def ConvRelu(filters, kernel_size, use_batchnorm=False, conv_name='conv', bn_name='bn', relu_name='relu'):
    """Return a closure applying Conv2D (+ optional BatchNorm) followed by ReLU."""
    def layer(tensor):
        out = Conv2D(filters, kernel_size, padding="same", name=conv_name,
                     use_bias=not(use_batchnorm))(tensor)
        if use_batchnorm:
            out = BatchNormalization(name=bn_name)(out)
        return Activation('relu', name=relu_name)(out)
    return layer
def Upsample2D_block(filters, stage, kernel_size=(3,3), upsample_rate=(2,2),
                     use_batchnorm=False, skip=None):
    """Return a decoder-block closure: nearest-neighbour upsampling, optional skip
    concatenation, then two ConvRelu blocks.

    Bug fix: this module only aliases Conv2D/Conv2DTranspose/etc. from
    keras.layers, so the bare ``UpSampling2D`` reference raised NameError whenever
    this block was actually used; bind it explicitly from ``keras.layers``.
    """
    def layer(input_tensor):
        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)
        UpSampling2D = keras.layers.UpSampling2D  # not aliased at module level
        x = UpSampling2D(size=upsample_rate, name=up_name)(input_tensor)
        if skip is not None:
            x = Concatenate()([x, skip])
        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '1', bn_name=bn_name + '1', relu_name=relu_name + '1')(x)
        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '2', bn_name=bn_name + '2', relu_name=relu_name + '2')(x)
        return x
    return layer
def Transpose2D_block(filters, stage, kernel_size=(3,3), upsample_rate=(2,2),
                      transpose_kernel_size=(4,4), use_batchnorm=False, skip=None):
    """Return a decoder-block closure: Conv2DTranspose upsampling (+BN/ReLU),
    optional skip concatenation, then one ConvRelu block."""
    def layer(input_tensor):
        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)
        out = Conv2DTranspose(filters, transpose_kernel_size, strides=upsample_rate,
                              padding='same', name=up_name, use_bias=not(use_batchnorm))(input_tensor)
        if use_batchnorm:
            out = BatchNormalization(name=bn_name+'1')(out)
        out = Activation('relu', name=relu_name+'1')(out)
        if skip is not None:
            out = Concatenate()([out, skip])
        out = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                       conv_name=conv_name + '2', bn_name=bn_name + '2',
                       relu_name=relu_name + '2')(out)
        return out
    return layer
def get_layer_number(model, layer_name):
    """Return the index of the layer named *layer_name* in *model*.layers.

    Raises ValueError if no layer has that name.
    """
    for idx, layer in enumerate(model.layers):
        if layer.name == layer_name:
            return idx
    raise ValueError('No layer with name {} in model {}.'.format(layer_name, model.name))
def to_tuple(x):
    """Normalize *x* to a 2-tuple: pass length-2 tuples through, duplicate scalars.

    Raises ValueError for anything else (tuples of other lengths, lists, ...).
    """
    if isinstance(x, tuple) and len(x) == 2:
        return x
    if not isinstance(x, tuple) and np.isscalar(x):
        return (x, x)
    raise ValueError('Value should be tuple of length 2 or int value, got "{}"'.format(x))
# From https://github.com/MrGiovanni/UNetPlusPlus
class TestUnetPlusPlus(unittest.TestCase):
    """Builds a UNet-style decoder on a VGG16 backbone and verifies the ONNX
    conversion against the keras model on a sample image."""
    def setUp(self):
        # Converted model files created by run_image; removed in tearDown.
        self.model_files = []
    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)
    @unittest.skipIf(Version(onnxruntime.__version__.split('-')[0]) < Version('1.7.0'),
                     "ConvTranspose stride > 1 is fixed in onnxruntime 1.7.0.")
    def test_unet_plus_plus(self):
        backbone_name = 'vgg16'  # NOTE(review): unused; kept for readability only
        input_shape = (None, None, 3)
        input_tensor = None
        encoder_weights = None#'imagenet'
        backbone = VGG16(input_shape=input_shape,
                         input_tensor=input_tensor,
                         weights=encoder_weights,
                         include_top=False)
        input = backbone.input
        x = backbone.output
        block_type = 'transpose'
        if block_type == 'transpose':
            up_block = Transpose2D_block
        else:
            up_block = Upsample2D_block
        skip_connection_layers = ('block5_conv3', 'block4_conv3', 'block3_conv3', 'block2_conv2', 'block1_conv2')
        # convert layer names to indices
        skip_connection_idx = ([get_layer_number(backbone, l) if isinstance(l, str) else l
                                for l in skip_connection_layers])
        n_upsample_blocks = 5
        upsample_rates = (2,2,2,2,2)
        decoder_filters = (256,128,64,32,16)
        # NOTE(review): this reassignment has no effect — up_block was already
        # chosen above, so the decoder still uses Transpose2D_block.
        block_type='upsampling'
        activation='sigmoid'
        use_batchnorm=True
        classes=1
        for i in range(n_upsample_blocks):
            # check if there is a skip connection
            skip_connection = None
            if i < len(skip_connection_idx):
                skip_connection = backbone.layers[skip_connection_idx[i]].output
            upsample_rate = to_tuple(upsample_rates[i])
            x = up_block(decoder_filters[i], i, upsample_rate=upsample_rate,
                         skip=skip_connection, use_batchnorm=use_batchnorm)(x)
        # Final 1-channel segmentation head with sigmoid activation.
        x = Conv2D(classes, (3,3), padding='same', name='final_conv')(x)
        x = Activation(activation, name=activation)(x)
        model = Model(input, x)
        res = run_image(model, self.model_files, img_path, target_size=(256, 256, 3))
        self.assertTrue(*res)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 5,964 | 35.820988 | 113 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_ccgan.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import keras_contrib
import numpy as np
from mock_keras2onnx import set_converter
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime, convert_InstanceNormalizationLayer
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Concatenate = keras.layers.Concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
InstanceNormalization = keras_contrib.layers.InstanceNormalization
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/ccgan/ccgan.py
class CCGAN():
    """Context-Conditional GAN on 32x32 single-channel images.

    Builds a U-Net generator and a conv discriminator, then wires the combined
    (generator -> frozen discriminator) model used for generator training.
    """
    def __init__(self):
        self.img_rows = 32
        self.img_cols = 32
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.mask_height = 10
        self.mask_width = 10
        self.num_classes = 10
        # Number of filters in first layer of generator and discriminator
        self.gf = 32
        self.df = 32
        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        # Build the generator
        self.generator = self.build_generator()
        # The generator takes noise as input and generates imgs
        masked_img = Input(shape=self.img_shape)
        gen_img = self.generator(masked_img)
        # For the combined model we will only train the generator
        self.discriminator.trainable = False
        # The valid takes generated images as input and determines validity
        valid, _ = self.discriminator(gen_img)
        # The combined model (stacked generator and discriminator)
        # Trains the generator to fool the discriminator
        self.combined = Model(masked_img , valid)
    def build_generator(self):
        """U-Net Generator"""
        def conv2d(layer_input, filters, f_size=4, bn=True):
            """Layers used during downsampling"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            return d
        def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
            """Layers used during upsampling"""
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = BatchNormalization(momentum=0.8)(u)
            # Skip connection from the matching downsampling stage (U-Net).
            u = Concatenate()([u, skip_input])
            return u
        img = Input(shape=self.img_shape)
        # Downsampling
        d1 = conv2d(img, self.gf, bn=False)
        d2 = conv2d(d1, self.gf*2)
        d3 = conv2d(d2, self.gf*4)
        d4 = conv2d(d3, self.gf*8)
        # Upsampling
        u1 = deconv2d(d4, d3, self.gf*4)
        u2 = deconv2d(u1, d2, self.gf*2)
        u3 = deconv2d(u2, d1, self.gf)
        u4 = UpSampling2D(size=2)(u3)
        # tanh output keeps pixel values in [-1, 1].
        output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u4)
        return Model(img, output_img)
    def build_discriminator(self):
        """Conv discriminator with two heads: a patch validity map and an
        (num_classes + 1)-way class prediction (extra class = fake)."""
        img = Input(shape=self.img_shape)
        model = Sequential()
        model.add(Conv2D(64, kernel_size=4, strides=2, padding='same', input_shape=self.img_shape))
        # NOTE(review): alpha=0.8 is unusually high for LeakyReLU (others use 0.2)
        # — looks like a transcription quirk from the upstream repo; confirm.
        model.add(LeakyReLU(alpha=0.8))
        model.add(Conv2D(128, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        model.add(Conv2D(256, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        features = model(img)
        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(features)
        label = Flatten()(features)
        label = Dense(self.num_classes+1, activation="softmax")(label)
        return Model(img, [validity, label])
set_converter(keras_contrib.layers.InstanceNormalization, convert_InstanceNormalizationLayer)
class TestCCGAN(unittest.TestCase):
    """Round-trips the combined CCGAN keras model through ONNX and compares outputs."""
    def setUp(self):
        # Converted model files produced during the test; removed in tearDown.
        self.model_files = []
    def tearDown(self):
        for path in self.model_files:
            os.remove(path)
    def test_CCGAN(self):
        keras_model = CCGAN().combined
        masked = np.random.rand(2, 32, 32, 1).astype(np.float32)
        expected = keras_model.predict([masked])
        oxml = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        ok = run_onnx_runtime(oxml.graph.name, oxml, masked, expected,
                              self.model_files, rtol=1.e-2, atol=1.e-4)
        self.assertTrue(ok)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 5,232 | 33.202614 | 131 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_nbeats.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
Subtract = keras.layers.Subtract
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
class NBeatsNet:
    """Keras implementation of the N-BEATS forecasting architecture.

    Stacks of fully-connected blocks each emit a backcast (subtracted from the
    residual input) and a forecast (summed into the output). Block basis
    functions are selected per stack: generic (learned), trend (polynomial) or
    seasonality (Fourier).
    """
    # Supported stack/basis types.
    GENERIC_BLOCK = 'generic'
    TREND_BLOCK = 'trend'
    SEASONALITY_BLOCK = 'seasonality'
    def __init__(self,
                 input_dim=1,
                 exo_dim=0,
                 backcast_length=10,
                 forecast_length=2,
                 stack_types=(TREND_BLOCK, SEASONALITY_BLOCK),
                 nb_blocks_per_stack=3,
                 thetas_dim=(4, 8),
                 share_weights_in_stack=False,
                 hidden_layer_units=256,
                 nb_harmonics=None
                 ):
        self.stack_types = stack_types
        self.nb_blocks_per_stack = nb_blocks_per_stack
        self.thetas_dim = thetas_dim
        self.units = hidden_layer_units
        self.share_weights_in_stack = share_weights_in_stack
        self.backcast_length = backcast_length
        self.forecast_length = forecast_length
        self.input_dim = input_dim
        self.exo_dim = exo_dim
        self.input_shape = (self.backcast_length, self.input_dim)
        self.exo_shape = (self.backcast_length, self.exo_dim)
        self.output_shape = (self.forecast_length, self.input_dim)
        # Registry of shared layers, keyed by stack id then layer name
        # (populated by _r when share_weights_in_stack=True).
        self.weights = {}
        self.nb_harmonics = nb_harmonics
        # One thetas dimension must be supplied per stack.
        assert len(self.stack_types) == len(self.thetas_dim)
        x = Input(shape=self.input_shape, name='input_variable')
        # Split the multivariate input into one tensor per channel.
        # NOTE(review): the lambda captures `k` late, which is safe here only
        # because Lambda is invoked immediately inside the loop; it would
        # misbehave if these layers were re-invoked after the loop.
        x_ = {}
        for k in range(self.input_dim):
            x_[k] = Lambda(lambda z: z[..., k])(x)
        e_ = {}
        if self.has_exog():
            e = Input(shape=self.exo_shape, name='exos_variables')
            for k in range(self.exo_dim):
                e_[k] = Lambda(lambda z: z[..., k])(e)
        else:
            e = None
        y_ = {}
        # Doubly-residual stacking: each block's backcast is subtracted from the
        # running residual; its forecast is accumulated into the output.
        for stack_id in range(len(self.stack_types)):
            stack_type = self.stack_types[stack_id]
            nb_poly = self.thetas_dim[stack_id]
            for block_id in range(self.nb_blocks_per_stack):
                backcast, forecast = self.create_block(x_, e_, stack_id, block_id, stack_type, nb_poly)
                for k in range(self.input_dim):
                    x_[k] = Subtract()([x_[k], backcast[k]])
                    if stack_id == 0 and block_id == 0:
                        y_[k] = forecast[k]
                    else:
                        y_[k] = Add()([y_[k], forecast[k]])
        for k in range(self.input_dim):
            y_[k] = Reshape(target_shape=(self.forecast_length, 1))(y_[k])
        if self.input_dim > 1:
            y_ = concatenate(axis=-1)([y_[ll] for ll in range(self.input_dim)])
        else:
            y_ = y_[0]
        if self.has_exog():
            model = Model([x, e], y_)
        else:
            model = Model(x, y_)
        model.summary()
        self.n_beats = model
    def has_exog(self):
        # Whether exogenous input variables are configured.
        return self.exo_dim > 0
    @staticmethod
    def load(filepath, custom_objects=None, compile=True):
        """Load a previously saved keras model from *filepath*."""
        from keras.models import load_model
        return load_model(filepath, custom_objects, compile)
    def _r(self, layer_with_weights, stack_id):
        # mechanism to restore weights when block share the same weights.
        # only useful when share_weights_in_stack=True.
        if self.share_weights_in_stack:
            layer_name = layer_with_weights.name.split('/')[-1]
            try:
                reused_weights = self.weights[stack_id][layer_name]
                return reused_weights
            except KeyError:
                pass
            if stack_id not in self.weights:
                self.weights[stack_id] = {}
            self.weights[stack_id][layer_name] = layer_with_weights
        return layer_with_weights
    def create_block(self, x, e, stack_id, block_id, stack_type, nb_poly):
        """Build one N-BEATS block; returns per-channel (backcast, forecast) dicts."""
        # register weights (useful when share_weights_in_stack=True)
        def reg(layer):
            return self._r(layer, stack_id)
        # update name (useful when share_weights_in_stack=True)
        def n(layer_name):
            return '/'.join([str(stack_id), str(block_id), stack_type, layer_name])
        backcast_ = {}
        forecast_ = {}
        # Four shared fully-connected layers feeding the theta projections.
        d1 = reg(Dense(self.units, activation='relu', name=n('d1')))
        d2 = reg(Dense(self.units, activation='relu', name=n('d2')))
        d3 = reg(Dense(self.units, activation='relu', name=n('d3')))
        d4 = reg(Dense(self.units, activation='relu', name=n('d4')))
        if stack_type == 'generic':
            theta_b = reg(Dense(nb_poly, activation='linear', use_bias=False, name=n('theta_b')))
            theta_f = reg(Dense(nb_poly, activation='linear', use_bias=False, name=n('theta_f')))
            backcast = reg(Dense(self.backcast_length, activation='linear', name=n('backcast')))
            forecast = reg(Dense(self.forecast_length, activation='linear', name=n('forecast')))
        elif stack_type == 'trend':
            # Trend blocks share one theta projection for backcast and forecast.
            theta_f = theta_b = reg(Dense(nb_poly, activation='linear', use_bias=False, name=n('theta_f_b')))
            backcast = Lambda(trend_model, arguments={"is_forecast": False, "backcast_length": self.backcast_length,
                                                      "forecast_length": self.forecast_length})
            forecast = Lambda(trend_model, arguments={"is_forecast": True, "backcast_length": self.backcast_length,
                                                      "forecast_length": self.forecast_length})
        else:  # 'seasonality'
            if self.nb_harmonics:
                theta_b = reg(Dense(self.nb_harmonics, activation='linear', use_bias=False, name=n('theta_b')))
            else:
                theta_b = reg(Dense(self.forecast_length, activation='linear', use_bias=False, name=n('theta_b')))
            theta_f = reg(Dense(self.forecast_length, activation='linear', use_bias=False, name=n('theta_f')))
            backcast = Lambda(seasonality_model,
                              arguments={"is_forecast": False, "backcast_length": self.backcast_length,
                                         "forecast_length": self.forecast_length})
            forecast = Lambda(seasonality_model,
                              arguments={"is_forecast": True, "backcast_length": self.backcast_length,
                                         "forecast_length": self.forecast_length})
        for k in range(self.input_dim):
            if self.has_exog():
                d0 = concatenate()([x[k]] + [e[ll] for ll in range(self.exo_dim)])
            else:
                d0 = x[k]
            d1_ = d1(d0)
            d2_ = d2(d1_)
            d3_ = d3(d2_)
            d4_ = d4(d3_)
            theta_f_ = theta_f(d4_)
            theta_b_ = theta_b(d4_)
            backcast_[k] = backcast(theta_b_)
            forecast_[k] = forecast(theta_f_)
        return backcast_, forecast_
    def __getattr__(self, name):
        # https://github.com/faif/python-patterns
        # model.predict() instead of model.n_beats.predict()
        # same for fit(), train_on_batch()...
        attr = getattr(self.n_beats, name)
        if not callable(attr):
            return attr
        def wrapper(*args, **kwargs):
            return attr(*args, **kwargs)
        return wrapper
def linear_space(backcast_length, forecast_length, fwd_looking=True):
    """Normalized time grid; keeps the forecast part when fwd_looking,
    otherwise the backcast part."""
    grid = K.arange(-float(backcast_length), float(forecast_length), 1) / backcast_length
    return grid[backcast_length:] if fwd_looking else grid[:backcast_length]
def seasonality_model(thetas, backcast_length, forecast_length, is_forecast):
    """Project thetas onto a Fourier (cos/sin) basis over the time grid."""
    num_thetas = thetas.get_shape().as_list()[-1]
    half = num_thetas // 2
    n_cos, n_sin = (half, half) if num_thetas % 2 == 0 else (half, half + 1)
    grid = linear_space(backcast_length, forecast_length, fwd_looking=is_forecast)
    cos_terms = K.stack([K.cos(2 * np.pi * i * grid) for i in range(n_cos)], axis=0)
    sin_terms = K.stack([K.sin(2 * np.pi * i * grid) for i in range(n_sin)], axis=0)
    basis = sin_terms if num_thetas == 1 else K.concatenate([cos_terms, sin_terms], axis=0)
    return K.dot(thetas, K.cast(basis, np.float32))
def trend_model(thetas, backcast_length, forecast_length, is_forecast):
    """Project thetas onto a polynomial (Vandermonde) basis over the time grid."""
    degree = thetas.shape[-1]
    grid = linear_space(backcast_length, forecast_length, fwd_looking=is_forecast)
    basis = K.transpose(K.stack([grid ** i for i in range(degree)], axis=0))
    basis = K.cast(basis, np.float32)
    return K.dot(thetas, K.transpose(basis))
# Model from https://github.com/philipperemy/n-beats
class TestNBeats(unittest.TestCase):
    """Round-trips a small generic N-BEATS network through ONNX and compares outputs."""
    def setUp(self):
        # Converted model files produced during the test; removed in tearDown.
        self.model_files = []
    def tearDown(self):
        for path in self.model_files:
            os.remove(path)
    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_NBeats(self):
        K.clear_session()
        num_samples, time_steps, input_dim, output_dim = 50000, 10, 1, 1
        # Definition of the model.
        keras_model = NBeatsNet(backcast_length=time_steps, forecast_length=output_dim,
                                stack_types=(NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK), nb_blocks_per_stack=2,
                                thetas_dim=(4, 4), share_weights_in_stack=True, hidden_layer_units=64)
        data = np.random.rand(num_samples, time_steps, input_dim).astype(np.float32)
        expected = keras_model.predict(data)
        oxml = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        ok = run_keras_and_ort(oxml.graph.name, oxml, keras_model, data,
                               expected, self.model_files)
        self.assertTrue(ok)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 10,673 | 38.680297 | 116 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_resnext.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from mock_keras2onnx.proto.tfcompat import is_tf2
# NOTE(review): the tests below assign ``weight_decay = 5e-4,`` (trailing
# comma — a 1-tuple) and thread it through to l2().  Under TF2 this shim
# unpacks the tuple before calling tf.keras' regularizer; the non-tf2 branch
# imports standalone keras' l2 directly, which (per the comment below)
# tolerates the tuple form.  Confirm the tuple is intentional before changing.
if is_tf2:
    def l2(weight_decay):
        # old keras layer expects a tuple but tf keras wants a single value
        return keras.regularizers.l2(weight_decay[0])
else:
    from keras.regularizers import l2
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
from test_utils import test_level_0, run_image
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
add = keras.layers.add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
GlobalMaxPooling2D = keras.layers.GlobalMaxPooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
def initial_conv_block(input, weight_decay=5e-4):
    """Stem for the CIFAR-style ResNeXt: 3x3 conv -> batch norm -> ReLU."""
    channel_axis = -1
    stem = Conv2D(64, (3, 3), padding='same', use_bias=False, kernel_initializer='he_normal',
                  kernel_regularizer=l2(weight_decay))(input)
    stem = BatchNormalization(axis=channel_axis)(stem)
    return Activation('relu')(stem)
def initial_conv_block_inception(input_tensor, weight_decay=5e-4):
    """ImageNet-style stem: strided 7x7 conv, BN, LeakyReLU, 3x3 max pool."""
    channel_axis = -1
    stem = Conv2D(64, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
                  kernel_regularizer=l2(weight_decay), strides=(2, 2))(input_tensor)
    stem = BatchNormalization(axis=channel_axis)(stem)
    stem = LeakyReLU()(stem)
    return MaxPooling2D((3, 3), strides=(2, 2), padding='same')(stem)
def __grouped_convolution_block(input, grouped_channels, cardinality, strides, weight_decay=5e-4):
    """Grouped 3x3 convolution block (ResNeXt).

    Splits the input channels into ``cardinality`` groups of
    ``grouped_channels`` each, convolves every group independently, then
    concatenates, batch-normalizes and applies ReLU.

    Args:
        input: 4-D channels-last feature tensor.
        grouped_channels: channels per group.
        cardinality: number of groups (1 degenerates to a plain conv).
        strides: spatial stride of the 3x3 convolutions.
        weight_decay: l2 regularization factor for the conv kernels.

    Returns:
        The activated, merged feature tensor.
    """
    init = input
    channel_axis = -1
    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = Activation('relu')(x)
        return x

    # One Lambda slice + conv per group.  ``c=c`` binds the loop value at
    # definition time; a plain closure would late-bind and make every slice
    # use the final c — the reason the original code was manually unrolled
    # (and thereby hard-coded to exactly 8 groups).  The loop also honors
    # cardinality values other than 8.
    for c in range(cardinality):
        x = Lambda(lambda z, c=c: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels])(input)
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)
        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    x = BatchNormalization(axis=channel_axis)(group_merge)
    x = Activation('relu')(x)
    return x
def bottleneck_block(input, filters=64, cardinality=8, strides=1, weight_decay=5e-4):
    """ResNeXt bottleneck: 1x1 reduce -> grouped 3x3 -> 1x1 expand + residual."""
    shortcut = input
    grouped_channels = int(filters / cardinality)
    channel_axis = -1

    # Project the shortcut when its channel count does not match 2 * filters.
    if shortcut.shape[-1] != 2 * filters:
        shortcut = Conv2D(filters * 2, (1, 1), padding='same', strides=(strides, strides),
                          use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(shortcut)
        shortcut = BatchNormalization(axis=channel_axis)(shortcut)

    x = Conv2D(filters, (1, 1), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = __grouped_convolution_block(x, grouped_channels, cardinality, strides, weight_decay)

    x = Conv2D(filters * 2, (1, 1), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(axis=channel_axis)(x)

    x = add([shortcut, x])
    return Activation('relu')(x)
def squeeze_excite_block(input, ratio=16):
    """Squeeze-and-excitation: per-channel gating learned from global pooling."""
    channel_axis = -1
    filters = input.shape[channel_axis]

    # Squeeze: average each channel down to a single value.
    gate = GlobalAveragePooling2D()(input)
    gate = Reshape((1, 1, filters))(gate)
    # Excite: bottleneck MLP producing a sigmoid weight per channel.
    gate = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(gate)
    gate = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(gate)
    if K.image_data_format() == 'channels_first':
        gate = Permute((3, 1, 2))(gate)
    return multiply([input, gate])
def __grouped_se_convolution_block(input_tensor, grouped_channels, cardinality, strides, weight_decay=5e-4):
    """Grouped 3x3 convolution block (SE-ResNeXt variant, LeakyReLU).

    Identical in structure to ``__grouped_convolution_block`` but uses
    LeakyReLU activations, matching the rest of the SE network.

    Args:
        input_tensor: 4-D channels-last feature tensor.
        grouped_channels: channels per group.
        cardinality: number of groups (1 degenerates to a plain conv).
        strides: spatial stride of the 3x3 convolutions.
        weight_decay: l2 regularization factor for the conv kernels.

    Returns:
        The activated, merged feature tensor.
    """
    init = input_tensor
    channel_axis = -1
    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = LeakyReLU()(x)
        return x

    # ``c=c`` binds the group index at definition time; a plain closure would
    # late-bind and make every Lambda slice the same (final) channel range,
    # which is why the original code was manually unrolled and therefore
    # hard-coded to 8 groups.  The loop honors any cardinality.
    for c in range(cardinality):
        x = Lambda(lambda z, c=c: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels])(input_tensor)
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)
        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    x = BatchNormalization(axis=channel_axis)(group_merge)
    x = LeakyReLU()(x)
    return x
def se_bottleneck_block(input_tensor, filters=64, cardinality=8, strides=1, weight_decay=5e-4):
    """SE-ResNeXt bottleneck: grouped bottleneck plus squeeze-excite gating."""
    shortcut = input_tensor
    grouped_channels = int(filters / cardinality)
    channel_axis = -1

    # Project the shortcut when its channel count does not match 2 * filters.
    if shortcut.shape[-1] != 2 * filters:
        shortcut = Conv2D(filters * 2, (1, 1), padding='same', strides=(strides, strides),
                          use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(shortcut)
        shortcut = BatchNormalization(axis=channel_axis)(shortcut)

    x = Conv2D(filters, (1, 1), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(input_tensor)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    x = __grouped_se_convolution_block(x, grouped_channels, cardinality, strides, weight_decay)

    x = Conv2D(filters * 2, (1, 1), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(axis=channel_axis)(x)

    # Channel-wise recalibration before the residual sum.
    x = squeeze_excite_block(x)

    x = add([shortcut, x])
    return LeakyReLU()(x)
def create_res_next(__initial_conv_block, __bottleneck_block,
                    nb_classes, img_input, include_top, depth=29, cardinality=8, width=4,
                    weight_decay=5e-4, pooling=None):
    """Assemble a ResNeXt-style network from the supplied stem/bottleneck builders.

    Args:
        __initial_conv_block: callable building the stem from the input tensor.
        __bottleneck_block: callable building one bottleneck block.
        nb_classes: number of output classes (used when include_top is True).
        img_input: input tensor to build on.
        include_top: whether to append the global-pool + softmax classifier.
        depth: total depth, or a list/tuple of per-stage block counts.
        cardinality: number of convolution groups per block.
        width: base width multiplier (filters = cardinality * width).
        weight_decay: l2 regularization factor.
        pooling: 'avg' or 'max' global pooling when include_top is False.

    Returns:
        The output tensor of the assembled network.
    """
    if type(depth) in (list, tuple):
        # A sequence spells out how many bottleneck blocks each stage gets.
        blocks_per_stage = list(depth)
    else:
        # Otherwise derive three equally sized stages from the total depth.
        blocks_per_stage = [(depth - 2) // 9 for _ in range(3)]

    # Filter count doubles at every stage.
    stage_filters = [cardinality * width * (2 ** i) for i in range(len(blocks_per_stage))]

    x = __initial_conv_block(img_input, weight_decay)

    # First stage keeps stride 1 throughout (no downsampling).
    for _ in range(blocks_per_stage[0]):
        x = __bottleneck_block(x, stage_filters[0], cardinality, strides=1, weight_decay=weight_decay)

    # Remaining stages downsample with their first block only.
    for stage, n_blocks in enumerate(blocks_per_stage[1:], start=1):
        for block in range(n_blocks):
            x = __bottleneck_block(x, stage_filters[stage], cardinality,
                                   strides=2 if block == 0 else 1,
                                   weight_decay=weight_decay)

    if include_top:
        x = GlobalAveragePooling2D()(x)
        x = Dense(nb_classes, use_bias=False, kernel_regularizer=l2(weight_decay),
                  kernel_initializer='he_normal', activation='softmax')(x)
    elif pooling == 'avg':
        x = GlobalAveragePooling2D()(x)
    elif pooling == 'max':
        x = GlobalMaxPooling2D()(x)
    return x
class TestResNext(unittest.TestCase):
    """ONNX conversion tests for ResNeXt and SE-ResNeXt (tf2-only models)."""
    def setUp(self):
        # ONNX files written during the test; removed in tearDown.
        self.model_files = []
    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)
    # Model from https://github.com/titu1994/Keras-ResNeXt
    @unittest.skipIf(not is_tf2,
                     "This is a tf2 model.")
    def test_ResNext(self):
        K.clear_session()
        input_shape = (112, 112, 3)
        depth = 29
        cardinality = 8
        width = 64
        # NOTE(review): the trailing comma makes this a 1-tuple; the tf2
        # l2() shim defined near the imports unpacks it via weight_decay[0].
        # Confirm the tuple form is intentional before "fixing" it.
        weight_decay = 5e-4,
        include_top = True
        pooling = None
        classes = 10
        img_input = keras.layers.Input(shape=input_shape)
        x = create_res_next(initial_conv_block, bottleneck_block,
                            classes, img_input, include_top, depth, cardinality, width,
                            weight_decay, pooling)
        inputs = img_input
        keras_model = Model(inputs, x, name='resnext')
        res = run_image(keras_model, self.model_files, img_path, atol=5e-3, target_size=112)
        self.assertTrue(*res)
    # Model from https://github.com/titu1994/keras-squeeze-excite-network
    @unittest.skipIf(not is_tf2,
                     "This is a tf2 model.")
    def test_SEResNext(self):
        K.clear_session()
        input_shape = (112, 112, 3)
        depth = 29
        cardinality = 8
        width = 64
        # NOTE(review): 1-tuple on purpose — see the comment in test_ResNext.
        weight_decay = 5e-4,
        include_top = True
        pooling = None
        classes = 10
        img_input = keras.layers.Input(shape=input_shape)
        x = create_res_next(initial_conv_block_inception, se_bottleneck_block,
                            classes, img_input, include_top, depth, cardinality, width,
                            weight_decay, pooling)
        inputs = img_input
        keras_model = Model(inputs, x, name='se_resnext')
        res = run_image(keras_model, self.model_files, img_path, atol=5e-3, target_size=112)
        self.assertTrue(*res)
# Run this test module directly via the unittest runner.
if __name__ == "__main__":
    unittest.main()
| 16,488 | 41.497423 | 112 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_lipnet.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
Bidirectional = keras.layers.Bidirectional
concatenate = keras.layers.concatenate
Conv3D = keras.layers.Conv3D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
GRU = keras.layers.GRU
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling3D = keras.layers.MaxPooling3D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
TimeDistributed = keras.layers.TimeDistributed
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
ZeroPadding3D = keras.layers.ZeroPadding3D
Sequential = keras.models.Sequential
Model = keras.models.Model
# Model from https://github.com/rizkiarm/LipNet
class TestLipNet(unittest.TestCase):
    """ONNX conversion test for the LipNet model (3-D conv + BiGRU stack)."""

    def setUp(self):
        # ONNX files produced while testing; deleted again in tearDown.
        self.model_files = []

    def tearDown(self):
        for path in self.model_files:
            os.remove(path)

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_LipNet(self):
        K.clear_session()
        frames_n = 2
        img_w = 128
        img_h = 128
        img_c = 3
        output_size = 28
        input_shape = (frames_n, img_w, img_h, img_c)
        input_data = Input(name='the_input', shape=input_shape, dtype='float32')

        # Three zero-pad -> conv3d -> maxpool -> dropout stages; only the
        # filter count, kernel, stride and padding differ between them.
        stage_specs = [
            (32, (3, 5, 5), (1, 2, 2), (1, 2, 2)),
            (64, (3, 5, 5), (1, 1, 1), (1, 2, 2)),
            (96, (3, 3, 3), (1, 1, 1), (1, 1, 1)),
        ]
        x = input_data
        for idx, (n_filters, kernel, strides, padding) in enumerate(stage_specs, start=1):
            x = ZeroPadding3D(padding=padding, name='zero%d' % idx)(x)
            x = Conv3D(n_filters, kernel, strides=strides, activation='relu',
                       kernel_initializer='he_normal', name='conv%d' % idx)(x)
            x = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max%d' % idx)(x)
            x = Dropout(0.5)(x)

        # Collapse the spatial dimensions per frame, then model the sequence.
        x = TimeDistributed(Flatten())(x)
        x = Bidirectional(GRU(256, return_sequences=True, kernel_initializer='Orthogonal', name='gru1'), merge_mode='concat')(x)
        x = Bidirectional(GRU(256, return_sequences=True, kernel_initializer='Orthogonal', name='gru2'), merge_mode='concat')(x)

        # transforms RNN output to character activations:
        x = Dense(output_size, kernel_initializer='he_normal', name='dense1')(x)
        y_pred = Activation('softmax', name='softmax')(x)
        keras_model = Model(inputs=input_data, outputs=y_pred)

        data = np.random.rand(2, frames_n, img_w, img_h, img_c).astype(np.float32)
        expected = keras_model.predict([data])
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))
# Run this test module directly via the unittest runner.
if __name__ == "__main__":
    unittest.main()
| 3,930 | 37.920792 | 136 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_deeplab_v3.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
add = keras.layers.add
BatchNormalization = keras.layers.BatchNormalization
Concatenate = keras.layers.Concatenate
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
DepthwiseConv2D = keras.layers.DepthwiseConv2D
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
import tensorflow as tf
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):
    """Separable convolution (depthwise + pointwise), each followed by BN.

    With ``depth_activation`` a ReLU follows both BN layers; otherwise a
    single ReLU precedes the depthwise convolution instead.
    """
    if stride == 1:
        depth_padding = 'same'
    else:
        # Pad explicitly so a strided, dilated depthwise conv with 'valid'
        # padding covers the input the way 'same' would.
        effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = effective_kernel - 1
        pad_beg = pad_total // 2
        x = ZeroPadding2D((pad_beg, pad_total - pad_beg))(x)
        depth_padding = 'valid'

    if not depth_activation:
        x = Activation(tf.nn.relu)(x)

    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
                        padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation(tf.nn.relu)(x)

    x = Conv2D(filters, (1, 1), padding='same',
               use_bias=False, name=prefix + '_pointwise')(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation(tf.nn.relu)(x)
    return x
def _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1):
    """2-D convolution whose output size behaves like 'same' padding.

    Stride 1 can rely on keras' own 'same' padding; larger strides pad the
    input explicitly and convolve with 'valid' so the sizes stay consistent.
    """
    common = dict(strides=(stride, stride), use_bias=False,
                  dilation_rate=(rate, rate), name=prefix)
    if stride == 1:
        return Conv2D(filters, (kernel_size, kernel_size), padding='same', **common)(x)
    effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = effective_kernel - 1
    pad_beg = pad_total // 2
    x = ZeroPadding2D((pad_beg, pad_total - pad_beg))(x)
    return Conv2D(filters, (kernel_size, kernel_size), padding='valid', **common)(x)
def _xception_block(inputs, depth_list, prefix, skip_connection_type, stride,
                    rate=1, depth_activation=False, return_skip=False):
    """Stack of three SepConv_BN layers plus a configurable skip connection.

    ``skip_connection_type`` is 'conv' (projected shortcut), 'sum' (identity
    shortcut) or 'none'.  With ``return_skip`` the activation after the
    second separable conv is also returned (consumed by the decoder).
    """
    x = inputs
    skip = None
    for i in range(3):
        x = SepConv_BN(x,
                       depth_list[i],
                       prefix + '_separable_conv{}'.format(i + 1),
                       stride=stride if i == 2 else 1,
                       rate=rate,
                       depth_activation=depth_activation)
        if i == 1:
            skip = x
    if skip_connection_type == 'conv':
        shortcut = _conv2d_same(inputs, depth_list[-1], prefix + '_shortcut',
                                kernel_size=1,
                                stride=stride)
        shortcut = BatchNormalization(name=prefix + '_shortcut_BN')(shortcut)
        outputs = add([x, shortcut])
    elif skip_connection_type == 'sum':
        outputs = add([x, inputs])
    elif skip_connection_type == 'none':
        outputs = x
    return (outputs, skip) if return_skip else outputs
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):
    """MobileNetV2 inverted residual block: expand (1x1) -> depthwise -> project.

    Args:
        inputs: 4-D channels-last feature tensor.
        expansion: channel expansion factor for the 1x1 expand conv.
        stride: spatial stride of the depthwise convolution.
        alpha: width multiplier applied to ``filters``.
        filters: nominal output channel count before alpha scaling.
        block_id: index used in layer names; block 0 skips the expand conv.
        skip_connection: add the input back onto the projected output.
        rate: dilation rate of the depthwise convolution.

    Returns:
        The block's output tensor.
    """
    # TensorShape entries are plain ints under TF2 but Dimension objects
    # (with a .value attribute) under TF1; support both instead of the
    # original ``inputs.shape[-1].value`` which raises AttributeError on TF2.
    last_dim = inputs.shape[-1]
    in_channels = getattr(last_dim, 'value', last_dim)
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'expanded_conv_{}_'.format(block_id)
    if block_id:
        # Expand
        x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
                   use_bias=False, activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(tf.nn.relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'
    # Depthwise
    x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
                        use_bias=False, padding='same', dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)
    x = Activation(tf.nn.relu6, name=prefix + 'depthwise_relu')(x)
    # Project (linear bottleneck: no activation after this conv)
    x = Conv2D(pointwise_filters,
               kernel_size=1, padding='same', use_bias=False, activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'project_BN')(x)
    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])
    # if in_channels == pointwise_filters and stride == 1:
    #    return Add(name='res_connect_' + str(block_id))([inputs, x])
    return x
def Deeplabv3(weights='pascal_voc', input_tensor=None, input_shape=(512, 512, 3), classes=21, backbone='mobilenetv2',
              OS=16, alpha=1., activation=None):
    """Build a DeepLabV3+ semantic segmentation model.

    Args:
        weights: only influences the final logits layer name in this copy
            ('pascal_voc'/'cityscapes' with the matching class count reuse
            the upstream 'logits_semantic' name); no weights are loaded here.
        input_tensor: unused in this copy — a fresh Input is always created.
            TODO confirm this is intentional.
        input_shape: (height, width, channels) of the input image.
        classes: number of segmentation classes.
        backbone: 'xception' or 'mobilenetv2' feature extractor.
        OS: output stride; honored by the xception path only (the
            mobilenetv2 path overwrites it with 8).
        alpha: width multiplier for the mobilenetv2 backbone.
        activation: optional 'softmax' or 'sigmoid' applied to the logits.

    Returns:
        A keras Model mapping images to per-pixel class scores.
    """
    img_input = Input(shape=input_shape)
    if backbone == 'xception':
        # Output stride determines the strides/dilation rates of the deeper
        # xception blocks and the ASPP atrous rates.
        if OS == 8:
            entry_block3_stride = 1
            middle_block_rate = 2 # ! Not mentioned in paper, but required
            exit_block_rates = (2, 4)
            atrous_rates = (12, 24, 36)
        else:
            entry_block3_stride = 2
            middle_block_rate = 1
            exit_block_rates = (1, 2)
            atrous_rates = (6, 12, 18)
        x = Conv2D(32, (3, 3), strides=(2, 2),
                   name='entry_flow_conv1_1', use_bias=False, padding='same')(img_input)
        x = BatchNormalization(name='entry_flow_conv1_1_BN')(x)
        x = Activation(tf.nn.relu)(x)
        x = _conv2d_same(x, 64, 'entry_flow_conv1_2', kernel_size=3, stride=1)
        x = BatchNormalization(name='entry_flow_conv1_2_BN')(x)
        x = Activation(tf.nn.relu)(x)
        x = _xception_block(x, [128, 128, 128], 'entry_flow_block1',
                            skip_connection_type='conv', stride=2,
                            depth_activation=False)
        # skip1 feeds the decoder's low-level feature projection below.
        x, skip1 = _xception_block(x, [256, 256, 256], 'entry_flow_block2',
                                   skip_connection_type='conv', stride=2,
                                   depth_activation=False, return_skip=True)
        x = _xception_block(x, [728, 728, 728], 'entry_flow_block3',
                            skip_connection_type='conv', stride=entry_block3_stride,
                            depth_activation=False)
        for i in range(16):
            x = _xception_block(x, [728, 728, 728], 'middle_flow_unit_{}'.format(i + 1),
                                skip_connection_type='sum', stride=1, rate=middle_block_rate,
                                depth_activation=False)
        x = _xception_block(x, [728, 1024, 1024], 'exit_flow_block1',
                            skip_connection_type='conv', stride=1, rate=exit_block_rates[0],
                            depth_activation=False)
        x = _xception_block(x, [1536, 1536, 2048], 'exit_flow_block2',
                            skip_connection_type='none', stride=1, rate=exit_block_rates[1],
                            depth_activation=True)
    else:
        # MobileNetV2 backbone: output stride is fixed at 8.
        OS = 8
        first_block_filters = _make_divisible(32 * alpha, 8)
        x = Conv2D(first_block_filters,
                   kernel_size=3,
                   strides=(2, 2), padding='same',
                   use_bias=False, name='Conv')(img_input)
        x = BatchNormalization(
            epsilon=1e-3, momentum=0.999, name='Conv_BN')(x)
        x = Activation(tf.nn.relu6, name='Conv_Relu6')(x)
        x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,
                                expansion=1, block_id=0, skip_connection=False)
        x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
                                expansion=6, block_id=1, skip_connection=False)
        x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
                                expansion=6, block_id=2, skip_connection=True)
        x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
                                expansion=6, block_id=3, skip_connection=False)
        x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
                                expansion=6, block_id=4, skip_connection=True)
        x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
                                expansion=6, block_id=5, skip_connection=True)
        # stride in block 6 changed from 2 -> 1, so we need to use rate = 2
        x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,  # 1!
                                expansion=6, block_id=6, skip_connection=False)
        x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=7, skip_connection=True)
        x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=8, skip_connection=True)
        x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=9, skip_connection=True)
        x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=10, skip_connection=False)
        x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=11, skip_connection=True)
        x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=12, skip_connection=True)
        x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=2,  # 1!
                                expansion=6, block_id=13, skip_connection=False)
        x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
                                expansion=6, block_id=14, skip_connection=True)
        x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
                                expansion=6, block_id=15, skip_connection=True)
        x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, rate=4,
                                expansion=6, block_id=16, skip_connection=False)
    # end of feature extractor
    # branching for Atrous Spatial Pyramid Pooling
    # Image Feature branch
    shape_before = tf.shape(x)  # NOTE(review): appears unused below — confirm
    b4 = GlobalAveragePooling2D()(x)
    # from (b_size, channels)->(b_size, 1, 1, channels)
    b4 = Lambda(lambda x: K.expand_dims(x, 1))(b4)
    b4 = Lambda(lambda x: K.expand_dims(x, 1))(b4)
    b4 = Conv2D(256, (1, 1), padding='same',
                use_bias=False, name='image_pooling')(b4)
    b4 = BatchNormalization(name='image_pooling_BN', epsilon=1e-5)(b4)
    b4 = Activation(tf.nn.relu)(b4)
    # upsample. have to use compat because of the option align_corners
    size_before = tf.keras.backend.int_shape(x)
    b4 = Lambda(lambda x: tf.compat.v1.image.resize(x, size_before[1:3],
                                                    method='bilinear', align_corners=True))(b4)
    # simple 1x1
    b0 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='aspp0')(x)
    b0 = BatchNormalization(name='aspp0_BN', epsilon=1e-5)(b0)
    b0 = Activation(tf.nn.relu, name='aspp0_activation')(b0)
    # there are only 2 branches in mobilenetV2. not sure why
    if backbone == 'xception':
        # rate = 6 (12)
        b1 = SepConv_BN(x, 256, 'aspp1',
                        rate=atrous_rates[0], depth_activation=True, epsilon=1e-5)
        # rate = 12 (24)
        b2 = SepConv_BN(x, 256, 'aspp2',
                        rate=atrous_rates[1], depth_activation=True, epsilon=1e-5)
        # rate = 18 (36)
        b3 = SepConv_BN(x, 256, 'aspp3',
                        rate=atrous_rates[2], depth_activation=True, epsilon=1e-5)
        # concatenate ASPP branches & project
        x = Concatenate()([b4, b0, b1, b2, b3])
    else:
        x = Concatenate()([b4, b0])
    x = Conv2D(256, (1, 1), padding='same',
               use_bias=False, name='concat_projection')(x)
    x = BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x)
    x = Activation(tf.nn.relu)(x)
    x = Dropout(0.1)(x)
    # DeepLab v.3+ decoder
    if backbone == 'xception':
        # Feature projection
        # x4 (x2) block
        skip_size = tf.keras.backend.int_shape(skip1)
        x = Lambda(lambda xx: tf.compat.v1.image.resize(xx,
                                                        skip_size[1:3],
                                                        method='bilinear', align_corners=True))(x)
        dec_skip1 = Conv2D(48, (1, 1), padding='same',
                           use_bias=False, name='feature_projection0')(skip1)
        dec_skip1 = BatchNormalization(
            name='feature_projection0_BN', epsilon=1e-5)(dec_skip1)
        dec_skip1 = Activation(tf.nn.relu)(dec_skip1)
        x = Concatenate()([x, dec_skip1])
        x = SepConv_BN(x, 256, 'decoder_conv0',
                       depth_activation=True, epsilon=1e-5)
        x = SepConv_BN(x, 256, 'decoder_conv1',
                       depth_activation=True, epsilon=1e-5)
    # you can use it with arbitary number of classes
    if (weights == 'pascal_voc' and classes == 21) or (weights == 'cityscapes' and classes == 19):
        last_layer_name = 'logits_semantic'
    else:
        last_layer_name = 'custom_logits_semantic'
    x = Conv2D(classes, (1, 1), padding='same', name=last_layer_name)(x)
    # Upsample the logits back to the input resolution.
    size_before3 = tf.keras.backend.int_shape(img_input)
    x = Lambda(lambda xx: tf.compat.v1.image.resize(xx,
                                                    size_before3[1:3],
                                                    method='bilinear', align_corners=True))(x)
    inputs = img_input
    if activation in {'softmax', 'sigmoid'}:
        x = tf.keras.layers.Activation(activation)(x)
    model = Model(inputs, x, name='deeplabv3plus')
    return model
# Model from https://github.com/bonlime/keras-deeplab-v3-plus
class TestDeepLabV3(unittest.TestCase):
    """Round-trip ONNX conversion test for DeepLabV3+."""

    def setUp(self):
        # ONNX files written during the test, cleaned up afterwards.
        self.model_files = []

    def tearDown(self):
        for path in self.model_files:
            os.remove(path)

    @unittest.skipIf(get_maximum_opset_supported() < 11,
                     "DeeplabV3 is not supported for opset < 11.")
    def test_Deeplab_v3(self):
        K.clear_session()
        keras_model = Deeplabv3(weights=None)
        batch = np.random.rand(2, 512, 512, 3).astype(np.float32)
        expected = keras_model.predict(batch)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, batch, expected, self.model_files))
# Run this test module directly via the unittest runner.
if __name__ == "__main__":
    unittest.main()
| 16,644 | 42.233766 | 117 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_nasnet_mobile.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
add = keras.layers.add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Cropping2D = keras.layers.Cropping2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
GlobalMaxPooling2D = keras.layers.GlobalMaxPooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
from keras.regularizers import l2
def _separable_conv_block(ip, filters, kernel_size=(3, 3), strides=(1, 1), weight_decay=5e-5, id=None):
    """Two stacked relu -> SeparableConv2D -> BatchNormalization stages.

    Only the first separable convolution applies `strides`; the second one
    keeps the default stride of 1.  Layer names embed `id` so blocks stay
    unique within the full NASNet graph.  Uses the module-level _BN_DECAY
    and _BN_EPSILON batch-norm constants.
    """
    bn_axis = 1 if K.image_data_format() == 'channels_first' else -1
    with K.name_scope('separable_conv_block_%s' % id):
        out = Activation('relu')(ip)
        out = SeparableConv2D(filters, kernel_size,
                              strides=strides,
                              padding='same',
                              use_bias=False,
                              kernel_initializer='he_normal',
                              kernel_regularizer=l2(weight_decay),
                              name='separable_conv_1_%s' % id)(out)
        out = BatchNormalization(axis=bn_axis, momentum=_BN_DECAY,
                                 epsilon=_BN_EPSILON,
                                 name='separable_conv_1_bn_%s' % id)(out)
        out = Activation('relu')(out)
        out = SeparableConv2D(filters, kernel_size,
                              padding='same',
                              use_bias=False,
                              kernel_initializer='he_normal',
                              kernel_regularizer=l2(weight_decay),
                              name='separable_conv_2_%s' % id)(out)
        out = BatchNormalization(axis=bn_axis, momentum=_BN_DECAY,
                                 epsilon=_BN_EPSILON,
                                 name='separable_conv_2_bn_%s' % id)(out)
    return out
def _adjust_block(p, ip, filters, weight_decay=5e-5, id=None):
    """Adjust the previous-cell tensor `p` so it is compatible with `ip`.

    Three cases:
      * p is None          -> reuse ip as the previous tensor.
      * spatial mismatch   -> factorized reduction (two strided 1x1 average
                              pools over offset crops, concatenated) to halve
                              the spatial size while producing `filters` channels.
      * channel mismatch   -> 1x1 projection convolution to `filters` channels.
    Returns the adjusted tensor.
    """
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    img_dim = 2 if K.image_data_format() == 'channels_first' else -2
    with K.name_scope('adjust_block'):
        if p is None:
            p = ip
        # NOTE(review): relies on the Keras-1-style `_keras_shape` attribute;
        # presumably this file targets an older keras — confirm before upgrading.
        elif p._keras_shape[img_dim] != ip._keras_shape[img_dim]:
            with K.name_scope('adjust_reduction_block_%s' % id):
                p = Activation('relu', name='adjust_relu_1_%s' % id)(p)
                # Path 1: strided 1x1 average pool + half-width 1x1 conv.
                p1 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_1_%s' % id)(p)
                p1 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_1_%s' % id, kernel_initializer='he_normal')(p1)
                # Path 2: same, but shifted by one pixel via pad+crop so the
                # two paths sample interleaved spatial positions.
                p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
                p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)
                p2 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_2_%s' % id)(p2)
                p2 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_2_%s' % id, kernel_initializer='he_normal')(p2)
                p = concatenate([p1, p2], axis=channel_dim)
                p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id)(p)
        elif p._keras_shape[channel_dim] != filters:
            # Channel counts differ only: 1x1 projection to `filters` channels.
            with K.name_scope('adjust_projection_block_%s' % id):
                p = Activation('relu')(p)
                p = Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name='adjust_conv_projection_%s' % id,
                           use_bias=False, kernel_regularizer=l2(weight_decay), kernel_initializer='he_normal')(p)
                p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id)(p)
    return p
def _normal_A(ip, p, filters, weight_decay=5e-5, id=None):
    """Build one NASNet "Normal" cell.

    `ip` is the current tensor, `p` the output of the cell before it.  The
    cell combines five branch pairs (separable convs and average pools) by
    element-wise addition and concatenates all branch outputs.
    Returns (cell_output, ip) so the caller can thread `p` forward.
    """
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    with K.name_scope('normal_A_block_%s' % id):
        # Make the previous-cell tensor compatible in shape/channels.
        p = _adjust_block(p, ip, filters, weight_decay, id)
        # 1x1 projection of the current input.
        h = Activation('relu')(ip)
        h = Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name='normal_conv_1_%s' % id,
                   use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(h)
        h = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                               name='normal_bn_1_%s' % id)(h)
        with K.name_scope('block_1'):
            x1_1 = _separable_conv_block(h, filters, kernel_size=(5, 5), weight_decay=weight_decay,
                                         id='normal_left1_%s' % id)
            x1_2 = _separable_conv_block(p, filters, weight_decay=weight_decay, id='normal_right1_%s' % id)
            x1 = add([x1_1, x1_2], name='normal_add_1_%s' % id)
        with K.name_scope('block_2'):
            x2_1 = _separable_conv_block(p, filters, (5, 5), weight_decay=weight_decay, id='normal_left2_%s' % id)
            x2_2 = _separable_conv_block(p, filters, (3, 3), weight_decay=weight_decay, id='normal_right2_%s' % id)
            x2 = add([x2_1, x2_2], name='normal_add_2_%s' % id)
        with K.name_scope('block_3'):
            x3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same', name='normal_left3_%s' % (id))(h)
            x3 = add([x3, p], name='normal_add_3_%s' % id)
        with K.name_scope('block_4'):
            x4_1 = AveragePooling2D((3, 3), strides=(1, 1), padding='same', name='normal_left4_%s' % (id))(p)
            x4_2 = AveragePooling2D((3, 3), strides=(1, 1), padding='same', name='normal_right4_%s' % (id))(p)
            x4 = add([x4_1, x4_2], name='normal_add_4_%s' % id)
        with K.name_scope('block_5'):
            x5 = _separable_conv_block(h, filters, weight_decay=weight_decay, id='normal_left5_%s' % id)
            x5 = add([x5, h], name='normal_add_5_%s' % id)
        # Concatenate adjusted previous tensor plus all five branch outputs.
        x = concatenate([p, x1, x2, x3, x4, x5], axis=channel_dim, name='normal_concat_%s' % id)
    return x, ip
def _reduction_A(ip, p, filters, weight_decay=5e-5, id=None):
    """Build one NASNet "Reduction" cell (halves the spatial resolution).

    Strided separable convolutions and pools reduce `ip`; five branch pairs
    are summed and four of the results are concatenated.
    Returns (cell_output, ip) so the caller can thread `p` forward.
    """
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    with K.name_scope('reduction_A_block_%s' % id):
        # Make the previous-cell tensor compatible in shape/channels.
        p = _adjust_block(p, ip, filters, weight_decay, id)
        # 1x1 projection of the current input.
        h = Activation('relu')(ip)
        h = Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name='reduction_conv_1_%s' % id,
                   use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(h)
        h = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                               name='reduction_bn_1_%s' % id)(h)
        with K.name_scope('block_1'):
            x1_1 = _separable_conv_block(h, filters, (5, 5), strides=(2, 2), weight_decay=weight_decay,
                                         id='reduction_left1_%s' % id)
            x1_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), weight_decay=weight_decay,
                                         id='reduction_1_%s' % id)
            x1 = add([x1_1, x1_2], name='reduction_add_1_%s' % id)
        with K.name_scope('block_2'):
            x2_1 = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='reduction_left2_%s' % id)(h)
            x2_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), weight_decay=weight_decay,
                                         id='reduction_right2_%s' % id)
            x2 = add([x2_1, x2_2], name='reduction_add_2_%s' % id)
        with K.name_scope('block_3'):
            x3_1 = AveragePooling2D((3, 3), strides=(2, 2), padding='same', name='reduction_left3_%s' % id)(h)
            x3_2 = _separable_conv_block(p, filters, (5, 5), strides=(2, 2), weight_decay=weight_decay,
                                         id='reduction_right3_%s' % id)
            x3 = add([x3_1, x3_2], name='reduction_add3_%s' % id)
        with K.name_scope('block_4'):
            x4 = AveragePooling2D((3, 3), strides=(1, 1), padding='same', name='reduction_left4_%s' % id)(x1)
            x4 = add([x2, x4])
        with K.name_scope('block_5'):
            x5_1 = _separable_conv_block(x1, filters, (3, 3), weight_decay=weight_decay, id='reduction_left4_%s' % id)
            x5_2 = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='reduction_right5_%s' % id)(h)
            x5 = add([x5_1, x5_2], name='reduction_add4_%s' % id)
        # Note: x1 feeds blocks 4 and 5 but is not concatenated itself.
        x = concatenate([x2, x3, x4, x5], axis=channel_dim, name='reduction_concat_%s' % id)
    return x, ip
def _add_auxiliary_head(x, classes, weight_decay):
    """Attach an auxiliary softmax classifier head to tensor `x`.

    Pool -> 1x1 projection -> full-spatial conv -> global pool -> Dense
    softmax over `classes`.  Returns the auxiliary prediction tensor.
    """
    # Indices into _keras_shape for the spatial dimensions.
    img_height = 1 if K.image_data_format() == 'channels_last' else 2
    img_width = 2 if K.image_data_format() == 'channels_last' else 3
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    with K.name_scope('auxiliary_branch'):
        auxiliary_x = Activation('relu')(x)
        auxiliary_x = AveragePooling2D((5, 5), strides=(3, 3), padding='valid', name='aux_pool')(auxiliary_x)
        auxiliary_x = Conv2D(128, (1, 1), padding='same', use_bias=False, name='aux_conv_projection',
                             kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(auxiliary_x)
        auxiliary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                         name='aux_bn_projection')(auxiliary_x)
        auxiliary_x = Activation('relu')(auxiliary_x)
        # Kernel spans the whole remaining spatial extent (valid padding),
        # collapsing it to 1x1 before the global pooling below.
        auxiliary_x = Conv2D(768, (auxiliary_x._keras_shape[img_height], auxiliary_x._keras_shape[img_width]),
                             padding='valid', use_bias=False, kernel_initializer='he_normal',
                             kernel_regularizer=l2(weight_decay), name='aux_conv_reduction')(auxiliary_x)
        auxiliary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                         name='aux_bn_reduction')(auxiliary_x)
        auxiliary_x = Activation('relu')(auxiliary_x)
        auxiliary_x = GlobalAveragePooling2D()(auxiliary_x)
        auxiliary_x = Dense(classes, activation='softmax', kernel_regularizer=l2(weight_decay),
                            name='aux_predictions')(auxiliary_x)
    return auxiliary_x
def NASNet(input_shape=None,
           penultimate_filters=4032,
           nb_blocks=6,
           stem_filters=96,
           skip_reduction=True,
           use_auxiliary_branch=False,
           filters_multiplier=2,
           dropout=0.5,
           weight_decay=5e-5,
           include_top=True,
           weights=None,
           input_tensor=None,
           pooling=None,
           classes=1000,
           default_size=None):
    """Construct a NASNet classifier as a Keras Model.

    `skip_reduction=False` selects the ImageNet/mobile layout (strided stem
    plus two stem reduction cells); `skip_reduction=True` selects the CIFAR
    layout.  `penultimate_filters` must be divisible by 24 (filters per cell
    = penultimate_filters // 24).  When `use_auxiliary_branch` is True the
    model has two outputs: [main predictions, auxiliary predictions].
    Note: `input_tensor` and `default_size` are accepted for API parity but
    only lightly used here; the input always comes from a fresh Input layer.
    """
    if K.backend() != 'tensorflow':
        raise RuntimeError('Only Tensorflow backend is currently supported, '
                           'as other backends do not support '
                           'separable convolution.')
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')
    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as ImageNet with `include_top` '
                         'as true, `classes` should be 1000')
    if default_size is None:
        default_size = 331
    # Build the graph in channels_last layout regardless of global config.
    K.set_image_data_format('channels_last')
    # NOTE(review): the previous data format is not captured — this always
    # restores 'channels_first' at the end, even if the backend was already
    # channels_last.  Looks suspicious; confirm against callers.
    old_data_format = 'channels_first'
    img_input = Input(shape=input_shape)
    assert penultimate_filters % 24 == 0, "`penultimate_filters` needs to be divisible " \
                                          "by 24."
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    filters = penultimate_filters // 24
    # Stem: strided/valid for imagenet-mobile mode, stride-1/same for CIFAR.
    if not skip_reduction:
        x = Conv2D(stem_filters, (3, 3), strides=(2, 2), padding='valid', use_bias=False, name='stem_conv1',
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(img_input)
    else:
        x = Conv2D(stem_filters, (3, 3), strides=(1, 1), padding='same', use_bias=False, name='stem_conv1',
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(img_input)
    x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                           name='stem_bn1')(x)
    p = None
    if not skip_reduction: # imagenet / mobile mode
        x, p = _reduction_A(x, p, filters // (filters_multiplier ** 2), weight_decay, id='stem_1')
        x, p = _reduction_A(x, p, filters // filters_multiplier, weight_decay, id='stem_2')
    # Stage 1: nb_blocks normal cells at the base filter count.
    for i in range(nb_blocks):
        x, p = _normal_A(x, p, filters, weight_decay, id='%d' % (i))
    x, p0 = _reduction_A(x, p, filters * filters_multiplier, weight_decay, id='reduce_%d' % (nb_blocks))
    p = p0 if not skip_reduction else p
    # Stage 2: normal cells at doubled filters.
    for i in range(nb_blocks):
        x, p = _normal_A(x, p, filters * filters_multiplier, weight_decay, id='%d' % (nb_blocks + i + 1))
    auxiliary_x = None
    if not skip_reduction: # imagenet / mobile mode
        if use_auxiliary_branch:
            auxiliary_x = _add_auxiliary_head(x, classes, weight_decay)
    x, p0 = _reduction_A(x, p, filters * filters_multiplier ** 2, weight_decay, id='reduce_%d' % (2 * nb_blocks))
    if skip_reduction: # CIFAR mode
        if use_auxiliary_branch:
            auxiliary_x = _add_auxiliary_head(x, classes, weight_decay)
    p = p0 if not skip_reduction else p
    # Stage 3: normal cells at quadrupled filters.
    for i in range(nb_blocks):
        x, p = _normal_A(x, p, filters * filters_multiplier ** 2, weight_decay, id='%d' % (2 * nb_blocks + i + 1))
    x = Activation('relu')(x)
    if include_top:
        x = GlobalAveragePooling2D()(x)
        x = Dropout(dropout)(x)
        x = Dense(classes, activation='softmax', kernel_regularizer=l2(weight_decay), name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)
    inputs = img_input
    # Create model.
    if use_auxiliary_branch:
        model = Model(inputs, [x, auxiliary_x], name='NASNet_with_auxiliary')
    else:
        model = Model(inputs, x, name='NASNet')
    if old_data_format:
        K.set_image_data_format(old_data_format)
    return model
def NASNetMobile(input_shape=(224, 224, 3),
                 dropout=0.5,
                 weight_decay=4e-5,
                 use_auxiliary_branch=False,
                 include_top=True,
                 weights='imagenet',
                 input_tensor=None,
                 pooling=None,
                 classes=1000):
    """Build the mobile NASNet configuration (NASNet-A 4@1056).

    Sets the module-level batch-norm constants consumed by the cell builders
    (_separable_conv_block, _adjust_block, ...) and delegates to NASNet with
    the mobile hyper-parameters (skip_reduction=False, 4 blocks, 32 stem
    filters, 1056 penultimate filters, 224x224 default input size).
    """
    # _BN_DECAY / _BN_EPSILON are read as globals by every block helper.
    global _BN_DECAY, _BN_EPSILON
    _BN_DECAY = 0.9997
    _BN_EPSILON = 1e-3
    return NASNet(input_shape,
                  penultimate_filters=1056,
                  nb_blocks=4,
                  stem_filters=32,
                  skip_reduction=False,
                  use_auxiliary_branch=use_auxiliary_branch,
                  filters_multiplier=2,
                  dropout=dropout,
                  weight_decay=weight_decay,
                  include_top=include_top,
                  weights=weights,
                  input_tensor=input_tensor,
                  pooling=pooling,
                  classes=classes,
                  default_size=224)
# Model from https://github.com/titu1994/neural-image-assessment/blob/master/utils/nasnet.py
class TestNASNetMobile(unittest.TestCase):
    """ONNX conversion test for the NASNet-Mobile classifier."""

    def setUp(self):
        # Temporary model files produced during a test; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        while self.model_files:
            os.remove(self.model_files.pop())

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_NASNetMobile(self):
        K.clear_session()
        model = NASNetMobile()
        batch = np.random.rand(2, 224, 224, 3).astype(np.float32)
        reference = model.predict(batch)
        converted = mock_keras2onnx.convert_keras(model, model.name)
        ok = run_keras_and_ort(converted.graph.name, converted, model,
                               batch, reference, self.model_files)
        self.assertTrue(ok)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 16,919 | 44.12 | 118 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_autoencoder.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Convolution1D = keras.layers.Convolution1D
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
GRU = keras.layers.GRU
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
RepeatVector = keras.layers.RepeatVector
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
TimeDistributed = keras.layers.TimeDistributed
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
class MoleculeVAE():
    """Variational autoencoder over one-hot encoded molecule strings.

    `create` populates three Keras models: `encoder` (sequence -> latent
    sample), `decoder` (latent -> sequence distribution) and `autoencoder`
    (end-to-end).  Inputs are (max_length, charset_length) one-hot tensors.
    """
    # Populated by create(); None until then.
    autoencoder = None
    def create(self,
               charset_length,
               max_length=120,
               latent_rep_size=292):
        """Build encoder, decoder and autoencoder models.

        NOTE(review): the autoencoder is assembled from a SECOND encoder
        (x1/z1) whose layers do not share weights with self.encoder —
        presumably acceptable for this conversion test; confirm if reusing.
        """
        x = Input(shape=(max_length, charset_length))
        z = self._buildEncoder(x, latent_rep_size, max_length)
        self.encoder = Model(x, z)
        encoded_input = Input(shape=(latent_rep_size,))
        self.decoder = Model(
            encoded_input,
            self._buildDecoder(
                encoded_input,
                latent_rep_size,
                max_length,
                charset_length
            )
        )
        x1 = Input(shape=(max_length, charset_length))
        z1 = self._buildEncoder(x1, latent_rep_size, max_length)
        self.autoencoder = Model(
            x1,
            self._buildDecoder(
                z1,
                latent_rep_size,
                max_length,
                charset_length
            )
        )
    def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std=0.01):
        """Conv stack -> dense -> reparameterized latent sample tensor."""
        h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
        h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
        h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
        h = Flatten(name='flatten_1')(h)
        h = Dense(435, activation='relu', name='dense_1')(h)
        def sampling(args):
            # Reparameterization trick: z = mean + exp(log_var / 2) * eps.
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., stddev=epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon
        z_mean = Dense(latent_rep_size, name='z_mean', activation='linear')(h)
        z_log_var = Dense(latent_rep_size, name='z_log_var', activation='linear')(h)
        return Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var])
    def _buildDecoder(self, z, latent_rep_size, max_length, charset_length):
        """Latent vector -> per-timestep softmax over the character set."""
        h = Dense(latent_rep_size, name='latent_input', activation='relu')(z)
        # Repeat the latent vector so the GRU stack sees max_length steps.
        h = RepeatVector(max_length, name='repeat_vector')(h)
        h = GRU(501, return_sequences=True, name='gru_1')(h)
        h = GRU(501, return_sequences=True, name='gru_2')(h)
        h = GRU(501, return_sequences=True, name='gru_3')(h)
        return TimeDistributed(Dense(charset_length, activation='softmax'), name='decoded_mean')(h)
    def load(self, charset_length, latent_rep_size=292):
        """Convenience wrapper: build the models with default max_length."""
        self.create(charset_length, latent_rep_size=latent_rep_size)
# Model from https://github.com/maxhodak/keras-molecules
class TestAutoEncoder(unittest.TestCase):
    """ONNX conversion test for the molecule VAE autoencoder."""

    def setUp(self):
        # Temporary model files produced during a test; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        while self.model_files:
            os.remove(self.model_files.pop())

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_autoencoder(self):
        K.clear_session()
        vae = MoleculeVAE()
        vae.load(256)
        model = vae.autoencoder
        batch = np.random.rand(2, 120, 256).astype(np.float32)
        reference = model.predict(batch)
        converted = mock_keras2onnx.convert_keras(model, model.name)
        ok = run_keras_and_ort(converted.graph.name, converted, model,
                               batch, reference, self.model_files, atol=1e-3)
        self.assertTrue(ok)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 4,717 | 33.948148 | 123 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_keras_applications.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from mock_keras2onnx.proto import keras, is_keras_older_than
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_image, test_level_0, run_keras_and_ort
img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
Activation = keras.layers.Activation
Average = keras.layers.Average
AveragePooling2D = keras.layers.AveragePooling2D
BatchNormalization = keras.layers.BatchNormalization
Bidirectional = keras.layers.Bidirectional
Concatenate = keras.layers.Concatenate
concatenate = keras.layers.concatenate
Convolution2D = keras.layers.Convolution2D
Conv1D = keras.layers.Conv1D
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling1D = keras.layers.GlobalAveragePooling1D
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
MaxPool2D = keras.layers.MaxPool2D
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Model = keras.models.Model
Sequential = keras.models.Sequential
class TestKerasApplications(unittest.TestCase):
    """ONNX conversion tests for stock Keras application models and several
    community architectures.

    Each test builds a Keras model, converts it with mock_keras2onnx and
    checks that ONNX Runtime reproduces the Keras prediction.  run_image /
    run_keras_and_ort perform the comparison and append any temporary model
    files to self.model_files, which tearDown removes.
    """

    def setUp(self):
        # Temporary ONNX model files created by a test; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def test_MobileNet(self):
        mobilenet = keras.applications.mobilenet
        model = mobilenet.MobileNet(weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    @unittest.skipIf(is_keras_older_than("2.2.3"),
                     "There is no mobilenet_v2 module before keras 2.2.3.")
    def test_MobileNetV2(self):
        mobilenet_v2 = keras.applications.mobilenet_v2
        model = mobilenet_v2.MobileNetV2(weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    def test_ResNet50(self):
        from keras.applications.resnet50 import ResNet50
        model = ResNet50(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    def test_InceptionV3(self):
        from keras.applications.inception_v3 import InceptionV3
        # InceptionV3 expects 299x299 inputs.
        model = InceptionV3(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path, target_size=299)
        self.assertTrue(*res)

    def test_DenseNet121(self):
        from keras.applications.densenet import DenseNet121
        model = DenseNet121(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    def test_Xception(self):
        from keras.applications.xception import Xception
        # Xception expects 299x299 inputs; use a looser tolerance.
        model = Xception(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path, atol=5e-3, target_size=299)
        self.assertTrue(*res)

    def test_SmileCNN(self):
        # From https://github.com/kylemcdonald/SmileCNN/blob/master/2%20Training.ipynb
        nb_filters = 32
        nb_pool = 2
        nb_conv = 3
        nb_classes = 2
        model = Sequential()
        model.add(Conv2D(nb_filters, (nb_conv, nb_conv), activation='relu', input_shape=(32, 32, 3)))
        model.add(Conv2D(nb_filters, (nb_conv, nb_conv), activation='relu'))
        model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(nb_classes, activation='softmax'))
        res = run_image(model, self.model_files, img_path, atol=5e-3, target_size=32)
        self.assertTrue(*res)

    @unittest.skipIf(is_keras_older_than("2.2.4"),
                     "keras-resnet requires keras 2.2.4 or later.")
    def test_keras_resnet_batchnormalization(self):
        # Exercises keras_resnet's frozen BatchNormalization layer.
        N, C, H, W = 2, 3, 120, 120
        import keras_resnet
        model = Sequential()
        model.add(ZeroPadding2D(padding=((3, 3), (3, 3)), input_shape=(H, W, C), data_format='channels_last'))
        model.add(Conv2D(64, kernel_size=(7, 7), strides=(2, 2), padding='valid', dilation_rate=(1, 1), use_bias=False,
                         data_format='channels_last'))
        model.add(keras_resnet.layers.BatchNormalization(freeze=True, axis=3))
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        data = np.random.rand(N, H, W, C).astype(np.float32).reshape((N, H, W, C))
        expected = model.predict(data)
        self.assertTrue(run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))

    # model from https://github.com/titu1994/Image-Super-Resolution
    def test_ExpantionSuperResolution(self):
        init = Input(shape=(32, 32, 3))
        x = Convolution2D(64, (9, 9), activation='relu', padding='same', name='level1')(init)
        x1 = Convolution2D(32, (1, 1), activation='relu', padding='same', name='lavel1_1')(x)
        x2 = Convolution2D(32, (3, 3), activation='relu', padding='same', name='lavel1_2')(x)
        x3 = Convolution2D(32, (5, 5), activation='relu', padding='same', name='lavel1_3')(x)
        x = Average()([x1, x2, x3])
        out = Convolution2D(3, (5, 5), activation='relu', padding='same', name='output')(x)
        model = keras.models.Model(init, out)
        res = run_image(model, self.model_files, img_path, atol=5e-3, target_size=32)
        self.assertTrue(*res)

    def test_tcn(self):
        from tcn import TCN
        # batch_size None -> variable batch; actual batch is 3 at predict time.
        batch_size, timesteps, input_dim = None, 20, 1
        actual_batch_size = 3
        i = Input(batch_shape=(batch_size, timesteps, input_dim))
        np.random.seed(1000)  # set the random seed to avoid the output result discrepancies.
        for return_sequences in [True, False]:
            o = TCN(return_sequences=return_sequences)(i)  # The TCN layers are here.
            o = Dense(1)(o)
            model = keras.models.Model(inputs=[i], outputs=[o])
            onnx_model = mock_keras2onnx.convert_keras(model, model.name)
            data = np.random.rand(actual_batch_size, timesteps, input_dim).astype(np.float32).reshape((actual_batch_size, timesteps, input_dim))
            expected = model.predict(data)
            self.assertTrue(run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))

    # model from https://github.com/titu1994/LSTM-FCN
    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_lstm_fcn(self):
        # BUGFIX: LSTM is not aliased at module level alongside the other
        # keras.layers names, so referencing it bare raised NameError when
        # this test actually ran.  Bind it locally from the keras module.
        LSTM = keras.layers.LSTM
        MAX_SEQUENCE_LENGTH = 176
        NUM_CELLS = 8
        NB_CLASS = 37
        ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))
        x = LSTM(NUM_CELLS)(ip)
        x = Dropout(0.8)(x)
        y = Permute((2, 1))(ip)
        y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = GlobalAveragePooling1D()(y)
        x = concatenate([x, y])
        out = Dense(NB_CLASS, activation='softmax')(x)
        model = Model(ip, out)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        batch_size = 2
        data = np.random.rand(batch_size, 1, MAX_SEQUENCE_LENGTH).astype(np.float32).reshape(batch_size, 1, MAX_SEQUENCE_LENGTH)
        expected = model.predict(data)
        self.assertTrue(run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))

    # model from https://github.com/CyberZHG/keras-self-attention
    @unittest.skipIf(test_level_0 or get_maximum_opset_supported() < 11,
                     "Test level 0 only.")
    def test_keras_self_attention(self):
        from keras_self_attention import SeqSelfAttention
        keras.backend.clear_session()
        model = keras.models.Sequential()
        model.add(keras.layers.Embedding(input_dim=10000,
                                         output_dim=300,
                                         mask_zero=True))
        model.add(keras.layers.Bidirectional(keras.layers.LSTM(units=128,
                                                               return_sequences=True)))
        model.add(SeqSelfAttention(attention_activation='sigmoid'))
        model.add(keras.layers.Dense(units=5))
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        data = np.random.rand(5, 10).astype(np.float32).reshape(5, 10)
        expected = model.predict(data)
        self.assertTrue(run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))

    # Model from https://github.com/chandrikadeb7/Face-Mask-Detection
    @unittest.skipIf(test_level_0 or is_keras_older_than("2.2.3"),
                     "There is no mobilenet_v2 module before keras 2.2.3.")
    def test_FaceMaskDetection(self):
        mobilenet_v2 = keras.applications.mobilenet_v2
        baseModel = mobilenet_v2.MobileNetV2(weights=None, include_top=False,
                                             input_tensor=Input(shape=(224, 224, 3)))
        headModel = baseModel.output
        headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
        headModel = Flatten(name="flatten")(headModel)
        headModel = Dense(128, activation="relu")(headModel)
        headModel = Dropout(0.5)(headModel)
        headModel = Dense(2, activation="softmax")(headModel)
        model = Model(inputs=baseModel.input, outputs=headModel)
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res)

    # Model from https://github.com/abhishekrana/DeepFashion
    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_DeepFashion(self):
        # Two-headed VGG16: classification head and IOU regression head.
        base_model = keras.applications.VGG16(weights=None, include_top=False, input_shape=(224, 224, 3))
        model_inputs = base_model.input
        common_inputs = base_model.output
        dropout_rate = 0.5
        output_classes = 20
        x = Flatten()(common_inputs)
        x = Dense(256, activation='tanh')(x)
        x = Dropout(dropout_rate)(x)
        predictions_class = Dense(output_classes, activation='softmax', name='predictions_class')(x)
        ## Model (Regression) IOU score
        x = Flatten()(common_inputs)
        x = Dense(256, activation='tanh')(x)
        x = Dropout(dropout_rate)(x)
        x = Dense(256, activation='tanh')(x)
        x = Dropout(dropout_rate)(x)
        predictions_iou = Dense(1, activation='sigmoid', name='predictions_iou')(x)
        ## Create Model
        keras_model = Model(inputs=model_inputs, outputs=[predictions_class, predictions_iou])
        res = run_image(keras_model, self.model_files, img_path, atol=5e-3, target_size=224)
        self.assertTrue(*res)

    # Model from https://github.com/manicman1999/Keras-BiGAN
    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_bigan_generator(self):
        def g_block(inp, fil, u=True):
            # Residual generator block; `u` toggles bilinear upsampling.
            if u:
                out = UpSampling2D(interpolation='bilinear')(inp)
            else:
                out = Activation('linear')(inp)
            skip = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(out)
            out = Conv2D(filters=fil, kernel_size=3, padding='same', kernel_initializer='he_normal')(out)
            out = LeakyReLU(0.2)(out)
            out = Conv2D(filters=fil, kernel_size=3, padding='same', kernel_initializer='he_normal')(out)
            out = LeakyReLU(0.2)(out)
            out = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(out)
            out = keras.layers.add([out, skip])
            out = LeakyReLU(0.2)(out)
            return out
        latent_size = 64
        cha = 16
        inp = Input(shape = [latent_size])
        x = Dense(4*4*16*cha, kernel_initializer = 'he_normal')(inp)
        x = Reshape([4, 4, 16*cha])(x)
        x = g_block(x, 16 * cha, u = False)  #4
        x = g_block(x, 8 * cha)  #8
        x = g_block(x, 4 * cha)  #16
        x = g_block(x, 3 * cha)  #32
        x = g_block(x, 2 * cha)  #64
        x = g_block(x, 1 * cha)  #128
        x = Conv2D(filters = 3, kernel_size = 1, activation = 'sigmoid', padding = 'same', kernel_initializer = 'he_normal')(x)
        model = Model(inputs = inp, outputs = x)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        data = np.random.rand(200, latent_size).astype(np.float32).reshape(200, latent_size)
        expected = model.predict(data)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))

    # Model from https://github.com/ankur219/ECG-Arrhythmia-classification
    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_ecg_classification(self):
        model = Sequential()
        model.add(Conv2D(64, (3, 3), strides=(1, 1), input_shape=[128, 128, 3], kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(Conv2D(64, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(Conv2D(128, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(Conv2D(128, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(Conv2D(256, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(Conv2D(256, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform'))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(Flatten())
        model.add(Dense(2048))
        model.add(keras.layers.ELU())
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(7, activation='softmax'))
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        data = np.random.rand(2, 128, 128, 3).astype(np.float32)
        expected = model.predict(data)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))

    # Model from https://github.com/arunponnusamy/gender-detection-keras
    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_gender_detection(self):
        model = Sequential()
        inputShape = (224, 224, 3)
        chanDim = -1
        model.add(Conv2D(32, (3,3), padding="same", input_shape=inputShape))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(3,3)))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, (3,3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(64, (3,3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2,2)))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, (3,3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(128, (3,3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2,2)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1024))
        model.add(Activation("relu"))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(80))
        model.add(Activation("sigmoid"))
        res = run_image(model, self.model_files, img_path, atol=5e-3, target_size=224)
        self.assertTrue(*res)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 16,893 | 42.096939 | 144 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_resume_parser.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
Bidirectional = keras.layers.Bidirectional
concatenate = keras.layers.concatenate
Conv1D = keras.layers.Conv1D
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
GlobalMaxPool1D = keras.layers.GlobalMaxPool1D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
LSTM = keras.layers.LSTM
MaxPooling1D = keras.layers.MaxPooling1D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
SpatialDropout1D = keras.layers.SpatialDropout1D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# Model from https://github.com/chen0040/keras-english-resume-parser-and-analyzer
class TestResumeParser(unittest.TestCase):
    """ONNX-conversion tests for the text-classification architectures from
    https://github.com/chen0040/keras-english-resume-parser-and-analyzer.

    Each test builds a model, converts it with mock_keras2onnx and checks
    that ONNX Runtime reproduces the Keras predictions.
    """

    def setUp(self):
        # Converted-model files created during a test; deleted in tearDown.
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_1D_CNN(self):
        """Embedding -> Conv1D -> global max pool text classifier."""
        K.clear_session()
        vocab_size = 50
        max_len = 20
        embedding_size = 100
        model = Sequential()
        model.add(Embedding(input_dim=vocab_size, input_length=max_len, output_dim=embedding_size))
        model.add(SpatialDropout1D(0.2))
        model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu'))
        model.add(GlobalMaxPool1D())
        model.add(Dense(units=30, activation='softmax'))
        # NOTE(review): random floats are fed into an Embedding layer; they are
        # truncated to integer indices at predict time, so this exercises the
        # conversion path rather than meaningful embedding lookups.
        data = np.random.rand(1000, max_len).astype(np.float32)
        expected = model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_Multi_Channel_CNN(self):
        """Three parallel Conv1D channels (kernel sizes 4/6/8) merged by concat."""
        K.clear_session()
        embedding_size = 100
        cnn_filter_size = 32
        length = 20
        vocab_size = 50
        # Channel 1: kernel size 4.
        inputs1 = Input(shape=(length,))
        embedding1 = Embedding(vocab_size, embedding_size)(inputs1)
        conv1 = Conv1D(filters=cnn_filter_size, kernel_size=4, activation='relu')(
            embedding1)
        drop1 = Dropout(0.5)(conv1)
        pool1 = MaxPooling1D(pool_size=2)(drop1)
        flat1 = Flatten()(pool1)
        # Channel 2: kernel size 6.
        inputs2 = Input(shape=(length,))
        embedding2 = Embedding(vocab_size, embedding_size)(inputs2)
        conv2 = Conv1D(filters=cnn_filter_size, kernel_size=6, activation='relu')(
            embedding2)
        drop2 = Dropout(0.5)(conv2)
        pool2 = MaxPooling1D(pool_size=2)(drop2)
        flat2 = Flatten()(pool2)
        # Channel 3: kernel size 8.
        inputs3 = Input(shape=(length,))
        embedding3 = Embedding(vocab_size, embedding_size)(inputs3)
        conv3 = Conv1D(filters=cnn_filter_size, kernel_size=8, activation='relu')(
            embedding3)
        drop3 = Dropout(0.5)(conv3)
        pool3 = MaxPooling1D(pool_size=2)(drop3)
        flat3 = Flatten()(pool3)
        merged = concatenate([flat1, flat2, flat3])
        # interpretation
        dense1 = Dense(10, activation='relu')(merged)
        outputs = Dense(units=30, activation='softmax')(dense1)
        model = Model(inputs=[inputs1, inputs2, inputs3], outputs=outputs)
        batch_size = 2
        data0 = np.random.rand(batch_size, length).astype(np.float32)
        data1 = np.random.rand(batch_size, length).astype(np.float32)
        data2 = np.random.rand(batch_size, length).astype(np.float32)
        expected = model.predict([data0, data1, data2])
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, [data0, data1, data2], expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_CNN_LSTM(self):
        """Conv1D feature extractor followed by an LSTM classifier."""
        K.clear_session()
        max_len = 20
        vocab_size = 50
        lstm_output_size = 70
        embedding_size = 100
        model = Sequential()
        model.add(Embedding(input_dim=vocab_size, input_length=max_len, output_dim=embedding_size))
        model.add(SpatialDropout1D(0.2))
        model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu'))
        model.add(MaxPooling1D(pool_size=4))
        model.add(LSTM(lstm_output_size))
        model.add(Dense(units=30, activation='softmax'))
        data = np.random.rand(2, max_len).astype(np.float32)
        expected = model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_Bidirectional_LSTM(self):
        """Bidirectional LSTM over embedded tokens."""
        K.clear_session()
        max_len = 20
        vocab_size = 50
        embedding_size = 100
        model = Sequential()
        model.add(Embedding(input_dim=vocab_size, output_dim=embedding_size, input_length=max_len))
        model.add(SpatialDropout1D(0.2))
        model.add(
            Bidirectional(LSTM(units=64, dropout=0.2, recurrent_dropout=0.2, input_shape=(max_len, embedding_size))))
        model.add(Dense(30, activation='softmax'))
        data = np.random.rand(2, max_len).astype(np.float32)
        expected = model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, data, expected, self.model_files))
# Allow running this test module directly (python <file>) as well as via pytest.
if __name__ == "__main__":
    unittest.main()
| 6,421 | 37.22619 | 123 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_deepface.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime, test_level_0
Activation = keras.layers.Activation
add = keras.layers.add
Add = keras.layers.Add
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Convolution2D = keras.layers.Convolution2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Flatten = keras.layers.Flatten
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
LocallyConnected2D = keras.layers.LocallyConnected2D
MaxPooling2D = keras.layers.MaxPooling2D
Multiply = keras.layers.Multiply
Reshape = keras.layers.Reshape
Sequential = keras.models.Sequential
Model = keras.models.Model
# model from https://github.com/serengil/deepface
class TestDeepFace(unittest.TestCase):
    """ONNX-conversion tests for face-recognition models from
    https://github.com/serengil/deepface."""

    def setUp(self):
        # Converted-model files created during a test; deleted in tearDown.
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skip("LocallyConnected2D conversion is slow.")
    def test_DeepFace(self):
        """DeepFace architecture (conv + locally-connected layers)."""
        base_model = Sequential()
        base_model.add(Convolution2D(32, (11, 11), activation='relu', name='C1', input_shape=(152, 152, 3)))
        base_model.add(MaxPooling2D(pool_size=3, strides=2, padding='same', name='M2'))
        base_model.add(Convolution2D(16, (9, 9), activation='relu', name='C3'))
        base_model.add(LocallyConnected2D(16, (9, 9), activation='relu', name='L4'))
        base_model.add(LocallyConnected2D(16, (7, 7), strides=2, activation='relu', name='L5'))
        base_model.add(LocallyConnected2D(16, (5, 5), activation='relu', name='L6'))
        base_model.add(Flatten(name='F0'))
        base_model.add(Dense(4096, activation='relu', name='F7'))
        base_model.add(Dropout(rate=0.5, name='D0'))
        base_model.add(Dense(8631, activation='softmax', name='F8'))
        data = np.random.rand(1, 152, 152, 3).astype(np.float32)
        expected = base_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(base_model, base_model.name, debug_mode=True)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, data, expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_DeepID(self):
        """DeepID: two-branch head (fc11 from Pool3, fc12 from Conv4) summed.

        NOTE(review): ``Dropout(rate=1)`` drops everything at training time;
        dropout is the identity at inference, so predictions are unaffected
        here -- presumably copied from upstream, confirm before reuse.
        """
        myInput = Input(shape=(55, 47, 3))
        x = Conv2D(20, (4, 4), name='Conv1', activation='relu', input_shape=(55, 47, 3))(myInput)
        x = MaxPooling2D(pool_size=2, strides=2, name='Pool1')(x)
        x = Dropout(rate=1, name='D1')(x)
        x = Conv2D(40, (3, 3), name='Conv2', activation='relu')(x)
        x = MaxPooling2D(pool_size=2, strides=2, name='Pool2')(x)
        x = Dropout(rate=1, name='D2')(x)
        x = Conv2D(60, (3, 3), name='Conv3', activation='relu')(x)
        x = MaxPooling2D(pool_size=2, strides=2, name='Pool3')(x)
        x = Dropout(rate=1, name='D3')(x)
        # Branch 1: flatten the pooled features.
        x1 = Flatten()(x)
        fc11 = Dense(160, name='fc11')(x1)
        # Branch 2: one more conv before flattening.
        x2 = Conv2D(80, (2, 2), name='Conv4', activation='relu')(x)
        x2 = Flatten()(x2)
        fc12 = Dense(160, name='fc12')(x2)
        y = Add()([fc11, fc12])
        y = Activation('relu', name='deepid')(y)
        keras_model = Model(inputs=[myInput], outputs=y)
        data = np.random.rand(50, 55, 47, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, data, expected, self.model_files))
# Allow running this test module directly (python <file>) as well as via pytest.
if __name__ == "__main__":
    unittest.main()
| 3,815 | 37.16 | 108 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_semantic_embeddings.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from keras import regularizers
from keras.engine import Layer, InputSpec
from keras.utils import layer_utils, conv_utils
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
try:
from keras.utils.conv_utils import normalize_data_format
except ImportError:
from keras.backend import normalize_data_format
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Convolution2D = keras.layers.Convolution2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
GlobalMaxPooling2D = keras.layers.GlobalMaxPooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
class ChannelPadding(Layer):
    """Zero-pad the channel (last) axis of a 4-D tensor.

    Used to widen a residual shortcut's channel dimension so it can be added
    to a block output with more filters.  ``padding`` is normalized to a
    ``(before, after)`` pair of channel counts.

    Fix: the original ``compute_output_shape`` hard-coded ``axis = -1`` and
    then carried an unreachable ``if axis == 1`` (channels_first) branch;
    that dead code is removed -- behavior is unchanged.
    """

    def __init__(self, padding=1, data_format=None, **kwargs):
        super(ChannelPadding, self).__init__(**kwargs)
        # Normalize scalar or tuple padding to a (before, after) pair.
        self.padding = conv_utils.normalize_tuple(padding, 2, 'padding')
        self.data_format = normalize_data_format(data_format)
        self.input_spec = InputSpec(ndim=4)

    def compute_output_shape(self, input_shape):
        # Padding is always applied to the last axis (channels_last layout).
        if input_shape[-1] is None:
            return input_shape
        length = input_shape[-1] + self.padding[0] + self.padding[1]
        return input_shape[:-1] + (length,)

    def call(self, inputs):
        # Pad only the channel axis; every other axis gets (0, 0).
        pattern = [[0, 0] for _ in range(len(inputs.shape))]
        pattern[-1] = self.padding
        return K.tf.pad(inputs, pattern)

    def get_config(self):
        config = {'padding': self.padding, 'data_format': self.data_format}
        base_config = super(ChannelPadding, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
def simple_block(input_tensor, filters, prefix, kernel_size=3, stride=1,
                 regularizer=None, activation='relu', conv_shortcut=False, bn=True):
    """Two-convolution residual block with a configurable shortcut.

    ``filters`` is a pair ``[in_channels, out_channels]``; both convolutions
    use ``filters[1]``, while ``filters[0]`` only drives the shortcut logic.
    If ``conv_shortcut`` is set and the channel count changes, the shortcut
    is a 1x1 convolution; otherwise the shortcut is average-pooled (when
    strided) and zero-padded on the channel axis via ``ChannelPadding``.
    ``prefix`` is embedded into all layer names, ``bn`` toggles batch norm.
    Returns the activated sum of the main path and the shortcut.
    """
    bn_axis = 3  # channels_last
    conv_name_base = 'res' + prefix
    bn_name_base = 'bn' + prefix
    # Main path: conv (possibly strided) -> [BN] -> act -> conv -> [BN].
    x = Conv2D(filters[1], kernel_size, padding='same', strides=(stride, stride),
               kernel_regularizer=regularizer,
               name=conv_name_base + 'x')(input_tensor)
    if bn:
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + 'x')(x)
    x = Activation(activation)(x)
    x = Conv2D(filters[1], kernel_size, padding='same',
               kernel_regularizer=regularizer,
               name=conv_name_base + 'y')(x)
    if bn:
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + 'y')(x)
    shortcut = input_tensor
    if (filters[0] != filters[1]) and conv_shortcut:
        # Projection shortcut: 1x1 conv matches channels and stride.
        shortcut = Conv2D(filters[1], (1, 1), strides=(stride, stride),
                          kernel_regularizer=regularizer,
                          name=conv_name_base + 'z')(shortcut)
        if bn:
            shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + 'z')(shortcut)
    else:
        # Parameter-free shortcut: pool to match stride, zero-pad channels.
        if stride > 1:
            shortcut = AveragePooling2D((stride, stride), name='avg' + prefix)(shortcut)
        if filters[0] < filters[1]:
            shortcut = ChannelPadding(
                ((filters[1] - filters[0]) // 2, filters[1] - filters[0] - (filters[1] - filters[0]) // 2),
                name='pad' + prefix)(shortcut)
    x = keras.layers.add([x, shortcut])
    x = Activation(activation)(x)
    return x
def unit(input_tensor, filters, n, prefix, kernel_size=3, stride=1, **kwargs):
    """Stack ``n`` simple residual blocks; only the first one may change
    stride or channel count, the rest keep ``filters[1]`` channels."""
    out = simple_block(input_tensor, filters, prefix + '1',
                       kernel_size=kernel_size, stride=stride, **kwargs)
    inner_filters = [filters[1], filters[1]]
    for block_no in range(2, n + 1):
        out = simple_block(out, inner_filters, prefix + str(block_no),
                           kernel_size=kernel_size, **kwargs)
    return out
def SmallResNet(n=9, filters=[16, 32, 64],
                include_top=True, weights=None,
                input_tensor=None, input_shape=None,
                pooling='avg', regularizer=regularizers.l2(0.0002), activation='relu',
                top_activation='softmax',
                conv_shortcut=False, bn=True,
                classes=100, name=None):
    """Build a CIFAR-style small ResNet with ``2 * len(filters) * n`` layers.

    Each entry of ``filters`` becomes one stage of ``n`` simple residual
    blocks; stages after the first downsample with stride 2.
    NOTE(review): ``weights`` is accepted but never used here -- presumably
    kept for keras.applications API parity; confirm.
    """
    # Determine proper input shape
    if input_shape is None:
        input_shape = (32, 32, 3) if include_top and pooling is None else (None, None, 3)
    # Build network
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    bn_axis = 3  # channels_last
    # Stem: 3x3 conv -> [BN] -> activation.
    x = Conv2D(filters[0], (3, 3), padding='same', name='conv0', kernel_regularizer=regularizer)(img_input)
    if bn:
        x = BatchNormalization(axis=bn_axis, name='bn0')(x)
    x = Activation(activation)(x)
    # Stage 1 keeps resolution; later stages halve it (stride 2).
    x = unit(x, [filters[0], filters[0]], n, '1-', kernel_size=3, stride=1, regularizer=regularizer,
             activation=activation, conv_shortcut=conv_shortcut, bn=bn)
    for i in range(1, len(filters)):
        x = unit(x, [filters[i - 1], filters[i]], n, str(i + 1) + '-', kernel_size=3, stride=2, regularizer=regularizer,
                 activation=activation, conv_shortcut=conv_shortcut, bn=bn)
    if pooling == 'avg':
        x = GlobalAveragePooling2D(name='avg_pool')(x)
    elif pooling == 'max':
        x = GlobalMaxPooling2D(name='max_pool')(x)
    if include_top:
        # With top_activation=None the head acts as an embedding layer.
        x = Dense(classes, activation=top_activation, name='embedding' if top_activation is None else 'prob',
                  kernel_regularizer=regularizer)(x)
    inputs = img_input
    # Create model.
    model = Model(inputs, x, name='cifar-resnet{}'.format(2 * len(filters) * n) if name is None else name)
    return model
def PyramidNet(depth, alpha, bottleneck=True,
               include_top=True, weights=None,
               input_tensor=None, input_shape=None,
               pooling='avg', regularizer=regularizers.l2(0.0002),
               activation='relu', top_activation='softmax',
               classes=100, name=None):
    """Build a PyramidNet: channel count grows linearly by ``alpha`` over the
    network's ``3 * n`` blocks instead of doubling per stage.

    ``depth`` and ``bottleneck`` determine the per-stage block count ``n``.
    Fix: removed the unused local ``channels = 16`` from the original
    (only ``start_channel`` is ever read).
    NOTE(review): ``weights`` is accepted but never used -- presumably kept
    for keras.applications API parity; confirm.
    """
    def shortcut(x, n, stride):
        # Parameter-free shortcut: average-pool for stride, zero-pad channels.
        if stride > 1:
            x = AveragePooling2D(stride)(x)
        input_channels = int(x.shape[-1])
        if input_channels < n:
            x = ChannelPadding((0, n - input_channels))(x)
        return x

    def basic_block(x, n, stride):
        # Pre-activation basic block: BN -> conv -> BN -> act -> conv -> BN.
        s = BatchNormalization()(x)
        s = Conv2D(n, (3, 3), strides=stride, padding='same', kernel_initializer='glorot_normal',
                   kernel_regularizer=regularizer)(s)
        s = BatchNormalization()(s)
        s = Activation(activation)(s)
        s = Conv2D(n, (3, 3), padding='same', kernel_initializer='glorot_normal', kernel_regularizer=regularizer)(s)
        s = BatchNormalization()(s)
        return keras.layers.add([s, shortcut(x, n, stride)])

    def bottleneck_block(x, n, stride):
        # Bottleneck: 1x1 reduce -> 3x3 (strided) -> 1x1 expand to 4*n.
        s = BatchNormalization()(x)
        s = Conv2D(n, (1, 1), kernel_initializer='glorot_normal', kernel_regularizer=regularizer)(s)
        s = BatchNormalization()(s)
        s = Activation(activation)(s)
        s = Conv2D(n, (3, 3), strides=stride, padding='same', kernel_initializer='glorot_normal',
                   kernel_regularizer=regularizer)(s)
        s = BatchNormalization()(s)
        s = Activation(activation)(s)
        s = Conv2D(n * 4, (1, 1), kernel_initializer='glorot_normal', kernel_regularizer=regularizer)(s)
        s = BatchNormalization()(s)
        return keras.layers.add([s, shortcut(x, n * 4, stride)])

    def unit(x, features, count, stride):
        # Apply `count` blocks of the configured kind at the given stride.
        block = bottleneck_block if bottleneck else basic_block
        for i in range(count):
            x = block(x, features, stride)
        return x

    # Derived parameters
    n = (depth - 2) // 9 if bottleneck else (depth - 2) // 6
    start_channel = 16
    add_channel = float(alpha) / (3 * n)  # per-block channel increment
    # Determine proper input shape
    if input_shape is None:
        input_shape = (32, 32, 3) if include_top else (None, None, 3)
    # Build network
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    bn_axis = 3  # channels_last
    x = Conv2D(start_channel, (3, 3), padding='same', name='conv0', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizer)(img_input)
    x = BatchNormalization(axis=bn_axis, name='bn0')(x)
    # Three stages of n blocks each; channel count grows by add_channel per
    # block, and stages after the first start with a stride-2 block.
    for b in range(3):
        start_channel += add_channel
        x = unit(x, round(start_channel), 1, 1 if b == 0 else 2)
        for i in range(1, n):
            start_channel += add_channel
            x = unit(x, round(start_channel), 1, 1)
    x = BatchNormalization(axis=bn_axis, name='bn4')(x)
    x = Activation(activation, name='act4')(x)
    # Final pooling
    if pooling == 'avg':
        x = GlobalAveragePooling2D(name='avg_pool')(x)
    elif pooling == 'max':
        x = GlobalMaxPooling2D(name='max_pool')(x)
    # Top layer
    if include_top:
        # With top_activation=None the head acts as an embedding layer.
        x = Dense(classes, activation=top_activation, name='embedding' if top_activation is None else 'prob',
                  kernel_regularizer=regularizer)(x)
    inputs = img_input
    # Create model.
    model = Model(inputs, x, name='pyramidnet-{}-{}'.format(depth, alpha) if name is None else name)
    return model
def PlainNet(output_dim,
             filters=[64, 64, 'ap', 128, 128, 128, 'ap', 256, 256, 256, 'ap', 512, 'gap', 'fc512'],
             activation='relu',
             regularizer=keras.regularizers.l2(0.0005),
             final_activation=None,
             input_shape=(None, None, 3),
             pool_size=(2, 2),
             name=None):
    """Build a VGG-like plain CNN from a layer-spec list.

    ``filters`` entries: an int adds a 3x3 conv + BN with that many filters;
    'mp'/'ap' add max/average pooling; 'gap' adds global average pooling;
    'fcN' adds a Dense(N) + BN.  The head is ``Dense(output_dim)`` named
    'prob' when ``final_activation='softmax'``, else 'embedding'.
    NOTE: the mutable default ``filters`` list is only read, never mutated,
    so the shared-default pitfall does not bite here.
    """
    prefix = '' if name is None else name + '_'
    flattened = False  # tracks whether a Flatten/GAP has already been added
    layers = [
        keras.layers.Conv2D(filters[0], (3, 3), padding='same', activation=activation, kernel_regularizer=regularizer,
                            input_shape=input_shape, name=prefix + 'conv1'),
        keras.layers.BatchNormalization(name=prefix + 'bn1')
    ]
    for i, f in enumerate(filters[1:], start=2):
        if f == 'mp':
            layers.append(keras.layers.MaxPooling2D(pool_size=pool_size, name='{}mp{}'.format(prefix, i)))
        elif f == 'ap':
            layers.append(keras.layers.AveragePooling2D(pool_size=pool_size, name='{}ap{}'.format(prefix, i)))
        elif f == 'gap':
            layers.append(keras.layers.GlobalAvgPool2D(name=prefix + 'avg_pool'))
            flattened = True
        elif isinstance(f, str) and f.startswith('fc'):
            if not flattened:
                layers.append(keras.layers.Flatten(name=prefix + 'flatten'))
                flattened = True
            layers.append(keras.layers.Dense(int(f[2:]), activation=activation, kernel_regularizer=regularizer,
                                             name='{}fc{}'.format(prefix, i)))
            layers.append(keras.layers.BatchNormalization(name='{}bn{}'.format(prefix, i)))
        else:
            layers.append(
                keras.layers.Conv2D(f, (3, 3), padding='same', activation=activation, kernel_regularizer=regularizer,
                                    name='{}conv{}'.format(prefix, i)))
            layers.append(keras.layers.BatchNormalization(name='{}bn{}'.format(prefix, i)))
    if not flattened:
        layers.append(keras.layers.Flatten(name=prefix + 'flatten'))
        flattened = True
    layers.append(keras.layers.Dense(output_dim, activation=final_activation,
                                     name=prefix + ('prob' if final_activation == 'softmax' else 'embedding')))
    return keras.models.Sequential(layers, name=name)
def initial_conv(input):
    """Stem of the wide residual network: 16-filter 3x3 conv, BN, ReLU."""
    net = Convolution2D(16, (3, 3), padding='same',
                        kernel_initializer='he_normal', use_bias=False)(input)
    net = BatchNormalization(axis=-1, momentum=0.1, epsilon=1e-5,
                             gamma_initializer='uniform')(net)
    return Activation('relu')(net)
def expand_conv(init, base, k, strides=(1, 1)):
    """Channel-expanding residual block: two 3x3 convs on the main path and
    a strided 1x1 projection on the shortcut, summed."""
    main = Convolution2D(base * k, (3, 3), padding='same', strides=strides,
                         kernel_initializer='he_normal', use_bias=False)(init)
    main = BatchNormalization(axis=-1, momentum=0.1, epsilon=1e-5,
                              gamma_initializer='uniform')(main)
    main = Activation('relu')(main)
    main = Convolution2D(base * k, (3, 3), padding='same',
                         kernel_initializer='he_normal', use_bias=False)(main)
    # Projection shortcut matches the new channel count and stride.
    projection = Convolution2D(base * k, (1, 1), padding='same', strides=strides,
                               kernel_initializer='he_normal', use_bias=False)(init)
    return Add()([main, projection])
def conv_block(input, base, k=1, dropout=0.0):
    """Pre-activation residual block with identity shortcut and optional
    dropout between the two 3x3 convolutions."""
    shortcut = input
    net = BatchNormalization(axis=-1, momentum=0.1, epsilon=1e-5,
                             gamma_initializer='uniform')(input)
    net = Activation('relu')(net)
    net = Convolution2D(base * k, (3, 3), padding='same',
                        kernel_initializer='he_normal', use_bias=False)(net)
    if dropout > 0.0:
        net = Dropout(dropout)(net)
    net = BatchNormalization(axis=-1, momentum=0.1, epsilon=1e-5,
                             gamma_initializer='uniform')(net)
    net = Activation('relu')(net)
    net = Convolution2D(base * k, (3, 3), padding='same',
                        kernel_initializer='he_normal', use_bias=False)(net)
    return Add()([shortcut, net])
def create_wide_residual_network(input_dim, nb_classes=100, N=2, k=1, dropout=0.0, final_activation='softmax',
                                 verbose=1, name=None):
    """Build a wide residual network (WRN) with width multiplier ``k`` and
    ``N`` blocks per stage over base widths 16/32/64.

    Fix: removed the ``nb_conv`` counter from the original -- it was
    incremented but never read.  ``verbose`` is kept only for interface
    compatibility; it is unused in this implementation.
    """
    channel_axis = -1  # channels_last
    ip = Input(shape=input_dim)
    x = initial_conv(ip)
    # Three stages; stages after the first downsample with stride 2 in the
    # expanding block, then append N-1 identity residual blocks.
    for block_index, base in enumerate([16, 32, 64]):
        x = expand_conv(x, base, k, strides=(2, 2) if block_index > 0 else (1, 1))
        for i in range(N - 1):
            x = conv_block(x, base, k, dropout)
    x = BatchNormalization(axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer='uniform')(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    # With final_activation != 'softmax' the head acts as an embedding layer.
    x = Dense(nb_classes, activation=final_activation, name='prob' if final_activation == 'softmax' else 'embedding')(x)
    model = Model(ip, x, name=name)
    return model
# Models from https://github.com/cvjena/semantic-embeddings
class TestSemanticEmbeddings(unittest.TestCase):
    """ONNX-conversion tests for the architectures defined above (from
    https://github.com/cvjena/semantic-embeddings)."""

    def setUp(self):
        # Converted-model files created during a test; deleted in tearDown.
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_PyramidNet(self):
        K.clear_session()
        keras_model = PyramidNet(272, 200)
        data = np.random.rand(1, 32, 32, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_SmallResNet(self):
        K.clear_session()
        keras_model = SmallResNet()
        data = np.random.rand(20, 32, 32, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_PlainNet(self):
        K.clear_session()
        keras_model = PlainNet(100)
        data = np.random.rand(200, 32, 32, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_wide_residual_network(self):
        K.clear_session()
        keras_model = create_wide_residual_network(input_dim=(32, 32, 3))
        data = np.random.rand(200, 32, 32, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))
# Allow running this test module directly (python <file>) as well as via pytest.
if __name__ == "__main__":
    unittest.main()
| 17,495 | 37.537445 | 120 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_dualgan.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_onnx_runtime
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/dualgan/dualgan.py
class DUALGAN():
    """DualGAN wiring (model construction only, no training) from
    https://github.com/eriklindernoren/Keras-GAN/blob/master/dualgan/dualgan.py.

    Builds two generators (A->B and B->A), two discriminators, and a
    ``combined`` model whose outputs are the discriminator validities of the
    translated images plus the cycle reconstructions.
    """

    def __init__(self):
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        # Images are handled as flat vectors of length rows * cols.
        self.img_dim = self.img_rows*self.img_cols

        # Build and compile the discriminators
        self.D_A = self.build_discriminator()
        self.D_B = self.build_discriminator()

        #-------------------------
        # Construct Computational
        #  Graph of Generators
        #-------------------------

        # Build the generators
        self.G_AB = self.build_generator()
        self.G_BA = self.build_generator()

        # For the combined model we will only train the generators
        self.D_A.trainable = False
        self.D_B.trainable = False

        # The generator takes images from their respective domains as inputs
        imgs_A = Input(shape=(self.img_dim,))
        imgs_B = Input(shape=(self.img_dim,))

        # Generators translates the images to the opposite domain
        fake_B = self.G_AB(imgs_A)
        fake_A = self.G_BA(imgs_B)

        # The discriminators determines validity of translated images
        valid_A = self.D_A(fake_A)
        valid_B = self.D_B(fake_B)

        # Generators translate the images back to their original domain
        recov_A = self.G_BA(fake_B)
        recov_B = self.G_AB(fake_A)

        # The combined model (stacked generators and discriminators)
        self.combined = Model(inputs=[imgs_A, imgs_B], outputs=[valid_A, valid_B, recov_A, recov_B])

    def build_generator(self):
        """MLP generator: 256 -> 512 -> 1024 -> img_dim (tanh) with
        LeakyReLU, batch norm and dropout after each hidden layer."""
        X = Input(shape=(self.img_dim,))

        model = Sequential()
        model.add(Dense(256, input_dim=self.img_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dropout(0.4))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dropout(0.4))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dropout(0.4))
        model.add(Dense(self.img_dim, activation='tanh'))

        X_translated = model(X)

        return Model(X, X_translated)

    def build_discriminator(self):
        """MLP discriminator: 512 -> 256 -> 1 (linear validity score)."""
        img = Input(shape=(self.img_dim,))

        model = Sequential()
        model.add(Dense(512, input_dim=self.img_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(256))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1))

        validity = model(img)

        return Model(img, validity)
class TestDualGAN(unittest.TestCase):
    """ONNX-conversion test for the combined DualGAN model."""

    def setUp(self):
        # Converted-model files created during a test; deleted in tearDown.
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def test_DualGAN(self):
        keras_model = DUALGAN().combined
        batch = 5
        # Two flat 28*28 image batches, one per domain.
        x = np.random.rand(batch, 784).astype(np.float32)
        y = np.random.rand(batch, 784).astype(np.float32)
        expected = keras_model.predict([x, y])
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        # Feed the converted model by input name to avoid ordering issues.
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, {keras_model.input_names[0]: x, keras_model.input_names[1]: y}, expected, self.model_files))
# Allow running this test module directly (python <file>) as well as via pytest.
if __name__ == "__main__":
    unittest.main()
| 4,281 | 30.485294 | 168 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_unit_tests/test_layers.py | # SPDX-License-Identifier: Apache-2.0
import pytest
import numpy as np
from mock_keras2onnx.proto.tfcompat import is_tf2, tensorflow as tf
from mock_keras2onnx.proto import (keras, is_tf_keras,
is_tensorflow_older_than, is_tensorflow_later_than,
is_keras_older_than, is_keras_later_than, python_keras_is_deprecated)
from test_utils import no_loops_in_tf2, all_recurrents_should_bidirectional, convert_keras_for_test as convert_keras, get_max_opset_supported_for_test as get_maximum_opset_supported
K = keras.backend
Activation = keras.layers.Activation
Add = keras.layers.Add
if python_keras_is_deprecated():
advanced_activations = keras.layers
layers_core = keras.layers
else:
advanced_activations = keras.layers.advanced_activations
layers_core = keras.layers.core
AlphaDropout = keras.layers.AlphaDropout
Average = keras.layers.Average
AveragePooling1D = keras.layers.AveragePooling1D
AveragePooling2D = keras.layers.AveragePooling2D
AveragePooling3D = keras.layers.AveragePooling3D
BatchNormalization = keras.layers.BatchNormalization
Bidirectional = keras.layers.Bidirectional
Concatenate = keras.layers.Concatenate
Conv1D = keras.layers.Conv1D
Conv2D = keras.layers.Conv2D
Conv2DTranspose = keras.layers.Conv2DTranspose
Conv3D = keras.layers.Conv3D
Conv3DTranspose = keras.layers.Conv3DTranspose
Cropping1D = keras.layers.Cropping1D
Cropping2D = keras.layers.Cropping2D
Cropping3D = keras.layers.Cropping3D
Dense = keras.layers.Dense
Dot = keras.layers.Dot
dot = keras.layers.dot
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GaussianDropout = keras.layers.GaussianDropout
GaussianNoise = keras.layers.GaussianNoise
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
GRU = keras.layers.GRU
Input = keras.layers.Input
InputLayer = keras.layers.InputLayer
Lambda = keras.layers.Lambda
Layer = keras.layers.Layer
LeakyReLU = keras.layers.LeakyReLU
LSTM = keras.layers.LSTM
LSTMCell = keras.layers.LSTMCell
Maximum = keras.layers.Maximum
MaxPool1D = keras.layers.MaxPool1D
MaxPool3D = keras.layers.MaxPool3D
MaxPooling2D = keras.layers.MaxPooling2D
Model = keras.models.Model
Multiply = keras.layers.Multiply
Reshape = keras.layers.Reshape
RNN = keras.layers.RNN
SeparableConv1D = keras.layers.SeparableConv1D
SeparableConv2D = keras.layers.SeparableConv2D
Sequential = keras.models.Sequential
SimpleRNN = keras.layers.SimpleRNN
SpatialDropout2D = keras.layers.SpatialDropout2D
Subtract = keras.layers.Subtract
TimeDistributed = keras.layers.TimeDistributed
UpSampling1D = keras.layers.UpSampling1D
UpSampling2D = keras.layers.UpSampling2D
UpSampling3D = keras.layers.UpSampling3D
ZeroPadding2D = keras.layers.ZeroPadding2D
# ReLU only exists as a standalone layer from Keras 2.2.4 onwards.
if not is_keras_older_than("2.2.4"):
    ReLU = keras.layers.ReLU
# Registry of recurrent-layer implementations to parametrize tests over:
# the "v1" entries are the classic Keras layers; TF-keras >= 1.14 (with the
# non-deprecated python-keras API) also provides the v2 recurrent layers.
GRU_CLASSES = [(GRU, "v1")]
LSTM_CLASSES = [(LSTM, LSTMCell, "v1")]
RNN_CLASSES = [SimpleRNN, GRU, LSTM]
if is_tf_keras and is_tensorflow_later_than("1.14.0") and not python_keras_is_deprecated():
    # Add the TF v2 compatibility layers (available after TF 1.14)
    from tensorflow.python.keras.layers import recurrent_v2
    GRU_CLASSES.append((recurrent_v2.GRU, "v2"))
    LSTM_CLASSES.append((recurrent_v2.LSTM, recurrent_v2.LSTMCell, "v2"))
    RNN_CLASSES.extend([recurrent_v2.GRU, recurrent_v2.LSTM])
def _asarray(*a):
return np.array([a], dtype='f')
def test_keras_lambda(runner):
    """Convert a Sequential model with Lambda layers (x**2 and, when the
    target opset allows it, tf.round) and compare ORT against Keras."""
    model = Sequential()
    model.add(Lambda(lambda x: x ** 2, input_shape=[3, 5]))
    # Round only maps to ONNX from opset 11 onwards.
    if get_maximum_opset_supported() >= 11:
        model.add(Lambda(lambda x: tf.round(x), input_shape=[3, 5]))
    model.add(Flatten(data_format='channels_last'))
    model.compile(optimizer='sgd', loss='mse')

    onnx_model = convert_keras(model, 'test_keras_lambda')
    data = np.random.rand(3 * 5).astype(np.float32).reshape(1, 3, 5)
    expected = model.predict(data)
    assert runner('onnx_lambda', onnx_model, data, expected)
@pytest.mark.skipif(is_tensorflow_older_than('1.12.0'),
                    reason="tf.nn.depth_to_space not supported.")
@pytest.mark.skipif(get_maximum_opset_supported() < 11,
                    reason="DepthToSpace is not supported before opset 11.")
@pytest.mark.parametrize("data_format", ["NCHW", "NHWC"])
@pytest.mark.parametrize("input_shape", [(4, 6, 8), (None, None, 8)])
def test_keras_lambda_depth_to_space(runner, data_format, input_shape):
    """Convert a tf.nn.depth_to_space Lambda model, covering both data
    formats and both static and dynamic spatial dimensions."""
    # CPU tf.nn.depth_to_space only accepts NCHW in the TF 2.2/2.3 window.
    if data_format == "NCHW" and (is_tensorflow_older_than("2.2.0") or is_tensorflow_later_than("2.4.0")):
        pytest.skip("tf.nn.depth_to_space with NCHW only supported for Tensorflow 2.2 and 2.3")
    model = Sequential()
    model.add(Lambda(
        lambda x: tf.nn.depth_to_space(x, block_size=2, data_format=data_format),
        input_shape=input_shape
    ))

    onnx_model = convert_keras(model, 'test_keras_lambda_depth_to_space')
    data = np.random.rand(3, 4, 6, 8).astype(np.float32)  # batch dimension + 'input_shape'
    expected = model.predict(data)
    assert runner('tf_depth_to_space', onnx_model, data, expected)
def test_tf_addn(runner):
    """Convert a two-input tf.add_n Lambda model and compare ONNX vs keras.

    Fix: the Lambda output was bound to a local named ``sum``, shadowing the
    builtin; renamed to ``total`` (behavior unchanged).
    """
    input1 = Input(shape=(5, 3, 4), dtype=tf.float32)
    input2 = Input(shape=(5, 3, 4), dtype=tf.float32)
    total = Lambda(tf.add_n)([input1, input2])
    model = keras.models.Model(inputs=[input1, input2], outputs=total)
    onnx_model = convert_keras(model, 'tf_add_n')
    batch_data1_shape = (2, 5, 3, 4)
    batch_data2_shape = (2, 5, 3, 4)
    data1 = np.random.rand(*batch_data1_shape).astype(np.float32)
    data2 = np.random.rand(*batch_data2_shape).astype(np.float32)
    expected = model.predict([data1, data2])
    assert runner('tf_add_n', onnx_model, [data1, data2], expected)
@pytest.mark.parametrize("arg_func", [tf.argmax, tf.argmin])
def test_tf_argmax_argmin(runner, arg_func):
    """Convert tf.argmax/tf.argmin, with default and int32 output types."""
    model = Sequential()
    model.add(Lambda(lambda x: arg_func(x, axis=2), input_shape=[3, 4, 2]))
    onnx_model = convert_keras(model, 'test_tf_arg')
    data = np.random.rand(5, 3, 4, 2).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_arg', onnx_model, data, expected)
    # Same op again but with an explicit int32 output_type.
    model = Sequential()
    model.add(Lambda(lambda x: arg_func(x, axis=2, output_type=tf.int32), input_shape=[3, 4, 2]))
    onnx_model = convert_keras(model, 'test_tf_arg')
    data = np.random.rand(5, 3, 4, 2).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_arg', onnx_model, data, expected)
@pytest.mark.parametrize("arg_func", [[tf.nn.avg_pool, tf.nn.avg_pool3d], [tf.nn.max_pool, tf.nn.max_pool3d]])
@pytest.mark.parametrize("padding_method", ['SAME', 'VALID'])
def test_tf_pool(runner, arg_func, padding_method):
    """Convert 2D and 3D avg/max pooling with SAME and VALID padding."""
    model = Sequential()
    # TF < 1.14 requires the kernel size as a full 4-element list.
    k_size = [1, 2, 2, 1] if is_tensorflow_older_than('1.14.0') else 2
    model.add(Lambda(lambda x: arg_func[0](x, k_size, strides=[1, 1, 2, 1], padding=padding_method, data_format='NHWC'),
                     input_shape=[10, 12, 3]))
    onnx_model = convert_keras(model, 'test_tf_pool')
    data = np.random.rand(5, 10, 12, 3).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_pool2d', onnx_model, data, expected)
    # 3D pooling variant, with the same version-dependent argument shapes.
    model = Sequential()
    strides = [1, 1, 1, 2, 1] if is_tensorflow_older_than('1.14.0') else [1, 1, 2]
    k_size = [1, 2, 2, 2, 1] if is_tensorflow_older_than('1.14.0') else 2
    model.add(Lambda(lambda x: arg_func[1](x, k_size, strides=strides, padding=padding_method, data_format='NDHWC'),
                     input_shape=[10, 12, 3, 4]))
    onnx_model = convert_keras(model, 'test_tf_pool')
    data = np.random.rand(5, 10, 12, 3, 4).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_pool3d', onnx_model, data, expected)
def test_tf_conv(runner):
    """Convert raw tf.nn.conv1d/conv2d/conv3d calls with constant kernels."""
    # conv2d with SAME padding.
    model = Sequential()
    k = tf.constant(np.random.normal(loc=0.0, scale=1.0, size=(1, 2, 3, 5)).astype(np.float32))
    model.add(Lambda(lambda x: tf.nn.conv2d(x, k, strides=[1, 1, 2, 1], padding='SAME', data_format='NHWC'),
                     input_shape=[10, 14, 3]))
    onnx_model = convert_keras(model, 'test_tf_conv2d')
    data = np.random.rand(1, 10, 14, 3).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_tf_conv2d', onnx_model, data, expected)
    # conv2d with VALID padding.
    model = Sequential()
    k = tf.constant(np.random.normal(loc=0.0, scale=1.0, size=(1, 2, 3, 5)).astype(np.float32))
    model.add(Lambda(lambda x: tf.nn.conv2d(x, k, strides=[1, 1, 2, 1], padding='VALID', data_format='NHWC'),
                     input_shape=[10, 14, 3]))
    onnx_model = convert_keras(model, 'test_tf_conv2d')
    data = np.random.rand(1, 10, 14, 3).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_tf_conv2d', onnx_model, data, expected)
    # conv1d.
    model = Sequential()
    k = tf.constant(np.random.normal(loc=0.0, scale=1.0, size=(1, 3, 5)).astype(np.float32))
    model.add(Lambda(lambda x: tf.nn.conv1d(x, k, stride=2, padding='SAME', data_format='NWC'),
                     input_shape=[10, 3]))
    onnx_model = convert_keras(model, 'test_tf_conv1d')
    data = np.random.rand(1, 10, 3).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_tf_conv1d', onnx_model, data, expected)
    # conv3d.
    model = Sequential()
    k = tf.constant(np.random.normal(loc=0.0, scale=1.0, size=(1, 2, 3, 5, 2)).astype(np.float32))
    model.add(Lambda(lambda x: tf.nn.conv3d(x, k, strides=[1, 1, 2, 1, 1], padding='SAME', data_format='NDHWC'),
                     input_shape=[10, 14, 3, 5]))
    onnx_model = convert_keras(model, 'test_tf_conv3d')
    data = np.random.rand(1, 10, 14, 3, 5).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_tf_conv3d', onnx_model, data, expected)
@pytest.mark.skipif(is_tensorflow_older_than('1.14.0'),
                    reason="tf.math has no attribute 'floormod'.")
def test_tf_floormod(runner):
    """Convert tf.math.floormod for both float and int32 operands."""
    def my_func_1(x):
        # Float floormod.
        return tf.math.floormod(x[0], x[1])
    def my_func_2(x):
        # Integer floormod via casts.
        return tf.math.floormod(tf.cast(x[0], tf.int32), tf.cast(x[1], tf.int32))
    for my_func_ in [my_func_1, my_func_2]:
        input1 = Input(shape=[2, 2])
        input2 = Input(shape=[2, 2])
        added = Lambda(my_func_)([input1, input2])
        model = keras.models.Model(inputs=[input1, input2], outputs=added)
        onnx_model = convert_keras(model, 'test_tf_floormod')
        # Offset by 1.0 to keep the divisor away from zero.
        data1 = 100 * np.random.rand(2, 2, 2).astype(np.float32) + 1.0
        data2 = 10 * np.random.rand(2, 2, 2).astype(np.float32) + 1.0
        expected = model.predict([data1, data2])
        assert runner('onnx_tf_floormod', onnx_model, [data1, data2], expected)
def test_tf_rsqrt(runner):
    """Convert tf.nn.batch_normalization (which lowers through rsqrt)."""
    def my_func_1(x):
        # Identity-like normalization: zero mean/offset/scale, unit variance.
        beta = tf.constant([0.0, 0.0, 0.0, 0.0])
        gamma = tf.constant([0.0, 0.0, 0.0, 0.0])
        mean = tf.constant([0.0, 0.0, 0.0, 0.0])
        variance = tf.constant([1.0, 1.0, 1.0, 1.0])
        return tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.001)
    model = Sequential()
    model.add(Lambda(lambda x: my_func_1(x), input_shape=[2, 3, 4]))
    onnx_model = convert_keras(model, 'test_tf_rsqrt')
    data = np.random.rand(1, 2, 3, 4).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_tf_rsqrt', onnx_model, data, expected)
def test_tf_bias_add(runner):
    """Convert tf.nn.bias_add in both NHWC (default) and NCHW layouts."""
    model = Sequential()
    model.add(Lambda(lambda x: tf.nn.bias_add(x, tf.constant([100., -100.])), input_shape=[3, 4, 2]))
    onnx_model = convert_keras(model, 'test_tf_bias_add')
    data = np.random.rand(5, 3, 4, 2).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_bias_add', onnx_model, data, expected)
    # NCHW: bias applies along the channel axis (dim 1).
    model = Sequential()
    model.add(
        Lambda(lambda x: tf.nn.bias_add(x, tf.constant([100., -100.]), data_format='NCHW'), input_shape=[2, 3, 4]))
    onnx_model = convert_keras(model, 'test_tf_bias_add')
    data = np.random.rand(5, 2, 3, 4).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_bias_add', onnx_model, data, expected)
def test_tf_clip(runner):
    """Check that K.clip inside a Lambda converts and matches keras output."""
    net = Sequential()
    net.add(Lambda(lambda t: K.clip(t, 0, 10), input_shape=[5, 5]))
    # Values deliberately outside [0, 10] so clipping actually fires.
    sample = np.random.randint(-5, 15, size=(1, 5, 5)).astype(np.float32)
    reference = net.predict(sample)
    converted = convert_keras(net, 'test_tf_clip')
    assert runner('onnx_tf_clip', converted, sample, reference)
@pytest.mark.skipif(get_maximum_opset_supported() < 12,
                    reason="Result mismatch on ORT, skip conversion for unsupported types.")
def test_tf_pow(runner):
    """Convert integer tf.math.pow (both operands cast to int32)."""
    model = Sequential()
    y = tf.constant([[2.0, 2.0], [2.0, 2.0]])
    model.add(Lambda(lambda x: tf.math.pow(tf.cast(x, tf.int32), tf.cast(y, tf.int32)), input_shape=[2, 2]))
    data = (100 * np.random.rand(3, 2, 2)).astype(np.float32)
    expected = model.predict(data)
    onnx_model = convert_keras(model, 'test_tf_pow')
    assert runner('onnx_tf_pow', onnx_model, data, expected)
def test_tf_concat(runner):
    """Convert tf.concat along a positive and a negative axis."""
    def my_func_1(x):
        return tf.concat([x[0], x[1]], 1)
    def my_func_2(x):
        return tf.concat([x[0], x[1]], -1)
    # Shapes paired per-index so the concat axis dimensions differ but the
    # other dimensions match.
    input1_shape = [(2, 3), (3, 2)]
    input2_shape = [(4, 3), (3, 4)]
    myFunc = [my_func_1, my_func_2]
    for idx_ in range(2):
        input1 = Input(shape=input1_shape[idx_])
        input2 = Input(shape=input2_shape[idx_])
        added = Lambda(myFunc[idx_])([input1, input2])
        model = keras.models.Model(inputs=[input1, input2], outputs=added)
        onnx_model = convert_keras(model, 'test_tf_concat')
        batch_data1_shape = (2,) + input1_shape[idx_]
        batch_data2_shape = (2,) + input2_shape[idx_]
        data1 = np.random.rand(*batch_data1_shape).astype(np.float32)
        data2 = np.random.rand(*batch_data2_shape).astype(np.float32)
        expected = model.predict([data1, data2])
        assert runner('onnx_concat', onnx_model, [data1, data2], expected)
@pytest.mark.parametrize("use_bias", [True, False])
def test_depthwise_conv2d(runner, use_bias):
    """Convert keras DepthwiseConv2D with and without a bias term."""
    model = Sequential()
    model.add(InputLayer(input_shape=(8, 8, 2)))
    model.add(keras.layers.DepthwiseConv2D(
        kernel_size=(3, 3), strides=(1, 1), padding="VALID",
        data_format='channels_last', use_bias=use_bias))
    onnx_model = convert_keras(model, 'test_depthwise_conv2d')
    data = np.random.rand(3, 8, 8, 2).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_depthwise_conv2d', onnx_model, data, expected)
@pytest.mark.skipif(get_maximum_opset_supported() < 12,
                    reason="Einsum is not supported until opset 12.")
def test_tf_einsum(runner):
    """Convert several tf.einsum equations (outer product and contractions)."""
    def my_func_1(x):
        return tf.einsum('i,d->id', x[0][:, 0], x[1][:, 1])
    def my_func_2(x):
        return tf.einsum('ibh,hnd->ibnd', x[0][:, 0], x[1][:, 1])
    def my_func_3(x):
        return tf.einsum('ibnd,hnd->ibh', x[0][:, 0], x[1][:, 1])
    def my_func_4(x):
        return tf.einsum('ibnd,jbnd->ijbn', x[0][:, 0], x[1][:, 1])
    def my_func_5(x):
        return tf.einsum('ijbn,jbnd->ibnd', x[0][:, 0], x[1][:, 1])
    # Input shapes paired per-equation; the leading batch/slice dims are
    # stripped by the x[:, 0] / x[:, 1] indexing above.
    input1_shape = [(3,), (2, 3, 2), (2, 3, 4, 2), (2, 3, 4, 2), (2, 2, 4, 3)]
    input2_shape = [(3,), (2, 4, 5), (2, 4, 2), (2, 3, 4, 2), (2, 4, 3, 5)]
    myFunc = [my_func_1, my_func_2, my_func_3, my_func_4, my_func_5]
    for idx_ in range(len(myFunc)):
        K.clear_session()
        input1 = Input(shape=input1_shape[idx_])
        input2 = Input(shape=input2_shape[idx_])
        added = Lambda(myFunc[idx_])([input1, input2])
        model = keras.models.Model(inputs=[input1, input2], outputs=added)
        onnx_model = convert_keras(model, 'test_tf_einsum')
        batch_data1_shape = (2,) + input1_shape[idx_]
        batch_data2_shape = (2,) + input2_shape[idx_]
        data1 = np.random.rand(*batch_data1_shape).astype(np.float32)
        data2 = np.random.rand(*batch_data2_shape).astype(np.float32)
        expected = model.predict([data1, data2])
        assert runner('onnx_einsum', onnx_model, [data1, data2], expected)
def test_tf_expand_dims(runner):
    """Check tf.expand_dims conversion for leading, middle, and trailing axes."""
    for axis in (0, 1, -1):
        net = Sequential()
        net.add(Lambda(lambda t: tf.expand_dims(t, axis), input_shape=[2, 3, 4]))
        converted = convert_keras(net, 'test_tf_expand_dims')
        sample = np.random.rand(1, 2, 3, 4).astype(np.float32)
        assert runner('onnx_tf_expand_dims', converted, sample, net.predict(sample))
def test_tf_fill(runner):
    """Check that adding a constant tf.fill tensor to the input converts."""
    net = Sequential()
    net.add(Lambda(lambda t: t + tf.fill([2, 3], 2.3), input_shape=[2, 3]))
    converted = convert_keras(net, 'test_tf_fill')
    sample = np.random.rand(3, 2, 3).astype(np.float32)
    assert runner('onnx_fill', converted, sample, net.predict(sample))
def test_tf_fused_batch_norm(runner):
    """Convert tf.nn.fused_batch_norm (inference mode) in NHWC and NCHW."""
    def my_func_1(x):
        # NHWC: per-channel stats over the last axis (4 channels).
        beta = tf.constant([0.2, 0.3, 0.4, 0.5])
        gamma = tf.constant([0.5, 0.4, 0.3, 0.2])
        mean = tf.constant([0.1, 0.2, 0.3, 0.4])
        variance = tf.constant([0.9, 1.0, 1.0, 1.1])
        # [0] selects the normalized output; the op also returns batch stats.
        return tf.nn.fused_batch_norm(x, mean, variance, beta, gamma, 0.001, data_format='NHWC', is_training=False)[0]
    def my_func_2(x):
        # NCHW: per-channel stats over axis 1 (2 channels).
        beta = tf.constant([0.2, 0.3])
        gamma = tf.constant([0.5, 0.4])
        mean = tf.constant([0.1, 0.2])
        variance = tf.constant([0.9, 1.0])
        return tf.nn.fused_batch_norm(x, mean, variance, beta, gamma, 0.001, data_format='NCHW', is_training=False)[0]
    for my_func in [my_func_1, my_func_2]:
        model = Sequential()
        model.add(Lambda(lambda x: my_func(x), input_shape=[2, 3, 4]))
        onnx_model = convert_keras(model, 'test_tf_fused_batch_norm')
        data = np.random.rand(1, 2, 3, 4).astype(np.float32)
        expected = model.predict(data)
        assert runner('onnx_tf_fused_batch_norm', onnx_model, data, expected)
def test_tf_gather(runner):
    """Check tf.gather with repeated indices along axis 1."""
    net = Sequential()
    net.add(Lambda(lambda t: tf.gather(t, [1, 1], axis=1), input_shape=[5, 5]))
    converted = convert_keras(net, 'test_tf_gather')
    sample = np.random.rand(3, 5, 5).astype(np.float32)
    assert runner('onnx_tf_gather', converted, sample, net.predict(sample))
def test_tf_maximum_minimum(runner):
    """Convert nested tf.minimum/tf.maximum for float and int32 inputs,
    with both equal and broadcastable operand shapes."""
    input1_shape_list = [(2, 3), (2, 3)]
    input2_shape_list = [(2, 3), (2, 1)]
    def my_func_1(x):
        return tf.minimum(tf.maximum(x[0], x[1]), 0.5)
    def my_func_2(x):
        return tf.minimum(tf.maximum(x[0], 0.5), x[1])
    # Float variants.
    for idx_ in range(len(input1_shape_list)):
        for myFunc in [my_func_1, my_func_2]:
            input1 = Input(shape=input1_shape_list[idx_], dtype=tf.float32)
            input2 = Input(shape=input2_shape_list[idx_], dtype=tf.float32)
            added = Lambda(myFunc)([input1, input2])
            model = keras.models.Model(inputs=[input1, input2], outputs=added)
            onnx_model = convert_keras(model, 'tf_maximum_minimum')
            batch_data1_shape = (2,) + input1_shape_list[idx_]
            batch_data2_shape = (2,) + input2_shape_list[idx_]
            data1 = np.random.rand(*batch_data1_shape).astype(np.float32)
            data2 = np.random.rand(*batch_data2_shape).astype(np.float32)
            expected = model.predict([data1, data2])
            assert runner('tf_maximum_minimum', onnx_model, [data1, data2], expected)
    def my_func_3(x):
        return tf.minimum(tf.maximum(x[0], x[1]), 50)
    def my_func_4(x):
        return tf.minimum(tf.maximum(x[0], 50), x[1])
    # Integer variants.
    for idx_ in range(len(input1_shape_list)):
        for myFunc in [my_func_3, my_func_4]:
            input1 = Input(shape=input1_shape_list[idx_], dtype=tf.int32)
            input2 = Input(shape=input2_shape_list[idx_], dtype=tf.int32)
            added = Lambda(myFunc)([input1, input2])
            model = keras.models.Model(inputs=[input1, input2], outputs=added)
            onnx_model = convert_keras(model, 'tf_maximum_minimum')
            batch_data1_shape = (2,) + input1_shape_list[idx_]
            batch_data2_shape = (2,) + input2_shape_list[idx_]
            data1 = (100 * np.random.rand(*batch_data1_shape)).astype(np.int32)
            data2 = (100 * np.random.rand(*batch_data2_shape)).astype(np.int32)
            expected = model.predict([data1, data2])
            assert runner('tf_maximum_minimum', onnx_model, [data1, data2], expected)
@pytest.mark.skipif(get_maximum_opset_supported() < 9,
                    reason="opset < 9 is not supported.")
def test_tf_one_hot(runner):
    """Convert tf.one_hot with custom on/off values and a non-default axis."""
    def my_func(x):
        # depth=3, on_value=5.0, off_value=-1.0, axis=1.
        return tf.one_hot(tf.cast(x, tf.int32), 3, 5.0, -1.0, 1)
    model = Sequential()
    model.add(Lambda(lambda x: my_func(x), input_shape=[3]))
    onnx_model = convert_keras(model, 'test_tf_one_hot')
    data = np.array([[0, 1, 2]]).astype(np.float32)
    expected = model.predict(data)
    assert runner('tf_one_hot', onnx_model, data, expected)
def test_tf_pad(runner):
    """Convert tf.pad in CONSTANT (default and custom value) and REFLECT modes."""
    def my_func_1(x):
        paddings = tf.constant([[0, 0], [1, 3], [2, 4]])
        return tf.pad(x, paddings, mode='CONSTANT')
    def my_func_2(x):
        paddings = tf.constant([[0, 0], [1, 3], [2, 4]])
        return tf.pad(x, paddings, mode='CONSTANT', constant_values=1)
    def my_func_3(x):
        paddings = tf.constant([[0, 0], [1, 3], [2, 4]])
        return tf.pad(x, paddings, mode='REFLECT')
    for my_func in [my_func_1, my_func_2, my_func_3]:
        model = Sequential()
        model.add(Lambda(lambda x: my_func(x), input_shape=[5, 5]))
        onnx_model = convert_keras(model, 'test_tf_pad')
        data = np.random.rand(2, 5, 5).astype(np.float32)
        expected = model.predict(data)
        assert runner('onnx_tf_pad', onnx_model, data, expected)
def test_tf_range(runner):
    """Convert tf.range with constant int/float bounds and a dynamic delta."""
    def my_func_1(x):
        return x + tf.cast(tf.range(3, 18, 3), tf.float32)
    def my_func_2(x):
        return x + tf.range(2.3, 4.6, 0.8, dtype=tf.float32)
    for my_func_ in [my_func_1, my_func_2]:
        K.clear_session()
        model = Sequential()
        model.add(Lambda(lambda x: my_func_(x), input_shape=[1]))
        onnx_model = convert_keras(model, 'test_tf_range')
        data = np.random.rand(3, 1).astype(np.float32)
        expected = model.predict(data)
        assert runner('onnx_range_1', onnx_model, data, expected)
    def my_func_3(x):
        # Delta comes from the second model input, so Range gets a runtime value.
        return x[0] + tf.cast(tf.range(3, 18, tf.cast(x[1][0, 0], tf.int32)), tf.float32)
    K.clear_session()
    input1 = Input(shape=(5,))
    input2 = Input(shape=(1,))
    added = Lambda(my_func_3)([input1, input2])
    model = keras.models.Model(inputs=[input1, input2], outputs=added)
    onnx_model = convert_keras(model, 'test_tf_range')
    data_1 = np.random.randint(1, 3, size=(1, 5)).astype(np.float32)
    data_2 = np.array([3]).astype(np.float32).reshape(1, 1)
    expected = model.predict([data_1, data_2])
    assert runner('onnx_range_2', onnx_model, [data_1, data_2], expected)
def test_tf_compare_equal(runner):
    """Convert the elementwise comparison ops not_equal/less_equal/greater_equal."""
    for tf_op_ in [tf.not_equal, tf.less_equal, tf.greater_equal]:
        input1_shape = [[3], [3]]
        input1 = Input(shape=input1_shape[0], dtype='int32')
        input2 = Input(shape=input1_shape[1], dtype='int32')
        comp = Lambda(lambda x: tf_op_(x[0], x[1]))([input1, input2])
        model = keras.models.Model(inputs=[input1, input2], outputs=comp)
        onnx_model = convert_keras(model, 'tf_compare_equal')
        # Fixed data covering equal, smaller, and larger element pairs.
        data1 = np.array([[1, 2, 3], [1, 2, 3]]).astype(np.int32)
        data2 = np.array([[1, 2, 3], [2, 1, 4]]).astype(np.int32)
        expected = model.predict([data1, data2])
        assert runner('tf_compare_equal', onnx_model, [data1, data2], expected)
def test_tf_realdiv(runner):
    """Convert tf.realdiv with equal shapes and with a broadcast divisor."""
    input1_shape = [(2, 3), (2, 3)]
    input2_shape = [(2, 3), (3,)]
    for idx_ in range(2):
        input1 = Input(shape=input1_shape[idx_])
        input2 = Input(shape=input2_shape[idx_])
        added = Lambda(lambda x: tf.realdiv(x[0], x[1]))([input1, input2])
        model = keras.models.Model(inputs=[input1, input2], outputs=added, name='realdiv')
        onnx_model = convert_keras(model, model.name)
        batch_data1_shape = (2,) + input1_shape[idx_]
        batch_data2_shape = (2,) + input2_shape[idx_]
        data1 = np.random.rand(*batch_data1_shape).astype(np.float32)
        data2 = np.random.rand(*batch_data2_shape).astype(np.float32)
        expected = model.predict([data1, data2])
        assert runner(model.name, onnx_model, [data1, data2], expected)
def test_tf_reduce_op(runner):
    """Convert the K.min/max/mean/sum/prod reductions over axis/keepdims combos."""
    reduce_name = ['tf_min', 'tf_max', 'tf_mean', 'tf_sum', 'tf_prod']
    reduce_ops = [K.min, K.max, K.mean, K.sum, K.prod]
    # tf.keras restricts the combinations exercised here — presumably because
    # axis=None / keepdims=False conversion differs there; TODO confirm.
    axis_list = [1] if is_tf_keras else [1, None]
    keepdims_val = [True] if is_tf_keras else [True, False]
    for idx, reduce_op in enumerate(reduce_ops):
        for axis in axis_list:
            for keepdims in keepdims_val:
                model = Sequential()
                model.add(Lambda(lambda x: reduce_op(x, axis=axis, keepdims=keepdims), input_shape=[2, 2]))
                onnx_model = convert_keras(model, 'test_' + reduce_name[idx])
                data = np.random.rand(3, 2, 2).astype(np.float32)
                expected = model.predict(data)
                assert runner('onnx_' + reduce_name[idx], onnx_model, data, expected)
    # Second pass with batch size 1.
    axis_list = [1] if is_tf2 and is_tf_keras else [1, None]
    for idx, reduce_op in enumerate(reduce_ops):
        for axis in axis_list:
            for keepdims in keepdims_val:
                model = Sequential()
                model.add(Lambda(lambda x: reduce_op(x, axis=axis, keepdims=keepdims), input_shape=[2, 2]))
                onnx_model = convert_keras(model, 'test_' + reduce_name[idx])
                data = np.random.rand(1, 2, 2).astype(np.float32)
                expected = model.predict(data)
                assert runner('onnx_' + reduce_name[idx], onnx_model, data, expected)
def test_tf_reshape(runner):
    """Convert tf.reshape for float/int inputs and with a runtime shape tensor."""
    model = Sequential()
    model.add(Lambda(lambda x: tf.reshape(x, [-1, 2, 4]), input_shape=[2, 2, 2]))
    onnx_model = convert_keras(model, 'test_tf_reshape_float')
    data = np.random.rand(3, 2, 2, 2).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_reshape_float', onnx_model, data, expected)
    # Same reshape on int32 data.
    model = Sequential()
    model.add(Lambda(lambda x: tf.reshape(x, [-1, 2, 4]), input_shape=[2, 2, 2], dtype=tf.int32))
    onnx_model = convert_keras(model, 'test_tf_reshape_int')
    data = np.random.randint(5, size=(3, 2, 2, 2)).astype(np.int32)
    expected = model.predict(data)
    assert runner('onnx_reshape_int', onnx_model, data, expected)
    def my_func(x):
        # Shape supplied at runtime by the second input.
        return tf.reshape(x[0][0], tf.cast(x[1][0], tf.int32))
    input1 = Input(shape=(6,))
    input2 = Input(shape=(3,))
    added = Lambda(my_func)([input1, input2])
    model = keras.models.Model(inputs=[input1, input2], outputs=added)
    onnx_model = convert_keras(model, 'test_tf_reshape_dynamic')
    data_1 = np.random.rand(1, 6).astype(np.float32).reshape(1, 6)
    data_2 = np.array([1, 2, 3]).astype(np.float32).reshape(1, 3)
    expected = model.predict([data_1, data_2])
    assert runner('onnx_reshape_dynamic', onnx_model, [data_1, data_2], expected)
def test_tf_resize(runner):
    """Convert bilinear and nearest-neighbor image resize; downscale needs opset >= 10."""
    target_opset = get_maximum_opset_supported()
    # Dynamic spatial dims and downscaling only convert from opset 10 on.
    shape_list = [10, None] if target_opset >= 10 else [10]
    size_list = [[5, 10], [20, 30]] if target_opset >= 10 else [[20, 30]]
    for g in [tf.image.resize_bilinear, tf.image.resize_nearest_neighbor]:
        for shape_1_dim in shape_list:
            for size in size_list:
                model = Sequential()
                model.add(Lambda(lambda x: g(x, size=size), input_shape=[shape_1_dim, 20, 3]))
                onnx_model = convert_keras(model, 'test_tf_resize', target_opset=target_opset)
                data = np.random.rand(2, 10, 20, 3).astype(np.float32)
                expected = model.predict(data)
                assert runner('onnx_resize', onnx_model, data, expected)
@pytest.mark.skipif(get_maximum_opset_supported() < 11,
                    reason="Resize coordinate_transformation_mode need opset >= 11.")
@pytest.mark.skipif(is_tensorflow_older_than('1.14.0') or (is_tf2 and is_tensorflow_older_than('2.2.0')),
                    reason="module 'tensorflow.compat' has no attribute 'v1'.")
def test_tf_resize_2(runner):
    """Convert tf.compat.v1 bilinear resize with align_corners=True."""
    model = Sequential()
    model.add(Lambda(lambda x: tf.compat.v1.image.resize(x,
                                                         [3, 4],
                                                         method='bilinear', align_corners=True),
                     input_shape=[5, 7, 3]))
    onnx_model = convert_keras(model, 'test_tf_resize_2')
    data = np.random.rand(2, 5, 7, 3).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_resize_2', onnx_model, data, expected)
def test_tf_size(runner):
    """Check that tf.size (total element count) converts correctly."""
    net = Sequential()
    net.add(Lambda(lambda t: t + tf.cast(tf.size(t), tf.float32), input_shape=[2, 3, 5]))
    converted = convert_keras(net, 'test_tf_size')
    sample = np.random.rand(3, 2, 3, 5).astype(np.float32)
    assert runner('onnx_tf_size', converted, sample, net.predict(sample))
def test_tf_slice(runner):
    """Convert tf.slice with constant begin/size, then runtime begin and size."""
    model = Sequential()
    # Need 0th: start=0 size=batch_dim
    model.add(Lambda(lambda x: tf.slice(x, [0, 1, 0, 2], [3, 1, 2, 2]), input_shape=[2, 3, 5]))
    onnx_model = convert_keras(model, 'test_tf_slice')
    data = np.random.rand(3, 2, 3, 5).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_tf_slice', onnx_model, data, expected)
    # Runtime begin/size tensors require the opset-10 Slice with inputs.
    if get_maximum_opset_supported() < 10:
        return
    def my_func_1(x):
        # 'begin' comes from the second model input.
        return tf.slice(x[0], tf.cast(x[1][0], tf.int32), [3, 1, 1, 2])
    input1 = Input(shape=(2, 3, 5), name='inputs')
    input2 = Input(shape=(4,), dtype=tf.int32, name='begin')
    added = Lambda(my_func_1)([input1, input2])
    model = keras.models.Model(inputs=[input1, input2], outputs=added)
    onnx_model = convert_keras(model, 'test_tf_slice')
    data1 = np.random.rand(3, 2, 3, 5).astype(np.float32)
    data2 = np.array([[0, 1, 0, 2], [0, 1, 0, 2], [0, 1, 0, 2]]).astype(np.int32)
    expected = model.predict([data1, data2])
    assert runner('onnx_tf_slice', onnx_model, {"inputs": data1, 'begin': data2}, expected)
    def my_func_2(x):
        # 'size' comes from the second model input.
        return tf.slice(x[0], [0, 1, 0, 2], tf.cast(x[1][0], tf.int32))
    input1 = Input(shape=(2, 3, 5), name='inputs')
    input2 = Input(shape=(4,), dtype=tf.int32, name='size')
    added = Lambda(my_func_2)([input1, input2])
    model = keras.models.Model(inputs=[input1, input2], outputs=added)
    onnx_model = convert_keras(model, 'test_tf_slice')
    data1 = np.random.rand(3, 2, 3, 5).astype(np.float32)
    data2 = np.array([[3, 1, 1, 2], [3, 1, 1, 2], [3, 1, 1, 2]]).astype(np.int32)
    expected = model.predict([data1, data2])
    assert runner('onnx_tf_slice', onnx_model, {"inputs": data1, 'size': data2}, expected)
def test_tf_softmax(runner):
    """Convert tf.nn.softmax with the default, last, and an inner axis."""
    for func_ in [lambda x: tf.nn.softmax(x), lambda x: tf.nn.softmax(x, axis=-1), lambda x: tf.nn.softmax(x, axis=1)]:
        model = Sequential()
        model.add(Lambda(func_, input_shape=[2, 3, 5]))
        onnx_model = convert_keras(model, 'test_tf_softmax')
        data = np.random.rand(3, 2, 3, 5).astype(np.float32)
        expected = model.predict(data)
        assert runner('onnx_tf_softmax', onnx_model, data, expected)
def test_tf_sqrt(runner):
    """Check tf.sqrt conversion (input shifted by 1.0 to stay positive)."""
    net = Sequential()
    net.add(Lambda(lambda t: tf.sqrt(t + 1.0), input_shape=[2, 5]))
    converted = convert_keras(net, 'test_tf_sqrt')
    sample = np.random.rand(3, 2, 5).astype(np.float32)
    assert runner('onnx_tf_sqrt', converted, sample, net.predict(sample))
@pytest.mark.skipif(is_tensorflow_older_than('1.14.0'),
                    reason="dilations in tf.nn.depthwise_conv2d not supported.")
def test_tf_space_to_batch_nd(runner):
    """Convert dilated depthwise conv (TF lowers it via SpaceToBatchND)."""
    model = Sequential()
    filter_value = np.random.rand(3, 3, 2, 2).astype(np.float32)
    filter_constant = tf.constant(filter_value.tolist(), dtype=tf.float32)
    model.add(Lambda(lambda x: tf.nn.depthwise_conv2d(
        x, filter=filter_constant, strides=(1, 1, 1, 1), padding="VALID",
        data_format='NHWC', dilations=(2, 2)), input_shape=(8, 8, 2)))
    onnx_model = convert_keras(model, 'test_tf_space_to_batch_nd')
    data = np.random.rand(3, 8, 8, 2).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_tf_space_to_batch_nd', onnx_model, data, expected)
def test_tf_splitv(runner):
    """Convert tf.split with unequal split sizes (SplitV), keeping one output."""
    def my_func_1(x):
        # Splits axis 2 (size 30) into chunks of 4/15/11 and keeps the first.
        return tf.split(x, [4, 15, 11], 2)[0]
    model = Sequential()
    model.add(Lambda(lambda x: my_func_1(x), input_shape=[5, 30]))
    onnx_model = convert_keras(model, 'test_tf_splitv')
    data = np.random.rand(2, 5, 30).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_splitv', onnx_model, data, expected)
def test_tf_square(runner):
    """Check x + tf.square(x) conversion against keras output."""
    net = Sequential()
    net.add(Lambda(lambda t: t + tf.square(t), input_shape=[2, 3, 5]))
    converted = convert_keras(net, 'test_tf_square')
    sample = np.random.rand(3, 2, 3, 5).astype(np.float32)
    assert runner('onnx_tf_square', converted, sample, net.predict(sample))
def test_tf_squeeze(runner):
    """Convert tf.squeeze with an explicit axis, all axes, and a negative axis."""
    for func_ in [lambda x: tf.squeeze(x, [1]), lambda x: tf.squeeze(x), lambda x: tf.squeeze(x, [-2])]:
        model = Sequential()
        model.add(Lambda(func_, input_shape=[1, 2, 1, 2]))
        onnx_model = convert_keras(model, 'test_tf_squeeze')
        data = np.random.rand(3, 1, 2, 1, 2).astype(np.float32)
        expected = model.predict(data)
        assert runner('onnx_squeeze', onnx_model, data, expected)
def test_tf_stack(runner):
    """Convert tf.stack of three inputs along a positive and a negative axis."""
    def my_func_1(x):
        return tf.stack([x[0], x[1], x[2]], axis=1)
    def my_func_2(x):
        return tf.stack([x[0], x[1], x[2]], axis=-1)
    for myFunc in [my_func_1, my_func_2]:
        K.clear_session()
        input_shape = (2, 3)
        input1 = Input(shape=input_shape)
        input2 = Input(shape=input_shape)
        input3 = Input(shape=input_shape)
        added = Lambda(myFunc)([input1, input2, input3])
        model = keras.models.Model(inputs=[input1, input2, input3], outputs=added)
        onnx_model = convert_keras(model, 'test_tf_stack')
        batch_data_shape = (1,) + input_shape
        data1 = np.random.rand(*batch_data_shape).astype(np.float32)
        data2 = np.random.rand(*batch_data_shape).astype(np.float32)
        data3 = np.random.rand(*batch_data_shape).astype(np.float32)
        expected = model.predict([data1, data2, data3])
        assert runner('onnx_stack', onnx_model, [data1, data2, data3], expected)
def test_stridedslice_with_version(runner):
    """Convert strided slicing mixing tf.newaxis, open-ended ranges, and an index."""
    target_opset = get_maximum_opset_supported()
    # Exercise both negative and positive begin/end values.
    for v1 in [-1, 1]:
        for v2 in [-1, 2]:
            model = Sequential()
            model.add(
                Lambda(lambda x: x[:, tf.newaxis, v1:, tf.newaxis, :v2, tf.newaxis, 3], input_shape=[2, 3, 4, 5]))
            onnx_model = convert_keras(model, 'test', target_opset=target_opset)
            data = np.random.rand(6 * 2 * 3 * 4 * 5).astype(np.float32).reshape(6, 2, 3, 4, 5)
            expected = model.predict(data)
            assert runner('onnx_stridedslice', onnx_model, data, expected)
def test_stridedslice_ellipse_newaxis(runner):
    """Convert strided slicing combining an ellipsis with tf.newaxis insertions."""
    target_opset = get_maximum_opset_supported()
    model = Sequential()
    model.add(
        Lambda(lambda x: x[:, 1:, tf.newaxis, ..., :, 1:, tf.newaxis], input_shape=[2, 3, 4, 3, 2, 2]))
    onnx_model = convert_keras(model, 'test', target_opset=target_opset)
    data = np.random.rand(6 * 2 * 3 * 4 * 3 * 2 * 2).astype(np.float32).reshape(6, 2, 3, 4, 3, 2, 2)
    expected = model.predict(data)
    assert runner('onnx_stridedslice', onnx_model, data, expected)
    # Degenerate case: a bare ellipsis is an identity slice.
    model = Sequential()
    model.add(
        Lambda(lambda x: x[...], input_shape=[2, 3, 4, 5]))
    onnx_model = convert_keras(model, 'test', target_opset=target_opset)
    data = np.random.rand(6 * 2 * 3 * 4 * 5).astype(np.float32).reshape(6, 2, 3, 4, 5)
    expected = model.predict(data)
    assert runner('onnx_stridedslice', onnx_model, data, expected)
def test_stridedslice_ellipsis_mask_with_version(runner):
    """Convert a strided slice whose middle dimensions are covered by an ellipsis."""
    target_opset = get_maximum_opset_supported()
    model = Sequential()
    model.add(Lambda(lambda x: x[:, :2, ..., 1:], input_shape=[3, 4, 5, 6, 3]))
    onnx_model = convert_keras(model, 'test', target_opset=target_opset)
    data = np.random.rand(5 * 3 * 4 * 5 * 6 * 3).astype(np.float32).reshape(5, 3, 4, 5, 6, 3)
    expected = model.predict(data)
    assert runner('onnx_stridedslice_ellipsis_mask', onnx_model, data, expected)
def test_stridedslice_shrink_mask_with_version(runner):
    """Convert a strided slice that drops a dimension via integer indexing."""
    target_opset = get_maximum_opset_supported()
    # Both a negative and a positive shrink index.
    for shrink_value in [-1, 2]:
        model = Sequential()
        model.add(Lambda(lambda x: x[:, shrink_value, :], input_shape=[3, 4, 5]))
        onnx_model = convert_keras(model, 'test', target_opset=target_opset)
        data = np.random.rand(2 * 3 * 4 * 5).astype(np.float32).reshape(2, 3, 4, 5)
        expected = model.predict(data)
        assert runner(onnx_model.graph.name, onnx_model, data, expected)
@pytest.mark.skipif(get_maximum_opset_supported() < 10,
                    reason="dynamic end is not supported for Slice op, opset < 10.")
def test_stridedslice_dynamic_end(runner):
    """Convert a strided slice whose end index comes from tf.shape at runtime."""
    def my_func(x):
        # End bound depends on the runtime width of x.
        frame_dim = tf.shape(x)[2]
        return x[:, :-1, 1:frame_dim - 1, :]
    model = Sequential()
    filters = 8
    kernel_size = (2, 5)
    strides = (1, 2)
    model.add(Conv2DTranspose(filters, kernel_size, strides=strides, use_bias=False,
                              padding="valid", name='conv2d_transpose', input_shape=[3, 4, 5]))
    model.add(Lambda(my_func))
    data1 = np.random.rand(2 * 3 * 4 * 5).astype(np.float32).reshape(2, 3, 4, 5)
    expected = model.predict(data1)
    onnx_model = convert_keras(model, 'test_strided_slice_dynamic_input')
    assert runner(onnx_model.graph.name, onnx_model, data1, expected)
@pytest.mark.skip("ConvTranspose does not support dynamic input for padding=same")
def test_conv_transpose_dynamic(runner):
    """(Skipped) Conv2DTranspose with padding=same fed by a dynamically sliced input."""
    def my_func(x):
        # Slicing with a runtime end makes the transpose input shape dynamic.
        frame_dim = tf.shape(x)[2]
        return x[:, :-1, 1:frame_dim - 1, :]
    model = Sequential()
    filters = 8
    kernel_size = (2, 5)
    strides = (1, 1)
    model.add(Lambda(my_func, input_shape=[3, 4, 5]))
    model.add(Conv2DTranspose(filters, kernel_size, strides=strides, use_bias=False,
                              padding="same", name='conv2d_transpose'))
    data1 = np.random.rand(2 * 3 * 4 * 5).astype(np.float32).reshape(2, 3, 4, 5)
    expected = model.predict(data1)
    onnx_model = convert_keras(model, 'test_conv_transpose_dynamic')
    assert runner(onnx_model.graph.name, onnx_model, data1, expected)
def test_tf_tile(runner):
    """Check tf.tile (repeat the last axis three times) conversion."""
    net = Sequential()
    net.add(Lambda(lambda t: tf.tile(t, [1, 1, 3]), input_shape=[2, 2]))
    converted = convert_keras(net, 'test_tf_tile')
    sample = np.random.rand(3, 2, 2).astype(np.float32)
    assert runner('onnx_tile', converted, sample, net.predict(sample))
def test_tf_topk(runner):
    """Check tf.nn.top_k conversion (values output only)."""
    net = Sequential()
    net.add(Lambda(lambda t: tf.nn.top_k(t, k=2)[0], input_shape=[5, 5]))
    converted = convert_keras(net, 'test_tf_topk')
    sample = np.random.rand(3, 5, 5).astype(np.float32)
    assert runner('onnx_topk', converted, sample, net.predict(sample))
def test_tf_transpose(runner):
    """Convert tf.transpose with an explicit perm, default perm, and a constant."""
    model = Sequential()
    model.add(Lambda(lambda x: tf.transpose(x, perm=[0, 2, 3, 1]), input_shape=[2, 3, 4]))
    onnx_model = convert_keras(model, 'test_tf_transpose')
    data = np.random.rand(2, 2, 3, 4).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_transpose_1', onnx_model, data, expected)
    # Default perm (full reversal) — gated on a newer TF version.
    if is_tensorflow_later_than('1.13.0'):
        model = Sequential()
        model.add(Lambda(lambda x: tf.transpose(x), input_shape=[2, 3, 4]))
        onnx_model = convert_keras(model, 'test_tf_transpose')
        data = np.random.rand(4, 2, 3, 4).astype(np.float32)
        expected = model.predict(data)
        assert runner('onnx_transpose_2', onnx_model, data, expected)
    def my_func_1(x):
        # Transpose of a constant folded into the graph.
        a = tf.constant([[1, 2, 3], [4, 5, 6]], tf.float32)
        return x + tf.transpose(a)
    model = Sequential()
    model.add(Lambda(lambda x: my_func_1(x), input_shape=[3, 2]))
    onnx_model = convert_keras(model, 'test_tf_transpose')
    data = np.random.rand(2, 3, 2).astype(np.float32)
    expected = model.predict(data)
    assert runner('onnx_transpose_3', onnx_model, data, expected)
def test_tf_unpack(runner):
    """Check tf.unstack conversion along a positive and a negative axis."""
    for axis in (1, -1):
        keras_model = Sequential()
        keras_model.add(Lambda(lambda x: tf.unstack(x, axis=axis)[0], input_shape=[2, 3, 4]))
        onnx_model = convert_keras(keras_model, 'test_tf_unpack')
        batch = np.random.rand(3, 2, 3, 4).astype(np.float32)
        expected = keras_model.predict(batch)
        assert runner('onnx_unpack', onnx_model, batch, expected)
@pytest.mark.skipif(is_tf2,
                    reason="tf 2.0 is not supported.")
def test_tf_variable(runner):
    """Convert models that add a Keras backend variable/zeros/ones tensor to the input."""
    val = np.random.random((2, 3, 4))
    for var_ in [K.variable(value=val), K.zeros(shape=(2, 3, 4)), K.ones(shape=(2, 3, 4))]:
        model = Sequential()
        # The Lambda closes over the backend variable, which the converter
        # must freeze into an ONNX initializer/constant.
        model.add(Lambda(lambda x: x + var_, input_shape=[2, 3, 4]))
        onnx_model = convert_keras(model, 'test_tf_variable')
        data = np.random.rand(3, 2, 3, 4).astype(np.float32)
        expected = model.predict(data)
        assert runner('onnx_variable', onnx_model, data, expected)
@pytest.mark.skipif(is_tf2 or get_maximum_opset_supported() < 9,
                    reason="tf 2.0 or opset < 9 is not supported.")
def test_tf_where(runner):
    """Convert tf.where in both its coordinate form and its select (cond, x, y) form."""
    # Coordinate form: tf.where(cond) yields indices of True entries.
    # The Lambda ignores its input and returns this constant tensor.
    model = Sequential()
    a = tf.constant([[[1, 1], [3, 6]], [[7, 8], [9, 9]]])
    b = tf.where(tf.equal(a, 3))
    model.add(Lambda(lambda x: b, input_shape=(2,)))
    data = np.random.rand(1, 2).astype(np.float32)
    expected = model.predict(data)
    onnx_model = convert_keras(model, 'test_tf_where')
    assert runner('onnx_where', onnx_model, data, expected)
    # Same form, but with multiple matching entries in the constant.
    model = Sequential()
    a = tf.constant([[[1, 1], [3, 6]], [[7, 8], [3, 3]]])
    b = tf.where(tf.equal(a, 3))
    model.add(Lambda(lambda x: b, input_shape=(2,)))
    data = np.random.rand(3, 2).astype(np.float32)
    expected = model.predict(data)
    onnx_model = convert_keras(model, 'test_tf_where')
    assert runner('onnx_where', onnx_model, data, expected)
    target_opset = get_maximum_opset_supported()
    if target_opset >= 9:
        # Select form: elementwise choice between x and y driven by condition.
        model = Sequential()
        x = tf.constant([[1, 2, 3], [4, 5, 6]])
        y = tf.constant([[7, 8, 9], [10, 11, 12]])
        condition = tf.constant([[True, False, False], [False, True, True]])
        b = tf.where(condition, x, y)
        model.add(Lambda(lambda x: b, input_shape=(2,)))
        data = np.random.rand(2, 2).astype(np.float32)
        expected = model.predict(data)
        onnx_model = convert_keras(model, 'test_tf_where')
        assert runner('onnx_where', onnx_model, data, expected)
@pytest.mark.skipif(get_maximum_opset_supported() < 9, reason="conversion needs opset 9.")
def test_any_all(runner):
    """keras.backend.any / keras.backend.all reductions over two axes."""
    for reduce_op in (keras.backend.any, keras.backend.all):
        for axis in (1, -1):
            keras_model = Sequential()
            keras_model.add(Lambda(lambda x: reduce_op(x, axis=axis), input_shape=[3, 5]))
            onnx_model = convert_keras(keras_model, keras_model.name)
            batch = np.random.rand(2, 3, 5).astype(np.float32)
            expected = keras_model.predict(batch)
            assert runner(onnx_model.graph.name, onnx_model, batch, expected)
def test_dense(runner):
    """Two stacked Dense layers, converted with and without the second bias."""
    for use_bias in (True, False):
        model = keras.Sequential()
        model.add(Dense(5, input_shape=(4,), activation='sigmoid'))
        model.add(Dense(3, input_shape=(5,), use_bias=use_bias))
        model.compile('sgd', 'mse')
        onnx_model = convert_keras(model, model.name)
        sample = _asarray(1, 0, 0, 1)
        expected = model.predict(sample)
        assert runner('dense', onnx_model, sample, expected)
def test_dense_add(runner):
    """Add() merging three Dense branches with different activations."""
    in_relu = Input(shape=(4,))
    in_sigmoid = Input(shape=(5,))
    in_linear = Input(shape=(3,))
    branches = [
        Dense(3, activation='relu')(in_relu),
        Dense(3, activation='sigmoid')(in_sigmoid),
        Dense(3)(in_linear),
    ]
    merged = Add()(branches)  # equivalent to added = add([x1, x2])
    model = keras.models.Model(inputs=[in_relu, in_sigmoid, in_linear], outputs=merged)
    model.compile('sgd', 'mse')
    onnx_model = convert_keras(model, model.name)
    feed = [_asarray(1.2, 2.4, -2, 1), _asarray(-1, -2, 0, 1, 2), _asarray(0.5, 1.5, -3.14159)]
    expected = model.predict(feed)
    assert runner('onnx_dense_add', onnx_model, feed, expected)
@pytest.mark.skipif(is_tf2, reason="const is not initialized this way for tf2")
def test_conv_add(runner):
    """Add a Conv2D branch to an input bound to a constant tensor."""
    input1 = Input(shape=(10, 10, 1))
    x1 = Conv2D(32, strides=(2, 2), kernel_size=3,
                bias_initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=42))(input1)
    # input2 carries a constant tensor, so Keras predict() only consumes input1 ...
    input2 = Input(tensor=tf.constant(np.random.rand(1, 32).astype(np.float32)))
    added = Add()([x1, input2])
    model = keras.models.Model(inputs=[input1, input2], outputs=added)
    onnx_model = convert_keras(model, model.name)
    data = [np.random.rand(1, 10, 10, 1).astype(np.float32)]
    expected = model.predict(data)
    # ... while the ONNX graph is fed both arrays explicitly.
    data += [np.random.rand(1, 32).astype(np.float32)]
    assert runner('onnx_conv_add', onnx_model, data, expected)
def test_dense_softmax(runner):
    """Softmax both fused into Dense and as a separate Activation layer."""
    data = _asarray(1, 2, 3, 4)
    # Variant 1: softmax given as the Dense layer's activation.
    model = Sequential()
    model.add(Dense(5, input_shape=(4,), activation='softmax'))
    model.add(Dense(3, input_shape=(5,), use_bias=True))
    model.compile('sgd', 'mse')
    onnx_model = convert_keras(model, model.name)
    expected = model.predict(data)
    assert runner('dense_softmax_1', onnx_model, data, expected)
    # Variant 2: softmax inserted as its own Activation layer.
    model = Sequential()
    model.add(Dense(5, input_shape=(4,)))
    model.add(Activation('softmax'))
    model.add(Dense(3, input_shape=(5,), use_bias=True))
    model.compile('sgd', 'mse')
    onnx_model = convert_keras(model, model.name)
    expected = model.predict(data)
    assert runner('dense_softmax_2', onnx_model, data, expected)
@pytest.mark.parametrize("layer_type, data", [
    (Add, ([1, 2, 3], [4, 5, 6])),
    (Add, ([1, 2, 3], [4, 5, 6], [-3, -1, 1.5])),
    (Subtract, ([1, 2, 3], [4, 5, 6])),
    (Multiply, ([1, 2, 3], [4, 5, 6])),
    (Average, ([1, -2, 3], [3, 1, 1])),
    (Maximum, ([1, -2, 3], [3, 1, 1])),
    (lambda: Concatenate(), ([1, 2, 3], [4, 5, 6, 7])),
    (lambda: Concatenate(), ([1, 2, 3], [4, 5, 6, 7])),
    (lambda: Concatenate(-1), ([[1, 2], [3, 4]], [[4, 5], [6, 7]])),
    (lambda: Concatenate(1), ([[1, 2], [3, 4]], [[4, 5], [6, 7]])),
    (lambda: Concatenate(2), ([[1, 2], [3, 4]], [[4, 5], [6, 7]])),
])
def test_merge_layer(runner, layer_type, data):
    """Generic merge layers (Add/Subtract/Multiply/.../Concatenate) over 2+ inputs."""
    arrays = [_asarray(*d) for d in data]
    inputs = [Input(shape=a.shape[1:]) for a in arrays]
    merged = layer_type()(inputs)
    model = keras.models.Model(inputs=inputs, outputs=merged)
    onnx_model = convert_keras(model, model.name)
    expected = model.predict(arrays)
    assert runner(onnx_model.graph.name, onnx_model, arrays, expected)
@pytest.fixture(scope='function')
def conv_runner(runner):
    """Fixture: build a one-layer convolution model, convert it, and compare outputs.

    The returned callable accepts the conv layer class, channel counts, kernel,
    strides, spatial input size, activation, tolerances, bias flag, and optional
    channels-first/padding settings.
    """
    def func(layer_type, input_channels, output_channels, kernel_size, strides, input_size, activation,
             rtol, atol, bias, channels_first=False, padding='valid'):
        model = keras.Sequential()
        # Allow a bare int for 1-D spatial sizes.
        input_size_seq = (input_size,) if isinstance(input_size, int) else input_size
        kwargs = {}
        if channels_first:
            input_shape = (input_channels,) + input_size_seq
            # BUG FIX: the original used isinstance(layer_type, Conv1D), which is
            # always False because layer_type is a class, not an instance — so
            # data_format='channels_first' would also be passed to Conv1D.
            # issubclass expresses the intended "skip the kwarg for Conv1D" check.
            if not issubclass(layer_type, Conv1D):
                kwargs['data_format'] = 'channels_first'
        else:
            input_shape = input_size_seq + (input_channels,)
        model.add(layer_type(output_channels, kernel_size, input_shape=input_shape, strides=strides, padding=padding,
                             dilation_rate=1, activation=activation, use_bias=bias, **kwargs))
        data = np.random.uniform(-0.5, 0.5, size=(1,) + input_shape).astype(np.float32)
        onnx_model = convert_keras(model, model.name)
        expected = model.predict(data)
        assert runner(onnx_model.graph.name, onnx_model, data, expected, rtol=rtol, atol=atol)
    return func
@pytest.fixture(scope='function')
def conv1_runner(conv_runner):
    """Fixture: conv_runner specialized to Conv1D with tighter default tolerances."""
    def func(*args, activation=None, rtol=1e-4, atol=1e-6, bias=False, padding='valid'):
        return conv_runner(Conv1D, *args, activation, rtol, atol, bias, padding=padding)
    return func
def test_conv1d(conv1_runner):
    """Basic Conv1D conversion with stride 1 and stride 2."""
    for stride in (1, 2):
        conv1_runner(4, 5, 3, stride, 15)
def test_conv1d_padding(conv1_runner):
    """Conv1D with 'same' padding, plus 'causal' padding where the backend supports it."""
    conv1_runner(4, 5, 3, 1, 15, padding='same')
    # 'causal' padding is only exercised on tf.keras when TF >= 1.12.0;
    # plain Keras always runs the causal case.
    test_causal = False
    if is_tf_keras:
        import tensorflow
        from packaging.version import Version
        if Version(tensorflow.__version__.split('-')[0]) >= Version('1.12.0'):
            test_causal = True
    else:
        test_causal = True
    if test_causal:
        conv1_runner(4, 5, 3, 1, 15, padding='causal')
def test_conv1d_activation(conv1_runner):
    """Conv1D with a fused sigmoid activation."""
    conv1_runner(4, 5, 3, 1, 15, activation='sigmoid')
def test_conv1d_bias(conv1_runner):
    """Conv1D with a bias term enabled."""
    conv1_runner(4, 5, 3, 1, 15, bias=True)
@pytest.fixture(scope='function')
def conv2_runner(conv_runner):
    """Fixture: conv_runner specialized to Conv2D; requires a 2-D spatial input size."""
    def func(*args, activation=None, rtol=1e-3, atol=1e-5, bias=False, channels_first=False, padding='valid'):
        input_dims = args[-1]
        assert len(input_dims) == 2
        conv_runner(Conv2D, *args, activation, rtol, atol, bias, channels_first, padding)
    return func
@pytest.fixture(scope='function')
def conv2trans_runner(conv_runner):
    """Fixture: conv_runner specialized to Conv2DTranspose; requires a 2-D spatial input size."""
    def func(*args, activation=None, rtol=1e-3, atol=1e-5, bias=False, channels_first=False, padding='valid'):
        input_dims = args[-1]
        assert len(input_dims) == 2
        conv_runner(Conv2DTranspose, *args, activation, rtol, atol, bias, channels_first, padding)
    return func
def test_conv2d(conv2_runner):
    """Basic Conv2D conversion: 3->5 channels, 2x2 kernel, stride 1, 5x5 input."""
    conv2_runner(3, 5, (2, 2), (1, 1), (5, 5))
def test_conv2d_transpose(conv2trans_runner):
    """Basic Conv2DTranspose conversion: 3->5 channels, 2x2 kernel, stride 1."""
    conv2trans_runner(3, 5, (2, 2), (1, 1), (5, 5))
@pytest.mark.parametrize("padding", ["same", "valid"])
def test_conv2d_transpose_2(runner, padding):
    """Conv2DTranspose followed by BatchNormalization, for both padding modes."""
    size = 128
    image_in = Input((size, size, 3))
    deconv = Conv2DTranspose(256, (4, 4), strides=2, use_bias=False, padding=padding,
                             kernel_initializer='he_normal')(image_in)
    normed = BatchNormalization()(deconv)
    model = Model(inputs=image_in, outputs=normed)
    batch = np.random.rand(1, size, size, 3).astype(np.float32)
    onnx_model = convert_keras(model, model.name)
    expected = model.predict(batch)
    assert runner(onnx_model.graph.name, onnx_model, batch, expected)
def test_conv2d_padding_same(conv2_runner):
    """Conv2D with 'same' padding across assorted kernel/stride/input combinations."""
    cases = (
        (3, 5, (2, 2), (1, 1), (5, 5)),
        (8, 16, (1, 1), (2, 2), (60, 60)),
        (1, 1, (3, 3), (2, 2), (6, 6)),
        (1, 1, (7, 7), (2, 2), (25, 25)),
        (1, 1, (5, 7), (3, 5), (25, 25)),
    )
    for case in cases:
        conv2_runner(*case, padding='same')
@pytest.mark.skipif(is_tf_keras, reason="Generic conv implementation only supports NHWC tensor format in tf_keras")
def test_conv2d_format(conv2_runner):
    """Conv2D in channels-first layout (plain Keras only)."""
    conv2_runner(3, 5, (2, 2), (1, 1), (5, 5), channels_first=True)
def test_conv2d_activation(conv2_runner):
    """Conv2D with a fused relu and a fused softmax activation."""
    for act in ('relu', 'softmax'):
        conv2_runner(3, 5, (2, 2), (1, 1), (5, 5), activation=act)
def test_conv2d_bias(conv2_runner):
    """Conv2D with a bias term enabled."""
    conv2_runner(3, 5, (2, 2), (1, 1), (5, 5), bias=True)
def test_conv2d_larger(conv2_runner):
    """Conv2D with a larger non-square kernel on a rectangular input."""
    conv2_runner(3, 5, (7, 9), 1, (30, 20))
def test_conv2d_uneven_stride(conv2_runner):
    """Conv2D with different strides per spatial dimension."""
    conv2_runner(3, 5, (4, 4), (3, 2), (20, 10))
@pytest.fixture(scope='function')
def conv3_runner(conv_runner):
    """Fixture: conv_runner specialized to Conv3D; requires a 3-D spatial input size."""
    def func(*args, activation=None, rtol=1e-3, atol=1e-5, bias=False, channels_first=False, padding='valid'):
        input_dims = args[-1]
        assert len(input_dims) == 3
        conv_runner(Conv3D, *args, activation, rtol, atol, bias, channels_first, padding)
    return func
def test_conv3d(conv3_runner):
    """Basic Conv3D conversion: 3->5 channels, 2x2x2 kernel, stride 1."""
    conv3_runner(3, 5, (2, 2, 2), (1, 1, 1), (5, 5, 8))
@pytest.fixture(scope='function')
def conv3trans_runner(conv_runner):
    """Fixture: conv_runner specialized to Conv3DTranspose; requires a 3-D spatial input size."""
    def func(*args, activation=None, rtol=1e-3, atol=1e-5, bias=False, channels_first=False, padding='valid'):
        input_dims = args[-1]
        assert len(input_dims) == 3
        conv_runner(Conv3DTranspose, *args, activation, rtol, atol, bias, channels_first, padding)
    return func
@pytest.mark.skip("ONNXRuntime doesn't support 3D ConvTranspose.")
def test_conv3d_transpose(conv3trans_runner):
conv3trans_runner(3, 5, (2, 2, 2), (1, 1, 1), (5, 5, 8))
def test_flatten(runner):
    """Flatten followed by a Dense layer."""
    model = keras.Sequential()
    model.add(layers_core.Flatten(input_shape=(3, 2)))
    model.add(Dense(3))
    onnx_model = convert_keras(model, model.name)
    sample = np.array([[[1, 2], [3, 4], [5, 6]]], dtype=np.float32)
    expected = model.predict(sample)
    assert runner('flatten', onnx_model, sample, expected)
def test_opt_push_transpose_unsqueeze(runner):
    """Conv followed by squeeze/expand_dims, exercising the transpose/unsqueeze optimizer."""
    # 3-D input -> Conv2D path; 2-D input -> Conv1D path.
    for input_shape_ in [(1, 5, 7), (1, 5)]:
        model = keras.Sequential()
        if len(input_shape_) == 3:
            model.add(Conv2D(64, (3, 3),
                             input_shape=input_shape_, padding='same', ))
        else:
            model.add(Conv1D(64, 3,
                             input_shape=input_shape_, padding='same', ))
        # Remove and re-insert axis 1 so the converter must push transposes
        # through Squeeze/Unsqueeze nodes.
        model.add(Lambda(lambda x: tf.squeeze(x, [1])))
        model.add(Lambda(lambda x: tf.expand_dims(x, 1)))
        onnx_model = convert_keras(model, model.name)
        batch_input_shape = (4,) + input_shape_
        x = np.random.rand(*batch_input_shape).astype(np.float32)
        expected = model.predict(x)
        assert runner(onnx_model.graph.name, onnx_model, x, expected)
def test_flatten2(runner):
    """Flatten after a Conv2D for both data_format settings."""
    C, H, W = 3, 5, 7
    for data_format in ('channels_first', 'channels_last'):
        model = keras.Sequential()
        model.add(Conv2D(64, (3, 3),
                         input_shape=(C, H, W), padding='same', ))
        model.add(Flatten(data_format=data_format))
        onnx_model = convert_keras(model, model.name)
        x = np.random.rand(4, C, H, W).astype(np.float32)
        expected = model.predict(x)
        assert runner(onnx_model.graph.name, onnx_model, x, expected)
def test_reshape(runner):
    """Reshape (3, 2) -> (2, 3)."""
    model = keras.Sequential()
    model.add(layers_core.Reshape((2, 3), input_shape=(3, 2)))
    onnx_model = convert_keras(model, model.name)
    sample = np.array([[[1, 2], [3, 4], [5, 6]]], dtype=np.float32)
    expected = model.predict(sample)
    assert runner('reshape', onnx_model, sample, expected)
def test_permute(runner):
    """Permute swapping the two non-batch axes."""
    model = keras.Sequential()
    model.add(layers_core.Permute((2, 1), input_shape=(3, 2)))
    onnx_model = convert_keras(model, model.name)
    sample = np.array([[[1, 2], [3, 4], [5, 6]]], dtype=np.float32)
    expected = model.predict(sample)
    assert runner('permute', onnx_model, sample, expected)
def test_repeat_vector(runner):
    """RepeatVector tiling a length-4 vector 3 times."""
    model = keras.Sequential()
    model.add(layers_core.RepeatVector(3, input_shape=(4,)))
    onnx_model = convert_keras(model, model.name)
    sample = _asarray(1, 2, 3, 4)
    expected = model.predict(sample)
    assert runner('repeat_vector', onnx_model, sample, expected)
@pytest.fixture(scope='function')
def pooling_runner(runner):
    """Fixture: build a one-layer pooling model, convert it, and compare outputs."""
    def func(layer, ishape, data_format='channels_last'):
        model = keras.Sequential()
        # Global pooling layers take no pool size; the data_format constructor
        # argument is only passed on keras versions newer than 2.1.6.
        if is_keras_later_than('2.1.6'):
            nlayer = layer(data_format=data_format, input_shape=ishape) if \
                (layer.__name__.startswith("Global")) else layer(2, data_format=data_format, input_shape=ishape)
        else:
            nlayer = layer(input_shape=ishape) if \
                (layer.__name__.startswith("Global")) else layer(2, input_shape=ishape)
        model.add(nlayer)
        onnx_model = convert_keras(model, model.name)
        data = np.random.uniform(-0.5, 0.5, size=(1,) + ishape).astype(np.float32)
        expected = model.predict(data)
        assert runner(onnx_model.graph.name, onnx_model, data, expected)
    return func
def test_pooling_1d(pooling_runner):
    """1-D average and max pooling; channels-first only on newer Keras."""
    for pool_cls in (AveragePooling1D, MaxPool1D):
        pooling_runner(pool_cls, (4, 6))
    if is_keras_later_than('2.1.6'):
        for pool_cls in (AveragePooling1D, MaxPool1D):
            pooling_runner(pool_cls, (4, 6), 'channels_first')
def test_pooling_2d(pooling_runner, runner):
    """2-D pooling: fixture-driven average pool plus explicit MaxPooling2D models."""
    pooling_runner(AveragePooling2D, (4, 4, 3))
    N, C, H, W = 2, 3, 5, 5
    x = np.random.rand(N, H, W, C).astype(np.float32, copy=False)
    # First with the default 'valid' padding, then with padding='same'.
    for extra in ({}, {'padding': 'same'}):
        model = Sequential()
        model.add(MaxPooling2D((2, 2), strides=(2, 2), input_shape=(H, W, C),
                               data_format='channels_last', **extra))
        model.compile(optimizer='sgd', loss='mse')
        onnx_model = convert_keras(model, model.name)
        expected = model.predict(x)
        assert runner('max_pooling_2d', onnx_model, x, expected)
def test_pooling_3d(pooling_runner):
    """3-D average and max pooling."""
    for pool_cls in (AveragePooling3D, MaxPool3D):
        pooling_runner(pool_cls, (4, 4, 4, 3))
def test_pooling_global(pooling_runner):
    """GlobalAveragePooling2D conversion."""
    pooling_runner(GlobalAveragePooling2D, (4, 6, 2))
@pytest.mark.parametrize("layer", [
    'tanh',
    keras.activations.tanh,
    'sigmoid',
    keras.activations.sigmoid,
    'hard_sigmoid',
    keras.activations.hard_sigmoid,
    'relu',
    keras.activations.relu,
    'elu',
    keras.activations.elu,
    'selu',
    keras.activations.selu,
    'softsign',
    keras.activations.softsign,
    'softplus',
    keras.activations.softplus,
    'softmax',
    keras.activations.softmax,
    'linear',
    keras.activations.linear,
])
def test_activation_layer(runner, layer):
    """Each built-in activation, passed both by name and as a function."""
    samples = _asarray(-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)
    model = keras.Sequential()
    model.add(Activation(layer, input_shape=(samples.size,)))
    onnx_model = convert_keras(model, model.name)
    expected = model.predict(samples)
    assert runner(onnx_model.graph.name, onnx_model, samples, expected)
@pytest.fixture(scope='function')
def advanced_activation_runner(runner):
    """Fixture: convert a single advanced-activation layer at a chosen target opset."""
    def runner_func(layer, data, op_version=None):
        # Default to the newest opset the installed toolchain supports.
        if op_version is None:
            op_version = get_maximum_opset_supported()
        model = keras.Sequential()
        model.add(layer)
        onnx_model = convert_keras(model, model.name, target_opset=op_version)
        expected = model.predict(data)
        assert runner(onnx_model.graph.name, onnx_model, data, expected)
    return runner_func
def test_selu(runner):
    """SELU activation inside a small convolutional classifier."""
    size, n_classes = 10, 5
    model = Sequential()
    model.add(Conv2D(32, strides=(2, 2), kernel_size=3, input_shape=(size, size, 1)))
    model.add(Flatten())
    model.add(Dense(32, activation='selu'))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    batch = np.random.rand(5, size, size, 1).astype(np.float32)
    onnx_model = convert_keras(model, model.name)
    expected = model.predict(batch)
    assert runner(onnx_model.graph.name, onnx_model, batch, expected)
def test_gelu(runner):
    """Custom GELU (tanh approximation) used as a Keras Activation."""
    def gelu(x):
        # tanh approximation of the Gaussian error linear unit
        cdf = 0.5 * (1.0 + tf.tanh(
            (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
        return x * cdf
    timesteps, a_dim, dense_hidden_size = 20, 10, 10
    net_in = Input(shape=(timesteps, a_dim), dtype='float32', name="input")
    pooled = keras.layers.GlobalMaxPooling1D()(net_in)
    dense_out = Dense(dense_hidden_size, name="dense", trainable=False)(pooled)
    net_out = Activation(gelu, name='dense_gelu')(dense_out)
    keras_model = Model(inputs=net_in, outputs=net_out)
    batch = np.random.rand(2, timesteps, a_dim).astype(np.float32)
    onnx_model = convert_keras(keras_model, keras_model.name)
    expected = keras_model.predict(batch)
    assert runner(onnx_model.graph.name, onnx_model, batch, expected)
def test_LeakyReLU(advanced_activation_runner):
    """LeakyReLU advanced-activation conversion."""
    samples = _asarray(-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)
    leaky = advanced_activations.LeakyReLU(alpha=0.1, input_shape=(samples.size,))
    advanced_activation_runner(leaky, samples)
@pytest.mark.skipif(get_maximum_opset_supported() < 8,
                    reason="ThresoldRelu needs ONNX opset 8")
def test_ThresholdedReLU(advanced_activation_runner):
    """ThresholdedReLU at opset 8 explicitly, then at the default (max) opset."""
    samples = _asarray(-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)
    for opset in (8, None):
        layer = advanced_activations.ThresholdedReLU(theta=1.0, input_shape=(samples.size,))
        advanced_activation_runner(layer, samples, op_version=opset)
def test_ELU(advanced_activation_runner):
    """ELU advanced-activation conversion."""
    samples = _asarray(-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)
    elu_layer = advanced_activations.ELU(alpha=1.0, input_shape=(samples.size,))
    advanced_activation_runner(elu_layer, samples)
def test_PReLU(advanced_activation_runner):
    """PReLU with several alpha initializers."""
    samples = _asarray(-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)
    for initializer in ('zeros', 'ones', 'RandomNormal'):
        layer = advanced_activations.PReLU(alpha_initializer=initializer, input_shape=(samples.size,))
        advanced_activation_runner(layer, samples)
@pytest.mark.skipif(is_keras_older_than("2.2.4"),
reason="ReLU needs keras 2.2.4+")
def test_ReLU(advanced_activation_runner):
data = _asarray(-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)
layer = ReLU(input_shape=(data.size,))
advanced_activation_runner(layer, data)
layer = ReLU(max_value=1.5, input_shape=(data.size,))
advanced_activation_runner(layer, data)
layer = ReLU(max_value=1.5, negative_slope=0.1, input_shape=(data.size,))
advanced_activation_runner(layer, data)
def test_Softmax(advanced_activation_runner):
    """advanced_activations.Softmax over the last axis."""
    data = _asarray(-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)
    layer = advanced_activations.Softmax(axis=-1, input_shape=(data.size,))
    advanced_activation_runner(layer, data)
@pytest.mark.parametrize("axis", [0, 1, -1])
def test_Softmax_2(runner, axis):
    """Standalone Softmax layer over several axes of a 3-D (plus batch) input."""
    model = keras.Sequential()
    model.add(keras.layers.InputLayer((2, 3, 4)))
    model.add(keras.layers.Softmax(axis=axis))
    onnx_model = convert_keras(model, model.name)
    batch = np.random.rand(2, 2, 3, 4).astype(np.float32)
    expected = model.predict(batch)
    assert runner(onnx_model.graph.name, onnx_model, batch, expected)
@pytest.mark.skipif(is_tensorflow_older_than('1.14.0') and is_tf_keras, reason='old tf version')
def test_tf_nn_activation(runner):
    """tf.nn activation functions used directly as Dense activations, plus tf.keras variants."""
    for activation in ['relu', tf.nn.relu, tf.nn.relu6, tf.nn.softmax, tf.nn.leaky_relu]:
        model = keras.Sequential([
            Dense(64, activation=activation, input_shape=[10]),
            Dense(64, activation=activation),
            Dense(1)
        ])
        # tf.keras additionally exposes layer-object activations and swish,
        # gated by backend/version checks.
        if is_tf_keras:
            model.add(Activation(tf.keras.layers.LeakyReLU(alpha=0.2)))
            model.add(Activation(tf.keras.layers.ReLU()))
            model.add(tf.keras.layers.PReLU())
            model.add(tf.keras.layers.LeakyReLU(alpha=0.5))
            if is_tf2 and not is_tensorflow_older_than('2.2.0'):
                model.add(Lambda(lambda x: tf.keras.activations.swish(x)))
            if not is_tensorflow_older_than('1.15.0'):
                model.add(Lambda(lambda x: tf.nn.swish(x)))
        x = np.random.rand(5, 10).astype(np.float32)
        expected = model.predict(x)
        onnx_model = convert_keras(model, model.name)
        assert runner(onnx_model.graph.name, onnx_model, x, expected)
@pytest.fixture(scope='function')
def misc_conv_runner(runner):
    """Fixture: wrap one layer in a functional Model, convert at a target opset, and compare."""
    def func(layer, ishape, target_opset=None):
        if target_opset is None:
            target_opset = get_maximum_opset_supported()
        input = keras.Input(ishape)
        out = layer(input)
        model = keras.models.Model(input, out)
        onnx_model = convert_keras(model, model.name, target_opset=target_opset)
        data = np.random.uniform(0, 1, size=(1,) + ishape).astype(np.float32)
        expected = model.predict(data)
        assert runner(onnx_model.graph.name, onnx_model, data, expected)
    return func
def test_crop(misc_conv_runner):
    """Cropping1D/2D/3D with scalar, symmetric, and asymmetric crop amounts."""
    # It also passes the test for opset 9, we skip here because it uses a legacy experimental op DynamicSlice.
    opset_ = get_maximum_opset_supported()
    if opset_ >= 10:
        ishape = (10, 20)
        for crop_v in [2, (1, 2)]:
            layer = Cropping1D(cropping=crop_v)
            misc_conv_runner(layer, ishape, opset_)
        # 2-D and 3-D crops in both data formats.
        for data_format_ in ['channels_last', 'channels_first']:
            ishape = (20, 20, 10)
            for crop_v in [2, (2, 2), ((1, 2), (2, 3))]:
                layer = Cropping2D(cropping=crop_v, data_format=data_format_)
                misc_conv_runner(layer, ishape, opset_)
            ishape = (20, 20, 20, 10)
            for crop_v in [2, (2, 3, 4), ((1, 2), (2, 3), (3, 5))]:
                layer = Cropping3D(cropping=crop_v, data_format=data_format_)
                misc_conv_runner(layer, ishape, opset_)
    # TODO handle other cases for opset 8
    ishape = (20, 20, 1)
    layer = Cropping2D(cropping=((1, 2), (2, 3)), data_format='channels_last')
    misc_conv_runner(layer, ishape, opset_)
def test_upsample(misc_conv_runner):
    """UpSampling1D/2D/3D conversion, including bilinear 2-D where supported."""
    # UpSampling1D parameters changed after keras 2.1.6.
    if is_keras_later_than('2.1.6'):
        ishape = (20, 5)
        layer = UpSampling1D(size=2)
        misc_conv_runner(layer, ishape)
        if not is_tf_keras:
            # 1-D input without a channel dimension (plain Keras only).
            ishape = (20,)
            layer = UpSampling1D(size=2)
            misc_conv_runner(layer, ishape)
    ishape = (20, 20, 1)
    for size in [2, (2, 3)]:
        layer = UpSampling2D(size=size, data_format='channels_last')
        misc_conv_runner(layer, ishape)
        # Bilinear interpolation appeared in keras 2.2.3; on tf.keras it also
        # needs opset >= 11.
        if not is_keras_older_than("2.2.3"):
            opset_ = get_maximum_opset_supported()
            if opset_ >= 11 or not is_tf_keras:
                layer = UpSampling2D(size=size, data_format='channels_last', interpolation='bilinear')
                misc_conv_runner(layer, ishape)
    ishape = (20, 20, 20, 1)
    layer = UpSampling3D(size=(2, 3, 4), data_format='channels_last')
    misc_conv_runner(layer, ishape)
def test_padding(misc_conv_runner):
    """Asymmetric ZeroPadding2D in channels-last format."""
    pad_layer = ZeroPadding2D(padding=((1, 2), (2, 3)), data_format='channels_last')
    misc_conv_runner(pad_layer, (20, 20, 1))
@pytest.mark.skipif(is_tf2 and is_tensorflow_older_than('2.2'),
                    reason="Variable freezing fails to replace ResourceGather op")
def test_embedding(runner):
    """Embedding lookup conversion."""
    model = keras.Sequential()
    model.add(Embedding(1000, 64, input_length=10))
    model.compile('rmsprop', 'mse')
    token_ids = np.random.randint(1000, size=(1, 10)).astype(np.float32)
    onnx_model = convert_keras(model, model.name)
    expected = model.predict(token_ids)
    assert runner(onnx_model.graph.name, onnx_model, token_ids, expected)
@pytest.fixture(scope='function')
def dot_runner(runner):
    """Fixture: Dot merge of two inputs over the last axis, with optional L2 normalization."""
    def func(l2Normalize, input1, input2):
        data = [input1, input2]
        inputs = [Input(shape=d.shape[1:]) for d in data]
        layer = Dot(axes=-1, normalize=l2Normalize)(inputs)
        model = keras.models.Model(inputs=inputs, outputs=layer)
        onnx_model = convert_keras(model, model.name)
        expected = model.predict(data)
        assert runner(onnx_model.graph.name, onnx_model, data, expected)
    return func
def test_dot(dot_runner):
    """Dot merge with and without L2 normalization."""
    for normalize in (False, True):
        dot_runner(normalize, _asarray(1, 2, 3), _asarray(4, 5, 6))
def test_dot2(runner):
    """dot() over many shape/axes combinations, with and without normalization."""
    # The three lists below are parallel: shapes of the two inputs and the
    # axes argument for each case.
    input_1_shapes = [[32, 20, 1], [2, 3, 5], [2, 3, 5], [4, 3, 5], [2, 7], [2, 3, 4, 12, 3], [1, 3]]
    input_2_shapes = [[32, 30, 20], [2, 3, 5], [2, 3, 5], [4, 5], [2, 7, 5], [2, 3, 4, 15, 3], [1, 3]]
    axes_list = [[1, 2], 1, 2, [2, 1], [1, 1], 4, 1]
    for i_ in range(len(input_1_shapes)):
        for normalize in [True, False]:
            drop2_embed_title = Input(batch_shape=tuple(input_1_shapes[i_]), name='input1')
            att_weight = Input(batch_shape=tuple(input_2_shapes[i_]), name='input2')
            doc_vec1 = dot([drop2_embed_title, att_weight], axes=axes_list[i_], normalize=normalize)
            model = keras.models.Model(inputs=[drop2_embed_title, att_weight], outputs=doc_vec1)
            data1 = np.random.rand(*input_1_shapes[i_]).astype(np.float32)
            data2 = np.random.rand(*input_2_shapes[i_]).astype(np.float32)
            expected = model.predict([data1, data2])
            onnx_model = convert_keras(model, model.name)
            assert runner(onnx_model.graph.name, onnx_model, [data1, data2], expected)
    # Extra case: dynamic batch dimension (batch_shape=None).
    drop2_embed_title = Input(batch_shape=(None, 7), name='input1')
    att_weight = Input(batch_shape=(None, 7, 5), name='input2')
    doc_vec1 = dot([drop2_embed_title, att_weight], axes=[1, 1])
    model = keras.models.Model(inputs=[drop2_embed_title, att_weight], outputs=doc_vec1)
    data1 = np.random.rand(2, 7).astype(np.float32)
    data2 = np.random.rand(2, 7, 5).astype(np.float32)
    expected = model.predict([data1, data2])
    onnx_model = convert_keras(model, model.name)
    assert runner(onnx_model.graph.name, onnx_model, [data1, data2], expected)
def test_training_layer(runner):
    """Training-only layers (noise/dropout variants) between Dense layers."""
    model = keras.Sequential()
    for layer in (Dense(32, input_shape=(2, 3, 4)),
                  GaussianNoise(0.1),
                  Activation('relu'),
                  GaussianDropout(0.1),
                  AlphaDropout(0.1),
                  SpatialDropout2D(0.2),
                  Dense(1)):
        model.add(layer)
    onnx_model = convert_keras(model, model.name)
    batch = np.random.rand(2, 2, 3, 4).astype(np.float32)
    expected = model.predict(batch)
    assert runner(onnx_model.graph.name, onnx_model, batch, expected)
@pytest.fixture(scope='function')
def batch_norm_runner(runner):
    """Fixture: one BatchNormalization layer with moving stats seeded from the data itself."""
    def func(data, gamma, beta, scale, center, axis):
        model = keras.Sequential()
        layer = BatchNormalization(
            axis=axis,
            input_shape=data.shape[1:],
            # Seed the moving statistics from the test data so inference-mode
            # normalization is deterministic.
            moving_mean_initializer=keras.initializers.constant(np.mean(data)),
            moving_variance_initializer=keras.initializers.constant(np.var(data)),
            gamma_initializer=gamma,
            beta_initializer=beta,
            center=center,
            scale=scale,
        )
        model.add(layer)
        onnx_model = convert_keras(model, model.name)
        expected = model.predict(data)
        assert runner(onnx_model.graph.name, onnx_model, data, expected)
    return func
def test_batch_normalization(batch_norm_runner):
    """BatchNormalization over the last axis, plus axis-1 variants on plain Keras."""
    data = _asarray([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
    batch_norm_runner(data, 'ones', 'zeros', True, True, 3)
    batch_norm_runner(data, 'ones', 'ones', True, True, 3)
    # The CPU implementation of FusedBatchNorm only supports NHWC tensor format in tf keras
    if not is_tf_keras:
        axis1_cases = (
            ('ones', 'zeros', True, True),
            ('ones', 'ones', True, True),
            ('ones', 'ones', True, False),
            ('zeros', 'zeros', False, True),
        )
        for gamma, beta, scale, center in axis1_cases:
            batch_norm_runner(data, gamma, beta, scale, center, 1)
def test_batch_normalization_2(runner):
    """BatchNormalization on 2-D, 3-D, and 4-D inputs, over each supported axis."""
    # The CPU implementation of FusedBatchNorm only supports NHWC tensor format in tf keras
    axis_list = [-1] if is_tf_keras else [1, -1]
    for axis in axis_list:
        batch_size = 4
        input_dim_1 = 10
        input_dim_2 = 20
        input_dim_3 = 30
        # 2-D input: BN between input and Dense.
        model = Sequential()
        model.add(InputLayer(input_shape=(input_dim_1,)))
        model.add(BatchNormalization(axis=axis))
        model.add(Dense(5))
        data = np.random.randn(batch_size, input_dim_1).astype(np.float32)
        onnx_model = convert_keras(model)
        expected = model.predict(data)
        assert runner('test_batch_normalization_2_2d', onnx_model, [data], expected)
        # 3-D input: optionally preceded by Conv1D (only for the channels-last axis).
        model = Sequential()
        model.add(InputLayer(input_shape=(input_dim_1, input_dim_2)))
        if axis == -1:
            model.add(Conv1D(32, strides=(2,), kernel_size=3))
        model.add(BatchNormalization(axis=axis))
        model.add(Dense(5))
        data = np.random.randn(batch_size, input_dim_1, input_dim_2).astype(np.float32)
        onnx_model = convert_keras(model)
        expected = model.predict(data)
        assert runner('test_batch_normalization_2_3d', onnx_model, [data], expected)
        # 4-D input: optionally preceded by Conv2D (only for the channels-last axis).
        model = Sequential()
        model.add(InputLayer(input_shape=(input_dim_1, input_dim_2, input_dim_3)))
        if axis == -1:
            model.add(Conv2D(32, strides=(2, 2), kernel_size=3))
        model.add(BatchNormalization(axis=axis))
        model.add(Dense(5))
        data = np.random.randn(batch_size, input_dim_1, input_dim_2, input_dim_3).astype(np.float32)
        onnx_model = convert_keras(model)
        expected = model.predict(data)
        assert runner('test_batch_normalization_2_4d', onnx_model, [data], expected)
def test_simpleRNN(runner):
    """SimpleRNN: plain, with an initial state, and with both initial and output state."""
    K.clear_session()
    # Plain SimpleRNN returning the full sequence.
    inputs1 = keras.Input(shape=(3, 1))
    cls = SimpleRNN(2, return_state=False, return_sequences=True)
    oname = cls(inputs1)  # , initial_state=t0)
    model = keras.Model(inputs=inputs1, outputs=[oname])
    onnx_model = convert_keras(model, model.name)
    data = np.array([0.1, 0.2, 0.3]).astype(np.float32).reshape((1, 3, 1))
    expected = model.predict(data)
    assert runner(onnx_model.graph.name, onnx_model, data, expected)
    # with initial state
    inputs2 = keras.Input(shape=(1, 2))
    state = keras.Input(shape=(5,))
    hidden_1 = SimpleRNN(5, activation='relu', return_sequences=True)(inputs2, initial_state=[state])
    output = Dense(2, activation='sigmoid')(hidden_1)
    keras_model = keras.Model(inputs=[inputs2, state], outputs=output)
    onnx_model = convert_keras(keras_model, keras_model.name)
    N, H, W, C = 3, 1, 2, 5
    x = np.random.rand(N, H, W).astype(np.float32, copy=False)
    s = np.random.rand(N, C).astype(np.float32, copy=False)
    expected = keras_model.predict([x, s])
    assert runner(onnx_model.graph.name, onnx_model, [x, s], expected)
    # with initial state and output state
    input = keras.Input(shape=(1, 2))
    state_in = keras.Input(shape=(10,))
    hidden_1, state_out = SimpleRNN(10, activation='relu', return_sequences=True,
                                    return_state=True)(input, initial_state=[state_in])
    output = Dense(2, activation='linear')(hidden_1)
    keras_model = keras.Model(inputs=[input, state_in], outputs=[output, state_out])
    onnx_model = convert_keras(keras_model, keras_model.name)
    N, H, W, C = 3, 1, 2, 10
    x = np.random.rand(N, H, W).astype(np.float32, copy=False)
    s = np.random.rand(N, C).astype(np.float32, copy=False)
    expected = keras_model.predict([x, s])
    assert runner(onnx_model.graph.name, onnx_model, [x, s], expected)
@pytest.mark.parametrize("gru_class, rnn_version", GRU_CLASSES)
@pytest.mark.parametrize("return_sequences", [True, False])
def test_GRU(runner, gru_class, rnn_version, return_sequences):
    """GRU conversion with and without an initial state input."""
    inputs1 = keras.Input(shape=(3, 1))
    # GRU with no initial state
    cls = gru_class(2, return_state=False, return_sequences=False)
    oname = cls(inputs1)
    model = keras.Model(inputs=inputs1, outputs=[oname])
    onnx_model = convert_keras(model, model.name)
    # v2 GRU kernels should convert to loop-free ONNX graphs.
    if rnn_version == "v2":
        assert no_loops_in_tf2(onnx_model)
    data = np.array([0.1, 0.2, 0.3]).astype(np.float32).reshape((1, 3, 1))
    expected = model.predict(data)
    assert runner(onnx_model.graph.name, onnx_model, data, expected)
    # GRU with initial state
    cls = gru_class(2, return_state=False, return_sequences=return_sequences)
    initial_state_input = keras.Input(shape=(2,))
    oname = cls(inputs1, initial_state=initial_state_input)
    model = keras.Model(inputs=[inputs1, initial_state_input], outputs=[oname])
    onnx_model = convert_keras(model, model.name)
    if rnn_version == "v2":
        assert no_loops_in_tf2(onnx_model)
    data = np.array([0.1, 0.2, 0.3]).astype(np.float32).reshape((1, 3, 1))
    init_state = np.array([0.4, 0.5]).astype(np.float32).reshape((1, 2))
    init_state_onnx = np.array([0.4, 0.5]).astype(np.float32).reshape((1, 2))
    expected = model.predict([data, init_state])
    assert runner(onnx_model.graph.name, onnx_model, [data, init_state_onnx], expected)
@pytest.mark.skipif(not is_tf_keras and is_tf2 and is_tensorflow_older_than('2.2'),
                    reason="Fails due to some reason involving bad graph captures. Works in new versions and tf_keras")
@pytest.mark.parametrize("gru_class, rnn_version", GRU_CLASSES)
def test_GRU_2(runner, gru_class, rnn_version):
    """Convert a Sequential GRU (reset_after=True) followed by a sigmoid Dense head."""
    model = keras.Sequential(name='TestGRU')
    model.add(gru_class(400, reset_after=True, input_shape=(1, 257)))
    model.add(Dense(257, activation='sigmoid'))
    onnx_model = convert_keras(model, name=model.name)
    if rnn_version == "v2":
        # v2 GRUs are expected to convert without ONNX Loop nodes.
        assert no_loops_in_tf2(onnx_model)
    batch = np.random.rand(3, 257).astype(np.float32).reshape((3, 1, 257))
    assert runner(onnx_model.graph.name, onnx_model, batch, model.predict(batch))
@pytest.mark.parametrize("lstm_class, lstmcell_class, rnn_version", LSTM_CLASSES)
@pytest.mark.parametrize('return_sequences', [False, True])
@pytest.mark.parametrize('use_bias', [False, True])
def test_LSTM(runner, lstm_class, lstmcell_class, rnn_version, return_sequences, use_bias):
    """Convert an LSTM layer and an equivalent RNN(LSTMCell) wrapper side by side."""
    seq_input = keras.Input(shape=(3, 5))
    data = np.random.rand(3, 5).astype(np.float32).reshape((1, 3, 5))
    lstm_layer = lstm_class(units=2, return_state=True,
                            return_sequences=return_sequences, use_bias=use_bias)
    cell_layer = RNN(lstmcell_class(units=2, use_bias=use_bias),
                     return_state=True, return_sequences=return_sequences)
    out1, h1, c1 = lstm_layer(seq_input)
    out2, h2, c2 = cell_layer(seq_input)
    model = keras.Model(inputs=seq_input, outputs=[out1, h1, c1, out2, h2, c2])
    onnx_model = convert_keras(model, model.name)
    if rnn_version == "v2":
        # v2 LSTMs are expected to convert without ONNX Loop nodes.
        assert no_loops_in_tf2(onnx_model)
    assert runner(onnx_model.graph.name, onnx_model, data, model.predict(data))
@pytest.mark.skipif((is_tensorflow_older_than('1.14.0') or (not is_tf_keras)), reason='old tf version')
@pytest.mark.parametrize("lstm_class, lstmcell_class, rnn_version", LSTM_CLASSES)
@pytest.mark.parametrize('return_sequences', [False, True])
@pytest.mark.parametrize('use_bias', [False, True])
def test_LSTM_rev(runner, lstm_class, lstmcell_class, rnn_version, return_sequences, use_bias):
    """Convert a reversed (go_backwards=True) LSTM that also returns its states."""
    seq_input = keras.Input(shape=(3, 5))
    data = np.random.rand(3, 5).astype(np.float32).reshape((1, 3, 5))
    reversed_lstm = lstm_class(units=2, return_state=True, go_backwards=True,
                               return_sequences=return_sequences, use_bias=use_bias)
    out, state_h, state_c = reversed_lstm(seq_input)
    model = keras.Model(inputs=seq_input, outputs=[out, state_h, state_c])
    onnx_model = convert_keras(model, model.name)
    if rnn_version == "v2":
        # v2 LSTMs are expected to convert without ONNX Loop nodes.
        assert no_loops_in_tf2(onnx_model)
    assert runner(onnx_model.graph.name, onnx_model, data, model.predict(data))
@pytest.mark.skipif((is_tensorflow_older_than('1.14.0') or (not is_tf_keras)),
                    reason="keras LSTM does not have time_major attribute")
@pytest.mark.parametrize("lstm_class, lstmcell_class, rnn_version", LSTM_CLASSES)
def test_LSTM_time_major_return_seq_true(runner, lstm_class, lstmcell_class, rnn_version):
    """Convert time-major LSTM and RNN(LSTMCell) layers that return full sequences."""
    inputs1 = keras.Input(shape=(3, 5))
    data = np.random.rand(1, 3, 5).astype(np.float32)
    # Transpose input to be time major
    input_transposed = tf.transpose(inputs1, perm=[1, 0, 2])
    lstm1, state_h, state_c = lstm_class(units=2, time_major=True, return_state=True,
                                         return_sequences=True)(input_transposed)
    lstm2, state_h_2, state_c_2 = RNN(lstmcell_class(units=2), time_major=True, return_state=True,
                                      return_sequences=True)(input_transposed)
    # Transpose the time-major sequence outputs back to batch-major so the
    # model outputs line up with the batch-major values compared by the runner.
    lstm1_trans = tf.transpose(lstm1, perm=[1, 0, 2])
    lstm2_trans = tf.transpose(lstm2, perm=[1, 0, 2])
    model = keras.Model(inputs=inputs1, outputs=[lstm1_trans, state_h, state_c,
                                                 lstm2_trans, state_h_2, state_c_2])
    onnx_model = convert_keras(model, model.name)
    if rnn_version == "v2":
        # v2 LSTMs are expected to convert without ONNX Loop nodes.
        assert no_loops_in_tf2(onnx_model)
    expected = model.predict(data)
    assert runner(onnx_model.graph.name, onnx_model, data, expected)
@pytest.mark.skipif((is_tensorflow_older_than('1.14.0') or (not is_tf_keras)),
                    reason="keras LSTM does not have time_major attribute")
@pytest.mark.parametrize("lstm_class, lstmcell_class, rnn_version", LSTM_CLASSES)
def test_LSTM_time_major_return_seq_false(runner, lstm_class, lstmcell_class, rnn_version):
    """Convert time-major LSTM and RNN(LSTMCell) layers that return only the last step.

    With return_sequences=False no transpose-back is needed: the last-step
    output and the state tensors are already batch-major.
    """
    inputs1 = keras.Input(shape=(3, 5))
    data = np.random.rand(1, 3, 5).astype(np.float32)
    # Transpose input to be time major
    input_transposed = tf.transpose(inputs1, perm=[1, 0, 2])
    lstm1, state_h, state_c = lstm_class(units=2, time_major=True, return_state=True,
                                         return_sequences=False)(input_transposed)
    lstm2, state_h_2, state_c_2 = RNN(lstmcell_class(units=2), time_major=True, return_state=True,
                                      return_sequences=False)(input_transposed)
    model = keras.Model(inputs=inputs1, outputs=[lstm1, state_h, state_c,
                                                 lstm2, state_h_2, state_c_2])
    onnx_model = convert_keras(model, model.name)
    if rnn_version == "v2":
        # v2 LSTMs are expected to convert without ONNX Loop nodes.
        assert no_loops_in_tf2(onnx_model)
    expected = model.predict(data)
    assert runner(onnx_model.graph.name, onnx_model, data, expected)
@pytest.mark.parametrize("lstm_class, lstmcell_class, rnn_version", LSTM_CLASSES)
def test_LSTM_with_bias(runner, lstm_class, lstmcell_class, rnn_version):
    """Convert single-unit LSTM layers with hand-picked kernel/recurrent/bias weights."""
    inputs1 = keras.Input(shape=(1, 1))
    cls = lstm_class(units=1, return_state=True, return_sequences=True)
    lstm1, state_h, state_c = cls(inputs1)
    lstm2, state_h_2, state_c_2 = RNN(lstmcell_class(units=1), return_state=True,
                                      return_sequences=True)(inputs1)
    model = keras.Model(inputs=inputs1, outputs=[lstm1, state_h, state_c,
                                                 lstm2, state_h_2, state_c_2])
    # Set weights: kernel, recurrent_kernel and bias
    # (the triple is repeated once per LSTM layer, in layer-creation order).
    model.set_weights((np.array([[1, 2, 3, 4]]), np.array([[5, 6, 7, 8]]), np.array([1, 2, 3, 4]),
                       np.array([[1, 2, 3, 4]]), np.array([[5, 6, 7, 8]]), np.array([1, 2, 3, 4])))
    data = np.random.rand(1, 1).astype(np.float32).reshape((1, 1, 1))
    onnx_model = convert_keras(model, model.name)
    if rnn_version == "v2":
        # v2 LSTMs are expected to convert without ONNX Loop nodes.
        assert no_loops_in_tf2(onnx_model)
    expected = model.predict(data)
    assert runner(onnx_model.graph.name, onnx_model, data, expected)
@pytest.mark.parametrize("lstm_class, lstmcell_class, rnn_version", LSTM_CLASSES)
def test_LSTM_reshape(runner, lstm_class, lstmcell_class, rnn_version):
    """Convert LSTM sequence outputs that feed into Reshape layers."""
    feature_dim = 7
    steps = 3
    seq_input = keras.Input(shape=(steps, feature_dim))
    seq1 = lstm_class(units=5, return_state=False, return_sequences=True)(seq_input)
    seq2 = RNN(lstmcell_class(units=5), return_state=False, return_sequences=True)(seq_input)
    model = keras.Model(inputs=seq_input,
                        outputs=[Reshape((steps, 5))(seq1), Reshape((steps, 5))(seq2)])
    model.compile(optimizer='sgd', loss='mse')
    onnx_model = convert_keras(model, 'test')
    if rnn_version == "v2":
        # v2 LSTMs are expected to convert without ONNX Loop nodes.
        assert no_loops_in_tf2(onnx_model)
    data = np.random.rand(feature_dim, steps).astype(np.float32).reshape((1, steps, feature_dim))
    assert runner('tf_lstm', onnx_model, data, model.predict(data))
@pytest.mark.parametrize("lstm_class, lstmcell_class, rnn_version", LSTM_CLASSES)
def test_LSTM_with_initializer(runner, lstm_class, lstmcell_class, rnn_version):
    """Convert LSTM layers whose initial h/c states are separate named model inputs."""
    # batch_size = N
    # seq_length = H
    # input_size = W
    # hidden_size = C
    N, H, W, C = 3, 1, 2, 5
    # inputs shape: (batch_size, seq_length)
    inputs = keras.Input(shape=(H, W), name='inputs')
    # initial state shape: (hidden_size, 1)
    state_h = keras.Input(shape=(C,), name='state_h')
    state_c = keras.Input(shape=(C,), name='state_c')
    # create keras model
    lstm_layer = lstm_class(units=C, activation='relu', return_sequences=True)(inputs,
                                                                               initial_state=[state_h,
                                                                                              state_c])
    lstm_layer_2 = RNN(lstmcell_class(units=C, activation='relu'),
                       return_sequences=True)(inputs, initial_state=[state_h, state_c])
    outputs = Dense(W, activation='sigmoid')(lstm_layer)
    outputs_2 = Dense(W, activation='sigmoid')(lstm_layer_2)
    keras_model = keras.Model(inputs=[inputs, state_h, state_c],
                              outputs=[outputs, outputs_2])
    x = np.random.rand(1, H, W).astype(np.float32)
    sh = np.random.rand(1, C).astype(np.float32)
    sc = np.random.rand(1, C).astype(np.float32)
    onnx_model = convert_keras(keras_model, keras_model.name)
    if rnn_version == "v2":
        # v2 LSTMs are expected to convert without ONNX Loop nodes.
        assert no_loops_in_tf2(onnx_model)
    expected = keras_model.predict([x, sh, sc])
    # Feed the ONNX graph by input name so each state maps to the right tensor.
    assert runner(onnx_model.graph.name, onnx_model, {"inputs": x, 'state_h': sh, 'state_c': sc}, expected)
@pytest.mark.skipif(get_maximum_opset_supported() < 5,
                    reason="None seq_length LSTM is not supported before opset 5.")
@pytest.mark.skipif(is_tensorflow_older_than('2.2'), reason='require 2.2 to fix freezing')
@pytest.mark.parametrize("lstm_class, lstmcell_class, rnn_version", LSTM_CLASSES)
@pytest.mark.parametrize('return_sequences', [False, True])
def test_LSTM_seqlen_none(runner, lstm_class, lstmcell_class, rnn_version, return_sequences):
    """Convert stateful LSTM layers whose sequence length is left as None."""
    lstm_dim = 2
    data = np.random.rand(1, 5, 1).astype(np.float32)
    # batch_shape pins the batch size (needed for stateful layers) while
    # leaving the time dimension unspecified.
    inp = Input(batch_shape=(1, None, 1))
    out = lstm_class(lstm_dim, return_sequences=return_sequences, stateful=True)(inp)
    out_2 = RNN(lstmcell_class(lstm_dim), return_sequences=return_sequences, stateful=True)(inp)
    keras_model = keras.Model(inputs=inp, outputs=[out, out_2])
    # NOTE(review): conversion happens before predict(); since the layers are
    # stateful, predict() mutates their internal state afterwards.
    onnx_model = convert_keras(keras_model)
    if rnn_version == "v2":
        # v2 LSTMs are expected to convert without ONNX Loop nodes.
        assert no_loops_in_tf2(onnx_model)
    expected = keras_model.predict(data)
    assert runner(onnx_model.graph.name, onnx_model, data, expected)
@pytest.mark.parametrize("return_sequences", [True, False])
@pytest.mark.parametrize("rnn_class", RNN_CLASSES)
def test_bidirectional(runner, rnn_class, return_sequences):
    """Convert Bidirectional RNN wrappers: Sequential form, then functional form
    with both 'concat' and no merging of the two directions."""
    input_dim = 10
    sequence_len = 5
    op_version = get_maximum_opset_supported()
    # Batch sizes > 1 are only exercised from opset 9 onwards.
    batch_list = [1, 4] if op_version >= 9 else [1]
    model = keras.Sequential()
    model.add(Bidirectional(rnn_class(7, return_sequences=return_sequences),
                            input_shape=(5, 10)))
    model.add(Dense(5))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    onnx_model = convert_keras(model, 'test', target_opset=op_version)
    assert all_recurrents_should_bidirectional(onnx_model)
    for batch in batch_list:
        data = np.random.rand(batch, sequence_len, input_dim).astype(np.float32)
        expected = model.predict(data)
        assert runner('bidirectional', onnx_model, data, expected)
    # Functional-API variant covering explicit merge modes.
    for merge_mode in ['concat', None]:
        sub_input1 = Input(shape=(sequence_len, input_dim))
        sub_mapped1 = Bidirectional(rnn_class(7, return_sequences=return_sequences),
                                    input_shape=(5, 10), merge_mode=merge_mode)(sub_input1)
        keras_model = keras.Model(inputs=sub_input1, outputs=sub_mapped1)
        onnx_model = convert_keras(keras_model, 'test_2', target_opset=op_version)
        assert all_recurrents_should_bidirectional(onnx_model)
        for batch in batch_list:
            data = np.random.rand(batch, sequence_len, input_dim).astype(np.float32)
            expected = keras_model.predict(data)
            assert runner('bidirectional', onnx_model, data, expected)
@pytest.mark.parametrize("rnn_class", RNN_CLASSES)
def test_bidirectional_with_bias(runner, rnn_class):
    """Convert a Bidirectional RNN before and after overwriting its bias weights."""
    K.clear_session()
    model = keras.Sequential()
    model.add(Bidirectional(rnn_class(4, return_sequences=False),
                            input_shape=(3, 5), name='bi'))
    x = np.random.uniform(100, 999, size=(2, 3, 5)).astype(np.float32)
    # Test with the default bias
    expected = model.predict(x)
    onnx_model = convert_keras(model, model.name)
    assert all_recurrents_should_bidirectional(onnx_model)
    assert runner(onnx_model.graph.name, onnx_model, x, expected)
    # Set bias values to random floats
    rnn_layer = model.get_layer('bi')
    weights = rnn_layer.get_weights()
    # Indices 2 and 5 are the forward/backward bias vectors (per Keras weight
    # ordering of kernel, recurrent_kernel, bias for each direction); both
    # directions receive the same random bias.
    weights[2] = np.random.uniform(size=weights[2].shape)
    weights[5] = weights[2]
    rnn_layer.set_weights(weights)
    # Test with random bias
    expected = model.predict(x)
    onnx_model = convert_keras(model, model.name)
    assert all_recurrents_should_bidirectional(onnx_model)
    assert runner(onnx_model.graph.name, onnx_model, x, expected)
@pytest.mark.skipif((is_tensorflow_older_than('2.3.0') or (not is_tf_keras)),
                    reason=(
                            "keras LSTM does not have time_major attribute. There was a bug in tf.keras bidirectional lstm with time_major true which will be fixed in tf-2.3, See - https://github.com/tensorflow/tensorflow/issues/39635"))
@pytest.mark.parametrize("rnn_class", RNN_CLASSES)
def test_bidirectional_time_major_true(runner, rnn_class):
    """Convert time-major Bidirectional RNNs across return_sequences/merge_mode combinations."""
    feature_dim = 1
    seq_len = 3
    x = np.ones((1, seq_len, feature_dim), dtype=np.float32)
    for ret_seq in [True, False]:
        for merge_mode in ['concat', None]:
            K.clear_session()
            input = keras.Input(shape=(seq_len, feature_dim))
            # Transpose input to be time major
            input_transposed = tf.transpose(input, perm=[1, 0, 2])
            output = Bidirectional(rnn_class(1, return_sequences=ret_seq,
                                             time_major=True),
                                   name='bi', merge_mode=merge_mode)(input_transposed)
            # The concat-merged sequence output is transposed back to
            # batch-major before being compared by the runner.
            if ret_seq and merge_mode == 'concat':
                output = tf.transpose(output, perm=[1, 0, 2])
            model = keras.Model(inputs=input, outputs=output)
            expected = model.predict(x)
            onnx_model = convert_keras(model, model.name)
            assert all_recurrents_should_bidirectional(onnx_model)
            assert runner(onnx_model.graph.name, onnx_model, x, expected)
@pytest.mark.parametrize("rnn_class", RNN_CLASSES)
def test_bidirectional_with_initial_states(runner, rnn_class):
    """Convert Bidirectional RNNs that emit states and that consume them as initial state."""
    input1 = Input(shape=(None, 5))
    states = Bidirectional(rnn_class(2, return_state=True))(input1)
    model = Model(input1, states)
    x = np.random.uniform(0.1, 1.0, size=(4, 3, 5)).astype(np.float32)
    inputs = [x]
    expected = model.predict(inputs)
    onnx_model = convert_keras(model, model.name)
    assert all_recurrents_should_bidirectional(onnx_model)
    assert runner(onnx_model.graph.name, onnx_model, inputs, expected)
    # Second model: states derived from input1 ([1:] drops the sequence output
    # in slot 0, keeping only the state tensors) seed a second bidirectional
    # layer that runs over input2.
    input2 = Input(shape=(None, 5))
    states = Bidirectional(rnn_class(2, return_state=True))(input1)[1:]
    out = Bidirectional(rnn_class(2, return_sequences=True))(input2, initial_state=states)
    model = Model([input1, input2], out)
    inputs = [x, x]
    expected = model.predict(inputs)
    onnx_model = convert_keras(model, model.name)
    assert all_recurrents_should_bidirectional(onnx_model)
    assert runner(onnx_model.graph.name, onnx_model, inputs, expected, atol=1e-5)
@pytest.mark.skipif(get_maximum_opset_supported() < 5,
                    reason="None seq_length Bidirectional LSTM is not supported before opset 5.")
@pytest.mark.parametrize("rnn_class", RNN_CLASSES)
@pytest.mark.skipif(is_tf2 and is_tensorflow_older_than('2.2'),
                    reason="Variable freezing fails to replace GatherResource op")
def test_bidirectional_seqlen_none(runner, rnn_class):
    """Convert a Bidirectional RNN over an embedding with an unspecified sequence length."""
    model = Sequential()
    model.add(Embedding(39, 128))
    model.add(Bidirectional(rnn_class(256, input_shape=(None, 32), return_sequences=True)))
    model.add(Dense(44))
    onnx_model = convert_keras(model, model.name)
    assert all_recurrents_should_bidirectional(onnx_model)
    # Run the converted graph at more than one batch size.
    for batch_size in (1, 4):
        x = np.random.rand(batch_size, 50).astype(np.float32)
        assert runner(onnx_model.graph.name, onnx_model, x, model.predict(x))
@pytest.mark.parametrize("rnn_class", RNN_CLASSES)
def test_rnn_state_passing(runner, rnn_class):
    """Convert a graph where one RNN's final states seed a second RNN."""
    source_seq = Input(shape=(None, 5))
    target_seq = Input(shape=(None, 5))
    # Slot 0 is the sequence output; [1:] keeps only the state tensors.
    states = rnn_class(2, return_state=True)(source_seq)[1:]
    decoded = rnn_class(2, return_sequences=True)(target_seq, initial_state=states)
    model = Model([source_seq, target_seq], decoded)
    x = np.random.uniform(0.1, 1.0, size=(4, 3, 5)).astype(np.float32)
    expected = model.predict([x, x])
    onnx_model = convert_keras(model, model.name)
    assert runner(onnx_model.graph.name, onnx_model, [x, x], expected, atol=1e-5)
def test_seq_dynamic_batch_size(runner):
    """Train small stacked SimpleRNN/GRU/LSTM models, then convert them and run
    the converted graphs with a batch size different from the training one."""
    K.clear_session()
    data_dim = 4  # input_size
    timesteps = 3  # seq_length
    # expected input data shape: (batch_size, timesteps, data_dim)
    test_input = np.random.random_sample((100, timesteps, data_dim))
    test_output = np.random.random_sample((100, 128))
    # Number of layer and number of neurons in each layer
    num_neur = [128, 256, 128]
    epochs = 200
    batch_size = 50
    nodeFuncList = [SimpleRNN, GRU, LSTM]
    for nodeFunc in nodeFuncList:
        model = Sequential()
        for i in range(len(num_neur)):  # multi-layer
            if len(num_neur) == 1:
                model.add(nodeFunc(num_neur[i], input_shape=(timesteps, data_dim), unroll=True))
            else:
                # All but the last recurrent layer must emit full sequences so
                # they can feed the next recurrent layer.
                if i < len(num_neur) - 1:
                    model.add(
                        nodeFunc(num_neur[i], input_shape=(timesteps, data_dim), return_sequences=True,
                                 unroll=True))
                else:
                    model.add(nodeFunc(num_neur[i], input_shape=(timesteps, data_dim), unroll=True))
        # Compile the neural network
        model.compile(loss='mean_squared_error', optimizer='adam')
        model.fit(test_input, test_output, epochs=epochs, batch_size=batch_size, verbose=0)
        # Re-bind to a batch of 5 to exercise a dynamic batch dimension.
        # NOTE(review): test_input/test_output are overwritten inside the loop,
        # so the 2nd and 3rd models train on only 5 samples — confirm intended.
        test_input = np.random.random_sample((5, timesteps, data_dim)).astype(np.float32)
        test_output = model.predict(test_input)
        onnx_model = convert_keras(model, model.name)
        assert runner(onnx_model.graph.name, onnx_model, test_input, test_output)
def test_separable_convolution(runner):
    """Convert SeparableConv2D (with pooling) and SeparableConv1D models."""
    N, C, H, W = 2, 3, 5, 5

    # 2D case: separable conv followed by max pooling.
    x2d = np.random.rand(N, H, W, C).astype(np.float32, copy=False)
    model = Sequential()
    model.add(
        SeparableConv2D(filters=10, kernel_size=(1, 2), strides=(1, 1), padding='valid', input_shape=(H, W, C),
                        data_format='channels_last', depth_multiplier=4))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), data_format='channels_last'))
    model.compile(optimizer='sgd', loss='mse')
    onnx_model = convert_keras(model, 'test')
    assert runner('separable_convolution_1', onnx_model, x2d, model.predict(x2d))

    # 1D case.
    x1d = np.random.rand(N, H, C).astype(np.float32, copy=False)
    model = Sequential()
    model.add(SeparableConv1D(filters=10, kernel_size=2, strides=1, padding='valid', input_shape=(H, C),
                              data_format='channels_last'))
    model.compile(optimizer='sgd', loss='mse')
    onnx_model = convert_keras(model, 'test')
    assert runner('separable_convolution_2', onnx_model, x1d, model.predict(x1d))
@pytest.mark.skipif(is_tf2 and is_tensorflow_older_than('2.2'),
                    reason="Variable freezing fails to replace GatherResource op")
def test_shared_embed(runner):
    """Convert a model in which two inputs share a single Embedding layer."""
    max_cont_length = 5
    max_ques_length = 7
    word_dict_len = 10
    word_dim = 6
    h_word_mat = 'aa'
    # One embedding instance applied to both the context and question inputs.
    context_in = Input((max_cont_length,))    # [bs, c_len]
    question_in = Input((max_ques_length,))   # [bs, q_len]
    shared_embedding = Embedding(word_dict_len, word_dim, trainable=False,
                                 name="word_embedding_" + h_word_mat)
    context_emb = Dropout(0.2)(shared_embedding(context_in))    # [bs, c_len, word_dim]
    question_emb = Dropout(0.2)(shared_embedding(question_in))  # [bs, q_len, word_dim]
    keras_model = keras.models.Model(inputs=[context_in, question_in],
                                     outputs=[context_emb, question_emb])
    onnx_model = convert_keras(keras_model, keras_model.name)
    batch_size = 3
    x = np.random.rand(batch_size, max_cont_length).astype(np.float32)
    y = np.random.rand(batch_size, max_ques_length).astype(np.float32)
    expected = keras_model.predict([x, y])
    assert runner(onnx_model.graph.name, onnx_model, [x, y], expected)
def test_recursive_model(runner):
    """Convert a model built from two independent Dense sub-models joined by Add."""
    keras.backend.set_learning_phase(0)
    N, C, D = 2, 3, 3
    x = np.random.rand(N, C).astype(np.float32, copy=False)

    def _dense_submodel():
        # One Input -> Dense sub-model with its own fresh weights.
        inp = Input(shape=(C,))
        return keras.Model(inputs=inp, outputs=Dense(D)(inp))

    sub_model1 = _dense_submodel()
    sub_model2 = _dense_submodel()
    input1 = Input(shape=(D,))
    input2 = Input(shape=(D,))
    sub_sum = Add()([sub_model1(input1), sub_model2(input2)])
    keras_model = keras.Model(inputs=[input1, input2], outputs=sub_sum)
    onnx_model = convert_keras(keras_model, keras_model.name)
    inputs = [x, 2 * x]
    expected = keras_model.predict(inputs)
    assert runner('recursive', onnx_model, inputs, expected)
def test_recursive_and_shared_model(runner):
    """Convert nested sub-models where sub_model1 is shared: it is wrapped by
    sub_model2 and also applied twice in a row in the outer model."""
    keras.backend.set_learning_phase(0)
    N, C, D = 2, 3, 3
    x = np.random.rand(N, C).astype(np.float32, copy=False)
    sub_input1 = Input(shape=(C,))
    sub_mapped1 = Dense(D)(sub_input1)
    sub_output1 = Activation('sigmoid')(sub_mapped1)
    sub_model1 = keras.Model(inputs=sub_input1, outputs=sub_output1)
    # sub_model2 wraps sub_model1, so the two share the Dense weights.
    sub_input2 = Input(shape=(C,))
    sub_mapped2 = sub_model1(sub_input2)
    sub_output2 = Activation('tanh')(sub_mapped2)
    sub_model2 = keras.Model(inputs=sub_input2, outputs=sub_output2)
    input1 = Input(shape=(D,))
    input2 = Input(shape=(D,))
    mapped1_1 = Activation('tanh')(input1)
    mapped2_1 = Activation('sigmoid')(input2)
    # sub_model1 applied twice in sequence on the first branch.
    mapped1_2 = sub_model1(mapped1_1)
    mapped1_3 = sub_model1(mapped1_2)
    mapped2_2 = sub_model2(mapped2_1)
    sub_sum = Add()([mapped1_3, mapped2_2])
    keras_model = keras.Model(inputs=[input1, input2], outputs=sub_sum)
    keras_model.compile('sgd', loss='mse')
    onnx_model = convert_keras(keras_model, keras_model.name)
    x = [x, 2 * x]
    expected = keras_model.predict(x)
    assert runner('recursive_and_shared', onnx_model, x, expected)
@pytest.mark.skipif(is_keras_older_than("2.2.4"),
                    reason="Low keras version is not supported.")
def test_shared_model_2(runner):
    """Convert a model that embeds a conv 'backbone' sub-model followed by an
    outer conv block built from the same helper."""
    K.set_learning_phase(0)

    def _conv_layer(input, filters, kernel_size, relu_flag=False, strides=1, dilation_rate=1):
        # Conv -> BatchNorm (-> ReLU when relu_flag); stride > 1 gets explicit
        # asymmetric zero padding and 'valid' conv padding.
        padding = 'same' if strides == 1 else 'valid'
        if strides > 1:
            input = ZeroPadding2D(((0, 1), (0, 1)), data_format=K.image_data_format())(input)
        x = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
                   padding=padding, use_bias=False, dilation_rate=dilation_rate)(input)
        ch_axis = 1 if K.image_data_format() == 'channels_first' else -1
        x = BatchNormalization(axis=ch_axis)(x)
        if relu_flag:
            return ReLU()(x)
        else:
            return x

    def _model(relu_flag=False):
        # Single conv block packaged as a named sub-model.
        input = Input(shape=(3, 320, 320), name='input_1')
        x = _conv_layer(input, 16, 3, relu_flag)
        return Model(inputs=input, outputs=x, name='backbone')

    # The ReLU-inside-submodel variant is only exercised on older keras stacks.
    relu_flags = [False] if is_tf2 or is_tf_keras else [True, False]
    for relu_flag_ in relu_flags:
        input = Input(shape=(3, 320, 320), name='input')
        backbone = _model(relu_flag_)
        x = backbone(input)
        x = _conv_layer(x, 16, 3)
        model = Model(inputs=[input], outputs=[x])
        onnx_model = convert_keras(model, model.name)
        x = np.random.rand(2, 3, 320, 320).astype(np.float32)
        expected = model.predict(x)
        assert runner(onnx_model.graph.name, onnx_model, x, expected, atol=1e-5)
@pytest.mark.skipif(is_keras_older_than("2.2.4"),
                    reason="ReLU support requires keras 2.2.4 or later.")
def test_shared_model_3(runner):
    """Convert a small convnet built from 1x1 -> 3x3 -> 1x1 bottleneck blocks,
    with either ReLU or LeakyReLU activations."""

    def _bottleneck(x, filters, activation, strides, block_id):
        # Three conv/bn/activation stages; every layer is uniquely named from
        # block_id so the two blocks do not collide.
        padding = 'same' if strides == 1 else 'valid'
        ch_axis = 1 if K.image_data_format() == 'channels_first' else -1
        if strides > 1:
            # Asymmetric padding compensates for the 'valid' strided conv below.
            x = ZeroPadding2D(((0, 1), (0, 1)), data_format=K.image_data_format())(x)
        x = Conv2D(filters // 2, (1, 1), padding='same', name='bottleneck_' + str(block_id) + '_conv_0',
                   use_bias=False, data_format=K.image_data_format())(x)
        x = BatchNormalization(axis=ch_axis, name='bottleneck_' + str(block_id) + '_bnorm_0')(x)
        if activation == 'relu':
            x = ReLU(name='bottleneck_' + str(block_id) + '_relu_0')(x)
        elif activation == 'leaky':
            x = LeakyReLU(name='bottleneck_' + str(block_id) + '_leaky_0')(x)
        else:
            assert False
        x = Conv2D(filters // 2, (3, 3), padding=padding, name='bottleneck_' + str(block_id) + '_conv_1',
                   strides=strides, use_bias=False, data_format=K.image_data_format())(x)
        x = BatchNormalization(axis=ch_axis, name='bottleneck_' + str(block_id) + '_bnorm_1')(x)
        if activation == 'relu':
            x = ReLU(name='bottleneck_' + str(block_id) + '_relu_1')(x)
        elif activation == 'leaky':
            x = LeakyReLU(name='bottleneck_' + str(block_id) + '_leaky_1')(x)
        else:
            assert False
        x = Conv2D(filters, (1, 1), padding='same', name='bottleneck_' + str(block_id) + '_conv_2',
                   use_bias=False, data_format=K.image_data_format())(x)
        x = BatchNormalization(axis=ch_axis, name='bottleneck_' + str(block_id) + '_bnorm_2')(x)
        if activation == 'relu':
            x = ReLU(name='bottleneck_' + str(block_id) + '_relu_2')(x)
        elif activation == 'leaky':
            x = LeakyReLU(name='bottleneck_' + str(block_id) + '_leaky_2')(x)
        else:
            assert False
        return x

    def convnet_7(input_shape, activation):
        # Two stacked bottleneck blocks; the second downsamples with stride 2.
        input = Input(shape=input_shape, name='input_1')
        x = _bottleneck(input, filters=16, strides=1, activation=activation, block_id=1)
        x = _bottleneck(x, filters=32, strides=2, activation=activation, block_id=2)
        return Model(inputs=input, outputs=x, name='convnet_7')

    # The ReLU variant is only exercised on older keras stacks.
    activation_list = ['leaky'] if is_tf2 or is_tf_keras else ['relu', 'leaky']
    for activation in activation_list:
        model = convnet_7(input_shape=(3, 96, 128), activation=activation)
        onnx_model = convert_keras(model, model.name)
        x = np.random.rand(1, 3, 96, 128).astype(np.float32)
        expected = model.predict(x)
        assert runner(onnx_model.graph.name, onnx_model, x, expected)
@pytest.mark.parametrize("rnn_class", RNN_CLASSES)
def test_masking(runner, rnn_class):
    """Convert an RNN preceded by a zero-valued Masking layer."""
    timesteps, features = 3, 5
    model = Sequential([
        keras.layers.Masking(mask_value=0., input_shape=(timesteps, features)),
        rnn_class(8, return_state=False, return_sequences=False),
    ])
    onnx_model = convert_keras(model, model.name)
    data = np.random.uniform(100, 999, size=(2, 3, 5)).astype(np.float32)
    assert runner(onnx_model.graph.name, onnx_model, data, model.predict(data), rtol=5e-3)
@pytest.mark.parametrize("rnn_class", RNN_CLASSES)
def test_masking_bias(runner, rnn_class):
    """Convert a masked RNN with the default bias and again with a random bias."""
    timesteps, features = (3, 5)
    model = Sequential([
        keras.layers.Masking(mask_value=0., input_shape=(timesteps, features)),
        rnn_class(8, return_state=False, return_sequences=False, use_bias=True, name='rnn')
    ])
    x = np.random.uniform(100, 999, size=(2, 3, 5)).astype(np.float32)
    # Fill one of the entries with all zeros except the first timestep
    x[1, 1:, :] = 0
    # Test with the default bias
    expected = model.predict(x)
    onnx_model = convert_keras(model, model.name)
    assert runner(onnx_model.graph.name, onnx_model, x, expected)
    # Set bias values to random floats
    rnn_layer = model.get_layer('rnn')
    weights = rnn_layer.get_weights()
    # Index 2 is the bias vector (per Keras weight ordering of kernel,
    # recurrent_kernel, bias).
    weights[2] = np.random.uniform(size=weights[2].shape)
    rnn_layer.set_weights(weights)
    # Test with random bias
    expected = model.predict(x)
    onnx_model = convert_keras(model, model.name)
    assert runner(onnx_model.graph.name, onnx_model, x, expected)
@pytest.mark.skipif(get_maximum_opset_supported() < 9, reason='bidirectional is not supported for opset < 9')
@pytest.mark.parametrize("rnn_class", RNN_CLASSES)
def test_masking_bias_bidirectional(runner, rnn_class):
    """Convert a masked Bidirectional RNN with default and then random bias weights."""
    timesteps, features = (3, 5)
    model = Sequential([
        keras.layers.Masking(mask_value=0., input_shape=(timesteps, features)),
        Bidirectional(rnn_class(8, return_state=False, return_sequences=False, use_bias=True), name='bi')
    ])
    x = np.random.uniform(100, 999, size=(2, 3, 5)).astype(np.float32)
    # Fill one of the entries with all zeros except the first timestep
    x[1, 1:, :] = 0
    # Test with the default bias
    expected = model.predict(x)
    onnx_model = convert_keras(model, model.name)
    assert runner(onnx_model.graph.name, onnx_model, x, expected)
    # Set bias values to random floats
    rnn_layer = model.get_layer('bi')
    weights = rnn_layer.get_weights()
    # Indices 2 and 5 are the forward/backward bias vectors (per Keras weight
    # ordering of kernel, recurrent_kernel, bias for each direction); both
    # directions receive the same random bias.
    weights[2] = np.random.uniform(size=weights[2].shape)
    weights[5] = weights[2]
    rnn_layer.set_weights(weights)
    # Test with random bias
    expected = model.predict(x)
    onnx_model = convert_keras(model, model.name)
    assert runner(onnx_model.graph.name, onnx_model, x, expected)
@pytest.mark.parametrize("rnn_class", RNN_CLASSES)
def test_masking_value(runner, rnn_class):
    """Convert an RNN with a non-zero mask value; one batch entry is fully masked."""
    if rnn_class is SimpleRNN:
        pytest.skip('SimpleRNN intermittently fails this test')
    timesteps, features = 3, 5
    mask_value = 5.
    model = Sequential([
        keras.layers.Masking(mask_value=mask_value, input_shape=(timesteps, features)),
        rnn_class(8, return_state=False, return_sequences=False),
    ])
    onnx_model = convert_keras(model, model.name)
    data = np.random.uniform(100, 999, size=(2, 3, 5)).astype(np.float32)
    # Mask out the entire second batch entry.
    data[1, :, :] = mask_value
    assert runner(onnx_model.graph.name, onnx_model, data, model.predict(data))
def test_masking_custom(runner):
    """Convert a model containing a custom mask-aware pooling layer."""
    class MyPoolingMask(keras.layers.Layer):
        """Mean-pool over the time axis, normalising by the unmasked length."""
        def __init__(self, **kwargs):
            self.supports_masking = True
            super(MyPoolingMask, self).__init__(**kwargs)

        def build(self, input_shape):
            super(MyPoolingMask, self).build(input_shape)

        def compute_mask(self, inputs, input_mask=None):
            # The mask is consumed here; downstream layers see no mask.
            return None

        def call(self, inputs, mask=None, **kwargs):
            if mask is not None:
                # Sum over time divided by the number of unmasked steps;
                # epsilon guards against fully-masked rows.
                return K.sum(inputs, axis=-2) / (
                        K.sum(K.cast(mask, K.dtype(inputs)), axis=-1, keepdims=True) + K.epsilon())
            else:
                output = K.mean(inputs, axis=-2)
                return output

        def compute_output_shape(self, input_shape):
            return input_shape[:-2] + input_shape[-1:]

    timesteps, features = (3, 5)
    model = Sequential([
        keras.layers.Masking(mask_value=0., input_shape=(timesteps, features)),
        MyPoolingMask()
    ])
    onnx_model = convert_keras(model, model.name)
    x = np.random.uniform(100, 999, size=(2, 3, 5)).astype(np.float32)
    expected = model.predict(x)
    assert runner(onnx_model.graph.name, onnx_model, x, expected)
def test_timedistributed(runner):
    """Convert TimeDistributed wrappers around Dense and Conv2D layers."""
    dense_model = keras.Sequential()
    dense_model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
    # dense_model.output_shape == (None, 10, 8)
    onnx_model = convert_keras(dense_model, dense_model.name)
    data = np.random.rand(32, 10, 16).astype(np.float32)
    assert runner(onnx_model.graph.name, onnx_model, data, dense_model.predict(data))

    N, D, W, H, C = 5, 10, 15, 15, 3
    conv_model = keras.Sequential()
    conv_model.add(TimeDistributed(Conv2D(64, (3, 3)),
                                   input_shape=(D, W, H, C)))
    onnx_model = convert_keras(conv_model, conv_model.name)
    data = np.random.rand(N, D, W, H, C).astype(np.float32)
    assert runner(onnx_model.graph.name, onnx_model, data, conv_model.predict(data))
def test_channel_first_input(runner):
    """Convert an Add model where only one input is declared channel-first for ONNX."""
    N, W, H, C = 2, 5, 6, 3
    inp1 = Input(batch_shape=(N, W, H, C), name='input1')
    inp2 = Input(batch_shape=(N, W, H, C), name='input2')
    model = keras.models.Model(inputs=[inp1, inp2], outputs=Add()([inp1, inp2]))
    # Only 'input1' is remapped to NCHW on the ONNX side.
    onnx_model = convert_keras(model, model.name, channel_first_inputs=['input1'])
    assert onnx_model is not None
    data1 = np.random.rand(N, W, H, C).astype(np.float32)
    data2 = np.random.rand(N, W, H, C).astype(np.float32)
    data_transpose = np.transpose(data1, (0, 3, 1, 2))
    assert data_transpose.shape == (N, C, W, H)
    expected = model.predict([data1, data2])
    assert runner('channel_first_input', onnx_model, [data_transpose, data2], expected)
def test_channel_last(runner):
    """Convert a channels-last conv model whose ONNX input is declared channel-first."""
    N, C, H, W = 2, 3, 5, 5
    nhwc_data = np.random.rand(N, H, W, C).astype(np.float32, copy=False)
    model = Sequential()
    model.add(Conv2D(2, kernel_size=(1, 2), strides=(1, 1), padding='valid', input_shape=(H, W, C),
                     data_format='channels_last'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), data_format='channels_last'))
    model.compile(optimizer='sgd', loss='mse')
    onnx_model = convert_keras(model, channel_first_inputs=[model.input_names[0]])
    expected = model.predict(nhwc_data)
    assert expected is not None
    assert onnx_model is not None
    # The converted graph takes NCHW, so transpose the NHWC test data.
    nchw_data = np.transpose(nhwc_data.astype(np.float32), [0, 3, 1, 2])
    assert runner('channel_last_input', onnx_model, nchw_data, expected)
def test_sub_model(runner):
    """Convert a model whose conv feature extractor is a Sequential sub-model
    feeding 12 parallel dense heads, concatenated and passed through a
    pass-through custom layer."""
    class IdentityLayer(Layer):
        """Pass-through layer: returns its input unchanged."""
        def __init__(self, **kwargs):
            super(IdentityLayer, self).__init__(**kwargs)

        def build(self, input_shape):
            super(IdentityLayer, self).build(input_shape)

        def call(self, inputs, training=None):
            return inputs

        def compute_output_shape(self, input_shape):
            return input_shape

    input_shape = [700, 420, 1]
    num_classes = 10
    image_input = Input(shape=input_shape, name='image_input')
    model = Sequential()  # 28, 28, 1
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                     input_shape=input_shape, padding='valid'))  # 28, 28, 1
    model.add(Conv2D(64, (3, 3), activation='relu', padding='valid'))  # 28, 28, 1
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="valid"))  # 14, 14, 1
    model.add(Dropout(0.25))
    model.add(Conv2D(128, kernel_size=(12, 12), strides=(14, 14), padding="valid", activation='relu'))
    model.add(Dropout(0.5))
    features = model(image_input)
    outputs = []
    # Three groups of four dense heads (softmax/sigmoid/tanh/tanh), all
    # reading the same shared feature tensor.
    for _ in range(3):
        output1 = Dense(num_classes, activation="softmax")(
            Dense(64, activation="relu")(Dense(128, activation="relu")(features)))
        output2 = Dense(1, activation="sigmoid")(
            Dense(64, activation="relu")(Dense(128, activation="relu")(features)))
        output3 = Dense(2, activation="tanh")(
            Dense(64, activation="relu")(Dense(128, activation="relu")(features)))
        output4 = Dense(2, activation="tanh")(
            Dense(64, activation="relu")(Dense(128, activation="relu")(features)))
        outputs += [output1, output2, output3, output4]
    output = Concatenate(name="output")(outputs)
    output = IdentityLayer()(output)
    model1 = Model(image_input, output)
    onnx_model = convert_keras(model1, model1.name)
    x = np.random.rand(2, 700, 420, 1).astype(np.float32)
    expected = model1.predict(x)
    assert runner(onnx_model.graph.name, onnx_model, x, expected)
@pytest.mark.skipif((is_tensorflow_older_than('1.14.0') or (not is_tf_keras)), reason='old tf version')
def test_reverseV2(runner):
    """Convert a tf.reverse op (axis 1) embedded in a keras model."""
    seq_in = Input(shape=(2, 4), name='input')
    reversed_seq = tf.reverse(seq_in, [1])
    model = tf.keras.models.Model(inputs=seq_in, outputs=reversed_seq)
    onnx_model = convert_keras(model, 'tf_rev_v2')
    data = np.random.rand(1, 2, 4).astype(np.float32)
    assert runner('tf_rev_v2', onnx_model, data, model.predict(data))
@pytest.mark.skipif(get_maximum_opset_supported() < 11, reason="TensorScatterUpdate is not supported before opset 11.")
@pytest.mark.skipif((is_tensorflow_older_than('1.14.0') or (not is_tf_keras)), reason='old tf version')
def test_tensor_scatter_update(runner):
    """tf.tensor_scatter_nd_update with constant indices/updates converts to ONNX."""
    scatter_indices = np.array([[0], [0]]).astype(np.int64)
    scatter_updates = np.array([[[1, 1], [1, 1]], [[1, 1], [1, 1]]]).astype(np.float32)
    in_tensor = Input(shape=(2, 2), name='tensor')
    scattered = tf.tensor_scatter_nd_update(in_tensor, scatter_indices, scatter_updates)
    keras_model = tf.keras.models.Model(inputs=in_tensor, outputs=scattered)
    onnx_model = convert_keras(keras_model, keras_model.name)
    feed = np.array([[[6, 5], [6, 6]], [[5, 5], [6, 6]]]).astype(np.float32)
    expected = keras_model.predict(feed)
    assert runner(onnx_model.graph.name, onnx_model, feed, expected)
def test_two_zero_padding(runner):
    """Two ZeroPadding2D branches feeding one shared Conv2D must convert correctly."""
    def padded_branches(feature_map, nf=64):
        # Apply one shared valid-conv to two differently zero-padded copies.
        base = keras.layers.Conv2D(filters=nf, kernel_size=(3, 3), strides=(1, 1), padding="same")(feature_map)
        pad_top_left = keras.layers.ZeroPadding2D(padding=((1, 0), (1, 0)))(base)
        pad_bottom_right = keras.layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(base)
        shared_conv = keras.layers.Conv2D(filters=nf, kernel_size=(3, 3), strides=(1, 1), padding="valid")
        return keras.layers.concatenate([shared_conv(pad_top_left), shared_conv(pad_bottom_right)], axis=3)

    image_in = Input(shape=(32, 32, 3))
    model = keras.models.Model(inputs=image_in, outputs=padded_branches(image_in))
    onnx_model = convert_keras(model, 'test_two_zero_padding')
    feed = np.random.rand(1, 32, 32, 3).astype(np.float32)
    expected = model.predict(feed)
    assert runner('test_two_zero_padding', onnx_model, feed, expected)
| 115,744 | 41.289003 | 229 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_unit_tests/conftest.py | # SPDX-License-Identifier: Apache-2.0
import os
import pytest
import numpy as np
import tensorflow as tf
from mock_keras2onnx.proto import keras, is_tf_keras
from test_utils import run_onnx_runtime
from mock_keras2onnx.proto.tfcompat import is_tf2
K = keras.backend
@pytest.fixture(scope='function')
def runner():
    """Per-test fixture: seeds numpy/TF RNGs, resets the Keras session, and
    yields a wrapper around run_onnx_runtime that records every temporary
    model file so teardown can delete them."""
    np.random.seed(42)
    if is_tf2:
        tf.random.set_seed(42)
    else:
        tf.random.set_random_seed(42)
    model_files = []

    def runner_func(*args, **kwargs):
        # The closure accumulates generated .onnx paths into model_files.
        return run_onnx_runtime(*args, model_files, **kwargs)

    # Ensure Keras layer naming is reset for each function
    K.reset_uids()
    # Reset the TensorFlow session to avoid resource leaking between tests
    K.clear_session()
    # Provide wrapped run_onnx_runtime function
    yield runner_func
    # Remove model files
    for fl in model_files:
        os.remove(fl)
| 880 | 22.184211 | 74 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_unit_tests/test_subclassing.py | # SPDX-License-Identifier: Apache-2.0
import pytest
import mock_keras2onnx
import numpy as np
import tensorflow as tf
from test_utils import convert_keras_for_test as convert_keras
from mock_keras2onnx.proto import is_tensorflow_older_than
if (not mock_keras2onnx.proto.is_tf_keras) or (not mock_keras2onnx.proto.tfcompat.is_tf2):
pytest.skip("Tensorflow 2.0 only tests.", allow_module_level=True)
class LeNet(tf.keras.Model):
    """Subclassed LeNet-style CNN used to exercise tf.keras subclassing conversion."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv2d_1 = tf.keras.layers.Conv2D(filters=6,
                                               kernel_size=(3, 3), activation='relu',
                                               input_shape=(32, 32, 1))
        self.average_pool = tf.keras.layers.AveragePooling2D()
        self.conv2d_2 = tf.keras.layers.Conv2D(filters=16,
                                               kernel_size=(3, 3), activation='relu')
        self.flatten = tf.keras.layers.Flatten()
        self.fc_1 = tf.keras.layers.Dense(120, activation='relu')
        self.fc_2 = tf.keras.layers.Dense(84, activation='relu')
        self.out = tf.keras.layers.Dense(10, activation='softmax')

    def call(self, inputs, **kwargs):
        # conv -> pool -> conv -> pool -> flatten -> fc_1 -> fc_2 -> softmax head.
        # Note: the same average_pool layer instance is reused for both pools.
        x = self.conv2d_1(inputs)
        x = self.average_pool(x)
        x = self.conv2d_2(x)
        x = self.average_pool(x)
        x = self.flatten(x)
        x = self.fc_2(self.fc_1(x))
        return self.out(x)
class MLP(tf.keras.Model):
    """Small subclassed perceptron: flatten -> Dense(256, relu) -> Dense(10)."""

    def __init__(self):
        super(MLP, self).__init__()
        self.flatten = tf.keras.layers.Flatten()
        self.dense1 = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)
        self.dense2 = tf.keras.layers.Dense(units=10)

    def call(self, inputs, **kwargs):
        x = self.flatten(inputs)
        x = self.dense1(x)
        output = self.dense2(x)
        return output
class SimpleWrapperModel(tf.keras.Model):
    """Wraps an arbitrary callable (e.g. a composition of raw tf ops) as a Model."""

    def __init__(self, func):
        super(SimpleWrapperModel, self).__init__()
        # func: callable taking the model inputs and returning the outputs.
        self.func = func

    def call(self, inputs, **kwargs):
        return self.func(inputs)
def test_lenet(runner):
    """Subclassed LeNet converts and its ONNX output matches the TF output."""
    tf.keras.backend.clear_session()
    model = LeNet()
    images = np.random.rand(2 * 416 * 416 * 3).astype(np.float32).reshape(2, 416, 416, 3)
    expected = model(images)
    model._set_inputs(images)
    onnx_model = convert_keras(model)
    assert runner('lenet', onnx_model, images, expected)
def test_mlf(runner):
    """Subclassed MLP converts after being built implicitly through predict()."""
    tf.keras.backend.clear_session()
    model = MLP()
    sample = tf.random.normal((2, 20))
    expected = model.predict(sample)
    onnx_model = convert_keras(model)
    assert runner('mlf', onnx_model, sample.numpy(), expected)
def test_tf_ops(runner):
    """Convert a model built from raw tf ops (squared_difference, matmul, rank)."""
    tf.keras.backend.clear_session()

    def op_func(arg_inputs):
        x = tf.math.squared_difference(arg_inputs[0], arg_inputs[1])
        x = tf.matmul(x, x, adjoint_b=True)
        r = tf.rank(x)
        # Subtract the tensor rank (cast to float) so the graph also contains
        # Rank/ExpandDims/Cast ops with broadcasting.
        x = x - tf.cast(tf.expand_dims(r, axis=0), tf.float32)
        return x

    dm = SimpleWrapperModel(op_func)
    inputs = [tf.random.normal((3, 2, 20)), tf.random.normal((3, 2, 20))]
    expected = dm.predict(inputs)
    oxml = convert_keras(dm)
    assert runner('op_model', oxml, [i_.numpy() for i_ in inputs], expected)
layers = tf.keras.layers
class Sampling(layers.Layer):
    """Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""

    def call(self, inputs):
        z_mean, z_log_var = inputs
        batch = tf.shape(z_mean)[0]
        dim = tf.shape(z_mean)[1]
        # epsilon = tf.fill(dims=(batch, dim), value=.9)
        # epsilon = tf.compat.v1.random_normal(shape=(batch, dim), seed=1234)
        # Fixed seed keeps the sampled noise reproducible within one engine.
        epsilon = tf.keras.backend.random_normal(shape=(batch, dim), seed=12340)
        # Reparameterization trick: z = mean + sigma * epsilon.
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon
class Encoder(layers.Layer):
    """Maps MNIST digits to a triplet (z_mean, z_log_var, z)."""

    def __init__(self,
                 latent_dim=32,
                 intermediate_dim=64,
                 name='encoder',
                 **kwargs):
        super(Encoder, self).__init__(name=name, **kwargs)
        self.dense_proj = layers.Dense(intermediate_dim, activation='relu')
        self.dense_mean = layers.Dense(latent_dim)
        self.dense_log_var = layers.Dense(latent_dim)
        self.sampling = Sampling()

    def call(self, inputs):
        x = self.dense_proj(inputs)
        z_mean = self.dense_mean(x)
        z_log_var = self.dense_log_var(x)
        # Draw z from the predicted Gaussian via the Sampling layer.
        z = self.sampling((z_mean, z_log_var))
        return z_mean, z_log_var, z
class Decoder(layers.Layer):
    """Converts z, the encoded digit vector, back into a readable digit."""

    def __init__(self,
                 original_dim,
                 intermediate_dim=64,
                 name='decoder',
                 **kwargs):
        super(Decoder, self).__init__(name=name, **kwargs)
        self.dense_proj = layers.Dense(intermediate_dim, activation='relu')
        # Sigmoid output keeps reconstructed pixel values in [0, 1].
        self.dense_output = layers.Dense(original_dim, activation='sigmoid')

    def call(self, inputs):
        x = self.dense_proj(inputs)
        return self.dense_output(x)
class VariationalAutoEncoder(tf.keras.Model):
    """Combines the encoder and decoder into an end-to-end model for training."""

    def __init__(self,
                 original_dim,
                 intermediate_dim=64,
                 latent_dim=32,
                 name='autoencoder',
                 **kwargs):
        super(VariationalAutoEncoder, self).__init__(name=name, **kwargs)
        self.original_dim = original_dim
        self.encoder = Encoder(latent_dim=latent_dim,
                               intermediate_dim=intermediate_dim)
        self.decoder = Decoder(original_dim, intermediate_dim=intermediate_dim)

    def call(self, inputs):
        z_mean, z_log_var, z = self.encoder(inputs)
        reconstructed = self.decoder(z)
        # Add KL divergence regularization loss.
        kl_loss = - 0.5 * tf.reduce_mean(
            z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)
        self.add_loss(kl_loss)
        return reconstructed
def test_auto_encoder(runner):
    """Convert the VAE and only validate the ONNX graph structure; numeric
    comparison is skipped because random-normal ops differ across engines."""
    tf.keras.backend.clear_session()
    original_dim = 20
    vae = VariationalAutoEncoder(original_dim, 64, 32)
    x = tf.random.normal((7, original_dim))
    # NOTE(review): `expected` is unused below because outputs are engine-random;
    # predict() is still called to build the model before conversion.
    expected = vae.predict(x)
    oxml = convert_keras(vae)
    # assert runner('variational_auto_encoder', oxml, [x.numpy()], expected)
    # The random generator is not same between different engines.
    import onnx
    onnx.checker.check_model(oxml)
@pytest.mark.skipif(is_tensorflow_older_than('2.2.0'), reason="only supported on tf 2.2 and above.")
def test_tf_where(runner):
    """Convert tf.where with a scalar condition and a 1-element broadcast condition."""
    def _tf_where(input_0):
        a = tf.where(True, input_0, [0, 1, 2, 5, 7])
        b = tf.where([True], tf.expand_dims(input_0, axis=0), tf.expand_dims([0, 1, 2, 5, 7], axis=0))
        # Combine both results so one graph exercises both tf.where variants.
        c = tf.logical_or(tf.cast(a, tf.bool), tf.cast(b, tf.bool))
        return c

    swm = SimpleWrapperModel(_tf_where)
    const_in = [np.array([2, 4, 6, 8, 10]).astype(np.int32)]
    expected = swm(const_in)
    swm._set_inputs(const_in)
    oxml = convert_keras(swm)
    assert runner('where_test', oxml, const_in, expected)
class OptionalInputs(tf.keras.Model):
    """Model whose call() accepts an optional second input (type ids); when the
    second input is absent it defaults to a zero tensor shaped like the first."""

    def __init__(self, *args, **kwargs):
        super(OptionalInputs, self).__init__(*args, **kwargs)

    def call(self, inputs, type_ids=None, **kwargs):
        input_id = inputs
        if isinstance(inputs, (tuple, list)):
            # Inputs may arrive packed as (input_id, type_ids).
            input_id = inputs[0]
            type_ids = inputs[1] if len(inputs) > 1 else type_ids
        static = input_id.shape.as_list()
        dynamic = tf.shape(input_id)
        # Use static dims where known, dynamic dims where the static shape is None.
        input_shape = [dynamic[i] if s is None else s for i, s in enumerate(static)]
        if type_ids is None:
            type_ids = tf.fill(input_shape, 0)
        return input_id + type_ids
@pytest.mark.skipif(is_tensorflow_older_than('2.2.0'), reason="only supports on tf 2.2 and above.")
def test_optional_inputs(runner):
    """Convert OptionalInputs both with the defaulted and with an explicit second input."""
    input_ids = np.array([1, 2]).astype(np.int32)
    test_model = OptionalInputs()
    exp0 = test_model(input_ids)
    exp1 = test_model(input_ids, np.array([1, 2]).astype(np.int32))
    oxml = convert_keras(test_model)
    assert runner('opt_inputs_0', oxml, [input_ids], exp0)
    # Explicit initial_types force the converter to expose both inputs.
    from onnxconverter_common.onnx_fx import GraphFunctionType as _Ty
    oxml1 = convert_keras(test_model, initial_types=(_Ty.I32(['N']), _Ty.I32(['N'])))
    assert runner('opt_inputs_1', oxml1, [input_ids, np.array([1, 2]).astype(np.int32)], exp1)
| 8,436 | 34.599156 | 102 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_unit_tests/test_cgan.py | # SPDX-License-Identifier: Apache-2.0
import pytest
import tensorflow as tf
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras, is_tf_keras, is_tensorflow_older_than
from tf2onnx.keras2onnx_api import convert_keras
from packaging.version import Version
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/cgan/cgan.py
class CGAN():
    """Conditional GAN (from eriklindernoren/Keras-GAN) used as a conversion test model.

    Builds a label-conditioned generator and discriminator and stacks them
    into ``self.combined`` (generator output fed into a frozen discriminator).
    """

    def __init__(self):
        # Input shape
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.num_classes = 10
        self.latent_dim = 100

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        # Build the generator
        self.generator = self.build_generator()

        # The generator takes noise and the target label as input
        # and generates the corresponding digit of that label
        noise = Input(shape=(self.latent_dim,))
        label = Input(shape=(1,))
        img = self.generator([noise, label])

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The discriminator takes generated image as input and determines validity
        # and the label of that image
        valid = self.discriminator([img, label])

        # The combined model (stacked generator and discriminator)
        # Trains generator to fool discriminator
        self.combined = Model([noise, label], valid)

    def get_model(self):
        """Return the stacked generator+discriminator model."""
        return self.combined

    def build_generator(self):
        """Dense generator: (noise, label) -> 28x28x1 image in [-1, 1] (tanh)."""
        model = Sequential()
        model.add(Dense(256, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))

        noise = Input(shape=(self.latent_dim,))
        label = Input(shape=(1,), dtype='int32')
        # Condition on the label by multiplying noise with the label embedding.
        label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))
        model_input = multiply([noise, label_embedding])
        img = model(model_input)
        return Model([noise, label], img)

    def build_discriminator(self):
        """Dense discriminator: (image, label) -> validity score in [0, 1]."""
        model = Sequential()
        model.add(Dense(512, input_dim=np.prod(self.img_shape)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        # Single sigmoid output head. The reference implementation has exactly
        # one Dense(1, sigmoid) here; the original file accidentally added it
        # twice, stacking two sigmoid layers.
        model.add(Dense(1, activation='sigmoid'))

        img = Input(shape=self.img_shape)
        label = Input(shape=(1,), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes, np.prod(self.img_shape))(label))
        flat_img = Flatten()(img)
        model_input = multiply([flat_img, label_embedding])
        validity = model(model_input)
        return Model([img, label], validity)
@pytest.mark.skipif(mock_keras2onnx.proto.tfcompat.is_tf2 and is_tf_keras, reason="Tensorflow 1.x only tests.")
@pytest.mark.skipif(is_tf_keras and Version(tf.__version__.split('-')[0]) < Version("1.14.0"),
                    reason="Not supported before tensorflow 1.14.0 for tf_keras")
@pytest.mark.skipif(mock_keras2onnx.proto.tfcompat.is_tf2 and is_tensorflow_older_than('2.2'),
                    reason="Variable freezing fails to replace ResourceGather op")
def test_CGAN(runner):
    """Convert the combined CGAN model and compare ONNX output with Keras predict()."""
    keras_model = CGAN().combined
    batch = 5
    x = np.random.rand(batch, 100).astype(np.float32)  # latent noise input
    y = np.random.rand(batch, 1).astype(np.float32)    # label input
    expected = keras_model.predict([x, y])
    onnx_model = convert_keras(keras_model, keras_model.name)
    # Feed by keras input names so order mismatches cannot occur.
    assert runner(onnx_model.graph.name, onnx_model,
                  {keras_model.input_names[0]: x, keras_model.input_names[1]: y}, expected)
| 4,662 | 34.325758 | 111 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_unit_tests/test_utils.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import onnx
from onnx import helper
import numpy as np
import mock_keras2onnx
from mock_keras2onnx.proto import keras, is_keras_older_than
from mock_keras2onnx.proto.tfcompat import is_tf2
from packaging.version import Version
from tf2onnx.keras2onnx_api import convert_keras, get_maximum_opset_supported
import time
import json
import urllib
# Mapping opset to ONNXRuntime version.
# TODO: upgrade opset to 18 for 1.14.0 after the below issue is fixed:
# https://github.com/onnx/tensorflow-onnx/issues/2132
ORT_OPSET_VERSION = {
"1.6.0": 13, "1.7.0": 13, "1.8.0": 14, "1.9.0": 15, "1.10.0": 15, "1.11.0": 16,
"1.12.0": 17, "1.13.0": 17, "1.14.0": 18
}
working_path = os.path.abspath(os.path.dirname(__file__))
tmp_path = os.path.join(working_path, 'temp')
test_level_0 = True
def convert_tf_crop_and_resize(scope, operator, container):
    """Custom converter for the TF CropAndResize op (requires opset >= 11).

    Transposes the NHWC input to NCHW, emits the com.microsoft CropAndResize
    contrib op, and transposes the result back to NHWC.

    Raises:
        ValueError: if the target opset is below 11.
    """
    if operator.target_opset < 11:
        raise ValueError("CropAndResize op is not supported for opset < 11")
    # Local import for consistency with convert_InstanceNormalizationLayer
    # below; without it OnnxOperatorBuilder is unresolved at call time
    # (it is not imported at module level).
    from mock_keras2onnx.common.onnx_ops import OnnxOperatorBuilder
    oopb = OnnxOperatorBuilder(container, scope)
    node = operator.raw_operator
    mode_value = node.get_attr('method')
    transpose_node = oopb.apply_transpose(operator.inputs[0].full_name,
                                          name=operator.full_name + '_transpose_1',
                                          perm=[0, 3, 1, 2])
    cropandresize = oopb.add_node('CropAndResize',
                                  transpose_node + operator.input_full_names[1:],
                                  operator.full_name + '_crop_and_resize',
                                  op_domain='com.microsoft',
                                  op_version=1,
                                  mode=mode_value)
    oopb.apply_op_with_output("apply_transpose",
                              cropandresize,
                              operator.output_full_names,
                              name=operator.full_name + '_transpose_final',
                              perm=[0, 2, 3, 1])
# convert keras_contrib.layers.InstanceNormalization
def convert_InstanceNormalizationLayer(scope, operator, container):
    """Custom converter for keras_contrib InstanceNormalization on 4-D inputs.

    Emits the normalization explicitly as ONNX primitives:
    (x - mean) / (sqrt(var) + epsilon) * beta + gamma, where mean/var are
    reduced over axes [1, 2, 3].
    """
    from mock_keras2onnx.common.onnx_ops import OnnxOperatorBuilder
    op = operator.raw_operator
    params = op.get_weights()
    # Only 4-D (NHWC/NCHW) inputs are handled.
    assert len(op.input_shape) == 4
    # NOTE(review): params[0] is used as the multiplicative term and params[1]
    # as the additive term below; confirm this matches the layer's weight order.
    beta = params[0].reshape(1, 1, 1, 1).astype('float32')
    gamma = params[1].reshape(1, 1, 1, 1).astype('float32')
    oopb = OnnxOperatorBuilder(container, scope)
    # mean(x) over all non-batch axes
    reducemean_1 = oopb.add_node('ReduceMean',
                                 [operator.inputs[0].full_name],
                                 operator.inputs[0].full_name + '_reduce_mean_1',
                                 axes=[1, 2, 3], keepdims=1)
    # x - mean(x)
    sub_1 = oopb.add_node('Sub',
                          [operator.inputs[0].full_name, reducemean_1],
                          operator.inputs[0].full_name + '_sub_1')
    # (x - mean)^2
    mul = oopb.add_node('Mul',
                        [sub_1, sub_1],
                        operator.inputs[0].full_name + '_mul')
    # variance = mean((x - mean)^2)
    reducemean_2 = oopb.add_node('ReduceMean',
                                 [mul],
                                 operator.inputs[0].full_name + '_reduce_mean_2',
                                 axes=[1, 2, 3], keepdims=1)
    sqrt = oopb.add_node('Sqrt',
                         [reducemean_2],
                         operator.inputs[0].full_name + '_sqrt')
    # sqrt(var) + epsilon (epsilon added after the sqrt, not inside it)
    add = oopb.add_node('Add',
                        [sqrt,
                         ('_start', oopb.float, np.array([op.epsilon], dtype='float32'))],
                        operator.inputs[0].full_name + '_add')
    div = oopb.add_node('Div',
                        [sub_1, add],
                        operator.inputs[0].full_name + '_div')
    mul_scale = oopb.add_node('Mul',
                              [div,
                               ('_start', oopb.float, beta)],
                              operator.inputs[0].full_name + '_mul_scale')
    add_bias = oopb.add_node('Add',
                             [mul_scale,
                              ('_start', oopb.float, gamma)],
                             operator.inputs[0].full_name + '_add_bias')
    # NOTE(review): apply_identity is not imported in this module; assumed to be
    # provided by the importing namespace — confirm before using this converter.
    apply_identity(scope, add_bias, operator.outputs[0].full_name, container)
def print_mismatches(case_name, list_idx, expected_list, actual_list, rtol=1.e-3, atol=1.e-6):
    """Report (to stderr) element-wise mismatches between two flat arrays.

    Prints the first 10 positions where |expected - actual| exceeds
    atol + rtol * |actual|, then a summary line with the total mismatch count.
    """
    abs_diff = abs(expected_list - actual_list)
    total = len(expected_list)
    mismatches = 0
    for pos, (exp_val, act_val, diff_val) in enumerate(zip(expected_list, actual_list, abs_diff)):
        if diff_val > atol + rtol * abs(act_val):
            if mismatches < 10:  # print the first 10 mismatches
                print(
                    "case = " + case_name + ", result mismatch for expected = " + str(exp_val) +
                    ", actual = " + str(act_val) + " at location " + str(pos), file=sys.stderr)
            mismatches += 1
    print("case = " + case_name + ", " +
          str(mismatches) + " mismatches out of " + str(total) + " for list " + str(list_idx),
          file=sys.stderr)
def load_profile_json(profile_file):
    """Load an onnxruntime profiling JSON file and return its event list."""
    print(f"loading profile output {profile_file} ...")
    with open(profile_file, "r") as fp:
        events = json.load(fp)
    # ORT profiling output is a chrome-trace style list of event dicts.
    assert isinstance(events, list)
    return events
def parse_profile_results(sess_time, kernel_time_only=False, threshold=0):
    """Aggregate per-node durations from ORT profiling events into report lines.

    sess_time: chrome-trace style list of event dicts.
    kernel_time_only: drop Node events that carry no execution-provider info.
    threshold: only report nodes whose share of total time exceeds this ratio.
    Returns the report as a list of tab-separated strings, longest-running first.
    """
    durations = {}
    providers = {}
    total = 0
    for event in sess_time:
        if event["cat"] != "Node" or "dur" not in event:
            continue
        args = event.get("args")
        if not args or "op_name" not in args:
            continue
        name = event["name"]
        if "provider" in args:
            device = "CPU" if args["provider"] == "CPUExecutionProvider" else "CUDA"
            if name in providers:
                # A node must always run on the same provider.
                assert providers[name] == device
            else:
                providers[name] = device
        elif kernel_time_only:
            continue
        durations[name] = durations.get(name, 0) + event["dur"]
        total += event["dur"]

    lines = []
    if threshold > 0:
        lines.append(f"Threshold of Percentage > {threshold:.2f}%")
    lines.append("Duration\tPercentage\tProvider\tName")
    for name, dur in sorted(durations.items(), key=lambda kv: kv[1], reverse=True):
        share = dur / total
        if share > threshold:
            lines.append(f"{dur}\t{share * 100.0:5.2f}\t{providers.get(name, '')}\t{name}")
    return lines
def no_loops_in_tf2(onnx_model):
    """Under TF2 the converted graph must contain no ONNX Loop nodes; TF1 passes trivially."""
    if not is_tf2:
        return True
    return all(node.op_type != "Loop" for node in onnx_model.graph.node)
def all_recurrents_should_bidirectional(onnx_model):
    """True iff every GRU/LSTM/RNN node's 'direction' attribute is 'bidirectional'."""
    recurrent_ops = ('GRU', 'LSTM', 'RNN')
    for node in onnx_model.graph.node:
        if node.op_type not in recurrent_ops:
            continue
        for attr in node.attribute:
            if attr.name == 'direction' and helper.get_attribute_value(attr) != b'bidirectional':
                return False
    return True
def run_onnx_runtime(case_name, onnx_model, data, expected, model_files, rtol=1.e-3, atol=1.e-6,
                     compare_perf=False, enable_profiling=False):
    """Save *onnx_model* to a temp file, run it with onnxruntime on *data*, and
    compare against *expected* within (rtol, atol).

    Temp files of passing cases are appended to *model_files* for cleanup;
    failing cases keep their files on disk for diagnosis.  Returns True when
    all outputs match (or onnxruntime is not installed), False on mismatch,
    and None when *expected* is None.
    """
    if not os.path.exists(tmp_path):
        os.mkdir(tmp_path)
    temp_model_file = os.path.join(tmp_path, 'temp_' + case_name + '.onnx')
    onnx.save_model(onnx_model, temp_model_file)
    try:
        import onnxruntime
        if enable_profiling:
            from onnxruntime import SessionOptions
            sess_options = SessionOptions()
            sess_options.enable_profiling = True
            sess = onnxruntime.InferenceSession(temp_model_file, sess_options)
        else:
            sess = onnxruntime.InferenceSession(temp_model_file)
    except ImportError:
        # Without onnxruntime the comparison is skipped and treated as a pass.
        mock_keras2onnx.common.k2o_logger().warning("Cannot import ONNXRuntime!")
        return True

    if isinstance(data, dict):
        feed_input = data
    else:
        data = data if isinstance(data, list) else [data]
        input_names = sess.get_inputs()
        # to avoid too complicated test code, we restrict the input name in Keras test cases must be
        # in alphabetical order. It's always true unless there is any trick preventing that.
        feed = zip(sorted(i_.name for i_ in input_names), data)
        feed_input = dict(feed)
    actual = sess.run(None, feed_input)
    if compare_perf:
        # Simple wall-clock average over 10 extra runs.
        count = 10
        time_start = time.time()
        for i in range(count):
            sess.run(None, feed_input)
        time_end = time.time()
        print('avg ort time =' + str((time_end - time_start)/count))
    if enable_profiling:
        profile_file = sess.end_profiling()
        profile_records = load_profile_json(profile_file)
        lines = parse_profile_results(profile_records)
        print("Results:")
        print("-" * 64)
        for line in lines:
            print(line)
    if expected is None:
        return
    # Normalize expected to a list of arrays to compare output-by-output.
    if isinstance(expected, tuple):
        expected = list(expected)
    elif not isinstance(expected, list):
        expected = [expected]
    res = all(np.allclose(expected[n_], actual[n_], rtol=rtol, atol=atol) for n_ in range(len(expected)))
    if res and temp_model_file not in model_files:  # still keep the failed case files for the diagnosis.
        model_files.append(temp_model_file)
    if not res:
        for n_ in range(len(expected)):
            expected_list = expected[n_].flatten()
            actual_list = actual[n_].flatten()
            print_mismatches(case_name, n_, expected_list, actual_list, rtol, atol)
    return res
def run_keras_and_ort(case_name, onnx_model, keras_model, data, expected, model_files, rtol=1.e-3, atol=1.e-6,
                      compare_perf=False, enable_profiling=False):
    """Optionally time keras_model.predict over 10 runs, then delegate to run_onnx_runtime."""
    if compare_perf:
        repeats = 10
        started = time.time()
        for _ in range(repeats):
            keras_model.predict(data)
        elapsed = time.time() - started
        print('avg keras time =' + str(elapsed / repeats))
    return run_onnx_runtime(case_name, onnx_model, data, expected, model_files,
                            rtol=rtol, atol=atol, compare_perf=compare_perf, enable_profiling=enable_profiling)
def run_image(model, model_files, img_path, model_name='onnx_conversion', rtol=1.e-3, atol=1.e-5, color_mode="rgb",
              target_size=224, compare_perf=False):
    """Run a Keras image model and its ONNX conversion on one image and compare.

    Returns a (result, message) pair: result is the run_onnx_runtime outcome
    (or False when the image file is missing), message describes any skipped
    keras prediction.
    """
    if is_tf2:
        preprocess_input = keras.applications.imagenet_utils.preprocess_input
    else:
        preprocess_input = keras.applications.resnet50.preprocess_input
    image = keras.preprocessing.image

    try:
        if not isinstance(target_size, tuple):
            target_size = (target_size, target_size)
        if is_keras_older_than("2.2.3"):
            # color_mode is not supported in old keras version
            img = image.load_img(img_path, target_size=target_size)
        else:
            img = image.load_img(img_path, color_mode=color_mode, target_size=target_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        if color_mode == "rgb":
            # ImageNet-style preprocessing is only applied to RGB inputs.
            x = preprocess_input(x)
    except FileNotFoundError:
        return False, 'The image data does not exist.'

    msg = ''
    preds = None
    try:
        preds = model.predict(x)
        if compare_perf:
            count = 10
            time_start = time.time()
            for i in range(count):
                model.predict(x)
            time_end = time.time()
            print('avg keras time =' + str((time_end - time_start) / count))
    except RuntimeError:
        # Keras prediction failing is tolerated: conversion is still attempted
        # and run_onnx_runtime skips the comparison when preds is None.
        msg = 'keras prediction throws an exception for model ' + model.name + ', skip comparison.'

    onnx_model = mock_keras2onnx.convert_keras(model, model.name, target_opset=get_max_opset_supported_for_test())
    res = run_onnx_runtime(model_name, onnx_model, x, preds, model_files, rtol=rtol, atol=atol, compare_perf=compare_perf)
    return res, msg
def is_bloburl_access(url):
    """Return True when *url* answers with HTTP 200, False on any URL error.

    The submodules are imported explicitly: a bare ``import urllib`` does not
    guarantee that ``urllib.request``/``urllib.error`` are available as
    attributes, which made the original version a latent AttributeError.
    """
    import urllib.request
    import urllib.error
    try:
        response = urllib.request.urlopen(url)
        return response.getcode() == 200
    except urllib.error.URLError:
        return False
def get_max_opset_supported_by_ort():
    """Return the max opset mapped for the installed onnxruntime, or None.

    None is returned when onnxruntime is not installed or its
    major.minor version is missing from ORT_OPSET_VERSION.
    """
    try:
        import onnxruntime as ort
    except ImportError:
        # No onnxruntime available at all.
        return None
    ort_ver = Version(ort.__version__)
    # Normalize to "major.minor.0" to match the keys of ORT_OPSET_VERSION.
    ort_ver = Version("{}.{}.0".format(ort_ver.major, ort_ver.minor)).base_version
    opset = ORT_OPSET_VERSION.get(ort_ver)
    if opset is None:
        print("Given onnxruntime version doesn't exist in ORT_OPSET_VERSION: {}".format(ort_ver))
    return opset
def get_max_opset_supported_for_test():
    """Highest opset usable in tests: min of ORT's and tf2onnx's maxima.

    Falls back to tf2onnx's maximum when onnxruntime is unavailable or its
    version is unmapped — the original ``min(None, x)`` raised TypeError in
    that case.
    """
    converter_max = get_maximum_opset_supported()
    ort_max = get_max_opset_supported_by_ort()
    return converter_max if ort_max is None else min(ort_max, converter_max)
def convert_keras_for_test(model, name=None, target_opset=None, **kwargs):
    """Convert *model* to ONNX, defaulting the opset to the newest one ORT supports."""
    chosen_opset = get_max_opset_supported_by_ort() if target_opset is None else target_opset
    print("Trying to run test with opset version: {}".format(chosen_opset))
    return convert_keras(model=model, name=name, target_opset=chosen_opset, **kwargs)
| 13,088 | 37.497059 | 122 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_unit_tests/mock_keras2onnx/__init__.py |
from tf2onnx.keras2onnx_api import convert_keras
def set_converter(*args, **kwargs):
    """No-op stand-in for keras2onnx.set_converter; accepts and ignores any arguments."""
    return None
| 96 | 15.166667 | 48 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_unit_tests/mock_keras2onnx/proto/tfcompat.py | # SPDX-License-Identifier: Apache-2.0
import os
import tensorflow as _tf
from packaging.version import Version
is_tf2 = Version(_tf.__version__.split('-')[0]) >= Version("2.0.0")
def normalize_tensor_shape(tensor_shape):
    """Return *tensor_shape* as a plain list of dimensions for both TF1 and TF2."""
    if is_tf2:
        # TF2 shapes iterate directly as ints (or None).
        return list(tensor_shape)
    # TF1 shapes yield Dimension objects; unwrap their .value.
    return [dim.value for dim in tensor_shape]
def dump_graph_into_tensorboard(tf_graph):
    # type: (_tf.Graph) -> None
    """Write *tf_graph* to the directory named by the TB_LOG_DIR env var.

    No-op when TB_LOG_DIR is unset or empty; uses the TF2 summary writer or
    the TF1 FileWriter depending on the detected version.
    """
    _tb_log_dir = os.environ.get('TB_LOG_DIR')
    if _tb_log_dir:
        if is_tf2:
            from tensorflow.python.ops.summary_ops_v2 import graph as write_graph
            pb_visual_writer = _tf.summary.create_file_writer(_tb_log_dir)
            with pb_visual_writer.as_default():
                write_graph(tf_graph)
        else:
            from tensorflow.python.summary import summary
            pb_visual_writer = summary.FileWriter(_tb_log_dir)
            pb_visual_writer.add_graph(tf_graph)
if is_tf2:
    # Under TF2, expose the v1 compatibility namespace under the old module
    # name so downstream code can keep calling tensorflow.<v1-api>.
    tensorflow = _tf.compat.v1

    def is_subclassed(layer):
        """Returns True if the object is a subclassed layer or subclassed model."""
        # Heuristic: built-in keras layers/models live in keras.engine /
        # keras.layers modules; anything else is treated as user-subclassed.
        return (layer.__module__.find('keras.engine') == -1 and
                layer.__module__.find('keras.layers') == -1)
else:
    tensorflow = _tf

    def is_subclassed(layer):
        # Subclassed keras models are a TF2-only concept.
        return False
| 1,292 | 27.733333 | 83 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_unit_tests/mock_keras2onnx/proto/__init__.py | # SPDX-License-Identifier: Apache-2.0
import os
import tensorflow
from packaging.version import Version
# Rather than using ONNX protobuf definition throughout our codebase, we import ONNX protobuf definition here so that
# we can conduct quick fixes by overwriting ONNX functions without changing any lines elsewhere.
from onnx import onnx_pb as onnx_proto
from onnx import helper
from onnx import save_model as save_model
def _check_onnx_version():
    """Fail fast if the installed onnx package is older than 1.0.1.

    Uses importlib.metadata instead of the deprecated pkg_resources API; the
    version comparison relies on packaging.Version (already imported above).
    """
    from importlib.metadata import version as _installed_version
    min_required_version = Version('1.0.1')
    current_version = Version(_installed_version('onnx'))
    assert current_version >= min_required_version, 'Keras2ONNX requires ONNX version 1.0.1 or a newer one'


_check_onnx_version()
def is_tensorflow_older_than(version_str):
    """Return True when the installed tensorflow release predates *version_str*."""
    installed = Version(tensorflow.__version__.split('-')[0])
    return installed < Version(version_str)
def is_tensorflow_later_than(version_str):
    """Return True when the installed tensorflow release is newer than *version_str*."""
    installed = Version(tensorflow.__version__.split('-')[0])
    return installed > Version(version_str)
def python_keras_is_deprecated():
    """True when TF is newer than 2.5.0, i.e. tensorflow.python.keras should
    no longer be used and the public `tensorflow.keras` is imported instead."""
    return is_tensorflow_later_than("2.5.0")
# Decide which keras implementation this package uses: tf.keras or standalone
# keras.  TF_KERAS=0 forces standalone keras, any other value forces tf.keras,
# and an unset variable means auto-detect (tf.keras whenever TF >= 2.0).
is_tf_keras = False
str_tk_keras = os.environ.get('TF_KERAS', None)
if str_tk_keras is None:
    # With tensorflow 2.x, by default we load tf.keras as the framework, instead of Keras
    is_tf_keras = not is_tensorflow_older_than('2.0.0')
else:
    is_tf_keras = str_tk_keras != '0'

if is_tf_keras:
    if python_keras_is_deprecated():
        from tensorflow import keras
    else:
        from tensorflow.python import keras
else:
    try:
        import keras
        if keras.Model == tensorflow.keras.Model:  # since keras 2.4, keras and tf.keras is unified.
            is_tf_keras = True
    except ImportError:
        # Standalone keras is not installed; fall back to tf.keras.
        is_tf_keras = True
        if python_keras_is_deprecated():
            from tensorflow import keras
        else:
            from tensorflow.python import keras
def is_keras_older_than(version_str):
    """Return True when the active keras release predates *version_str*."""
    active = Version(keras.__version__.split('-')[0])
    return active < Version(version_str)
def is_keras_later_than(version_str):
    """Return True when the active keras release is newer than *version_str*."""
    active = Version(keras.__version__.split('-')[0])
    return active > Version(version_str)
| 2,109 | 29.57971 | 117 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_unit_tests/mock_keras2onnx/ke2onnx/batch_norm.py | convert_keras_batch_normalization = None | 40 | 40 | 40 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/tfhub/tfhub_mobile_food_segmenter_V1.py | # SPDX-License-Identifier: Apache-2.0
import os
import numpy
from _tools import generate_random_images, benchmark
import tf2onnx
import onnxruntime as ort
def main(opset=13):
    """Download the mobile_food_segmenter_V1 TF-Hub model and convert it to ONNX.

    Runs two conversion paths, each behind an `if True:` toggle:
    NOTE(review): the `if True:` guards look like manual switches left from
    debugging the two paths — consider replacing them with parameters.
    """
    url = "https://tfhub.dev/google/seefood/segmenter/mobile_food_segmenter_V1/1?tf-hub-format=compressed"
    dest = "tf-mobile_food_segmenter_V1"
    name = "mobile_food_segmenter_V1"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))
    imgs = generate_random_images(shape=(1, 513, 513, 3), scale=1.)

    if True:
        benchmark(url, dest, onnx_name, opset, imgs, tag='')

    # The conversion works but tensorflow fails with
    # TypeError: 'AutoTrackable' object is not callable
    if True:
        import tensorflow.compat.v2 as tf
        import tensorflow_hub as hub
        m = hub.KerasLayer('https://tfhub.dev/google/seefood/segmenter/mobile_food_segmenter_V1/1')
        inputs = {
            "X": tf.keras.Input(shape=[1, 513, 513, 3], dtype="float32", name="X"),
        }
        outputs = m(inputs)["default"]
        # TypeError: pruned(images) missing required arguments: images
        print(outputs)
        model = tf.keras.Model(inputs, outputs)
        if not os.path.exists(dest):
            os.makedirs(dest)
        # This model is a large model.
        tf2onnx.convert.from_keras(model, opset=13, output_path=onnx_name)
if __name__ == "__main__":
main()
| 1,400 | 30.840909 | 106 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/tfhub/tfhub_lambert_en_uncased_L-24_H-1024_A-16.py | # SPDX-License-Identifier: Apache-2.0
import os
from collections import OrderedDict
import numpy
import numpy.random as rnd
from _tools import generate_text_inputs, benchmark
def main(opset=13):
    """Download lambert_en_uncased_L-24_H-1024_A-16 from TF-Hub and benchmark
    its ONNX conversion on synthetic BERT-style inputs."""
    if False:
        # Disabled exploration path: runs the BERT preprocessor so the exact
        # input names/shapes/dtypes expected by the encoder can be printed.
        import tensorflow as tf
        import tensorflow_text
        import tensorflow_hub as hub
        sentences = tf.constant(["Hi I'm some text"])
        text_input = tf.keras.layers.Input(shape=(), dtype=tf.string)
        encoder = hub.KerasLayer(
            "https://tfhub.dev/tensorflow/lambert_en_uncased_L-24_H-1024_A-16/2", trainable=True)
        preprocessor = hub.KerasLayer(
            "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3")
        encoder_inputs = preprocessor(text_input)
        embedded_inputs = {k: v.numpy() for k, v in preprocessor(sentences).items()}
        for k, v in embedded_inputs.items():
            print(k, v.dtype, v.shape)

    url = "https://tfhub.dev/tensorflow/lambert_en_uncased_L-24_H-1024_A-16/2?tf-hub-format=compressed"
    dest = "tf-lambert_en_uncased_L-24_H-1024_A-16"
    name = "lambert_en_uncased_L-24_H-1024_A-16"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))
    # Ten random length-128 samples: token ids, attention mask, segment ids.
    inputs = [OrderedDict([
        ('input_word_ids', numpy.array([rnd.randint(0, 1000) for i in range(0, 128)], dtype=numpy.int32).reshape((1, -1))),
        ('input_mask', numpy.array([rnd.randint(0, 1) for i in range(0, 128)], dtype=numpy.int32).reshape((1, -1))),
        ('input_type_ids', numpy.array([i//5 for i in range(0, 128)], dtype=numpy.int32).reshape((1, -1)))
    ]) for i in range(0, 10)]
    benchmark(url, dest, onnx_name, opset, inputs, output_name="pooled_output")
main()
| 1,706 | 39.642857 | 123 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/tfhub/tfhub_resnet_v1_101_keras.py | # SPDX-License-Identifier: Apache-2.0
import os
import numpy
import onnxruntime as ort
import tensorflow as tf
import tensorflow_hub as hub
import tf2onnx
from _tools import generate_random_images, check_discrepencies

# Build a Keras model around the TF-Hub ResNet-101 feature-vector layer and
# compute the reference output for one random image batch.
imgs = generate_random_images(shape=(1, 224, 224, 3), scale=1.)
model = tf.keras.Sequential([
    hub.KerasLayer("https://tfhub.dev/google/imagenet/resnet_v1_101/feature_vector/5",
                   trainable=False)])
model.build([None, 224, 224, 3])
expected_output = model(imgs[0])

# Convert to ONNX only if a previous run has not produced the file already.
dest = "tf-resnet_v1_101"
if not os.path.exists(dest):
    os.makedirs(dest)
dest_name = os.path.join(dest, "resnet_v1_101-13-keras.onnx")
if not os.path.exists(dest_name):
    tf2onnx.convert.from_keras(model, opset=13, output_path=dest_name)

# Run the converted model in onnxruntime on the same batch.
sess = ort.InferenceSession(dest_name)
print('inputs', [_.name for _ in sess.get_inputs()])
ort_output = sess.run(None, {"keras_layer_input": imgs[0]})
print("Actual")
print(ort_output)
print("Expected")
print(expected_output)

# Report absolute and (epsilon-guarded) relative discrepancies between the
# TF reference output and the onnxruntime output.
diff = expected_output.numpy() - ort_output[0]
max_diff = numpy.abs(diff).max()
rel_diff = (numpy.abs(diff) / (expected_output.numpy() + 1e-5)).max()
print(max_diff, rel_diff, [ort_output[0].min(), ort_output[0].max()])
| 1,201 | 29.820513 | 86 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/tfhub/tfhub_albert_en_xlarge_keras.py | # Adapted the sample code on https://tfhub.dev/tensorflow/albert_en_xlarge/3
import tensorflow_text as text
import tensorflow as tf
import tensorflow_hub as hub

# Using hub.load instead of KerasLayer lets us easily intercept the results of the
# preprocessor before passing it to the encoder.
preprocessor = hub.load("http://tfhub.dev/tensorflow/albert_en_preprocess/3")
encoder = hub.load("https://tfhub.dev/tensorflow/albert_en_xlarge/3")
sentences = tf.constant(["Hi I'm some text"])
embedded_inputs = {k: v.numpy() for k, v in preprocessor(sentences).items()}
print("Inputs")
print(embedded_inputs)
# Reference output of the encoder, used to compare against onnxruntime below.
expected_output = encoder(embedded_inputs)["pooled_output"].numpy()

# Now make an actual keras layer for the part we want to convert.
encoder = hub.KerasLayer(
    "https://tfhub.dev/tensorflow/albert_en_xlarge/3",
    trainable=True)

# To convert it to a model, we need the input shapes/types. These can be
# determined from the types/shapes/names of embedded_inputs. Remove the batch
# dim from the shapes.
encoder_inputs = {
    "input_word_ids": tf.keras.Input(shape=[None], dtype="int32", name="input_word_ids"),
    "input_mask": tf.keras.Input(shape=[None], dtype="int32", name="input_mask"),
    "input_type_ids": tf.keras.Input(shape=[None], dtype="int32", name="input_type_ids"),
}
encoder_outputs = encoder(encoder_inputs)["pooled_output"]
encoding_model = tf.keras.Model(encoder_inputs, encoder_outputs)

import tf2onnx
import onnxruntime as ort
import zipfile
import os

print("Converting")
dest = "tf-albert-en-xlarge"
if not os.path.exists(dest):
    os.makedirs(dest)
dest_name = os.path.join(dest, "albert_en_xlarge.zip")
# This model is a large model (>2GB), so tf2onnx writes a zip archive with
# the weights stored next to the model proto.
tf2onnx.convert.from_keras(encoding_model, opset=13, large_model=True, output_path=dest_name)

# To run the model in ORT we need to unzip it.
with zipfile.ZipFile(dest_name, 'r') as z:
    z.extractall(os.path.join(dest, "albert_en_xlarge"))
sess = ort.InferenceSession(os.path.join(dest, "albert_en_xlarge", "__MODEL_PROTO.onnx"))
ort_output = sess.run(None, embedded_inputs)
print("Actual")
print(ort_output[0])
print("Expected")
print(expected_output)
| 2,127 | 37.690909 | 98 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/tfhub/tfhub_mobilebert_en_uncased.py | # SPDX-License-Identifier: Apache-2.0
import os
from collections import OrderedDict
import numpy
import numpy.random as rnd
from _tools import generate_text_inputs, benchmark
def main(opset=13):
    """Convert the MobileBERT TF-Hub encoder to ONNX and benchmark it.

    Args:
        opset (int): ONNX opset version used for the conversion.
    """
    if False:
        # Reference snippet (never executed): demonstrates how the BERT
        # preprocessor builds the feeds consumed by the encoder.
        import tensorflow as tf
        import tensorflow_text
        import tensorflow_hub as hub
        sentences = tf.constant(["Hi I'm some text"])
        text_input = tf.keras.layers.Input(shape=(), dtype=tf.string)
        preprocessor = hub.KerasLayer(
            "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3")
        encoder_inputs = preprocessor(text_input)
        embedded_inputs = {k: v.numpy() for k, v in preprocessor(sentences).items()}
        for k, v in embedded_inputs.items():
            print(k, v.dtype, v.shape)

    model_url = ("https://tfhub.dev/tensorflow/"
                 "mobilebert_en_uncased_L-24_H-128_B-512_A-4_F-4_OPT/1"
                 "?tf-hub-format=compressed")
    work_dir = "tf-mobilebert_en_uncased_L-24_H-128_B-512_A-4_F-4_OPT"
    model_name = "mobilebert_en_uncased_L-24_H-128_B-512_A-4_F-4_OPT"
    onnx_path = os.path.join(work_dir, "%s-%d.onnx" % (model_name, opset))
    feeds = generate_text_inputs()
    # ort_name="mobile_bert_encoder_50" may be passed to benchmark to select
    # a specific node; it was left disabled in the original.
    benchmark(model_url, work_dir, onnx_path, opset, feeds,
              output_name="attention_scores")


if __name__ == "__main__":
    main()
| 1,308 | 35.361111 | 118 | py |
sherpa | sherpa-master/generate_readme.py | import os
# RST preamble written verbatim at the top of the generated README; the doc
# pages listed in `filenames` supply the remaining sections.
welcome_text = """SHERPA: A Python Hyperparameter Optimization Library
====================================================

.. figure:: https://docs.google.com/drawings/d/e/2PACX-1vRaTP5d5WqT4KY4V57niI4wFDkz0098zHTRzZ9n7SzzFtdN5akBd75HchBnhYI-GPv_AYH1zYa0O2_0/pub?w=522&h=150
   :figwidth: 100%
   :align: right
   :height: 150px
   :alt: SHERPA logo

SHERPA is a Python library for hyperparameter tuning of machine learning models. It provides:

* hyperparameter optimization for machine learning researchers
* a choice of hyperparameter optimization algorithms
* parallel computation that can be fitted to the user's needs
* a live dashboard for the exploratory analysis of results.

The documentation at http://parameter-sherpa.readthedocs.io/ provides installation instructions for parallel hyperparameter
optimizations and using the different optimization algorithms. See below for
a short example on what working with SHERPA looks like.

"""
# Doc sources appended to the README, relative to the `docs` directory.
filenames = ['gettingstarted/kerastosherpa.rst', 'gettingstarted/installation.rst']
with open('README.rst', 'w') as outfile:
    outfile.write(welcome_text)
    for fname in filenames:
        with open(os.path.join('docs', fname)) as infile:
            data = infile.read().splitlines(True)
        # data[1:] drops the first line of each doc page (its own title line)
        # so the README keeps a single top-level heading.
        outfile.writelines(data[1:])
| 1,298 | 36.114286 | 151 | py |
sherpa | sherpa-master/sherpa/core.py | """
SHERPA is a Python library for hyperparameter tuning of machine learning models.
Copyright (C) 2018 Lars Hertel, Peter Sadowski, and Julian Collado.
This file is part of SHERPA.
SHERPA is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SHERPA is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SHERPA. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import absolute_import
import os
import sys
import numpy
import pandas
import collections
import time
import logging
import socket
import multiprocessing
import warnings
import contextlib
import shlex
from .database import _Database
from .schedulers import _JobStatus
import datetime
try:
import cPickle as pickle
except ImportError: # python 3.x
import pickle
logger = logging.getLogger(__name__)
logging.getLogger('werkzeug').setLevel(level=logging.WARNING)
rng = numpy.random.RandomState(None)
class Trial(object):
    """
    A single evaluation point of the search: one concrete assignment of
    values to every hyperparameter, identified by an integer ID.

    Args:
        id (int): the Trial ID.
        parameters (dict): parameter-name, parameter-value pairs.
    """

    def __init__(self, id, parameters):
        # The public attribute names are part of the interface: Study,
        # _Runner and the database all read trial.id and trial.parameters.
        self.id = id
        self.parameters = parameters
class TrialStatus(object):
    """String constants describing the lifecycle state of a trial."""

    # A trial reports INTERMEDIATE rows while running; finalize() rewrites
    # the last row with one of the terminal states below.
    INTERMEDIATE = 'INTERMEDIATE'
    COMPLETED = 'COMPLETED'
    FAILED = 'FAILED'
    STOPPED = 'STOPPED'
class Study(object):
    """
    The core of an optimization.

    Includes functionality to get new suggested trials and add observations
    for those. Used internally but can also be used directly by the user.

    Args:
        parameters (list[sherpa.core.Parameter]): a list of parameter ranges.
        algorithm (sherpa.algorithms.Algorithm): the optimization algorithm.
        lower_is_better (bool): whether to minimize or maximize the objective.
        stopping_rule (sherpa.algorithms.StoppingRule): algorithm to stop badly
            performing trials.
        dashboard_port (int): the port for the dashboard web-server, if ``None``
            the first free port in the range `8880` to `9999` is found and used.
        disable_dashboard (bool): option to not run the dashboard.
        output_dir (str): directory path for CSV results.
        random_seed (int): seed to use for NumPy random number generators
            throughout.
    """
    def __init__(self,
                 parameters,
                 algorithm,
                 lower_is_better,
                 stopping_rule=None,
                 dashboard_port=None,
                 disable_dashboard=False,
                 output_dir=None):
        self.parameters = parameters
        self.algorithm = algorithm
        self.stopping_rule = stopping_rule
        self.lower_is_better = lower_is_better
        # All observations accumulate in this DataFrame; one row per
        # (trial, iteration) plus one terminal row per finalized trial.
        self.results = pandas.DataFrame()
        self.num_trials = 0
        # Trials enqueued via add_trial() take precedence over algorithm
        # suggestions (FIFO).
        self._trial_queue = collections.deque()
        self.output_dir = output_dir
        # Trial IDs requested to stop via the dashboard.
        self._ids_to_stop = set()
        if not disable_dashboard:
            if sys.platform in ['cygwin', 'win32']:
                raise EnvironmentError('Dashboard not supported on Windows. Disable the dashboard and save the '
                                       'finalized study instead.')
            # A Manager namespace shares the results DataFrame with the
            # dashboard process; a Queue carries stop requests back.
            self._mgr = multiprocessing.Manager()
            self._results_channel = self._mgr.Namespace()
            self._results_channel.df = self.results
            self._stopping_channel = multiprocessing.Queue()
            dashboard_port = dashboard_port or _port_finder(8880, 9999)
            self.dashboard_process = self._run_web_server(dashboard_port)
        else:
            self.dashboard_process = None

    def add_observation(self, trial, objective, iteration=1, context={}):
        """
        Add a single observation of the objective value for a given trial.

        Args:
            trial (sherpa.core.Trial): trial for which an observation is to be
                added.
            iteration (int): iteration number e.g. epoch.
            objective (float): objective value.
            context (dict): other metrics or values to record.
        """
        assert isinstance(trial, Trial), "Trial must be sherpa.core.Trial"
        # Reject duplicate (trial, iteration) submissions.
        if not self.results.empty and\
            ((self.results['Trial-ID'] == trial.id)
             & (self.results['Iteration'] == iteration)).any():
            raise ValueError("Observation for Trial-ID {} at Iteration {} "
                             "already exists.".format(trial.id, iteration))
        if not all(p.name in trial.parameters for p in self.parameters):
            raise ValueError("The trial is missing parameter entries. It "
                             "may not be from this study.")
        row = [
            ('Trial-ID', trial.id),
            ('Status', 'INTERMEDIATE'),
            ('Iteration', iteration)
        ]
        # Add parameters in sorted order
        p = trial.parameters
        row += sorted(p.items(), key=lambda t: t[0])
        # Add objective and sorted context
        row += [('Objective', objective)]
        row += sorted(context.items(), key=lambda t: t[0])
        # Use ordered dict to maintain order
        row = collections.OrderedDict([(key, [value]) for key, value in row])
        # NOTE(review): DataFrame.append was deprecated in pandas 1.4 and
        # removed in 2.0 — this code presumably assumes pandas < 2; confirm.
        self.results = self.results.append(pandas.DataFrame.from_dict(row),
                                           ignore_index=True)
        if self.dashboard_process:
            self._results_channel.df = self.results

    def finalize(self, trial, status='COMPLETED'):
        """
        Once a trial will not add any more observations it
        must be finalized with this function.

        Args:
            trial (sherpa.core.Trial): trial that is completed.
            status (str): one of 'COMPLETED', 'FAILED', 'STOPPED'.
        """
        assert isinstance(trial, Trial), "Trial must be sherpa.core.Trial"
        assert status in ['COMPLETED', 'FAILED', 'STOPPED']
        try:
            rows = self.results.loc[self.results['Trial-ID'] == trial.id]
            if len(rows) == 0:
                raise KeyError
        except KeyError:
            raise ValueError("Trial {} does not exist or did not "
                             "submit metrics.".format(trial.id))
        # Find best row as minimum or maximum objective
        best_idx = (rows['Objective'].idxmin() if self.lower_is_better
                    else rows['Objective'].idxmax())
        try:
            best_row = rows.loc[best_idx].copy()
        except TypeError:
            # idxmin/idxmax return NaN when every objective is NaN, which
            # makes .loc raise TypeError.
            warnings.warn("Could not finalize trial {}. Only NaNs "
                          "encountered.".format(trial.id), RuntimeWarning)
            return
        # Set status and append a terminal row carrying the best objective
        # and the last iteration number.
        best_row['Status'] = status
        best_row['Iteration'] = rows['Iteration'].max()
        # NOTE(review): DataFrame.append removed in pandas 2.0; see
        # add_observation.
        self.results = self.results.append(best_row, ignore_index=True)
        if self.dashboard_process:
            self._results_channel.df = self.results

    def get_suggestion(self):
        """
        Obtain a new suggested trial.

        This function wraps the algorithm that was passed to the
        study.

        Returns:
            dict: a parameter suggestion.
        """
        # Manually enqueued trials (add_trial) are served first, FIFO.
        if len(self._trial_queue) != 0:
            return self._trial_queue.popleft()
        p = self.algorithm.get_suggestion(self.parameters, self.results,
                                          self.lower_is_better)
        if isinstance(p, dict):
            self.num_trials += 1
            t = Trial(id=self.num_trials, parameters=p)
            return t
        else:
            # Algorithm may return a sentinel (e.g. AlgorithmState.DONE or
            # WAIT) instead of parameters; pass it through unchanged.
            return p

    def should_trial_stop(self, trial):
        """
        Determines whether given trial should stop.

        This function wraps the stopping rule provided to the
        study.

        Args:
            trial (sherpa.core.Trial): trial to be evaluated.

        Returns:
            bool: decision.
        """
        assert isinstance(trial, Trial), "Trial must be sherpa.core.Trial"
        if self.dashboard_process:
            # Drain stop requests issued from the dashboard UI.
            while not self._stopping_channel.empty():
                self._ids_to_stop.add(self._stopping_channel.get())
        if trial.id in self._ids_to_stop:
            return True
        if self.stopping_rule:
            return self.stopping_rule.should_trial_stop(trial, self.results,
                                                        self.lower_is_better)
        else:
            return False

    def add_trial(self, trial):
        """
        Adds a trial into queue for next suggestion.

        Trials added via this method forego the suggestions
        made by the algorithm and are returned by the
        `get_suggestion` method on a first in first out
        basis.

        Args:
            trial (sherpa.core.Trial): the trial to be enqueued.
        """
        self._trial_queue.append(trial)

    def get_best_result(self):
        """
        Retrieve the best result so far.

        Returns:
            pandas.DataFrame: row of the best result.
        """
        if self.results.empty:
            return {}
        return self.algorithm.get_best_result(parameters=self.parameters,
                                              results=self.results,
                                              lower_is_better=
                                              self.lower_is_better)

    def _run_web_server(self, port):
        """
        Runs the SHERPA dashboard.

        Args:
            port (int): Port for web app.

        Returns:
            proc (multiprocessing.Process): the process that runs the web app.
            results_channel (multiprocessing.Queue): queue to put results in
            stopping_channel (multiprocessing.Queue): queue to get models to stop from.
        """
        from .app.app import app
        # Tell the dashboard how to render/parse each parameter column.
        param_types = {}
        for p in self.parameters:
            if isinstance(p, Continuous) or (isinstance(p, Choice) and type(p.range[0])==float):
                param_types[p.name] = 'float'
            elif isinstance(p, Discrete) or (isinstance(p, Choice) and type(p.range[0])==int):
                param_types[p.name] = 'int'
            else:
                param_types[p.name] = 'string'
        app.parameter_types = param_types
        app.set_results_channel(self._results_channel)
        app.set_stopping_channel(self._stopping_channel)
        proc = multiprocessing.Process(target=app.run,
                                       kwargs={'port': port,
                                               'debug': True,
                                               'use_reloader': False,
                                               'host': '0.0.0.0',
                                               'threaded': True})
        msg = "\n" + "-"*55 + "\n"
        msg += "SHERPA Dashboard running. Access via\nhttp://{}:{} or " \
               "\nhttp://{}:{} if on a cluster, or " \
               "\nhttp://{}:{} if running locally.".format(
            socket.gethostbyname(socket.gethostname()), port,
            socket.gethostname(), port,
            "localhost", port)
        msg += "\n" + "-"*55
        logger.info(msg)
        # Daemonize so the dashboard dies with the parent process.
        proc.daemon = True
        proc.start()
        return proc

    def save(self, output_dir=None):
        """
        Stores results to CSV and attributes to config file.

        Args:
            output_dir (str): directory to store CSV to, only needed if Study
                output_dir is not defined.
        """
        if not output_dir:
            assert self.output_dir, "If no output-directory is specified, " \
                                    "a directory needs to be passed as argument"
        cfg = {'parameters': self.parameters,
               'lower_is_better': self.lower_is_better,
               'num_trials': self.num_trials}
        # self.output_dir takes precedence over the argument when both exist.
        d = self.output_dir or output_dir
        with open(os.path.join(d, 'config.pkl'), 'wb') as f:
            pickle.dump(cfg, f)
        self.results.to_csv(os.path.join(self.output_dir or output_dir,
                                         'results.csv'), index=False)

    @staticmethod
    def load_dashboard(path):
        """
        Loads a study from an output dir without the algorithm.

        Args:
            path (str): the path to the output dir.

        Returns:
            sherpa.core.Study: the study running the dashboard, note that
                currently this study cannot be used to continue the optimization.
        """
        with open(os.path.join(path, 'config.pkl'), 'rb') as f:
            cfg = pickle.load(f)
        # algorithm=None: the restored study only serves the dashboard.
        s = Study(parameters=cfg['parameters'],
                  lower_is_better=cfg['lower_is_better'],
                  algorithm=None, output_dir=path)
        results_path = os.path.join(path, 'results.csv')
        s.results = pandas.read_csv(results_path)
        s.num_trials = cfg['num_trials']
        s._results_channel.df = s.results
        return s

    def __iter__(self):
        """
        Allow to iterate over a study.
        """
        return self

    def __next__(self):
        """
        Allows to write `for trial in study:`.
        """
        # Iteration stops when the algorithm returns a non-Trial sentinel
        # (e.g. None or AlgorithmState.DONE).
        t = self.get_suggestion()
        if isinstance(t, Trial):
            return t
        else:
            raise StopIteration

    def next(self):
        # Python 2 iterator protocol alias.
        return self.__next__()

    def keras_callback(self, trial, objective_name, context_names=[]):
        """
        Keras Callbacks to add observations to study

        Args:
            trial (sherpa.core.Trial): trial to send metrics for.
            objective_name (str): the name of the objective e.g. ``loss``,
                ``val_loss``, or any of the submitted metrics.
            context_names (list[str]): names of all other metrics to be
                monitored. (Mutable default is safe here: only iterated,
                never mutated.)
        """
        # Imported lazily so keras is only required when this helper is used.
        import keras.callbacks
        send_call = lambda epoch, logs: self.add_observation(trial=trial,
                                                             iteration=epoch,
                                                             objective=logs[objective_name],
                                                             context={n: logs[n] for n in context_names})
        return keras.callbacks.LambdaCallback(on_epoch_end=send_call)
class _Runner(object):
    """
    Encapsulates all functionality needed to run a Study in parallel.

    Responsibilities:

    * Get rows from database and check if any new observations need to be added
      to ``Study``.
    * Update active trials, finalize any completed/stopped/failed trials.
    * Check what trials should be stopped and call scheduler ``kill_job``
      method.
    * Check if new trials need to be submitted, get parameters and submit as a
      job.

    Args:
        study (sherpa.core.Study): the study that is run.
        scheduler (sherpa.schedulers.Scheduler): a scheduler object.
        database (sherpa.database._Database): the database.
        max_concurrent (int): how many trials to run in parallel.
        command (list[str]): components of the command that runs a trial script
            e.g. ["python", "train_nn.py"].
        resubmit_failed_trials (bool): whether a failed trial should be
            resubmitted.
    """
    def __init__(self, study, scheduler, database, max_concurrent,
                 command, resubmit_failed_trials=False):
        self.max_concurrent = max_concurrent
        self.command = command
        self.resubmit_failed_trials = resubmit_failed_trials
        self.scheduler = scheduler
        self.database = database
        self.study = study
        self._done = False  # whether optimization is done.
        self._active_trials = []  # ids of trials that are active.
        self._queued_for_stopping = set()  # trials that need to be stopped.
        self._all_trials = {}  # maps trial id to Trial object, process ID.
        # Maps scheduler job states to the terminal Study statuses.
        self._trial_status = {_JobStatus.finished: 'COMPLETED',
                              _JobStatus.killed: 'STOPPED',
                              _JobStatus.failed: 'FAILED',
                              _JobStatus.other: 'FAILED'}

    def update_results(self):
        """
        Get rows from database and check if anything new needs to be added to
        the results-table.
        """
        results = self.database.get_new_results()
        if results != [] and self._all_trials == {}:
            # Rows exist before any trial was submitted: a stale database.
            logger.warning(results)
            raise ValueError("Found unexpected results. Check the following\n"
                             "(1)\toutput_dir is empty\n"
                             "(2)\tno other database is running on this port.")
        for r in results:
            try:
                # Check if trial has already been collected.
                new_trial = (r.get('trial_id') not in
                             set(self.study.results['Trial-ID']))
            except KeyError:
                # Results table has no 'Trial-ID' column yet (empty study).
                new_trial = True
            if not new_trial:
                trial_idxs = self.study.results['Trial-ID'] == r.get('trial_id')
                trial_rows = self.study.results[trial_idxs]
                new_observation = (r.get('iteration') not in
                                   set(trial_rows['Iteration']))
            else:
                new_observation = True
            if new_trial or new_observation:
                # Retrieve the Trial object
                tid = r.get('trial_id')
                tdict = self._all_trials[tid]
                t = tdict.get('trial')
                self.study.add_observation(trial=t,
                                           iteration=r.get('iteration'),
                                           objective=r.get('objective'),
                                           context=r.get('context'))

    def update_active_trials(self):
        """
        Update active trials, finalize any completed/stopped/failed trials.
        """
        # Iterate in reverse so pop(i) does not shift unvisited indices.
        for i in reversed(range(len(self._active_trials))):
            tid = self._active_trials[i]
            logger.debug('Updating active trials.')
            status = self.scheduler.get_status(self._all_trials[tid].get('job_id'))
            if status in [_JobStatus.finished, _JobStatus.failed,
                          _JobStatus.killed, _JobStatus.other]:
                if tid in self._queued_for_stopping:
                    self._queued_for_stopping.remove(tid)
                try:
                    self.study.finalize(trial=self._all_trials[tid].get('trial'),
                                        status=self._trial_status[status])
                    self.study.save()
                except ValueError as e:
                    # finalize raises when the trial never submitted metrics;
                    # surface a diagnostic pointing at the trial's job log.
                    warn_msg = str(e)
                    warn_msg += ("\nRelevant results not found in database."
                                 " Check whether:\n"
                                 "(1)\tTrial is submitting metrics via e.g. sherpa.Client.send_metrics()\n"
                                 "(2)\tTrial crashed\n"
                                 " Trial script output is in: ")
                    warn_msg += os.path.join(self.study.output_dir, 'jobs', 'trial_{}.out'.format(tid))
                    warnings.warn(warn_msg, RuntimeWarning)
                    if self.resubmit_failed_trials:
                        logger.info("Resubmitting Trial {}.".format(tid))
                        self.study.add_trial(self._all_trials[tid].get('trial'))
                self._active_trials.pop(i)

    def stop_bad_performers(self):
        """
        Check whether any of the running trials should stop and add them for
        stopping if necessary.
        """
        for tid in self._active_trials:
            # Only issue one kill request per trial.
            if tid in self._queued_for_stopping:
                continue
            if self.study.should_trial_stop(self._all_trials[tid].get('trial')):
                logger.info("Stopping Trial {}".format(tid))
                self.scheduler.kill_job(self._all_trials[tid].get('job_id'))
                self._queued_for_stopping.add(tid)

    def submit_new_trials(self):
        """
        Get new trial and submit it to the job scheduler.
        """
        while len(self._active_trials) < self.max_concurrent:
            next_trial = self.study.get_suggestion()
            # Check if algorithm is done.
            if next_trial is None or next_trial == AlgorithmState.DONE:
                logger.info("Optimization Algorithm finished.")
                self._done = True
                break
            if next_trial == AlgorithmState.WAIT:
                # Algorithm wants outstanding results before suggesting more.
                break
            submit_msg = "\n" + "-"*55 + "\n" + "Submitting Trial {}:\n".format(next_trial.id)
            for pname, pval in next_trial.parameters.items():
                submit_msg += "\t{0:15}={1:>31}\n".format(str(pname), str(pval))
            submit_msg += "-"*55 + "\n"
            logger.info(submit_msg)
            self.database.enqueue_trial(next_trial)
            # Trial scripts discover their configuration via these env vars.
            pid = self.scheduler.submit_job(command=self.command,
                                            env={'SHERPA_TRIAL_ID': str(next_trial.id),
                                                 'SHERPA_DB_HOST': socket.gethostname(),
                                                 'SHERPA_DB_PORT': str(self.database.port),
                                                 'SHERPA_OUTPUT_DIR': self.study.output_dir},
                                            job_name='trial_' + str(next_trial.id))
            self._all_trials[next_trial.id] = {'trial': next_trial,
                                               'job_id': pid}
            self._active_trials.append(next_trial.id)

    def run_loop(self):
        """
        Run the optimization loop.
        """
        # Keep polling until the algorithm is done AND all jobs drained.
        while not self._done or self._active_trials:
            self.update_results()
            self.update_active_trials()
            self.stop_bad_performers()
            self.submit_new_trials()
            # Poll interval between scheduler/database sweeps.
            time.sleep(5)
def optimize(parameters, algorithm, lower_is_better,
             scheduler,
             command=None,
             filename=None,
             output_dir=None,
             max_concurrent=1,
             db_port=None, stopping_rule=None,
             dashboard_port=None, resubmit_failed_trials=False, verbose=1,
             load=False, mongodb_args=None, disable_dashboard=False):
    """
    Runs a Study with a scheduler and automatically runs a database in the
    background.

    Args:
        algorithm (sherpa.algorithms.Algorithm): takes results table and returns
            parameter set.
        parameters (list[sherpa.core.Parameter]): parameters being optimized.
        lower_is_better (bool): whether lower objective values are better.
        command (str): the command to run for the trial script.
        filename (str): the filename of the script to run. Will be run as
            "python <filename>".
        output_dir (str): where scheduler and database files will be stored.
            Defaults to a fresh ``./output_<timestamp>`` directory created
            per call.
        scheduler (sherpa.schedulers.Scheduler): a scheduler.
        max_concurrent (int): the number of trials that will be evaluated in
            parallel.
        db_port (int): port to run the database on.
        stopping_rule (sherpa.algorithms.StoppingRule): rule for stopping
            trials prematurely.
        dashboard_port (int): port to run the dashboard web-server on.
        resubmit_failed_trials (bool): whether to resubmit a trial if it failed.
        verbose (int, default=1): whether to print submit messages (0=no, 1=yes).
        load (bool): option to load study, currently not fully implemented.
        mongodb_args (dict[str, any]): arguments to MongoDB beyond port, dir,
            and log-path. Keys are the argument name without "--".

    Returns:
        The best result as returned by ``Study.get_best_result``.

    Raises:
        ValueError: if neither ``command`` nor ``filename`` is given.
    """
    # BUG FIX: the timestamped default used to be evaluated once at import
    # time, so every optimize() call in the same process shared a single
    # output directory. Compute it per call instead.
    if output_dir is None:
        output_dir = './output_' + str(
            datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    # Avoid the mutable-default-argument pitfall for mongodb_args.
    if mongodb_args is None:
        mongodb_args = {}
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if not scheduler.output_dir:
        scheduler.output_dir = output_dir
    if verbose == 0:
        # Silence per-trial submit messages and database chatter.
        logger.setLevel(level=logging.INFO)
        logging.getLogger('dblogger').setLevel(level=logging.WARNING)
    study = Study(parameters=parameters,
                  algorithm=algorithm,
                  lower_is_better=lower_is_better,
                  stopping_rule=stopping_rule,
                  dashboard_port=dashboard_port,
                  output_dir=output_dir,
                  disable_dashboard=disable_dashboard)
    if command:
        runner_command = shlex.split(command)
    elif filename:
        runner_command = ['python', filename]
    else:
        raise ValueError("Need to provide either command or filename.")
    if load:
        # NOTE(review): Study does not define a ``load`` method in this
        # module (only ``load_dashboard``); load=True presumably fails here.
        # Kept as-is pending a proper implementation — confirm.
        study.load()
    if not db_port:
        db_port = _port_finder(27001, 27050)
    # The database context manager starts MongoDB on entry and shuts it
    # down on exit, even if the run loop raises.
    with _Database(db_dir=output_dir, port=db_port,
                   reinstantiated=load, mongodb_args=mongodb_args) as db:
        runner = _Runner(study=study,
                         scheduler=scheduler,
                         database=db,
                         max_concurrent=max_concurrent,
                         command=runner_command,
                         resubmit_failed_trials=resubmit_failed_trials)
        runner.run_loop()
    return study.get_best_result()
def run_dashboard(path):
    """
    Run the dashboard from a previously run optimization.

    Args:
        path (str): the output dir of the previous optimization.

    Returns:
        sherpa.core.Study: the loaded study whose dashboard process is
        running. (Previously the study was created and immediately
        discarded, leaving the caller no handle to inspect results or
        stop the dashboard process.)
    """
    return Study.load_dashboard(path)
def _port_finder(start, end):
    """
    Helper function to find free port in range.

    Args:
        start (int): start point of port range.
        end (int): end point of port range.

    Returns:
        int: the first port in ``[start, end)`` that accepts no connection
        (i.e. appears free), or ``None`` when every port is in use.

    Raises:
        RuntimeError: if the hostname cannot be resolved or a socket error
            occurs while probing.
    """
    def check_socket(host, port):
        # connect_ex returns 0 when something is already listening there.
        with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            if sock.connect_ex((host, port)) == 0:
                return False
            else:
                return True
    try:
        hostname = socket.gethostname()
        for port in range(start, end):
            if check_socket(hostname, port):
                return port
    except socket.gaierror:
        # BUG FIX: this used to raise BaseException, which escapes plain
        # ``except Exception`` handlers and is reserved for interpreter-level
        # exits (KeyboardInterrupt, SystemExit). RuntimeError is catchable
        # and still surfaces the failure to callers.
        raise RuntimeError('Hostname could not be resolved. Exiting')
    except socket.error:
        raise RuntimeError("Couldn't connect to server")
class Parameter(object):
    """
    Defines a hyperparameter with a name, type and associated range.

    Args:
        name (str): the parameter name.
        range (list): either ``[low, high]`` or ``[value1, value2, value3]``.
        scale (str): `linear` or `log`, defines sampling from linear or
            log-scale. Not defined for all parameter types.
    """
    def __init__(self, name, range):
        assert isinstance(name, str), "Parameter-Name needs to be a string."
        assert isinstance(range, list), "Parameter-Range needs to be a list."
        self.name = name
        self.range = range

    @staticmethod
    def from_dict(config):
        """
        Returns a parameter object according to the given dictionary config.

        Args:
            config (dict): parameter config with keys ``name``, ``type``
                (one of ``continuous``/``discrete``/``choice``), ``range``,
                and optionally ``scale`` (``log`` to sample from log-scale).

        Returns:
            sherpa.core.Parameter: the parameter range object.

        Raises:
            ValueError: for an unrecognized ``type`` value.
        """
        ptype = config.get('type')
        if ptype == 'continuous':
            return Continuous(name=config.get('name'),
                              range=config.get('range'),
                              scale=config.get('scale', 'linear'))
        if ptype == 'discrete':
            return Discrete(name=config.get('name'),
                            range=config.get('range'),
                            scale=config.get('scale', 'linear'))
        if ptype == 'choice':
            return Choice(name=config.get('name'),
                          range=config.get('range'))
        raise ValueError("Got unexpected value for type: {}".format(
            config.get('type')))

    @staticmethod
    def grid(parameter_grid):
        """
        Creates a list of parameters given a parameter grid.

        Args:
            parameter_grid (dict): maps hyperparameter names to lists of
                possible values, e.g. ``{'a': [1, 2], 'b': [0.1, 0.2]}``.

        Returns:
            list[sherpa.core.Parameter]: list of parameter ranges for SHERPA.
        """
        # Every grid entry becomes a Choice parameter over its values.
        return [Parameter.from_dict({'name': pname,
                                     'type': 'choice',
                                     'range': prange})
                for pname, prange in parameter_grid.items()]
class Continuous(Parameter):
    """
    A real-valued hyperparameter, sampled uniformly on a linear or log scale.
    """
    def __init__(self, name, range, scale='linear'):
        super(Continuous, self).__init__(name, range)
        self.scale = scale
        self.type = float
        if scale == 'log':
            assert all(r > 0. for r in range), "Range parameters must be " \
                                               "positive for log scale."

    def sample(self):
        """Draw one float uniformly from the configured range."""
        low, high = self.range[0], self.range[1]
        try:
            if self.scale != 'log':
                return rng.uniform(low=low, high=high)
            # Log scale: sample the exponent uniformly, then exponentiate.
            return 10 ** rng.uniform(low=numpy.log10(low),
                                     high=numpy.log10(high))
        except ValueError as e:
            raise ValueError("{} causes error {}".format(self.name, e))
class Discrete(Parameter):
    """
    An integer-valued hyperparameter, sampled on a linear or log scale.
    """
    def __init__(self, name, range, scale='linear'):
        super(Discrete, self).__init__(name, range)
        self.scale = scale
        self.type = int
        if scale == 'log':
            assert all(r > 0 for r in range), "Range parameters must be " \
                                              "positive for log scale."

    def sample(self):
        """Draw one integer from the configured range."""
        low, high = self.range[0], self.range[1]
        try:
            if self.scale != 'log':
                return rng.randint(low=low, high=high)
            # Log scale: sample the exponent uniformly, then truncate to int.
            return int(10 ** rng.uniform(low=numpy.log10(low),
                                         high=numpy.log10(high)))
        except ValueError as e:
            raise ValueError("{} causes error {}".format(self.name, e))
class Choice(Parameter):
    """
    An unordered categorical hyperparameter.
    """
    def __init__(self, name, range):
        super(Choice, self).__init__(name, range)
        # The parameter's value type is inferred from the first option.
        self.type = type(self.range[0])

    def sample(self):
        """Pick one of the allowed values uniformly at random."""
        idx = rng.randint(low=0, high=len(self.range))
        return self.range[idx]
class Ordinal(Parameter):
    """
    Ordinal parameter class: a categorical variable whose values carry an
    order (sampling itself is uniform, like Choice).
    """
    def __init__(self, name, range):
        super(Ordinal, self).__init__(name, range)
        # The parameter's value type is inferred from the first option.
        self.type = type(self.range[0])

    def sample(self):
        """Pick one of the allowed values uniformly at random."""
        idx = rng.randint(low=0, high=len(self.range))
        return self.range[idx]
class AlgorithmState(object):
    """
    Sentinel values an algorithm can return instead of a parameter dict:
    used internally to tell sherpa._Runner to wait for outstanding results
    (WAIT) or that the optimization has finished (DONE).
    """
    DONE = 'DONE'
    WAIT = 'WAIT'
| 31,945 | 36.060325 | 112 | py |
sherpa | sherpa-master/sherpa/database.py | """
SHERPA is a Python library for hyperparameter tuning of machine learning models.
Copyright (C) 2018 Lars Hertel, Peter Sadowski, and Julian Collado.
This file is part of SHERPA.
SHERPA is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SHERPA is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SHERPA. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import numpy
import pymongo
from pymongo import MongoClient
import subprocess
import time
import os
import socket
import warnings
try:
from subprocess import DEVNULL # python 3
except ImportError:
import os
DEVNULL = open(os.devnull, 'wb')
import sherpa
dblogger = logging.getLogger(__name__)
class _Database(object):
    """
    Manages a Mongo-DB for storing metrics and delivering parameters to trials.

    The Mongo-DB contains one database that serves as a queue of future trials
    and one to store results of active and finished trials.

    Attributes:
        dbpath (str): the path where Mongo-DB stores its files.
        port (int): the port on which the Mongo-DB should run.
        reinstantiated (bool): whether an instance of the MongoDB is being loaded.
        mongodb_args (dict): keyword arguments to MongoDB
    """
    def __init__(self, db_dir, port=27010, reinstantiated=False,
                 mongodb_args=None):
        self.client = MongoClient(port=port)
        self.db = self.client.sherpa
        # Mongo ObjectIds of rows already handed to the Study, so each row
        # is returned by get_new_results exactly once.
        self.collected_results = set()
        self.mongo_process = None
        self.dir = db_dir
        self.port = port
        self.reinstantiated = reinstantiated
        # Avoid the mutable-default-argument pitfall ({} shared across calls).
        self.mongodb_args = mongodb_args if mongodb_args is not None else {}

    def close(self):
        """Terminates the MongoDB sub-process started by ``start``."""
        print('Closing MongoDB!')
        self.mongo_process.terminate()

    def start(self):
        """
        Runs the DB in a sub-process.

        Raises:
            FileNotFoundError: if the ``mongod`` binary is not on PATH.
            EnvironmentError: if MongoDB exits right after startup.
        """
        # Translate user args to command-line flags; user-supplied dbpath/
        # logpath/port override the defaults, with a warning.
        args = {"--" + k: v for k, v in self.mongodb_args.items()}
        if "--dbpath" in args:
            warnings.warn("Writing MongoDB to custom path {} instead of "
                          "output dir {}".format(args["--dbpath"], self.dir),
                          UserWarning)
        else:
            args["--dbpath"] = self.dir
        if "--logpath" in args:
            warnings.warn("Writing MongoDB logs to custom path {} instead of "
                          "output dir {}".format(
                              args["--logpath"],
                              os.path.join(self.dir, "log.txt")),
                          UserWarning)
        else:
            args["--logpath"] = os.path.join(self.dir, "log.txt")
        if "--port" in args:
            warnings.warn("Starting MongoDB on port {} instead of "
                          "port {}. Set port via the db_port argument in "
                          "sherpa.optimize".format(args["--port"], self.port),
                          UserWarning)
        else:
            args["--port"] = str(self.port)
        dblogger.debug("Starting MongoDB...\nDIR:\t{}\nADDRESS:\t{}:{}".format(
            self.dir, socket.gethostname(), self.port))
        cmd = ['mongod']
        # BUG FIX: the original filter used ``item is not ''`` — an identity
        # comparison with a literal (SyntaxWarning on CPython >= 3.8 and
        # dependent on string interning). Use equality instead; empty-string
        # values denote valueless flags and must be dropped.
        cmd += [str(item) for keyvalue in args.items()
                for item in keyvalue if item != '']
        dblogger.debug("Starting MongoDB using command:{}".format(str(cmd)))
        try:
            self.mongo_process = subprocess.Popen(cmd)
        except FileNotFoundError as e:
            raise FileNotFoundError(str(e) + "\nCheck that MongoDB is installed and in PATH.")
        # Give mongod a moment to either come up or die, then verify.
        time.sleep(1)
        self.check_db_status()
        if self.reinstantiated:
            # Mark pre-existing rows as collected so they are not re-added.
            self.get_new_results()

    def check_db_status(self):
        """
        Checks whether database is still running.

        Raises:
            EnvironmentError: if the MongoDB process has exited.
        """
        status = self.mongo_process.poll()
        if status:
            raise EnvironmentError("Database exited with code {}".format(status))

    def get_new_results(self):
        """
        Checks database for new results.

        Returns:
            (list[dict]) where each dict is one row from the DB, with the
            Mongo ``_id`` field removed. Rows returned previously are skipped.
        """
        self.check_db_status()
        new_results = []
        for entry in self.db.results.find():
            result = entry
            mongo_id = result.pop('_id')
            if mongo_id not in self.collected_results:
                new_results.append(result)
                self.collected_results.add(mongo_id)
        return new_results
def enqueue_trial(self, trial):
"""
Puts a new trial in the queue for trial scripts to get.
"""
self.check_db_status()
trial = {'trial_id': trial.id,
'parameters': trial.parameters}
try:
t_id = self.db.trials.insert_one(trial).inserted_id
except pymongo.errors.InvalidDocument:
new_params = {}
for k, v in trial['parameters'].items():
if isinstance(v, numpy.int64):
v = int(v)
new_params[k] = v
trial['parameters'] = new_params
t_id = self.db.trials.insert_one(trial).inserted_id
def add_for_stopping(self, trial_id):
"""
Adds a trial for stopping.
In the trial-script this will raise an exception causing the trial to
stop.
Args:
trial_id (int): the ID of the trial to stop.
"""
self.check_db_status()
dblogger.debug("Adding {} to DB".format({'trial_id': trial_id}))
self.db.stop.insert_one({'trial_id': trial_id}).inserted_id
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class Client(object):
    """
    Registers a session with a Sherpa Study via the port of the database.

    This class is used from trial-scripts only.

    Attributes:
        host (str): the host that runs the database. Passed host, host set via
            environment variable or 'localhost' in that order.
        port (int): port that database is running on. Passed port, port set via
            environment variable or 27010 in that order.
    """
    def __init__(self, host=None, port=None, test_mode=False, **mongo_client_args):
        """
        Args:
            host (str): the host that runs the database. Generally not needed since
                the scheduler passes the DB-host as an environment variable.
            port (int): port that database is running on. Generally not needed since
                the scheduler passes the DB-port as an environment variable.
            test_mode (bool): mock the client, that is, get_trial returns a trial
                that is empty, keras_send_metrics accepts calls but does not do any-
                thing, as does send_metrics. Useful for trial script debugging.
        """
        self.test_mode = test_mode
        if not self.test_mode:
            host = host or os.environ.get('SHERPA_DB_HOST') or 'localhost'
            port = port or os.environ.get('SHERPA_DB_PORT') or 27010
            self.client = MongoClient(host, int(port), **mongo_client_args)
            self.db = self.client.sherpa

    def get_trial(self):
        """
        Returns the next trial from a Sherpa Study.

        Returns:
            sherpa.core.Trial: The trial to run.

        Raises:
            RuntimeError: if no trial with this script's SHERPA_TRIAL_ID is
                found after five attempts.
        """
        if self.test_mode:
            return sherpa.Trial(id=1, parameters={})
        assert os.environ.get('SHERPA_TRIAL_ID'), "Environment-variable SHERPA_TRIAL_ID not found. Scheduler needs to set this variable in the environment when submitting a job"
        trial_id = int(os.environ.get('SHERPA_TRIAL_ID'))
        t = None
        for _ in range(5):
            # BUGFIX: the original called next() on a generator over the
            # cursor; on an empty result set that raised StopIteration and
            # defeated the retry loop. find_one returns None instead, so
            # the retry/sleep logic actually runs.
            t = self.db.trials.find_one({'trial_id': trial_id})
            if t is not None:
                break
            time.sleep(10)
        if t is None:
            raise RuntimeError("No Trial Found!")
        return sherpa.Trial(id=t.get('trial_id'), parameters=t.get('parameters'))

    def send_metrics(self, trial, iteration, objective, context=None):
        """
        Sends metrics for a trial to database.

        Args:
            trial (sherpa.core.Trial): trial to send metrics for.
            iteration (int): the iteration e.g. epoch the metrics are for.
            objective (float): the objective value.
            context (dict): other metric-values.
        """
        if self.test_mode:
            return
        if context is None:
            context = {}
        # Convert float32 to float64 in a fresh dict so the caller's dict is
        # not mutated.
        # Note: Keras ReduceLROnPlateau callback requires float64.
        context = {k: numpy.float64(v) if type(v) == numpy.float32 else v
                   for k, v in context.items()}
        result = {'parameters': trial.parameters,
                  'trial_id': trial.id,
                  'objective': objective,
                  'iteration': iteration,
                  'context': context}
        self.db.results.insert_one(result)

    def keras_send_metrics(self, trial, objective_name, context_names=()):
        """
        Keras Callbacks to send metrics to SHERPA.

        Args:
            trial (sherpa.core.Trial): trial to send metrics for.
            objective_name (str): the name of the objective e.g. ``loss``,
                ``val_loss``, or any of the submitted metrics.
            context_names (list[str]): names of all other metrics to be
                monitored.
        """
        import keras.callbacks
        send_call = lambda epoch, logs: self.send_metrics(
            trial=trial,
            iteration=epoch,
            objective=logs[objective_name],
            context={n: logs[n] for n in context_names})
        return keras.callbacks.LambdaCallback(on_epoch_end=send_call)
| 10,144 | 36.161172 | 177 | py |
sherpa | sherpa-master/examples/parallel-examples/bianchini/bianchini.py | # Train simple network on 2D data.
# Author: Peter Sadowski
from __future__ import print_function
import numpy as np
import sherpa
from keras.models import Model
from keras.layers import Dense, Input
from keras.optimizers import SGD
def dataset_bianchini(batchsize, k=1):
    '''
    Infinite generator for the synthetic data set of Bianchini et al. 2014,
    in which k controls the Betti numbers of the decision region.
    Input: 2D real values in [-1, 1]^2. Output: binary {0,1} labels.
    f = g(t_k(x)), where g=1-||x||^2, t_1(x)=[1-2*x_1^2, 1-2*x_2^2], t_k = t * t_{k-1}
    '''
    def squash(v):
        # t(x) = 1 - 2 x^2, applied elementwise.
        return 1. - 2. * (v ** 2)

    def readout(v):
        # g(x) = 1 - ||x||_2^2
        return 1. - np.linalg.norm(v, ord=2) ** 2

    def target_fn(v):
        # Compose t with itself k times, then apply g.
        for _ in range(k):
            v = squash(v)
        return readout(v)

    while True:
        inputs = np.random.uniform(low=-1., high=1.0, size=(batchsize, 2))
        labels = (np.apply_along_axis(target_fn, axis=1, arr=inputs) > 0.0).astype('float32')
        yield {'input': inputs}, {'output': labels}
def define_model(params):
    '''
    Return a compiled Keras model built from the hyperparameters in ``params``.

    Expected keys: 'act' (hidden activation name), 'arch' (iterable of
    hidden-layer widths), 'lrinit', 'momentum', 'lrdecay' (SGD settings).
    Architecture: 2 inputs -> dense hidden layers -> 1 sigmoid output,
    trained with binary cross-entropy.
    '''
    nin = 2
    nout = 1
    units = 10  # NOTE(review): immediately overwritten by the 'arch' loop below; unused.
    nhlay = 2   # NOTE(review): unused.
    act = params['act']
    init = 'glorot_normal'
    input = Input(shape=(nin,), dtype='float32', name='input')  # shadows builtin input()
    x = input
    for units in params['arch']:
        x = Dense(units, kernel_initializer=init, activation=act)(x)
    output = Dense(nout, kernel_initializer=init, activation='sigmoid', name='output')(x)
    model = Model(inputs=input, outputs=output)
    # Learning Algorithm: plain SGD with momentum and (optional) lr decay.
    lrinit = params['lrinit']
    momentum = params['momentum']
    lrdecay = params['lrdecay']
    loss = {'output':'binary_crossentropy'}
    metrics = {'output':'accuracy'}
    loss_weights = {'output':1.0}
    optimizer = SGD(lr=lrinit, momentum=momentum, decay=lrdecay)
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics, loss_weights=loss_weights)
    return model
def main(client, trial):
    """Train one model with the trial's hyperparameters, streaming val_loss
    and val_acc back to SHERPA after every epoch."""
    # Create new model.
    model = define_model(trial.parameters)
    # Define dataset: infinite generators; k=3 fixes the topology of the
    # target decision region (see dataset_bianchini).
    gtrain = dataset_bianchini(batchsize=100, k=3)
    gvalid = dataset_bianchini(batchsize=100, k=3)
    model.fit_generator(gtrain,
                        steps_per_epoch=100,
                        validation_data=gvalid,
                        validation_steps=10,
                        callbacks=[client.keras_send_metrics(trial, objective_name='val_loss', context_names=['val_acc'])],
                        epochs = trial.parameters['epochs'],
                        verbose=2)
if __name__ == '__main__':
    # Entry point when launched as a SHERPA trial script: fetch this trial's
    # hyperparameters from the database and train one model.
    client = sherpa.Client()
    trial = client.get_trial()
    main(client, trial)
| 2,585 | 31.734177 | 123 | py |
sherpa | sherpa-master/examples/parallel-examples/mnistmlp/trial.py | from __future__ import print_function
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import sherpa
import keras
from keras.models import Model
from keras.layers import Dense, Input, Dropout
from keras.optimizers import SGD
from keras.datasets import mnist
def define_model(params):
    """
    Return a compiled Keras MLP for 10-class classification of flattened
    28x28 images (784 inputs), built from the hyperparameters in ``params``.

    Recognized keys (all optional): 'act', 'arch' (hidden-layer widths),
    'dropout', 'lrinit', 'momentum', 'lrdecay'.
    """
    nin = 784
    nout = 10
    act = params.get('act', 'relu')
    init = 'glorot_normal'
    arch = params.get('arch', [100, 100])
    dropout = params.get('dropout')
    input = Input(shape=(nin,), dtype='float32', name='input')  # shadows builtin input()
    x = input
    for units in arch:
        x = Dense(units, kernel_initializer=init, activation=act)(x)
        # Dropout only when a (truthy) rate was supplied.
        if dropout:
            x = Dropout(dropout)(x)
    output = Dense(nout, kernel_initializer=init, activation='softmax', name='output')(x)
    model = Model(inputs=input, outputs=output)
    # Learning Algorithm: SGD with momentum and (optional) lr decay.
    lrinit = params.get('lrinit', 0.02)
    momentum = params.get('momentum', 0.7)
    lrdecay = params.get('lrdecay', 0.)
    loss = {'output':'categorical_crossentropy'}
    metrics = {'output':'accuracy'}
    loss_weights = {'output':1.0}
    optimizer = SGD(lr=lrinit, momentum=momentum, decay=lrdecay)
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics, loss_weights=loss_weights)
    return model
def main(client, trial):
    """Load MNIST, build a model from the trial's hyperparameters and train
    it, streaming val_loss/val_acc back to SHERPA after every epoch."""
    batch_size = 32
    num_classes = 10
    epochs = trial.parameters.get('epochs', 15)
    # the data, shuffled and split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # Flatten images to 784-vectors and scale pixel values to [0, 1].
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)
    # Create new model.
    model = define_model(trial.parameters)
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=2,
              callbacks=[client.keras_send_metrics(trial,
                                                   objective_name='val_loss',
                                                   context_names=['val_acc'])],
              validation_data=(x_test, y_test))
if __name__=='__main__':
    # Entry point when launched as a SHERPA trial script: fetch this trial's
    # hyperparameters from the database and train one model.
    client = sherpa.Client()
    trial = client.get_trial()
    main(client, trial)
| 2,619 | 30.95122 | 93 | py |
sherpa | sherpa-master/examples/parallel-examples/mnistcnnpbt/mnist_cnn.py | '''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
CONFIG = tf.ConfigProto(device_count = {'GPU': 1}, log_device_placement=False, allow_soft_placement=False)
CONFIG.gpu_options.allow_growth = True # Prevents tf from grabbing all gpu memory.
sess = tf.Session(config=CONFIG)
from keras import backend as K
K.set_session(sess)
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from keras.models import load_model
import numpy as np
################## Sherpa trial ##################
import sherpa
client = sherpa.Client()
trial = client.get_trial() # contains ID and parameters
##################################################
batch_size = trial.parameters['batch_size']
num_classes = 10
epochs = trial.parameters['epochs']
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if trial.parameters.get('load_from', '') == '':
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(trial.parameters.get('dropout', 0.25)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(2*trial.parameters.get('dropout', 0.25)))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.SGD(lr=trial.parameters['lr'],
momentum=trial.parameters.get('momentum', 0.7),
decay=trial.parameters.get('lr_decay', 0.)),
metrics=['accuracy'])
else:
model = load_model(os.path.join('./output', trial.parameters['load_from']))
K.set_value(model.optimizer.lr, trial.parameters['lr'])
assert np.isclose(K.get_value(model.optimizer.lr), trial.parameters['lr'])
K.set_value(model.optimizer.momentum, trial.parameters['momentum'])
assert np.isclose(K.get_value(model.optimizer.momentum), trial.parameters['momentum'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
callbacks=[client.keras_send_metrics(trial, objective_name='val_loss', context_names=['val_acc'])],
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
model.save(os.path.join('./output', trial.parameters['save_to'])) | 3,873 | 37.74 | 109 | py |
sherpa | sherpa-master/examples/parallel-examples/fashion_mnist_benchmark/fashion_mlp.py | '''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
from __future__ import print_function
import os
import time
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import sherpa
import keras
from keras.datasets import fashion_mnist
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten
from keras.optimizers import SGD
client = sherpa.Client()
trial = client.get_trial()
output_dir = os.environ.get("SHERPA_OUTPUT_DIR", '/tmp/')
# Loading data
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train, x_test = x_train/255.0, x_test/255.0
# Set the number of epochs
max_epochs = 26
if 'generation' in trial.parameters:
epochs = trial.parameters['generation']
initial_epoch = trial.parameters['generation']-1
elif 'resource' in trial.parameters:
resource_unit = max_epochs//13
initial_epoch = {1: 0, 3: 1, 9: 4}[trial.parameters['resource']] * resource_unit
epochs = trial.parameters['resource'] * resource_unit + initial_epoch
else:
epochs = max_epochs
initial_epoch = 0
# Load or create model
if trial.parameters.get('load_from', '') != '':
load_path = os.path.join(output_dir, trial.parameters['load_from'] + ".hdf5")
model = load_model(load_path)
else:
model = Sequential([Flatten(input_shape=(28, 28)),
Dense(512, activation='relu'),
Dropout(trial.parameters['dropout']),
Dense(512, activation='relu'),
Dropout(trial.parameters['dropout']),
Dense(10, activation='softmax')])
optimizer = SGD(lr=trial.parameters['learning_rate'], momentum=trial.parameters['momentum'], decay=trial.parameters.get('decay', 0.))
model.compile(loss='sparse_categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
# Train model
history = model.fit(x_train, y_train,
batch_size=int(trial.parameters['batch_size']),
epochs=epochs,
verbose=2,
callbacks=[client.keras_send_metrics(trial,
objective_name='val_acc',
context_names=['val_loss', 'loss', 'acc'])],
validation_data=(x_test, y_test),
initial_epoch=initial_epoch)
if 'save_to' in trial.parameters:
save_path = os.path.join(output_dir, trial.parameters['save_to'] + ".hdf5")
model.save(save_path)
| 2,681 | 36.25 | 137 | py |
sherpa | sherpa-master/tests/test_database.py | import os
import pytest
import sys
import sherpa
import sherpa.core
import sherpa.schedulers
import sherpa.database
try:
import unittest.mock as mock
except ImportError:
import mock
import logging
import time
import warnings
from testing_utils import *
def test_database(test_dir):
    """End-to-end round trip: start a MongoDB, enqueue a trial, fetch it via
    a Client, submit metrics and read them back; then verify that starting a
    second DB on the same dbpath/port raises an OSError."""
    test_trial = get_test_trial()
    testlogger.debug(test_dir)
    db_port = sherpa.core._port_finder(27000, 28000)
    with sherpa.database._Database(test_dir, port=db_port) as db:
        time.sleep(2)
        testlogger.debug("Enqueuing...")
        db.enqueue_trial(test_trial)
        testlogger.debug("Starting Client...")
        client = sherpa.Client(port=db_port,
                               connectTimeoutMS=100,
                               serverSelectionTimeoutMS=1000)
        testlogger.debug("Getting Trial...")
        # get_trial reads the trial id from the environment, as the
        # scheduler would set it for a real trial job.
        os.environ['SHERPA_TRIAL_ID'] = '1'
        t = client.get_trial()
        assert t.id == 1
        assert t.parameters == {'a': 1, 'b': 2}
        testlogger.debug("Sending Metrics...")
        client.send_metrics(trial=t, iteration=1,
                            objective=0.1, context={'other_metric': 0.2})
        new_results = db.get_new_results()
        testlogger.debug(new_results)
        assert new_results == [{'context': {'other_metric': 0.2},
                                'iteration': 1,
                                'objective': 0.1,
                                'parameters': {'a': 1, 'b': 2},
                                'trial_id': 1}]
    # test that Sherpa raises correct error if MongoDB exits
    # (mongod fails to start on an already-locked dbpath)
    db2 = sherpa.database._Database(test_dir, port=db_port)
    with pytest.raises(OSError):
        db2.start()
def test_database_args(test_dir):
    """A custom mongodb_args 'port' should trigger a UserWarning (it
    conflicts with db_port) and still be forwarded to mongod."""
    custom_port = 26999
    testlogger.debug(test_dir)
    db_port = sherpa.core._port_finder(27000, 28000)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        with sherpa.database._Database(test_dir, port=db_port,
                                       mongodb_args={
                                           "port": custom_port}) as db:
            assert len(w) == 1
            assert issubclass(w[-1].category, UserWarning)
            assert "Set port via the db_port" in str(w[-1].message)
    # test that there is something running on that port
    # (starting a second DB there must fail)
    db2 = sherpa.database._Database(test_dir, port=custom_port)
    with pytest.raises(OSError):
        db2.start()
def test_client_test_mode_send_metrics_does_nothing():
    """In test mode the client hands out a dummy trial and drops metrics."""
    mock_client = sherpa.Client(test_mode=True)
    dummy_trial = mock_client.get_trial()
    assert dummy_trial.id == 1
    assert dummy_trial.parameters == {}
    # Must be a no-op; in particular it must not need a DB connection.
    mock_client.send_metrics(trial=dummy_trial, iteration=1, objective=0.1)
@pytest.mark.skipif('keras' not in sys.modules,
                    reason="requires the Keras library")
def test_client_test_mode_keras_send_metrics_does_nothing():
    """Building the Keras callback in test mode must not touch a database."""
    mock_client = sherpa.Client(test_mode=True)
    dummy_trial = mock_client.get_trial()
    assert dummy_trial.id == 1
    assert dummy_trial.parameters == {}
    # Constructing the callback must succeed without a DB connection.
    callback = mock_client.keras_send_metrics(trial=dummy_trial,
                                              objective_name='val_acc',
                                              context_names=['val_loss',
                                                             'loss',
                                                             'acc'])
| 3,431 | 33.32 | 79 | py |
sherpa | sherpa-master/docs/conf.py | """
SHERPA is a Python library for hyperparameter tuning of machine learning models.
Copyright (C) 2018 Lars Hertel, Peter Sadowski, and Julian Collado.
This file is part of SHERPA.
SHERPA is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SHERPA is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SHERPA. If not, see <http://www.gnu.org/licenses/>.
"""
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from sphinx.apidoc import main
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = 'SHERPA'
copyright = '2018, Lars Hertel, Peter Sadowski, and Julian Collado'
author = 'Lars Hertel, Peter Sadowski, and Julian Collado'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'nbsphinx'
]
exclude_patterns = ['_build', '**.ipynb_checkpoints']
napoleon_google_docstring = True
napoleon_use_param = False
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'SHERPAdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SHERPA.tex', 'SHERPA Documentation',
'Lars Hertel, Peter Sadowski, and Julian Collado', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sherpa', 'SHERPA Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SHERPA', 'SHERPA Documentation',
author, 'SHERPA', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 6,552 | 30.965854 | 80 | py |
socks | socks-main/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import shutil
import os
import sys
sys.path.insert(0, os.path.abspath("sphinxext"))
from github_link import make_linkcode_resolve
from github_link import _get_git_revision
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "SOCKS"
copyright = "2021 - 2022, Adam Thorpe"
author = "Adam Thorpe"
# The full version, including alpha/beta/rc tags
release = _get_git_revision()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.linkcode",
"sphinxcontrib.bibtex",
"sphinx.ext.autodoc.typehints",
"sphinx_copybutton",
"sphinx_design",
"nbsphinx",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
html_theme_options = {
"sidebar_hide_name": False,
}
html_title = "SOCKS"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
mathjax3_config = {
"startup": {
"requireMap": {
"AMSmath": "ams",
"AMSsymbols": "ams",
"AMScd": "amscd",
"HTML": "html",
"noErrors": "noerrors",
"noUndefined": "noundefined",
}
},
"tex": {
"tagSide": "right",
},
}
autodoc_typehints = "description"
autodoc_mock_imports = [
"numpy",
"scipy",
"matplotlib",
"sklearn",
"gym",
"sacred",
"tqdm",
]
# bibtex
bibtex_bibfiles = ["bibliography.bib"]
# Code block and signature options.
copybutton_prompt_text = r">>> |\.\.\. "
copybutton_prompt_is_regexp = True
linkcode_resolve = make_linkcode_resolve(
"gym_socks",
"https://github.com/ajthor/socks/blob/{revision}/{package}/{path}#L{lineno}",
)
# nbsphinx configuration options
nbsphinx_custom_formats = {
".py": ["jupytext.reads", {"fmt": "py:percent"}],
}
shutil.copytree(
os.path.join("..", "examples"),
os.path.join("..", "docs/examples"),
dirs_exist_ok=True,
)
# This is processed by Jinja2 and inserted before each notebook
nbsphinx_prolog = r"""
{% set docname = env.doc2path(env.docname, base=None) %}
.. only:: html
.. role:: raw-html(raw)
:format: html
.. nbinfo::
Open an interactive version of this example on Binder:
:raw-html:`<a href="https://mybinder.org/v2/gh/ajthor/socks/{{ env.config.release|e }}?filepath={{ docname|e }}"><img alt="Binder badge" src="https://mybinder.org/badge_logo.svg" style="vertical-align:middle"></a>`
.. raw:: latex
\nbsphinxstartnotebook{\scriptsize\noindent\strut
\textcolor{gray}{The following section was generated from
\sphinxcode{\sphinxupquote{\strut {{ docname | escape_latex }}}} \dotfill}}
"""
# This is processed by Jinja2 and inserted after each notebook
nbsphinx_epilog = r"""
.. raw:: latex
\nbsphinxstopnotebook{\scriptsize\noindent\strut
\textcolor{gray}{\dotfill\ \sphinxcode{\sphinxupquote{\strut
{{ env.doc2path(env.docname, base='doc') | escape_latex }}}} ends here.}}
"""
rst_epilog = """
.. |release| replace:: {release}
""".format(
release=release
)
| 4,549 | 26.743902 | 222 | py |
perm_hmm | perm_hmm-master/example_scripts/plot_binned_histograms.py | import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)
import fire
import numpy as np
from scipy.special import logsumexp
import matplotlib.pyplot as plt
import torch
import pyro.distributions as dist
from example_systems.beryllium import dimensionful_gamma, expanded_transitions, expanded_initial, expanded_outcomes, N_STATES, DARK_STATE, BRIGHT_STATE
from perm_hmm.models.hmms import ExpandedHMM
from perm_hmm.binning import optimally_binned_consecutive, bin_log_histogram
def get_binned_histograms(time, num_bins, max_photons, steps):
    """Build the expanded Beryllium HMM for a given drive time, optimize
    consecutive photon-count bins, and return unbinned/binned per-state
    log-histograms.

    Args:
        time: Dimensionless interrogation time (caller multiplies by the
            decay rate, see plot_binned_histograms).
        num_bins: Number of consecutive bins to optimize over.
        max_photons: Number of photon-count outcomes tracked per state.
        steps: Number of steps passed to optimally_binned_consecutive.

    Returns:
        dict with keys "bin_edges" (array of bin boundaries),
        "unbinned_hists" (log-histograms, one row per state) and
        "binned_hists" (the same log-histograms aggregated into the bins).
    """
    initial_logits = expanded_initial(max_photons)
    transition_logits = expanded_transitions(time, max_photons)
    observation_dist = dist.Categorical(logits=torch.from_numpy(expanded_outcomes(max_photons)))
    expanded_hmm = ExpandedHMM(
        torch.from_numpy(initial_logits),
        torch.from_numpy(transition_logits),
        observation_dist,
    )
    edges, min_infidelity = optimally_binned_consecutive(expanded_hmm, num_bins, steps=steps)
    # Marginalize over the destination state axis, starting from photon
    # count 0, to get per-initial-state photon-count log-histograms.
    # NOTE(review): assumes transition_logits reshapes cleanly to
    # (N_STATES, max_photons, N_STATES, max_photons) -- confirm against
    # expanded_transitions in example_systems.beryllium.
    unbinned_hists = logsumexp(transition_logits.reshape((N_STATES, max_photons, N_STATES, max_photons))[:, 0, :, :], axis=-2)
    return {
        "bin_edges": edges.numpy(),
        "unbinned_hists": unbinned_hists,
        "binned_hists": bin_log_histogram(torch.tensor(unbinned_hists), edges).numpy(),
    }
def plot_binned_histograms(
        data_directory=None,
):
    """Generate, save and show the 2x2 binned/unbinned histogram figure.

    Writes ``binned_histograms.svg`` into ``data_directory`` (defaults to the
    current working directory).

    :param data_directory: Output directory for the figure.
    """
    if data_directory is None:
        data_directory = os.getcwd()
    # Fixed experiment parameters; time is made dimensionless via the decay rate.
    time = 5.39e-5 * dimensionful_gamma
    num_bins = 4
    max_photons = 15
    steps = 6
    data = get_binned_histograms(
        time,
        num_bins,
        max_photons,
        steps,
    )
    bin_edges = data["bin_edges"]
    unbinned_hist = data["unbinned_hists"]
    binned_hist = data["binned_hists"]
    fig = plt.figure()
    # Panels: (a)/(c) unbinned dark/bright, (b)/(d) binned dark/bright.
    [[ax1, ax2], [ax3, ax4]] = fig.subplots(2, 2)
    ax1.bar(np.arange(max_photons), np.exp(unbinned_hist[DARK_STATE]), color="C0")
    ax1.set_title("Dark unbinned")
    ax3.bar(np.arange(max_photons), np.exp(unbinned_hist[BRIGHT_STATE]), color="C1")
    ax3.set_title("Bright unbinned")
    ax2.bar(bin_edges[:-1], np.exp(binned_hist[DARK_STATE]), np.diff(bin_edges),
            align="edge", color="C0")
    ax2.set_title("Dark binned")
    ax4.bar(bin_edges[:-1], np.exp(binned_hist[BRIGHT_STATE]), np.diff(bin_edges),
            align="edge", color="C1")
    ax4.set_title("Bright binned")
    for ax in [ax3, ax4]:
        ax.set_xlim(0, max_photons)
        ax.set_xlabel("Number of photons")
    # NOTE(review): ax3 appears in both loops, so its xlim set above is
    # overridden by the (-0.5, max_photons) limit below.
    for ax in [ax1, ax3]:
        ax.set_xlim(-0.5, max_photons)
        ax.set_ylabel("Probability of detection")
    ax2.set_xlim(0, max_photons)
    for ax in [ax1, ax2, ax3, ax4]:
        ax.set_ylim([0, 1])
    # Mark the optimized bin boundaries on the binned panels.
    for edge in bin_edges:
        ax2.axvline(edge, color="k", linestyle="-")
        ax4.axvline(edge, color="k", linestyle="-")
    # Left-aligned panel labels; these occupy a separate title slot from the
    # centered titles set above.
    for ax, label in zip([ax1, ax2, ax3, ax4], ["(a)", "(b)", "(c)", "(d)"]):
        ax.set_title(label, loc="left")
    fig.suptitle(r"$\Delta t = {:.2f} \mu\mathrm{{s}}$".format(
        time / dimensionful_gamma * 1e6))
    plt.tight_layout()
    filename = os.path.join(data_directory, "binned_histograms.svg")
    plt.savefig(filename)
    plt.show()
def main():
    """Configure matplotlib for LaTeX rendering, then expose the plotting
    function as a command-line interface via ``fire``."""
    plt.rc('text', usetex=True)
    serif_font = {'family': 'serif', 'size': 12, 'serif': ['computer modern roman']}
    sans_font = {'size': 12, 'sans-serif': ['computer modern sans-serif']}
    # Two successive rc('font') calls, matching the original configuration order.
    for font_settings in (serif_font, sans_font):
        plt.rc('font', **font_settings)
    plt.rcParams.update({'text.latex.preamble': r'\usepackage{amsfonts}'})
    fire.Fire(plot_binned_histograms)
if __name__ == '__main__':
main()
| 3,662 | 34.563107 | 151 | py |
perm_hmm | perm_hmm-master/example_scripts/beryllium_plot.py | import os
import sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)
parentdir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parentdir)
import fire
import numpy as np
import matplotlib.pyplot as plt
import torch
import pyro.distributions as dist
from adapt_hypo_test.two_states.util import log1mexp
from example_systems.beryllium import DARK_STATE, BRIGHT_STATE, N_STATES, dimensionful_gamma, expanded_permutations, simplest_perms, expanded_initial, expanded_outcomes, expanded_transitions, log_prob_n_given_l, unbinned_hists
from perm_hmm.simulator import HMMSimulator
from perm_hmm.util import log_tvd
from perm_hmm.models.hmms import ExpandedHMM
from perm_hmm.policies.exhaustive import SplitExhaustivePolicy, ExhaustivePolicy
from perm_hmm.policies.min_tree import MinEntPolicy
from perm_hmm.policies.belief import HMMBeliefState
from perm_hmm.simulator import HMMSimulator
from perm_hmm.binning import optimally_binned_consecutive, binned_expanded_hmm
def run_exhaustive_experiment(hmm, perms, steps, verbosity=0):
    """Compute the value of the optimal permutation policy by exhaustive search.

    :param hmm: The (expanded) HMM.
    :param perms: Allowed permutations, one per row.
    :param steps: Number of measurement steps.
    :param verbosity: 0 returns only the log value; >= 1 also includes the
        permutation tree; > 1 additionally includes the belief tree.
    :return: dict with key ``b'log_value'`` and, depending on ``verbosity``,
        ``b"perms"`` and ``b"beliefs"``.
    """
    root_belief = HMMBeliefState.from_expandedhmm(hmm)
    exhaustive_policy = ExhaustivePolicy(perms, hmm, steps, root_belief=root_belief, save_history=False)
    # is_cost_func=False: the tree is scored by a value to maximize --
    # presumably the log probability of correct classification, as downstream
    # code takes log1mexp of it to get an error rate.
    log_value = exhaustive_policy.compute_perm_tree(return_log_costs=True, delete_belief_tree=False, is_cost_func=False)
    result = {
        b'log_value': log_value,
    }
    if verbosity:
        result[b"perms"] = exhaustive_policy.perm_tree
    if verbosity > 1:
        result[b"beliefs"] = exhaustive_policy.belief_tree
    return result
def run_min_ent_experiment(hmm, perms, steps, verbosity=0):
    """Classify all outcome strings under the greedy minimum-entropy policy.

    :param hmm: The (expanded) HMM.
    :param perms: Allowed permutations.
    :param steps: Number of measurement steps.
    :param verbosity: Passed to ``HMMSimulator.all_classifications``.
    :return: Whatever ``all_classifications`` returns; at verbosity 1 the
        bulky entries are stripped from the returned dict first.
    """
    sim = HMMSimulator(hmm)
    root_belief = HMMBeliefState.from_expandedhmm(hmm)
    policy = MinEntPolicy(perms, hmm, look_ahead=2, root_belief=root_belief)
    retval = sim.all_classifications(steps, perm_policy=policy, verbosity=verbosity)
    if verbosity == 1:
        # Drop large intermediate arrays to keep saved results small.
        retval[1].pop(b"history", None)
        retval[1].pop(b"posterior_log_initial_state_dist", None)
    return retval
def run_no_perm_experiment(hmm, steps, verbosity=0):
    """Classify all outcome strings without applying any permutations.

    :param hmm: The (expanded) HMM.
    :param steps: Number of measurement steps.
    :param verbosity: Passed to ``HMMSimulator.all_classifications``.
    :return: Whatever ``all_classifications`` returns; at verbosity 1 the
        posterior distribution entry is stripped from the returned dict.
    """
    sim = HMMSimulator(hmm)
    retval = sim.all_classifications(steps, verbosity=verbosity)
    if verbosity == 1:
        retval[1].pop(b"posterior_log_initial_state_dist", None)
    return retval
def run_histogram_method(time):
    """Log misclassification rate of the plain histogram (threshold) method.

    Computes ``log(1 - (1 + TVD)/2)``, where TVD is the total variation
    distance between the dark- and bright-state photon-count histograms for
    total integration time ``time``.
    """
    dark_bright_histograms = unbinned_hists(time, max_photons=50)
    return log1mexp(np.logaddexp(0, log_tvd(dark_bright_histograms[0], dark_bright_histograms[1])) - np.log(2))
def run_experiment(hmm, perms, steps, verbosity=0):
    """Run all three strategies (exhaustive, min-entropy, no-permutation) on
    one HMM and collect their results.

    :return: dict keyed by strategy name (``b"exhaustive"``,
        ``b"min_entropy"``, ``b"no_perms"``).
    """
    strategy_results = {}
    strategy_results[b"exhaustive"] = run_exhaustive_experiment(
        hmm, perms, steps, verbosity=verbosity)
    strategy_results[b"min_entropy"] = run_min_ent_experiment(
        hmm, perms, steps, verbosity=verbosity)
    strategy_results[b"no_perms"] = run_no_perm_experiment(
        hmm, steps, verbosity=verbosity)
    return strategy_results
def make_filename(path, params):
    """Build the output filename for a saved experiment result.

    :param path: Existing directory the file will live in.
    :param params: dict with (at least) the byte-string keys
        ``b"dimensionful_time"`` and ``b"steps"``.
    :return: ``path/time_<t>_steps_<n>.pt``.
    :raises FileNotFoundError: if ``path`` does not exist.  (Previously an
        ``assert``, which is silently stripped when Python runs with ``-O``.)
    """
    if not os.path.exists(path):
        raise FileNotFoundError("No such directory: {!r}".format(path))
    fn = os.path.join(path, "time_{:.3e}_steps_{}.pt".format(params[b"dimensionful_time"],
                                                             params[b"steps"]))
    return fn
def setup_experiment(time, steps, max_photons=15, num_bins=4):
    r"""Given an integration time, bins the output distributions optimally for
    discrimination, then returns everything used to compute that, along with
    the resulting binned HMM.

    :param time: Dimensionless integration time per step.
    :param steps: Number of measurement steps.
    :param max_photons: Photon-count cutoff of the expanded state space.
    :param num_bins: Number of consecutive outcome bins.
    :return: dict with keys ``b"hmm"``, ``b"time"``, ``b"dimensionful_time"``,
        ``b"max_photons"``, ``b"num_bins"``, ``b"steps"`` and
        ``b"histogram_result"`` (the histogram-method baseline for the total
        time ``time*steps``).
    """
    initial_logits = expanded_initial(max_photons)
    transition_logits = expanded_transitions(time, max_photons)
    observation_dist = dist.Categorical(logits=torch.from_numpy(expanded_outcomes(max_photons)))
    expanded_hmm = ExpandedHMM(
        torch.from_numpy(initial_logits),
        torch.from_numpy(transition_logits),
        observation_dist,
    )
    edges, min_infidelity = optimally_binned_consecutive(expanded_hmm, num_bins, steps=steps)
    hmm = binned_expanded_hmm(expanded_hmm, edges)
    retval = {b"hmm": hmm, b"time": time,
              b"dimensionful_time": time / dimensionful_gamma,
              b"max_photons": max_photons, b"num_bins": num_bins,
              b"steps": steps, b"histogram_result": run_histogram_method(time*steps)}
    return retval
def do_experiment(num_bins=3,
                  steps=2,
                  max_dim_tot_time=3.3e-4,
                  num_time_points=4,
                  verbosity=0):
    """Sweep the per-step integration time and run all strategies at each point.

    :param num_bins: Number of outcome bins.
    :param steps: Number of measurement steps.
    :param max_dim_tot_time: Maximum total dimensionful experiment time --
        presumably in seconds, given the multiplication by
        ``dimensionful_gamma``; confirm units.
    :param num_time_points: Number of integration times in the sweep.
    :param verbosity: Passed through to the experiment runners.
    :return: list of dicts (one per time point) with keys ``b"params"`` and
        ``b"result"``.
    """
    max_tot_time = max_dim_tot_time * dimensionful_gamma
    max_integ_time = max_tot_time / steps
    inc = max_integ_time / num_time_points
    time_range = np.linspace(inc, max_integ_time, num_time_points, endpoint=True)
    results = []
    for time in time_range:
        sub_params = setup_experiment(time, steps, num_bins=num_bins)
        sub_params[b"perms"] = torch.from_numpy(expanded_permutations(simplest_perms(), k=num_bins))
        hmm = sub_params[b"hmm"]
        perms = sub_params[b"perms"]
        steps = sub_params[b"steps"]
        data = run_experiment(hmm, perms, steps, verbosity=verbosity)
        result = {
            b"params": sub_params,
            b"result": data,
        }
        # Attach the histogram-method baseline alongside the other strategies.
        result[b"result"][b"histogram"] = sub_params[b"histogram_result"]
        results.append(result)
    return results
def preprocess_results(results):
    """Convert raw sweep results into base-10 log misclassification rates.

    NOTE(review): assumes the runners were called with verbosity 0 so that
    postprocessors are stored directly (not as tuples).

    :param results: The list returned by :func:`do_experiment`.
    :return: dict of numpy arrays keyed by strategy, plus sweep metadata.
    """
    steps = results[0][b"params"][b"steps"]
    bins = results[0][b"params"][b"num_bins"]
    dim_times = np.array([r[b"params"][b"dimensionful_time"] for r in results])
    no_perm_rates = np.array([r[b"result"][b"no_perms"].log_misclassification_rate().numpy()/np.log(10) for r in results])
    min_ent_rates = np.array([r[b"result"][b"min_entropy"].log_misclassification_rate().numpy()/np.log(10) for r in results])
    # The exhaustive runner stores a log success probability; 1 - p is the error.
    exhaustive_rates = np.array([log1mexp(r[b"result"][b"exhaustive"][b"log_value"][0][0])/np.log(10) for r in results])
    histogram_rates = np.array([r[b"result"][b"histogram"]/np.log(10) for r in results])
    return {
        b"steps": steps,
        b"bins": bins,
        b"dim_times": dim_times,
        b"no_perm_rates": no_perm_rates,
        b"min_ent_rates": min_ent_rates,
        b"exhaustive_rates": exhaustive_rates,
        b"histogram_rates": histogram_rates,
    }
def plot_results(results):
    """Plot misclassification rates vs. total time, split into three panels,
    one per third of the time sweep.

    :param results: The dict returned by :func:`preprocess_results`.
    :return: The matplotlib figure.
    """
    dim_times = results[b"dim_times"]
    no_perm_rates = results[b"no_perm_rates"]
    min_ent_rates = results[b"min_ent_rates"]
    exhaustive_rates = results[b"exhaustive_rates"]
    histogram_rates = results[b"histogram_rates"]
    steps = results[b"steps"]
    bins = results[b"bins"]
    # Convert to microseconds for the axis labels.
    dim_times = dim_times * 1e6
    # Plot the results
    fig = plt.figure(figsize=(8.5, 4))
    axs = fig.subplots(1, 3)
    for i, label in enumerate(["(a)", "(b)", "(c)"]):
        time_range = dim_times[
            i * len(dim_times) // 3:(i + 1) * len(dim_times) // 3]
        ax = axs[i]
        ax.plot(time_range, exhaustive_rates[
            i * len(dim_times) // 3:(i + 1) * len(
                dim_times) // 3], label='Exhaustive')
        ax.plot(time_range, min_ent_rates[
            i * len(dim_times) // 3:(i + 1) * len(
                dim_times) // 3], dashes=[1, 1],
            label='Min Entropy')
        ax.plot(time_range, no_perm_rates[
            i * len(dim_times) // 3:(i + 1) * len(
                dim_times) // 3], linestyle="--",
            label='No Perms')
        ax.plot(time_range, histogram_rates[
            i * len(dim_times) // 3:(i + 1) * len(
                dim_times) // 3], linestyle="-.",
            label='Histogram')
        ax.set_xlabel(r"Total time ($\mu$s)")
        ax.set_title(label, loc="left")
        if i == 0:
            ax.set_ylabel(r"$\log_{{10}}(\mathbb{{P}}(\hat{{L}}_1 \neq L_1))$")
    # Outside the loop: only the last panel carries the legend.
    ax.legend()
    fig.suptitle("{} Bins, {} Steps".format(bins, steps))
    plt.tight_layout()
    return fig
def make_and_plot_data(
        num_bins=3,
        steps=2,
        max_dim_tot_time=3.3e-4,
        num_time_points=9,
        verbosity=0,
        data_directory=None,
):
    """Run the full time sweep, save the raw results to disk, and save/show
    the comparison figure.

    :param num_bins: Number of outcome bins.
    :param steps: Number of measurement steps.
    :param max_dim_tot_time: Maximum total dimensionful experiment time.
    :param num_time_points: Number of integration times in the sweep.
    :param verbosity: Passed through to the experiment runners.
    :param data_directory: Output directory; defaults to the current working
        directory.
    """
    results = do_experiment(num_bins=num_bins,
                            steps=steps,
                            max_dim_tot_time=max_dim_tot_time,
                            num_time_points=num_time_points,
                            verbosity=verbosity)
    if data_directory is None:
        data_directory = os.getcwd()
    # Persist raw results before any lossy post-processing.
    filename = os.path.join(data_directory, "bins_{}_steps_{}.pt".format(num_bins, steps))
    with open(filename, "wb") as f:
        torch.save(results, f)
    results = preprocess_results(results)
    fig = plot_results(results)
    plt.figure(fig.number)
    plt.savefig(os.path.join(data_directory, "bins_{}_steps_{}.svg".format(num_bins, steps)))
    plt.show()
def main():
    """Configure matplotlib for LaTeX output, then expose the experiment as a
    ``fire`` command-line interface."""
    plt.rc('text', usetex=True)
    font = {'family': 'serif', 'size': 12, 'serif': ['computer modern roman']}
    plt.rc('font', **font)
    font = {'size': 12, 'sans-serif': ['computer modern sans-serif']}
    plt.rc('font', **font)
    plt.rcParams.update({
        'text.latex.preamble': r'\usepackage{amsfonts}',
    })
    fire.Fire(make_and_plot_data)
if __name__ == '__main__':
main() | 9,318 | 38.155462 | 226 | py |
perm_hmm | perm_hmm-master/example_scripts/exhaustive_three_states.py | import os
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)
import fire
import numpy as np
import torch
from perm_hmm.simulator import HMMSimulator
from perm_hmm.util import log1mexp
from perm_hmm.policies.exhaustive import ExhaustivePolicy
from perm_hmm.util import id_and_transpositions
from example_systems.three_states import three_state_hmm
def run_exhaustive_experiment(hmm, perms, steps, verbosity=0):
    """Compute the value of the optimal permutation policy by exhaustive search.

    Unlike the beryllium variant, no explicit root belief is supplied here.

    :param hmm: The HMM to run the policy on.
    :param perms: Allowed permutations, one per row.
    :param steps: Number of measurement steps.
    :param verbosity: 0 returns only the log value; >= 1 also includes the
        permutation tree; > 1 additionally includes the belief tree.
    :return: dict with key ``b'log_value'`` and, depending on ``verbosity``,
        ``b"perms"`` and ``b"beliefs"``.
    """
    exhaustive_policy = ExhaustivePolicy(perms, hmm, steps,
                                         save_history=False)
    # is_cost_func=False: the returned quantity is a value to maximize --
    # presumably log P(correct), since callers take log1mexp of it.
    log_value = exhaustive_policy.compute_perm_tree(return_log_costs=True,
                                                    delete_belief_tree=False,
                                                    is_cost_func=False)
    result = {
        b'log_value': log_value,
    }
    if verbosity:
        result[b"perms"] = exhaustive_policy.perm_tree
    if verbosity > 1:
        result[b"beliefs"] = exhaustive_policy.belief_tree
    return result
def run_no_perm_experiment(hmm, steps, verbosity=0):
    """Classify all outcome strings without applying any permutations.

    :param hmm: The HMM.
    :param steps: Number of measurement steps.
    :param verbosity: Passed to ``HMMSimulator.all_classifications``.
    :return: Whatever ``all_classifications`` returns; at verbosity 1 the
        posterior distribution entry is stripped from the returned dict.
    """
    sim = HMMSimulator(hmm)
    retval = sim.all_classifications(steps, verbosity=verbosity)
    if verbosity == 1:
        retval[1].pop(b"posterior_log_initial_state_dist", None)
    return retval
def run_experiment(hmm, perms, steps, verbosity=0):
    """Run the exhaustive and no-permutation strategies on one HMM.

    :return: dict with keys ``b"exhaustive"`` and ``b"no_perms"``.
    """
    strategy_results = {}
    strategy_results[b"exhaustive"] = run_exhaustive_experiment(
        hmm, perms, steps, verbosity=verbosity)
    strategy_results[b"no_perms"] = run_no_perm_experiment(
        hmm, steps, verbosity=verbosity)
    return strategy_results
def six_step_experiment(
        verbosity=0,
):
    """Run the 6-step experiment over a 2-D grid of three-state parameters.

    The grid covers natural-log values of a and b in [-2*ln(10), 0), i.e.
    1e-2 up to (but excluding) 1 in each parameter, with 10 points each.

    :param verbosity: Passed through to the experiment runners.
    :return: list (over a) of lists (over b) of dicts with keys ``b"a"``,
        ``b"b"`` and ``b"result"``.
    """
    steps = 6
    min_log_a = -2 * np.log(10)
    min_log_b = -2 * np.log(10)
    num_grid = 10
    a_grid = np.linspace(min_log_a, 0, num_grid, endpoint=False)
    b_grid = np.linspace(min_log_b, 0, num_grid, endpoint=False)
    resultss = []
    for a in a_grid.flatten():
        results = []
        for b in b_grid.flatten():
            hmm = three_state_hmm(a, b)
            # Identity plus every transposition of the state labels.
            perms = id_and_transpositions(hmm.initial_logits.shape[-1])
            result = run_experiment(hmm, perms, steps, verbosity=verbosity)
            results.append({b"a": a, b"b": b, b"result": result})
        resultss.append(results)
    return resultss
def preprocess_six_step(six_step_result):
    """Convert the 6-step grid results to base-10 logs as 2-D numpy arrays.

    NOTE(review): assumes the runners were called with verbosity 0 so the
    no-perm entries are postprocessors (not tuples).

    :param six_step_result: The nested list from :func:`six_step_experiment`.
    :return: dict with keys ``b"a"``, ``b"b"``, ``b"no_perm"`` and
        ``b"exhaustive"``.
    """
    processed = {b"a": np.array([[x[b"a"]/np.log(10) for x in y] for y in six_step_result])}
    processed[b"b"] = np.array([[x[b"b"]/np.log(10)for x in y] for y in six_step_result])
    processed[b"no_perm"] = np.array(
        [[x[b"result"][b"no_perms"].log_misclassification_rate().numpy()/np.log(10) for x in y] for y in six_step_result]
    )
    # The exhaustive value is a log success probability; 1 - p gives the error.
    processed[b"exhaustive"] = np.array(
        [[log1mexp(x[b"result"][b"exhaustive"][b"log_value"][0][0]).numpy()/np.log(10) for x in y] for y in six_step_result]
    )
    return processed
def preprocess_two_step(two_step_result):
    """Convert the 2-step diagonal results from natural to base-10 logs.

    :param two_step_result: list of dicts with keys ``b"no_perm"`` and
        ``b"exhaustive"`` (natural-log rates).
    :return: dict of 1-D numpy arrays under the same keys.
    """
    ln10 = np.log(10)
    no_perm = np.array([entry[b"no_perm"] / ln10 for entry in two_step_result])
    exhaustive = np.array([entry[b"exhaustive"] / ln10 for entry in two_step_result])
    return {b"no_perm": no_perm, b"exhaustive": exhaustive}
def two_step_experiment(diagonal):
    """Run the 2-step experiment along the a == b diagonal.

    :param diagonal: 1-D array of natural-log parameter values; each value is
        used for both ``a`` and ``b``.
    :return: list of dicts with keys ``b"no_perm"`` and ``b"exhaustive"``,
        both natural-log misclassification rates.
    """
    results = []
    for a in diagonal:
        hmm = three_state_hmm(a, a)
        sim = HMMSimulator(hmm)
        no_perm_results = sim.all_classifications(2, verbosity=0)
        perms = id_and_transpositions(3)
        exhaustive_policy = ExhaustivePolicy(perms, hmm, 2, save_history=False)
        log_value = exhaustive_policy.compute_perm_tree(return_log_costs=True,
                                                        delete_belief_tree=False,
                                                        is_cost_func=False)
        result = {
            b"no_perm": no_perm_results.log_misclassification_rate().numpy(),
            # log_value is a log success probability; convert to an error rate.
            b"exhaustive": log1mexp(log_value[0][0].numpy()),
        }
        results.append(result)
    return results
def plot_data(six_step_results, two_step_results):
    """Two-panel figure: (a) infidelities along the a == b diagonal for both
    step counts, and (b) a contour map of the no-permutation vs. exhaustive
    infidelity difference (in log10, i.e. the log of their ratio).

    :param six_step_results: dict from :func:`preprocess_six_step`.
    :param two_step_results: dict from :func:`preprocess_two_step`.
    :return: The matplotlib figure.
    """
    # Matplotlib's default color cycle, spelled out explicitly.
    colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
    a = six_step_results[b"a"]
    b = six_step_results[b"b"]
    six_exhaustive = six_step_results[b'exhaustive']
    six_no_perm = six_step_results[b'no_perm']
    two_exhaustive = two_step_results[b'exhaustive']
    two_no_perm = two_step_results[b'no_perm']
    fig = plt.figure(figsize=(8, 3))
    ax1, ax2 = fig.subplots(1, 2)
    ax1.set_aspect('equal')
    ax1.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
    ax1.xaxis.set_major_formatter(FormatStrFormatter('%.2f'))
    ax2.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
    ax2.xaxis.set_major_formatter(FormatStrFormatter('%.1f'))
    ax2.set_xlabel('a')
    ax2.set_ylabel('b')
    # NOTE(review): this title (and the 'a'/'b' labels above) are overwritten
    # by the LaTeX-formatted versions set further down.
    ax2.set_title('No Permutation - Exhaustive')
    ax1.plot(np.diagonal(a), np.diagonal(six_no_perm), label='No Perm, $n = 6$', color=colors[0])
    ax1.plot(np.diagonal(a), two_no_perm, label="No Perm, $n = 2$", color=colors[0], linestyle='--')
    ax1.plot(np.diagonal(a), np.diagonal(six_exhaustive), label='Perm, $n = 6$', color=colors[1])
    ax1.plot(np.diagonal(a), two_exhaustive, label="Perm, $n = 2$", color=colors[1], linestyle='--')
    ax1.legend()
    ax1.set_xlabel(r'$\log_{10}(a) = \log_{10}(b)$')
    ax1.set_ylabel(r'$\log_{10}(\mathbb{P}(\hat{S_1} \neq S_1))$')
    ax1.set_title(r'Infidelities along $a = b$')
    ax1.text(-0.1, 1.15, r'(a)', transform=ax1.transAxes, fontsize=14, va='top', ha='right')
    cs = ax2.contourf(a, b, six_no_perm - six_exhaustive, cmap=plt.cm.binary, levels=8)
    # Mark the diagonal that panel (a) is drawn along.
    ax2.plot(np.diagonal(a), np.diagonal(b), 'r--')
    ax2.set_title(r'Infidelity ratios, $n = 6$')
    ax2.set_xlabel(r'$\log_{10}(a)$')
    ax2.set_ylabel(r'$\log_{10}(b)$')
    ax2.set_xticks(np.linspace(np.min(a), np.max(a), 8))
    ax2.set_yticks(np.linspace(np.min(b), np.max(b), 8))
    ax2.set_aspect('equal')
    # Panel (b) label; positioned in ax1's axes coordinates.
    ax2.text(1.45, 1.15, r'(b)', transform=ax1.transAxes, fontsize=14, va='top', ha='right')
    fig.colorbar(cs, shrink=.8, label=r'$\log_{{10}}(\mathbb{{P}}_{{\mathrm{{no\;perm}}}}(\hat{{S}}_1 \neq S_1)/\mathbb{{P}}_{{\mathrm{{perm}}}}(\hat{{S}}_1 \neq S_1))$')
    return fig
def do_all_experiments(
        verbosity=0,
        data_directory=None,
):
    """Run the 6-step grid and the 2-step diagonal experiments, save the raw
    results to disk, and save/show the comparison figure.

    :param verbosity: Passed through to the experiment runners.
    :param data_directory: Output directory; defaults to the current working
        directory.
    """
    # Resolve the output directory once up front; the original duplicated this
    # None-check after each experiment.
    if data_directory is None:
        data_directory = os.getcwd()
    six_res = six_step_experiment(
        verbosity=verbosity,
    )
    filename = os.path.join(data_directory, "three_state_six_steps.pt")
    with open(filename, "wb") as f:
        torch.save(six_res, f)
    six_res = preprocess_six_step(six_res)
    # The 2-step sweep reuses the a == b diagonal of the 6-step grid,
    # converted back from base-10 to natural logs.
    diagonal = np.diagonal(six_res[b"a"]) * np.log(10)
    two_res = two_step_experiment(
        diagonal,
    )
    filename = os.path.join(data_directory, "three_state_two_steps.pt")
    with open(filename, "wb") as f:
        torch.save(two_res, f)
    two_res = preprocess_two_step(two_res)
    fig = plot_data(six_res, two_res)
    plt.figure(fig.number)
    filename = os.path.join(data_directory, "three_state_plots.svg")
    plt.savefig(filename)
    plt.show()
def main():
    """Configure matplotlib for LaTeX output, then expose the experiment as a
    ``fire`` command-line interface."""
    plt.rc('text', usetex=True)
    font = {'family': 'serif', 'size': 12, 'serif': ['computer modern roman']}
    plt.rc('font', **font)
    font = {'size': 12, 'sans-serif': ['computer modern sans-serif']}
    plt.rc('font', **font)
    plt.rcParams.update({
        'text.latex.preamble': r'\usepackage{amsfonts}',
    })
    fire.Fire(do_all_experiments)
if __name__ == '__main__':
main()
| 7,615 | 35.792271 | 170 | py |
perm_hmm | perm_hmm-master/perm_hmm/loss_functions.py | r"""Loss functions for the
:py:class:`~perm_hmm.postprocessing.ExactPostprocessor` and
:py:class:`~perm_hmm.postprocessing.EmpiricalPostprocessor` classes.
"""
import torch
from perm_hmm.util import ZERO
def log_zero_one(state, classification):
    r"""Log zero-one loss.

    Returns ``log(int(classification != state))`` elementwise, with the log
    of zero clipped to ``log(ZERO)``.
    """
    wrong = classification != state
    loss = wrong.float()
    # Clip the zeros so the log is finite.
    loss[~wrong] = ZERO
    return loss.log()
def zero_one(state, classification):
    r"""Zero-one loss: ``True`` exactly where the classification is wrong."""
    return ~(classification == state)
def log_binary_zero_one(dark_state, bright_state):
    r"""
    Makes the identification 0 = ``dark_state``, 1 = ``bright_state`` then
    returns the log zero-one loss function for that.
    """
    state_map = torch.tensor([dark_state, bright_state], dtype=int)
    def _loss(state, classification):
        return log_zero_one(state, state_map[classification.long()])
    return _loss
def binary_zero_one(dark_state, bright_state):
    r"""
    Makes the identification 0 = ``dark_state``, 1 = ``bright_state`` then
    returns the zero-one loss function for that.
    """
    state_map = torch.tensor([dark_state, bright_state], dtype=int)
    def _loss(state, classification):
        return zero_one(state, state_map[classification.long()])
    return _loss
def mapped_log_zero_one(state, classification, alpha=None):
    r"""Log zero-one loss after mapping both arguments through ``alpha``.

    When ``alpha`` is ``None`` this reduces to plain ``log_zero_one``.
    """
    if alpha is not None:
        state, classification = alpha(state), alpha(classification)
    return log_zero_one(state, classification)
def expanded_alpha(state, num_outcomes=2):
    r"""Collapse an expanded state index to its underlying state index
    (floor division by the number of outcomes)."""
    underlying = state // num_outcomes
    return underlying
def expanded_log_zero_one(num_outcomes=2):
    r"""Log zero-one loss on the expanded state space.

    Two expanded states count as equal when they collapse to the same
    underlying state, i.e. the loss compares ``state // num_outcomes`` with
    ``classification // num_outcomes``.

    :param num_outcomes: Number of outcomes that dictates how the state space
        is expanded.
    :return: The loss function ``loss(state, classification)``.
    """
    def _collapse(s):
        return expanded_alpha(s, num_outcomes=num_outcomes)
    def _loss(state, classification):
        return mapped_log_zero_one(state, classification, _collapse)
    return _loss
| 2,495 | 27.363636 | 80 | py |
perm_hmm | perm_hmm-master/perm_hmm/simulator.py | """
Simulates the initial state discrimination experiment using different
methods, to compare the resulting error rates.
"""
import torch
from perm_hmm.util import num_to_data
from perm_hmm.postprocessing import ExactPostprocessor, EmpiricalPostprocessor
from perm_hmm.classifiers.perm_classifier import PermClassifier
class HMMSimulator(object):
    """
    Runs an experiment where data is generated by an HMM, then classified by
    a classifier.
    Instances of this class have the following attributes:
    ``phmm``:
        The :py:class:`~perm_hmm.models.hmms.PermutedDiscreteHMM` whose
        misclassification rates will be computed.
    """
    def __init__(self, phmm):
        """
        Initializes the experiment.
        :param perm_hmm.models.hmms.PermutedDiscreteHMM phmm:
            the model whose
            misclassification rate will be computed.
        """
        self.phmm = phmm
        """:py:class:`PermutedDiscreteHMM`
        The model whose misclassification rates we wish to analyze.
        """
    def all_classifications(self, num_steps, classifier=None, perm_policy=None, verbosity=0):
        """
        Computes the data required to compute the exact misclassification rate for the given classifier.
        This method always calls ``perm_policy.reset()`` if ``perm_policy`` is
        not ``None``.
        :param num_steps: Number of steps, int.
        :param classifier: Defaults to
            :py:class:`~perm_hmm.classifiers.perm_classifier.PermClassifier`,
            initialized with the hmm ``self.phmm``.
        :param perm_policy: Defaults to None. If specified, will call
            ``perm_policy.get_perms`` to compute the permutations.
        :param verbosity: If ``verbosity == 0``, only the
            :py:class:`~perm_hmm.postprocessing.ExactPostprocessor` needed to
            compute the misclassification rates is returned.
            If ``verbosity == 1``, this method returns a tuple, with the
            postprocessor as the first element, and a dictionary with keys
            ``b"posterior_log_initial_state_dist"`` and (when ``perm_policy``
            is given) ``b"perms"``.
            If ``verbosity > 1``, the dictionary additionally contains
            ``b"history"``: whatever is stored in ``perm_policy.calc_history``
            after calling ``perm_policy.get_perms``. Note that in that case
            the simulator calls ``perm_policy.reset(save_history=True)``
            before calling ``perm_policy.get_perms()``.
        """
        # Enumerate every possible observation string of length num_steps.
        base = len(self.phmm.observation_dist.enumerate_support())
        data = torch.stack(
            [num_to_data(num, num_steps, base) for num in range(base**num_steps)]
        ).float()
        if verbosity > 1:
            save_history = True
        else:
            save_history = False
        if classifier is None:
            classifier = PermClassifier(self.phmm)
        if perm_policy is not None:
            perm_policy.reset(save_history=save_history)
            perms = perm_policy.get_perms(data)
            if save_history:
                history = perm_policy.calc_history
            classi_result = classifier.classify(data, perms=perms, verbosity=verbosity)
        else:
            perms = None
            classi_result = classifier.classify(data, verbosity=verbosity)
        if verbosity:
            classifications, classi_dict = classi_result
            if perm_policy is not None:
                classi_dict[b"perms"] = perms
            # NOTE(review): if verbosity > 1 but perm_policy is None,
            # ``history`` is never assigned and this raises NameError;
            # ``simulate`` below guards with ``history = None`` -- confirm
            # whether the same guard is intended here.
            if save_history:
                classi_dict[b"history"] = history
        else:
            classifications = classi_result
        # Joint log probability of (initial state, observation string):
        # posterior initial-state dist plus the log likelihood of the data.
        lp = self.phmm.log_prob(data, perms)
        dist = self.phmm.posterior_log_initial_state_dist(data, perms)
        log_joint = dist.T + lp
        ep = ExactPostprocessor(
            log_joint,
            classifications,
        )
        if verbosity:
            return ep, classi_dict
        return ep
    def simulate(self, num_steps, num_samples, classifier=None, perm_policy=None, verbosity=0):
        """
        Computes the data required to compute the misclassification rates
        of the given classifier, from Monte Carlo samples of the HMM.
        This method always calls ``perm_policy.reset()`` if ``perm_policy`` is
        not ``None``.
        :param num_steps: Number of steps, int.
        :param num_samples: number of samples to draw from the hmm, int
        :param classifier: Defaults to
            :py:class:`~perm_hmm.classifiers.perm_classifier.PermClassifier`,
            initialized with the hmm ``self.phmm``.
        :param perm_policy: Defaults to None. If specified, will call
            ``self.hmm.sample(perm_policy=perm_policy)``.
        :param verbosity: If ``verbosity == 0``, only the
            :py:class:`~perm_hmm.postprocessing.EmpiricalPostprocessor` needed
            to compute the misclassification rates is returned.
            If ``verbosity == 1``, this method returns a tuple, with the
            postprocessor as the first element, and a dictionary with keys
            ``b"posterior_log_initial_state_dist"``, ``b"data"`` and (when
            ``perm_policy`` is given) ``b"perms"``.
            If ``verbosity > 1``, the dictionary additionally contains
            ``b"history"``: whatever is stored in ``perm_policy.calc_history``
            after calling ``perm_policy.get_perms``. Note that in that case
            the simulator calls ``perm_policy.reset(save_history=True)``
            before calling ``perm_policy.get_perms()``.
        """
        if verbosity > 1:
            save_history = True
        else:
            save_history = False
        if perm_policy is not None:
            perm_policy.reset(save_history=save_history)
        # The policy (when given) selects permutations online during sampling.
        output = self.phmm.sample((num_samples, num_steps), perm_policy=perm_policy)
        if perm_policy is not None:
            perms = perm_policy.perm_history
        else:
            perms = None
        history = None
        if save_history:
            if perm_policy is not None:
                history = perm_policy.calc_history
        data = output.observations
        if classifier is None:
            classifier = PermClassifier(self.phmm)
        if perms is not None:
            classi_result = classifier.classify(data, perms=perms, verbosity=verbosity)
        else:
            classi_result = classifier.classify(data, verbosity=verbosity)
        if verbosity:
            classifications, classi_dict = classi_result
            classi_dict[b"data"] = data
            if perm_policy is not None:
                classi_dict[b"perms"] = perms
            if history is not None:
                classi_dict[b"history"] = history
        else:
            classifications = classi_result
        # Ground truth is the sampled initial state (time index 0).
        ep = EmpiricalPostprocessor(
            output.states[..., 0],
            classifications,
        )
        if verbosity:
            return ep, classi_dict
        return ep
| 7,817 | 38.887755 | 104 | py |
perm_hmm | perm_hmm-master/perm_hmm/log_cost.py | r"""Log costs to be used with the
:py:class:`~perm_hmm.policies.min_tree.MinTreePolicy` class.
"""
import torch
def log_initial_entropy(log_probs: torch.Tensor):
    """
    Log of the Shannon entropy of the posterior initial-state distribution.

    ``log_probs`` has dimensions -1: s_k, -2: s_1; the initial-state marginal
    is obtained by logsumexp over the last axis.
    :param log_probs:
    :return:
    """
    log_marginal = log_probs.logsumexp(-1)
    # entropy = sum_s p(s) * (-log p(s)), accumulated in log space.
    return (log_marginal + (-log_marginal).log()).logsumexp(-1)
def log_renyi_entropy(log_probs: torch.Tensor, alpha: float):
    """
    Log of the order-``alpha`` Renyi entropy of the posterior initial-state
    distribution. ``log_probs`` has dimensions -1: s_k, -2: s_1.
    :param log_probs:
    :param alpha:
    :return:
    """
    log_marginal = log_probs.logsumexp(-1)
    # H_alpha = log(sum_s p(s)^alpha) / (1 - alpha); return its log.
    renyi = (alpha * log_marginal).logsumexp(-1) / (1 - alpha)
    return renyi.log()
def log_min_entropy(log_probs: torch.Tensor):
    """
    Log of the minimum (over initial states) surprisal of the posterior
    initial-state distribution, i.e. log of the min-entropy of the marginal.
    ``log_probs`` has dimensions -1: s_k, -2: s_1.
    :param log_probs:
    :return:
    """
    log_marginal = log_probs.logsumexp(-1)
    # log is monotone, so min of log(-log p) equals log of min(-log p).
    surprisal_logs = (-log_marginal).log()
    return surprisal_logs.min(-1).values
def min_entropy(log_probs):
    """Maximum log marginal initial-state probability (the negative of the
    min-entropy of the marginal). ``log_probs`` has dimensions -1: s_k,
    -2: s_1."""
    log_marginal = log_probs.logsumexp(-1)
    return log_marginal.max(-1).values
| 1,155 | 23.595745 | 97 | py |
perm_hmm | perm_hmm-master/perm_hmm/util.py | """This module includes a few utility functions.
"""
from functools import reduce
from operator import mul
import torch
import numpy as np
from scipy.special import logsumexp, expm1, log1p
ZERO = 10**(-14)
def bin_ent(logits_tensor):
    """Binary entropy of a tensor of independent log Bernoulli parameters.

    :param torch.Tensor logits_tensor:
        Log probabilities (all elements nonpositive); shape arbitrary.
    :returns: A tensor of the same shape with the elementwise binary
        entropies.
    """
    p = logits_tensor.exp()
    q = 1 - p
    return -(q * q.log() + p * logits_tensor)
def entropy(log_dist):
    """Shannon entropy of distributions given in log space.

    :param torch.Tensor log_dist: log of a distribution; the last axis
        should logsumexp to 0. Shape ``(..., n_outcomes_of_distribution)``.
    :returns: Entropy of the input distributions.
    """
    probs = log_dist.exp()
    return -(probs * log_dist).sum(-1)
def num_to_data(num, num_steps, base=2, dtype=float):
    """Turns an integer into a tensor containing its representation in a given base.

    Use this function to, for example, enumerate all possible binary outcomes.

    :param int num: The nonnegative integer whose representation is output.
    :param int num_steps: The size of the output tensor; the representation
        is zero-padded (most significant side) up to this length.
    :param base: The base of the resulting strings.
    :param dtype: The data type of the output tensor.
    :returns: A :py:class:`torch.Tensor` of length ``num_steps``, most
        significant digit first.
    :raises ValueError: if ``num`` does not fit in ``num_steps`` digits.
        (Previously an ``assert``, which is stripped under ``python -O``.)
    """
    digits = []
    remaining = num
    while remaining > 0:
        digits.append(remaining % base)
        remaining //= base
    if len(digits) > num_steps:
        raise ValueError(
            "{} needs {} base-{} digits, but num_steps is {}".format(
                num, len(digits), base, num_steps))
    # Zero-pad to the requested length, then put the most significant digit first.
    digits.extend([0] * (num_steps - len(digits)))
    return torch.tensor(digits[::-1], dtype=dtype)
def all_strings(steps, base=2, dtype=float):
    r"""Enumerate every length-``steps`` string over ``base`` symbols.

    :param steps: Length of strings.
    :param base: The base of the strings.
    :param dtype: The type of the resulting tensor.
    :return: Tensor of shape ``(base**steps, steps)``; rows are in counting
        order, most significant digit first.
    """
    rows = []
    for num in range(base ** steps):
        # Base-``base`` digits of ``num``, least significant first,
        # zero-padded to ``steps`` digits.
        digits = []
        for _ in range(steps):
            digits.append(num % base)
            num //= base
        rows.append(torch.tensor(digits[::-1], dtype=dtype))
    return torch.stack(rows)
def unsqueeze_to(x, total, target):
    """Unsqueezes a dimension-1 tensor to the :attr:`target` position,
    out of a total of :attr:`total` dimensions.

    The input tensor is NOT modified; a reshaped view is returned. (The
    previous implementation used in-place ``unsqueeze_``, silently changing
    the caller's tensor, contrary to the functional usage in the example.)

    Example::
        >>> x = torch.arange(5)
        >>> y = unsqueeze_to(x, 6, 2)
        >>> assert y.shape == (1, 1, 5, 1, 1, 1)
        >>> assert x.shape == (5,)

    :param torch.Tensor x: A one-dimensional tensor.
    :param int total: Total number of dimensions of the result.
    :param int target: Position of the original dimension in the result.
    :returns: A view of ``x`` with ``total`` dimensions.
    """
    result = x
    # Same unsqueeze sequence as before, but out-of-place.
    for _ in range(target):
        result = result.unsqueeze(-2)
    for _ in range(total - target - 1):
        result = result.unsqueeze(-1)
    return result
def wrap_index(index: torch.Tensor, batch_shape=None):
    """
    Takes a tensor whose interpretation is as indices of another
    tensor of shape ``batch_shape + arbitrary`` and outputs
    a tuple of tensors with which we can slice into the desired tensor.
    Use this method when you want only the 'diagonal' elements of some other
    tensor.
    Example::
        >>> x = torch.rand((5, 10, 7))
        >>> v = x.argmin(dim=-1)
        >>> print((x.min(dim=-1)[0] == x[wrap_index(v)].all()))
        tensor(True)
    :param torch.Tensor index: dtype :py:class:`int`. The index we wish to use
        to slice into some other tensor
    :param tuple batch_shape: tuple of ints or a :py:class:`torch.Shape` object.
        The shape of the batch dimensions of the tensor we will slice into.
        If :class:`None`, defaults to `index.shape`.
    :return: tuple of tensors. Use it to slice into some other tensor of the
        right shape.
    """
    shape = index.shape
    if batch_shape is None:
        batch_shape = index.shape
    if batch_shape == ():
        # A scalar index needs no wrapping.
        return index
    l = len(shape)
    # One broadcastable arange per batch dimension: each arange is reshaped so
    # that it runs along its own axis, giving the "diagonal" selection when
    # combined with ``index`` on the final axis.
    ind = \
        tuple(
            unsqueeze_to(torch.arange(batch_shape[x]), l, x)
            for x in range(len(batch_shape))
        )
    ind = ind + (index,)
    return ind
def transpositions(n):
    """All transpositions of ``range(n)`` as a list of
    :py:class:`~torch.Tensor` permutations.

    :param int n: The number to compute for.
    :return: list of ``n*(n-1)/2`` transpositions, each of length ``n``.
    """
    result = []
    for j in range(n):
        for i in range(j):
            # Identity permutation with positions i and j exchanged.
            perm = torch.arange(n)
            perm[j], perm[i] = i, j
            result.append(perm)
    return result
def id_and_transpositions(n):
    r"""The identity permutation followed by every transposition.

    :param n: Number of states.
    :return: Tensor of shape ``(1 + n*(n-1)/2, n)``; row 0 is the identity.
    """
    perms = [torch.arange(n)]
    perms.extend(transpositions(n))
    return torch.stack(perms)
def first_nonzero(x, dim=-1):
    """Index of the first nonzero element along ``dim``.

    Where a slice has no nonzero element, the length of ``x`` along ``dim``
    is returned instead.

    :param torch.Tensor x:
    :param int dim:
    :return: ``x`` reduced along ``dim``.
    """
    if len(x.shape) == 0:
        # Promote scalars so the reduction below is well defined.
        x = x.unsqueeze(-1)
    length = x.shape[dim]
    nonzero = x.bool()
    # Weight earlier positions more heavily so argmax picks the FIRST hit
    # (plain argmax over a bool tensor does not guarantee this).
    weights = torch.arange(length, 0, -1).view(
        (length,) + (1,) * len(x.shape[dim:-1]))
    first = (nonzero.int() * weights).argmax(dim)
    found = nonzero.any(dim)
    return first * found + (~found).int() * length
def indices(shape):
    """An implementation of `numpy.indices <https://numpy.org/doc/stable/reference/generated/numpy.indices.html>`_
    for torch; always the "sparse" version.

    :param tuple shape:
    :return: A tuple of tensors; the ``n``-th has shape
        ``(1,)*n + (shape[n],) + (1,)*(len(shape) - n - 1)``.
    """
    ndim = len(shape)
    grids = []
    for axis, size in enumerate(shape):
        view_shape = (1,) * axis + (size,) + (1,) * (ndim - axis - 1)
        grids.append(torch.arange(size).reshape(view_shape))
    return tuple(grids)
def index_to_tuple(index, axis):
    """Given a tensor ``index`` containing indices into axis ``axis`` of
    another tensor ``y``, build the tuple to pass as
    ``y[index_to_tuple(index, axis)]``.

    :param torch.Tensor index: An integer tensor interpretable as indices
        into another tensor.
    :param int axis: The axis which ``index`` indexes into.
    :return: A tuple of tensors broadcastable to ``index.shape``.
    """
    grids = indices(index.shape)
    return grids[:axis] + (index,) + grids[axis:]
def log_tvd(lps1, lps2):
    r"""Log of `total variation distance`_, between two arrays whose last
    axis contains the log-probabilities of all possible outcomes.

    .. math::
        \log(1/2 \sum_{x \in \mathcal{X}}|\mathbb{P}_1(x) - \mathbb{P}_2(x)|)

    where :math:`\mathcal{X}` is the space of possible outcomes.

    .. _`total variation distance`: https://en.wikipedia.org/wiki/Total_variation_distance_of_probability_measures

    :param lps1: Log-probabilities of the first distribution.
    :param lps2: Log-probabilities of the second distribution.
    :return: The log TVD, reduced over the last axis.
    """
    # |p1 - p2| = sign*(p1) + (-sign)*(p2); the signed logsumexp computes
    # this stably in log space.
    signs = np.sign(lps1 - lps2)
    stacked_lps = np.stack([lps1, lps2], axis=0)
    stacked_signs = np.stack([signs, -signs], axis=0)
    log_abs_diff = logsumexp(stacked_lps, axis=0, b=stacked_signs)
    return logsumexp(log_abs_diff, axis=-1) - np.log(2)
def log1mexp(lp):
    r"""Log 1 minus exp of argument.

    .. math::
        \log(1-\exp(x))

    This is used to take :math:`1-p` for an argument in log space.

    Branch selection follows Maechler (2012), "Accurately Computing
    log(1 - exp(-|a|))": near ``lp == 0`` the quantity ``1 - exp(lp)``
    suffers catastrophic cancellation, so ``expm1`` must be used there,
    whereas for very negative ``lp`` the quantity ``exp(lp)`` is tiny and
    ``log1p`` is the accurate choice. (The previous version had the two
    branches swapped, e.g. ``log1mexp(-1e-20)`` returned ``-inf``.)

    :param lp: A nonpositive float, the log of a probability.
    :return: ``log(1 - exp(lp))``.
    """
    if lp > np.log(.5):
        # exp(lp) is close to 1; expm1 retains the small difference.
        return np.log(-np.expm1(lp))
    # exp(lp) is at most 1/2; log1p(-exp(lp)) is accurate here.
    return np.log1p(-np.exp(lp))
def flatten_batch_dims(data, event_dims=0):
    """Collapse all batch dimensions of ``data`` into a single leading
    dimension, and also return the original batch shape.

    ``data`` is assumed to have shape ``batch_shape + event_shape``, where
    ``event_shape`` consists of the trailing ``event_dims`` dimensions.

    :param data: The data to flatten.
    :param event_dims: The number of trailing event dimensions. Defaults to 0.
    :return: A pair ``(flat_data, batch_shape)`` where ``flat_data`` has
        shape ``(prod(batch_shape),) + event_shape``.
    """
    split = len(data.shape) - event_dims
    batch_shape = data.shape[:split]
    event_shape = data.shape[split:]
    num_batch = reduce(mul, batch_shape, 1)
    return data.reshape((num_batch,) + event_shape), batch_shape
def perm_idxs_from_perms(possible_perms, perms):
    """Map each permutation in ``perms`` to its row index in ``possible_perms``.

    :param torch.Tensor possible_perms: Shape ``(num_perms, n)``, the catalog
        of allowed permutations.
    :param torch.Tensor perms: Shape ``batch_shape + (n,)``, permutations to
        look up.
    :return: Long tensor of shape ``batch_shape`` holding matching row indices.
    :raises ValueError: If some permutation matches zero or multiple rows.
    """
    matches = torch.all(perms.unsqueeze(-2) == possible_perms, dim=-1)
    exactly_one = matches.sum(-1) == 1
    if not exactly_one.all():
        raise ValueError("Invalid permutations. Either the possible perms"
                         "contains duplicates, or there was a perm passed "
                         "that was not a possible perm.")
    return matches.max(-1)[1]
def kl_divergence(lp1, lp2, axis=-1):
    r"""Kullback-Leibler divergence :math:`D(p_1 \| p_2)` between two
    distributions given as log-probabilities along ``axis``.

    Computed as cross entropy minus entropy, each evaluated through a
    logsumexp for stability. Requires all log-probabilities to be strictly
    negative so that ``log(-lp)`` is defined.

    :param lp1: Log-probabilities of the first distribution.
    :param lp2: Log-probabilities of the second distribution.
    :param axis: The axis holding the outcome log-probabilities.
    :return: The KL divergence, reduced over ``axis``.
    """
    # Entropy of p1: sum_x p1(x) * (-lp1(x)).
    entropy = np.exp(logsumexp(lp1 + np.log(-lp1), axis=axis))
    # Cross entropy of p1 under p2: sum_x p1(x) * (-lp2(x)).
    cross_entropy = np.exp(logsumexp(lp1 + np.log(-lp2), axis=axis))
    return cross_entropy - entropy
perm_hmm | perm_hmm-master/perm_hmm/binning.py | import warnings
import torch
from pyro.distributions import Categorical
from itertools import combinations
from perm_hmm.models.hmms import ExpandedHMM, DiscreteHMM
from perm_hmm.simulator import HMMSimulator
def bin_histogram(base_hist, bin_edges):
    r"""Bin a histogram along its last axis using the given bin edges.

    Bin edges are left inclusive, right exclusive, so that::

        bin_histogram(base_hist, bin_edges)[..., i] = base_hist[..., bin_edges[i]:bin_edges[i+1]].sum()

    :param base_hist: The histogram to bin. This is a probability distribution,
        so that ``torch.allclose(base_hist.sum(-1), 1.)``
    :param bin_edges: The edges of the bins. Should be in increasing order.
    :return: Binned histograms, shape ``base_hist.shape[:-1] + (bin_edges.shape[-1]-1,)``
    """
    total = torch.sum(base_hist, dim=-1).float()
    if not torch.allclose(total, torch.tensor(1.).float()):
        warnings.warn("The input histogram is not normalized.")
    n = base_hist.shape[-1]
    binned = torch.stack(
        [base_hist[..., bin_edges[i]:bin_edges[i + 1]].sum(dim=-1)
         for i in range(len(bin_edges) - 1)],
        dim=-1,
    )
    if not torch.allclose(torch.sum(binned, dim=-1).float(), torch.tensor(1.).float()):
        warnings.warn("The bin edges are such that the new histogram is not normalized. "
                      "Maybe the edges 0 and {} weren't included?".format(n))
    return binned
def bin_log_histogram(base_log_hist, bin_edges):
    r"""Bin a log-space histogram along its last axis using the given bin edges.

    Bin edges are left inclusive, right exclusive, so that::

        bin_log_histogram(base_hist, bin_edges)[..., i] = base_hist[..., bin_edges[i]:bin_edges[i+1]].logsumexp()

    :param base_log_hist: The histogram to bin, in log space, a probability
        distribution so that ``torch.allclose(base_log_hist.logsumexp(-1), 0.)``
    :param bin_edges: The edges of the bins. Should be in increasing order.
    :return: Binned histograms, shape ``base_log_hist.shape[:-1] + (bin_edges.shape[-1]-1,)``
    """
    norm = torch.logsumexp(base_log_hist, dim=-1).float()
    if not torch.allclose(norm, torch.tensor(0.).float(), atol=1e-7):
        warnings.warn("The input histogram is not normalized.")
    binned = torch.stack(
        [torch.logsumexp(base_log_hist[..., bin_edges[i]:bin_edges[i + 1]], dim=-1)
         for i in range(len(bin_edges) - 1)],
        dim=-1,
    )
    if not torch.allclose(torch.logsumexp(binned, dim=-1).float(), torch.tensor(0.).float(), atol=1e-7):
        warnings.warn(
            "The bin edges are such that the new histogram is not normalized. "
            "Maybe the edges 0 and {} weren't included?".format(binned.shape[-1]))
    return binned
def binned_hmm(hmm, bin_edges):
    r"""Assuming a categorical output distribution, finds the binned version of
    the HMM.
    Applies ``bin_histogram`` to the output distributions if probs is specified,
    otherwise applies ``bin_log_histogram``.
    :param hmm: The HMM whose categorical observation distribution to bin.
    :param bin_edges: Edges of the bins, passed to the binning helpers.
    :return: A new HMM of the same type with the binned observation distribution.
    """
    # Heuristic: a Categorical parametrized by probs has all entries >= 0, so
    # any negative entry means _param holds logits.
    # NOTE(review): all-nonnegative logits (e.g. uniform zeros) would be
    # misdetected as probs — confirm callers never build such an HMM.
    if torch.any(hmm.observation_dist._param < 0):
        # sum log_probs
        base_log_hist = hmm.observation_dist._param
        observation_dist = Categorical(logits=bin_log_histogram(base_log_hist, bin_edges))
    else:
        observation_dist = Categorical(probs=bin_histogram(hmm.observation_dist._param, bin_edges))
    # Initial and transition distributions are over states, not outcomes, so
    # they pass through unchanged.
    return type(hmm)(
        initial_logits=hmm.initial_logits,
        transition_logits=hmm.transition_logits,
        observation_dist=observation_dist,
    )
def binned_expanded_hmm(expanded_hmm, bin_edges):
    r"""Given an HMM with an expanded state space used to account for outcome
    dependence, bins the HMM using the bin edges.
    The expanded HMM's states are (state, previous outcome) pairs, so binning
    the outcomes shrinks both the outcome space and the state space.
    :param expanded_hmm: The expanded-state HMM to bin.
    :param bin_edges: Edges of the bins, in increasing order.
    :return: An HMM of the same type over the binned state/outcome spaces.
    """
    num_bins = len(bin_edges) - 1
    ltm = expanded_hmm.transition_logits
    # Recover the number of underlying states (n) and outcomes (o) from the
    # flattened index of the last transition-matrix entry.
    n, o = expanded_hmm.i_to_lo(ltm.shape[-1]-1)
    n += 1
    o += 1
    tm = ltm.reshape((n, o, n, o))
    binned_transitions = bin_log_histogram(tm, bin_edges)[:, torch.arange(num_bins), ...].reshape((
        n * num_bins,
        n * num_bins))  # Doesn't matter which "previous outcome" slice we take, as long as it has the right size.
    binned_initial = bin_log_histogram(expanded_hmm.initial_logits.reshape((n, o)), bin_edges).reshape((-1,))
    # Heuristic (as in binned_hmm): a negative entry in _param means logits.
    if torch.any(expanded_hmm.observation_dist._param < 0):
        # sum log_probs
        base_log_hist = expanded_hmm.observation_dist._param
        # Keep one representative "previous outcome" row per bin before
        # binning the outcome axis itself.
        base_log_hist = base_log_hist.reshape((n, o, o))[:, bin_edges[:-1]]
        binned_outputs = bin_log_histogram(base_log_hist, bin_edges).reshape((n*num_bins, num_bins))
        observation_dist = Categorical(logits=binned_outputs)
    else:
        base_hist = expanded_hmm.observation_dist._param
        base_hist = base_hist.reshape((n, o, o))[:, bin_edges[:-1]]
        binned_outputs = bin_histogram(base_hist, bin_edges).reshape((n*num_bins, num_bins))
        observation_dist = Categorical(probs=binned_outputs)
    hmm = type(expanded_hmm)(binned_initial, binned_transitions, observation_dist)
    return hmm
def generate_infidelity_cost_func(hmm, num_steps):
    r"""Build a cost function mapping bin edges to the log misclassification
    rate of the correspondingly binned HMM after collecting ``num_steps``
    steps of data.
    """
    # Decide once which binning routine applies to this model.
    expanded = isinstance(hmm, ExpandedHMM)
    def infidelity_cost_func(bin_edges):
        if expanded:
            binned_model = binned_expanded_hmm(hmm, bin_edges)
        else:
            binned_model = binned_hmm(hmm, bin_edges)
        postprocessor = HMMSimulator(binned_model).all_classifications(num_steps)
        return postprocessor.log_misclassification_rate()
    return infidelity_cost_func
def optimally_binned_consecutive(hmm, num_bins, cost_func=None, steps=2):
    r"""Given an hmm, finds the optimal binning by exhaustively searching over all possible
    bin edges.
    The bin edges dictate which consecutive outcomes to include in a bin.
    WARNING: This method is slow. The complexity grows as :math:`O(Y^n)`, where
    :math:`Y` is the number of outcomes of the unbinned histogram, and :math:`n`
    is the number of bins to use.
    By default, uses the infidelity at ``steps`` number of steps of the binned hmm as the
    cost function. This can potentially be expensive to compute, so use a different cost function when necessary.
    :param hmm: The HMM whose outcomes to bin.
    :param num_bins: The number of bins to use.
    :param cost_func: A function that takes bin edges and returns a cost. Defaults
        to ``generate_infidelity_cost_func(hmm, steps)``.
    :param steps: Only used to make a cost function, when that is not specified.
    :return: The optimal bin edges, and the minimal cost.
    :raises ValueError: If ``num_bins`` exceeds the number of outcomes, or is
        less than 2.
    """
    max_observation = hmm.enumerate_support(False).reshape((-1,)).shape[-1]
    if num_bins > max_observation:
        raise ValueError("Too many bins for number of outcomes. Asked for "
                         "{} bins for {} outcomes.".format(num_bins, max_observation))
    if num_bins < 2:
        raise ValueError("Can't have fewer than 2 bins. Asked for {} bins.".format(num_bins))
    if cost_func is None:
        cost_func = generate_infidelity_cost_func(hmm, steps)
    minimizing_edges = None
    min_cost = None
    # Interior edges may sit anywhere strictly between 0 and max_observation,
    # i.e. at 1, ..., max_observation - 1. (The previous version iterated
    # torch.arange(max_observation - 2) + 1, omitting max_observation - 1, so
    # binnings whose final bin has width 1 were never considered.)
    for interior in combinations(range(1, max_observation), num_bins - 1):
        bin_edges = torch.cat((
            torch.tensor([0], dtype=int),
            torch.tensor(interior, dtype=int),
            torch.tensor([max_observation], dtype=int),
        ))
        cost = cost_func(bin_edges)
        if (min_cost is None) or (cost < min_cost):
            min_cost = cost
            minimizing_edges = bin_edges
    return minimizing_edges, min_cost
| 7,504 | 45.32716 | 133 | py |
perm_hmm | perm_hmm-master/perm_hmm/rate_comparisons.py | import torch
from perm_hmm.models.hmms import PermutedDiscreteHMM
from perm_hmm.simulator import HMMSimulator
from perm_hmm.loss_functions import log_zero_one
def exact_rates(phmm: PermutedDiscreteHMM, num_steps, perm_policy, classifier=None, verbosity=0, log_loss=None):
    r"""Provides plumbing for comparing the misclassification rate calculated
    using a given policy versus using the trivial policy that applies no
    permutations.
    Basically calls :py:meth:`~perm_hmm.simulator.HMMSimulator.all_classifications`
    twice and packages the results into a dictionary.
    :param phmm: The permuted HMM to classify with.
    :param num_steps: Number of time steps per run.
    :param perm_policy: The permutation policy to compare against no permutations.
    :param classifier: Optional classifier passed to the simulator.
    :param verbosity: If nonzero, the simulator also returns diagnostics,
        included in the result under the ``b"*_extras"`` keys.
    :param log_loss: Log-loss function for the Bayes risk; defaults to
        :py:func:`~perm_hmm.loss_functions.log_zero_one`.
    :return: A dict of log rates, classifications, and experiment parameters.
    """
    # Record everything needed to reproduce the experiment.
    experiment_parameters = {
        b"hmm_params": {
            b"initial_logits": phmm.initial_logits,
            b"transition_logits": phmm.transition_logits,
            b"observation_params": phmm.observation_dist._param,
        },
        b"possible_perms": perm_policy.possible_perms,
        b"num_steps": torch.tensor(num_steps),
    }
    simulator = HMMSimulator(phmm)
    nop = simulator.all_classifications(num_steps, classifier=classifier, verbosity=verbosity)
    pp = simulator.all_classifications(num_steps, classifier=classifier, perm_policy=perm_policy, verbosity=verbosity)
    if verbosity:
        # With nonzero verbosity the simulator returns
        # (postprocessor, extras) pairs; unpack them.
        nop, nod = nop
        pp, pd = pp
    no_classifications = nop.classifications
    p_classifications = pp.classifications
    if log_loss is None:
        log_loss = log_zero_one
    toret = {
        b"permuted_log_rate": pp.log_risk(log_loss),
        b"unpermuted_log_rate": nop.log_risk(log_loss),
        b"unpermuted_classifications": no_classifications,
        b"permuted_classifications": p_classifications,
        b"experiment_parameters": experiment_parameters
    }
    if verbosity:
        toret[b"unpermuted_extras"] = nod
        toret[b"permuted_extras"] = pd
    return toret
def empirical_rates(phmm: PermutedDiscreteHMM, num_steps, perm_policy, classifier=None, num_samples=1000, confidence=.95, verbosity=0, loss=None):
    r"""Estimates misclassification rates empirically, with and without
    permutations, by sampling runs from the HMM.
    Companion to :py:func:`exact_rates`; uses
    :py:meth:`~perm_hmm.simulator.HMMSimulator.simulate` instead of exhaustive
    enumeration, and reports rates with confidence intervals.
    :param phmm: The permuted HMM to classify with.
    :param num_steps: Number of time steps per run.
    :param perm_policy: The permutation policy to compare against no permutations.
    :param classifier: Optional classifier passed to the simulator.
    :param num_samples: Number of Monte Carlo runs per condition.
    :param confidence: Confidence level for the rate intervals.
    :param verbosity: If nonzero, diagnostics are included in the result
        under the ``b"*_extras"`` keys.
    :param loss: Optional loss function forwarded to
        ``misclassification_rate``.
    :return: A dict of rates, classifications, and experiment parameters.
    """
    experiment_parameters = {
        b"hmm_params": {
            b"initial_logits": phmm.initial_logits,
            b"transition_logits": phmm.transition_logits,
            b"observation_params": phmm.observation_dist._param,
        },
        b"possible_perms": perm_policy.possible_perms,
        b"num_steps": torch.tensor(num_steps),
    }
    simulator = HMMSimulator(phmm)
    pp = simulator.simulate(num_steps, num_samples, classifier=classifier, perm_policy=perm_policy, verbosity=verbosity)
    # The unpermuted run always requests verbosity >= 1 so its extras dict is
    # available; it is only attached to the result when verbosity is set.
    nop, d = simulator.simulate(num_steps, num_samples, classifier=classifier, verbosity=max(1, verbosity))
    if verbosity:
        # The permuted run returns a (postprocessor, extras) pair.
        pp, pd = pp
    no_classifications = nop.classifications
    p_classifications = pp.classifications
    toret = {
        b"permuted_rates": pp.misclassification_rate(confidence, loss),
        b"unpermuted_rates": nop.misclassification_rate(confidence, loss),
        b"unpermuted_classifications": no_classifications,
        b"permuted_classifications": p_classifications,
        b"experiment_parameters": experiment_parameters
    }
    if verbosity:
        toret[b"unpermuted_extras"] = d
        toret[b"permuted_extras"] = pd
    return toret
| 3,152 | 39.948052 | 146 | py |
perm_hmm | perm_hmm-master/perm_hmm/postprocessing.py | """
Classes to be used for postprocessing data after a simulation.
"""
import warnings
import numpy as np
import torch
from scipy.stats import beta
from perm_hmm.util import ZERO
from perm_hmm.loss_functions import zero_one, log_zero_one
def clopper_pearson(alpha, num_successes, total_trials):
    """
    Computes the `exact binomial`_ confidence interval for confidence level
    1-`alpha`.
    This method uses the scipy.stats.beta.ppf function because I couldn't
    find it in the torch framework.
    :param float alpha: between 0 and 1. 1-alpha is the confidence level.
    :param torch.Tensor num_successes: number of "positive" inferences.
        shape arbitrary, but must broadcast with `total_trials`.
    :param torch.Tensor total_trials: number of total inferences.
        shape arbitrary, but must broadcast with `num_successes`.
    :returns: A float :py:class:`torch.Tensor` of shape
        ``(2,) + broadcast_shape``; index 0 holds the lower bounds and
        index 1 the upper bounds.
    :raises ValueError: if successes exceed trials, or any trial count is zero.
    .. _exact binomial: https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Clopper%E2%80%93Pearson_interval
    """
    if (num_successes > total_trials).any():
        raise ValueError("The number of successful trials cannot be more than"
                         " the total number of trials")
    a = np.array(alpha)
    successes = np.array(num_successes)
    trials = np.array(total_trials)
    if (trials == 0).any():
        raise ValueError("The total number of trials should be nonzero")
    successes, trials = np.broadcast_arrays(successes, trials)
    lower = np.array(beta.ppf(a / 2, successes, trials - successes + 1))
    upper = np.array(beta.ppf(1 - a / 2, successes + 1, trials - successes))
    # beta.ppf returns NaN at the degenerate endpoints (0 or n successes);
    # patch those entries with the closed-form one-sided limits.
    all_failures = successes == 0
    all_successes = successes == trials
    lower[all_failures] = 0
    upper[all_failures] = 1 - (a / 2)**(1 / trials[all_failures])
    lower[all_successes] = (a / 2)**(1 / trials[all_successes])
    upper[all_successes] = 1
    return torch.stack((torch.from_numpy(lower), torch.from_numpy(upper)))
class ExactPostprocessor(object):
    r"""
    A class for postprocessing the data obtained from running
    a simulation to compute the exact misclassification rate of a model.
    Instances of this class have the following attributes:
    ``log_joint``:
        :math:`\log(\mathbb{P}(s_0, y^n))`, represented by a
        :py:class:`~torch.Tensor`, of shape
        ``(num_states, num_possible_outcomes)``
    ``classifications``:
        The inferred initial states, as a :py:class:`~torch.Tensor` of shape
        ``(num_possible_outcomes,)``.
    ``score``:
        The "degree of belief" with which the classification is made,
        as a :py:class:`~torch.Tensor` of shape ``(num_possible_outcomes,)``.
        Can be ``None``.
        This is used for postselection. The runs with the highest score are
        kept.
    """
    def __init__(self, log_joint, classifications, score=None, testing_states=None):
        """Validates shapes and stores the inputs.
        :param log_joint: Shape ``(num_states, num_possible_outcomes)``.
        :param classifications: Shape ``(num_possible_outcomes,)``.
        :param score: Optional; same shape as ``classifications``.
        :param testing_states: NOTE(review): accepted but never used —
            confirm whether it can be removed.
        :raises ValueError: If any shape constraint fails.
        """
        if not len(classifications.shape) == 1:
            raise ValueError("Classifications must have exactly 1 dimension")
        if not len(log_joint.shape) == 2:
            raise ValueError("log_joint must have exactly two dimensions.")
        if not log_joint.shape[-1] == classifications.shape[-1]:
            raise ValueError("Classifications should have same last dimension as log_joint.")
        if (score is not None) and (score.shape != classifications.shape):
            raise ValueError("Score should have same shape as classifications.")
        self.log_joint = log_joint
        self.classifications = classifications
        self.score = score
    def log_risk(self, log_loss):
        """
        Computes log bayes risk for a given log loss function.
        :param log_loss: log loss function, should take arguments state, classification
            and return the log loss for that pair.
        :return: The log bayes risk for the given function.
        """
        states = torch.arange(self.log_joint.shape[-2])
        # Broadcast loss over all (state, outcome) pairs, then sum the joint
        # in log space over both axes.
        ll = log_loss(states.unsqueeze(-1), self.classifications.unsqueeze(-2))
        return (self.log_joint + ll).logsumexp(-1).logsumexp(-1)
    def log_misclassification_rate(self):
        """Log Bayes risk under the zero-one loss."""
        return self.log_risk(log_zero_one)
    def log_confusion_matrix(self):
        r"""
        Computes the elementwise log of the confusion matrix,
        :math:`\mathbb{P}(\hat{s}|s)`
        """
        log_prior = self.log_joint.logsumexp(-1)
        nonzero_prior = log_prior > torch.tensor(1e-6).log()
        if not nonzero_prior.all():
            warnings.warn("Not all states have nonzero prior, there will be "
                          "NaNs in the confusion matrix.")
        possible_class = torch.arange(self.classifications.max()+1)
        # Condition the joint on the initial state.
        log_data_given_state = self.log_joint - log_prior.unsqueeze(-1)
        one_hot = possible_class.unsqueeze(-1) == self.classifications
        f_one_hot = one_hot.float()
        # ZERO is a tiny positive constant (from perm_hmm.util) standing in
        # for probability zero so its log stays finite.
        f_one_hot[~one_hot] = ZERO
        log_one_hot = f_one_hot.log()
        # log_one_hot[~one_hot] = 2*log_one_hot[~one_hot]
        log_confusion_rates = (log_data_given_state.unsqueeze(-2) +
                               log_one_hot.unsqueeze(-3)).logsumexp(-1)
        # States with (numerically) zero prior have an undefined conditional.
        if log_confusion_rates.dtype == torch.double:
            log_confusion_rates[~nonzero_prior] = torch.tensor(float('NaN')).double()
        else:
            log_confusion_rates[~nonzero_prior] = torch.tensor(float('NaN'))
        return log_confusion_rates
    def postselected_misclassification_rate(self, log_prob_to_keep):
        """
        Given a total probability to keep, gives the misclassification rate of
        the model restricted to the domain containing that probability with the
        best score.
        This method is necessary in spite of the
        :py:meth:`ExactPostprocessor.postselect` method because we cannot
        guarantee that the amount of probability kept after using that method
        is exactly the desired probability.
        :param torch.Tensor log_prob_to_keep: The log probability to keep.
        :returns: The misclassification rate keeping only the best prob_to_keep of data.
        :raises ValueError: if you try to throw away all the data.
        """
        log_prob = self.log_joint.logsumexp(-1)
        # Sort runs by ascending score; lowest-score runs are discarded first.
        enum_prob_score = sorted(
            enumerate(zip(log_prob, self.score)),
            key=lambda x: x[1][1],
        )
        enum = torch.tensor([tup[0] for tup in enum_prob_score])
        sort_log_prob = torch.tensor([tup[1][0] for tup in enum_prob_score])
        # NOTE(review): this reduces over the whole sorted vector, producing a
        # scalar; a cumulative reduction (torch.logcumsumexp) appears to be
        # intended to locate the discard boundary — confirm against tests.
        throw = (sort_log_prob.logsumexp(-1) < (1 - log_prob_to_keep.exp()).log())
        if not throw.any():
            boundary = -1
        else:
            boundary = throw.nonzero().squeeze().max()
        if boundary == len(log_prob):
            raise ValueError("Can't throw away all the data.")
        # The run right at the boundary is kept only fractionally so the total
        # kept probability matches log_prob_to_keep exactly.
        boundary_onehot = torch.zeros(len(log_prob), dtype=bool)
        boundary_onehot[boundary + 1] = True
        # Undo the sort to express the masks in the original run order.
        mask = (~(throw | boundary_onehot))[enum.argsort()]
        kept_log_prob = log_prob[mask].logsumexp(-1)
        log_most_rate = self.postselect(mask).log_misclassification_rate()
        b_mask = boundary_onehot[enum.argsort()]
        log_b_rate = self.postselect(b_mask).log_misclassification_rate()
        # Mix the fully-kept mass with the fractional boundary mass, then
        # renormalize by the kept probability.
        return torch.from_numpy(np.logaddexp((kept_log_prob + log_most_rate).numpy(), (log_prob_to_keep.exp()-kept_log_prob.exp()).log() + log_b_rate)) - log_prob_to_keep
    def postselection_mask(self, threshold_score):
        """
        Returns a mask where the score of the runs is larger than specified.
        :param float threshold_score: The score below which we would like to
            throw out runs.
        :returns: :py:class:`torch.Tensor`, bool. True means keep.
            shape ``(n_runs,)``
        """
        return self.score > threshold_score
    def postselect(self, postselect_mask):
        """
        Postselects the data according to the postselect mask.
        :param torch.Tensor postselect_mask: bool. indicating whether to
            keep the data. True corresponds to keep.
            shape ``(n_runs,)``
        :returns: ExactPostprocessor. A postselected version of self.
        :raises ValueError: if the mask discards every run.
        """
        if (~postselect_mask).all():
            raise ValueError("Can't throw out all the data.")
        # Advanced indexing copies, so the in-place renormalization below
        # does not touch self.log_joint.
        p_log_joint = self.log_joint[:, postselect_mask]
        p_log_joint -= p_log_joint.logsumexp(-1).logsumexp(-1)
        if self.score is not None:
            p_score = self.score[postselect_mask]
        else:
            p_score = None
        p_classifications = self.classifications[postselect_mask]
        return ExactPostprocessor(p_log_joint, p_classifications, p_score)
class EmpiricalPostprocessor(object):
    """
    A class for postprocessing the data obtained from running a
    simulation to compute the approximate misclassification rate of a model.
    Instances of this class have the following attributes:
    ``ground_truth``:
        The true initial states, shape ``(num_samples,)``
    ``classifications``:
        The inferred initial states, shape ``(num_samples,)``.
    ``score``:
        The "degree of belief" of the classification. shape ``(num_samples,)``.
        Can be ``None``.
        This is used for postselection. The runs with the highest score are
        kept.
    """
    def __init__(self, ground_truth, classifications, score=None):
        """
        The minimal things which are needed to produce misclassification rates.
        Requires that classified_bright and classified_dark are specified in a
        subclass init.
        :param torch.Tensor ground_truth: Indicates which runs were generated
            from which initial state.
        :param torch.Tensor classifications: The inferred initial states.
        :param torch.Tensor score: Optional per-run classification score.
        """
        self.ground_truth = ground_truth
        self.classifications = classifications
        self.score = score
    def postselection_percentage_mask(self, prob_to_keep):
        """
        Produces a mask which indicates the top prob_to_keep of data according to
        self.score
        :param prob_to_keep: A float between 0 and 1.
        :return: A boolean tensor indicating which runs to keep
            shape ``(num_runs,)``
        :raises AttributeError: If self.score is None
        """
        if self.score is None:
            raise AttributeError(
                "The data is not scored for postselection.")
        # NOTE(review): sort() is ascending, so this keeps the runs with the
        # *lowest* scores, while the docstring (and the class docstring) say
        # the highest-scored runs are kept — confirm which is intended.
        sort_score = self.score.sort()
        mask = torch.zeros_like(self.score, dtype=bool)
        mask[sort_score.indices[:round(len(self.score)*prob_to_keep)]] = True
        return mask
    def postselection_mask(self, threshold_score):
        """
        Masks the runs whose score is too low.
        :param float threshold_score:
            The score below which we will throw out the data.
        :returns: :py:class:`torch.Tensor`, bool.
            True if we want to keep the run.
            shape ``(n_runs,)``
        """
        return self.score > threshold_score
    def risk(self, loss):
        """
        Computes bayes risk by Monte Carlo integration of a loss function.
        :param loss: The loss function to compute bayes risk for.
            Should take a state and a classification and return a float tensor.
        :return: Float tensor, the Bayes risk.
        """
        # Sample mean of the loss over all runs.
        return loss(self.ground_truth, self.classifications).sum(-1) / torch.tensor(self.classifications.shape[-1]).float()
    def misclassification_rate(self, confidence_level=.95, loss=None):
        """
        Computes misclassification rate and a confidence interval.
        Computed using the Clopper-Pearson binomial interval.
        :param confidence_level: The confidence level to compute a confidence
            interval for.
        :param loss: The loss function to compute the number of misses with.
            defaults to the zero-one loss,
            >>> def loss(ground_truth, classification):
            >>>     return (ground_truth != classification).sum(-1)
        :return: A dict with keys
            b"rate": The misclassification rate as a float tensor.
            b"lower": The lower bound of the confidence interval as a float tensor.
            b"upper": The upper bound of the confidence interval as a float tensor.
        """
        if loss is None:
            def loss(ground_truth, classification):
                return (ground_truth != classification).sum(-1)
        # NOTE(review): the point rate always uses zero_one, while the
        # interval uses the supplied `loss` — these can disagree when a
        # custom loss is passed; confirm this is intentional.
        rate = self.risk(zero_one)
        total = torch.tensor(self.classifications.shape[-1])
        num_misses = loss(self.ground_truth, self.classifications)
        interval = clopper_pearson(1-confidence_level, num_misses, total)
        return {b"rate": rate, b"lower": interval[0], b"upper": interval[1]}
    def confusion_matrix(self, confidence_level=.95):
        """
        Computes a confusion matrix and a confidence set for it, at the desired
        confidence level. Requires an R `package`_ "MultinomialCI" for computing
        the confidence set.
        :param confidence_level:
        :return: A dict with keys
            b"matrix": The confusion matrix as a float tensor.
            b"lower": The lower corner of the confidence set as a float tensor.
            b"upper": The upper corner of the confidence set as a float tensor.
        .. _package: https://cran.r-project.org/web/packages/MultinomialCI/MultinomialCI.pdf
        """
        # rpy2 is imported lazily so the rest of the class works without R.
        from rpy2.robjects.packages import importr
        from rpy2.robjects import FloatVector
        multinomial_ci = importr("MultinomialCI")
        range_truth = self.ground_truth.max() + 1
        range_class = self.classifications.max() + 1
        possible_class = torch.arange(range_class)
        lower = torch.empty((range_truth, range_class))
        upper = torch.empty((range_truth, range_class))
        matrix = torch.empty((range_truth, range_class))
        for i in range(range_truth.item()):
            mask = self.ground_truth == i
            if mask.any():
                classi = self.classifications[mask]
                # Per-class counts among the runs whose true state is i.
                counts = (classi == possible_class.unsqueeze(-1)).sum(-1)
                frequencies = counts / mask.sum(-1).float()
                vec = FloatVector(counts)
                ci = multinomial_ci.multinomialCI(vec, 1-confidence_level)
                ci = torch.from_numpy(np.array(ci))
                lower[i] = ci[:, 0]
                upper[i] = ci[:, 1]
                matrix[i] = frequencies
            else:
                warnings.warn("No instances of state {} in ground truth, there"
                              "will be NaNs in confusion matrix.".format(i))
                lower[i] = torch.tensor(float('NaN'))
                upper[i] = torch.tensor(float('NaN'))
                matrix[i] = torch.tensor(float('NaN'))
        return {b"matrix": matrix, b"lower": lower, b"upper": upper}
    def postselect(self, postselection_mask):
        """
        Postselects the data according to the postselection mask.
        :param torch.Tensor postselection_mask: bool.
            A boolean tensor indicating whether to
            keep the data. True corresponds to keep.
            shape ``(n_runs,)``
        :returns: EmpiricalPostprocessor.. A postselected
            version of self.
        :raises ValueError: if the mask discards every run.
        """
        if (~postselection_mask).all():
            raise ValueError("Can't throw out all the data.")
        if self.score is None:
            p_score = self.score
        else:
            p_score = self.score[postselection_mask]
        return EmpiricalPostprocessor(
            self.ground_truth[postselection_mask],
            self.classifications[postselection_mask],
            p_score,
        )
| 15,799 | 39.306122 | 170 | py |
perm_hmm | perm_hmm-master/perm_hmm/return_types.py | from typing import NamedTuple
import torch
# Field names and types of the HMM sampling output, kept as a module-level
# list in case callers introspect the schema.
hmm_fields = [
    ('states', torch.Tensor),
    ('observations', torch.Tensor),
]


class HMMOutput(NamedTuple):
    """Result of sampling an HMM: hidden states and emitted observations."""
    states: torch.Tensor
    observations: torch.Tensor
| 180 | 11.928571 | 47 | py |
perm_hmm | perm_hmm-master/perm_hmm/policies/belief_tree.py | r"""Provides functions used by strategies that use a tree to select the
permutation.
To compute optimal permutations, we use the belief states
.. math::
b(y^{k-1}) := \mathbb{P}(s_0, s_k|y^{k-1}),
where the :math:`s_k` are the states of the HMM at step :math:`k`, and the
superscript :math:`y^{k-1}` is the sequence of observations up to step
:math:`k-1`.
Here, when we refer to a tree, we really mean a list of
:py:class:`~perm_hmm.strategies.belief.HMMBeliefState` objects. The i'th
object contains the beliefs for all the nodes at the i'th level of the tree.
"""
import torch
from perm_hmm.policies.belief import HMMBeliefState
class HMMBeliefTree(object):
r"""
Instances of this class have the following attributes:
``hmm``:
A :py:class:`~perm_hmm.models.hmms.PermutedDiscreteHMM` that is used to
calculate belief states.
``possible_perms``:
A :py:class:`~torch.Tensor` of type ``long`` that contains the possible
permutations. This is used to compute transition matrices for updating
belief states.
"""
    def __init__(self, hmm, possible_perms, nsteps, root_belief: HMMBeliefState = None, data_len=None, terminal_offset=False):
        r"""Generates the belief tree for the given HMM.
        Builds a tree that is traversed by sequences :math:`y_0, \sigma_0, y_1,
        \sigma_1, \ldots`, where the :math:`\sigma_k` are permutation indices, and
        the :math:`y_k` are the observation indices. This tree has a layered
        structure. Attached to each node in the tree is a belief state
        :math:`\mathbb{P}(s_0, s_k|y^{k-1})`, or :math:`\mathbb{P}(s_0, s_k|y^k)`,
        depending on whether the node is an even or odd number of steps from the
        root, respectively. To go from a belief state attached to one node to a
        belief state attached to one of that node's children, we either use a
        transition or a Bayesian update, depending on whether the edge is a
        permutation or an observation, respectively.
        :param hmm: The HMM to compute likelihoods with.
        :param possible_perms: The allowable permutations.
        :param nsteps: The number of steps to compute for. (2 * nsteps + 1) is the
            height of the tree.
        :param HMMBeliefState root_belief: The belief state to start the tree with. If None,
            defaults to the initial state distribution of the HMM.
        :param data_len: The length of the data. If None, defaults to 1.
        :param terminal_offset: Whether the leaves of the tree should be labeled by
            observation indices.
        """
        self.hmm = hmm
        self.possible_perms = possible_perms
        # All tree construction is delegated; see _build_tree for the layout
        # of self.beliefs.
        self._build_tree(nsteps, root_belief, data_len, terminal_offset)
    def _build_tree(self, nsteps, root_belief: HMMBeliefState = None, data_len=None, terminal_offset=False):
        r"""Generates the belief tree for the given HMM.
        Populates ``self.beliefs`` as a list of belief states; the ith element
        holds the beliefs of all nodes at the ith level of the tree. Levels
        alternate between Bayesian updates (observation edges) and
        transitions (permutation edges).
        :param nsteps: The number of steps to compute for. (2 * nsteps + 1) is the
            height of the tree.
        :param root_belief: The belief state to start the tree with. If None,
            defaults to the initial state distribution of the HMM.
        :param data_len: The length of the data. If None, defaults to 1.
        :param terminal_offset: Whether the leaves of the tree should be labeled by
            observation indices.
        :raise ValueError: If ``nsteps`` is less than 1. Must look ahead at
            least one step.
        """
        if nsteps < 1:
            raise ValueError("Cannot build a tree of less than 1 look ahead "
                             "steps.")
        if data_len is None:
            data_len = 1
        if root_belief is None:
            # Start from the HMM's stationary description of the initial
            # state and broadcast it over the batch of data runs.
            root_belief = HMMBeliefState.from_hmm(self.hmm)
            root_belief.logits = root_belief.logits.expand(data_len, -1, -1)
        self.beliefs = [root_belief]
        if terminal_offset and (nsteps == 1):
            return
        # First level below the root: condition on each possible observation.
        b = root_belief.bayes_update(self.hmm.observation_dist.enumerate_support(expand=False).squeeze(-1), new_dim=True)
        self.beliefs.append(b)
        if (not terminal_offset) and (nsteps == 1):
            return
        # Grow two levels (permutation + observation) at a time.
        while len(self.beliefs) < (2 * (nsteps - 1)):
            self.grow(self.possible_perms)
        if not terminal_offset:
            self.grow(self.possible_perms)
        else:
            # Leaves labeled by observations: finish with a lone transition.
            self.beliefs.append(self.beliefs[-1].transition(self.possible_perms, new_dim=True))
def broadcast_to_length(self, length):
new_beliefs = []
for b in self.beliefs:
shape = torch.broadcast_shapes((length, 1, 1), b.logits.shape)
new_b = HMMBeliefState(b.logits.expand(shape).clone(), b.hmm, offset=b.offset)
new_beliefs.append(new_b)
self.beliefs = new_beliefs
def grow(self, possible_perms=None, hmm=None):
"""Expands the tree by two levels.
Assumes that the leaves have offset=True. Then, we expand the leaves by
transitioning the belief states at the leaves, and then again by Bayesian
updates.
:param possible_perms: The allowable permutations.
:param hmm: The HMM to compute likelihoods with.
:return: An expanded tree, in the form of a list of belief states.
"""
if possible_perms is None:
possible_perms = self.possible_perms
if hmm is None:
hmm = self.hmm
b = self.beliefs[-1].transition(possible_perms, hmm=hmm, new_dim=True)
self.beliefs.append(b)
b = self.beliefs[-1].bayes_update(hmm.observation_dist.enumerate_support(expand=False).squeeze(-1), hmm=hmm, new_dim=True)
self.beliefs.append(b)
def perm_idxs_from_log_cost(self, log_cost_func, return_log_costs=False, terminal_log_cost=None, is_cost_func=True):
    r"""Computes :math:`\mathbb{E}_{Y_k^n|y^{k-1}}[c(y^{k-1},Y_k^n)]` and the
    corresponding permutation indices that minimize this expectation.

    Given a tree of belief states, computes the expected cost of the tree.
    This computation is performed by first evaluating the cost function at the
    leaves of the tree, then propagating the cost up the tree.

    To compute the cost at an internal node whose children are labeled by data,
    we take the expectation over the children's costs, using the belief state
    to compute said expectation. To compute the cost at an internal node whose
    children are labeled by permutations, we take the minimum over the
    children's costs. This is a direct computation of the expected cost using
    the `Bellman equation`_.

    We then return both the permutation indices and, if ``return_log_costs`` is
    True, the expected cost.

    The computation is done in log space, so the cost function must be in log
    space as well.

    .. _`Bellman equation`: https://en.wikipedia.org/wiki/Bellman_equation

    :param log_cost_func: The cost function to compute the expected cost of.
        Must be in log space, and must take a single argument, which is a
        tensor of shape ``tree_shape + (n_states, n_states)``, returning a
        tensor of shape ``tree_shape``. The last two dimensions of the input
        correspond to the initial and final states of the HMM.
    :param bool return_log_costs: Whether to return the expected cost as well.
    :param terminal_log_cost: A tensor of terminal costs to start the calculation
        with. Defaults to ``log_cost_func(self.beliefs[-1].logits)``
    :param bool is_cost_func: If True, permutation layers take the minimum
        over children; if False they take the maximum (i.e. the function is
        treated as a reward rather than a cost).
    :return: A list of permutation indices, and, if ``return_log_costs`` is
        True, the expected cost.
    """
    if terminal_log_cost is None:
        terminal_log_cost = log_cost_func(self.beliefs[-1].logits)
    costs = [terminal_log_cost]
    perm_idxs = []
    # Walk from the leaves toward the root. Layers alternate between
    # offset=True (children indexed by observations -> take expectation)
    # and offset=False (children indexed by permutations -> take min/max).
    for b in reversed(self.beliefs[:-1]):
        if b.offset:
            # yksk = b.joint_yksk(b.hmm.enumerate_support(expand=False).squeeze(-1), new_dim=True)
            # NOTE(review): elsewhere in this module the outcome support is
            # taken from ``hmm.observation_dist.enumerate_support(...)``;
            # confirm the HMM object itself exposes ``enumerate_support``,
            # otherwise this line should go through ``observation_dist``.
            yksk = b.joint_yksks0(b.hmm.enumerate_support(expand=False).squeeze(-1), new_dim=True).logsumexp(-2)
            yk = yksk.logsumexp(-1)
            # Compute the expectation of the cost function
            c = costs[-1] + yk
            c = c.logsumexp(-2)
            costs.append(c)
        else:
            # Gets the optimal permutation index.
            if is_cost_func:
                c, perm_idx = costs[-1].min(-2)
            else:
                c, perm_idx = costs[-1].max(-2)
            costs.append(c)
            perm_idxs.append(perm_idx)
    # Reverse both lists so that index 0 corresponds to the tree root.
    costs = costs[::-1]
    perm_idxs = perm_idxs[::-1]
    perm_tree = PermIdxTree(perm_idxs)
    if return_log_costs:
        return perm_tree, costs
    return perm_tree
def prune_tree(self, idx):
    """Prunes a tree according to the index.

    Keeps, in every layer below the root, only the child selected by
    ``idx`` along the leading tree dimension; the root layer itself is
    dropped. The tree is rebuilt in place.

    :param idx: The index corresponding to the data or permutations.
    """
    # Align idx with the trailing (s_0, s_k) state dims of the logits.
    idx = idx.unsqueeze(-1).unsqueeze(-2)
    new_tree = []
    for b in self.beliefs[1:]:
        # Broadcast the index to the logits' shape so gather can select
        # the chosen child along dimension 0.
        idxb = torch.broadcast_tensors(idx, b.logits)[0]
        new_b = HMMBeliefState(b.logits.gather(0, idxb)[0], b.hmm, b.offset)
        new_tree.append(new_b)
    self.beliefs = new_tree
class PermIdxTree(object):
    """A tree of permutation indices, stored as a list of layered tensors.

    Produced by
    :py:meth:`~perm_hmm.policies.belief_tree.HMMBeliefTree.perm_idxs_from_log_cost`;
    consumed by the exhaustive policies to look up the optimal permutation
    for each observed data sequence.
    """

    def __init__(self, idx_list):
        # List of tensors, one per layer of the tree.
        self.perm_idxs = idx_list

    def trim_list_tree(self):
        r"""Trims the tree to remove permutation layers.

        The tree is a list of tensors. The first tensor is the root of the tree, and
        each subsequent tensor is a layer of the tree. The tree has a layered
        structure, with a path to a node in the tree given by the indices
        corresponding to the list :math:`(y_0, \sigma_0, y_1, \sigma_1, \ldots,)`,
        where :math:`y_i` is the index of the observation at step :math:`i`, and
        :math:`\sigma_i` is the index of the permutation at step :math:`i`.

        Once the permutations have been selected, the tree should be trimmed to
        remove the permutation layers, which is done by this function.
        """
        new_tree = []
        p = self.perm_idxs[0]
        p = p.squeeze()
        new_tree.append(p)
        for p in self.perm_idxs[1:]:
            p = p.squeeze()
            # Substitute each already-trimmed layer's chosen permutation
            # into the current layer, collapsing the sigma dimensions.
            for ntp in new_tree:
                idx = torch.meshgrid([torch.arange(s) for s in ntp.shape])
                p = p[idx + (ntp,)]
            new_tree.append(p)
        self.perm_idxs = new_tree

    def expand_batch(self, data_len):
        r"""Adds a dimension of length data_len to each tensor in the tree.

        This function is used to expand the tree.

        :param int data_len: Length of new dimension.
        :return: Same list of tensors, but with a new dimension added to each
            tensor.
        """
        self.perm_idxs = [b.unsqueeze(-1).expand((-1,)*(len(b.shape)) + (data_len,)) for b in self.perm_idxs]

    def prune_perm_tree(self, data_idx):
        r"""Prunes the tree after observing data.

        Given data indexed by data_idx, this function prunes the tree to remove
        the branches that are not relevant to the data.

        :param torch.Tensor data_idx: Index of data.
        :return: Same list of tensors, but with the branches not relevant to the
            data removed.
        """
        # data_idx = data_idx.unsqueeze(-1)
        new_tree = []
        # The root layer is consumed by this observation, so iterate from [1:].
        for pl in self.perm_idxs[1:]:
            # Advanced indexing moves the selected batch axis to the front;
            # movedim restores it to the trailing position.
            new_b = pl[data_idx, ..., torch.arange(data_idx.shape[-1])]
            new_b = new_b.movedim(0, -1)
            new_tree.append(new_b)
        self.perm_idxs = new_tree
# --- perm_hmm-master/perm_hmm/policies/exhaustive.py ---
r"""Exhaustively searches the best permutations for all possible observations.
This is a policy that exhaustively searches the best permutations for all
possible observations. This method is very slow and should only be used for
testing purposes. The complexity of this method is O((n*p)**t), where n is the
number of possible observations, p is the number of possible permutations, and
t is the number of steps. The computation works by generating a tree of all
possible observations and permutations, and then using the `Bellman equation`_
to compute the best permutation for each observation.
.. _`Bellman equation`: https://en.wikipedia.org/wiki/Bellman_equation
"""
import os
from copy import deepcopy
import torch
from perm_hmm.util import id_and_transpositions, all_strings
from perm_hmm.policies.belief import HMMBeliefState
from perm_hmm.policies.belief_tree import HMMBeliefTree, PermIdxTree
from perm_hmm.models.hmms import random_phmm
from perm_hmm.policies.policy import PermPolicy
import perm_hmm.log_cost as cf
class ExhaustivePolicy(PermPolicy):
    r"""Exhaustively searches the best permutations for all possible
    observations.

    This is a policy that exhaustively searches the best permutations for all
    possible observations. This method is very slow and should only be used for
    testing purposes. The complexity of this method is O((n*p)**t), where n is
    the number of possible observations, p is the number of possible
    permutations, and t is the number of steps.

    Instances of this class have the following attributes:

    data_to_idx:
        A function that maps the data to indices. If unspecified in
        initialization, defaults to::

            def data_to_idx(x):
                return x.long()

    look_ahead:
        :py:class:`~int` that indicates the number of steps to compute for.

    root_belief:
        The belief state that the tree of belief states is rooted at.

    hmm:
        :py:class:`~perm_hmm.models.hmms.PermutedDiscreteHMM` that is used to
        compute belief states.

    belief_tree:
        List of :py:class:`~perm_hmm.policies.belief.HMMBeliefState`s. Each
        element of the list corresponds to a layer of the tree.
        Computed using the method
        :py:meth:`~perm_hmm.policies.policy.PermPolicy.initialize_tree`.

    perm_tree:
        List of tensors. Gives the best permutation for each observation.
        Computed using the method
        :py:meth:`~perm_hmm.policies.exhaustive.ExhaustivePolicy.compute_perm_tree`.

    remaining_tree:
        When actually using the policy, this is the tree that is used to
        generate permutations. This tree is reset when using the method
        :py:meth:`~perm_hmm.policies.exhaustive.ExhaustivePolicy.reset`.
    """

    def __init__(self, possible_perms, hmm, look_ahead, data_to_idx=None, initialize_tree=True, root_belief=None, terminal_offset=False, save_history=False):
        r"""Initializes the policy.

        :param possible_perms: The possible permutations to select from at each
            step.
        :param hmm: The HMM to compute likelihoods with.
        :param int look_ahead: Number of steps of look-ahead to build the tree
            for.
        :param data_to_idx: The mapping from data to indices. Defaults to
            casting the data to ``long``.
        :param bool initialize_tree: Whether to build the belief tree
            immediately; if False, call ``initialize_tree`` later.
        :param root_belief: The belief state the tree is rooted at. Defaults
            to the HMM's initial distribution.
        :param bool terminal_offset: Whether the leaves of the tree should
            have ``offset=True`` (i.e. be indexed by observations).
        :param save_history: Whether to save the calculation history for the
            policy.
        """
        super().__init__(possible_perms, save_history=save_history)
        if data_to_idx is None:
            def data_to_idx(x):
                return x.long()
        self.data_to_idx = data_to_idx
        # Deep-copied so later mutation of the caller's HMM cannot change
        # the policy's computations.
        self.hmm = deepcopy(hmm)
        self.look_ahead = look_ahead
        self.root_belief = root_belief
        if initialize_tree:
            self.initialize_tree(self.look_ahead, root_belief=root_belief, terminal_offset=terminal_offset)
        else:
            self.belief_tree = None  # type: HMMBeliefTree | None
        self.perm_tree = None  # type: None | PermIdxTree
        self.remaining_tree = None  # type: None | PermIdxTree

    def initialize_tree(self, num_steps, root_belief=None, terminal_offset=False):
        r"""Computes the full tree of beliefs.

        This method computes the full tree of beliefs, which is used to select
        the best permutations. This method is very expensive, using memory
        O(n*(n*p)**t), where n is the number of possible observations, p is the
        number of possible permutations, and t is the number of steps.

        :param int num_steps: Number of steps to compute for.
        :param torch.Tensor root_belief: The initial belief state. Defaults to
            the initial distribution of the HMM.
        :param bool terminal_offset: Indicates whether the leaves of the tree
            should have offset=True, i.e. should be indexed by observations.
        :return: None
        """
        self.belief_tree = HMMBeliefTree(self.hmm, self.possible_perms, num_steps, root_belief=root_belief, terminal_offset=terminal_offset)

    def compute_perm_tree(self, log_cost=None, return_log_costs=False, delete_belief_tree=True, terminal_log_cost=None, is_cost_func=True):
        r"""After computing the full tree of beliefs using :py:meth:`~perm_hmm.policies.exhaustive.ExhaustivePolicy.initialize_tree`,
        this method is used to compute the best permutations for each
        observation.

        :param log_cost: A function for the cost of a terminal belief state.
            Defaults to the log of the min entropy of the initial state, in
            which case the quantity is maximized rather than minimized.
        :param bool return_log_costs: Indicates whether to return the optimal costs
            of the permutations.
        :param bool delete_belief_tree: Indicates whether to delete the belief
            tree after computing the permutations.
        :param terminal_log_cost: The log cost attached to the terminal belief
            states. If unspecified, will compute the log cost using the function
            log_cost.
        :param bool is_cost_func: Whether ``log_cost`` is a cost (minimized)
            or a reward (maximized). Forced to False when ``log_cost`` is
            left at its default.
        :return: If return_log_costs is true, returns the log costs of the
            optimal permutations at all nodes of the tree. Otherwise, returns
            None.
        :raises: ValueError if the belief tree has not been computed.
        """
        if self.belief_tree is None:
            # Raise ValueError, matching the documented contract and the
            # error raised by calculate_perm for the same situation.
            raise ValueError('Must compute belief tree first.')
        if log_cost is None:
            log_cost = cf.min_entropy
            is_cost_func = False
        r = self.belief_tree.perm_idxs_from_log_cost(log_cost, return_log_costs=return_log_costs, terminal_log_cost=terminal_log_cost, is_cost_func=is_cost_func)
        if return_log_costs:
            self.perm_tree, log_costs = r
        else:
            self.perm_tree = r
        # Collapse the permutation layers, leaving only observation-indexed
        # lookups.
        self.perm_tree.trim_list_tree()
        if delete_belief_tree:
            del self.belief_tree
            self.belief_tree = None
        if return_log_costs:
            return log_costs

    def reset(self, save_history=False):
        r"""Resets the policy.

        Because computing the full tree of beliefs is expensive, this method
        only resets the tree ``remaining_tree``, which acts as a cache.

        :param bool save_history: Indicates whether to save the calculation
            history for the policy.
        :return: None
        """
        super().reset(save_history=save_history)
        self.remaining_tree = None

    def calculate_perm(self, data):
        r"""Generates the best permutation for the given data.

        This method is called after using the
        :py:meth:`~perm_hmm.policies.exhaustive.ExhaustivePolicy.initialize_tree`
        and
        :py:meth:`~perm_hmm.policies.exhaustive.ExhaustivePolicy.compute_perm_tree`
        methods.

        HACK: The last perm to be returned will be some arbitrary permutation.
        This shouldn't matter, as the last permutation acts after the last data.

        :param data: The data observed.
        :return: The best permutation to apply having seen that data, and an
            empty history dict.
        :raises: ValueError if the perm tree has not been computed.
        """
        data_len = data.shape[0]
        if self.remaining_tree is None:
            if self.perm_tree is None:
                raise ValueError("Must compute perm tree first. Call "
                                 "compute_perm_tree.")
            # Copy so that pruning does not destroy the precomputed tree.
            self.remaining_tree = deepcopy(self.perm_tree)
            self.remaining_tree.expand_batch(data_len)
        # HACK: Last perm to be returned acts after the last data. Just return
        # Perm index 0 as default.
        if len(self.remaining_tree.perm_idxs) == 0:
            perm_idx = torch.zeros(data_len, dtype=torch.long)
        else:
            data_idx = self.data_to_idx(data)
            perm_idx = self.remaining_tree.perm_idxs[0][data_idx, torch.arange(data_len)]
        perm = self.possible_perms[perm_idx]
        if len(self.remaining_tree.perm_idxs) != 0:
            # data_idx is always bound here: it was set in the else branch
            # above whenever perm_idxs is non-empty.
            self.remaining_tree.prune_perm_tree(data_idx)
        return perm, {}
def subtree_cost(logits, hmm, possible_perms, num_steps, filename=None):
    """Computes the optimal expected log cost of a subtree rooted at ``logits``.

    Builds an :py:class:`ExhaustivePolicy` rooted at the given belief logits,
    computes its optimal permutation tree, and optionally saves the resulting
    log values, beliefs, and permutation indices to ``filename``.

    :param logits: Logits of the root belief state (``offset=True``).
    :param hmm: The HMM to compute likelihoods with.
    :param possible_perms: The allowable permutations.
    :param int num_steps: Number of look-ahead steps for the subtree.
    :param filename: If given, path where the computation is saved with
        :py:func:`torch.save`.
    :return: The log cost at the root of the subtree.
    """
    root = HMMBeliefState(logits, hmm, offset=True)
    sub_policy = ExhaustivePolicy(possible_perms, hmm, num_steps, root_belief=root)
    log_costs = sub_policy.compute_perm_tree(return_log_costs=True, delete_belief_tree=False)
    if filename is not None:
        payload = {
            b'log_values': log_costs,
            b'beliefs': sub_policy.belief_tree,
            b'perms': sub_policy.perm_tree.perm_idxs,
        }
        with open(filename, 'wb') as handle:
            torch.save(payload, handle)
    return log_costs[0]
def split_path(path):
    """Splits an alternating observation/permutation path into two lists.

    Elements at even positions are observation indices; elements at odd
    positions are permutation indices.

    :param path: Iterable of alternating observation and permutation indices.
    :return: A pair of lists ``(observations, permutations)``.
    """
    observations, permutations = [], []
    for position, element in enumerate(path):
        if position % 2 == 0:
            observations.append(element)
        else:
            permutations.append(element)
    return observations, permutations
def name_from_path(path):
    """Builds the save-file name encoding a tree path.

    Even-position entries of ``path`` are observation indices; odd-position
    entries are permutation indices. The resulting name has the form
    ``'obs<o0>_<o1>..._perm_<p0>_<p1>....pt'``.

    :param path: Sequence of alternating observation and permutation indices.
    :return: A ``.pt`` file name string.
    """
    obs_part = '_'.join(str(x) for i, x in enumerate(path) if i % 2 == 0)
    perm_part = '_'.join(str(x) for i, x in enumerate(path) if i % 2 == 1)
    return 'obs' + obs_part + '_perm_' + perm_part + '.pt'
def _all_cost_helper(leaf_logits, hmm, possible_perms, subtree_steps, path_to_root, directory=None, save=False):
    """Recursively computes subtree costs for every leaf of a belief tree.

    A leaf is reached when ``leaf_logits`` is 3-dimensional
    (batch, s_0, s_k); otherwise one tree dimension is peeled off and the
    recursion extends ``path_to_root`` with the branch index.

    :param leaf_logits: Logits of the (possibly nested) leaf belief states.
    :param hmm: The HMM to compute likelihoods with.
    :param possible_perms: The allowable permutations.
    :param int subtree_steps: Look-ahead steps for each leaf subtree.
    :param path_to_root: Indices taken from the root to reach this node.
    :param directory: Optional directory for saved subtree computations.
    :param bool save: Whether to save each leaf's subtree computation.
    :return: A tensor of subtree costs, stacked along the tree dimensions.
    """
    if len(leaf_logits.shape) != 3:
        # Recurse over the leading tree dimension, recording the branch
        # index in the path.
        branches = [
            _all_cost_helper(branch, hmm, possible_perms, subtree_steps,
                             path_to_root + [branch_idx], directory, save=save)
            for branch_idx, branch in enumerate(leaf_logits)
        ]
        return torch.stack(branches)
    filename = None
    if save:
        filename = name_from_path(path_to_root)
        if directory is not None:
            filename = os.path.join(directory, filename)
    return subtree_cost(leaf_logits, hmm, possible_perms, subtree_steps, filename=filename)
def all_subtree_costs(belief_leaves, hmm, possible_perms, subtree_steps, directory=None, save=False):
    """Computes the optimal subtree cost at every leaf of a belief tree.

    Thin wrapper around :py:func:`_all_cost_helper`, starting the recursion
    from the leaves' logits with an empty path.

    :param belief_leaves: Belief state whose ``.logits`` carry the tree
        dimensions followed by (batch, s_0, s_k).
    :param hmm: The HMM to compute likelihoods with.
    :param possible_perms: The allowable permutations.
    :param int subtree_steps: Look-ahead steps for each leaf subtree.
    :param directory: Optional directory for saved subtree computations.
    :param bool save: Whether to save each leaf's subtree computation.
    :return: A tensor of per-leaf optimal costs.
    """
    return _all_cost_helper(belief_leaves.logits, hmm, possible_perms,
                            subtree_steps, [], directory=directory, save=save)
class SplitExhaustivePolicy(ExhaustivePolicy):
    r"""An exhaustive policy whose look-ahead is split into two stages.

    The first ``split`` steps are built eagerly as a belief tree with
    observation-indexed leaves (``terminal_offset=True``); the optimal costs
    of the remaining ``look_ahead - split + 1`` steps are computed per-leaf
    by :py:func:`all_subtree_costs` (via
    :py:meth:`make_initial_cost`) and cached in ``initial_cost``, then used
    as the terminal costs of the first-stage tree.
    """

    def __init__(self, possible_perms, hmm, look_ahead, split=None, data_to_idx=None, root_belief=None, save_history=False):
        # Default split: the (rounded-up) larger half of the look-ahead goes
        # into the eagerly-built first stage.
        if split is None:
            split = look_ahead - look_ahead // 2
        super().__init__(
            possible_perms,
            hmm,
            split,
            data_to_idx=data_to_idx,
            initialize_tree=True,
            root_belief=root_belief,
            terminal_offset=True,
            save_history=save_history,
        )
        self.total_look_ahead = look_ahead
        # Cache of per-leaf second-stage costs; filled by make_initial_cost.
        self.initial_cost = None

    def make_initial_cost(self, root_directory=None, save=False):
        """Computes and caches the optimal costs of the second-stage subtrees.

        Must be called before :py:meth:`compute_perm_tree`.

        :param root_directory: Optional directory for saved subtree
            computations.
        :param bool save: Whether to save each leaf's subtree computation.
        :return: None; the result is stored in ``self.initial_cost``.
        """
        self.initial_cost = all_subtree_costs(self.belief_tree.beliefs[-1], self.hmm, self.possible_perms, self.total_look_ahead - self.look_ahead + 1, directory=root_directory, save=save)

    def compute_perm_tree(self, log_cost=None, return_log_costs=True,
                          delete_belief_tree=False, terminal_log_cost=None,
                          is_cost_func=False):
        """Computes the permutation tree using the cached second-stage costs.

        NOTE: ``log_cost`` is accepted for signature compatibility with the
        parent class but is not used; the terminal costs always come from
        ``terminal_log_cost`` or ``self.initial_cost``.

        :raises: ValueError if ``make_initial_cost`` has not been called and
            no ``terminal_log_cost`` is supplied.
        """
        if terminal_log_cost is None:
            if self.initial_cost is None:
                raise ValueError("Please call make_initial_cost first.")
            terminal_log_cost = self.initial_cost
        return super().compute_perm_tree(
            return_log_costs=return_log_costs,
            terminal_log_cost=terminal_log_cost,
            delete_belief_tree=delete_belief_tree,
            is_cost_func=is_cost_func,
        )
def main():
    """Small demonstration: builds an exhaustive policy for a random
    3-state HMM with 4 steps of look-ahead, then prints the optimal
    permutations for all possible observation strings and the root log cost.
    """
    n_states = 3
    n_steps = 4
    model = random_phmm(n_states)
    perms = id_and_transpositions(n_states)
    policy = ExhaustivePolicy(perms, model, n_steps)
    log_costs = policy.compute_perm_tree(return_log_costs=True)
    chosen_perms = policy.get_perms(all_strings(n_steps))
    print(chosen_perms)
    print(log_costs[0])
# Run the demonstration only when executed as a script.
if __name__ == '__main__':
    main()
# --- perm_hmm-master/perm_hmm/policies/belief.py ---
r"""Computes belief states of HMMs with permutations.
This module contains the :py:class:`~perm_hmm.policies.belief.HMMBeliefState`
class, which computes belief states of HMMs with permutations in a tree-like
manner.
This module also contains the
:py:class:`~perm_hmm.policies.belief.BeliefStatePolicy` class, which is an
abstract class that shows how to generically use the HMMBeliefState for
computing permutations.
"""
from copy import deepcopy
import torch
from perm_hmm.util import ZERO
from perm_hmm.policies.policy import PermPolicy
class HMMBeliefState(object):
    r"""Stores the current belief state of the HMM, and provides update methods.

    The HMMBeliefState has an attribute ``.logits`` that is a tensor of shape
    (n_outcomes, n_permutations, n_outcomes, ...) + (n_batch, n_states,
    n_states). The first part of the shape is the tree, while the second part
    is the belief state attached to each leaf. The path from the root to a leaf
    is indexed by the sequence :math:`(y_0, \sigma_0, y_1, \sigma_1, ...)`,
    where the :math:`y_i` are the outcomes at the :math:`i`-th step, and the
    :math:`\sigma_i` are the permutations at the :math:`i`-th step. Because of
    the layered structure of this tree, the leaves can be indexed with either an
    outcome or a permutation. To account for this possibility, the BeliefPolicy
    class comes with a flag called ``.offset`` that is true if and only if the
    leaves of the tree are labelled by outcomes. The reason for the name offset
    is because if the leaves of the tree are outcomes, then the belief states
    are :math:`\mathbb{P}(s_0, s_{k+1}|y^k)`, while otherwise it is
    :math:`\mathbb{P}(s_0, s_k|y^k)`, so that the "current" state step index is
    offset from the "current" data step index.

    Instances of this class have the following attributes:

    ``logits``:
        :py:class:`~torch.Tensor` that stores the log probabilities
        :math:`\mathbb{P}(s_0, s_k|y^k)` if `offset` is `False`,
        or :math:`\mathbb{P}(s_0, s_k|y^{k-1})` when it is `True`.
        Dimension -1 is the :math:`s_k` dimension, -2 is the
        :math:`s_0` dimension, and -3 is the :math:`y^k` dimension.

    ``offset``:
        A boolean indicating whether this belief state has the :math:`y` step
        index offset from the :math:`s` step index. This is necessary because
        the updating of the belief state takes place in two steps: We update the
        belief state based on new data, and we update according to a chosen
        permutation that induces a particular transition matrix. The Bayes' rule
        update is only possible when `offset` is True, and updates the belief
        state from :math:`\mathbb{P}(s_0, s_k|y^{k-1})` to
        :math:`\mathbb{P}(s_0, s_k|y^k)`, while the transition update is only
        possible when `offset` is False, and updates the belief state from
        :math:`\mathbb{P}(s_0, s_k|y^k)` to
        :math:`\mathbb{P}(s_0, s_{k+1}|y^k)`.

    ``hmm``:
        A :py:class:`~perm_hmm.models.hmms.PermutedDiscreteHMM` that is used
        to update the belief state.
    """

    def __init__(self, logits, hmm, offset=True):
        r"""Initializes the object.

        Stores the distribution
        :math:`\mathbb{P}(s_0, s_k|y^{k-1})` if `offset=True`,
        and :math:`\mathbb{P}(s_0, s_k|y^k)` if `offset=False`.
        Dimension -1 is the :math:`s_k` dimension, -2 is the
        :math:`s_0` dimension, and -3 is the :math:`y^k` dimension.

        We opt to put :math:`y^k` in the -3 dimension rather than the 0
        dimension because it often needs to be updated.

        :param torch.Tensor logits: A tensor of shape (..., n_states, n_states).
        :param DiscreteHMM hmm: The HMM to use for updating the belief state.
        :param bool offset: Whether the leaves of the tree are labelled by
            outcomes or permutations.
        """
        self.logits = logits  # type: torch.Tensor
        # Offset is true if state is :math:`\mathbb{P}(s_0, s_k|y^{k-1})`,
        # and false if it is :math:`\mathbb{P}(s_0, s_k|y^k)`.
        self.offset = offset
        self.hmm = hmm

    @classmethod
    def from_hmm(cls, hmm):
        r"""A factory method that constructs an HMMBeliefState from an HMM.

        Outputs the belief state
        :math:`\mathbb{P}(s_0, s_0'|y^{-1}) = \delta_{s_0, s_0'}`, where
        the data :math:`y^{-1}` is the empty tuple.

        :param perm_hmm.models.hmms.PermutedDiscreteHMM hmm: A DiscreteHMM.
        :return: An HMMBeliefState with offset = True
        """
        logits = hmm.initial_logits.clone().detach()
        # Fill off-diagonal entries with log(ZERO) (effectively -inf), and
        # place the initial log-probabilities on the diagonal so that
        # s_0 == s_0' with probability 1.
        val = torch.full((logits.shape[0], logits.shape[0]), ZERO, dtype=logits.dtype).log()
        val[torch.arange(logits.shape[0]), torch.arange(logits.shape[0])] = logits
        # Renormalize in log space over both state dimensions.
        val -= val.logsumexp(-2, keepdim=True).logsumexp(-1, keepdim=True)
        offset = True
        return cls(val, hmm, offset)

    @classmethod
    def from_skipfirsthmm(cls, skipfirsthmm, trivial_obs=None):
        r"""A factory method that constructs an HMMBeliefState from a
        :py:class:`~perm_hmm.models.hmms.SkipFirstDiscreteHMM`.

        The distinction with
        :py:meth:`~perm_hmm.policies.belief.from_hmm`
        is that, because of the encoding of the SkipFirstDiscreteHMM as having
        the first outcome being a dummy outcome, we need to perform an extra
        transition before seeing any data.

        :param perm_hmm.models.hmms.SkipFirstDiscreteHMM skipfirsthmm:
        :param torch.Tensor trivial_obs: The observation to use for the first
            bayes update.
        :return: :math:`\mathbb{P}(s_0, s_1|y^{-1}) = A_{s_0, s_1}`, where
            A is the transition matrix of the SkipFirstDiscreteHMM.
        """
        b = HMMBeliefState.from_hmm(skipfirsthmm)
        if trivial_obs is None:
            trivial_obs = torch.zeros((1,) + skipfirsthmm.observation_dist.event_shape[:1])
        if not trivial_obs.shape:
            # Scalar observation: give it an explicit trailing dimension.
            trivial_obs = trivial_obs.unsqueeze(-1)
        b = b.bayes_update(trivial_obs)
        # Apply the identity permutation to advance one transition step.
        trivial_perm = torch.arange(skipfirsthmm.initial_logits.shape[-1])
        b = b.transition(trivial_perm)
        return b

    @classmethod
    def from_expandedhmm(cls, expandedhmm, trivial_obs=None,
                         initial_states_to_keep=None):
        # Factory for "expanded" HMMs: builds the skip-first belief state and
        # then restricts the s_0 dimension to the requested initial states.
        # By default, keeps only initial states with nonzero probability.
        # NOTE(review): the exact semantics of the expanded HMM encoding are
        # not visible here -- confirm against the hmms module.
        if initial_states_to_keep is None:
            initial_states_to_keep = ~torch.isclose(
                expandedhmm.initial_logits.exp(), torch.tensor(0, dtype=float), atol=1e-7)
        b = cls.from_skipfirsthmm(expandedhmm, trivial_obs=trivial_obs)
        b.logits = b.logits[..., initial_states_to_keep, :]
        return b

    def joint_yksks0(self, obs, observation_dist=None, new_dim=False):
        r"""Computes :math:`\mathbb{P}(s_0, s_k, y_k|y^{k-1})`

        This method is used to compute the joint probability of the
        initial state, the current state, and the current observation.

        :param torch.Tensor obs: The observed data.
        :param observation_dist: Defaults to self.hmm.observation_dist
        :param bool new_dim: Indicates whether to add a new dimension to
            the output. If specified, the output will add a new dimension of
            length equal to that of obs, in position -4.
        :return: A tensor containing the joint distribution, with dimensions
            -1: s_k, -2: s_0, -3: y_k if `new_dim=False`, and batch otherwise,
            -4: y_k if `new_dim=True`.
        :raises ValueError: If ``self.offset`` is False.
        """
        if not self.offset:
            raise ValueError("Cannot compute joint distribution if offset is False.")
        if observation_dist is None:
            observation_dist = self.hmm.observation_dist
        lls = observation_dist.log_prob(obs.unsqueeze(-1))
        # Make space for initial state
        lls = lls.unsqueeze(-2)
        v = self.logits
        if new_dim:
            v = v.unsqueeze(-3)
        v = v + lls
        if new_dim:
            if len(v.shape) == 3:
                # No batch dimension present: add one before transposing.
                v = v.unsqueeze(-4)
            return v.transpose(-3, -4)
        return v

    def joint_yksk(self, obs, observation_dist=None, new_dim=False):
        r"""Computes :math:`\mathbb{P}(s_k, y_k|y^{k-1})`

        This method is used to compute the joint probability of the current
        state and the current observation. This method is necessary because
        just using the
        :py:meth:`~perm_hmm.policies.belief.HMMBeliefState.joint_yksks0` method
        does unnecessary work if we only want
        :math:`\mathbb{P}(y_k, s_k|y^{k-1})`.

        :param obs: The observed data.
        :param observation_dist: The observation distribution to update the
            belief state with. Defaults to ``self.hmm.observation_dist``
        :param bool new_dim: Indicates whether to add a new dimension to
            the output. If specified, the output will add a new dimension of
            length equal to that of obs, in position -3.
        :return: The joint current state and observation distribution, with
            dimensions -1: s_k, -2: y_k if `new_dim=False`, and batch otherwise,
            -3: y_k if `new_dim=True`.
        :raises ValueError: If ``self.offset`` is False.
        """
        if not self.offset:
            raise ValueError("Cannot compute joint distribution if offset is False.")
        if observation_dist is None:
            observation_dist = self.hmm.observation_dist
        lls = observation_dist.log_prob(obs.unsqueeze(-1))
        # Marginalize over initial state
        v = self.logits.logsumexp(-2)
        if new_dim:
            v = v.unsqueeze(-2)
        v = v + lls
        if new_dim:
            if len(v.shape) == 2:
                # No batch dimension present: add one before transposing.
                v = v.unsqueeze(-3)
            return v.transpose(-2, -3)
        return v

    def bayes_update(self, obs, hmm=None, new_dim=False):
        r"""Starting from :math:`\mathbb{P}(s_0, s_k|y^{k-1})`, we update the
        belief state to :math:`\mathbb{P}(s_0, s_k|y^k)` using Bayes' rule.

        This method is variously used to expand the leaves of the tree, or to
        update the tree. This method is a constructor-style method, meaning
        it returns a new object. We make new objects because sometimes it is
        useful to have both the original and the updated belief state.

        :param obs: The observed data.
        :param hmm: The hmm containing the distribution to update with.
            Defaults to self.hmm
        :param bool new_dim: Indicates whether to add a new dimension to
            the output. If specified, the output will add a new dimension of
            length equal to that of obs, in position -4.
        :return: A new :py:class:`~perm_hmm.policies.belief.HMMBeliefState`
            object with .offset=False, and .logits = the updated belief state.
            .logits has dimensions -1: s_k, -2: s_0, -3: batch, and -4: new
            dimension corresponding to obs if new_dim=True.
        """
        if hmm is None:
            hmm = self.hmm
        v = self.joint_yksks0(obs, observation_dist=hmm.observation_dist, new_dim=new_dim)
        # Normalize the joint over both state dimensions to condition on y_k.
        v -= v.logsumexp(-1, keepdim=True).logsumexp(-2, keepdim=True)
        return self.__class__(v, hmm, offset=False)

    def transition(self, perm, hmm=None, new_dim=False):
        r"""Starting from :math:`\mathbb{P}(s_0, s_k|y^k)`, we update the
        belief state to :math:`\mathbb{P}(s_0, s_{k+1}|y^k)` using
        the transition matrix of the input hmm.

        This method is variously used to expand the leaves of the tree, or to
        update the tree. This method is a constructor-style method, meaning
        it returns a new object. We make new objects because sometimes it is
        useful to have both the original and the updated belief state.

        :param perm: The permutation(s) to apply to the transition matrix.
        :param hmm: The hmm containing the log transition matrix to use.
            Defaults to self.hmm
        :param bool new_dim: Indicates whether to add a new dimension to
            the output. If specified, the output will add a new dimension of
            length equal to that of perm, in position -4.
        :return: A new :py:class:`~perm_hmm.policies.belief.HMMBeliefState`
            with .offset=True, and .logits = the updated belief state. The
            logits have dimensions -1: s_k, -2: s_0, -3: batch, and -4: new
            dimension corresponding to perm if new_dim=True.
        :raises ValueError: If ``self.offset`` is True.
        """
        if self.offset:
            raise ValueError("Cannot transition belief state if offset is True.")
        # Make space for initial state
        if hmm is None:
            hmm = self.hmm
        transition_logits = hmm.transition_logits[perm].unsqueeze(-3)
        # Unsqueeze -1 for state :math:`s_{k+1}`,
        # unsqueeze -4 for perm choice.
        # Logsumexp -2 to marginalize :math:`s_k`.
        v = (self.logits.unsqueeze(-1).unsqueeze(-4) + transition_logits).logsumexp(-2)
        if new_dim:
            return self.__class__(v.transpose(-3, -4), hmm, offset=True)
        return self.__class__(v.squeeze(-3), hmm, offset=True)
class BeliefStatePolicy(PermPolicy):
    r"""Abstract PermPolicy class for selecting a permutation based on the
    most up-to-date belief state.

    Generically, one wants to make a policy based on the most updated belief
    state. This class takes care of all the various updating necessary. One can
    subclass this class and write a method calculate_perm_from_belief that
    calculates the permutation based on the beliefs
    :math:`\mathbb{P}(s_0, s_k|y^{k})`.

    In addition to the attributes of the base class, instances of this class
    have the following attributes:

    ``hmm``:
        A :py:class:`~perm_hmm.models.hmms.PermutedDiscreteHMM` that is used to
        update the belief state.

    ``belief_state``:
        A :py:class:`~perm_hmm.policies.belief.HMMBeliefState` that represents
        the current belief.
    """

    def __init__(self, possible_perms, hmm, save_history=False):
        r"""Initializes the policy.

        .. seealso:: :py:meth:`~perm_hmm.policies.policy.PermPolicy.__init__`

        :param possible_perms: The possible permutations.
        :param hmm: The HMM used to compute the outcome probabilities.
        :param save_history: Whether to save the computation history the
            next time that the policy is called. This history is saved in the
            attribute .calc_history.
        """
        super().__init__(possible_perms, save_history=save_history)
        # Deep-copied so later mutation of the caller's HMM cannot change
        # the policy's belief updates.
        self.hmm = deepcopy(hmm)
        self.belief_state = HMMBeliefState.from_hmm(hmm)

    def reset(self, save_history=False):
        r"""Resets the policy, and in particular resets the belief state.

        .. seealso:: :py:meth:`~perm_hmm.policies.policy.PermPolicy.reset`

        :param bool save_history: Whether to save the computation history
            the next time that the policy is used.
        :return: None
        """
        super().reset(save_history=save_history)
        self.belief_state = HMMBeliefState.from_hmm(self.hmm)

    def calculate_perm_from_belief(self, return_dict=False):
        r"""The method that calculates the permutation based on the most
        up-to-date belief state.

        This method should be overwritten by subclasses.
        For an example implementation, see the method from the MinTreePolicy.

        .. seealso:: :py:meth:`~perm_hmm.policies.min_tree.MinTreePolicy.calculate_perm`

        :param bool return_dict: Whether to return a dictionary of the
            computation history.
        :return: The permutation, and if return_dict=True, a dictionary of the
            computation history.
        :raises NotImplementedError: Always, on this abstract base class.
        """
        raise NotImplementedError

    def bayes_update(self, obs):
        r"""Updates the belief state using Bayes' rule.

        Uses the HMM to update the belief state.

        .. seealso:: :py:meth:`~perm_hmm.policies.belief.HMMBeliefState.bayes_update`

        :param obs: The observation to update the belief state with.
        :return: None
        """
        self.belief_state = self.belief_state.bayes_update(obs)

    def transition(self, perm):
        r"""Updates the belief state using the transition probabilities, and the
        selected permutation.

        Uses the HMM and the selected permutation to update the belief state.

        .. seealso:: :py:meth:`~perm_hmm.policies.belief.HMMBeliefState.transition`

        :param perm: The permutation used.
        :return: None
        """
        self.belief_state = self.belief_state.transition(perm.unsqueeze(-2))

    def calculate_perm(self, obs, event_dims=0):
        r"""Generates a permutation based on the most up-to-date belief state.

        This method first updates the belief state using the observation, then
        calculates the permutation based on the new belief state, using the
        method
        :py:meth:`~perm_hmm.policies.belief.BeliefStatePolicy.calculate_perm_from_belief`.
        After the permutation is calculated, the belief state is updated using
        the permutation, via the method
        :py:meth:`~perm_hmm.policies.belief.BeliefStatePolicy.transition`.
        Then, the permutation is returned, along with the computation history.

        :param obs: The observation to update the belief state with.
        :param event_dims: Accepted for interface compatibility; not used by
            this implementation.
        :return: The permutation and the computation history.
        """
        self.bayes_update(obs)
        perm, diction = self.calculate_perm_from_belief(return_dict=True)
        self.transition(perm)
        diction[b'belief'] = self.belief_state.logits.clone().detach()
        return perm, diction

    def _calculate_beliefs(self, data, perms):
        r"""Given a set of data and a set of perms, calculates the belief states
        for the sequence.

        This method is useful to calculate the belief states for a sequence of
        data and permutations.

        :param data: The data to calculate the belief states for.
        :param perms: The permutations that follow the data. Must have the same
            batch shape as data.
        :raises: ValueError if the batch shapes of data and perms are not the
            same.
        :raises: ValueError if the data is not of a shape that is compatible
            with the HMM.
        :return: The belief states.
        """
        shape = perms.shape[:-1]
        if not data.shape[:len(shape)] == shape:
            raise ValueError("Data and permutations must have same batch shape, but got {} and {}".format(data.shape[:len(shape)], shape))
        try:
            # Probe the HMM with the data purely as a shape-compatibility check.
            _ = self.hmm.log_prob(data)
        except (ValueError, RuntimeError) as e:
            raise ValueError("Data does not have a compatible shape") from e
        # FixedPolicy is defined later in this module; the name resolves at
        # call time, not at class-definition time.
        sel = FixedPolicy(self.possible_perms, self.hmm, perms, save_history=True)
        sel.get_perms(data)
        return sel.calc_history[b"belief"]
class FixedPolicy(BeliefStatePolicy):
    r"""A policy that replays a predetermined sequence of permutations.

    Instead of choosing permutations from the data, this policy ignores the
    observations and emits the entries of a fixed permutation tensor in
    order. It is used to reconstruct the belief states corresponding to a
    given sequence of data and permutations.

    In addition to the attributes of the base class, instances of this
    class have the following attributes:

    ``step``:
        An :py:class:`~int` that indicates how many permutations have been
        emitted for the current sequence; reset to 0 by the reset method.
    ``perms``:
        A :py:class:`~torch.Tensor` that is the fixed set of permutations
        to be used.
    """

    def __init__(self, possible_perms, hmm, perms, save_history=False):
        r"""Initializes the policy with a fixed permutation sequence.

        The ``perms`` argument holds the permutations that will be
        returned, independent of the data.

        :param torch.Tensor possible_perms: The allowed permutations.
        :param perm_hmm.models.hmms.PermutedDiscreteHMM hmm:
            The HMM used to calculate the belief states.
        :param torch.Tensor perms: The fixed sequence of permutations to be returned.
        :param bool save_history: Whether to save the computation history.
        """
        super().__init__(possible_perms, hmm, save_history=save_history)
        self.perms = perms
        self.step = 0

    def reset(self, save_history=False):
        r"""Resets the policy to the start of a new sequence.

        Clears the base-class state (and computation history), then rewinds
        the step counter to 0.

        :param bool save_history: Whether to save the computation history.
        :return: None
        """
        super().reset(save_history=save_history)
        self.step = 0

    def calculate_perm_from_belief(self, return_dict=False):
        r"""Returns the next permutation from the fixed sequence.

        Trivial implementation of the base class method: the belief state
        is ignored, and the permutation for the current step is returned
        (with an empty history dictionary when ``return_dict`` is True).

        :param bool return_dict: Whether to return the computation history.
        :return: The permutation and, if ``return_dict`` is True, the
            computation history.
        """
        current = self.perms[..., self.step, :]
        self.step += 1
        return (current, {}) if return_dict else current
| 21,372 | 43.807128 | 138 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.