| | import cv2 |
| | import numpy as np |
| | import gradio as gr |
| | from PIL import Image |
| | from io import BytesIO |
| | import base64 |
| | |
| | import numpy as np |
| | import matplotlib.pyplot as plt |
| |
|
| | import os |
| | import pathlib |
| | import time |
| | import datetime |
| | import glob |
| | import random |
| |
|
| | from PIL import Image |
| |
|
| | |
| | import tensorflow as tf |
| | from tensorflow import keras |
| |
|
def fix_image(upload_img):
    """Run the trained generator on an uploaded image and return an anomaly heatmap.

    The upload is converted to grayscale 256x256, reconstructed by the
    generator, and the per-pixel absolute difference between reconstruction
    and input is rendered with the JET colormap.

    Args:
        upload_img: image as a numpy array (Gradio "image" input).

    Returns:
        256x256x3 uint8 RGB heatmap as a numpy array.
    """
    image = Image.fromarray(upload_img)
    image = image.convert('L')
    image = image.resize((256, 256))

    generator = Generator(G_input_dim)
    generator.load_weights('checkpoints/cp-10.h5')
    # saved_images() feeds the model inputs scaled to [0, 1], so its
    # prediction is on a normalized scale, not 0-255.
    fixed = saved_images(generator, upload_img)
    fixed = np.reshape(fixed, [256, 256, 1]).astype(np.float32)

    # Bring the reference image onto the same [0, 1] scale as the model
    # output before differencing.  The original code subtracted a 0-255
    # uint8 image from the normalized prediction, so the "difference" was
    # dominated by the raw pixel values and the uint8 cast wrapped around.
    image = np.asarray(image).astype(np.float32) / 255.0
    image = np.reshape(image, [256, 256, 1])

    # NOTE(review): the generator's final activation is tanh ([-1, 1]);
    # if training targets were scaled to [0, 1] this clip is correct,
    # otherwise rescale with (fixed + 1) / 2 first — confirm against the
    # training pipeline.
    diff = np.uint8(np.clip(np.abs(fixed - image), 0.0, 1.0) * 255.0)
    heatmap = cv2.applyColorMap(diff, cv2.COLORMAP_JET)
    heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
    heatmap = cv2.resize(heatmap, (256, 256))

    # PIL round-trip kept for parity with the original pipeline.
    heatmap = Image.fromarray(heatmap)
    heatmap = heatmap.convert('RGB')
    return np.asarray(heatmap)
| |
|
def saved_images(model, input):
    """Preprocess a raw image and return the model's reconstruction.

    Args:
        model: Keras model mapping a (1, 256, 256, 1) float tensor in
            [0, 1] to a prediction batch.
        input: raw image as a numpy array.  (Parameter name kept for
            backward compatibility even though it shadows the builtin.)

    Returns:
        numpy array of shape (256, 256) when the module-level CHANNEL
        is 1, otherwise (256, 256, 3).
    """
    img = Image.fromarray(input)
    img = img.convert('L')
    img = img.resize((256, 256))

    # Normalize to [0, 1] and add the batch dimension in one pass; the
    # original converted to a tensor twice and concatenated a
    # single-element list, which was a no-op.
    batch = np.reshape(np.asarray(img), [1, 256, 256, 1])
    batch = tf.cast(tf.convert_to_tensor(batch), dtype=tf.float32) / 255.

    prediction = model(batch, training=False)
    if CHANNEL == 1:
        return prediction[0].numpy().flatten().reshape(256, 256)
    return prediction[0].numpy().flatten().reshape(256, 256, 3)
| |
|
| |
|
| | |
def upsample(filters, size, dropout=0.5, max_pool=True, batch_norm=True):
    """Build one decoder stage: stride-2 Conv2DTranspose plus optional extras.

    Args:
        filters: number of output channels.
        size: transposed-convolution kernel size.
        dropout: dropout rate, or None to skip the Dropout layer.
        max_pool: add a MaxPool2D layer.  NOTE(review): pool_size=(1, 1)
            with default strides is an identity op — presumably kept so
            the layer count mirrors downsample(); confirm before removing.
        batch_norm: add a BatchNormalization layer.

    Returns:
        tf.keras.Sequential that doubles the spatial resolution.
    """
    initializer = tf.random_normal_initializer(0., 0.02)

    result = tf.keras.Sequential()
    result.add(
        tf.keras.layers.Conv2DTranspose(filters, size, strides=2,
                                        padding='same',
                                        kernel_initializer=initializer,
                                        use_bias=False)
    )

    if max_pool:
        result.add(tf.keras.layers.MaxPool2D(pool_size=(1, 1), strides=None, padding='same'))

    if batch_norm:
        result.add(tf.keras.layers.BatchNormalization())

    # `is not None` instead of `!= None`: identity comparison is the idiom,
    # and a dropout rate of 0 (falsy) is still a valid explicit value.
    if dropout is not None:
        result.add(tf.keras.layers.Dropout(dropout))

    result.add(tf.keras.layers.ReLU())
    return result
| |
|
| | |
def downsample(filters, kernel_size, strides=2, dropout=0.5, max_pool=True, batch_norm=True):
    """Build one encoder stage: strided Conv2D plus optional extras.

    Args:
        filters: number of output channels.
        kernel_size: convolution kernel size.
        strides: convolution stride (2 halves the spatial resolution).
        dropout: dropout rate, or None to skip the Dropout layer.
        max_pool: add a MaxPool2D layer.  NOTE(review): pool_size=(1, 1)
            with default strides is an identity op — confirm before removing.
        batch_norm: add a BatchNormalization layer.

    Returns:
        tf.keras.Sequential ending in LeakyReLU.
    """
    initializer = tf.random_normal_initializer(0., 0.02)

    result = tf.keras.Sequential()
    result.add(
        tf.keras.layers.Conv2D(filters, kernel_size, strides=strides, padding='same',
                               kernel_initializer=initializer, use_bias=False))

    if max_pool:
        result.add(tf.keras.layers.MaxPool2D(pool_size=(1, 1), strides=None, padding='same'))

    if batch_norm:
        result.add(tf.keras.layers.BatchNormalization())

    # `is not None` instead of `!= None`: identity comparison is the idiom.
    if dropout is not None:
        result.add(tf.keras.layers.Dropout(dropout))

    result.add(tf.keras.layers.LeakyReLU())
    return result
| |
|
| | |
def Generator(image_shape):
    """Build a U-Net-style generator.

    Eight downsample() encoder stages shrink the input, seven upsample()
    decoder stages grow it back, with skip connections (controlled by the
    module-level Bipass_* flags) concatenating each decoder output with
    the matching encoder feature map.  A final stride-2 Conv2DTranspose
    restores full resolution with a tanh activation.

    NOTE(review): relies on module-level configuration globals
    (n_E*, kernel_size_E*, stride_E*, DropOut_E*, MaxPooling_E*,
    BatchNorm_E*, Bipass_*, CHANNEL) being defined before the call —
    in this file they are set in the __main__ block.

    Args:
        image_shape: input tensor shape, e.g. (256, 256, 1).

    Returns:
        tf.keras.Model mapping 'input_image' to a tanh-activated image
        with CHANNEL output channels.
    """
    initializer = tf.random_normal_initializer(0., 0.02)

    input_image = keras.layers.Input(shape=image_shape, name='input_image')
    x = input_image

    # Encoder: each stage applies a strided convolution (stride_E* = 2).
    enc1 = downsample(n_E1, kernel_size_E1, stride_E1, DropOut_E1, MaxPooling_E1, BatchNorm_E1)(x)
    enc2 = downsample(n_E2, kernel_size_E2 ,stride_E2, DropOut_E2, MaxPooling_E2, BatchNorm_E2)(enc1)
    enc3 = downsample(n_E3, kernel_size_E3, stride_E3, DropOut_E3, MaxPooling_E3, BatchNorm_E3)(enc2)
    enc4 = downsample(n_E4, kernel_size_E4, stride_E4, DropOut_E4, MaxPooling_E4, BatchNorm_E4)(enc3)
    enc5 = downsample(n_E5, kernel_size_E5 ,stride_E5, DropOut_E5, MaxPooling_E5, BatchNorm_E5)(enc4)
    enc6 = downsample(n_E6, kernel_size_E6 ,stride_E6, DropOut_E6, MaxPooling_E6, BatchNorm_E6)(enc5)
    enc7 = downsample(n_E7, kernel_size_E7 ,stride_E7, DropOut_E7, MaxPooling_E7, BatchNorm_E7)(enc6)
    enc8 = downsample(n_E8, kernel_size_E8, stride_E8, DropOut_E8, MaxPooling_E8, BatchNorm_E8)(enc7)

    # Decoder: mirrors encoder stages 7..1.  Note upsample() takes no
    # stride argument, so DropOut_E* lands in the dropout slot here.
    dec1 = upsample(n_E7, kernel_size_E7, DropOut_E7, MaxPooling_E7, BatchNorm_E7)
    dec2 = upsample(n_E6, kernel_size_E6, DropOut_E6, MaxPooling_E6, BatchNorm_E6)
    dec3 = upsample(n_E5, kernel_size_E5, DropOut_E5, MaxPooling_E5, BatchNorm_E5)
    dec4 = upsample(n_E4, kernel_size_E4, DropOut_E4, MaxPooling_E4, BatchNorm_E4)
    dec5 = upsample(n_E3, kernel_size_E3, DropOut_E3, MaxPooling_E3, BatchNorm_E3)
    dec6 = upsample(n_E2, kernel_size_E2, DropOut_E2, MaxPooling_E2, BatchNorm_E2)
    dec7 = upsample(n_E1, kernel_size_E1, DropOut_E1, MaxPooling_E1, BatchNorm_E1)

    # Skip connections: decoder stage i pairs with encoder stage 8-i.
    enc_value_list = [enc7, enc6, enc5, enc4, enc3, enc2, enc1]

    dec_value_list = [dec1, dec2, dec3, dec4, dec5, dec6, dec7]

    bipass_list = [Bipass_7, Bipass_6, Bipass_5, Bipass_4, Bipass_3, Bipass_2, Bipass_1]

    # Start from the bottleneck.
    x = enc8

    for dec, enc, bipass in zip(dec_value_list, enc_value_list, bipass_list):
        x = dec(x)

        if bipass:
            x = tf.keras.layers.Concatenate()([x, enc])

    OUTPUT_CHANNELS = CHANNEL

    # Re-created here, shadowing the initializer above; redundant but harmless.
    initializer = tf.random_normal_initializer(0., 0.02)

    # Final stage: stride-2 transpose conv restores full resolution;
    # tanh keeps outputs in [-1, 1].
    last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, 4,
                                           strides=2,
                                           padding='same',
                                           kernel_initializer=initializer,
                                           activation='tanh')
    x = last(x)

    return tf.keras.Model(inputs=input_image, outputs=x)
| |
|
| | if __name__ == "__main__": |
| | CHANNEL = 1 |
| | |
| | G_input_dim = (256, 256, CHANNEL) |
| | EPOCH = 20 |
| |
|
| | |
| | n_E1 = 32 |
| | m_E1 = 128 |
| | stride_E1 = 2 |
| | kernel_size_E1 = 4 |
| | MaxPooling_E1 = True |
| | ActivationFunc_E1 = "Leaky_ReLu" |
| | BatchNorm_E1 = True |
| | DropOut_E1 = 0.5 |
| | Bipass_1 = True |
| |
|
| | |
| | n_E2 = 64 |
| | m_E2 = 64 |
| | stride_E2 = 2 |
| | kernel_size_E2 = 4 |
| | MaxPooling_E2 = True |
| | ActivationFunc_E2 = "Leaky_ReLu" |
| | alfa = 0.2 |
| | BatchNorm_E2 = True |
| | DropOut_E2 = 0.5 |
| | Bipass_2 = True |
| |
|
| | |
| | n_E3 = 128 |
| | m_E3 = 128 |
| | stride_E3 = 2 |
| | kernel_size_E3 = 4 |
| | MaxPooling_E3 = True |
| | ActivationFunc_E3 = "Leaky_ReLu" |
| | alfa = 0.2 |
| | BatchNorm_E3 = True |
| | DropOut_E3 = 0.5 |
| | Bipass_3 = True |
| |
|
| | |
| | n_E4 = 256 |
| | m_E4 = 256 |
| | stride_E4 = 2 |
| | kernel_size_E4 = 4 |
| | MaxPooling_E4 = True |
| | ActivationFunc_E4 = "Leaky_ReLu" |
| | alfa = 0.2 |
| | BatchNorm_E4 = True |
| | DropOut_E4 = 0.5 |
| | Bipass_4 = True |
| |
|
| | |
| | n_E5 = 512 |
| | m_E5 = 512 |
| | stride_E5 = 2 |
| | kernel_size_E5 = 4 |
| | MaxPooling_E5 = True |
| | ActivationFunc_E5 = "Leaky_ReLu" |
| | alfa = 0.2 |
| | BatchNorm_E5 = True |
| | DropOut_E5 = 0.5 |
| | Bipass_5 = True |
| |
|
| | |
| | n_E6 = 512 |
| | m_E6 = 512 |
| | stride_E6 = 2 |
| | kernel_size_E6 = 4 |
| | MaxPooling_E6 = True |
| | ActivationFunc_E6 = "Leaky_ReLu" |
| | alfa = 0.2 |
| | BatchNorm_E6 = True |
| | DropOut_E6 = 0.5 |
| | Bipass_6 = True |
| |
|
| | |
| | n_E7 = 512 |
| | m_E7 = 512 |
| | stride_E7 = 2 |
| | kernel_size_E7 = 4 |
| | MaxPooling_E7 = True |
| | ActivationFunc_E7 = "Leaky_ReLu" |
| | alfa = 0.2 |
| | BatchNorm_E7 = True |
| | DropOut_E7 = 0.5 |
| | Bipass_7 = True |
| |
|
| |
|
| | |
| | n_E8 = 512 |
| | m_E8 = 512 |
| | stride_E8 = 2 |
| | kernel_size_E8 = 4 |
| | MaxPooling_E8 = True |
| | ActivationFunc_E8 = "Leaky_ReLu" |
| | alfa = 0.2 |
| | BatchNorm_E8 = True |
| | DropOut_E8 = 0.5 |
| |
|
| | |
| | expantion = True |
| | |
| | |
| | input_size = (256, 256) |
| | output_size = (256, 256) |
| | demo = gr.Interface(fix_image, inputs="image", outputs="image",input_size=input_size, output_size=output_size) |
| | demo.launch() |
| |
|