# NOTE(review): the three lines below the original file started with
# ("Spaces:" / "Sleeping" / "Sleeping") are Hugging Face Spaces page-status
# residue from scraping, not program text.
import os

# Disable oneDNN custom ops. The original code assigned a plain Python
# variable (`TF_ENABLE_ONEDNN_OPTS = 0`), which has no effect on TensorFlow:
# the flag is an *environment variable* and must be set before TensorFlow is
# imported for it to be honored.
os.environ.setdefault("TF_ENABLE_ONEDNN_OPTS", "0")

import gradio as gr
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pandas as pd
import pathlib
import PIL
import PIL.Image
import cv2
from tqdm import tqdm
class Sampling(tf.keras.layers.Layer):
    """Reparameterization-trick layer for a VAE.

    Given `(mean, log_var)`, draws z ~ N(mean, exp(log_var)) in a way that
    keeps the sampling differentiable with respect to both inputs.
    """

    def call(self, inputs):
        z_mean, z_log_var = inputs
        # Standard normal noise, then scale by sigma = exp(log_var / 2)
        # and shift by the mean.
        epsilon = tf.random.normal(tf.shape(z_log_var))
        return z_mean + epsilon * tf.exp(0.5 * z_log_var)
# Map the custom layer's serialized name to its class so Keras can
# deserialize the saved models below.
custom_objects = {'Sampling': Sampling}
#
# Pre-trained models — instructions about training can be found in another file.
variational_ae = keras.models.load_model('./regular_celeba_vae/autoencoder/vae_autoencoder.keras', custom_objects=custom_objects)
variational_encoder = keras.models.load_model('./regular_celeba_vae/encoder/vae_encoder.keras', custom_objects=custom_objects)
variational_decoder = keras.models.load_model('./regular_celeba_vae/decoder/vae_decoder.keras', custom_objects=custom_objects)
# Precomputed latent-space data: one row per attribute direction, plus a
# single example latent code to use as the starting face.
df_atts_loaded = pd.read_csv('./data/attributes_encodings.csv', index_col=0)
df_example_instance_loaded = pd.read_csv('./data/example_instance.csv', index_col=0)
# Latent code of the example face (reshaped to (1, 64) before decoding below,
# so presumably a length-64 vector — TODO confirm against the CSV).
O = df_example_instance_loaded.values[0]
# One latent direction per CelebA attribute. NOTE(review): row order is
# assumed to match the slider labels used later (Bald, Smiling, Mustache,
# Eyeglasses, Goatee, Arched_Eyebrows, Bags_Under_Eyes) — verify against
# how attributes_encodings.csv was written.
v_att1 = tf.convert_to_tensor(df_atts_loaded.values[0])
v_att2 = tf.convert_to_tensor(df_atts_loaded.values[1])
v_att3 = tf.convert_to_tensor(df_atts_loaded.values[2])
v_att4 = tf.convert_to_tensor(df_atts_loaded.values[3])
v_att5 = tf.convert_to_tensor(df_atts_loaded.values[4])
v_att6 = tf.convert_to_tensor(df_atts_loaded.values[5])
v_att7 = tf.convert_to_tensor(df_atts_loaded.values[6])
# Earlier two-attribute prototype, kept for reference:
# maximum_ = 25
# delta = 3.0 / maximum_
# def image_classifier(value_1, value_2):
#     return np.clip(((variational_decoder(tf.reshape((O + delta * value_1 * v_att1 + delta * value_2 * v_att2), (1, 64)))[0]) * 255), 0, 255).astype(int)[:, :, :]
# input_value_d_1 = gr.Slider(minimum=0, maximum=25, step=1)
# input_value_d_2 = gr.Slider(minimum=0, maximum=25, step=1)
# demo = gr.Interface(fn=image_classifier, inputs=[input_value_d_1, input_value_d_2], outputs="image", live=True)
maximum_ = 25
# One slider step moves the latent code by `delta` (times a per-attribute
# weight) along that attribute's direction; sliders run 0..maximum_.
delta = 1.0 / maximum_

def image_classifier(value_1, value_2, value_3, value_4, value_5, value_6, value_7):
    """Decode a face image from the example latent code shifted along
    seven attribute directions.

    Each ``value_i`` is a slider position in [0, 25]; the latent shift for
    attribute *i* is ``delta * weight_i * value_i * v_att_i``. Returns the
    decoded image as an integer array clipped to [0, 255].
    """
    # Hand-tuned per-attribute strengths (same multipliers as before:
    # 1.0, 1.5, 3.5, 3.2, 4.0, 1.0, 3.0 — some attributes need a larger
    # push to become visible).
    weights = (1.0, 1.5, 3.5, 3.2, 4.0, 1.0, 3.0)
    values = (value_1, value_2, value_3, value_4, value_5, value_6, value_7)
    directions = (v_att1, v_att2, v_att3, v_att4, v_att5, v_att6, v_att7)

    latent = O
    for weight, value, direction in zip(weights, values, directions):
        latent = latent + delta * weight * value * direction

    # Decode a single latent vector; drop the batch axis, rescale to 0..255.
    decoded = variational_decoder(tf.reshape(latent, (1, 64)))[0]
    return np.clip(decoded * 255, 0, 255).astype(int)
# One slider per CelebA attribute direction; order must match the parameter
# order of image_classifier (and the weight order inside it).
_slider_labels = [
    "Bald",
    "Smiling",
    "Mustache",
    "Eyeglasses",
    "Goatee",
    "Arched_Eyebrows",
    "Bags_Under_Eyes",
]
_sliders = [gr.Slider(minimum=0, maximum=25, step=1, label=label)
            for label in _slider_labels]
# Keep the original individual names in case anything else references them.
(input_value_d_1, input_value_d_2, input_value_d_3, input_value_d_4,
 input_value_d_5, input_value_d_6, input_value_d_7) = _sliders
# live=True re-renders the decoded face on every slider move.
demo = gr.Interface(fn=image_classifier, inputs=_sliders,
                    outputs="image", live=True)
if __name__ == "__main__":
    # Start the Gradio server only when run as a script (not on import).
    demo.launch()