import os
import random

import numpy as np
import tensorflow as tf
import gradio as gr
from PIL import Image
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.optimizers.schedules import ExponentialDecay

# change in the actual written file
from modules.nst_loss_functions import *
from modules.nst_models import *
from modules.preprocessing_utils import *

# Shared, per-process configuration supplied by the project helpers:
# the content/style loss weights, the VGG layer names used by the loss,
# and the pretrained VGG feature extractor reused for every request.
content_weight, style_weight = get_weights()
content_layer, style_layers = get_layers_lists()
pretrained_vgg_model, feature_extractor = get_pretrained_vgg_model_fe()


@tf.function
def compute_loss_and_grads(generated_image, base_image, style_image, row_cols):
    """Evaluate the total NST loss and its gradient w.r.t. the generated image.

    Compiled with @tf.function; captures the module-level layers, weights and
    feature extractor as closure state.
    """
    with tf.GradientTape() as tape:
        loss = loss_function(
            generated_image=generated_image,
            base_image=base_image,
            style_image=style_image,
            content_layer=content_layer,
            style_layers=style_layers,
            feature_extractor=feature_extractor,
            weights=(content_weight, style_weight),
            rows_cols=row_cols,
        )
    grads = tape.gradient(loss, generated_image)
    return loss, grads


def generate(base_image, style_image, epochs, progress=gr.Progress()):
    """Run NST gradient descent and return the stylized image.

    base_image / style_image are PIL images from the Gradio inputs; epochs is
    the iteration count; progress drives the Gradio progress bar.
    """
    content_pil = base_image
    style_pil = style_image
    # Optimization is seeded from a copy of the content image.
    seed_pil = content_pil.copy()

    # Resize to a fixed 400-row canvas, preserving the content aspect ratio.
    width, height = content_pil.size
    img_nrows = 400
    img_ncols = int(width * img_nrows / height)
    dims = (img_nrows, img_ncols)

    # Plain SGD with an exponentially decaying step size.
    optimizer = SGD(
        learning_rate=ExponentialDecay(
            initial_learning_rate=100.0,
            decay_steps=100,
            decay_rate=0.96,
        )
    )

    # Project helpers turn the PIL images into model-ready tensors.
    base_image = preprocess_image(content_pil, dims)
    style_image = preprocess_image(style_pil, dims)
    generated_image = tf.Variable(preprocess_image(seed_pil, dims))

    for _ in progress.tqdm(range(int(epochs))):
        loss, grads = compute_loss_and_grads(
            generated_image, base_image, style_image, dims
        )
        optimizer.apply_gradients([(grads, generated_image)])

    # Back to a displayable image for the Gradio output component.
    return deprocess_image(generated_image.numpy(), dims)


title = "Neural Style Transfer Demo"
description = "This is my implementation of the neural style transfer algorithm using Tensorflow2.To use this app, select a real photo as a content image and an art piece as style image from an URL or from your PC, set the number of epochs (it is recommended to leave the default value), and run the app. THIS MAY TAKE SOME TIME (AROUND 5 MINUTES WITH THE DEFAULT VALUE), PLEASE BE PATIENT (╯°□°)╯. If you want better and faster results, you can try running the code on Google Colab with a GPU. You can run 4000 epochs in 8 minutes ± 2. You can modify this [notebook](https://colab.research.google.com/drive/1KWqQmz_nM3tckiRm5ES_Y5YHl97aILmo?usp=sharing) for that purpose. It is the same implementation that is used in the app."
article = "The NST algorithm is an algorithm that allows you to replicate an image A with similar features to the ones present in an image B. In a nutshell, this is done by using a pretrained CNN to perform gradient descent on the weighted cost of a style and content cost function, which correspond to the frobenius norm across the features’ cross covariance across different layers and the simple norm respectively. The result of the loss is applied to a random generated image to get the hybrid."
# Build the example galleries from the bundled sample images and launch the app.
example_list = list()  # change in the app (kept for backward compatibility; unused here)
examples_path = "examples"
content_examples_path = os.path.join(examples_path, "content")
style_examples_path = os.path.join(examples_path, "style")

# One [path] row per example, as Gradio expects. Use os.path.join instead of
# manual "/" concatenation (portable), and sort the directory listing so the
# gallery order is deterministic across runs.
content_examples = [
    [os.path.join(content_examples_path, example)]
    for example in sorted(os.listdir(content_examples_path))
]
style_examples = [
    [os.path.join(style_examples_path, example)]
    for example in sorted(os.listdir(style_examples_path))
]

# Pre-fill both image inputs with a random sample so the demo runs in one click.
img_input_1 = gr.Image(label="Content Image", type="pil", value=random.choice(content_examples)[0])
img_input_2 = gr.Image(label="Style Image", type="pil", value=random.choice(style_examples)[0])

demo = gr.Interface(
    fn=generate,
    inputs=[img_input_1, img_input_2, gr.Number(value=25, label="Number of epochs")],
    outputs=[gr.Image(type="pil")],
    title=title,
    description=description,
    article=article,
)
demo.queue().launch()