import torch  # core tensor library
import torch.optim as optim  # Adam optimizer for the generated image
from tqdm import tqdm  # progress bar over the optimization steps
import spaces  # Hugging Face Spaces GPU decorator

from dataTransform import load_image
from vggModel import VGGNet
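
# NOTE (assumption, not shown in this file): `load_image` is expected to turn
# an input image into a normalized (1, 3, H, W) float tensor on `device`, and
# `VGGNet` to be the usual VGG-19 feature extractor that returns activations
# from a handful of conv layers, roughly:
#
#     class VGGNet(torch.nn.Module):
#         def __init__(self):
#             super().__init__()
#             self.chosen_layers = ['0', '5', '10', '19', '28']
#             self.model = torchvision.models.vgg19(pretrained=True).features[:29]
#
#         def forward(self, x):
#             features = []
#             for name, layer in self.model._modules.items():
#                 x = layer(x)
#                 if name in self.chosen_layers:
#                     features.append(x)
#             return features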

@spaces.GPU(duration=242)
def style_transfer(content_img, style_img, total_steps, alpha=1e5, beta=1e10, learning_rate=0.001):
    # Select the device and preprocess the input images
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('-' * 30)
    print(f'Device initialized: {device}')
    print('-' * 30)
    content_img = load_image(content_img, device)
    style_img = load_image(style_img, device)
    # Optimize a copy of the content image directly in pixel space
    generated_img = content_img.clone().requires_grad_(True)
    optimizer = optim.Adam([generated_img], lr=learning_rate)
    model = VGGNet().to(device).eval()

    for step in tqdm(range(total_steps)):
        # Extract multi-layer VGG features for all three images. The content
        # and style features are constant across steps and could be hoisted
        # out of the loop; they are recomputed here for simplicity.
        generated_feats = model(generated_img)
        original_image_feats = model(content_img)
        style_feats = model(style_img)

        # Accumulate content and style losses over the selected layers
        style_loss = original_loss = 0

        for gen_feat, orig_image_feat, styl_feat in zip(generated_feats, original_image_feats, style_feats):
            batch, channel, height, width = gen_feat.shape

            # Content loss: MSE between generated and content features
            original_loss += torch.mean((gen_feat - orig_image_feat) ** 2)

            # Gram matrix of the generated features: channel-wise correlations
            # of the flattened feature map (assumes batch size 1)
            G = gen_feat.view(channel, height * width).mm(
                gen_feat.view(channel, height * width).t()
            )

            # Gram matrix of the style features
            A = styl_feat.view(channel, height * width).mm(
                styl_feat.view(channel, height * width).t()
            )

            # Style loss: MSE between the two Gram matrices
            style_loss += torch.mean((G - A) ** 2)
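
            # An equivalent formulation (sketch, not from the original file):
            # compute the Gram matrix with einsum and normalize so the loss
            # scale is independent of feature-map size:
            #
            #     f = gen_feat.squeeze(0)                  # (C, H, W)
            #     G = torch.einsum('chw,dhw->cd', f, f)    # (C, C)
            #     G = G / (channel * height * width)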

        # Weighted total: alpha scales the content term, beta the style term
        total_loss = alpha * original_loss + beta * style_loss

        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

    # Return the final generated image (still on `device`, grad enabled)
    return generated_img
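

# Minimal usage sketch. Assumptions (not in the original file): `load_image`
# accepts a file path, and 'content.jpg' / 'style.jpg' are hypothetical local
# files.
if __name__ == '__main__':
    from torchvision.utils import save_image

    result = style_transfer('content.jpg', 'style.jpg', total_steps=2000)
    # Detach before saving; save_image expects values roughly in [0, 1]
    save_image(result.detach().cpu(), 'generated.png')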