import torch
import torch.nn as nn
import torch.nn.functional as F
|
|
class Gerador(nn.Module):
    """Generator network.

    Encodes a 12-channel input into a 64-channel embedding (spatial size
    preserved), concatenates a projected noise map, and upsamples 7x to a
    3-channel image bounded to [-1, 1] by the final Tanh.
    """

    def __init__(self):
        super(Gerador, self).__init__()

        # Feature extractor: padding-preserving convs, 12 -> 64 channels.
        self.encoder = nn.Sequential(
            nn.Conv2d(12, 64, 5, padding=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, padding=1),
            nn.ReLU()
        )

        # 1x1 conv lifting a single-channel noise map to 64 channels so it
        # can be concatenated with the embedding along the channel axis.
        self.ruido_proj = nn.Conv2d(1, 64, kernel_size=1)

        # Upsampling head: exact 7x spatial upscale (kernel == stride == 7),
        # then refinement convs down to 3 output channels.
        # NOTE(review): the attribute name `super` shadows the builtin as a
        # name; it is kept as-is because renaming would change state_dict
        # keys and break existing checkpoints.
        self.super = nn.Sequential(
            nn.ConvTranspose2d(128, 128, kernel_size=7, stride=7),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2),
            nn.Conv2d(128, 64, kernel_size=5, padding=2),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 3, kernel_size=3, padding=1),
            nn.Tanh()
        )

    def forward(self, x, return_embedding=False, only_embedding=False, ruido=None):
        """Run the generator.

        Args:
            x: input tensor of shape (B, 12, H, W).
            return_embedding: if True, return ``(embedding, image)``.
            only_embedding: if True, return only the encoder embedding.
            ruido: optional noise tensor of shape (B, 1, H, W). When None
                (the default, matching the original behavior) noise is
                sampled from a standard normal on the embedding's device.
                Passing it explicitly makes generation reproducible.

        Returns:
            Image of shape (B, 3, 7*H, 7*W), or the embedding of shape
            (B, 64, H, W), or the ``(embedding, image)`` tuple, depending
            on the flags.
        """
        embedding = self.encoder(x)

        if only_embedding:
            return embedding

        # Sample fresh noise unless the caller supplied it.
        if ruido is None:
            ruido = torch.randn(
                embedding.size(0), 1, embedding.size(2), embedding.size(3),
                device=embedding.device,
            )
        ruido_feat = self.ruido_proj(ruido)

        # Fuse content and noise features along channels: 64 + 64 = 128.
        x = torch.cat([embedding, ruido_feat], dim=1)

        x = self.super(x)

        if return_embedding:
            return (embedding, x)
        else:
            return x
|
|