Upload modeling_vae.py
Browse files — modeling_vae.py (+2 −2)
modeling_vae.py
CHANGED
|
@@ -29,7 +29,7 @@ class VAEModel(PreTrainedModel):
|
|
| 29 |
outputDim=self.hidden_channels * (2 ** i)
|
| 30 |
self.encoder.append(nn.Conv2d(inputDim, outputDim, kernel_size=4, stride=2, padding=1))# -> (hidden, H/2, W/2)
|
| 31 |
self.encoder.append(nn.BatchNorm2d(outputDim))
|
| 32 |
-
self.encoder.append(
|
| 33 |
self.encoderD = nn.Sequential(*self.encoder)
|
| 34 |
|
| 35 |
with torch.no_grad():
|
|
@@ -49,7 +49,7 @@ class VAEModel(PreTrainedModel):
|
|
| 49 |
outputDim=self.hidden_channels * i
|
| 50 |
self.decoder.append(nn.ConvTranspose2d(inputDim, outputDim, kernel_size=4, stride=2, padding=1))
|
| 51 |
self.decoder.append(nn.BatchNorm2d(outputDim))
|
| 52 |
-
self.decoder.append(
|
| 53 |
inputDim=outputDim
|
| 54 |
H_before_last = self.enc_H * (2 ** (self.encoder_layers - 1))
|
| 55 |
W_before_last = self.enc_W * (2 ** (self.encoder_layers - 1))
|
|
|
|
| 29 |
outputDim=self.hidden_channels * (2 ** i)
|
| 30 |
self.encoder.append(nn.Conv2d(inputDim, outputDim, kernel_size=4, stride=2, padding=1))# -> (hidden, H/2, W/2)
|
| 31 |
self.encoder.append(nn.BatchNorm2d(outputDim))
|
| 32 |
+
self.encoder.append(nn.ReLU())
|
| 33 |
self.encoderD = nn.Sequential(*self.encoder)
|
| 34 |
|
| 35 |
with torch.no_grad():
|
|
|
|
| 49 |
outputDim=self.hidden_channels * i
|
| 50 |
self.decoder.append(nn.ConvTranspose2d(inputDim, outputDim, kernel_size=4, stride=2, padding=1))
|
| 51 |
self.decoder.append(nn.BatchNorm2d(outputDim))
|
| 52 |
+
self.decoder.append(nn.ReLU())
|
| 53 |
inputDim=outputDim
|
| 54 |
H_before_last = self.enc_H * (2 ** (self.encoder_layers - 1))
|
| 55 |
W_before_last = self.enc_W * (2 ** (self.encoder_layers - 1))
|