Spaces:
Runtime error
Runtime error
Commit
·
6deedc6
1
Parent(s):
3927aba
Update app.py
Browse files
app.py
CHANGED
|
@@ -12,26 +12,35 @@ from torchvision.utils import save_image
|
|
| 12 |
|
| 13 |
|
| 14 |
class Generator(nn.Module):
|
| 15 |
-
def __init__(self,
|
| 16 |
super(Generator, self).__init__()
|
| 17 |
self.model = nn.Sequential(
|
| 18 |
-
|
| 19 |
-
nn.
|
|
|
|
| 20 |
nn.ReLU(True),
|
| 21 |
-
|
| 22 |
-
nn.
|
|
|
|
| 23 |
nn.ReLU(True),
|
| 24 |
-
|
| 25 |
-
nn.
|
|
|
|
| 26 |
nn.ReLU(True),
|
| 27 |
-
|
| 28 |
-
nn.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
)
|
| 30 |
|
| 31 |
-
def forward(self,
|
| 32 |
-
|
| 33 |
-
return output
|
| 34 |
|
|
|
|
| 35 |
|
| 36 |
model = Generator()
|
| 37 |
weights_path = hf_hub_download('huggingnft/dooggies', 'pytorch_model.bin')
|
|
|
|
| 12 |
|
| 13 |
|
| 14 |
class Generator(nn.Module):
    """DCGAN-style generator: maps a latent noise tensor to a 64x64 image.

    A stack of ConvTranspose2d -> BatchNorm2d -> ReLU upsampling stages
    ending in Tanh, doubling the spatial resolution at each stage
    (1x1 -> 4x4 -> 8x8 -> 16x16 -> 32x32 -> 64x64).

    Args:
        num_channels: channels of the generated image (default 4 —
            presumably RGBA output; confirm against the training data).
        latent_dim: dimensionality of the input noise vector.
        hidden_size: base number of feature maps; earlier (coarser) layers
            use 8x, 4x, 2x multiples of this value.
    """

    def __init__(self, num_channels: int = 4, latent_dim: int = 100,
                 hidden_size: int = 64) -> None:
        # Zero-argument super() — modern idiom replacing
        # the legacy super(Generator, self).__init__() form.
        super().__init__()
        self.model = nn.Sequential(
            # input is Z (latent_dim x 1 x 1), going into a convolution
            nn.ConvTranspose2d(latent_dim, hidden_size * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(hidden_size * 8),
            nn.ReLU(True),
            # state size. (hidden_size*8) x 4 x 4
            nn.ConvTranspose2d(hidden_size * 8, hidden_size * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(hidden_size * 4),
            nn.ReLU(True),
            # state size. (hidden_size*4) x 8 x 8
            nn.ConvTranspose2d(hidden_size * 4, hidden_size * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(hidden_size * 2),
            nn.ReLU(True),
            # state size. (hidden_size*2) x 16 x 16
            nn.ConvTranspose2d(hidden_size * 2, hidden_size, 4, 2, 1, bias=False),
            nn.BatchNorm2d(hidden_size),
            nn.ReLU(True),
            # state size. (hidden_size) x 32 x 32
            nn.ConvTranspose2d(hidden_size, num_channels, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (num_channels) x 64 x 64
        )

    def forward(self, noise: torch.Tensor) -> torch.Tensor:
        """Generate images from noise of shape (batch, latent_dim, 1, 1).

        Returns a tensor of shape (batch, num_channels, 64, 64) with
        values in [-1, 1] (Tanh output).
        """
        pixel_values = self.model(noise)
        return pixel_values
| 44 |
|
| 45 |
# Instantiate the generator with its default hyperparameters.
model = Generator()
# Fetch the pretrained weight file from the 'huggingnft/dooggies' repo on the
# Hugging Face Hub (downloads to the local cache and returns the file path).
weights_path = hf_hub_download('huggingnft/dooggies', 'pytorch_model.bin')