Update archs/model.py
archs/model.py  +2 -15
@@ -1,6 +1,5 @@
 import torch
 import torch.nn as nn
-import torch.nn.functional as F
 
 class AttentionBlock(nn.Module):
     def __init__(self, in_channels, out_channels, kernel_size=3, padding=1):
@@ -30,11 +29,10 @@ class AttentionBlock(nn.Module):
         return x
 
 
+
 class UNet(nn.Module):
     def __init__(self):
         super(UNet, self).__init__()
-
-        self.padder_size = 32
 
         self.encoder = nn.Sequential(
             nn.Conv2d(3, 32, kernel_size=3, padding=1),
@@ -70,10 +68,6 @@ class UNet(nn.Module):
         )
 
     def forward(self, x):
-
-        _, _, H, W = x.shape
-        x = self.check_image_size(x)
-
         skip_connections = []
 
         for layer in self.encoder:
@@ -99,12 +93,5 @@ class UNet(nn.Module):
         else:
             x = layer(x)
 
-        return x[:, :, :H, :W]
-
-
-    def check_image_size(self, x):
-        _, _, h, w = x.size()
-        mod_pad_h = (self.padder_size - h % self.padder_size) % self.padder_size
-        mod_pad_w = (self.padder_size - w % self.padder_size) % self.padder_size
-        x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), value = 0)
         return x
+
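For reference, the helper removed in this commit padded the input's height and width up to the next multiple of self.padder_size (32) before encoding, and forward() then cropped the output back to the original H x W. Below is a minimal standalone sketch of that padding arithmetic, assuming a 4D NCHW tensor; the name pad_to_multiple and the example shapes are illustrative, not from the repo:

import torch
import torch.nn.functional as F

def pad_to_multiple(x, multiple=32):
    # Zero-pad H and W on the bottom/right up to the next multiple of
    # `multiple`, mirroring the removed UNet.check_image_size (padder_size = 32).
    _, _, h, w = x.shape
    pad_h = (multiple - h % multiple) % multiple
    pad_w = (multiple - w % multiple) % multiple
    # F.pad's tuple pads the last dims first: (w_left, w_right, h_top, h_bottom)
    return F.pad(x, (0, pad_w, 0, pad_h), value=0)

x = torch.randn(1, 3, 250, 300)
padded = pad_to_multiple(x)
print(padded.shape)  # torch.Size([1, 3, 256, 320])
# The old forward() then cropped back to the input size: padded[:, :, :250, :300]

With the helper gone, the model no longer pads its input, so tensors whose spatial dimensions are not multiples of 32 may misalign with the skip connections, depending on how the encoder downsamples.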