import torch
import torch.nn.functional as F
from torch import nn


class Conv2d(nn.Module):
    """Conv2d -> BatchNorm2d block with optional residual shortcut and ReLU."""

    def __init__(self, cin, cout, kernel_size, stride, padding,
                 residual=False, use_act=True, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.conv_block = nn.Sequential(
            nn.Conv2d(cin, cout, kernel_size, stride, padding),
            nn.BatchNorm2d(cout),
        )
        self.act = nn.ReLU()
        self.residual = residual
        self.use_act = use_act

    def forward(self, x):
        out = self.conv_block(x)
        if self.residual:
            # Identity shortcut; only shape-valid when cin == cout and stride == 1.
            out += x

        if self.use_act:
            return self.act(out)
        else:
            return out


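# Minimal usage sketch (added for illustration, not part of the original
# module): the residual flag reuses the input as an identity shortcut, so it
# is only valid when the block preserves shape (cin == cout, stride 1).
def _conv2d_example():
    block = Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True)
    x = torch.randn(2, 32, 80, 16)    # assumed (N, C, H, W) input
    assert block(x).shape == x.shape  # residual block preserves shape

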
class SimpleWrapperV2(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.audio_encoder = nn.Sequential(
            Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),
            Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),

            Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1),
            Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),
            Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),

            Conv2d(64, 128, kernel_size=3, stride=3, padding=1),
            Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),
            Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),

            Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1),
            Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True),

            Conv2d(256, 512, kernel_size=3, stride=1, padding=0),
            Conv2d(512, 512, kernel_size=1, stride=1, padding=0),
        )
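        # Illustrative shape trace (assumes a Wav2Lip-style mel input of
        # shape (N, 1, 80, 16); each size follows floor((n + 2p - k)/s) + 1):
        #   (N,  1, 80, 16) -> (N,  32, 80, 16)  stride-1 stack
        #                   -> (N,  64, 27, 16)  stride (3, 1)
        #                   -> (N, 128,  9,  6)  stride 3
        #                   -> (N, 256,  3,  3)  stride (3, 2)
        #                   -> (N, 512,  1,  1)  final un-padded 3x3 and 1x1
        # so the flattened audio feature is 512-d, matching mapping1 below.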

        # Optional: initialize the audio encoder from a pretrained Wav2Lip
        # checkpoint (site-specific path); a standalone helper sketch follows
        # the class below.
        '''
        wav2lip_state_dict = torch.load('/apdcephfs_cq2/share_1290939/wenxuazhang/checkpoints/wav2lip.pth')['state_dict']
        state_dict = self.audio_encoder.state_dict()

        for k, v in wav2lip_state_dict.items():
            if 'audio_encoder' in k:
                print('init:', k)
                state_dict[k.replace('module.audio_encoder.', '')] = v
        self.audio_encoder.load_state_dict(state_dict)
        '''

        # 512-d flattened audio feature + 64-d ref + 1-d ratio -> 64-d output.
        self.mapping1 = nn.Linear(512 + 64 + 1, 64)
        nn.init.constant_(self.mapping1.bias, 0.)

    def forward(self, x, ref, ratio):
        # Encode the mel input and flatten: (N, 1, H, W) -> (N, 512).
        x = self.audio_encoder(x).view(x.size(0), -1)
        # Flatten ref and ratio so they share the audio batch dimension.
        ref_reshape = ref.reshape(x.size(0), -1)
        ratio = ratio.reshape(x.size(0), -1)

        y = self.mapping1(torch.cat([x, ref_reshape, ratio], dim=1))
        # Restore the (batch, frames, features) layout taken from ref.
        out = y.reshape(ref.shape[0], ref.shape[1], -1)
        return out


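# Hedged helper mirroring the commented-out initialization inside __init__:
# copies the audio-encoder weights out of a Wav2Lip checkpoint by stripping
# the 'module.audio_encoder.' prefix. The checkpoint layout is an assumption
# carried over from that comment block, not verified here.
def load_wav2lip_audio_encoder(audio_encoder, ckpt_path):
    wav2lip_state_dict = torch.load(ckpt_path, map_location='cpu')['state_dict']
    state_dict = audio_encoder.state_dict()
    for k, v in wav2lip_state_dict.items():
        if 'audio_encoder' in k:
            state_dict[k.replace('module.audio_encoder.', '')] = v
    audio_encoder.load_state_dict(state_dict)


if __name__ == '__main__':
    # Smoke test under the assumed input shapes (hypothetical sizes, chosen
    # for illustration): 2 clips of 5 frames each.
    B, T = 2, 5
    model = SimpleWrapperV2()
    mel = torch.randn(B * T, 1, 80, 16)  # one mel chunk per frame
    ref = torch.randn(B, T, 64)          # 64-d reference vector per frame
    ratio = torch.randn(B, T, 1)         # scalar ratio per frame
    out = model(mel, ref, ratio)
    print(out.shape)                     # torch.Size([2, 5, 64])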