| import torch.nn as nn |
| import torch |
| import pdb |
| import os, torchvision |
| from PIL import Image |
| from torchvision import transforms as trans |
|
|
def test3(img_path='../../../ultraedit/UltraEdit/sequential_deepfake_dataset/541/50_11_62_22.jpg',
          out_path='test_HH.jpg', level=0, band=2, threshold_ratio=1 / 20, gain=12):
    """Visualize one high-frequency DWT sub-band of an image.

    Runs a single-level Haar discrete wavelet transform on the image at
    *img_path*, selects one detail sub-band (default index 2 == HH,
    diagonal details), zeroes coefficients below ``threshold_ratio`` of the
    band maximum, amplifies the survivors by ``gain``, and saves the result
    to *out_path*.

    All parameters default to the original hard-coded values, so calling
    ``test3()`` with no arguments reproduces the original behavior.

    Args:
        img_path: path of the input image.
        out_path: path of the saved sub-band visualization.
        level: which decomposition level of Yh to inspect (0 with J=1).
        band: detail sub-band index: 0=LH, 1=HL, 2=HH.
        threshold_ratio: fraction of the band max below which coefficients
            are zeroed (noise suppression).
        gain: multiplier applied before saving, for visibility.
    """
    # Third-party dep kept as a function-local import, as in the original,
    # so the rest of the module loads without pytorch_wavelets installed.
    from pytorch_wavelets import DWTForward

    # One-level decomposition, Haar wavelet, zero-padding at the borders.
    xfm = DWTForward(J=1, mode='zero', wave='haar')

    img = Image.open(img_path)
    transform = trans.Compose([
        trans.ToTensor()
    ])
    img = transform(img).unsqueeze(0)  # add batch dim -> (1, C, H, W)
    print(img.shape)

    # Yl: low-pass approximation, shape (1, C, H/2, W/2).
    # Yh: list of J tensors, each (1, C, 3, h, w) holding LH/HL/HH bands.
    Yl, Yh = xfm(img)
    print(Yl.shape)
    print(len(Yh))
    print(Yh[level].shape)

    # NOTE(review): the original code also assembled the sub-bands into a
    # grid with torchvision.utils.make_grid, but the resulting `img_grid`
    # was never displayed or saved — that dead code has been removed.

    # Select one detail sub-band; shape (1, C, h, w).
    s_img = Yh[level][:, :, band, :, :]
    print(f"s_img.shape: {s_img.shape}")

    # Suppress weak coefficients: anything below threshold_ratio of the
    # band maximum is treated as noise and zeroed in place.
    s_img[s_img < torch.max(s_img) * threshold_ratio] = 0

    # Keep the first image / first channel only and amplify for visibility.
    s_img = s_img[0][0] * gain

    torchvision.utils.save_image(s_img, out_path)
|
|
| |
| |
| |
| |
|
|
# Script entry point: run the wavelet sub-band visualization demo when
# this file is executed directly (not when imported as a module).
if __name__ == '__main__':
    test3()