# wavelet.py — SEED_balanced / FAITH / wavelet
# (header reconstructed from hosting-page residue: "Upload 253 files", commit 02e9762)
import torch.nn as nn
import torch
import pdb
import os, torchvision
from PIL import Image
from torchvision import transforms as trans
def test3(img_path='../../../ultraedit/UltraEdit/sequential_deepfake_dataset/541/50_11_62_22.jpg',
          out_path='test_HH.jpg'):
    """Run a 1-level Haar DWT on an image and save a thresholded HH subband.

    Loads ``img_path``, applies a single-level 2-D discrete wavelet
    transform (Haar wavelet, zero padding), zeroes HH coefficients below
    1/20 of the subband maximum, amplifies the rest by 12, and writes the
    visualization to ``out_path``.

    Args:
        img_path: input RGB image path (default keeps the original
            hard-coded dataset path, so existing callers are unaffected).
        out_path: file the visualized HH subband is written to.
    """
    # Local import: pytorch_wavelets is an optional third-party dependency.
    from pytorch_wavelets import DWTForward  # (or import DWT, IDWT)

    # J is the number of decomposition levels; `wave` selects the wavelet
    # (accepts all wave types available to PyWavelets).
    xfm = DWTForward(J=1, mode='zero', wave='haar')

    img = Image.open(img_path)
    transform = trans.Compose([trans.ToTensor()])
    img = transform(img).unsqueeze(0)  # (1, C, H, W)
    print(img.shape)

    # Yl: low-pass approximation, shape (1, C, H/2, W/2).
    # Yh: list with one entry per level, each (1, C, 3, h, w); along dim 2
    #     the bands are LH, HL, HH — presumably in that PyWavelets order,
    #     TODO confirm against pytorch_wavelets docs.
    Yl, Yh = xfm(img)
    print(Yl.shape)
    print(len(Yh))

    i = 0  # visualize the first (finest) decomposition level
    print(Yh[i].shape)
    if i == len(Yh) - 1:
        # Coarsest level: stack the approximation plus the 3 detail bands.
        # BUG FIX: the original allocated size(3) x size(3) (height used for
        # both spatial dims), which breaks on non-square images; use
        # size(3) x size(4) = h x w.
        h = torch.zeros([4, 3, Yh[i].size(3), Yh[i].size(4)]).float()
        h[0, :, :, :] = Yl
        offset = 1  # detail bands go after the approximation
    else:
        h = torch.zeros([3, 3, Yh[i].size(3), Yh[i].size(4)]).float()
        offset = 0
    for j in range(3):
        h[j + offset, :, :, :] = Yh[i][:, :, j, :, :]

    # Build a preview grid (assembled but, as in the original, never saved).
    if i == len(Yh) - 1:
        img_grid = torchvision.utils.make_grid(h, 2)  # 2 images per row
    else:
        img_grid = torchvision.utils.make_grid(h, 3)
    # torchvision.utils.save_image(img_grid, 'test.jpg')

    # Threshold + amplify the HH subband so the detail is visible.
    s_img = Yh[0][:, :, 2, :, :]
    print(f"s_img.shape: {s_img.shape}")
    s_img[s_img < torch.max(s_img) / 20] = 0
    s_img = s_img[0][0] * 12
    torchvision.utils.save_image(s_img, out_path)
# Script entry point: run the DWT visualization demo when executed directly.
if __name__ == '__main__':
    test3()