| import cv2 |
| import numpy as np |
| from scipy.fftpack import dctn, idctn |
| from torch_dct import dct_2d, idct_2d |
|
|
|
|
| import pdb |
| import torch |
| from torch import nn |
| from PIL import Image |
| from torchvision import transforms |
|
|
|
|
class DCT(nn.Module):
    """Extract high-frequency features from images via a 2-D DCT high-pass filter.

    The low-frequency DCT coefficients are zeroed out (only the bottom-right
    quadrant of the coefficient plane is kept), the remaining high-frequency
    content is transformed back to the spatial domain, and the result is
    pooled patch-wise by a 16x16 strided convolution.
    """

    def __init__(self) -> None:
        super().__init__()
        # Learnable 16x16 patch pooling over the single-channel high-pass image.
        self.conv = nn.Conv2d(1, 1, kernel_size=16, stride=16, padding=0, bias=False)

    def forward(self, images):
        """Compute high-frequency coefficients for a batch of images.

        Args:
            images: float tensor of shape [B, C, H, W].

        Returns:
            Tensor of shape [B, (H // 16) * (W // 16)] with patch-pooled
            high-frequency magnitudes.
        """
        # Collapse channels to a single grayscale plane: [B, H, W].
        images_gray = torch.mean(images, dim=1)

        dct_coeff = dct_2d(images_gray, norm='ortho')

        # Keep only the bottom-right quadrant of DCT coefficients, i.e. the
        # components that are high-frequency in BOTH spatial directions.
        B, H, W = dct_coeff.shape
        mask = torch.zeros((H, W), device=images.device)
        mask[H // 2:, W // 2:] = 1
        mask = mask.unsqueeze(0)  # broadcast over the batch dimension

        dct_highpass = dct_coeff * mask

        # Back to the spatial domain; keep magnitudes only.
        img_highpass = idct_2d(dct_highpass, norm='ortho')
        img_highpass = torch.abs(img_highpass)

        # Patch-wise pooling, then flatten to one feature vector per image.
        coeffs = self.conv(img_highpass.unsqueeze(1)).flatten(1)
        return coeffs

    def _highpass_filter(self, image):
        """NumPy counterpart of the high-pass step for one 2-D array.

        Args:
            image: 2-D numeric array.

        Returns:
            uint8 array of the same shape with low frequencies removed and the
            result amplified 10x (saturating at 255) for visibility.
        """
        dct_coeff = dctn(image.astype(float), norm='ortho')

        rows, cols = image.shape
        mask = np.zeros((rows, cols))
        mask[rows // 2:, cols // 2:] = 1

        filtered = idctn(dct_coeff * mask, norm='ortho')

        # Amplify BEFORE clipping/casting. The previous code multiplied the
        # uint8 result by 10, which silently wrapped modulo 256 for any
        # pixel value above 25; scaling first and clipping after saturates
        # correctly at 255.
        return np.clip(filtered * 10, 0, 255).astype(np.uint8)

    def forward_single_image(self, image, save_path="./dct_test.jpg"):
        """Debug helper: high-pass filter one image and save a preview.

        Args:
            image: 2-D grayscale input (PIL image or array). A 3-D RGB input
                is averaged to grayscale, matching forward().
            save_path: where the filtered preview is written
                (default preserves the original hard-coded path).

        Returns:
            The filtered image as a PIL Image.
        """
        image = np.array(image)
        if image.ndim == 3:
            # Collapse RGB to grayscale the same way forward() does.
            image = image.mean(axis=2)

        masked_image = Image.fromarray(self._highpass_filter(image))
        masked_image.save(save_path)

        return masked_image
|
|
|
|
|
|
if __name__ == "__main__":
    # Smoke test: push one augmented sample image through the DCT extractor.
    img_path = "../../../ultraedit/UltraEdit/sequential_deepfake_dataset/0/50.jpg"
    image = Image.open(img_path).convert('RGB')

    augmentations = [
        transforms.ColorJitter(
            brightness=[0.5, 1.3],
            contrast=[0.8, 1.5],
            saturation=[0.2, 1.5],
        ),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]
    trans = transforms.Compose(augmentations)

    # Add a leading batch dimension: [1, C, H, W].
    batch = trans(image).unsqueeze(0)
    print(batch.shape)

    extractor = DCT()
    features = extractor.forward(batch)