# Origin: Hugging Face upload (253 files, commit 02e9762) by Mengieong.
import cv2
import numpy as np
from scipy.fftpack import dctn, idctn
from torch_dct import dct_2d, idct_2d
import pdb
import torch
from torch import nn
from PIL import Image
from torchvision import transforms
class DCT(nn.Module):
    """High-pass filter in the DCT domain followed by a patch-embedding conv.

    ``forward`` keeps only the bottom-right quadrant of the 2-D DCT
    coefficients (the highest horizontal + vertical frequencies), inverts the
    transform, and projects the high-frequency residual through a 16x16
    non-overlapping convolution into a flat per-image feature vector.
    """
    def __init__(self) -> None:
        super().__init__()
        # 16x16 stride-16 patches: a 512x512 input yields a 32x32 map,
        # flattened to 1024 features per image.  Weights are untrained here.
        self.conv = nn.Conv2d(1, 1, kernel_size=16, stride=16, padding=0, bias=False)
        # self.pool = nn.AdaptiveAvgPool2d((32, 32))
    def forward(self, images):
        """Input: [B, C, H, W] tensor. Returns [B, (H//16)*(W//16)] features."""
        # Grayscale by channel mean (stays on the input's device).
        images_gray = torch.mean(images, dim=1)  # [B, H, W]
        # 2-D DCT.
        dct_coeff = dct_2d(images_gray, norm='ortho')  # [B, H, W]
        # High-pass mask: keep only the bottom-right quadrant of coefficients.
        B, H, W = dct_coeff.shape
        mask = torch.zeros((H, W), device=images.device)
        mask[H//2:, W//2:] = 1  # bottom-right region
        mask = mask.unsqueeze(0)  # [1, H, W] for broadcasting over the batch
        # Apply the high-pass filter.
        dct_highpass = dct_coeff * mask  # broadcast multiply, [B, H, W]
        # Inverse DCT back to the spatial domain.
        img_highpass = idct_2d(dct_highpass, norm='ortho')  # [B, H, W]
        img_highpass = torch.abs(img_highpass)  # magnitude only, for stability
        # # masked_image = Image.fromarray(img_highpass) # .size: [512, 512]
        # masked_image = transforms.ToPILImage()(img_highpass[0]) # .size: [512, 512]
        # masked_image.save("./dct_test.jpg")
        # Patch-embed via the conv (restoring the channel dim), then flatten.
        coeffs = self.conv(img_highpass.unsqueeze(1)).flatten(1)  # [B, 1024]
        return coeffs
    def forward_single_image(self, image):
        """NumPy/SciPy debug path for one image; saves ``./dct_test.jpg``.

        Accepts a PIL image or array.  RGB inputs are grayscaled by channel
        mean (matching ``forward``); grayscale inputs pass through unchanged.
        Returns the saved PIL image.
        """
        image = np.array(image)
        if image.ndim == 3:
            # Generalization: collapse RGB to grayscale, consistent with
            # forward(); the original crashed on 3-D input at the unpack below.
            image = image.mean(axis=2)
        # 2-D DCT of the (float) image.
        dct_coeff = dctn(image.astype(float), norm='ortho')
        # High-frequency mask: keep the bottom-right quadrant.
        rows, cols = image.shape
        mask = np.zeros((rows, cols))
        mask[rows//2:, cols//2:] = 1
        # Extract the high-frequency component.
        high_freq_dct = dct_coeff * mask
        # Inverse DCT recovers the high-frequency image.
        masked_image = idctn(high_freq_dct, norm='ortho')
        # BUG FIX: the original did clip(...).astype(uint8) * 10, which wraps
        # modulo 256 for any clipped value > 25.  Amplify BEFORE clipping.
        masked_image = np.clip(masked_image * 10, 0, 255).astype(np.uint8)
        masked_image = Image.fromarray(masked_image)  # e.g. 512x512
        masked_image.save("./dct_test.jpg")
        return masked_image
if __name__ == "__main__":
    # Smoke test: push one augmented image through the DCT feature extractor.
    img_path = "../../../ultraedit/UltraEdit/sequential_deepfake_dataset/0/50.jpg"
    image = Image.open(img_path).convert('RGB')
    trans = transforms.Compose([
        # Randomly jitter brightness, contrast and saturation (augmentation).
        transforms.ColorJitter(brightness=[0.5, 1.3], contrast=[
            0.8, 1.5], saturation=[0.2, 1.5]),
        transforms.RandomHorizontalFlip(),  # horizontal flip with p=0.5
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    input_image = trans(image).unsqueeze(0)  # add batch dim: [1, 3, H, W]
    print(input_image.shape)
    # pdb.set_trace()
    mask_generator = DCT()
    # FIX: call the module, not .forward(), so nn.Module.__call__ runs
    # (pre/post hooks, etc.) — calling forward directly bypasses them.
    output = mask_generator(input_image)