# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the Apache License, Version 2.0
# found in the LICENSE file in the root directory of this source tree.

# import logging
import cv2
import numpy as np
import torch
from PIL import Image
from torchvision import transforms

from my_transforms import (
    GaussianBlur,
    make_normalize_transform,
    make_normalize_transform_clip,
)
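
# `my_transforms` is not included here. A plausible reading (an assumption,
# not the actual module) is that the two normalization factories wrap
# torchvision's Normalize with the standard statistics, roughly:
#
#   def make_normalize_transform():
#       # ImageNet statistics, the usual choice for DINO/ResNet pipelines
#       return transforms.Normalize(mean=(0.485, 0.456, 0.406),
#                                   std=(0.229, 0.224, 0.225))
#
#   def make_normalize_transform_clip():
#       # CLIP's published preprocessing statistics
#       return transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073),
#                                   std=(0.26862954, 0.26130258, 0.27577711))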
def add_gaussian_noise(tensor, mean=0.0, std=0.1):
    # randn_like keeps the noise on the input's device and dtype, so this
    # works for CPU tensors as well as CUDA ones.
    noise = torch.randn_like(tensor) * std + mean
    return tensor + noise
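
# Hypothetical usage (values are illustrative): perturb an
# already-normalized batch.
#
#   imgs = torch.rand(8, 3, 224, 224)
#   noisy = add_gaussian_noise(imgs, std=0.05)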
class DataAugmentationCLIP(object):
    def __init__(
        self,
        global_crops_scale,
        local_crops_scale,
        local_crops_number,
        global_crops_size=224,
        local_crops_size=96,
    ):
        self.source_trans = transforms.Compose([
            transforms.ToTensor(),
            make_normalize_transform_clip(),
        ])
        self.crop = transforms.Compose([
            transforms.Resize(224),      # scale the shorter side to 224; the longer side scales proportionally
            transforms.RandomCrop(224),  # then crop to 224x224
        ])
        self.centercrop = transforms.Compose([
            transforms.CenterCrop(224),
        ])
        self.randomcrop = transforms.Compose([
            transforms.RandomCrop(224),
        ])
        self.local_crops_number = local_crops_number

    def __call__(self, image):
        output = {}
        output["source"] = []
        # Images smaller and larger than 224 are handled identically:
        # CenterCrop zero-pads inputs below the crop size.
        crops_all = [
            self.centercrop(image) for _ in range(self.local_crops_number)
        ]
        for crops_image in crops_all:
            output["source"].append(self.source_trans(crops_image))  # using this alone works better
        output["offsets"] = ()
        return output
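
# Sketch of expected usage (assumes a PIL image input; the crop-scale
# arguments are accepted for API compatibility but unused here, and the
# values below are illustrative):
#
#   aug = DataAugmentationCLIP(
#       global_crops_scale=(0.32, 1.0),
#       local_crops_scale=(0.05, 0.32),
#       local_crops_number=2,
#   )
#   out = aug(Image.open("example.jpg"))  # path is hypothetical
#   # out["source"] -> list of 2 tensors of shape (3, 224, 224)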
class DataAugmentationDINO(object):
    def __init__(
        self,
        global_crops_scale,
        local_crops_scale,
        local_crops_number,
        global_crops_size=224,
        local_crops_size=96,
    ):
        self.source_trans = transforms.Compose([
            transforms.ToTensor(),
            make_normalize_transform(),
        ])
        self.crop = transforms.Compose([
            transforms.Resize(224),      # scale the shorter side to 224; the longer side scales proportionally
            transforms.CenterCrop(224),  # then crop to 224x224
        ])
        self.centercrop = transforms.Compose([
            transforms.CenterCrop(224),
        ])
        self.local_crops_number = local_crops_number

    def __call__(self, image):
        output = {}
        output["source"] = []
        # Images smaller and larger than 224 are handled identically:
        # CenterCrop zero-pads inputs below the crop size.
        crops_all = [
            self.centercrop(image) for _ in range(self.local_crops_number)
        ]
        for crops_image in crops_all:
            output["source"].append(self.source_trans(crops_image))  # using this alone works better
        output["offsets"] = ()
        return output
class DataAugmentationResNet_test(object):
    def __init__(
        self,
        global_crops_scale,
        local_crops_scale,
        local_crops_number,
        global_crops_size=224,
        local_crops_size=96,
    ):
        self.source_trans = transforms.Compose([
            transforms.ToTensor(),
            make_normalize_transform(),
        ])
        self.crop = transforms.Compose([
            transforms.Resize(224),      # scale the shorter side to 224; the longer side scales proportionally
            transforms.CenterCrop(224),  # then crop to 224x224
        ])
        self.centercrop = transforms.Compose([
            transforms.CenterCrop(224),
        ])
        self.local_crops_number = local_crops_number

    def __call__(self, image):
        output = {}
        output["source"] = []
        # Images smaller and larger than 224 are handled identically:
        # CenterCrop zero-pads inputs below the crop size.
        crops_all = [
            self.centercrop(image) for _ in range(self.local_crops_number)
        ]
        for crops_image in crops_all:
            output["source"].append(self.source_trans(crops_image))  # using this alone works better
        output["offsets"] = ()
        return output
class DataAugmentationCLIP_gen(object):
    def __init__(
        self,
        global_crops_scale,
        local_crops_scale,
        local_crops_number,
        global_crops_size=224,
        local_crops_size=96,
    ):
        self.source_trans = transforms.Compose([
            transforms.ToTensor(),
            make_normalize_transform_clip(),
        ])
        self.crop = transforms.Compose([
            transforms.Resize(224),      # scale the shorter side to 224; the longer side scales proportionally
            transforms.CenterCrop(224),  # then crop to 224x224
        ])
        self.centercrop = transforms.Compose([
            transforms.CenterCrop(224),
        ])
        self.local_crops_number = local_crops_number

    def __call__(self, image):
        output = {}
        output["source"] = []
        # Unlike the classes above, this one resizes before cropping, so
        # inputs smaller than 224 are upscaled rather than zero-padded.
        crops_all = [
            self.crop(image) for _ in range(self.local_crops_number)
        ]
        for crops_image in crops_all:
            output["source"].append(self.source_trans(crops_image))  # using this alone works better
        output["offsets"] = ()
        return output
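

if __name__ == "__main__":
    # Minimal smoke test: run each pipeline on a synthetic RGB image and
    # print the output tensor shapes. The crop-scale arguments are
    # illustrative and unused by these classes.
    dummy = Image.fromarray(
        np.random.randint(0, 256, (256, 320, 3), dtype=np.uint8)
    )
    for cls in (
        DataAugmentationCLIP,
        DataAugmentationDINO,
        DataAugmentationResNet_test,
        DataAugmentationCLIP_gen,
    ):
        aug = cls(
            global_crops_scale=(0.32, 1.0),
            local_crops_scale=(0.05, 0.32),
            local_crops_number=2,
        )
        out = aug(dummy)
        print(cls.__name__, [tuple(t.shape) for t in out["source"]])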