|
|
import numpy as np |
|
|
import torch |
|
|
from torch.utils.data import Dataset, DataLoader |
|
|
import torchvision |
|
|
import os |
|
|
from torchvision import transforms,models |
|
|
import torch.nn as nn |
|
|
from PIL import Image |
|
|
import clip |
|
|
import pdb |
|
|
import argparse |
|
|
import glob |
|
|
import h5py |
|
|
import warnings |
|
|
warnings.filterwarnings("ignore") |
|
|
|
|
|
|
|
|
class Image_Dataset(Dataset):
    """Dataset over a list of image file paths.

    Each item is the image at the corresponding path, loaded via PIL and
    converted to RGB. When ``transform`` is provided, the transformed image
    is returned; otherwise the raw PIL image is returned.
    """

    def __init__(self, image_list, transform=None):
        # image_list: sequence of paths to image files.
        # transform: optional callable applied to each loaded PIL image
        # (e.g. the CLIP preprocess pipeline).
        self.image_list = image_list
        self.transform = transform

    def __len__(self):
        return len(self.image_list)

    def __getitem__(self, idx):
        # Some PyTorch samplers hand over tensor indices; normalize them.
        if torch.is_tensor(idx):
            idx = idx.tolist()

        img_name = self.image_list[idx]

        # Force RGB so grayscale/RGBA files yield a consistent 3 channels.
        image = Image.open(img_name).convert('RGB')

        if self.transform:
            return self.transform(image)

        # Bug fix: the original returned an undefined local (NameError)
        # when no transform was set; return the loaded image instead.
        return image
|
|
|
|
|
|
|
|
|
|
|
if __name__=='__main__':

    parser = argparse.ArgumentParser(description='Extract CLIP features')
    # Bug fix: CLIP model names contain a slash ('ViT-B/32'); the previous
    # default 'ViT-B32' is not accepted by clip.load().
    parser.add_argument('--model_name', default='ViT-B/32', type=str, help='name of the model')
    parser.add_argument('--root_dir', type=str, help='name of root feature dir')
    parser.add_argument('--cuda_base', help="in form cuda:x")
    parser.add_argument('--h5_path', type=str, help="path to store the extracted features")

    args = parser.parse_args()

    # Sort so extracted features stay aligned with frame order on disk.
    image_list = sorted(filter(os.path.isfile,
                               glob.glob(os.path.join(args.root_dir, '*.jpg'))))

    model, preprocess = clip.load(args.model_name)

    # Bug fix: removed leftover pdb.set_trace() debug breakpoint that
    # halted every run.

    data = Image_Dataset(image_list, transform=preprocess)
    dataloader = DataLoader(data, batch_size=64, drop_last=False, shuffle=False, num_workers=4)

    device = torch.device(args.cuda_base if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    model.eval()  # inference only; make sure no train-mode layers interfere

    features = []
    shot_features = []

    with torch.no_grad():
        for image in dataloader:
            image = image.to(device)
            output = model.encode_image(image).float()
            # L2-normalize so downstream cosine similarity is a plain dot product.
            output /= output.norm(dim=-1, keepdim=True)
            features.append(output.detach().to('cpu').numpy())

    features = np.concatenate(features, axis=0)

    # Average each consecutive group of 5 frame features into one shot
    # feature. Slicing clamps at the array end, so the (possibly shorter)
    # final group needs no special case.
    for idx in range(0, len(features), 5):
        shot_features.append(np.average(features[idx:idx+5, :], axis=0))

    shot_features = np.asarray(shot_features)

    # Bug fix: use a context manager so the HDF5 file is flushed and
    # closed even if writing fails (the original never closed it).
    with h5py.File(args.h5_path, 'w') as h5f:
        h5f.create_dataset('feature', data=shot_features)

    print("Feature Extraction Finished")
|
|
|