|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os |
|
|
import argparse |
|
|
import requests |
|
|
import numpy as np |
|
|
from tqdm import tqdm |
|
|
from pathlib import Path |
|
|
|
|
|
import torch |
|
|
from PIL import Image |
|
|
|
|
|
from src.models import deeplab |
|
|
from src.data.data_loader import CelebASegmentation |
|
|
from src.utils.deeplab_util import download_file |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Command-line interface and pretrained-weight descriptors.
# NOTE(review): parse_args() executes at import time, so importing this module
# requires a compatible sys.argv; `args` is read as a module-level global by
# main().  Keep this in mind if this script is ever imported as a library.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--resolution', type=int, default=256,
                    help='segmentation output size')
parser.add_argument('--dataset_root', type=str, default="./images/images512x512",
                    help='Path to images dir')
parser.add_argument('--output_dir', type=str, default="./segmaps/segmaps512x512",
                    help='Path to output segmap dir')

args = parser.parse_args()

# Google-Drive specs for the pretrained weights: download URL, local target
# path, and the expected size/MD5 that download_file() presumably uses to
# verify the download (see src.utils.deeplab_util — confirm).
resnet_file_spec = dict(file_url='https://drive.google.com/uc?id=1oRGgrI4KNdefbWVpw0rRkEP1gbJIRokM', file_path='deeplab_model/R-101-GN-WS.pth.tar', file_size=178260167, file_md5='aa48cc3d3ba3b7ac357c1489b169eb32')
deeplab_file_spec = dict(file_url='https://drive.google.com/uc?id=1w2XjDywFr2NjuUWaLQDRktH7VwIfuNlY', file_path='deeplab_model/deeplab_model.pth', file_size=464446305, file_md5='8e8345b1b9d95e02780f9bed76cc0293')
|
|
|
|
|
def _ensure_weights(spec, description):
    """Download the weight file described by *spec* if it is not already on disk.

    Args:
        spec: dict with at least ``file_url`` and ``file_path`` keys (see the
            module-level ``resnet_file_spec`` / ``deeplab_file_spec``).
        description: human-readable name used in the progress message.
    """
    if not os.path.isfile(spec['file_path']):
        print(f'Downloading {description} model parameters')
        with requests.Session() as session:
            download_file(session, spec)
        print('Done!')


def main():
    """Segment every image under ``args.dataset_root`` with a pretrained
    DeepLabV3 (ResNet-101 GN/WS backbone) and write one PNG label map per
    image to ``args.output_dir``, resized to ``args.resolution``.

    Requires a CUDA device; downloads pretrained weights on first run.
    """
    resolution = args.resolution

    # Raise (not assert) so the check survives `python -O`.
    if not torch.cuda.is_available():
        raise RuntimeError('CUDA is required to run this segmentation script')
    torch.backends.cudnn.benchmark = True

    if not os.path.isdir(args.dataset_root):
        raise NotADirectoryError(f'dataset_root is not a directory: {args.dataset_root}')

    dataset = CelebASegmentation(args.dataset_root, crop_size=513)

    # Backbone weights must exist before model construction (pretrained=True
    # loads them); the full DeepLab checkpoint is loaded afterwards.
    _ensure_weights(resnet_file_spec, 'backbone ResNet')

    model = getattr(deeplab, 'resnet101')(
        pretrained=True,
        num_classes=len(dataset.CLASSES),
        num_groups=32,
        weight_std=True,
        beta=False)
    model = model.cuda()
    model.eval()

    _ensure_weights(deeplab_file_spec, 'DeeplabV3')

    checkpoint = torch.load(deeplab_file_spec['file_path'])
    # Strip the 'module.' prefix added by DataParallel and drop the
    # 'num_batches_tracked' buffers, which this model does not define.
    state_dict = {k[7:]: v for k, v in checkpoint['state_dict'].items()
                  if 'tracked' not in k}
    model.load_state_dict(state_dict)

    # Hoisted out of the per-image loop; exist_ok avoids a TOCTOU race.
    os.makedirs(args.output_dir, exist_ok=True)

    # Inference only: no_grad() skips autograd bookkeeping and saves memory.
    with torch.no_grad():
        for i in tqdm(range(len(dataset))):
            inputs = dataset[i].cuda()
            outputs = model(inputs.unsqueeze(0))
            _, pred = torch.max(outputs, 1)
            pred = pred.data.cpu().numpy().squeeze().astype(np.uint8)

            mask_pred = Image.fromarray(pred)
            # NEAREST keeps discrete class labels intact during resizing.
            mask_pred = mask_pred.resize((resolution, resolution), Image.NEAREST)

            savename = os.path.join(args.output_dir,
                                    f"{Path(dataset.images[i]).stem}.png")
            mask_pred.save(savename)
            # Per-iteration print removed: tqdm already reports progress.


if __name__ == "__main__":
    main()
|
|
|