code stringlengths 17 6.64M |
|---|
class Mask():
    """Parent class for face masks.

    Subclasses implement ``build_mask``; the finished mask is stored on the
    instance as ``<instance>.mask``.

    channels: 1, 3 or 4:
        1 - single channel mask
        3 - three channel mask
        4 - the original face image with the mask in the alpha channel
    """

    def __init__(self, landmarks, face, channels=4):
        self.landmarks = landmarks
        self.face = face
        self.channels = channels
        mask = self.build_mask()
        self.mask = self.merge_mask(mask)

    def build_mask(self):
        """ Override to build the mask """
        raise NotImplementedError

    def merge_mask(self, mask):
        """ Return the mask in the requested shape """
        assert self.channels in (1, 3, 4), 'Channels should be 1, 3 or 4'
        # Check ndim FIRST: indexing mask.shape[2] on a 2-D array would raise
        # IndexError before the original assertion could fire.
        assert mask.ndim == 3 and mask.shape[2] == 1, 'Input mask be 3 dimensions with 1 channel'
        if self.channels == 3:
            retval = np.tile(mask, 3)
        elif self.channels == 4:
            # Attach the mask as an alpha channel behind the face image.
            retval = np.concatenate((self.face, mask), -1)
        else:
            retval = mask
        return retval
|
class dfl_full(Mask):
    """ DFL facial mask """

    def build_mask(self):
        """Fill jaw, nose-ridge and eye regions as convex polygons."""
        height, width = self.face.shape[:2]
        mask = np.zeros((height, width, 1), dtype=np.float32)
        nose_ridge = (self.landmarks[27:31], self.landmarks[33:34])
        jaw = (self.landmarks[0:17],
               self.landmarks[48:68],
               self.landmarks[0:1],
               self.landmarks[8:9],
               self.landmarks[16:17])
        eyes = (self.landmarks[17:27],
                self.landmarks[0:1],
                self.landmarks[27:28],
                self.landmarks[16:17],
                self.landmarks[33:34])
        for region in (jaw, nose_ridge, eyes):
            points = np.concatenate(region)
            cv2.fillConvexPoly(mask, cv2.convexHull(points), 255.0)
        return mask
|
class components(Mask):
    """ Component model mask """

    def build_mask(self):
        """Fill eight facial component regions as convex polygons."""
        height, width = self.face.shape[:2]
        mask = np.zeros((height, width, 1), dtype=np.float32)
        lmk = self.landmarks
        regions = [
            (lmk[0:9], lmk[17:18]),                                # right jaw
            (lmk[8:17], lmk[26:27]),                               # left jaw
            (lmk[17:20], lmk[8:9]),                                # right cheek
            (lmk[24:27], lmk[8:9]),                                # left cheek
            (lmk[19:25], lmk[8:9]),                                # nose ridge
            (lmk[17:22], lmk[27:28], lmk[31:36], lmk[8:9]),        # right eye
            (lmk[22:27], lmk[27:28], lmk[31:36], lmk[8:9]),        # left eye
            (lmk[27:31], lmk[31:36]),                              # nose
        ]
        for region in regions:
            points = np.concatenate(region)
            cv2.fillConvexPoly(mask, cv2.convexHull(points), 255.0)
        return mask
|
class extended(Mask):
    """Extended mask.

    Based on the components mask; attempts to extend the eyebrow points up
    the forehead before filling the component regions.
    """

    def build_mask(self):
        height, width = self.face.shape[:2]
        mask = np.zeros((height, width, 1), dtype=np.float32)
        landmarks = self.landmarks.copy()
        # Midpoints between the outer eye corners and the jaw end points,
        # then quarter points closer to the eyes: anchors below each brow.
        ml_pnt = (landmarks[36] + landmarks[0]) // 2
        mr_pnt = (landmarks[16] + landmarks[45]) // 2
        ql_pnt = (landmarks[36] + ml_pnt) // 2
        qr_pnt = (landmarks[45] + mr_pnt) // 2
        bot_l = np.array((ql_pnt, landmarks[36], landmarks[37], landmarks[38], landmarks[39]))
        bot_r = np.array((landmarks[42], landmarks[43], landmarks[44], landmarks[45], qr_pnt))
        top_l = landmarks[17:22]
        top_r = landmarks[22:27]
        # Push each brow point up the forehead by half its distance to the anchor.
        landmarks[17:22] = top_l + (top_l - bot_l) // 2
        landmarks[22:27] = top_r + (top_r - bot_r) // 2
        regions = [
            (landmarks[0:9], landmarks[17:18]),                                          # right jaw
            (landmarks[8:17], landmarks[26:27]),                                         # left jaw
            (landmarks[17:20], landmarks[8:9]),                                          # right cheek
            (landmarks[24:27], landmarks[8:9]),                                          # left cheek
            (landmarks[19:25], landmarks[8:9]),                                          # nose ridge
            (landmarks[17:22], landmarks[27:28], landmarks[31:36], landmarks[8:9]),      # right eye
            (landmarks[22:27], landmarks[27:28], landmarks[31:36], landmarks[8:9]),      # left eye
            (landmarks[27:31], landmarks[31:36]),                                        # nose
        ]
        for region in regions:
            points = np.concatenate(region)
            cv2.fillConvexPoly(mask, cv2.convexHull(points), 255.0)
        return mask
|
class facehull(Mask):
    """ Basic face hull mask """

    def build_mask(self):
        """Fill the convex hull of all landmarks (anti-aliased edges)."""
        height, width = self.face.shape[:2]
        mask = np.zeros((height, width, 1), dtype=np.float32)
        points = np.array(self.landmarks).reshape((-1, 2))
        cv2.fillConvexPoly(mask, cv2.convexHull(points), 255.0, lineType=cv2.LINE_AA)
        return mask
|
class random_components(Mask):
    """Random-components mask.

    Same forehead-extended components as ``extended``, but only a random
    subset of the regions is filled (possibly none, never all eight).
    """

    def build_mask(self):
        height, width = self.face.shape[:2]
        mask = np.zeros((height, width, 1), dtype=np.float32)
        landmarks = self.landmarks.copy()
        # Anchor points below the brows (see `extended` for the construction).
        ml_pnt = (landmarks[36] + landmarks[0]) // 2
        mr_pnt = (landmarks[16] + landmarks[45]) // 2
        ql_pnt = (landmarks[36] + ml_pnt) // 2
        qr_pnt = (landmarks[45] + mr_pnt) // 2
        bot_l = np.array((ql_pnt, landmarks[36], landmarks[37], landmarks[38], landmarks[39]))
        bot_r = np.array((landmarks[42], landmarks[43], landmarks[44], landmarks[45], qr_pnt))
        top_l = landmarks[17:22]
        top_r = landmarks[22:27]
        # Push the brow points up the forehead.
        landmarks[17:22] = top_l + (top_l - bot_l) // 2
        landmarks[22:27] = top_r + (top_r - bot_r) // 2
        regions = [
            (landmarks[0:9], landmarks[17:18]),                                          # right jaw
            (landmarks[8:17], landmarks[26:27]),                                         # left jaw
            (landmarks[17:20], landmarks[8:9]),                                          # right cheek
            (landmarks[24:27], landmarks[8:9]),                                          # left cheek
            (landmarks[19:25], landmarks[8:9]),                                          # nose ridge
            (landmarks[17:22], landmarks[27:28], landmarks[31:36], landmarks[8:9]),      # right eye
            (landmarks[22:27], landmarks[27:28], landmarks[31:36], landmarks[8:9]),      # left eye
            (landmarks[27:31], landmarks[31:36]),                                        # nose
        ]
        # Pick 0 .. len-1 regions at random (never the full set).
        chosen = random.sample(regions, random.randint(0, len(regions) - 1))
        for region in chosen:
            points = np.concatenate(region)
            cv2.fillConvexPoly(mask, cv2.convexHull(points), 255.0)
        return mask
|
def simple_transform():
    """Resize-only pipeline: every image becomes 256x256."""
    return Compose([Resize(256, 256)])
|
def strong_aug_pixel(p=0.5):
    """Heavy pixel-level augmentation: noise, compression, blur, color shifts.

    `p` is the probability the whole pipeline is applied.
    """
    print('[DATA]: strong aug pixel')
    from albumentations import Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue, MultiplicativeNoise, IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, RandomBrightnessContrast, IAAPiecewiseAffine, IAASharpen, IAAEmboss, Flip, OneOf, Compose, JpegCompression, CLAHE
    return Compose(
        [
            OneOf(
                [
                    MultiplicativeNoise(multiplier=[0.5, 1.5], per_channel=True),
                    JpegCompression(quality_lower=39, quality_upper=80),
                ],
                p=0.2,
            ),
            OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2),
            OneOf(
                [
                    MotionBlur(p=0.2),
                    MedianBlur(blur_limit=3, p=0.1),
                    Blur(blur_limit=3, p=0.1),
                ],
                p=0.2,
            ),
            OneOf(
                [
                    CLAHE(clip_limit=2),
                    IAASharpen(),
                    IAAEmboss(),
                    RandomBrightnessContrast(),
                ],
                p=0.3,
            ),
            HueSaturationValue(p=0.3),
        ],
        p=p,
    )
|
def pixel_aug(p=0.5):
    """Pixel-level augmentation: jpeg, additive noise, blur/downscale, color.

    `p` is the probability the whole pipeline is applied.
    """
    print('[DATA]: pixel aug')
    from albumentations import JpegCompression, Blur, Downscale, CLAHE, HueSaturationValue, RandomBrightnessContrast, IAAAdditiveGaussianNoise, GaussNoise, GaussianBlur, MedianBlur, MotionBlur, Compose, OneOf
    from random import sample, randint, uniform
    # NOTE(review): randint/uniform are drawn once when the pipeline is
    # built, so the noise loc/mean stay fixed for every image the pipeline
    # processes — confirm this is intended.
    return Compose(
        [
            OneOf([JpegCompression(quality_lower=20, quality_upper=99, p=1)], p=0.2),
            OneOf(
                [
                    IAAAdditiveGaussianNoise(loc=randint(1, 9), p=1),
                    GaussNoise(mean=uniform(0, 10.0), p=1),
                ],
                p=0.3,
            ),
            OneOf(
                [
                    GaussianBlur(blur_limit=15, p=1),
                    MotionBlur(blur_limit=19, p=1),
                    Downscale(scale_min=0.3, scale_max=0.99, p=1),
                    Blur(blur_limit=15, p=1),
                    MedianBlur(blur_limit=9, p=1),
                ],
                p=0.4,
            ),
            OneOf(
                [
                    CLAHE(clip_limit=4.0, p=1),
                    HueSaturationValue(p=1),
                    RandomBrightnessContrast(p=1),
                ],
                p=0.1,
            ),
        ],
        p=p,
    )
|
def spatial_aug(p=0.5):
    """Spatial augmentation: grid dropout, random resized crop, flip, rotate.

    `p` is the probability the whole pipeline is applied.
    """
    print('[DATA] spatial aug')
    from albumentations import GridDropout, RandomResizedCrop, Rotate, HorizontalFlip, Compose
    return Compose(
        [
            GridDropout(holes_number_x=3, holes_number_y=3, random_offset=True, p=0.5),
            RandomResizedCrop(256, 256, scale=(0.7, 1.0), p=1.0),
            HorizontalFlip(p=0.5),
            Rotate(limit=90, p=0.5),
        ],
        p=p,
    )
|
def pixel_aug_mild(p=0.5):
    """Milder variant of `pixel_aug` (higher jpeg quality, smaller blur kernels).

    `p` is the probability the whole pipeline is applied.
    """
    print('[DATA]: pixel aug mild')
    from albumentations import JpegCompression, Blur, Downscale, CLAHE, HueSaturationValue, RandomBrightnessContrast, IAAAdditiveGaussianNoise, GaussNoise, GaussianBlur, MedianBlur, MotionBlur, Compose, OneOf
    from random import sample, randint, uniform
    # NOTE(review): randint/uniform are drawn once at pipeline build time,
    # not per image — confirm this is intended.
    return Compose(
        [
            OneOf([JpegCompression(quality_lower=60, quality_upper=99, p=1)], p=0.2),
            OneOf(
                [
                    IAAAdditiveGaussianNoise(loc=randint(1, 5), p=1),
                    GaussNoise(mean=uniform(0, 5.0), p=1),
                ],
                p=0.3,
            ),
            OneOf(
                [
                    GaussianBlur(blur_limit=7, p=1),
                    MotionBlur(blur_limit=9, p=1),
                    Downscale(scale_min=0.6, scale_max=0.99, p=1),
                    Blur(blur_limit=7, p=1),
                    MedianBlur(blur_limit=3, p=1),
                ],
                p=0.4,
            ),
            OneOf(
                [
                    CLAHE(clip_limit=2.0, p=1),
                    HueSaturationValue(p=1),
                    RandomBrightnessContrast(p=1),
                ],
                p=0.1,
            ),
        ],
        p=p,
    )
|
class Augmentator():
    """Select one of the module's augmentation pipelines by name and apply it.

    Known names: 'pixel_aug', 'simple', 'pixel_mild', 'spatial'.
    Raises NotImplementedError for any other name.
    """

    def __init__(self, augment_fn=''):
        factories = {
            'pixel_aug': pixel_aug,
            'simple': simple_transform,
            'pixel_mild': pixel_aug_mild,
            'spatial': spatial_aug,
        }
        if augment_fn not in factories:
            raise NotImplementedError(augment_fn)
        self.augment_fn = factories[augment_fn]()

    def __call__(self, img, mask=None):
        """Augment `img` (and `mask`, when given, with the same transform)."""
        if mask is None:
            return self.augment_fn(image=img)['image']
        augmented = self.augment_fn(image=img, mask=mask)
        return (augmented['image'], augmented['mask'])
|
def data_transform(size=256, normalize=True):
    """Resize -> (optional ImageNet-style Normalize) -> ToTensor pipeline."""
    steps = [Resize(size, size)]
    if normalize:
        steps.append(Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
    steps.append(ToTensor())
    return Compose(steps)
|
def color_transfer(source, target, clip=True, preserve_paper=True, mask=None):
    """Transfer the color distribution of `source` onto `target` in L*a*b* space.

    Loosely based on "Color Transfer between Images" (Reinhard et al., 2001).

    Parameters
    ----------
    source, target : OpenCV images in BGR color space (uint8).
    clip : if True, clip L*a*b* components to [0, 255] before converting back
        (keeps brightness truer to the input); otherwise min-max scale, which
        avoids washed-out regions that clipping can cause.
    preserve_paper : if True, scale each channel by (target std / source std)
        per the paper; otherwise by the reciprocal, which often looks better.
    mask : optional mask forwarded to `image_stats` so statistics cover only
        the masked pixels of both images.

    Returns
    -------
    uint8 BGR image the size of `target`.

    NOTE(review): this function is shadowed by a later `color_transfer`
    definition further down this file, which delegates to `colorTransfer`.
    """
    source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype('float32')
    target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype('float32')
    # Per-channel mean and std for both images (optionally masked).
    (lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source, mask)
    (lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target, mask)
    (l, a, b) = cv2.split(target)
    # Center the target channels (in-place on the float32 arrays).
    l -= lMeanTar
    a -= aMeanTar
    b -= bMeanTar
    if preserve_paper:
        # Paper's scaling: target std over source std.
        l = ((lStdTar / lStdSrc) * l)
        a = ((aStdTar / aStdSrc) * a)
        b = ((bStdTar / bStdSrc) * b)
    else:
        # Reciprocal scaling: often more aesthetically pleasing.
        l = ((lStdSrc / lStdTar) * l)
        a = ((aStdSrc / aStdTar) * a)
        b = ((bStdSrc / bStdTar) * b)
    # Shift to the source means, then bring back into [0, 255].
    l += lMeanSrc
    a += aMeanSrc
    b += bMeanSrc
    l = _scale_array(l, clip=clip)
    a = _scale_array(a, clip=clip)
    b = _scale_array(b, clip=clip)
    transfer = cv2.merge([l, a, b])
    transfer = cv2.cvtColor(transfer.astype('uint8'), cv2.COLOR_LAB2BGR)
    return transfer
|
def image_stats(image, mask=None):
    """Per-channel mean and std of a 3-channel (L*a*b*) image.

    Parameters
    ----------
    image : HxWx3 array in L*a*b* color space.
    mask : optional HxW mask; when given, statistics cover only the selected
        pixels. NOTE(review): the mask is used directly as a fancy index
        after flattening, so it is presumably boolean — confirm with callers.

    Returns
    -------
    (lMean, lStd, aMean, aStd, bMean, bStd)
    """
    # Slice channels instead of cv2.split: same values, no channel copies,
    # and no OpenCV dependency for a pure-numpy computation.
    l = image[:, :, 0]
    a = image[:, :, 1]
    b = image[:, :, 2]
    if mask is not None:
        flat_mask = mask.reshape(-1)
        l = l.reshape(-1)[flat_mask]
        a = a.reshape(-1)[flat_mask]
        b = b.reshape(-1)[flat_mask]
    return (l.mean(), l.std(), a.mean(), a.std(), b.mean(), b.std())
|
def _min_max_scale(arr, new_range=(0, 255)):
'\n\tPerform min-max scaling to a NumPy array\n\tParameters:\n\t-------\n\tarr: NumPy array to be scaled to [new_min, new_max] range\n\tnew_range: tuple of form (min, max) specifying range of\n\t\ttransformed array\n\tReturns:\n\t-------\n\tNumPy array that has been scaled to be in\n\t[new_range[0], new_range[1]] range\n\t'
mn = arr.min()
mx = arr.max()
if ((mn < new_range[0]) or (mx > new_range[1])):
scaled = ((((new_range[1] - new_range[0]) * (arr - mn)) / (mx - mn)) + new_range[0])
else:
scaled = arr
return scaled
|
def _scale_array(arr, clip=True):
'\n\tTrim NumPy array values to be in [0, 255] range with option of\n\tclipping or scaling.\n\tParameters:\n\t-------\n\tarr: array to be trimmed to [0, 255] range\n\tclip: should array be scaled by np.clip? if False then input\n\t\tarray will be min-max scaled to range\n\t\t[max([arr.min(), 0]), min([arr.max(), 255])]\n\tReturns:\n\t-------\n\tNumPy array that has been scaled to be in [0, 255] range\n\t'
if clip:
scaled = np.clip(arr, 0, 255)
else:
scale_range = (max([arr.min(), 0]), min([arr.max(), 255]))
scaled = _min_max_scale(arr, new_range=scale_range)
return scaled
|
def colorTransfer(src, dst, mask):
    """Shift the masked pixels of `dst` so their per-channel mean matches `src`.

    Pixels where `mask` is zero are left untouched; results are clipped to
    [0, 255] and written into a copy of `dst`.
    """
    result = np.copy(dst)
    rows, cols = np.where(mask != 0)[0], np.where(mask != 0)[1]
    src_pixels = src[rows, cols].astype(np.int32)
    dst_pixels = dst[rows, cols].astype(np.int32)
    # Re-center the destination pixels on the source's mean color.
    shifted = dst_pixels - np.mean(dst_pixels, axis=0) + np.mean(src_pixels, axis=0)
    result[rows, cols] = np.clip(shifted, 0, 255)
    return result
|
def color_transfer(source, target, clip=None, preserve_paper=None, mask=None):
    # NOTE(review): this redefinition shadows the Reinhard-style
    # `color_transfer` defined earlier in this file. `clip` and
    # `preserve_paper` are accepted only for signature compatibility
    # and are ignored — confirm the shadowing is intentional.
    return colorTransfer(src=source, dst=target, mask=mask)
|
def mkdir_p(path):
    """Create `path` (and any missing parents), like ``mkdir -p``.

    Silently succeeds when the directory already exists; raises OSError
    (FileExistsError) if `path` exists but is not a directory — the same
    outcome as the original errno.EEXIST/isdir dance, but race-free.
    """
    os.makedirs(os.path.abspath(path), exist_ok=True)
|
def files(path, exts=None, r=False):
    """Yield file paths at/under `path`.

    path : a single file (yielded if it matches `exts`) or a directory.
    exts : optional collection of extensions (with dot) to keep; None keeps all.
    r    : recurse into subdirectories when True; otherwise only the top level.

    File names within each directory are yielded in sorted order.
    """
    if os.path.isfile(path):
        if exts is None or splitext(path)[-1] in exts:
            yield path
    elif os.path.isdir(path):
        for dirpath, _, filenames in os.walk(path):
            for fname in sorted(filenames):
                if exts is None or splitext(fname)[1] in exts:
                    yield join(dirpath, fname)
            if not r:
                # non-recursive: stop after the top-level directory
                break
|
def rect_to_bb(rect):
    """Convert an object exposing left()/top()/right()/bottom() (e.g. a
    dlib rectangle) into an (x, y, w, h) bounding-box tuple."""
    left = rect.left()
    top = rect.top()
    return (left, top, rect.right() - left, rect.bottom() - top)
|
def shape_to_np(shape, dtype='int'):
    """Convert a 68-point landmark object into a (68, 2) array of (x, y).

    ndarrays are passed straight through (cast to `dtype`).
    """
    if isinstance(shape, np.ndarray):
        return shape.astype(dtype)
    points = [(shape.part(i).x, shape.part(i).y) for i in range(68)]
    return np.array(points, dtype=dtype)
|
def shape_to_np(shape, dtype='int'):
    """Convert a 68-point landmark object into a (68, 2) array of (x, y).

    NOTE(review): this redefines the earlier `shape_to_np` in this file.
    The earlier version also accepted ndarrays; that fast-path is restored
    here so both definitions behave consistently for array inputs.
    """
    if isinstance(shape, np.ndarray):
        return shape.astype(dtype)
    coords = np.zeros((68, 2), dtype=dtype)
    for i in range(0, 68):
        coords[i] = (shape.part(i).x, shape.part(i).y)
    return coords
|
def rot90(v):
    """Rotate a 2-D vector 90 degrees counter-clockwise: (x, y) -> (-y, x)."""
    x, y = v[0], v[1]
    return np.array([-y, x])
|
def find_face_cvhull(im):
    """Detect the first face in an RGB image and return its landmark convex
    hull, or None when no face is found."""
    gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
    detections = detector(gray, 1)
    if not detections:
        return None
    landmarks = shape_to_np(predictor(gray, detections[0]))
    return cv2.convexHull(landmarks)
|
def find_face_landmark(im):
    """Detect the first face in an RGB image and return its (68, 2) landmark
    array, or None when no face is found."""
    gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
    detections = detector(gray, 1)
    if not detections:
        return None
    return shape_to_np(predictor(gray, detections[0]))
|
class Masks4D(object):
    """Expand a batch of 2-D masks into a concatenated similarity volume.

    For each pixel value of each mask, computes 1 - |pixel - mask| against the
    whole mask and concatenates the results along dims 2 (per row), 1 (per
    mask-row block) and 0 (per mask). Output rank/shape depend on the input
    masks' sizes — presumably all masks share one (h, w); confirm with callers.
    """

    def __call__(self, masks):
        first_w = True
        first_h = True
        first_c = True
        for (k, mask) in enumerate(masks):
            # h, w are unpacked but not used below.
            (h, w) = mask.shape
            # Mask lifted to shape (1, 1, 1, h, w) for broadcasting.
            real_mask = torch.unsqueeze(torch.unsqueeze(torch.unsqueeze(mask, 0), 0), 0)
            for (i, mask_h) in enumerate(mask):
                for (j, mask_w) in enumerate(mask_h):
                    # Per-pixel similarity of scalar mask_w to the whole mask.
                    curr_mask = (1 - torch.abs((mask_w - real_mask)))
                    if first_w:
                        # NOTE(review): the row is seeded with `real_mask`
                        # rather than `curr_mask` like every later column —
                        # confirm this asymmetry is intended.
                        total_mask_w = real_mask
                        first_w = False
                    else:
                        total_mask_w = torch.cat((total_mask_w, curr_mask), dim=2)
                if first_h:
                    total_mask_h = total_mask_w
                    first_h = False
                else:
                    total_mask_h = torch.cat((total_mask_h, total_mask_w), dim=1)
                # Restart the width accumulator for the next row.
                first_w = True
            if first_c:
                total_mask_c = total_mask_h
                first_c = False
            else:
                total_mask_c = torch.cat((total_mask_c, total_mask_h), dim=0)
            # Restart the height accumulator for the next mask.
            first_h = True
        return total_mask_c
|
def parse_args():
    """CLI options for sampling from the pretrained (Glow) generator.

    NOTE(review): several `parse_args` definitions exist in this file; later
    ones shadow this one at import time.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('--model_path', help='path to pretrained model')
    ap.add_argument('--pretrained', help='downloads pretrained model [celebahq]')
    ap.add_argument('--output_path', required=True,
                    help='path to save generated samples')
    ap.add_argument('--num_samples', type=int, default=100,
                    help='number of samples')
    ap.add_argument('--seed', type=int, default=0,
                    help='random seed for sampling')
    ap.add_argument('--batch_size', type=int, default=64,
                    help='batch size for generating samples')
    ap.add_argument('--gpu', default='', type=str,
                    help='GPUs to use (leave blank for CPU only)')
    ap.add_argument('--manipulate', action='store_true',
                    help='add random manipulations to face')
    ap.add_argument('--format', default='jpg', type=str,
                    help='file format to save generated images')
    ap.add_argument('--resize', type=int,
                    help='resizes images to this size before saving')
    options = ap.parse_args()
    print(options)
    return options
|
def sample(opt):
    """Sample face images from the pretrained Glow model and save them.

    Writes `opt.num_samples` images into `opt.output_path`, named
    seed###_sample######.<format>. With --manipulate, latents are perturbed
    along random attribute directions and the chosen tags/amounts are saved
    to manipulations.npz. NOTE(review): shadowed by later `sample`
    definitions in this file.
    """
    tf.InteractiveSession()
    assert (opt.model_path or opt.pretrained), 'specify weights path or pretrained model'
    if opt.model_path:
        # Loading arbitrary Glow weights is not wired up.
        raise NotImplementedError
    elif opt.pretrained:
        assert (opt.pretrained == 'celebahq')
        # The glow demo package provides the decoder and attribute directions.
        sys.path.append('resources/glow/demo')
        import model
        eps_std = 0.7
        eps_size = model.eps_size
    rng = np.random.RandomState(opt.seed)
    # Separate stream for attribute choices so they don't disturb sampling.
    attr = np.random.RandomState((opt.seed + 1))
    tags = []
    amts = []
    for batch_start in tqdm(range(0, opt.num_samples, opt.batch_size)):
        # Last batch may be smaller than batch_size.
        bs = (min(opt.num_samples, (batch_start + opt.batch_size)) - batch_start)
        feps = rng.normal(scale=eps_std, size=[bs, eps_size])
        if opt.manipulate:
            # Random attribute direction and strength per sample.
            tag = attr.randint(len(model._TAGS), size=bs)
            amt = attr.uniform((- 1), 1, size=(bs, 1))
            dzs = model.z_manipulate[tag]
            feps = (feps + (amt * dzs))
            tags.append(tag)
            amts.append(amt)
        images = model.decode(feps)
        for idx in range(images.shape[0]):
            filename = os.path.join(opt.output_path, ('seed%03d_sample%06d.%s' % (opt.seed, (batch_start + idx), opt.format)))
            im = PIL.Image.fromarray(images[idx], 'RGB')
            if opt.resize:
                im = im.resize((opt.resize, opt.resize), PIL.Image.LANCZOS)
            im.save(filename)
    if opt.manipulate:
        # Record which manipulations were applied to each sample.
        outfile = os.path.join(opt.output_path, 'manipulations.npz')
        np.savez(outfile, tags=np.concatenate(tags), amts=np.concatenate(amts))
|
def parse_args():
    """CLI options for sampling from a pickled GAN checkpoint.

    NOTE(review): redefines the earlier `parse_args` in this file.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('--model_path', required=True,
                    help='path to pretrained model')
    ap.add_argument('--output_path', required=True,
                    help='path to save generated samples')
    ap.add_argument('--num_samples', type=int, default=100,
                    help='number of samples')
    ap.add_argument('--seed', type=int, default=0,
                    help='random seed for sampling')
    ap.add_argument('--batch_size', type=int, default=64,
                    help='batch size for generating samples')
    ap.add_argument('--gpu', default='', type=str,
                    help='GPUs to use (leave blank for CPU only)')
    ap.add_argument('--format', default='jpg', type=str,
                    help='file format to save generated images')
    ap.add_argument('--resize', type=int,
                    help='resizes images to this size before saving')
    ap.add_argument('--quality', type=int, help='compression quality')
    options = ap.parse_args()
    print(options)
    return options
|
def sample(opt):
    """Sample images from a pickled (TensorFlow) GAN checkpoint and save them.

    Loads (G, D, Gs) from `opt.model_path`, draws `opt.num_samples` latents
    seeded by `opt.seed`, and writes images to `opt.output_path`. Optional
    resize and JPEG-compression post-processing. NOTE(review): shadowed by a
    later `sample` definition in this file.
    """
    tf.InteractiveSession()
    # NOTE(review): pickle.load executes arbitrary code — only load trusted
    # checkpoint files.
    with open(opt.model_path, 'rb') as file:
        (G, D, Gs) = pickle.load(file)
    rng = np.random.RandomState(opt.seed)
    for batch_start in tqdm(range(0, opt.num_samples, opt.batch_size)):
        # Last batch may be smaller than batch_size.
        bs = (min(opt.num_samples, (batch_start + opt.batch_size)) - batch_start)
        latents = rng.randn(bs, *Gs.input_shapes[0][1:])
        # Unconditional sampling: all-zero labels.
        labels = np.zeros(([latents.shape[0]] + Gs.input_shapes[1][1:]))
        images = Gs.run(latents, labels)
        # [-1, 1] floats -> uint8 [0, 255]; NCHW -> NHWC.
        images = np.clip(np.rint((((images + 1.0) / 2.0) * 255.0)), 0.0, 255.0).astype(np.uint8)
        images = images.transpose(0, 2, 3, 1)
        for idx in range(images.shape[0]):
            filename = os.path.join(opt.output_path, ('seed%03d_sample%06d.%s' % (opt.seed, (batch_start + idx), opt.format)))
            im = PIL.Image.fromarray(images[idx], 'RGB')
            if opt.resize:
                im = im.resize((opt.resize, opt.resize), PIL.Image.LANCZOS)
            if opt.quality:
                # JPEG-compress at 1024x1024, then restore the previous size.
                aug = A.augmentations.transforms.JpegCompression(p=1)
                (w, h) = im.size
                im_np = np.asarray(im.resize((1024, 1024), PIL.Image.LANCZOS))
                im = PIL.Image.fromarray(aug.apply(im_np, quality=opt.quality))
                im = im.resize((w, h), PIL.Image.LANCZOS)
            im.save(filename)
|
def parse_args():
    """CLI options for sampling from a StyleGAN-style checkpoint or a
    downloadable pretrained model [ffhq, celebahq].

    NOTE(review): redefines the earlier `parse_args` functions in this file.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('--model_path', help='path to pretrained model')
    ap.add_argument('--pretrained',
                    help='downloads pretrained model [ffhq, celebahq]')
    ap.add_argument('--output_path', required=True,
                    help='path to save generated samples')
    ap.add_argument('--num_samples', type=int, default=100,
                    help='number of samples')
    ap.add_argument('--seed', type=int, default=0,
                    help='random seed for sampling')
    ap.add_argument('--batch_size', type=int, default=64,
                    help='batch size for generating samples')
    ap.add_argument('--gpu', default='', type=str,
                    help='GPUs to use (leave blank for CPU only)')
    ap.add_argument('--format', default='jpg', type=str,
                    help='file format to save generated images')
    ap.add_argument('--resize', type=int,
                    help='resizes images to this size before saving')
    options = ap.parse_args()
    print(options)
    return options
|
def sample(opt):
    """Sample images from a local or downloaded StyleGAN-style checkpoint.

    Loads (G, D, Gs) either from `opt.model_path` or by downloading the
    pretrained model named by `opt.pretrained` (ffhq/celebahq), then writes
    `opt.num_samples` images to `opt.output_path`.
    """
    tf.InteractiveSession()
    assert (opt.model_path or opt.pretrained), 'specify weights path or pretrained model'
    if opt.model_path:
        # NOTE(review): pickle.load executes arbitrary code — only load
        # trusted checkpoint files.
        with open(opt.model_path, 'rb') as file:
            (G, D, Gs) = pickle.load(file)
    elif opt.pretrained:
        # Known Google-Drive hosted checkpoints.
        urls = dict(ffhq='https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ', celebahq='https://drive.google.com/uc?id=1MGqJl28pN4t7SAtSrPdSRJSQJqahkzUf')
        url = urls[opt.pretrained]
        with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:
            (_G, _D, Gs) = pickle.load(f)
    rng = np.random.RandomState(opt.seed)
    for batch_start in tqdm(range(0, opt.num_samples, opt.batch_size)):
        # Last batch may be smaller than batch_size.
        bs = (min(opt.num_samples, (batch_start + opt.batch_size)) - batch_start)
        latents = rng.randn(bs, *Gs.input_shapes[0][1:])
        # Unconditional sampling: all-zero labels.
        labels = np.zeros(([latents.shape[0]] + Gs.input_shapes[1][1:]))
        images = Gs.run(latents, labels)
        # [-1, 1] floats -> uint8 [0, 255]; NCHW -> NHWC.
        images = np.clip(np.rint((((images + 1.0) / 2.0) * 255.0)), 0.0, 255.0).astype(np.uint8)
        images = images.transpose(0, 2, 3, 1)
        for idx in range(images.shape[0]):
            filename = os.path.join(opt.output_path, ('seed%03d_sample%06d.%s' % (opt.seed, (batch_start + idx), opt.format)))
            im = PIL.Image.fromarray(images[idx], 'RGB')
            if opt.resize:
                im = im.resize((opt.resize, opt.resize), PIL.Image.LANCZOS)
            im.save(filename)
|
def get_transform(opt, for_val=False):
    """Build the torchvision transform pipeline.

    Validation: Resize -> CenterCrop(loadSize) -> ToTensor -> Normalize.
    Training:   Resize -> CenterCrop(fineSize) -> AllAugmentations ->
                ToTensor -> Normalize -> RandomErasing.
    The composed transform is printed and logged before returning.
    """
    if for_val:
        transform_list = [
            transforms.Resize(opt.loadSize, interpolation=PIL.Image.LANCZOS),
            transforms.CenterCrop(opt.loadSize),
            transforms.ToTensor(),
        ]
    else:
        transform_list = [
            transforms.Resize(opt.loadSize, interpolation=PIL.Image.LANCZOS),
            transforms.CenterCrop(opt.fineSize),
            AllAugmentations(),
            transforms.ToTensor(),
        ]
    # ImageNet normalization in both modes.
    transform_list.append(transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)))
    if not for_val:
        transform_list.append(transforms.RandomErasing())
    transform = transforms.Compose(transform_list)
    print(transform)
    logging.info(transform)
    return transform
|
def get_mask_transform(opt, for_val=False):
    """Mask transform: ToTensor only. `opt` and `for_val` are accepted for
    signature parity with get_transform but unused."""
    return transforms.Compose([transforms.ToTensor()])
|
class AllAugmentations(object):
    """PIL -> PIL wrapper around a fixed albumentations pipeline
    (blur, jpeg compression, brightness/contrast, color jitter)."""

    def __init__(self):
        import albumentations
        self.transform = albumentations.Compose(
            [
                albumentations.Blur(blur_limit=3),
                albumentations.JpegCompression(quality_lower=30, quality_upper=100, p=0.5),
                albumentations.RandomBrightnessContrast(),
                albumentations.augmentations.transforms.ColorJitter(),
            ]
        )

    def __call__(self, image):
        """Augment a PIL image and return a PIL image."""
        augmented = self.transform(image=np.array(image))
        return PIL.Image.fromarray(augmented['image'])
|
class JPEGCompression(object):
    """Apply JPEG compression at a fixed quality `level` to a PIL image."""

    def __init__(self, level):
        import albumentations as A
        self.level = level
        self.transform = A.augmentations.transforms.JpegCompression(p=1)

    def __call__(self, image):
        compressed = self.transform.apply(np.array(image), quality=self.level)
        return PIL.Image.fromarray(compressed)
|
class Blur(object):
    """Apply a blur with a fixed kernel size `level` to a PIL image."""

    def __init__(self, level):
        import albumentations as A
        self.level = level
        # Fixed (level, level) blur_limit makes the kernel size deterministic.
        self.transform = A.Blur(blur_limit=(self.level, self.level), always_apply=True)

    def __call__(self, image):
        blurred = self.transform(image=np.array(image))
        return PIL.Image.fromarray(blurred['image'])
|
class Gamma(object):
    """Apply gamma correction (gamma = level / 100) to a PIL image."""

    def __init__(self, level):
        import albumentations as A
        self.level = level
        self.transform = A.augmentations.transforms.RandomGamma(p=1)

    def __call__(self, image):
        adjusted = self.transform.apply(np.array(image), gamma=(self.level / 100))
        return PIL.Image.fromarray(adjusted)
|
class UnpairedMaskDataset(data.Dataset):
    """A dataset class for loading images within a single folder.

    Each item also carries an all-ones dummy mask matching the transformed
    image's spatial size (no real segmentation is loaded here).
    """

    def __init__(self, opt, im_path, label, is_val=False):
        """Initialize this dataset class.

        Parameters:
            opt     -- experiment options
            im_path -- path to folder of images
            label   -- stored on the dataset; not returned per item
            is_val  -- is this training or validation? used to determine
                       transform
        """
        super().__init__()
        self.dir = im_path
        self.paths = sorted(make_dataset(self.dir, opt.max_dataset_size))
        self.label = label
        self.size = len(self.paths)
        assert (self.size > 0)
        # NOTE(review): `transforms` here appears to be a project module
        # exposing get_transform/get_mask_transform (not torchvision) —
        # confirm against the file's imports.
        self.transform = transforms.get_transform(opt, for_val=is_val)
        self.mask_transform = transforms.get_mask_transform(opt, for_val=is_val)
        self.opt = opt

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index -- a random integer for data indexing

        Returns a dict with the transformed image ('img'), its file path
        ('path') and an all-ones mask ('mask') of the image's H x W.
        """
        path = self.paths[index]
        img = Image.open(path).convert('RGB')
        img = self.transform(img)
        # Transformed image is CHW; build a mask of matching spatial size.
        (C, H, W) = np.array(img).shape
        real_mask = torch.ones([H, W])
        img_mask = Image.fromarray(np.uint8((real_mask * 255)), 'L')
        img_mask = self.mask_transform(img_mask)
        return {'img': img, 'path': path, 'mask': img_mask}

    def __len__(self):
        return self.size
|
class Struct():
    """Lightweight attribute bag: Struct(a=1).a == 1."""

    def __init__(self, **entries):
        for key, value in entries.items():
            setattr(self, key, value)
|
def find_model_using_name(model_name):
    """Import models/<model_name>_model.py and return its BaseModel subclass.

    The class is matched case-insensitively against '<modelname>model'
    (underscores stripped). If several names match, the last one found wins.
    """
    module_name = 'models.' + model_name + '_model'
    modellib = importlib.import_module(module_name)
    target = model_name.replace('_', '') + 'model'
    model = None
    for name, cls in modellib.__dict__.items():
        if name.lower() == target.lower() and issubclass(cls, BaseModel):
            model = cls
    if model is None:
        print('In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase.' % (module_name, target))
        # NOTE(review): exits with status 0 on a lookup failure — a nonzero
        # code (or raising) would signal the error to callers; confirm.
        exit(0)
    return model
|
def get_option_setter(model_name):
    """Return the named model class's modify_commandline_options hook."""
    return find_model_using_name(model_name).modify_commandline_options
|
def create_model(opt, **kwargs):
    """Instantiate and return the model class named by opt.model."""
    model_class = find_model_using_name(opt.model)
    instance = model_class(opt, **kwargs)
    print('model [%s] was created' % instance.name())
    return instance
|
class BaseModel():
    """Base class for models: device wiring, optimizer/scheduler management,
    checkpoint save/load, and loss/visual bookkeeping.

    Subclasses populate `loss_names`, `model_names` and `visual_names`;
    every entry `name` in `model_names` must have a matching `self.net_<name>`
    module and an entry in `self.optimizers`.
    """

    @staticmethod
    def modify_commandline_options(parser):
        """Delegate CLI option registration to the networks module."""
        networks.modify_commandline_options(parser)
        return parser

    def __init__(self, opt):
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        # Model lives on the first listed GPU, or on CPU when none are given.
        self.device = (torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu'))
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        # Faster convolutions for fixed-size inputs.
        torch.backends.cudnn.benchmark = True
        self.loss_names = []
        self.model_names = []
        self.visual_names = []
        self.image_paths = []
        self.optimizers = {}

    def name(self):
        return 'BaseModel'

    def set_input(self, input, mode='TRAIN'):
        """Store the raw input batch; subclasses unpack it in forward()."""
        self.input = input

    def forward(self):
        # Subclasses implement the actual forward pass.
        pass

    def setup(self, opt, parser=None):
        """Build schedulers (training) and/or restore a checkpoint.

        Returns (current_epoch, best_val_metric, best_val_epoch).
        """
        current_ep = 0
        (best_val_metric, best_val_ep) = (0, 0)
        self.print_networks()
        if self.isTrain:
            # One scheduler per named optimizer.
            self.schedulers = {k: netutils.get_scheduler(optim, opt) for (k, optim) in self.optimizers.items()}
        if ((not self.isTrain) or opt.load_model):
            (current_ep, best_val_metric, best_val_ep) = self.load_networks(opt.which_epoch)
            if (opt.which_epoch not in ['latest', 'bestval']):
                # Numbered checkpoint: resume from the epoch after the saved one.
                current_ep += 1
        return (current_ep, best_val_metric, best_val_ep)

    def eval(self):
        """Switch every registered network to eval mode."""
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, ('net_' + name))
                net.eval()

    def train(self):
        """Switch every registered network to train mode."""
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, ('net_' + name))
                net.train()

    def test(self, compute_losses=False):
        """Run forward without gradients; optionally compute the D losses."""
        with torch.no_grad():
            self.forward()
            if compute_losses:
                self.compute_losses_D()

    def get_image_paths(self):
        return self.image_paths

    def optimize_parameters(self):
        # Subclasses implement the optimization step.
        pass

    def update_learning_rate(self, metric=None):
        """Step every scheduler (metric-driven when `metric` is given) and
        log the resulting learning rate of each optimizer."""
        for (k, scheduler) in self.schedulers.items():
            if (metric is not None):
                # Only these policies accept a metric argument.
                assert (self.opt.lr_policy in ['plateau', 'constant'])
                scheduler.step(metric)
            else:
                scheduler.step()
        for (k, optim) in self.optimizers.items():
            logging.info(('learning rate net_%s = %0.7f' % (k, optim.param_groups[0]['lr'])))

    def get_current_visuals(self):
        """Return an ordered name -> attribute mapping of current visuals."""
        visual_ret = OrderedDict()
        for name in self.visual_names:
            assert isinstance(name, str)
            visual_ret[name] = getattr(self, name)
        return visual_ret

    def get_current_losses(self):
        """Return an ordered name -> float mapping of current losses."""
        errors_ret = OrderedDict()
        for name in self.loss_names:
            assert isinstance(name, str)
            errors_ret[name] = float(getattr(self, name))
        return errors_ret

    def save_networks(self, save_name, current_ep, best_val_metric, best_val_ep):
        """Save each net (with optimizer/scheduler state and epoch metadata)
        to <save_dir>/<save_name>_net_<name>.pth."""
        for name in self.model_names:
            assert isinstance(name, str)
            save_filename = ('%s_net_%s.pth' % (save_name, name))
            save_path = os.path.join(self.save_dir, save_filename)
            net = getattr(self, ('net_' + name))
            # Unwrap DataParallel so the checkpoint loads on single-GPU/CPU.
            if isinstance(net, torch.nn.DataParallel):
                sd = net.module.state_dict()
            else:
                sd = net.state_dict()
            optim = self.optimizers[name].state_dict()
            sched = self.schedulers[name].state_dict()
            checkpoint = dict(state_dict=sd, optimizer=optim, scheduler=sched, epoch=current_ep, best_val_metric=best_val_metric, best_val_ep=best_val_ep)
            torch.save(checkpoint, save_path)

    def load_networks(self, save_name):
        """Restore each net (plus optimizer/scheduler when training) from
        <save_dir>/<save_name>_net_<name>.pth.

        Returns the (epoch, best_val_metric, best_val_ep) metadata from the
        last checkpoint read.
        """
        for name in self.model_names:
            assert isinstance(name, str)
            load_filename = ('%s_net_%s.pth' % (save_name, name))
            load_path = os.path.join(self.save_dir, load_filename)
            net = getattr(self, ('net_' + name))
            if isinstance(net, torch.nn.DataParallel):
                net = net.module
            print(('loading the model from %s' % load_path))
            checkpoint = torch.load(load_path, map_location=str(self.device))
            state_dict = checkpoint['state_dict']
            # Drop serialized metadata that can break load_state_dict.
            if hasattr(state_dict, '_metadata'):
                del state_dict._metadata
            net.load_state_dict(state_dict)
            if self.isTrain:
                print(('restoring optimizer and scheduler for %s' % name))
                self.optimizers[name].load_state_dict(checkpoint['optimizer'])
                self.schedulers[name].load_state_dict(checkpoint['scheduler'])
        current_ep = checkpoint['epoch']
        best_val_metric = checkpoint['best_val_metric']
        best_val_ep = checkpoint['best_val_ep']
        return (current_ep, best_val_metric, best_val_ep)

    def print_networks(self):
        """Print each registered network and its parameter count (millions)."""
        print('---------- Networks initialized -------------')
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, ('net_' + name))
                num_params = 0
                for param in net.parameters():
                    num_params += param.numel()
                print(net)
                print(('[Network %s] Total number of parameters : %.3f M' % (name, (num_params / 1000000.0))))
        print('-----------------------------------------------')

    def set_requires_grad(self, nets, requires_grad=False):
        """Enable/disable gradient tracking for one net or a list of nets."""
        if (not isinstance(nets, list)):
            nets = [nets]
        for net in nets:
            if (net is not None):
                for param in net.parameters():
                    param.requires_grad = requires_grad
|
def compute_mhsa(q, k, v, scale_factor=1, mask=None):
    """Scaled dot-product attention.

    q, k, v: (..., tokens, dim) tensors. `mask` (when given) must match the
    trailing (tokens_q, tokens_k) score shape; True entries are excluded by
    filling with -inf before the softmax. Returns softmax(q·kᵀ·scale) · v.
    """
    scores = torch.einsum('... i d , ... j d -> ... i j', q, k) * scale_factor
    if mask is not None:
        assert mask.shape == scores.shape[2:]
        scores = scores.masked_fill(mask, -np.inf)
    weights = torch.softmax(scores, dim=-1)
    return torch.einsum('... i j , ... j d -> ... i d', weights, v)
|
class MultiHeadSelfAttention(nn.Module):
    """Multi-head self-attention layer of the original transformer model.

    Args:
        dim: token dimension (word-embedding vector size).
        heads: number of distinct attention heads.
        dim_head: per-head dimension; defaults to dim // heads but need not
            equal it.
    """

    def __init__(self, dim, heads=8, dim_head=None):
        super().__init__()
        self.dim_head = int(dim / heads) if dim_head is None else dim_head
        _dim = self.dim_head * heads
        self.heads = heads
        # Single projection producing q, k and v together.
        self.to_qvk = nn.Linear(dim, _dim * 3, bias=False)
        self.W_0 = nn.Linear(_dim, dim, bias=False)
        self.scale_factor = self.dim_head ** -0.5

    def forward(self, x, mask=None):
        assert x.dim() == 3
        batch, tokens, _ = x.shape
        qkv = self.to_qvk(x)
        # Equivalent of einops rearrange 'b t (d k h) -> k b h t d' with k=3:
        # factor the last axis as (dim_head, 3, heads) and move the 3-way
        # split to the front.
        qkv = qkv.view(batch, tokens, self.dim_head, 3, self.heads)
        qkv = qkv.permute(3, 0, 4, 1, 2)
        q, k, v = qkv[0], qkv[1], qkv[2]
        out = compute_mhsa(q, k, v, mask=mask, scale_factor=self.scale_factor)
        # 'b h t d -> b t (h d)': merge the heads back into one feature axis.
        out = out.permute(0, 2, 1, 3).reshape(batch, tokens, self.heads * self.dim_head)
        return self.W_0(out)
|
class NLBlockND(nn.Module):
def __init__(self, in_channels=256):
"Implementation of Non-Local Block with 4 different pairwise functions but doesn't include subsampling trick\n args:\n in_channels: original channel size (1024 in the paper)\n inter_channels: channel size inside the block if not specifed reduced to half (512 in the paper)\n mode: supports Gaussian, Embedded Gaussian, Dot Product, and Concatenation\n dimension: can be 1 (temporal), 2 (spatial), 3 (spatiotemporal)\n bn_layer: whether to add batch norm\n "
super(NLBlockND, self).__init__()
self.in_channels = in_channels
self.sig = nn.Sigmoid()
self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=self.in_channels, kernel_size=1)
self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels=self.in_channels, kernel_size=1)
def forward(self, x, return_nl_map=False):
'\n args\n x: (N, C, T, H, W) for dimension=3; (N, C, H, W) for dimension 2; (N, C, T) for dimension 1\n '
batch_size = x.size(0)
theta_x = self.theta(x).view(batch_size, self.in_channels, (- 1))
phi_x = self.phi(x).view(batch_size, self.in_channels, (- 1))
theta_x = theta_x.permute(0, 2, 1)
f = torch.matmul(theta_x, phi_x)
f_div_C = (f / math.sqrt(self.in_channels))
y = f_div_C.permute(0, 2, 1).contiguous()
sig_y = self.sig(y)
final_y = sig_y.view(batch_size, *x.size()[2:], *x.size()[2:])
if return_nl_map:
return (final_y, sig_y)
else:
return final_y
|
def make_patch_resnet(depth, layername, num_classes=2, extra_output=None):
    """Build a patch-wise ResNet classifier truncated at `layername`.

    Everything after `layername` is replaced by a 1x1 conv head, so the model
    emits a num_classes score map over spatial patches instead of one logit
    per image.

    Args:
        depth: ResNet depth (18/34/50/101/152); selects BasicBlock vs Bottleneck.
        layername: name of the last kept layer (e.g. 'layer1', or an earlier stem name).
        num_classes: output channels of the 1x1 classification head.
        extra_output: optional list of layer names whose intermediate
            activations are also returned by the model's forward.
    """
    def change_out(layers):
        # locate the cut point in the (name, module) sequence
        ind, layer = [(i, l) for i, (n, l) in enumerate(layers) if n == layername][0]
        if layername.startswith('layer'):
            # width comes from the stage's final BatchNorm; for Bottleneck
            # ResNets (depth >= 50) the last module is a downsample conv,
            # hence the -2 index
            bn = list(layer.modules())[-1 if depth < 50 else -2]
            assert isinstance(bn, nn.BatchNorm2d)
            num_ch = bn.num_features
        else:
            # stem layers (conv1/bn1/relu/maxpool) are all 64-channel
            num_ch = 64
        # drop everything after the cut point; classify each patch with 1x1 conv
        layers[ind + 1:] = [('convout', nn.Conv2d(num_ch, num_classes, kernel_size=1))]
        return layers
    if extra_output is None:  # fixed: identity comparison per PEP 8 (was `== None`)
        model = CustomResNet(depth, modify_sequence=change_out)
    else:
        print(extra_output)
        model = CustomResNet(depth, modify_sequence=change_out, extra_output=extra_output)
    return model
|
def make_patch_xceptionnet(layername, num_classes=2, extra_output=None):
    """Build a patch-wise XceptionNet classifier truncated at `layername`.

    Layers after `layername` are replaced by a 1x1 conv head producing a
    num_classes score map over spatial patches.

    Args:
        layername: last kept layer; must start with 'block' or 'relu'.
        num_classes: output channels of the classification head.
        extra_output: optional list of layer names whose activations are
            additionally returned by the model's forward.

    Raises:
        NotImplementedError: for cut points other than block*/relu* layers.
    """
    def change_out(layers):
        ind, layer = [(i, l) for i, (n, l) in enumerate(layers) if n == layername][0]
        if layername.startswith('block'):
            # channel count comes from the last BatchNorm inside the block
            # (it may be followed by one non-BN module, hence the -2 fallback)
            module_list = list(layer.modules())
            bn = module_list[-1]
            if not isinstance(bn, nn.BatchNorm2d):
                bn = module_list[-2]
            assert isinstance(bn, nn.BatchNorm2d)
            num_ch = bn.num_features
        elif layername.startswith('relu'):
            # a relu cut point inherits the width of the preceding BatchNorm
            bn = layers[ind - 1][1]
            assert isinstance(bn, nn.BatchNorm2d)
            num_ch = bn.num_features
        else:
            raise NotImplementedError
        # drop everything after the cut point; classify each patch with 1x1 conv
        layers[ind + 1:] = [('convout', nn.Conv2d(num_ch, num_classes, kernel_size=1))]
        return layers
    if extra_output is None:  # fixed: identity comparison per PEP 8 (was `== None`)
        model = CustomXceptionNet(modify_sequence=change_out)
    else:
        model = CustomXceptionNet(extra_output=extra_output, modify_sequence=change_out)
    return model
|
def make_pcl(backbone='xception', layername='block3', input_size=128):
    """Build the PCL non-local block matched to a backbone cut point.

    Args:
        backbone: 'xception' or a 'resnet*' name.
        layername: backbone layer the block attaches to ('blockN' / 'layerN').
        input_size: input image side length, used to derive the feature-map
            side length at the cut point.

    Returns:
        (model, channel): the NLBlockND module sized to the layer's channel
        count, and the spatial side length of the feature map at that layer.
    """
    if backbone == 'xception':
        channels = [128, 256, 728, 728, 728, 728, 728, 728, 728, 728, 728, 1024]
        b1, b2, b3, b12 = (int(input_size / 4), int(input_size / 8), int(input_size / 16), int(input_size / 32))
        out_ch = [b1, b2, b3, b3, b3, b3, b3, b3, b3, b3, b3, b12]
        # fixed: parse the full numeric suffix so 'block10'..'block12' work;
        # int(layername[5]) previously read only the first digit
        layer = int(layername[5:])
        channel = out_ch[layer - 1]
    elif backbone[:6] == 'resnet':
        layer = int(layername[5:])
        assert layer >= 2
        channels = [0, 64, 128, 256, 512]
        b1, b2, b3, b4 = (int(input_size / 2), int(input_size / 4), int(input_size / 8), int(input_size / 16))
        out_ch = [b1, b2, b3, b4]
        channel = out_ch[layer - 1]
    from . import PCL
    model = PCL.NLBlockND(in_channels=channels[layer - 1])
    return (model, channel)
|
def make_xceptionnet_long():
    """Build a patch-wise Xception variant with extra 1x1-kernel PixelBlocks.

    Keeps the backbone up to 'block2', appends two PixelBlocks (which mix
    channels but not space), and finishes with a 2-class 1x1 conv head.
    """
    from . import xception
    def change_out(layers):
        channels = [3, 32, 64, 128, 256, 728, 728, 728, 728, 728, 728, 728, 728, 728, 1024, 1536, 2048]
        # locate the cut point after 'block2' (the paired module is unused)
        ind = [i for i, (n, l) in enumerate(layers) if n == 'block2'][0]
        new_layers = [('pblock3', xception.PixelBlock(channels[4], channels[5], 2, 1, start_with_relu=True, grow_first=True)), ('pblock4', xception.PixelBlock(channels[5], channels[6], 3, 1, start_with_relu=True, grow_first=True))]
        # head width equals pblock4's output channels (channels[6] == 728);
        # fixed: the original indexed channels[9], which only coincidentally
        # holds the same value
        num_ch = channels[6]
        new_layers.append(('convout', nn.Conv2d(num_ch, 2, kernel_size=1)))
        layers[ind + 1:] = new_layers
        return layers
    model = CustomXceptionNet(modify_sequence=change_out)
    return model
|
class CustomResNet(nn.Module):
    """
    Customizable ResNet, compatible with pytorch's resnet, but:
    * The top-level sequence of modules can be modified to add
      or remove or alter layers.
    * Extra outputs can be produced, to allow backprop and access
      to internal features.
    * Pooling is replaced by resizable GlobalAveragePooling so that
      any size can be input (e.g., any multiple of 32 pixels).
    * halfsize=True halves striding on the first pooling to
      set the default size to 112x112 instead of 224x224.
    """

    def __init__(self, size=None, block=None, layers=None, num_classes=1000, extra_output=None, modify_sequence=None, halfsize=False):
        """Build the network.

        Either `size` selects a standard depth, or `block` + `layers` are
        given explicitly (exactly one of the two forms, enforced below).
        `modify_sequence` is a hook that receives the default list of
        (name, module) pairs and may return an edited list.
        """
        standard_sizes = {18: (resnet.BasicBlock, [2, 2, 2, 2]), 34: (resnet.BasicBlock, [3, 4, 6, 3]), 50: (resnet.Bottleneck, [3, 4, 6, 3]), 101: (resnet.Bottleneck, [3, 4, 23, 3]), 152: (resnet.Bottleneck, [3, 8, 36, 3])}
        # exactly one architecture spec: standard size XOR explicit block/layers
        assert ((size in standard_sizes) == (block is None) == (layers is None))
        if (size in standard_sizes):
            (block, layers) = standard_sizes[size]
        if (modify_sequence is None):
            def modify_sequence(x):
                # identity hook: keep the default layer sequence
                return x
        # NOTE: these attributes are assigned *before* super().__init__()
        # because self._make_layer (borrowed from torchvision's ResNet below)
        # reads them while the `sequence` list is being built.
        self.inplanes = 64
        norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.dilation = 1
        self.groups = 1
        self.base_width = 64
        sequence = modify_sequence([('conv1', nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)), ('bn1', norm_layer(64)), ('relu', nn.ReLU(inplace=True)), ('maxpool', nn.MaxPool2d(3, stride=(1 if halfsize else 2), padding=1)), ('layer1', self._make_layer(block, 64, layers[0])), ('layer2', self._make_layer(block, 128, layers[1], stride=2)), ('layer3', self._make_layer(block, 256, layers[2], stride=2)), ('layer4', self._make_layer(block, 512, layers[3], stride=2)), ('avgpool', GlobalAveragePool2d()), ('fc', nn.Linear((512 * block.expansion), num_classes))])
        super(CustomResNet, self).__init__()
        # register the (possibly modified) sequence as child modules, in order;
        # forward() iterates self._modules, so registration order is execution order
        for (name, layer) in sequence:
            setattr(self, name, layer)
        self.extra_output = extra_output

    def _make_layer(self, block, channels, depth, stride=1):
        # reuse torchvision's stage builder unbound, with self standing in for
        # a torchvision ResNet (it only reads the attributes set in __init__)
        return resnet.ResNet._make_layer(self, block, channels, depth, stride)

    def forward(self, x):
        """Run registered modules in order; optionally collect named intermediates."""
        extra = []
        for (name, module) in self._modules.items():
            x = module(x)
            if (self.extra_output and (name in self.extra_output)):
                extra.append(x)
        if self.extra_output:
            # final output first, then the requested intermediate activations
            return ((x,) + tuple(extra))
        return x
|
class CustomXceptionNet(nn.Module):
    """
    Customizable Xceptionnet, compatible with
    https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/xception.py
    but:
    * The top-level sequence of modules can be modified to add
      or remove or alter layers.
    * Extra outputs can be produced, to allow backprop and access
      to internal features.
    * halfsize=True halves striding on the first convolution to
      allow 151x151 images to be processed rather than 299x299 only.
    """

    def __init__(self, channels=None, num_classes=1000, extra_output=None, modify_sequence=None, halfsize=False):
        """Build the network.

        `channels` lists the 17 channel widths along the backbone (defaults
        to the standard Xception widths). `modify_sequence` receives the
        default (name, module) list and may return an edited list.
        """
        from . import xception
        if (channels is None):
            channels = [3, 32, 64, 128, 256, 728, 728, 728, 728, 728, 728, 728, 728, 728, 1024, 1536, 2048]
        assert (len(channels) == 17)
        if (modify_sequence is None):
            def modify_sequence(x):
                # identity hook: keep the default layer sequence
                return x
        # default sequence: stem convs, entry blocks 1-3, middle blocks 4-11,
        # exit block 12 + separable convs, then global pooling and classifier
        sequence = modify_sequence([('conv1', nn.Conv2d(channels[0], channels[1], kernel_size=3, stride=(1 if halfsize else 2), padding=0, bias=False)), ('bn1', nn.BatchNorm2d(channels[1])), ('relu1', nn.ReLU(inplace=True)), ('conv2', nn.Conv2d(channels[1], channels[2], 3, bias=False)), ('bn2', nn.BatchNorm2d(channels[2])), ('relu2', nn.ReLU(inplace=True)), ('block1', xception.Block(channels[2], channels[3], 2, 2, start_with_relu=False, grow_first=True)), ('block2', xception.Block(channels[3], channels[4], 2, 2, start_with_relu=True, grow_first=True)), ('block3', xception.Block(channels[4], channels[5], 2, 2, start_with_relu=True, grow_first=True)), ('block4', xception.Block(channels[5], channels[6], 3, 1, start_with_relu=True, grow_first=True)), ('block5', xception.Block(channels[6], channels[7], 3, 1, start_with_relu=True, grow_first=True)), ('block6', xception.Block(channels[7], channels[8], 3, 1, start_with_relu=True, grow_first=True)), ('block7', xception.Block(channels[8], channels[9], 3, 1, start_with_relu=True, grow_first=True)), ('block8', xception.Block(channels[9], channels[10], 3, 1, start_with_relu=True, grow_first=True)), ('block9', xception.Block(channels[10], channels[11], 3, 1, start_with_relu=True, grow_first=True)), ('block10', xception.Block(channels[11], channels[12], 3, 1, start_with_relu=True, grow_first=True)), ('block11', xception.Block(channels[12], channels[13], 3, 1, start_with_relu=True, grow_first=True)), ('block12', xception.Block(channels[13], channels[14], 2, 2, start_with_relu=True, grow_first=False)), ('conv3', xception.SeparableConv2d(channels[14], channels[15], 3, 1, 1)), ('bn3', nn.BatchNorm2d(channels[15])), ('relu3', nn.ReLU(inplace=True)), ('conv4', xception.SeparableConv2d(channels[15], channels[16], 3, 1, 1)), ('bn4', nn.BatchNorm2d(channels[16])), ('relu4', nn.ReLU(inplace=True)), ('avgpool', GlobalAveragePool2d()), ('fc', nn.Linear(channels[16], num_classes))])
        super(CustomXceptionNet, self).__init__()
        # register the (possibly modified) sequence as child modules, in order;
        # forward() iterates self._modules, so registration order is execution order
        for (name, layer) in sequence:
            setattr(self, name, layer)
        self.extra_output = extra_output

    def forward(self, x):
        """Run registered modules in order; optionally collect named intermediates."""
        extra = []
        for (name, module) in self._modules.items():
            x = module(x)
            if (self.extra_output and (name in self.extra_output)):
                extra.append(x)
        if self.extra_output:
            # final output first, then the requested intermediate activations
            return ((x,) + tuple(extra))
        return x
|
class Vectorize(nn.Module):
    """Flatten every dimension after the batch dimension into one vector."""

    def __init__(self):
        super(Vectorize, self).__init__()

    def forward(self, x):
        # (N, d1, d2, ...) -> (N, d1*d2*...); same result as computing the
        # column count via the product of the trailing sizes
        return x.view(x.size(0), -1)
|
class GlobalAveragePool2d(nn.Module):
    """Average each channel over all spatial positions, for any input size."""

    def __init__(self):
        super(GlobalAveragePool2d, self).__init__()

    def forward(self, x):
        # (N, C, H, W) -> (N, C, H*W) -> (N, C)
        flat = x.view(x.size(0), x.size(1), -1)
        return torch.mean(flat, dim=2)
|
def get_scheduler(optimizer, opt):
    """Create a learning-rate scheduler for `optimizer` per opt.lr_policy.

    'plateau': decay LR by 10x when the (maximized) validation metric stalls
    for opt.patience epochs. 'constant': the same scheduler, but with a huge
    patience and eps=opt.lr so the LR is never actually reduced.

    Raises:
        NotImplementedError: if opt.lr_policy is not recognized.
    """
    if (opt.lr_policy == 'plateau'):
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1, threshold=0.0001, patience=opt.patience, eps=1e-06)
    elif (opt.lr_policy == 'constant'):
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1, threshold=0.0001, patience=1000, eps=opt.lr)
    else:
        # fixed: the original *returned* the exception instance instead of
        # raising it, and passed the policy as a stray second argument
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
|
def init_weights(net, init_type='xavier', gain=0.02):
    """Initialize a network's weights in place.

    Conv/Linear weights are initialized per `init_type`
    (normal | xavier | kaiming | orthogonal) and their biases zeroed;
    BatchNorm2d weights are drawn from N(1, gain) with zero bias.

    Raises:
        NotImplementedError: for an unknown init_type (raised lazily, on the
        first Conv/Linear module visited).
    """
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and ('Conv' in classname or 'Linear' in classname):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm2d' in classname:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)
    print('initialize network with %s' % init_type)
    net.apply(init_func)
|
def init_net(net, init_type='xavier', gpu_ids=[]):
    """Move `net` to the requested GPUs (wrapping in DataParallel) and
    initialize its weights; init_type=None skips initialization entirely."""
    if gpu_ids:
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)
    if init_type is None:
        return net
    init_weights(net, init_type)
    return net
|
def modify_commandline_options(parser):
    """Set image-size defaults matching the chosen discriminator backbone.

    Xception variants expect 299px crops (loaded at 333px); ResNet variants
    expect 224px crops (loaded at 256px).

    Raises:
        NotImplementedError: if the backbone name matches neither family.
    """
    opt, _ = parser.parse_known_args()
    backbone = opt.which_model_netD
    if 'xception' in backbone:
        parser.set_defaults(loadSize=333, fineSize=299)
    elif 'resnet' in backbone:
        parser.set_defaults(loadSize=256, fineSize=224)
    else:
        raise NotImplementedError
|
def define_D(which_model_netD, init_type, gpu_ids=[]):
    """Construct and initialize a whole-image (2-class) discriminator.

    Raises:
        NotImplementedError: for an unrecognized model name.
    """
    if 'resnet' in which_model_netD:
        from torchvision.models import resnet
        # look the constructor up by name, e.g. 'resnet18'
        builder = getattr(resnet, which_model_netD)
        netD = builder(pretrained=False, num_classes=2)
    elif 'xception' in which_model_netD:
        from . import xception
        netD = xception.xception(num_classes=2)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % which_model_netD)
    return netutils.init_net(netD, init_type, gpu_ids=gpu_ids)
|
def define_patch_D(which_model_netD, init_type, gpu_ids=[]):
    """Construct and initialize a patch-wise discriminator from a spec string.

    Spec formats: 'resnet<depth>_<layer>[_extraN...]', 'widenet_ks<k>_d<d>',
    'xception_<layer>[_extraN...]', or 'longxception'.

    Raises:
        NotImplementedError: for an unrecognized spec.
    """
    splits = which_model_netD.split('_')
    if which_model_netD.startswith('resnet'):
        from . import customnet
        depth = int(splits[0][6:])       # 'resnet18' -> 18
        layer = splits[1]
        if len(splits) == 2:
            netD = customnet.make_patch_resnet(depth, layer)
        else:
            # trailing 'extraN' tokens name layers whose activations to expose
            extra_output = [s.replace('extra', 'layer') for s in splits[2:]]
            netD = customnet.make_patch_resnet(depth, layer, extra_output=extra_output)
    elif which_model_netD.startswith('widenet'):
        kernel_size = int(splits[1][2:])  # 'ks7' -> 7
        dilation = int(splits[2][1:])     # 'd1' -> 1
        netD = WideNet(kernel_size, dilation)
    elif which_model_netD.startswith('xception'):
        from . import customnet
        layer = splits[1]
        if len(splits) == 2:
            netD = customnet.make_patch_xceptionnet(layer)
        else:
            extra_output = [s.replace('extra', 'block') for s in splits[2:]]
            netD = customnet.make_patch_xceptionnet(layername=layer, extra_output=extra_output)
    elif which_model_netD.startswith('longxception'):
        from . import customnet
        netD = customnet.make_xceptionnet_long()
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % which_model_netD)
    return netutils.init_net(netD, init_type, gpu_ids=gpu_ids)
|
def define_PCL(which_model_netD, init_type, gpu_ids=[], input_size=128):
    """Build the patch-consistency-learning head for the given backbone spec.

    For 'resnet*'/'xception*' specs returns a tuple
    (initialized net, feature-map side length).
    NOTE(review): the 'widenet'/'longxception' branches return a bare net
    with no channel value — confirm callers of those specs expect a single
    return value rather than the tuple.

    Raises:
        NotImplementedError: for an unrecognized spec.
    """
    if which_model_netD.startswith('resnet'):
        from . import customnet
        backbone = which_model_netD.split('_')[0]
        layer = which_model_netD.split('_')[1]
        (netPCL, out_ch) = customnet.make_pcl(backbone=backbone, layername=layer, input_size=input_size)
        return (netutils.init_net(netPCL, init_type, gpu_ids=gpu_ids), out_ch)
    elif which_model_netD.startswith('widenet'):
        # 'widenet_ks<k>_d<d>' -> kernel size and dilation
        splits = which_model_netD.split('_')
        kernel_size = int(splits[1][2:])
        dilation = int(splits[2][1:])
        netD = WideNet(kernel_size, dilation)
        return netutils.init_net(netD, init_type, gpu_ids=gpu_ids)
    elif which_model_netD.startswith('xception'):
        from . import customnet
        splits = which_model_netD.split('_')
        backbone = splits[0]
        layer = splits[1]
        (netPCL, out_ch) = customnet.make_pcl(backbone=backbone, layername=layer, input_size=input_size)
        return (netutils.init_net(netPCL, init_type, gpu_ids=gpu_ids), out_ch)
    elif which_model_netD.startswith('longxception'):
        from . import customnet
        netD = customnet.make_xceptionnet_long()
        return netutils.init_net(netD, init_type, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError(('Discriminator model name [%s] is not recognized' % which_model_netD))
|
class WideNet(nn.Module):
    """Shallow patch classifier: one wide (optionally dilated) strided conv
    followed by pooling and 1x1 convs, producing a 2-class score map."""

    def __init__(self, kernel_size=7, dilation=1):
        super().__init__()
        layers = [
            # single spatial conv sets the receptive field of every patch
            nn.Conv2d(3, 256, kernel_size=kernel_size, dilation=dilation, stride=2, padding=kernel_size // 2, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, stride=2, padding=1),
            # per-patch MLP implemented as 1x1 convolutions
            nn.Conv2d(256, 256, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 2, kernel_size=1),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)
|
class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: a per-channel (grouped) spatial conv
    followed by a 1x1 pointwise conv that mixes channels."""

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):
        super(SeparableConv2d, self).__init__()
        # groups=in_channels makes conv1 operate on each channel independently
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=bias)
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        return self.pointwise(self.conv1(x))
|
class PixelBlock(nn.Module):
    """1x1-kernel variant of Xception's Block: same residual layout, but all
    separable convs use kernel_size=1 / padding=0, so channels are mixed
    without any spatial mixing. Only stride 1 is supported (asserted)."""

    def __init__(self, in_filters, out_filters, reps, strides=1, start_with_relu=True, grow_first=True):
        super(PixelBlock, self).__init__()
        assert (strides == 1)
        # projection shortcut when the channel count changes
        # (the strides term is always False here given the assert above)
        if ((out_filters != in_filters) or (strides != 1)):
            self.skip = nn.Conv2d(in_filters, out_filters, 1, stride=strides, bias=False)
            self.skipbn = nn.BatchNorm2d(out_filters)
        else:
            self.skip = None
        rep = []
        filters = in_filters
        if grow_first:
            # widen to out_filters before the repeated convs
            rep.append(nn.ReLU(inplace=True))
            rep.append(SeparableConv2d(in_filters, out_filters, 1, stride=1, padding=0, bias=False))
            rep.append(nn.BatchNorm2d(out_filters))
            filters = out_filters
        for i in range((reps - 1)):
            rep.append(nn.ReLU(inplace=True))
            rep.append(SeparableConv2d(filters, filters, 1, stride=1, padding=0, bias=False))
            rep.append(nn.BatchNorm2d(filters))
        if (not grow_first):
            # widen at the end instead of the beginning
            rep.append(nn.ReLU(inplace=True))
            rep.append(SeparableConv2d(in_filters, out_filters, 1, stride=1, padding=0, bias=False))
            rep.append(nn.BatchNorm2d(out_filters))
        if (not start_with_relu):
            rep = rep[1:]
        else:
            # the first ReLU must not be in-place: its input also feeds the skip path
            rep[0] = nn.ReLU(inplace=False)
        if (strides != 1):
            # dead branch: strides == 1 is asserted above
            # (the spatial Block adds a MaxPool2d here)
            pass
        self.rep = nn.Sequential(*rep)

    def forward(self, inp):
        """Residual forward: main branch plus (projected) skip connection."""
        x = self.rep(inp)
        if (self.skip is not None):
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp
        x += skip
        return x
|
class Block(nn.Module):
    """Xception residual block: a chain of ReLU + SeparableConv2d + BatchNorm
    repetitions with an (optionally projected) skip connection, and optional
    strided max-pooling at the end."""

    def __init__(self, in_filters, out_filters, reps, strides=1, start_with_relu=True, grow_first=True):
        super(Block, self).__init__()
        # projection shortcut whenever channels or spatial size change
        if ((out_filters != in_filters) or (strides != 1)):
            self.skip = nn.Conv2d(in_filters, out_filters, 1, stride=strides, bias=False)
            self.skipbn = nn.BatchNorm2d(out_filters)
        else:
            self.skip = None
        rep = []
        filters = in_filters
        if grow_first:
            # widen to out_filters before the repeated convs
            rep.append(nn.ReLU(inplace=True))
            rep.append(SeparableConv2d(in_filters, out_filters, 3, stride=1, padding=1, bias=False))
            rep.append(nn.BatchNorm2d(out_filters))
            filters = out_filters
        for i in range((reps - 1)):
            rep.append(nn.ReLU(inplace=True))
            rep.append(SeparableConv2d(filters, filters, 3, stride=1, padding=1, bias=False))
            rep.append(nn.BatchNorm2d(filters))
        if (not grow_first):
            # widen at the end instead of the beginning
            rep.append(nn.ReLU(inplace=True))
            rep.append(SeparableConv2d(in_filters, out_filters, 3, stride=1, padding=1, bias=False))
            rep.append(nn.BatchNorm2d(out_filters))
        if (not start_with_relu):
            rep = rep[1:]
        else:
            # the first ReLU must not be in-place: its input also feeds the skip path
            rep[0] = nn.ReLU(inplace=False)
        if (strides != 1):
            # downsample the main branch to match the strided skip projection
            rep.append(nn.MaxPool2d(3, strides, 1))
        self.rep = nn.Sequential(*rep)

    def forward(self, inp):
        """Residual forward: main branch plus (projected) skip connection."""
        x = self.rep(inp)
        if (self.skip is not None):
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp
        x += skip
        return x
|
class Xception(nn.Module):
    """
    Xception optimized for the ImageNet dataset, as specified in
    https://arxiv.org/pdf/1610.02357.pdf
    """

    def __init__(self, num_classes=1000):
        """Constructor.

        Args:
            num_classes: number of classes
        """
        super(Xception, self).__init__()
        self.num_classes = num_classes
        # entry flow: two plain convs, then three strided blocks
        self.conv1 = nn.Conv2d(3, 32, 3, 2, 0, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, 3, bias=False)
        self.bn2 = nn.BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=True)
        self.block1 = Block(64, 128, 2, 2, start_with_relu=False, grow_first=True)
        self.block2 = Block(128, 256, 2, 2, start_with_relu=True, grow_first=True)
        self.block3 = Block(256, 728, 2, 2, start_with_relu=True, grow_first=True)
        # middle flow: eight stride-1 728-channel blocks
        self.block4 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block5 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block6 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block7 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block8 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block9 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block10 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block11 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        # exit flow: one strided block plus two separable convs
        self.block12 = Block(728, 1024, 2, 2, start_with_relu=True, grow_first=False)
        self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1)
        self.bn3 = nn.BatchNorm2d(1536)
        self.relu3 = nn.ReLU(inplace=True)
        self.conv4 = SeparableConv2d(1536, 2048, 3, 1, 1)
        self.bn4 = nn.BatchNorm2d(2048)
        # NOTE(review): logits() below uses self.last_linear, which is not set
        # here — the xception() factory aliases fc -> last_linear after
        # construction. A bare Xception() cannot run forward(); confirm all
        # instances are created through the factory.
        self.fc = nn.Linear(2048, num_classes)

    def features(self, input):
        """Run the convolutional backbone; returns the (N, 2048, h, w) feature map
        (no final ReLU — that is applied in logits())."""
        x = self.conv1(input)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu2(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        x = self.block12(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu3(x)
        x = self.conv4(x)
        x = self.bn4(x)
        return x

    def logits(self, features):
        """ReLU, global average pooling, then the final linear classifier."""
        x = nn.ReLU(inplace=True)(features)
        x = F.adaptive_avg_pool2d(x, (1, 1))
        x = x.view(x.size(0), (- 1))
        x = self.last_linear(x)
        return x

    def forward(self, input):
        """Full pass: backbone features followed by the classification head."""
        x = self.features(input)
        x = self.logits(x)
        return x
|
def xception(num_classes=1000, pretrained='imagenet'):
    """Build an Xception model, optionally loading pretrained weights.

    Weights whose names or shapes do not match the current model (e.g. a
    differently-sized classifier) are silently skipped. The classifier is
    exposed as `last_linear` (pretrainedmodels convention) and `fc` removed.

    Args:
        num_classes: size of the final classification layer.
        pretrained: key into pretrained_settings (e.g. 'imagenet'),
            or a falsy value to skip weight loading.
    """
    model = Xception(num_classes=num_classes)
    if pretrained:
        # fixed: the model was instantiated a second time here, silently
        # discarding the instance created above
        settings = pretrained_settings['xception'][pretrained]
        pretrained_state = model_zoo.load_url(settings['url'])
        model_state = model.state_dict()
        # keep only weights that exist in this model with matching shapes
        pretrained_state = {k: v for (k, v) in pretrained_state.items() if ((k in model_state) and (v.size() == model_state[k].size()))}
        print(list(pretrained_state.keys()))
        model_state.update(pretrained_state)
        model.load_state_dict(model_state)
        # record the preprocessing metadata expected by these weights
        model.input_space = settings['input_space']
        model.input_size = settings['input_size']
        model.input_range = settings['input_range']
        model.mean = settings['mean']
        model.std = settings['std']
    # rename the classifier per the pretrainedmodels convention;
    # Xception.logits() reads self.last_linear
    model.last_linear = model.fc
    del model.fc
    return model
|
class BaseOptions(options.Options):
    """Options shared by training and testing: model selection, data paths,
    image sizing, and experiment naming/bookkeeping."""

    def __init__(self, print_opt=True):
        options.Options.__init__(self)
        self.isTrain = False
        self.print_opt = print_opt
        parser = self.parser
        # model / network selection
        parser.add_argument('--model', type=str, default='basic_discriminator', help='chooses which model to use')
        parser.add_argument('--which_model_netD', type=str, default='resnet18', help='selects model to use for netD')
        parser.add_argument('--fake_class_id', type=int, default=0, help='class id of fake ims')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        parser.add_argument('--load_model', action='store_true', help='load the latest model')
        parser.add_argument('--seed', type=int, default=0, help='torch.manual_seed value')
        parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]')
        parser.add_argument('--lbda', type=int, default=10, help='lambda value for Patch-consistency learning')
        # data loading / sizing
        parser.add_argument('--loadSize', type=int, default=256, help='scale images to this size')
        parser.add_argument('--fineSize', type=int, default=256, help='then crop to this size')
        parser.add_argument('--nThreads', default=4, type=int, help='# threads for loading data')
        parser.add_argument('--batch_size', type=int, default=32, help='input batch size')
        parser.add_argument('--real_im_path', type=str, help='path to real images')
        parser.add_argument('--fake_im_path', type=str, help='path to fake images')
        parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples to use in dataset')
        # experiment naming and output locations
        parser.add_argument('--name', type=str, default='', help='name of the experiment. it decides where to store samples and models')
        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{which_model_netG}_size{loadSize}')
        parser.add_argument('--prefix', default='', type=str, help='customized prefix: opt.name = prefix + opt.name: e.g., {model}_{which_model_netG}_size{loadSize}')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')

    def parse(self):
        """Parse options in two passes (base options, then model-specific
        options), assemble the experiment name, and normalize gpu ids."""
        # first pass: discover which model is requested
        opt = options.Options.parse(self, print_opt=False)
        model_name = opt.model
        # let the model register its own options, then re-parse everything
        model_option_setter = models.get_option_setter(model_name)
        self.parser = model_option_setter(self.parser)
        opt = options.Options.parse(self, print_opt=False)
        opt.isTrain = self.isTrain
        # experiment name: explicit template, or a default built from key options
        if (opt.name == ''):
            opt.name = '{model}_{which_model_netD}_size{fineSize}'.format(**vars(opt))
        else:
            opt.name = opt.name.format(**vars(opt))
        # optional suffix/prefix templates, consumed (cleared) once applied
        if opt.suffix:
            suffix = (('_' + opt.suffix.format(**vars(opt))) if (opt.suffix != '') else '')
            opt.name = (opt.name + suffix)
            opt.suffix = ''
        if opt.prefix:
            prefix = (opt.prefix.format(**vars(opt)) if (opt.prefix != '') else '')
            prefix += '-'
            opt.name = (prefix + opt.name)
            opt.prefix = ''
        if self.print_opt:
            self.print_options(opt)
        # convert the comma-separated gpu id string to a list of ints
        # (negative ids are dropped, so '-1' yields an empty list / CPU mode)
        str_ids = opt.gpu_ids
        if isinstance(opt.gpu_ids, str):
            str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if (id >= 0):
                opt.gpu_ids.append(id)
        if ((len(opt.gpu_ids) > 0) and torch.cuda.is_available()):
            torch.cuda.set_device(opt.gpu_ids[0])
        # data paths are mandatory except for the openmfc dataset and the
        # patch_inconsistency_discriminator model
        if ((not hasattr(opt, 'dataset_name')) or (opt.dataset_name != 'openmfc')):
            if (not (opt.model == 'patch_inconsistency_discriminator')):
                assert (opt.real_im_path and opt.fake_im_path)
        return opt
|
class TestOptions(BaseOptions):
    """Options for evaluation runs; merges saved train-time configuration
    with values given explicitly on the command line."""

    def __init__(self):
        BaseOptions.__init__(self, print_opt=False)
        parser = self.parser
        parser.add_argument('--train_config', type=argparse.FileType(mode='r'), required=True, help='config file saved from model training')
        parser.add_argument('--partition', type=str, default='val', help='val or test')
        parser.add_argument('--dataset_name', type=str, required=True, help='name to describe test dataset when saving results, e.g. celebahq_pgan')
        parser.add_argument('--force_redo', action='store_true', help='force recompute results')
        # optional test-time image perturbations
        parser.add_argument('--test_compression', type=int, help='jpeg compression level')
        parser.add_argument('--test_gamma', type=int, help='gamma adjustment level')
        parser.add_argument('--test_blur', type=int, help='blur level')
        parser.add_argument('--test_flip', action='store_true', help='flip all test images')
        # visualization controls
        parser.add_argument('--visualize', action='store_true', help='save visualizations when running test')
        parser.add_argument('--average_mode', help='which kind of patch averaging to use for visualizations [vote, before_softmax, after_softmax]')
        parser.add_argument('--topn', type=int, default=100, help='visualize top n')

    def parse(self):
        """Parse test options, back-filling unspecified values from the saved
        train configuration (command-line values always win)."""
        opt = super().parse()
        train_conf = yaml.load(opt.train_config, Loader=yaml.FullLoader)
        # map every option string (e.g. '--batch_size') to its dest name
        option_strings = {}
        for action_group in self.parser._action_groups:
            for action in action_group._group_actions:
                for option in action.option_strings:
                    option_strings[option] = action.dest
        # options the user explicitly passed on this command line
        specified_options = set([option_strings[x] for x in sys.argv if (x in option_strings)])
        options_from_train = []
        for (k, v) in train_conf.items():
            # data paths and gpu ids are never inherited from training
            if (k in ['real_im_path', 'fake_im_path', 'gpu_ids']):
                continue
            # only fill options that exist here and are currently unset
            if (getattr(opt, k, None) is None):
                continue
            if (k not in specified_options):
                setattr(opt, k, v)
                options_from_train.append((k, v))
        print('Using the following options from the train configuration file:')
        print(options_from_train)
        # sanity-check that the data paths match the requested partition
        if opt.real_im_path:
            assert (opt.partition in opt.real_im_path)
            opt.real_im_path = opt.real_im_path.rstrip('/')
        if opt.fake_im_path:
            assert (opt.partition in opt.fake_im_path)
            opt.fake_im_path = opt.fake_im_path.rstrip('/')
        # evaluation always loads a trained model with a fixed seed
        opt.load_model = True
        opt.model_seed = 0
        opt.isTrain = False
        return opt
|
class TrainOptions(BaseOptions):
    """Command-line options used only during training."""

    def __init__(self, print_opt=True):
        BaseOptions.__init__(self, print_opt)
        self.isTrain = True
        add = self.parser.add_argument
        # logging / checkpoint cadence
        add('--display_freq', type=int, default=1000, help='frequency of showing training results visualization')
        add('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        add('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results')
        add('--save_epoch_freq', type=int, default=100, help='frequency of saving checkpoints at the end of epochs')
        # optimizer and learning-rate schedule
        add('--beta1', type=float, default=0.9, help='momentum term of adam')
        add('--lr', type=float, default=0.001, help='initial learning rate for adam')
        add('--lr_policy', default='constant', help='lr schedule [constant|plateau]')
        add('--patience', type=int, default=10, help='will stop training if val metric does not improve for this many epochs')
        add('--max_epochs', type=int, help='maximum epochs to train, if not specified, will stop based on patience, or whichever is sooner')
|
def train(opt):
    """Train a paired real/fake discriminator with per-epoch validation and early stopping.

    Reads from opt: seed, model, real_im_path, fake_im_path, batch_size, nThreads,
    fake_class_id, gpu_ids, print_freq, save_latest_freq, save_epoch_freq,
    patience, max_epochs. Checkpoints are written through model.save_networks
    ('latest', 'bestval', per-epoch snapshots). Training stops when the val
    metric has not improved for 5 * opt.patience epochs, reaches 1, or
    opt.max_epochs is exceeded.
    """
    torch.manual_seed(opt.seed)
    # Only the patch-inconsistency model consumes per-pixel masks.
    if (opt.model == 'patch_inconsistency_discriminator'):
        WITH_MASK = True
    else:
        WITH_MASK = False
    if (not WITH_MASK):
        # Without masks the 'train' split subdirectory is appended here;
        # with masks the paths are used as-is (split handling presumably
        # inside PairedDataset -- TODO confirm).
        dset = PairedDataset(opt, os.path.join(opt.real_im_path, 'train'), os.path.join(opt.fake_im_path, 'train'), with_mask=WITH_MASK)
    else:
        dset = PairedDataset(opt, os.path.join(opt.real_im_path), os.path.join(opt.fake_im_path), with_mask=WITH_MASK)
    # Each dataset item yields a real/fake pair, so the loader uses batch_size // 2.
    dl = DataLoader(dset, batch_size=(opt.batch_size // 2), num_workers=opt.nThreads, pin_memory=False, shuffle=True)
    assert (opt.fake_class_id in [0, 1])
    fake_label = opt.fake_class_id
    real_label = (1 - fake_label)
    logging.info(('real label = %d' % real_label))
    logging.info(('fake label = %d' % fake_label))
    dataset_size = len(dset)
    logging.info(('# total images = %d' % dataset_size))
    logging.info(('# total batches = %d' % len(dl)))
    model = create_model(opt)
    # setup() returns resumed bookkeeping: current epoch and best-val state.
    (epoch, best_val_metric, best_val_ep) = model.setup(opt)
    # Track each training loss plus its '_val' counterpart in the visualizer.
    visualizer_losses = (model.loss_names + [(n + '_val') for n in model.loss_names])
    visualizer = Visualizer(opt, visualizer_losses, model.visual_names)
    total_batches = (epoch * len(dl))
    t_data = 0
    now = time.strftime('%c')
    logging.info(('================ Training Loss (%s) ================\n' % now))
    while True:
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0
        for (i, ims) in enumerate(dl):
            ims_real = ims['original'].to(opt.gpu_ids[0])
            ims_fake = ims['manipulated'].to(opt.gpu_ids[0])
            labels_real = (real_label * torch.ones(ims_real.shape[0], dtype=torch.long).to(opt.gpu_ids[0]))
            labels_fake = (fake_label * torch.ones(ims_fake.shape[0], dtype=torch.long).to(opt.gpu_ids[0]))
            if (not WITH_MASK):
                # Concatenate real and fake halves into one batch.
                inputs = dict(ims=torch.cat((ims_real, ims_fake), axis=0), labels=torch.cat((labels_real, labels_fake), axis=0))
            else:
                masks_real = ims['mask_original'].to(opt.gpu_ids[0])
                masks_fake = ims['mask_manipulated'].to(opt.gpu_ids[0])
                inputs = dict(ims=torch.cat((ims_real, ims_fake), axis=0), masks=torch.cat((masks_real, masks_fake), axis=0), labels=torch.cat((labels_real, labels_fake), axis=0))
            batch_data = dict(inputs)
            iter_start_time = time.time()
            if ((total_batches % opt.print_freq) == 0):
                # Sample data-loading time only on printing iterations.
                t_data = (iter_start_time - iter_data_time)
            total_batches += 1
            epoch_iter += 1
            model.reset()
            model.set_input(batch_data)
            model.optimize_parameters()
            if ((epoch_iter % opt.print_freq) == 0):
                losses = model.get_current_losses()
                t = (time.time() - iter_start_time)
                visualizer.print_current_losses(epoch, (float(epoch_iter) / len(dl)), total_batches, losses, t, t_data)
                visualizer.plot_current_losses(total_batches, losses)
            if ((epoch_iter % opt.save_latest_freq) == 0):
                logging.info(('saving the latest model (epoch %d, total_batches %d)' % (epoch, total_batches)))
                model.save_networks('latest', epoch, best_val_metric, best_val_ep)
            model.reset()
            iter_data_time = time.time()
        # --- end-of-epoch validation pass ---
        model.eval()
        val_start_time = time.time()
        val_losses = validate(model, opt)
        visualizer.plot_current_losses(epoch, val_losses)
        logging.info('Printing validation losses:')
        visualizer.print_current_losses(epoch, 0.0, total_batches, val_losses, (time.time() - val_start_time), 0.0)
        model.train()
        model.reset()
        assert model.net_D.training
        if (val_losses[(model.val_metric + '_val')] > best_val_metric):
            # New best validation metric: save 'bestval' checkpoint and log it.
            logging.info(('Updating best val mode at ep %d' % epoch))
            logging.info(('The previous values: ep %d, val %0.2f' % (best_val_ep, best_val_metric)))
            best_val_ep = epoch
            best_val_metric = val_losses[(model.val_metric + '_val')]
            logging.info(('The updated values: ep %d, val %0.2f' % (best_val_ep, best_val_metric)))
            model.save_networks('bestval', epoch, best_val_metric, best_val_ep)
            with open(os.path.join(model.save_dir, 'bestval_ep.txt'), 'a') as f:
                f.write(('ep: %d %s: %f\n' % (epoch, (model.val_metric + '_val'), best_val_metric)))
        elif (epoch > (best_val_ep + (5 * opt.patience))):
            # Early stop: no improvement for more than 5 * patience epochs.
            logging.info(('Current epoch %d, last updated val at ep %d' % (epoch, best_val_ep)))
            logging.info('Stopping training...')
            break
        elif (best_val_metric == 1):
            logging.info('Reached perfect val accuracy metric')
            logging.info('Stopping training...')
            break
        elif (opt.max_epochs and (epoch > opt.max_epochs)):
            logging.info('Reached max epoch count')
            logging.info('Stopping training...')
            break
        logging.info(('Best val ep: %d' % best_val_ep))
        logging.info(('Best val metric: %0.2f' % best_val_metric))
        visualizer.save_final_plots()
        if (((epoch % opt.save_epoch_freq) == 0) and (epoch > 0)):
            logging.info(('saving the model at the end of epoch %d, total batches %d' % (epoch, total_batches)))
            model.save_networks('latest', epoch, best_val_metric, best_val_ep)
            model.save_networks(epoch, epoch, best_val_metric, best_val_ep)
        logging.info(('End of epoch %d \t Time Taken: %d sec' % (epoch, (time.time() - epoch_start_time))))
        # Plateau-style schedulers key off the validation metric.
        model.update_learning_rate(metric=val_losses[(model.val_metric + '_val')])
        epoch += 1
    # Final save after breaking out of the training loop.
    visualizer.save_final_plots()
    model.save_networks('latest', epoch, best_val_metric, best_val_ep)
    model.save_networks(epoch, epoch, best_val_metric, best_val_ep)
    logging.info('Finished Training')
|
def validate(model, opt):
    """Run one pass over the val split and return averaged losses.

    Returns an OrderedDict mapping '<loss>_val' to the sample-weighted average
    of each loss in model.loss_names. The model must already be in eval mode
    (asserted below); the caller is responsible for restoring train mode.
    """
    logging.info('Starting evaluation loop ...')
    model.reset()
    assert (not model.net_D.training)
    # Only the patch-inconsistency model consumes per-pixel masks.
    WITH_MASK = (opt.model == 'patch_inconsistency_discriminator')
    if (not WITH_MASK):
        val_dset = PairedDataset(opt, os.path.join(opt.real_im_path, 'val'), os.path.join(opt.fake_im_path, 'val'), with_mask=WITH_MASK)
    else:
        val_dset = PairedDataset(opt, os.path.join(opt.real_im_path), os.path.join(opt.fake_im_path), with_mask=WITH_MASK)
    val_dl = DataLoader(val_dset, batch_size=opt.batch_size, num_workers=opt.nThreads, pin_memory=False, shuffle=True)
    # One running-average meter per loss, keyed '<loss>_val'.
    val_losses = OrderedDict([((k + '_val'), util.AverageMeter()) for k in model.loss_names])
    # Consistent with train(): fake_class_id must be a binary class index.
    assert (opt.fake_class_id in [0, 1])
    fake_label = opt.fake_class_id
    real_label = (1 - fake_label)
    for ims in val_dl:
        ims_real = ims['original'].to(opt.gpu_ids[0])
        ims_fake = ims['manipulated'].to(opt.gpu_ids[0])
        labels_real = (real_label * torch.ones(ims_real.shape[0], dtype=torch.long).to(opt.gpu_ids[0]))
        labels_fake = (fake_label * torch.ones(ims_fake.shape[0], dtype=torch.long).to(opt.gpu_ids[0]))
        if WITH_MASK:
            masks_real = ims['mask_original'].to(opt.gpu_ids[0])
            masks_fake = ims['mask_manipulated'].to(opt.gpu_ids[0])
            inputs = dict(ims=torch.cat((ims_real, ims_fake), axis=0), masks=torch.cat((masks_real, masks_fake), axis=0), labels=torch.cat((labels_real, labels_fake), axis=0))
        else:
            inputs = dict(ims=torch.cat((ims_real, ims_fake), axis=0), labels=torch.cat((labels_real, labels_fake), axis=0))
        model.reset()
        model.set_input(inputs)
        model.test(True)
        losses = model.get_current_losses()
        for (k, v) in losses.items():
            # Weight by batch size so partial final batches average correctly.
            val_losses[(k + '_val')].update(v, n=len(inputs['labels']))
    # Replace each meter with its scalar average before returning.
    for (k, v) in val_losses.items():
        val_losses[k] = v.avg
    return val_losses
|
def train(opt):
    """Train on the I2G (synthesized inconsistency) dataset with per-epoch validation.

    Unlike the paired-dataset trainer, each batch already carries images, masks,
    and labels from I2GDataset, and the frame subset is re-sampled every epoch
    via dset.get32frames() (presumably 32 frames per video -- TODO confirm
    against I2GDataset). Early stopping mirrors the paired trainer: plateau past
    5 * opt.patience epochs, perfect val metric, or opt.max_epochs exceeded.
    """
    torch.manual_seed(opt.seed)
    dset = I2GDataset(opt, os.path.join(opt.real_im_path, 'train'))
    # Sample the initial frame subset for the first epoch.
    dset.get32frames()
    dl = DataLoader(dset, batch_size=opt.batch_size, num_workers=opt.nThreads, pin_memory=False, shuffle=True)
    assert (opt.fake_class_id in [0, 1])
    fake_label = opt.fake_class_id
    real_label = (1 - fake_label)
    logging.info(('real label = %d' % real_label))
    logging.info(('fake label = %d' % fake_label))
    dataset_size = len(dset)
    logging.info(('# total images = %d' % dataset_size))
    logging.info(('# total batches = %d' % len(dl)))
    model = create_model(opt)
    # setup() returns resumed bookkeeping: current epoch and best-val state.
    (epoch, best_val_metric, best_val_ep) = model.setup(opt)
    # Track each training loss plus its '_val' counterpart in the visualizer.
    visualizer_losses = (model.loss_names + [(n + '_val') for n in model.loss_names])
    visualizer = Visualizer(opt, visualizer_losses, model.visual_names)
    total_batches = (epoch * len(dl))
    t_data = 0
    now = time.strftime('%c')
    logging.info(('================ Training Loss (%s) ================\n' % now))
    while True:
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0
        for (i, ims) in enumerate(dl):
            ims_real = ims['img'].to(opt.gpu_ids[0])
            masks = ims['mask'].to(opt.gpu_ids[0])
            labels = ims['label'].to(opt.gpu_ids[0])
            batch_im = images
            batch_mask = masks
            batch_label = labels
            batch_data = dict(ims=batch_im, masks=batch_mask, labels=batch_label)
            iter_start_time = time.time()
            if ((total_batches % opt.print_freq) == 0):
                # Sample data-loading time only on printing iterations.
                t_data = (iter_start_time - iter_data_time)
            total_batches += 1
            epoch_iter += 1
            model.reset()
            model.set_input(batch_data)
            model.optimize_parameters()
            if ((epoch_iter % opt.print_freq) == 0):
                losses = model.get_current_losses()
                t = (time.time() - iter_start_time)
                visualizer.print_current_losses(epoch, (float(epoch_iter) / len(dl)), total_batches, losses, t, t_data)
                visualizer.plot_current_losses(total_batches, losses)
            if ((epoch_iter % opt.save_latest_freq) == 0):
                logging.info(('saving the latest model (epoch %d, total_batches %d)' % (epoch, total_batches)))
                model.save_networks('latest', epoch, best_val_metric, best_val_ep)
            model.reset()
            iter_data_time = time.time()
        # --- end-of-epoch validation pass ---
        model.eval()
        val_start_time = time.time()
        val_losses = validate(model, opt)
        visualizer.plot_current_losses(epoch, val_losses)
        logging.info('Printing validation losses:')
        visualizer.print_current_losses(epoch, 0.0, total_batches, val_losses, (time.time() - val_start_time), 0.0)
        model.train()
        model.reset()
        assert model.net_D.training
        if (val_losses[(model.val_metric + '_val')] > best_val_metric):
            # New best validation metric: save 'bestval' checkpoint and log it.
            logging.info(('Updating best val mode at ep %d' % epoch))
            logging.info(('The previous values: ep %d, val %0.2f' % (best_val_ep, best_val_metric)))
            best_val_ep = epoch
            best_val_metric = val_losses[(model.val_metric + '_val')]
            logging.info(('The updated values: ep %d, val %0.2f' % (best_val_ep, best_val_metric)))
            model.save_networks('bestval', epoch, best_val_metric, best_val_ep)
            with open(os.path.join(model.save_dir, 'bestval_ep.txt'), 'a') as f:
                f.write(('ep: %d %s: %f\n' % (epoch, (model.val_metric + '_val'), best_val_metric)))
        elif (epoch > (best_val_ep + (5 * opt.patience))):
            # Early stop: no improvement for more than 5 * patience epochs.
            logging.info(('Current epoch %d, last updated val at ep %d' % (epoch, best_val_ep)))
            logging.info('Stopping training...')
            break
        elif (best_val_metric == 1):
            logging.info('Reached perfect val accuracy metric')
            logging.info('Stopping training...')
            break
        elif (opt.max_epochs and (epoch > opt.max_epochs)):
            logging.info('Reached max epoch count')
            logging.info('Stopping training...')
            break
        logging.info(('Best val ep: %d' % best_val_ep))
        logging.info(('Best val metric: %0.2f' % best_val_metric))
        visualizer.save_final_plots()
        if (((epoch % opt.save_epoch_freq) == 0) and (epoch > 0)):
            logging.info(('saving the model at the end of epoch %d, total batches %d' % (epoch, total_batches)))
            model.save_networks('latest', epoch, best_val_metric, best_val_ep)
            model.save_networks(epoch, epoch, best_val_metric, best_val_ep)
        logging.info(('End of epoch %d \t Time Taken: %d sec' % (epoch, (time.time() - epoch_start_time))))
        # Plateau-style schedulers key off the validation metric.
        model.update_learning_rate(metric=val_losses[(model.val_metric + '_val')])
        epoch += 1
        # Re-sample the frame subset and rebuild the loader for the next epoch.
        dset.get32frames()
        dl = DataLoader(dset, batch_size=opt.batch_size, num_workers=opt.nThreads, pin_memory=False, shuffle=True)
    # Final save after breaking out of the training loop.
    visualizer.save_final_plots()
    model.save_networks('latest', epoch, best_val_metric, best_val_ep)
    model.save_networks(epoch, epoch, best_val_metric, best_val_ep)
    logging.info('Finished Training')
|
def validate(model, opt):
    """Run one evaluation pass over the I2G val split and return averaged losses.

    Returns an OrderedDict mapping '<loss>_val' to the sample-weighted average
    of each loss in model.loss_names. The model must already be in eval mode
    (asserted below); the caller is responsible for restoring train mode.
    """
    logging.info('Starting evaluation loop ...')
    model.reset()
    assert (not model.net_D.training)
    val_dset = I2GDataset(opt, os.path.join(opt.real_im_path, 'val'), is_val=True)
    # Re-sample the frame subset used for this validation pass
    # (presumably 32 frames per video -- TODO confirm against I2GDataset).
    val_dset.get32frames()
    val_dl = DataLoader(val_dset, batch_size=opt.batch_size, num_workers=opt.nThreads, pin_memory=False, shuffle=True)
    # One running-average meter per loss, keyed '<loss>_val'.
    # (Removed unused fake_label / real_label / val_start_time locals: the I2G
    # dataset already supplies per-sample labels.)
    val_losses = OrderedDict([((k + '_val'), util.AverageMeter()) for k in model.loss_names])
    for ims in val_dl:
        images = ims['img'].to(opt.gpu_ids[0])
        masks = ims['mask'].to(opt.gpu_ids[0])
        labels = ims['label'].to(opt.gpu_ids[0])
        inputs = dict(ims=images, masks=masks, labels=labels)
        model.reset()
        model.set_input(inputs)
        model.test(True)
        losses = model.get_current_losses()
        for (k, v) in losses.items():
            # Weight by batch size so partial final batches average correctly.
            val_losses[(k + '_val')].update(v, n=len(inputs['labels']))
    # Replace each meter with its scalar average before returning.
    for (k, v) in val_losses.items():
        val_losses[k] = v.avg
    return val_losses
|
class TqdmLoggingHandler(logging.Handler):
    """Logging handler that writes records through tqdm.tqdm.write so that log
    lines do not garble an active tqdm progress bar."""

    def __init__(self, level=logging.NOTSET):
        # Fix: super(self.__class__, self) recurses infinitely if this class is
        # ever subclassed; zero-argument super() is the correct form.
        super().__init__(level)

    def emit(self, record):
        """Format the record and print it above any active progress bar."""
        try:
            msg = self.format(record)
            tqdm.tqdm.write(msg)
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # Fix: was a bare `except:`, which also swallowed BaseExceptions
            # such as GeneratorExit. Delegate to the standard error handler.
            self.handleError(record)
|
class MultiLineFormatter(logging.Formatter):
    """%-style formatter that re-applies the format string to every line of a
    multi-line message (and of any exception text), so each output line carries
    the timestamp/level prefix."""

    def __init__(self, fmt=None, datefmt=None, style='%'):
        assert (style == '%')
        super(MultiLineFormatter, self).__init__(fmt, datefmt, style)
        # Same format string is reused for continuation lines.
        self.multiline_fmt = fmt

    def format(self, record):
        """Format the record, prefixing every line of the message and exception.

        Mirrors logging.Formatter.format but splits on newlines up front so the
        prefix is not computed repeatedly per record (kept cheap deliberately).
        """
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        if ('\n' not in record.message):
            output = (self._fmt % record.__dict__)
        else:
            first, *rest = record.message.splitlines()
            output = (self._fmt % dict(record.__dict__, message=first))
            continuation = '\n'.join(((self.multiline_fmt % dict(record.__dict__, message=line)) for line in rest))
            output = ((output + ' \n') + continuation)
        if (record.exc_info and (not record.exc_text)):
            record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            output += ' \n'
            try:
                exc_lines = record.exc_text.splitlines()
            except AttributeError:
                exc_lines = None
            try:
                output += '\n'.join(((self.multiline_fmt % dict(record.__dict__, message=line)) for line in exc_lines))
            except UnicodeError:
                decoded = record.exc_text.decode(sys.getfilesystemencoding(), 'replace')
                output += '\n'.join(((self.multiline_fmt % dict(record.__dict__, message=line)) for line in decoded.splitlines()))
        return output
|
def handle_exception(exc_type, exc_value, exc_traceback):
    """sys.excepthook replacement: route uncaught exceptions through logging,
    but let KeyboardInterrupt fall through to the default hook."""
    if issubclass(exc_type, KeyboardInterrupt):
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
    else:
        logging.error('Uncaught exception', exc_info=(exc_type, exc_value, exc_traceback))
|
def configure(logging_file, log_level=logging.INFO, level_prefix='', prefix='', write_to_stdout=True, append=True):
    """Configure the root logger: tqdm-friendly stdout handler, optional file
    handler, multi-line formatting, and an excepthook that logs crashes.

    Messages about the log file itself are deferred until handlers are
    installed, so they land in the configured destinations. Returns the root
    logger.
    """
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    sys.excepthook = handle_exception
    handlers = []
    if write_to_stdout:
        handlers.append(TqdmLoggingHandler())
    deferred = []
    if (logging_file is not None):
        deferred.append((logging.info, 'Logging to {}'.format(logging_file)))
        if append:
            if os.path.isfile(logging_file):
                deferred.append((logging.warning, 'Log file already exists, will append'))
            handlers.append(logging.FileHandler(logging_file))
        else:
            deferred.append((logging.warning, 'Creating {} with mode write'.format(logging_file)))
            handlers.append(logging.FileHandler(logging_file, mode='w'))
    fmt = MultiLineFormatter('{}%(asctime)s [{}%(levelname)-5s] %(message)s'.format(prefix, level_prefix), '%Y-%m-%d %H:%M:%S')
    root.handlers = []
    for handler in handlers:
        handler.setFormatter(fmt)
        root.addHandler(handler)
    root.setLevel(log_level)
    for (log_fn, message) in deferred:
        log_fn(message)
    return root
|
@contextlib.contextmanager
def disable(level):
    """Context manager that raises logging's global disable threshold to
    `level` for the duration of the block, restoring it afterwards.

    Fixes two bugs in the previous version: it saved the root logger's
    *effective level* (a different setting) and fed that to logging.disable()
    on exit, and it skipped restoration entirely if the body raised.
    """
    prev_disable = logging.root.manager.disable
    logging.disable(level)
    try:
        yield
    finally:
        logging.disable(prev_disable)
|
class Options():
    """Argparse wrapper that merges command-line flags with an optional YAML
    config file (CLI flags win) and records the resolved options to
    opt.txt / opt.yml in the experiment directory."""

    def __init__(self):
        self.parser = parser = argparse.ArgumentParser()
        # Optional positional YAML config file; explicitly-passed CLI flags
        # override values read from it (see parse()).
        self.parser.add_argument('config_file', nargs='?', type=argparse.FileType(mode='r'))
        self.parser.add_argument('--overwrite_config', action='store_true', help='overwrite config files if they exist')

    def print_options(self, opt):
        """Pretty-print resolved options and persist them to opt.txt / opt.yml.

        Raises AssertionError if the files already exist and
        --overwrite_config was not given.
        """
        opt_dict = OrderedDict()
        # Fix: grouped_k was referenced without ever being defined, causing a
        # NameError whenever an option value was itself a Namespace.
        grouped_k = []
        message = ''
        message += '----------------- Options ---------------\n'
        for (k, v) in sorted(vars(opt).items()):
            if isinstance(v, argparse.Namespace):
                # Nested option groups are collected but not printed inline.
                grouped_k.append((k, v))
                continue
            comment = ''
            default = self.parser.get_default(k)
            if (v != default):
                comment = ('\t[default: %s]' % str(default))
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
            opt_dict[k] = v
        message += '----------------- End -------------------'
        print(message)
        # Resolve the directory where the config dump is written.
        if (hasattr(opt, 'checkpoints_dir') and hasattr(opt, 'name')):
            if (opt.name != ''):
                expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
            else:
                expr_dir = os.path.join(opt.checkpoints_dir)
        else:
            expr_dir = './'
        os.makedirs(expr_dir, exist_ok=True)
        file_name = os.path.join(expr_dir, 'opt.txt')
        if (not opt.overwrite_config):
            assert (not os.path.isfile(file_name)), 'config file exists, use --overwrite_config'
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')
        file_name = os.path.join(expr_dir, 'opt.yml')
        if (not opt.overwrite_config):
            assert (not os.path.isfile(file_name)), 'config file exists, use --overwrite_config'
        with open(file_name, 'wt') as opt_file:
            # Never persist overwrite_config=True, so a rerun is protected again.
            opt_dict['overwrite_config'] = False
            yaml.dump(opt_dict, opt_file, default_flow_style=False)

    def parse(self, print_opt=True):
        """Parse CLI args, overlay YAML config values for any flag the user did
        not explicitly pass, and return the merged argparse.Namespace."""
        opt = self.parser.parse_args()
        if opt.config_file:
            # Fix: yaml.load() without a Loader is unsafe and a TypeError on
            # PyYAML >= 6; safe_load is the correct call for config data.
            data = yaml.safe_load(opt.config_file)
        else:
            data = {}
        # Map every option string (e.g. '--lr') to its destination name.
        option_strings = {}
        for action_group in self.parser._action_groups:
            for action in action_group._group_actions:
                for option in action.option_strings:
                    option_strings[option] = action.dest
        # Destinations the user explicitly provided on the command line.
        specified_options = set([option_strings[x] for x in sys.argv if (x in option_strings)])
        args = {}
        for group in self.parser._action_groups:
            # 'options' is the default optional-group title on Python >= 3.10.
            assert (group.title in ['positional arguments', 'optional arguments', 'options'])
            group_dict = {a.dest: (data[a.dest] if ((a.dest in data) and (a.dest not in specified_options)) else getattr(opt, a.dest, None)) for a in group._group_actions}
            args.update(group_dict)
        opt = argparse.Namespace(**args)
        delattr(opt, 'config_file')
        if print_opt:
            self.print_options(opt)
        self.opt = opt
        return opt
|
def verbose(verbose):
    '\n    Sets default verbosity level. Set to True to see progress bars.\n    '
    # Stores the module-wide verbosity flag read by __call__/descnext/print.
    # May also be set to a specific progress-wrapper function (see __call__).
    global default_verbosity
    default_verbosity = verbose
|
def post(**kwargs):
    """
    When within a progress loop, pbar.post(k=str) will display
    the given k=str status on the right-hand-side of the progress
    status bar. If not within a visible progress bar, does nothing.
    """
    bar = innermost_tqdm()
    if bar:
        bar.set_postfix(**kwargs)
|
def desc(desc):
    """
    When within a progress loop, pbar.desc(str) changes the
    left-hand-side description of the loop to the given description.
    """
    bar = innermost_tqdm()
    if bar:
        bar.set_description(str(desc))
|
def descnext(desc):
    """
    Called before starting a progress loop, pbar.descnext(str)
    sets the description text that will be used in the following loop.
    """
    global next_description
    # Only record the description when progress display is actually possible.
    if default_verbosity and (tqdm is not None):
        next_description = desc
|
def print(*args):
    """
    When within a progress loop, will print above the progress loop.

    Note: this module-level def shadows the builtin print for this module.
    """
    import builtins
    global next_description
    next_description = None
    if default_verbosity:
        msg = ' '.join((str(s) for s in args))
        if (tqdm is None):
            # Fix: this previously called print(msg), which resolves to THIS
            # function (the module shadows the builtin) and recursed forever.
            builtins.print(msg)
        else:
            tqdm.write(msg)
|
def tqdm_terminal(it, *args, **kwargs):
    """
    Some settings for tqdm that make it run better in resizable terminals.
    """
    # leave the bar on screen only for the outermost loop
    terminal_defaults = dict(dynamic_ncols=True, ascii=True, leave=(not innermost_tqdm()))
    return tqdm(it, *args, **terminal_defaults, **kwargs)
|
def in_notebook():
    """
    True if running inside a Jupyter notebook.
    """
    # get_ipython only exists inside an IPython/Jupyter session.
    try:
        shell_name = get_ipython().__class__.__name__
    except NameError:
        return False
    # 'TerminalInteractiveShell' and anything else count as non-notebook.
    return shell_name == 'ZMQInteractiveShell'
|
def innermost_tqdm():
    """
    Returns the innermost active tqdm progress loop on the stack,
    or None if no progress bar is active.
    """
    instances = getattr(tqdm, '_instances', None)
    if not instances:
        return None
    # The innermost bar is the one with the largest position index.
    return max(instances, key=(lambda bar: bar.pos))
|
def __call__(x, *args, **kwargs):
    """
    Invokes a progress function that can wrap iterators to print
    progress messages, if verbose is True.

    If verbose is False or tqdm is unavailable, then a quiet
    non-printing identity function is used.

    verbose can also be set to a specific progress function rather
    than True, and that function will be used.
    """
    global default_verbosity, next_description
    if (tqdm is None) or (not default_verbosity):
        return x
    # True selects an environment-appropriate wrapper; any other truthy value
    # is treated as the wrapper function itself.
    if (default_verbosity == True):
        fn = (tqdm_notebook if in_notebook() else tqdm_terminal)
    else:
        fn = default_verbosity
    if (next_description is not None):
        # Consume the one-shot description set by descnext().
        kwargs = dict(kwargs, desc=next_description)
        next_description = None
    return fn(x, *args, **kwargs)
|
class CallableModule(types.ModuleType):
    """Module subclass that forwards calls to the module-level __call__,
    making `import pbar; pbar(iterable)` work."""

    def __init__(self):
        super().__init__(__name__)
        # Copy the real module's namespace so attribute access still works.
        self.__dict__.update(sys.modules[__name__].__dict__)

    def __call__(self, x, *args, **kwargs):
        return __call__(x, *args, **kwargs)
|
def exit_if_job_done(directory, redo=False, force=False, verbose=True):
    """Exit the process if another worker holds this job's lock or the job is
    already marked done; with redo/force, remove the done marker instead so
    the job can run again."""
    if pidfile_taken(os.path.join(directory, 'lockfile.pid'), force=force, verbose=verbose):
        sys.exit(0)
    donefile = os.path.join(directory, 'done.txt')
    if not os.path.isfile(donefile):
        return
    with open(donefile) as f:
        msg = f.read()
    if redo or force:
        if verbose:
            print(('Removing %s %s' % (donefile, msg)))
        os.remove(donefile)
    else:
        if verbose:
            print(('%s %s' % (donefile, msg)))
        sys.exit(0)
|
def mark_job_done(directory):
    """Write a done.txt stamp recording pid@host, screen session, and time."""
    stamp = ('done by %d@%s %s at %s' % (os.getpid(), socket.gethostname(), os.getenv('STY', ''), time.strftime('%c')))
    with open(os.path.join(directory, 'done.txt'), 'w') as f:
        f.write(stamp)
|
def pidfile_taken(path, verbose=False, force=False):
    "\n    Usage. To grab an exclusive lock for the remaining duration of the\n    current process (and exit if another process already has the lock),\n    do this:\n\n    if pidfile_taken('job_423/lockfile.pid', verbose=True):\n        sys.exit(0)\n\n    To do a batch of jobs, just run a script that does them all on\n    each available machine, sharing a network filesystem.  When each\n    job grabs a lock, then this will automatically distribute the\n    jobs so that each one is done just once on one machine.\n    "
    # Atomically create the pidfile: O_CREAT|O_EXCL fails iff it already exists.
    try:
        os.makedirs(os.path.dirname(path), exist_ok=True)
        fd = os.open(path, ((os.O_CREAT | os.O_EXCL) | os.O_RDWR))
    except OSError as e:
        if (e.errno == errno.EEXIST):
            # Someone else holds the lock; try to report who from the file body.
            conflicter = 'race'
            try:
                with open(path, 'r') as lockfile:
                    conflicter = (lockfile.read().strip() or 'empty')
            except:
                # Best-effort read: the holder may delete the file concurrently.
                pass
            if force:
                if verbose:
                    print(('Removing %s from %s' % (path, conflicter)))
                os.remove(path)
                # Retry once without force so a genuine race still reports a holder.
                return pidfile_taken(path, verbose=verbose, force=False)
            if verbose:
                print(('%s held by %s' % (path, conflicter)))
            # Truthy return value signals "lock NOT acquired".
            return conflicter
        else:
            raise
    # We own the lock: record pid@host (+ screen session) and keep the handle
    # open for the life of the process; atexit cleans it up.
    lockfile = os.fdopen(fd, 'r+')
    atexit.register(delete_pidfile, lockfile, path)
    lockfile.write(('%d@%s %s\n' % (os.getpid(), socket.gethostname(), os.getenv('STY', ''))))
    lockfile.flush()
    os.fsync(lockfile)
    # None (falsy) signals "lock acquired".
    return None
|
def delete_pidfile(lockfile, path):
    """
    Runs at exit after pidfile_taken succeeds: closes the lock handle and
    removes the pidfile, ignoring expected errors (the file may already be
    gone, or the handle already closed).
    """
    if (lockfile is not None):
        try:
            lockfile.close()
        except Exception:
            # Fix: was a bare `except:` that also swallowed BaseExceptions
            # like KeyboardInterrupt during interpreter shutdown.
            pass
    try:
        os.unlink(path)
    except OSError:
        # Already removed (or inaccessible); nothing left to clean up.
        pass
|
def blocks(obj, space=''):
    """Render obj as side-by-side HTML blocks for notebook display."""
    html = space.join(blocks_tags(obj))
    return IPython.display.HTML(html)
|
def rows(obj, space=''):
    """Render obj as an HTML table of rows for notebook display."""
    html = space.join(rows_tags(obj))
    return IPython.display.HTML(html)
|
def rows_tags(obj):
    """Return a flat list of HTML tag strings rendering obj as table rows;
    dicts are rendered as key/value rows."""
    if isinstance(obj, dict):
        obj = obj.items()
    tags = ['<table style="display:inline-table">']
    for row in obj:
        tags.append('<tr style="padding:0">')
        for item in row:
            tags.append('<td style="text-align:left; vertical-align:top;padding:1px">')
            # Each cell's content is rendered recursively via blocks_tags.
            tags.extend(blocks_tags(item))
            tags.append('</td>')
        tags.append('</tr>')
    tags.append('</table>')
    return tags
|
# NOTE(review): the following lines are website boilerplate accidentally pasted
# into the file; they are not valid Python, so they are commented out here.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.