code stringlengths 17 6.64M |
|---|
def tta_backward(x_aug):
    """Undo the 8-fold test-time augmentation applied by `tta_forward`.

    Parameters
    ----------
    x_aug: stack of 8 augmented images — 4 rotations of the original,
        followed by 4 rotations of the left-right-flipped original.

    Returns
    -------
    The pixel-wise mean of the 8 de-augmented images.
    """
    # Entries 0-3 are rotations by k quarter turns: rotate back by -k.
    derotated = [np.rot90(img, -k) for k, img in enumerate(x_aug[:4])]
    # Entries 4-7 were additionally flipped: unflip first, then rotate back.
    deflipped = [np.rot90(np.fliplr(img), -k) for k, img in enumerate(x_aug[4:8])]
    return np.mean(derotated + deflipped, 0)
|
def test_tifffile():
    """Smoke-test: tifffile must decode the bundled LZW-compressed TIFF to 256x256 RGB."""
    this_file = Path(__file__)
    fixture = Path(this_file.parent, 'test_data/flybrain_lzw.tiff')
    decoded = tifffile.imread(fixture)
    assert decoded.shape == (256, 256, 3)
|
def _int64_feature(value):
    """Wrap an int (or an iterable of ints) in a tf.train int64 Feature."""
    values = value if isinstance(value, Iterable) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
|
def _bytes_feature(value):
    # Wrap a single bytes object in a tf.train Feature holding a one-element BytesList.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
|
def dump(fn_root, tfrecord_dir, max_res, expected_images, shards, write):
    """Main converter function.

    Scans `fn_root` for `.png` images, randomly partitions them across
    `shards` TFRecord files under `tfrecord_dir`, and (when `write` is
    truthy) serializes each image as a tf.train.Example with 'shape',
    'data' and 'label' features.
    """
    resolution_log2 = int(np.log2(max_res))
    tfr_prefix = os.path.join(tfrecord_dir, os.path.basename(tfrecord_dir))
    print('Checking in', fn_root)
    img_fn_list = os.listdir(fn_root)
    img_fn_list = [img_fn for img_fn in img_fn_list if img_fn.endswith('.png')]
    num_examples = len(img_fn_list)
    print('Found', num_examples)
    assert (num_examples == expected_images)
    tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
    # Randomly assign image indices to shards.
    p_shard = np.array_split(np.random.permutation(expected_images), shards)
    # FIX: `np.int` was removed in NumPy 1.24 — use an explicit integer dtype.
    img_to_shard = np.zeros(expected_images, dtype=np.int64)
    writers = []
    for shard in range(shards):
        img_to_shard[p_shard[shard]] = shard
        tfr_file = (tfr_prefix + ('-r%02d-s-%04d-of-%04d.tfrecords' % (resolution_log2, shard, shards)))
        writers.append(tf.python_io.TFRecordWriter(tfr_file, tfr_opt))
    counts = np.unique(img_to_shard, return_counts=True)[1]
    assert (len(counts) == shards)
    print('Smallest and largest shards have size', np.min(counts), np.max(counts))
    for (example_idx, img_fn) in enumerate(tqdm(img_fn_list)):
        shard = img_to_shard[example_idx]
        # NOTE(review): scipy.ndimage.imread was removed in SciPy >= 1.2;
        # on modern SciPy this needs imageio.imread / PIL instead — confirm
        # the pinned SciPy version before running.
        img = scipy.ndimage.imread(os.path.join(fn_root, img_fn))
        rows = img.shape[0]
        cols = img.shape[1]
        depth = img.shape[2]
        shape = (rows, cols, depth)
        img = img.astype('uint8')
        # FIX: tostring() is a deprecated/removed alias — tobytes() is identical.
        img = img.tobytes()
        example = tf.train.Example(features=tf.train.Features(feature={'shape': _int64_feature(shape), 'data': _bytes_feature(img), 'label': _int64_feature(0)}))
        if write:
            writers[shard].write(example.SerializeToString())
    print(('%-40s\r' % 'Flushing data...'), end='', flush=True)
    for writer in writers:
        writer.close()
    print(('%-40s\r' % ''), end='', flush=True)
    print(('Added %d images.' % num_examples))
|
def parse_tfrecord_tf(record, res, rnd_crop):
    """Decode one serialized tf.train.Example into a (uint8 image, int32 label) pair."""
    feature_spec = {'shape': tf.FixedLenFeature([3], tf.int64), 'data': tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([1], tf.int64)}
    parsed = tf.parse_single_example(record, features=feature_spec)
    label = tf.cast(tf.reshape(parsed['label'], shape=[]), dtype=tf.int32)
    img = tf.decode_raw(parsed['data'], tf.uint8)
    if rnd_crop:
        # Restore the stored shape first so a random res x res crop can be taken.
        img = tf.reshape(img, parsed['shape'])
        img = tf.random_crop(img, [res, res, 3])
    img = tf.reshape(img, [res, res, 3])
    return (img, label)
|
def input_fn(tfr_file, shards, rank, pmap, fmap, n_batch, resolution, rnd_crop, is_training):
    """Build a one-shot iterator of (image, label) batches from tfrecords shards."""
    file_ds = tf.data.Dataset.list_files(tfr_file)
    # Per the original condition: LSUN validation files are NOT sharded
    # across workers; everything else (and all training data) is.
    if is_training or ('lsun' not in tfr_file):
        file_ds = file_ds.shard(shards, rank)
    if is_training:
        file_ds = file_ds.shuffle(buffer_size=_FILES_SHUFFLE)
    dataset = file_ds.apply(tf.contrib.data.parallel_interleave(tf.data.TFRecordDataset, cycle_length=fmap))
    if is_training:
        dataset = dataset.shuffle(buffer_size=(n_batch * _SHUFFLE_FACTOR))
    dataset = dataset.repeat()
    dataset = dataset.map((lambda rec: parse_tfrecord_tf(rec, resolution, rnd_crop)), num_parallel_calls=pmap)
    dataset = dataset.batch(n_batch)
    dataset = dataset.prefetch(1)
    return dataset.make_one_shot_iterator()
|
def get_tfr_file(data_dir, split, res_lg2):
    """Return the glob pattern matching the tfrecords shards of `split` at resolution 2**res_lg2.

    Raises an informative AssertionError when no shard files exist (the old
    code crashed with an IndexError on `files[0]`), and verifies that every
    shard announced in the filenames (`-of-NNNN`) is actually present.
    """
    data_dir = os.path.join(data_dir, split)
    tfr_prefix = os.path.join(data_dir, os.path.basename(data_dir))
    tfr_file = (tfr_prefix + ('-r%02d-s-*-of-*.tfrecords' % res_lg2))
    files = glob.glob(tfr_file)
    # Fail clearly when the glob matched nothing at all.
    assert files, ('No tfrecords files found at %s' % tfr_prefix)
    # Filenames end in '-of-NNNN.tfrecords'; NNNN is the expected shard count.
    assert (len(files) == int(files[0].split('-')[(- 1)].split('.')[0])), ('Not all tfrecords files present at %s' % tfr_prefix)
    return tfr_file
|
def get_data(sess, data_dir, shards, rank, pmap, fmap, n_batch_train, n_batch_test, n_batch_init, resolution, rnd_crop):
    """Create train/validation iterators plus an initialization batch for `resolution` images."""
    log2_res = int(np.log2(resolution))
    # Resolution must be an exact power of two.
    assert (resolution == (2 ** log2_res))
    train_file = get_tfr_file(data_dir, 'train', log2_res)
    valid_file = get_tfr_file(data_dir, 'validation', log2_res)
    train_itr = input_fn(train_file, shards, rank, pmap, fmap, n_batch_train, resolution, rnd_crop, True)
    valid_itr = input_fn(valid_file, shards, rank, pmap, fmap, n_batch_test, resolution, rnd_crop, False)
    data_init = make_batch(sess, train_itr, n_batch_train, n_batch_init)
    return (train_itr, valid_itr, data_init)
|
def make_batch(sess, itr, itr_batch_size, required_batch_size):
    """Pull enough batches from `itr` to assemble `required_batch_size` examples.

    Fetches ceil(required/itr_batch_size) batches, concatenates them and
    trims to exactly the requested size.
    """
    n_pulls = int(np.ceil(required_batch_size / itr_batch_size))
    fetch = itr.get_next()
    xs = []
    ys = []
    for _ in range(n_pulls):
        (x, y) = sess.run(fetch)
        xs.append(x)
        ys.append(y)
    x = np.concatenate(xs)[:required_batch_size]
    y = np.concatenate(ys)[:required_batch_size]
    return {'x': x, 'y': y}
|
def downsample(x, resolution):
assert (x.dtype == np.float32)
assert ((x.shape[1] % resolution) == 0)
assert ((x.shape[2] % resolution) == 0)
if (x.shape[1] == x.shape[2] == resolution):
return x
s = x.shape
x = np.reshape(x, [s[0], resolution, (s[1] // resolution), resolution, (s[2] // resolution), s[3]])
x = np.mean(x, (2, 4))
return x
|
def x_to_uint8(x):
    """Floor, clamp to [0, 255], then cast an image array to uint8."""
    floored = np.floor(x)
    return np.clip(floored, 0, 255).astype(np.uint8)
|
def shard(data, shards, rank):
    """Return this rank's contiguous slice of (x, y); the size must divide evenly."""
    (x, y) = data
    assert (x.shape[0] == y.shape[0])
    assert ((x.shape[0] % shards) == 0)
    assert (0 <= rank < shards)
    per_rank = (x.shape[0] // shards)
    start = (rank * per_rank)
    stop = (start + per_rank)
    return (x[start:stop], y[start:stop])
|
def get_data(problem, shards, rank, data_augmentation_level, n_batch_train, n_batch_test, n_batch_init, resolution):
    """Load MNIST or CIFAR-10, shard it across workers, and build augmented iterators.

    Returns (train_iterator, test_iterator, data_init): the iterators yield
    (uint8 images downsampled to `resolution`, labels); data_init is an
    initialization batch of n_batch_init examples.

    Raises:
        ValueError: for an unknown `problem` or `data_augmentation_level`
            (previously a bare, message-less `Exception()`).
    """
    if (problem == 'mnist'):
        from keras.datasets import mnist
        ((x_train, y_train), (x_test, y_test)) = mnist.load_data()
        y_train = np.reshape(y_train, [(- 1)])
        y_test = np.reshape(y_test, [(- 1)])
        # Pad 28x28 digits to 32x32 and replicate the gray channel to RGB.
        x_train = np.lib.pad(x_train, ((0, 0), (2, 2), (2, 2)), 'minimum')
        x_test = np.lib.pad(x_test, ((0, 0), (2, 2), (2, 2)), 'minimum')
        x_train = np.tile(np.reshape(x_train, ((- 1), 32, 32, 1)), (1, 1, 1, 3))
        x_test = np.tile(np.reshape(x_test, ((- 1), 32, 32, 1)), (1, 1, 1, 3))
    elif (problem == 'cifar10'):
        from keras.datasets import cifar10
        ((x_train, y_train), (x_test, y_test)) = cifar10.load_data()
        y_train = np.reshape(y_train, [(- 1)])
        y_test = np.reshape(y_test, [(- 1)])
    else:
        raise ValueError('Unsupported problem: %s' % problem)
    print('n_train:', x_train.shape[0], 'n_test:', x_test.shape[0])
    # Each worker keeps only its own contiguous shard of the data.
    (x_train, y_train) = shard((x_train, y_train), shards, rank)
    (x_test, y_test) = shard((x_test, y_test), shards, rank)
    print('n_shard_train:', x_train.shape[0], 'n_shard_test:', x_test.shape[0])
    from keras.preprocessing.image import ImageDataGenerator
    datagen_test = ImageDataGenerator()
    if (data_augmentation_level == 0):
        datagen_train = ImageDataGenerator()
    elif (problem == 'mnist'):
        # MNIST only gets shift augmentation, for every level >= 1.
        datagen_train = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1)
    elif (problem == 'cifar10'):
        if (data_augmentation_level == 1):
            datagen_train = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1)
        elif (data_augmentation_level == 2):
            datagen_train = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, rotation_range=15, zoom_range=0.1, shear_range=0.02)
        else:
            raise ValueError('Unsupported data_augmentation_level: %s' % data_augmentation_level)
    else:
        raise ValueError('Unsupported problem: %s' % problem)
    datagen_train.fit(x_train)
    datagen_test.fit(x_test)
    train_flow = datagen_train.flow(x_train, y_train, n_batch_train)
    test_flow = datagen_test.flow(x_test, y_test, n_batch_test, shuffle=True)

    def make_iterator(flow, resolution):
        # Wrap a keras flow so each call yields a (uint8 downsampled x, y) pair.
        def iterator():
            (x_full, y) = flow.next()
            x_full = x_full.astype(np.float32)
            x = downsample(x_full, resolution)
            x = x_to_uint8(x)
            return (x, y)
        return iterator
    train_iterator = make_iterator(train_flow, resolution)
    test_iterator = make_iterator(test_flow, resolution)
    # Initialization batch (e.g. for data-dependent parameter init).
    data_init = make_batch(train_iterator, n_batch_train, n_batch_init)
    return (train_iterator, test_iterator, data_init)
|
def make_batch(iterator, iterator_batch_size, required_batch_size):
    """Call `iterator` enough times to collect exactly `required_batch_size` examples."""
    n_calls = int(np.ceil(required_batch_size / iterator_batch_size))
    xs = []
    ys = []
    for _ in range(n_calls):
        (x, y) = iterator()
        xs.append(x)
        ys.append(y)
    x = np.concatenate(xs)[:required_batch_size]
    y = np.concatenate(ys)[:required_batch_size]
    return {'x': x, 'y': y}
|
def gradients_speed(ys, xs, grad_ys=None, **kwargs):
    # Memory-saving gradients, checkpointing all conv/matmul outputs (fastest recomputation).
    return gradients(ys, xs, grad_ys, checkpoints='speed', **kwargs)
|
def gradients_memory(ys, xs, grad_ys=None, **kwargs):
    # Memory-saving gradients, auto-selecting bottleneck tensors to minimize memory use.
    return gradients(ys, xs, grad_ys, checkpoints='memory', **kwargs)
|
def gradients_collection(ys, xs, grad_ys=None, **kwargs):
    # Memory-saving gradients using the tensors in the 'checkpoints' graph collection.
    return gradients(ys, xs, grad_ys, checkpoints='collection', **kwargs)
|
def gradients(ys, xs, grad_ys=None, checkpoints='collection', **kwargs):
    """
    Authors: Tim Salimans & Yaroslav Bulatov

    memory efficient gradient implementation inspired by "Training Deep Nets with Sublinear Memory Cost"
    by Chen et al. 2016 (https://arxiv.org/abs/1604.06174)

    ys,xs,grad_ys,kwargs are the arguments to standard tensorflow tf.gradients
    (https://www.tensorflow.org/versions/r0.12/api_docs/python/train.html#gradients)

    'checkpoints' can either be
        - a list consisting of tensors from the forward pass of the neural net
          that we should re-use when calculating the gradients in the backward pass
          all other tensors that do not appear in this list will be re-computed
        - a string specifying how this list should be determined. currently we support
            - 'speed': checkpoint all outputs of convolutions and matmuls. these ops are usually the most expensive,
                       so checkpointing them maximizes the running speed
                       (this is a good option if nonlinearities, concats, batchnorms, etc are taking up a lot of memory)
            - 'memory': try to minimize the memory usage
                        (currently using a very simple strategy that identifies a number of bottleneck tensors in the graph to checkpoint)
            - 'collection': look for a tensorflow collection named 'checkpoints', which holds the tensors to checkpoint
    """
    if (not isinstance(ys, list)):
        ys = [ys]
    if (not isinstance(xs, list)):
        xs = [xs]
    # Ops needed to compute ys (everything reachable walking backward from ys).
    bwd_ops = ge.get_backward_walk_ops([y.op for y in ys], inclusive=True)
    debug_print('bwd_ops: %s', bwd_ops)
    # Forward ops lying between xs and ys: candidates for recomputation.
    fwd_ops = ge.get_forward_walk_ops([x.op for x in xs], inclusive=True, within_ops=bwd_ops)
    debug_print('fwd_ops: %s', fwd_ops)
    # Exclude input-less ops, the xs themselves, and variable plumbing ops.
    fwd_ops = [op for op in fwd_ops if op.inputs]
    xs_ops = _to_ops(xs)
    fwd_ops = [op for op in fwd_ops if (not (op in xs_ops))]
    fwd_ops = [op for op in fwd_ops if (not ('/assign' in op.name))]
    fwd_ops = [op for op in fwd_ops if (not ('/Assign' in op.name))]
    fwd_ops = [op for op in fwd_ops if (not ('/read' in op.name))]
    ts_all = ge.filter_ts(fwd_ops, True)
    ts_all = [t for t in ts_all if ('/read' not in t.name)]
    ts_all = ((set(ts_all) - set(xs)) - set(ys))
    # Resolve a checkpoint strategy name into a concrete list of tensors.
    if (type(checkpoints) is not list):
        if (checkpoints == 'collection'):
            checkpoints = tf.get_collection('checkpoints')
        elif (checkpoints == 'speed'):
            # Checkpoint the outputs of the expensive ops so they are never recomputed.
            checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul')
        elif (checkpoints == 'memory'):
            def fixdims(t):
                # Convert a TensorShape to concrete ints, treating unknown dims as 64.
                try:
                    return [int((e if (e.value is not None) else 64)) for e in t]
                except:
                    return [0]
            # Drop tensors too small to be worth checkpointing, plus tensors
            # from ops whose recomputation is problematic or cheap.
            ts_all = [t for t in ts_all if (np.prod(fixdims(t.shape)) > MIN_CHECKPOINT_NODE_SIZE)]
            ts_all = [t for t in ts_all if ('L2Loss' not in t.name)]
            ts_all = [t for t in ts_all if ('entropy' not in t.name)]
            ts_all = [t for t in ts_all if ('FusedBatchNorm' not in t.name)]
            ts_all = [t for t in ts_all if ('Switch' not in t.name)]
            ts_all = [t for t in ts_all if ('dropout' not in t.name)]
            # Run a throwaway tf.gradients to learn which tensors the backward
            # pass actually consumes.
            with util.capture_ops() as bwd_ops:
                tf_gradients(ys, xs, grad_ys, **kwargs)
            bwd_inputs = [t for op in bwd_ops for t in op.inputs]
            ts_filtered = list(set(bwd_inputs).intersection(ts_all))
            debug_print('Using tensors %s', ts_filtered)
            # Search for "bottleneck" tensors that cleanly split the graph into
            # a before-part and an after-part; try the filtered set first.
            for ts in [ts_filtered, ts_all]:
                bottleneck_ts = []
                for t in ts:
                    b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops))
                    f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops))
                    b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all)
                    f_inp = set([inp for op in f for inp in op.inputs]).intersection(ts_all)
                    # t is a bottleneck when before/after tensor sets are
                    # disjoint and together cover at least all of ts_all.
                    if ((not set(b_inp).intersection(f_inp)) and ((len(b_inp) + len(f_inp)) >= len(ts_all))):
                        bottleneck_ts.append(t)
                    else:
                        debug_print('Rejected bottleneck candidate and ops %s', ([t] + list(((set(ts_all) - set(b_inp)) - set(f_inp)))))
                # sqrt(N) bottlenecks suffice for the sublinear-memory scheme.
                if (len(bottleneck_ts) >= np.sqrt(len(ts_filtered))):
                    break
            if (not bottleneck_ts):
                raise Exception('unable to find bottleneck tensors! please provide checkpoint nodes manually, or use checkpoints="speed".')
            # Keep roughly sqrt(N) evenly spaced bottlenecks, in topological order.
            bottlenecks_sorted_lists = tf_toposort(bottleneck_ts, within_ops=fwd_ops)
            sorted_bottlenecks = [t for ts in bottlenecks_sorted_lists for t in ts]
            N = len(ts_filtered)
            if (len(bottleneck_ts) <= np.ceil(np.sqrt(N))):
                checkpoints = sorted_bottlenecks
            else:
                step = int(np.ceil((len(bottleneck_ts) / np.sqrt(N))))
                checkpoints = sorted_bottlenecks[step::step]
        else:
            raise Exception(('%s is unsupported input for "checkpoints"' % (checkpoints,)))
    checkpoints = list(set(checkpoints).intersection(ts_all))
    assert isinstance(checkpoints, list)
    debug_print('Checkpoint nodes used: %s', checkpoints)
    # Checkpointing xs or ys is pointless: warn and strip them out below.
    xs_intersect_checkpoints = set(xs).intersection(set(checkpoints))
    if xs_intersect_checkpoints:
        debug_print('Warning, some input nodes are also checkpoint nodes: %s', xs_intersect_checkpoints)
    ys_intersect_checkpoints = set(ys).intersection(set(checkpoints))
    debug_print('ys: %s, checkpoints: %s, intersect: %s', ys, checkpoints, ys_intersect_checkpoints)
    if ys_intersect_checkpoints:
        debug_print('Warning, some output nodes are also checkpoints nodes: %s', format_ops(ys_intersect_checkpoints))
    checkpoints = list(((set(checkpoints) - set(ys)) - set(xs)))
    if (not checkpoints):
        raise Exception('no checkpoints nodes found or given as input! ')
    # Disconnect each checkpoint from the graph above it via stop_gradient so
    # recomputation of each segment starts from the stored checkpoint value.
    checkpoints_disconnected = {}
    for x in checkpoints:
        if (x.op and (x.op.name is not None)):
            grad_node = tf.stop_gradient(x, name=(x.op.name + '_sg'))
        else:
            grad_node = tf.stop_gradient(x)
        checkpoints_disconnected[x] = grad_node
    # Copy the sub-graph between the last checkpoints and ys (it will be recomputed).
    ops_to_copy = fast_backward_ops(seed_ops=[y.op for y in ys], stop_at_ts=checkpoints, within_ops=fwd_ops)
    debug_print('Found %s ops to copy within fwd_ops %s, seed %s, stop_at %s', len(ops_to_copy), fwd_ops, [r.op for r in ys], checkpoints)
    debug_print('ops_to_copy = %s', ops_to_copy)
    debug_print('Processing list %s', ys)
    (copied_sgv, info) = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})
    copied_ops = info._transformed_ops.values()
    debug_print('Copied %s to %s', ops_to_copy, copied_ops)
    # Rewire the copies to read from the disconnected checkpoint values.
    ge.reroute_ts(checkpoints_disconnected.values(), checkpoints_disconnected.keys(), can_modify=copied_ops)
    debug_print('Rewired %s in place of %s restricted to %s', checkpoints_disconnected.values(), checkpoints_disconnected.keys(), copied_ops)
    copied_ys = [info._transformed_ops[y.op]._outputs[0] for y in ys]
    # Gradients of ys w.r.t. the last checkpoint layer and the xs.
    boundary = list(checkpoints_disconnected.values())
    dv = tf_gradients(ys=copied_ys, xs=(boundary + xs), grad_ys=grad_ys, **kwargs)
    debug_print('Got gradients %s', dv)
    debug_print('for %s', copied_ys)
    debug_print('with respect to %s', (boundary + xs))
    # Force the recomputation to wait for the forward pass (and grad_ys).
    inputs_to_do_before = [y.op for y in ys]
    if (grad_ys is not None):
        inputs_to_do_before += grad_ys
    wait_to_do_ops = (list(copied_ops) + [g.op for g in dv if (g is not None)])
    my_add_control_inputs(wait_to_do_ops, inputs_to_do_before)
    d_checkpoints = {r: dr for (r, dr) in zip(checkpoints_disconnected.keys(), dv[:len(checkpoints_disconnected)])}
    d_xs = dv[len(checkpoints_disconnected):]
    # Walk the checkpoint layers from outputs back to inputs, recomputing one
    # segment at a time and back-propagating through each copy.
    checkpoints_sorted_lists = tf_toposort(checkpoints, within_ops=fwd_ops)
    for ts in checkpoints_sorted_lists[::(- 1)]:
        debug_print('Processing list %s', ts)
        checkpoints_other = [r for r in checkpoints if (r not in ts)]
        checkpoints_disconnected_other = [checkpoints_disconnected[r] for r in checkpoints_other]
        # Copy the sub-graph between this checkpoint layer and the other checkpoints.
        ops_to_copy = fast_backward_ops(within_ops=fwd_ops, seed_ops=[r.op for r in ts], stop_at_ts=checkpoints_other)
        debug_print('Found %s ops to copy within %s, seed %s, stop_at %s', len(ops_to_copy), fwd_ops, [r.op for r in ts], checkpoints_other)
        debug_print('ops_to_copy = %s', ops_to_copy)
        if (not ops_to_copy):
            break
        (copied_sgv, info) = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})
        copied_ops = info._transformed_ops.values()
        debug_print('Copied %s to %s', ops_to_copy, copied_ops)
        ge.reroute_ts(checkpoints_disconnected_other, checkpoints_other, can_modify=copied_ops)
        debug_print('Rewired %s in place of %s restricted to %s', checkpoints_disconnected_other, checkpoints_other, copied_ops)
        boundary = [info._transformed_ops[r.op]._outputs[0] for r in ts]
        # Seed this segment's backprop with the gradients already accumulated
        # at these checkpoints.
        substitute_backprops = [d_checkpoints[r] for r in ts]
        dv = tf_gradients(boundary, (checkpoints_disconnected_other + xs), grad_ys=substitute_backprops, **kwargs)
        debug_print('Got gradients %s', dv)
        debug_print('for %s', boundary)
        debug_print('with respect to %s', (checkpoints_disconnected_other + xs))
        debug_print('with boundary backprop substitutions %s', substitute_backprops)
        inputs_to_do_before = [d_checkpoints[r].op for r in ts]
        wait_to_do_ops = (list(copied_ops) + [g.op for g in dv if (g is not None)])
        my_add_control_inputs(wait_to_do_ops, inputs_to_do_before)
        # Accumulate partial gradients for the remaining checkpoints and xs.
        for (r, dr) in zip(checkpoints_other, dv[:len(checkpoints_other)]):
            if (dr is not None):
                if (d_checkpoints[r] is None):
                    d_checkpoints[r] = dr
                else:
                    d_checkpoints[r] += dr
        d_xs_new = dv[len(checkpoints_other):]
        for j in range(len(xs)):
            if (d_xs_new[j] is not None):
                if (d_xs[j] is None):
                    d_xs[j] = d_xs_new[j]
                else:
                    d_xs[j] += d_xs_new[j]
    return d_xs
|
def tf_toposort(ts, within_ops=None):
    """Topologically sort tensors `ts`, returned as lists of same-level tensors."""
    all_ops = ge.get_forward_walk_ops([x.op for x in ts], within_ops=within_ops)
    # Map each op output to the set of tensors it depends on.
    deps = {}
    for op in all_ops:
        for out in op.outputs:
            deps[out] = set(op.inputs)
    # Keep only the requested tensors, level by level, dropping empty levels.
    ts_sorted_lists = []
    for level in toposort(deps):
        level_ts = list(set(level).intersection(ts))
        if level_ts:
            ts_sorted_lists.append(level_ts)
    return ts_sorted_lists
|
def fast_backward_ops(within_ops, seed_ops, stop_at_ts):
    """Ops reachable backward from `seed_ops`, restricted to `within_ops`, excluding the producers of `stop_at_ts`."""
    reachable = set(ge.get_backward_walk_ops(seed_ops, stop_at_ts=stop_at_ts))
    stop_producers = [t.op for t in stop_at_ts]
    return list(reachable.intersection(within_ops).difference(stop_producers))
|
@contextlib.contextmanager
def capture_ops():
    """Capture ops created in the block.

    with capture_ops() as ops:
        # create some ops
        print(ops)  # => prints ops created.
    """
    # A microsecond timestamp is (practically) unique, so every op created in
    # the block lands under a scope nobody else uses.
    scope_name = str(int((time.time() * (10 ** 6))))
    op_list = []
    with tf.name_scope(scope_name):
        (yield op_list)
    # After the block exits, collect everything created under the scope.
    graph = tf.get_default_graph()
    op_list.extend(ge.select_ops((scope_name + '/.*'), graph=graph))
|
def _to_op(tensor_or_op):
if hasattr(tensor_or_op, 'op'):
return tensor_or_op.op
return tensor_or_op
|
def _to_ops(iterable):
    """Map `_to_op` over an iterable of tensors/ops; return non-iterables unchanged."""
    if _is_iterable(iterable):
        return [_to_op(item) for item in iterable]
    return iterable
|
def _is_iterable(o):
try:
_ = iter(o)
except Exception:
return False
return True
|
def debug_print(s, *args):
    """Like logger.log, but also replaces all TensorFlow ops/tensors with their
    names. Sensitive to value of DEBUG_LOGGING, see enable_debug/disable_debug

    Usage:
        debug_print("see tensors %s for %s", tensorlist, [1,2,3])
    """
    if DEBUG_LOGGING:
        rendered = tuple(format_ops(arg) for arg in args)
        print(('DEBUG ' + (s % rendered)))
|
def format_ops(ops, sort_outputs=True):
    """Render ops for printing: Tensor/Operation -> op.name, everything else -> str(op)."""
    # Strings are iterable but should be rendered as single values.
    if isinstance(ops, str) or (not hasattr(ops, '__iter__')):
        return (ops.name if hasattr(ops, 'name') else str(ops))
    rendered = [(op.name if hasattr(op, 'name') else str(op)) for op in ops]
    return (sorted(rendered) if sort_outputs else rendered)
|
def my_add_control_inputs(wait_to_do_ops, inputs_to_do_before):
    """Add `inputs_to_do_before` as control inputs of each op, skipping any already present."""
    for op in wait_to_do_ops:
        existing = op.control_inputs
        new_ci = [i for i in inputs_to_do_before if ((existing is None) or (i not in existing))]
        ge.add_control_inputs(op, new_ci)
|
def polyak(params, beta):
    # Build Polyak (exponential moving average) ops for `params`:
    #   avg_op  - updates the EMA of every parameter with decay `beta`;
    #   swap_op - exchanges each parameter with its current average
    #             (running swap_op twice restores the originals);
    #   ema     - the underlying ExponentialMovingAverage object.
    # NOTE: the control_dependencies chain below is essential — tmp must be
    # materialized before avg is overwritten, and avg must be overwritten
    # before p is replaced, or the swap loses a value.
    ema = tf.train.ExponentialMovingAverage(decay=beta, zero_debias=True)
    avg_op = tf.group(ema.apply(params))
    updates = []
    for i in range(len(params)):
        p = params[i]
        avg = ema.average(p)
        # Force a copy of the average's current value into a fresh tensor.
        tmp = (0.0 + (avg * 1.0))
        with tf.control_dependencies([tmp]):
            update1 = avg.assign(p)
            with tf.control_dependencies([update1]):
                update2 = p.assign(tmp)
                updates += [update1, update2]
    swap_op = tf.group(*updates)
    return (avg_op, swap_op, ema)
|
def adam(params, cost_or_grads, alpha=0.0003, hps=None, epsilon=1e-08):
    # Horovod-aware Adam with Polyak averaging.
    # Returns (train_op, polyak_swap_op, ema).
    updates = []
    if (type(cost_or_grads) is not list):
        gs = tf.gradients(cost_or_grads, params)
    else:
        gs = cost_or_grads
    # Second-moment decay chosen so its time constant spans polyak_epochs
    # worth of training iterations; also reused as the Polyak decay below.
    beta2 = (1 - (1.0 / (hps.train_its * hps.polyak_epochs)))
    # Average gradients across all Horovod workers.
    grads = [Z.allreduce_mean(g) for g in gs]
    # NOTE(review): the second positional argument of tf.Variable is
    # `trainable`, not `name` — these string arguments act as a truthy
    # `trainable` flag (same as the default) and the intended names are lost.
    t = tf.Variable(1.0, 'adam_t')
    # Bias-corrected step size.
    alpha_t = ((alpha * tf.sqrt((1.0 - tf.pow(beta2, t)))) / (1.0 - tf.pow(hps.beta1, t)))
    updates.append(t.assign_add(1))
    for (w, g) in zip(params, grads):
        mom2 = tf.Variable(tf.zeros(w.get_shape()), (w.name + '_adam_m2'))
        if (hps.beta1 > 0):
            mom1 = tf.Variable(tf.zeros(w.get_shape()), (w.name + '_adam_m1'))
            mom1_new = ((hps.beta1 * mom1) + ((1.0 - hps.beta1) * g))
            updates.append(mom1.assign(mom1_new))
        else:
            # beta1 == 0 degenerates to RMSProp-style updates (no first moment).
            mom1_new = g
        m2_new = ((beta2 * mom2) + ((1.0 - beta2) * tf.square(g)))
        delta_t = (mom1_new / (tf.sqrt(m2_new) + epsilon))
        # hps.weight_decay multiplies the weight directly (decoupled decay).
        w_new = ((hps.weight_decay * w) - (alpha_t * delta_t))
        updates.append(mom2.assign(m2_new))
        updates.append(w.assign(w_new))
    (polyak_avg_op, polyak_swap_op, ema) = polyak(params, beta2)
    train_op = tf.group(polyak_avg_op, *updates)
    return (train_op, polyak_swap_op, ema)
|
def adam2(params, cost_or_grads, alpha=0.0003, hps=None, epsilon=1e-08):
    # Variant of adam() that all-reduces E[g] and E[g^2] separately, so the
    # second moment is estimated from per-worker squared gradients rather
    # than from the square of the averaged gradient.
    updates = []
    if (type(cost_or_grads) is not list):
        gs = tf.gradients(cost_or_grads, params)
    else:
        gs = cost_or_grads
    beta2 = (1 - (1.0 / (hps.train_its * hps.polyak_epochs)))
    grads1 = [Z.allreduce_mean(g) for g in gs]
    grads2 = [Z.allreduce_mean((g ** 2)) for g in gs]
    # NOTE(review): second positional arg of tf.Variable is `trainable`,
    # not `name` — the intended variable names are silently discarded.
    t = tf.Variable(1.0, 'adam_t')
    # Bias-corrected step size.
    alpha_t = ((alpha * tf.sqrt((1.0 - tf.pow(beta2, t)))) / (1.0 - tf.pow(hps.beta1, t)))
    updates.append(t.assign_add(1))
    for (w, g1, g2) in zip(params, grads1, grads2):
        mom2 = tf.Variable(tf.zeros(w.get_shape()), (w.name + '_adam_m2'))
        if (hps.beta1 > 0):
            mom1 = tf.Variable(tf.zeros(w.get_shape()), (w.name + '_adam_m1'))
            mom1_new = ((hps.beta1 * mom1) + ((1.0 - hps.beta1) * g1))
            updates.append(mom1.assign(mom1_new))
        else:
            mom1_new = g1
        # Second moment driven by the all-reduced squared gradients.
        m2_new = ((beta2 * mom2) + ((1.0 - beta2) * g2))
        delta_t = (mom1_new / (tf.sqrt(m2_new) + epsilon))
        w_new = ((hps.weight_decay * w) - (alpha_t * delta_t))
        updates.append(mom2.assign(m2_new))
        updates.append(w.assign(w_new))
    (polyak_avg_op, polyak_swap_op, ema) = polyak(params, beta2)
    train_op = tf.group(polyak_avg_op, *updates)
    return (train_op, polyak_swap_op, ema)
|
def adam2_old(params, cost_or_grads, lr=0.0003, mom1=0.9, mom2=0.999, epsilon=1e-08):
    # Older Adam variant without Polyak averaging; returns a single grouped
    # update op instead of the (train_op, swap_op, ema) triple.
    updates = []
    if (type(cost_or_grads) is not list):
        gs = tf.gradients(cost_or_grads, params)
    else:
        gs = cost_or_grads
    # All-reduce E[g] and E[g^2] across Horovod workers.
    grads1 = [Z.allreduce_mean(g) for g in gs]
    grads2 = [Z.allreduce_mean(tf.square(g)) for g in gs]
    # Rescale the second-moment decay for the effective (world-size) batch,
    # clamped at 0.
    mom2 = tf.maximum(0.0, (1.0 - (hvd.size() * (1 - mom2))))
    # NOTE(review): second positional arg of tf.Variable is `trainable`,
    # not `name` — the intended variable names are silently discarded.
    t = tf.Variable(1.0, 'adam_t')
    # Bias-corrected learning rate.
    lr_t = ((lr * tf.sqrt((1.0 - tf.pow(mom2, t)))) / (1.0 - tf.pow(mom1, t)))
    updates.append(t.assign_add(1))
    for (p, g1, g2) in zip(params, grads1, grads2):
        mg = tf.Variable(tf.zeros(p.get_shape()), (p.name + '_adam_mg'))
        if (mom1 > 0):
            v = tf.Variable(tf.zeros(p.get_shape()), (p.name + '_adam_v'))
            v_t = ((mom1 * v) + ((1.0 - mom1) * g1))
            updates.append(v.assign(v_t))
        else:
            v_t = g1
        mg_t = ((mom2 * mg) + ((1.0 - mom2) * g2))
        delta_t = (v_t / (tf.sqrt(mg_t) + epsilon))
        p_t = (p - (lr_t * delta_t))
        updates.append(mg.assign(mg_t))
        updates.append(p.assign(p_t))
    return tf.group(*updates)
|
def adamax(params, cost_or_grads, alpha=0.0003, hps=None, epsilon=1e-08):
    # AdaMax: like adam() but the second moment is an infinity-norm running
    # maximum instead of an RMS, so delta has no sqrt.
    updates = []
    if (type(cost_or_grads) is not list):
        gs = tf.gradients(cost_or_grads, params)
    else:
        gs = cost_or_grads
    beta2 = (1 - (1.0 / (hps.train_its * hps.polyak_epochs)))
    # Average gradients across all Horovod workers.
    grads = [Z.allreduce_mean(g) for g in gs]
    # NOTE(review): second positional arg of tf.Variable is `trainable`,
    # not `name` — the intended variable names are silently discarded.
    t = tf.Variable(1.0, 'adam_t')
    alpha_t = ((alpha * tf.sqrt((1.0 - tf.pow(beta2, t)))) / (1.0 - tf.pow(hps.beta1, t)))
    updates.append(t.assign_add(1))
    for (w, g) in zip(params, grads):
        mom2 = tf.Variable(tf.zeros(w.get_shape()), (w.name + '_adam_m2'))
        if (hps.beta1 > 0):
            mom1 = tf.Variable(tf.zeros(w.get_shape()), (w.name + '_adam_m1'))
            mom1_new = ((hps.beta1 * mom1) + ((1.0 - hps.beta1) * g))
            updates.append(mom1.assign(mom1_new))
        else:
            mom1_new = g
        # Infinity-norm update: decayed running max of |g|.
        m2_new = tf.maximum((beta2 * mom2), abs(g))
        delta_t = (mom1_new / (m2_new + epsilon))
        w_new = ((hps.weight_decay * w) - (alpha_t * delta_t))
        updates.append(mom2.assign(m2_new))
        updates.append(w.assign(w_new))
    (polyak_avg_op, polyak_swap_op, ema) = polyak(params, beta2)
    train_op = tf.group(polyak_avg_op, *updates)
    return (train_op, polyak_swap_op, ema)
|
def _print(*args, **kwargs):
    # print() only on the Horovod root rank, so output is not duplicated per worker.
    if (hvd.rank() == 0):
        print(*args, **kwargs)
|
def init_visualizations(hps, model, logdir):
    """Build a `draw_samples(epoch)` callback that saves sample grids to `logdir`.

    Only the Horovod root rank draws; other ranks return immediately.
    """
    def sample_batch(y, eps):
        # Sample in chunks of the local train batch size, then stitch together.
        n_batch = hps.local_batch_train
        xs = []
        for i in range(int(np.ceil((len(eps) / n_batch)))):
            xs.append(model.sample(y[(i * n_batch):((i * n_batch) + n_batch)], eps[(i * n_batch):((i * n_batch) + n_batch)]))
        return np.concatenate(xs)

    def draw_samples(epoch):
        if (hvd.rank() != 0):
            return
        # Smaller grids for large images to keep sampling affordable.
        rows = (10 if (hps.image_size <= 64) else 4)
        cols = rows
        n_batch = (rows * cols)
        # One column per class label, repeated across rows.
        y = np.asarray([(_y % hps.n_y) for _y in (list(range(cols)) * rows)], dtype='int32')
        # FIX: this list was previously defined but unused — the eight calls
        # below were written out by hand with the same literals. Iterate it.
        temperatures = [0.0, 0.25, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
        x_samples = [sample_batch(y, ([temp] * n_batch)) for temp in temperatures]
        for i in range(len(x_samples)):
            x_sample = np.reshape(x_samples[i], (n_batch, hps.image_size, hps.image_size, 3))
            graphics.save_raster(x_sample, (logdir + 'epoch_{}_sample_{}.png'.format(epoch, i)))
    return draw_samples
|
def get_data(hps, sess):
    # Fill in per-problem defaults for any hps fields left unset (-1 / ''
    # are "use the table" sentinels), then build the dataset iterators.
    if (hps.image_size == (- 1)):
        hps.image_size = {'mnist': 32, 'cifar10': 32, 'imagenet-oord': 64, 'imagenet': 256, 'celeba': 256, 'lsun_realnvp': 64, 'lsun': 256}[hps.problem]
    if (hps.n_test == (- 1)):
        hps.n_test = {'mnist': 10000, 'cifar10': 10000, 'imagenet-oord': 50000, 'imagenet': 50000, 'celeba': 3000, 'lsun_realnvp': (300 * hvd.size()), 'lsun': (300 * hvd.size())}[hps.problem]
    hps.n_y = {'mnist': 10, 'cifar10': 10, 'imagenet-oord': 1000, 'imagenet': 1000, 'celeba': 1, 'lsun_realnvp': 1, 'lsun': 1}[hps.problem]
    if (hps.data_dir == ''):
        hps.data_dir = {'mnist': None, 'cifar10': None, 'imagenet-oord': '/mnt/host/imagenet-oord-tfr', 'imagenet': '/mnt/host/imagenet-tfr', 'celeba': 'data/celeba-tfr', 'lsun_realnvp': '/mnt/host/lsun_realnvp', 'lsun': '/mnt/host/lsun'}[hps.problem]
    # Only realnvp-style LSUN uses random crops.
    if (hps.problem == 'lsun_realnvp'):
        hps.rnd_crop = True
    else:
        hps.rnd_crop = False
    if hps.category:
        hps.data_dir += ('/%s' % hps.category)
    # Keep total pixel throughput roughly constant across resolutions:
    # scale batch sizes by (anchor_size / image_size)^2.
    s = hps.anchor_size
    hps.local_batch_train = (((hps.n_batch_train * s) * s) // (hps.image_size * hps.image_size))
    hps.local_batch_test = {64: 50, 32: 25, 16: 10, 8: 5, 4: 2, 2: 2, 1: 1}[hps.local_batch_train]
    hps.local_batch_init = (((hps.n_batch_init * s) * s) // (hps.image_size * hps.image_size))
    print('Rank {} Batch sizes Train {} Test {} Init {}'.format(hvd.rank(), hps.local_batch_train, hps.local_batch_test, hps.local_batch_init))
    if (hps.problem in ['imagenet-oord', 'imagenet', 'celeba', 'lsun_realnvp', 'lsun']):
        # TFRecord-backed datasets expose a direct TF iterator.
        hps.direct_iterator = True
        import data_loaders.get_data as v
        (train_iterator, test_iterator, data_init) = v.get_data(sess, hps.data_dir, hvd.size(), hvd.rank(), hps.pmap, hps.fmap, hps.local_batch_train, hps.local_batch_test, hps.local_batch_init, hps.image_size, hps.rnd_crop)
    elif (hps.problem in ['mnist', 'cifar10']):
        # Keras-backed datasets yield numpy batches from a python callable.
        hps.direct_iterator = False
        import data_loaders.get_mnist_cifar as v
        (train_iterator, test_iterator, data_init) = v.get_data(hps.problem, hvd.size(), hvd.rank(), hps.dal, hps.local_batch_train, hps.local_batch_test, hps.local_batch_init, hps.image_size)
    else:
        raise Exception()
    return (train_iterator, test_iterator, data_init)
|
def process_results(results):
    """Map a stats vector [loss, bits_x, bits_y, pred_loss] to a dict of 4-decimal strings."""
    stats = ['loss', 'bits_x', 'bits_y', 'pred_loss']
    assert (len(stats) == results.shape[0])
    return {name: '{:.4f}'.format(value) for (name, value) in zip(stats, results)}
|
def main(hps):
    """Entry point: initialize Horovod, build data/model, then train or run inference."""
    hvd.init()
    sess = tensorflow_session()
    # Seed each rank differently but deterministically from hps.seed.
    tf.set_random_seed((hvd.rank() + (hvd.size() * hps.seed)))
    np.random.seed((hvd.rank() + (hvd.size() * hps.seed)))
    (train_iterator, test_iterator, data_init) = get_data(hps, sess)
    (hps.train_its, hps.test_its, hps.full_test_its) = get_its(hps)
    logdir = (os.path.abspath(hps.logdir) + '/')
    # All ranks race to create the log directory. exist_ok makes that race
    # benign; the old exists-check + bare `except: pass` also silently hid
    # genuine failures such as permission errors.
    os.makedirs(logdir, exist_ok=True)
    import model
    # Keep the module and the instance under distinct names (the original
    # rebound `model` to the instance, shadowing the module).
    model_instance = model.model(sess, hps, train_iterator, test_iterator, data_init)
    visualise = init_visualizations(hps, model_instance, logdir)
    if (not hps.inference):
        train(sess, model_instance, hps, logdir, visualise)
    else:
        infer(sess, model_instance, hps, test_iterator)
|
def infer(sess, model, hps, iterator):
    # Evaluate log p(z) and its input-gradient over the full test set, saving
    # both arrays under the hard-coded 'logs/' directory.
    if hps.direct_iterator:
        iterator = iterator.get_next()
    print('Running inference on {} data points'.format((hps.full_test_its * hps.n_batch_test)))
    logpz = []
    grad_logpz = []
    for it in range(hps.full_test_its):
        if hps.direct_iterator:
            (x, y) = sess.run(iterator)
        else:
            (x, y) = iterator()
        # Map uint8 pixels into [-0.5, 0.5).
        x = ((x / 256.0) - 0.5)
        logpz.append(model.logprob(x, y))
        grad_logpz.append(model.grad_logprob(x, y))
    logpz = np.concatenate(logpz, axis=0)
    np.save('logs/logpz.npy', logpz)
    grad_logpz = np.concatenate(grad_logpz, axis=0)
    np.save('logs/grad_logpz.npy', grad_logpz)
    # NOTE(review): the 32*32*3 divisor (bits-per-dim) assumes 32x32 RGB
    # images — confirm before running on other image sizes.
    print('NLL = {}'.format(((- logpz.mean()) / (((32 * 32) * 3) * np.log(2)))))
|
def train(sess, model, hps, logdir, visualise):
    # Main training loop: per-epoch optimization, periodic validation,
    # sampling and best-checkpoint saving. Only rank 0 logs/saves/visualises.
    _print(hps)
    _print('Starting training. Logging to', logdir)
    _print('epoch n_processed n_images ips dtrain dtest dsample dtot train_results test_results msg')
    # Freeze the graph so accidental op creation inside the loop fails fast.
    sess.graph.finalize()
    n_processed = 0
    n_images = 0
    train_time = 0.0
    test_loss_best = 999999
    if (hvd.rank() == 0):
        train_logger = ResultLogger((logdir + 'train.txt'), **hps.__dict__)
        test_logger = ResultLogger((logdir + 'test.txt'), **hps.__dict__)
    tcurr = time.time()
    for epoch in range(1, hps.epochs):
        t = time.time()
        train_results = []
        for it in range(hps.train_its):
            # Linear learning-rate warmup over the first epochs_warmup epochs.
            lr = (hps.lr * min(1.0, (n_processed / (hps.n_train * hps.epochs_warmup))))
            _t = time.time()
            train_results += [model.train(lr)]
            if (hps.verbose and (hvd.rank() == 0)):
                _print(n_processed, (time.time() - _t), train_results[(- 1)])
                sys.stdout.flush()
            n_processed += (hvd.size() * hps.n_batch_train)
            n_images += (hvd.size() * hps.local_batch_train)
        train_results = np.mean(np.asarray(train_results), axis=0)
        dtrain = (time.time() - t)
        # Images per second across all workers.
        ips = (((hps.train_its * hvd.size()) * hps.local_batch_train) / dtrain)
        train_time += dtrain
        if (hvd.rank() == 0):
            train_logger.log(epoch=epoch, n_processed=n_processed, n_images=n_images, train_time=int(train_time), **process_results(train_results))
        # Validate on a denser schedule early on, then every epochs_full_valid.
        if ((epoch < 10) or ((epoch < 50) and ((epoch % 10) == 0)) or ((epoch % hps.epochs_full_valid) == 0)):
            test_results = []
            msg = ''
            t = time.time()
            if ((epoch % hps.epochs_full_valid) == 0):
                for it in range(hps.full_test_its):
                    test_results += [model.test()]
                test_results = np.mean(np.asarray(test_results), axis=0)
                if (hvd.rank() == 0):
                    test_logger.log(epoch=epoch, n_processed=n_processed, n_images=n_images, **process_results(test_results))
                    # Keep a checkpoint of the best validation loss so far.
                    if (test_results[0] < test_loss_best):
                        test_loss_best = test_results[0]
                        model.save((logdir + 'model_best_loss.ckpt'))
                        msg += ' *'
            dtest = (time.time() - t)
            t = time.time()
            if ((epoch == 1) or (epoch == 10) or ((epoch % hps.epochs_full_sample) == 0)):
                visualise(epoch)
            dsample = (time.time() - t)
            if (hvd.rank() == 0):
                dcurr = (time.time() - tcurr)
                tcurr = time.time()
                _print(epoch, n_processed, n_images, '{:.1f} {:.1f} {:.1f} {:.1f} {:.1f}'.format(ips, dtrain, dtest, dsample, dcurr), train_results, test_results, msg)
    if (hvd.rank() == 0):
        _print('Finished!')
|
def get_its(hps):
    """Compute per-epoch iteration counts for distributed (Horovod) training.

    Returns:
        (train_its, test_its, full_test_its): iterations per training epoch,
        per quick test pass, and for one full pass over the test set.
    """
    world = hvd.size()
    global_train_batch = hps.n_batch_train * world
    train_its = int(np.ceil(hps.n_train / global_train_batch))
    test_its = int(np.ceil(hps.n_test / global_train_batch))
    train_epoch = train_its * global_train_batch
    if hvd.rank() == 0:
        print(hps.n_test, hps.local_batch_test, world)
    # A full test pass requires the test set to split evenly across workers.
    assert hps.n_test % (hps.local_batch_test * world) == 0
    full_test_its = hps.n_test // (hps.local_batch_test * world)
    if hvd.rank() == 0:
        print('Train epoch size: ' + str(train_epoch))
    return (train_its, test_its, full_test_its)
|
def tensorflow_session():
    """Create a TF session pinned to this Horovod worker's local GPU."""
    config = tf.ConfigProto()
    # Grow GPU memory on demand instead of grabbing it all up front.
    config.gpu_options.allow_growth = True
    # One visible device per process, selected by the worker's local rank.
    config.gpu_options.visible_device_list = str(hvd.local_rank())
    return tf.Session(config=config)
|
class ResultLogger(object):
    """Write experiment results as JSON lines.

    The constructor immediately records the keyword arguments (typically
    the hyper-parameter dict) as the first line of the file; each ``log``
    call appends one more JSON record and flushes it to disk.
    """

    def __init__(self, path, *args, **kwargs):
        self.f_log = open(path, 'w')
        self._emit(kwargs, flush=False)

    def _emit(self, record, flush=True):
        # One JSON object per line (JSON-lines format).
        self.f_log.write(json.dumps(record) + '\n')
        if flush:
            self.f_log.flush()

    def log(self, **kwargs):
        self._emit(kwargs)

    def close(self):
        self.f_log.close()
|
def gen_bar_updater():
    """Build a ``reporthook`` for ``urlretrieve`` backed by a tqdm bar."""
    pbar = tqdm(total=None)

    def bar_update(count, block_size, total_size):
        # The total size is only known once the server has answered.
        if pbar.total is None and total_size:
            pbar.total = total_size
        pbar.update(count * block_size - pbar.n)

    return bar_update
|
def check_integrity(fpath, md5=None):
    """Return True when *fpath* exists and matches *md5* (if one is given).

    When ``md5`` is None no check is performed and True is returned
    unconditionally, even for a missing file.
    """
    if md5 is None:
        return True
    if not os.path.isfile(fpath):
        return False
    hasher = hashlib.md5()
    with open(fpath, 'rb') as f:
        # Hash in 1 MiB chunks to bound memory on large downloads.
        for chunk in iter(lambda: f.read(1024 * 1024), b''):
            hasher.update(chunk)
    return hasher.hexdigest() == md5
|
def makedir_exist_ok(dirpath):
    """Create *dirpath* (and parents), tolerating an already-existing path.

    Python 2 compatible stand-in for ``os.makedirs(.., exist_ok=True)``.
    """
    try:
        os.makedirs(dirpath)
    except OSError as e:
        # Only "already exists" is benign; re-raise anything else.
        if e.errno != errno.EEXIST:
            raise
|
def download_url(url, root, filename=None, md5=None):
    """Download a file from a url and place it in root.

    Args:
        url (str): URL to download file from
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the basename of the URL
        md5 (str, optional): MD5 checksum of the download. If None, do not check
    """
    from six.moves import urllib
    root = os.path.expanduser(root)
    filename = filename or os.path.basename(url)
    fpath = os.path.join(root, filename)
    makedir_exist_ok(root)
    # Skip the network round-trip when a verified copy already exists.
    if os.path.isfile(fpath) and check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
        return
    try:
        print('Downloading ' + url + ' to ' + fpath)
        urllib.request.urlretrieve(url, fpath, reporthook=gen_bar_updater())
    except OSError:
        # Some hosts only answer over plain HTTP; retry once downgraded.
        # NOTE(review): a non-https failure is silently swallowed here, as
        # in the original code.
        if url[:5] == 'https':
            url = url.replace('https:', 'http:')
            print('Failed download. Trying https -> http instead. Downloading ' + url + ' to ' + fpath)
            urllib.request.urlretrieve(url, fpath, reporthook=gen_bar_updater())
|
def list_dir(root, prefix=False):
    """List all directories directly under *root*.

    Args:
        root (str): Path to the directory whose folders need to be listed
        prefix (bool, optional): If true, each entry is joined with *root*;
            otherwise only the bare directory names are returned
    """
    root = os.path.expanduser(root)
    directories = [d for d in os.listdir(root)
                   if os.path.isdir(os.path.join(root, d))]
    if prefix is True:
        directories = [os.path.join(root, d) for d in directories]
    return directories
|
def list_files(root, suffix, prefix=False):
    """List all files under *root* whose names end with *suffix*.

    Args:
        root (str): Path to the directory whose files need to be listed
        suffix (str or tuple): Suffix to match, e.g. '.png' or ('.jpg', '.png');
            passed directly to ``str.endswith``
        prefix (bool, optional): If true, each entry is joined with *root*;
            otherwise only the bare file names are returned
    """
    root = os.path.expanduser(root)
    files = [f for f in os.listdir(root)
             if os.path.isfile(os.path.join(root, f)) and f.endswith(suffix)]
    if prefix is True:
        files = [os.path.join(root, f) for f in files]
    return files
|
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
    """Download a Google Drive file by id and place it in root.

    Args:
        file_id (str): id of file to be downloaded
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the id of the file.
        md5 (str, optional): MD5 checksum of the download. If None, do not check
    """
    import requests
    url = 'https://docs.google.com/uc?export=download'
    root = os.path.expanduser(root)
    filename = filename or file_id
    fpath = os.path.join(root, filename)
    makedir_exist_ok(root)
    # Skip the download when a verified copy already exists.
    if os.path.isfile(fpath) and check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
        return
    session = requests.Session()
    response = session.get(url, params={'id': file_id}, stream=True)
    # Large files trigger a "can't scan for viruses" interstitial; its
    # cookie carries a token we must echo back to confirm the download.
    token = _get_confirm_token(response)
    if token:
        response = session.get(url, params={'id': file_id, 'confirm': token}, stream=True)
    _save_response_content(response, fpath)
|
def _get_confirm_token(response):
for (key, value) in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
|
def _save_response_content(response, destination, chunk_size=32768):
    """Stream *response* to *destination*, showing tqdm progress in bytes."""
    with open(destination, 'wb') as f:
        pbar = tqdm(total=None)
        written = 0
        for chunk in response.iter_content(chunk_size):
            # Keep-alive chunks arrive empty; skip them.
            if not chunk:
                continue
            f.write(chunk)
            written += len(chunk)
            pbar.update(written - pbar.n)
        pbar.close()
|
class VisionDataset(data.Dataset):
    """Base class for vision datasets: stores a root path, provides repr.

    Subclasses must implement ``__getitem__`` and ``__len__``.
    """
    # Number of spaces used to indent the body of __repr__.
    _repr_indent = 4

    def __init__(self, root):
        # FIX: previously tested against torch._six.string_classes, a
        # private alias that was removed from modern PyTorch; on Python 3
        # it is simply (str,), so isinstance(root, str) is equivalent.
        if isinstance(root, str):
            root = os.path.expanduser(root)
        self.root = root

    def __getitem__(self, index):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError

    def __repr__(self):
        """Multi-line summary: class name, size, root, transforms."""
        head = 'Dataset ' + self.__class__.__name__
        body = ['Number of datapoints: {}'.format(self.__len__())]
        if self.root is not None:
            body.append('Root location: {}'.format(self.root))
        body += self.extra_repr().splitlines()
        # transform/target_transform are optional subclass attributes.
        if hasattr(self, 'transform') and self.transform is not None:
            body += self._format_transform_repr(self.transform, 'Transforms: ')
        if hasattr(self, 'target_transform') and self.target_transform is not None:
            body += self._format_transform_repr(self.target_transform, 'Target transforms: ')
        lines = [head] + [(' ' * self._repr_indent) + line for line in body]
        return '\n'.join(lines)

    def _format_transform_repr(self, transform, head):
        # Indent continuation lines so they align under the header text.
        lines = transform.__repr__().splitlines()
        return (['{}{}'.format(head, lines[0])]
                + ['{}{}'.format(' ' * len(head), line) for line in lines[1:]])

    def extra_repr(self):
        """Hook for subclasses to add lines to __repr__."""
        return ''
|
def dsm(energy_net, samples, sigma=1):
    """Denoising score matching loss for an energy network.

    Perturbs *samples* with N(0, sigma^2) noise and penalizes the mismatch
    between sigma^2 * grad log p at the noisy point and the injected noise.
    """
    samples.requires_grad_(True)
    noise = torch.randn_like(samples) * sigma
    noisy = samples + noise
    logp = -energy_net(noisy)
    # Score of the perturbed input, rescaled by the noise variance.
    dlogp = (sigma ** 2) * autograd.grad(logp.sum(), noisy, create_graph=True)[0]
    residual = dlogp + noise
    loss = torch.norm(residual, dim=-1) ** 2
    return loss.mean() / 2.0
|
def dsm_score_estimation(scorenet, samples, sigma=0.01):
    """Denoising score matching loss for a direct score network.

    The regression target is the score of the Gaussian perturbation
    kernel, -(x~ - x) / sigma^2 (Vincent, 2011).
    """
    perturbed_samples = samples + torch.randn_like(samples) * sigma
    target = ((-1) / (sigma ** 2)) * (perturbed_samples - samples)
    scores = scorenet(perturbed_samples)
    # Flatten everything after the batch dimension.
    flat_target = target.view(target.shape[0], -1)
    flat_scores = scores.view(scores.shape[0], -1)
    return 0.5 * ((flat_scores - flat_target) ** 2).sum(dim=-1).mean(dim=0)
|
def anneal_dsm_score_estimation(scorenet, samples, labels, sigmas, anneal_power=2.0):
    """Annealed denoising score matching loss.

    Each sample i is perturbed at the noise level selected by its label
    (sigma = sigmas[labels[i]]) and its per-sample DSM loss is reweighted
    by sigma**anneal_power so that all noise levels contribute on a
    comparable scale (as in Song & Ermon's NCSN).
    """
    # Reshape per-sample sigmas to broadcast over the trailing dimensions.
    used_sigmas = sigmas[labels].view(samples.shape[0], *([1] * len(samples.shape[1:])))
    perturbed_samples = (samples + (torch.randn_like(samples) * used_sigmas))
    # Score of the Gaussian perturbation kernel: -(x~ - x) / sigma^2.
    target = (((- 1) / (used_sigmas ** 2)) * (perturbed_samples - samples))
    scores = scorenet(perturbed_samples, labels)
    # Flatten everything after the batch dimension before the squared error.
    target = target.view(target.shape[0], (- 1))
    scores = scores.view(scores.shape[0], (- 1))
    # Per-sample squared error, annealed by sigma**anneal_power.
    loss = (((1 / 2.0) * ((scores - target) ** 2).sum(dim=(- 1))) * (used_sigmas.squeeze() ** anneal_power))
    return loss.mean(dim=0)
|
def single_sliced_score_matching(energy_net, samples, noise=None, detach=False, noise_type='radermacher'):
    """Single-projection sliced score matching loss for an energy network.

    Projects the score and its Hessian-vector product onto one random
    direction per sample.  ``detach`` stops gradients through both loss
    terms (useful for evaluation).

    Returns:
        (loss, grad1, grad2): scalar loss, score, and Hessian-vector product.
    """
    samples.requires_grad_(True)
    if noise is not None:
        vectors = noise
    else:
        vectors = torch.randn_like(samples)
        if noise_type == 'radermacher':
            # Rademacher directions: random +/-1 entries.
            vectors = vectors.sign()
        elif noise_type == 'sphere':
            # Uniform on the sphere of radius sqrt(dim).
            vectors = (vectors / torch.norm(vectors, dim=-1, keepdim=True)) * np.sqrt(vectors.shape[-1])
        elif noise_type == 'gaussian':
            pass
        else:
            raise ValueError('Noise type not implemented')
    logp = -energy_net(samples).sum()
    grad1 = autograd.grad(logp, samples, create_graph=True)[0]
    gradv = torch.sum(grad1 * vectors)
    # First term: 0.5 * (v . score)^2.
    loss1 = torch.sum(grad1 * vectors, dim=-1) ** 2 * 0.5
    if detach:
        loss1 = loss1.detach()
    # Second term: v . (H v), via a second differentiation.
    grad2 = autograd.grad(gradv, samples, create_graph=True)[0]
    loss2 = torch.sum(vectors * grad2, dim=-1)
    if detach:
        loss2 = loss2.detach()
    loss = (loss1 + loss2).mean()
    return (loss, grad1, grad2)
|
def partial_sliced_score_matching(energy_net, samples, noise=None, detach=False, noise_type='radermacher'):
    """Sliced score matching with a variance-reduced first term.

    Unlike ``single_sliced_score_matching``, the first term uses the full
    squared norm of the score instead of its one-dimensional projection.

    Returns:
        (loss, grad1, grad2): scalar loss, score, and Hessian-vector product.
    """
    samples.requires_grad_(True)
    if noise is not None:
        vectors = noise
    else:
        vectors = torch.randn_like(samples)
        if noise_type == 'radermacher':
            vectors = vectors.sign()
        elif noise_type == 'gaussian':
            pass
        else:
            raise ValueError('Noise type not implemented')
    logp = -energy_net(samples).sum()
    grad1 = autograd.grad(logp, samples, create_graph=True)[0]
    gradv = torch.sum(grad1 * vectors)
    # Variance-reduced first term: 0.5 * ||score||^2.
    loss1 = torch.norm(grad1, dim=-1) ** 2 * 0.5
    if detach:
        loss1 = loss1.detach()
    grad2 = autograd.grad(gradv, samples, create_graph=True)[0]
    loss2 = torch.sum(vectors * grad2, dim=-1)
    if detach:
        loss2 = loss2.detach()
    loss = (loss1 + loss2).mean()
    return (loss, grad1, grad2)
|
def sliced_score_matching(energy_net, samples, n_particles=1):
    """Sliced score matching loss with *n_particles* projections per sample.

    Returns:
        (loss, loss1, loss2): total and the two averaged component terms.
    """
    # Duplicate each sample n_particles times along the batch dimension.
    dup = samples.unsqueeze(0).expand(n_particles, *samples.shape).contiguous().view(-1, *samples.shape[1:])
    dup.requires_grad_(True)
    # Projection directions, uniform on the unit sphere.
    vectors = torch.randn_like(dup)
    vectors = vectors / torch.norm(vectors, dim=-1, keepdim=True)
    logp = -energy_net(dup).sum()
    grad1 = autograd.grad(logp, dup, create_graph=True)[0]
    gradv = torch.sum(grad1 * vectors)
    loss1 = torch.sum(grad1 * vectors, dim=-1) ** 2 * 0.5
    grad2 = autograd.grad(gradv, dup, create_graph=True)[0]
    loss2 = torch.sum(vectors * grad2, dim=-1)
    # Average the particle copies of each original sample.
    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)
    loss = loss1 + loss2
    return (loss.mean(), loss1.mean(), loss2.mean())
|
def sliced_score_matching_vr(energy_net, samples, n_particles=1):
    """Variance-reduced sliced score matching (SSM-VR) for an energy network.

    Returns:
        (loss, loss1, loss2): total and the two averaged component terms.
    """
    dup = samples.unsqueeze(0).expand(n_particles, *samples.shape).contiguous().view(-1, *samples.shape[1:])
    dup.requires_grad_(True)
    vectors = torch.randn_like(dup)
    logp = -energy_net(dup).sum()
    grad1 = autograd.grad(logp, dup, create_graph=True)[0]
    # Variance reduction: the exact ||score||^2 / 2 replaces the projection.
    loss1 = torch.sum(grad1 * grad1, dim=-1) / 2.0
    gradv = torch.sum(grad1 * vectors)
    grad2 = autograd.grad(gradv, dup, create_graph=True)[0]
    loss2 = torch.sum(vectors * grad2, dim=-1)
    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)
    loss = loss1 + loss2
    return (loss.mean(), loss1.mean(), loss2.mean())
|
def sliced_score_estimation(score_net, samples, n_particles=1):
    """Sliced score matching loss for a direct score network.

    Returns:
        (loss, loss1, loss2): total and the two averaged component terms.
    """
    dup = samples.unsqueeze(0).expand(n_particles, *samples.shape).contiguous().view(-1, *samples.shape[1:])
    dup.requires_grad_(True)
    # Projection directions, uniform on the unit sphere.
    vectors = torch.randn_like(dup)
    vectors = vectors / torch.norm(vectors, dim=-1, keepdim=True)
    grad1 = score_net(dup)
    gradv = torch.sum(grad1 * vectors)
    loss1 = torch.sum(grad1 * vectors, dim=-1) ** 2 * 0.5
    grad2 = autograd.grad(gradv, dup, create_graph=True)[0]
    loss2 = torch.sum(vectors * grad2, dim=-1)
    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)
    loss = loss1 + loss2
    return (loss.mean(), loss1.mean(), loss2.mean())
|
def sliced_score_estimation_vr(score_net, samples, n_particles=1):
    """Variance-reduced sliced score loss for a direct score network.

    Be careful if the shape of samples is not B x x_dim!

    Returns:
        (loss, loss1, loss2): total and the two averaged component terms.
    """
    dup = samples.unsqueeze(0).expand(n_particles, *samples.shape).contiguous().view(-1, *samples.shape[1:])
    dup.requires_grad_(True)
    vectors = torch.randn_like(dup)
    grad1 = score_net(dup)
    gradv = torch.sum(grad1 * vectors)
    grad2 = autograd.grad(gradv, dup, create_graph=True)[0]
    # Flatten before the norms so inputs with extra dims still work.
    grad1 = grad1.view(dup.shape[0], -1)
    loss1 = torch.sum(grad1 * grad1, dim=-1) / 2.0
    loss2 = torch.sum((vectors * grad2).view(dup.shape[0], -1), dim=-1)
    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)
    loss = loss1 + loss2
    return (loss.mean(), loss1.mean(), loss2.mean())
|
def anneal_sliced_score_estimation_vr(scorenet, samples, labels, sigmas, n_particles=1):
    """Annealed, variance-reduced sliced score estimation loss.

    Each sample is perturbed at the noise level selected by its label and
    the SSM-VR loss is reweighted by sigma^2.
    """
    # Broadcast each sample's sigma over its trailing dimensions.
    used_sigmas = sigmas[labels].view(samples.shape[0], *([1] * len(samples.shape[1:])))
    perturbed_samples = (samples + (torch.randn_like(samples) * used_sigmas))
    # Duplicate each perturbed sample (and its label) n_particles times.
    dup_samples = perturbed_samples.unsqueeze(0).expand(n_particles, *samples.shape).contiguous().view((- 1), *samples.shape[1:])
    dup_labels = labels.unsqueeze(0).expand(n_particles, *labels.shape).contiguous().view((- 1))
    dup_samples.requires_grad_(True)
    vectors = torch.randn_like(dup_samples)
    grad1 = scorenet(dup_samples, dup_labels)
    gradv = torch.sum((grad1 * vectors))
    # Hessian-vector product via a second differentiation pass.
    grad2 = autograd.grad(gradv, dup_samples, create_graph=True)[0]
    grad1 = grad1.view(dup_samples.shape[0], (- 1))
    # Variance-reduced first term: exact ||score||^2 / 2.
    loss1 = (torch.sum((grad1 * grad1), dim=(- 1)) / 2.0)
    loss2 = torch.sum((vectors * grad2).view(dup_samples.shape[0], (- 1)), dim=(- 1))
    loss1 = loss1.view(n_particles, (- 1)).mean(dim=0)
    loss2 = loss2.view(n_particles, (- 1)).mean(dim=0)
    # Anneal: weight each sample's loss by its sigma^2.
    loss = ((loss1 + loss2) * (used_sigmas.squeeze() ** 2))
    return loss.mean(dim=0)
|
def parse_args_and_config():
    """Parse CLI arguments, load the YAML config, and set up logging/seeds.

    Returns:
        (args, config): the argparse namespace and the config namespace.

    Side effects: (re)creates the log directory in training mode,
    configures the root logger, and seeds torch/numpy.
    """
    parser = argparse.ArgumentParser(description=globals()['__doc__'])
    parser.add_argument('--runner', type=str, default='AnnealRunner', help='The runner to execute')
    parser.add_argument('--config', type=str, default='anneal.yml', help='Path to the config file')
    parser.add_argument('--seed', type=int, default=1234, help='Random seed')
    parser.add_argument('--run', type=str, default='run', help='Path for saving running related data.')
    parser.add_argument('--doc', type=str, default='0', help='A string for documentation purpose')
    parser.add_argument('--comment', type=str, default='', help='A string for experiment comment')
    parser.add_argument('--verbose', type=str, default='info', help='Verbose level: info | debug | warning | critical')
    parser.add_argument('--test', action='store_true', help='Whether to test the model')
    parser.add_argument('--resume_training', action='store_true', help='Whether to resume training')
    parser.add_argument('-o', '--image_folder', type=str, default='images', help='The directory of image outputs')
    args = parser.parse_args()
    args.log = os.path.join(args.run, 'logs', args.doc)
    if not args.test:
        # Fresh run: read the source config and convert it to a namespace.
        # FIX: yaml.load without a Loader is deprecated since PyYAML 5.1 and
        # a TypeError in PyYAML 6; FullLoader matches the modern default.
        with open(os.path.join('ncsn/configs', args.config), 'r') as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        new_config = dict2namespace(config)
    else:
        # Test mode loads back the config dumped below, which contains
        # python-object tags (argparse.Namespace) and therefore needs the
        # unsafe loader.  Only ever load configs this program wrote itself.
        with open(os.path.join(args.log, 'config.yml'), 'r') as f:
            config = yaml.load(f, Loader=yaml.UnsafeLoader)
        new_config = config
    # Logging: stream handler always; a file handler only in training mode.
    level = getattr(logging, args.verbose.upper(), None)
    if not isinstance(level, int):
        raise ValueError('level {} not supported'.format(args.verbose))
    formatter = logging.Formatter('%(levelname)s - %(filename)s - %(asctime)s - %(message)s')
    handlers = [logging.StreamHandler()]
    if not args.test:
        if not args.resume_training:
            # Wipe any previous run logged under the same --doc tag.
            if os.path.exists(args.log):
                shutil.rmtree(args.log)
            os.makedirs(args.log)
            with open(os.path.join(args.log, 'config.yml'), 'w') as f:
                yaml.dump(new_config, f, default_flow_style=False)
        handlers.append(logging.FileHandler(os.path.join(args.log, 'stdout.txt')))
    logger = logging.getLogger()
    for handler in handlers:
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.setLevel(level)
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    logging.info('Using device: {}'.format(device))
    new_config.device = device
    # Seed everything for reproducibility.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.benchmark = True
    return (args, new_config)
|
def dict2namespace(config):
    """Recursively convert a (possibly nested) dict into an argparse.Namespace."""
    namespace = argparse.Namespace()
    for key, value in config.items():
        # Nested dicts become nested namespaces.
        attr = dict2namespace(value) if isinstance(value, dict) else value
        setattr(namespace, key, attr)
    return namespace
|
def main():
    """Entry point: parse args, build the requested runner, train or test.

    Returns:
        int: always 0; runner failures are logged rather than propagated.
    """
    (args, config) = parse_args_and_config()
    logging.info('Writing log file to {}'.format(args.log))
    logging.info('Exp instance id = {}'.format(os.getpid()))
    logging.info('Exp comment = {}'.format(args.comment))
    logging.info('Config =')
    print('>' * 80)
    print(config)
    print('<' * 80)
    try:
        # SECURITY: eval() on a CLI-provided name executes arbitrary code.
        # Acceptable for a research script run by its author; never expose
        # --runner to untrusted input.
        runner = eval(args.runner)(args, config)
        if not args.test:
            runner.train()
        else:
            runner.test()
    except Exception:
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; only ordinary exceptions are caught and logged now.
        logging.error(traceback.format_exc())
    return 0
|
class InceptionV3(nn.Module):
    """Pretrained InceptionV3 network returning feature maps."""

    # Index of the default block to return: output of final average pooling.
    DEFAULT_BLOCK_INDEX = 3

    # Maps feature dimensionality to the block index that produces it.
    BLOCK_INDEX_BY_DIM = {64: 0, 192: 1, 768: 2, 2048: 3}

    def __init__(self, output_blocks=(DEFAULT_BLOCK_INDEX,), resize_input=True, normalize_input=True, requires_grad=False):
        """Build pretrained InceptionV3.

        Parameters
        ----------
        output_blocks : sequence of int
            Indices of blocks to return features of. Possible values are:
            - 0: corresponds to output of first max pooling
            - 1: corresponds to output of second max pooling
            - 2: corresponds to output which is fed to aux classifier
            - 3: corresponds to output of final average pooling
        resize_input : bool
            If true, bilinearly resizes input to 299x299 before feeding it
            to the model. The convolutional trunk handles arbitrary sizes,
            so resizing may not be strictly needed.
        normalize_input : bool
            If true, rescales input from (0, 1) to the (-1, 1) range the
            pretrained Inception network expects.
        requires_grad : bool
            If true, parameters of the model require gradient (useful for
            finetuning).
        """
        super(InceptionV3, self).__init__()
        self.resize_input = resize_input
        self.normalize_input = normalize_input
        # FIX: the default used to be a mutable list ([3]); a tuple default
        # avoids the shared-mutable-default pitfall and behaves identically.
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)
        assert self.last_needed_block <= 3, 'Last possible output block index is 3'
        self.blocks = nn.ModuleList()
        # Downloads torchvision's ImageNet weights on first use.
        inception = models.inception_v3(pretrained=True)
        # Block 0: stem up to the first max pooling.
        block0 = [inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3, inception.Conv2d_2b_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]
        self.blocks.append(nn.Sequential(*block0))
        # Block 1: up to the second max pooling.
        if self.last_needed_block >= 1:
            block1 = [inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]
            self.blocks.append(nn.Sequential(*block1))
        # Block 2: mixed layers feeding the aux classifier.
        if self.last_needed_block >= 2:
            block2 = [inception.Mixed_5b, inception.Mixed_5c, inception.Mixed_5d, inception.Mixed_6a, inception.Mixed_6b, inception.Mixed_6c, inception.Mixed_6d, inception.Mixed_6e]
            self.blocks.append(nn.Sequential(*block2))
        # Block 3: final mixed layers plus global average pooling.
        if self.last_needed_block >= 3:
            block3 = [inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c, nn.AdaptiveAvgPool2d(output_size=(1, 1))]
            self.blocks.append(nn.Sequential(*block3))
        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, inp):
        """Get Inception feature maps.

        Parameters
        ----------
        inp : torch.Tensor
            Input tensor of shape Bx3xHxW with values in range (0, 1).

        Returns
        -------
        List of torch.Tensor for the selected output blocks, sorted
        ascending by block index.
        """
        outp = []
        x = inp
        if self.resize_input:
            x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=False)
        if self.normalize_input:
            # Map (0, 1) to the (-1, 1) range the pretrained weights expect.
            x = 2 * x - 1
        for idx, block in enumerate(self.blocks):
            x = block(x)
            if idx in self.output_blocks:
                outp.append(x)
            # Stop early once the deepest requested block is reached.
            if idx == self.last_needed_block:
                break
        return outp
|
def get_norm_layer(norm_type='instance'):
    """Return a normalization-layer factory.

    Parameters:
        norm_type (str) -- the name of the normalization layer: batch | instance | none

    BatchNorm uses learnable affine parameters and tracks running
    statistics; InstanceNorm uses neither.
    """
    factories = {
        'batch': functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True),
        'instance': functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False),
        'none': None,
    }
    if norm_type not in factories:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return factories[norm_type]
|
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler.

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a
                              subclass of BaseOptions. opt.lr_policy is the
                              policy name: linear | step | plateau | cosine

    For 'linear', the learning rate stays constant for the first <opt.niter>
    epochs and decays linearly to zero over the next <opt.niter_decay> epochs.
    The other policies use the default PyTorch schedulers; see
    https://pytorch.org/docs/stable/optim.html for details.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            # Multiplier ramps from 1 down to 0 after the constant phase.
            lr_l = 1.0 - max(0, (epoch + opt.epoch_count) - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        # FIX: the original *returned* a NotImplementedError instance (with
        # printf-style args that were never formatted) instead of raising it,
        # so callers received an exception object as the "scheduler".
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
|
def init_weights(net, init_type='normal', init_gain=0.02):
    """Initialize network weights in place.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.

    'normal' is used in the original pix2pix and CycleGAN paper; xavier and
    kaiming may work better for some applications — feel free to try.
    """
    def init_func(m):
        classname = m.__class__.__name__
        is_conv_or_linear = hasattr(m, 'weight') and (
            classname.find('Conv') != -1 or classname.find('Linear') != -1)
        if is_conv_or_linear:
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            # BatchNorm affine params: weight ~ N(1, gain), bias = 0.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(init_func)
|
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Register the device(s) for a network and initialize its weights.

    Parameters:
        net (network)      -- the network to be initialized
        init_type (str)    -- normal | xavier | kaiming | orthogonal
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on, e.g. [0, 1, 2]

    Returns the initialized network (wrapped in DataParallel when GPUs are
    given).
    """
    if gpu_ids:
        assert torch.cuda.is_available()
        # Move to the first GPU, then replicate across the rest.
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)
    init_weights(net, init_type, init_gain=init_gain)
    return net
|
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Create and initialize a generator.

    Parameters:
        input_nc (int)     -- the number of channels in input images
        output_nc (int)    -- the number of channels in output images
        ngf (int)          -- the number of filters in the last conv layer
        netG (str)         -- resnet_9blocks | resnet_6blocks | unet_256 | unet_128
        norm (str)         -- batch | instance | none
        use_dropout (bool) -- if use dropout layers.
        init_type (str)    -- the name of our initialization method.
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on, e.g. [0, 1, 2]

    Two families are supported: U-Net generators (unet_128 for 128x128
    inputs, unet_256 for 256x256 inputs; https://arxiv.org/abs/1505.04597)
    and ResNet generators (resnet_6blocks / resnet_9blocks, adapted from
    Justin Johnson's fast-neural-style,
    https://github.com/jcjohnson/fast-neural-style).  The result is
    initialized by <init_net> and uses ReLU non-linearities.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    if netG == 'resnet_9blocks':
        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
    elif netG == 'resnet_6blocks':
        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
    elif netG == 'unet_128':
        # 7 downsampling levels suit 128x128 inputs.
        net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    elif netG == 'unet_256':
        # 8 downsampling levels suit 256x256 inputs.
        net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    else:
        raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
    return init_net(net, init_type, init_gain, gpu_ids)
|
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Create and initialize a discriminator.

    Parameters:
        input_nc (int)     -- the number of channels in input images
        ndf (int)          -- the number of filters in the first conv layer
        netD (str)         -- basic | n_layers | pixel
        n_layers_D (int)   -- conv layers in the discriminator; effective when netD == 'n_layers'
        norm (str)         -- the type of normalization layers used in the network.
        init_type (str)    -- the name of the initialization method.
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on, e.g. [0, 1, 2]

    [basic] is the 70x70 PatchGAN classifier from the original pix2pix
    paper; [n_layers] is the same architecture with a configurable depth
    <n_layers_D>; [pixel] is a 1x1 PixelGAN that classifies individual
    pixels.  The result is initialized by <init_net> and uses LeakyReLU
    non-linearities.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    if netD == 'basic':
        net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
    elif netD == 'n_layers':
        net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
    elif netD == 'pixel':
        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
    else:
        # FIX: the message previously formatted `net` (always None on this
        # path) instead of the requested architecture name `netD`.
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
    return init_net(net, init_type, init_gain, gpu_ids)
|
class GANLoss(nn.Module):
    """GAN objective wrapper (vanilla | lsgan | wgangp).

    Abstracts away creating a target-label tensor with the same size as
    the discriminator's prediction.
    """

    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        """Initialize the GANLoss class.

        Parameters:
            gan_mode (str)            -- vanilla | lsgan | wgangp
            target_real_label (float) -- label for a real image
            target_fake_label (float) -- label for a fake image

        Note: do not use sigmoid as the discriminator's last layer. LSGAN
        needs no sigmoid; vanilla GANs handle it with BCEWithLogitsLoss.
        """
        super(GANLoss, self).__init__()
        # Buffers follow the module across devices without being parameters.
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.gan_mode = gan_mode
        if gan_mode == 'lsgan':
            self.loss = nn.MSELoss()
        elif gan_mode == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif gan_mode in ['wgangp']:
            # Wasserstein loss is computed directly in __call__.
            self.loss = None
        else:
            raise NotImplementedError('gan mode %s not implemented' % gan_mode)

    def get_target_tensor(self, prediction, target_is_real):
        """Return a label tensor shaped like *prediction*.

        Parameters:
            prediction (tensor)   -- typically the discriminator output
            target_is_real (bool) -- whether the ground truth is "real"
        """
        label = self.real_label if target_is_real else self.fake_label
        return label.expand_as(prediction)

    def __call__(self, prediction, target_is_real):
        """Compute the loss for a discriminator prediction.

        Parameters:
            prediction (tensor)   -- typically the discriminator output
            target_is_real (bool) -- whether the ground truth is "real"

        Returns:
            the calculated loss tensor.
        """
        if self.gan_mode in ['lsgan', 'vanilla']:
            target_tensor = self.get_target_tensor(prediction, target_is_real)
            loss = self.loss(prediction, target_tensor)
        elif self.gan_mode == 'wgangp':
            loss = -prediction.mean() if target_is_real else prediction.mean()
        return loss
|
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028\n\n Arguments:\n netD (network) -- discriminator network\n real_data (tensor array) -- real images\n fake_data (tensor array) -- generated images from the generator\n device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')\n type (str) -- if we mix real and fake data or not [real | fake | mixed].\n constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2\n lambda_gp (float) -- weight for this loss\n\n Returns the gradient penalty loss\n "
if (lambda_gp > 0.0):
if (type == 'real'):
interpolatesv = real_data
elif (type == 'fake'):
interpolatesv = fake_data
elif (type == 'mixed'):
alpha = torch.rand(real_data.shape[0], 1)
alpha = alpha.expand(real_data.shape[0], (real_data.nelement() // real_data.shape[0])).contiguous().view(*real_data.shape)
alpha = alpha.to(device)
interpolatesv = ((alpha * real_data) + ((1 - alpha) * fake_data))
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv, grad_outputs=torch.ones(disc_interpolates.size()).to(device), create_graph=True, retain_graph=True, only_inputs=True)
gradients = gradients[0].view(real_data.size(0), (- 1))
gradient_penalty = ((((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp)
return (gradient_penalty, gradients)
else:
return (0.0, None)
|
class ResnetGenerator(nn.Module):
    """Resnet-based generator: Resnet blocks between a few
    downsampling/upsampling stages.

    Adapted from Justin Johnson's neural style transfer project
    (https://github.com/jcjohnson/fast-neural-style).
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
        """Construct a Resnet-based generator.

        Parameters:
            input_nc (int)     -- number of channels in input images
            output_nc (int)    -- number of channels in output images
            ngf (int)          -- number of filters in the last conv layer
            norm_layer         -- normalization layer
            use_dropout (bool) -- whether to use dropout inside the blocks
            n_blocks (int)     -- number of ResNet blocks
            padding_type (str) -- conv padding: reflect | replicate | zero
        """
        assert n_blocks >= 0
        super(ResnetGenerator, self).__init__()
        # InstanceNorm has no affine terms by default, so convs keep a bias then.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        layers = [nn.ReflectionPad2d(3),
                  nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                  norm_layer(ngf),
                  nn.ELU()]

        n_downsampling = 2
        # Downsample: each stride-2 conv doubles the channel count.
        for i in range(n_downsampling):
            ch = ngf * (2 ** i)
            layers += [nn.Conv2d(ch, ch * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                       norm_layer(ch * 2),
                       nn.ELU()]

        bottleneck = ngf * (2 ** n_downsampling)
        for _ in range(n_blocks):
            layers.append(ResnetBlock(bottleneck, padding_type=padding_type, norm_layer=norm_layer,
                                      use_dropout=use_dropout, use_bias=use_bias))

        # Upsample back to the input resolution, halving channels each step.
        for i in range(n_downsampling):
            ch = ngf * (2 ** (n_downsampling - i))
            layers += [nn.ConvTranspose2d(ch, int(ch / 2), kernel_size=3, stride=2, padding=1,
                                          output_padding=1, bias=use_bias),
                       norm_layer(int(ch / 2)),
                       nn.ELU()]

        layers += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)
|
class ResnetBlock(nn.Module):
    """Define a Resnet block (conv block plus identity skip connection)."""

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Initialize the Resnet block.

        The conv block is built by `build_conv_block`; the skip connection
        is applied in `forward`.
        Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
        """
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Construct the convolutional block.

        Parameters:
            dim (int)          -- number of channels in the conv layers
            padding_type (str) -- reflect | replicate | zero
            norm_layer         -- normalization layer
            use_dropout (bool) -- whether to insert a Dropout(0.5)
            use_bias (bool)    -- whether the convs use a bias

        Returns an nn.Sequential of (pad, conv, norm, ELU[, dropout], pad, conv, norm).
        """
        def padding():
            # Returns ([explicit padding modules], conv `padding` argument).
            if padding_type == 'reflect':
                return [nn.ReflectionPad2d(1)], 0
            if padding_type == 'replicate':
                return [nn.ReplicationPad2d(1)], 0
            if padding_type == 'zero':
                return [], 1
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        pad_mods, p = padding()
        block = pad_mods + [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                            norm_layer(dim), nn.ELU()]
        if use_dropout:
            block.append(nn.Dropout(0.5))
        pad_mods, p = padding()
        block += pad_mods + [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                             norm_layer(dim)]
        return nn.Sequential(*block)

    def forward(self, x):
        """Forward with identity skip connection."""
        return x + self.conv_block(x)
|
class UnetGenerator(nn.Module):
    """Create a Unet-based generator."""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet generator.

        Parameters:
            input_nc (int)  -- number of channels in input images
            output_nc (int) -- number of channels in output images
            num_downs (int) -- number of downsamplings; with num_downs == 7 a
                               128x128 image becomes 1x1 at the bottleneck
            ngf (int)       -- number of filters in the last conv layer
            norm_layer      -- normalization layer

        The U-Net is assembled recursively from the innermost block outward.
        """
        super(UnetGenerator, self).__init__()
        # Bottleneck block.
        block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None,
                                        norm_layer=norm_layer, innermost=True)
        # Extra ngf*8 layers (with optional dropout) for deeper nets.
        for _ in range(num_downs - 5):
            block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=block,
                                            norm_layer=norm_layer, use_dropout=use_dropout)
        # Progressively halve the filter count toward the outside.
        for outer in (ngf * 4, ngf * 2, ngf):
            block = UnetSkipConnectionBlock(outer, outer * 2, input_nc=None, submodule=block,
                                            norm_layer=norm_layer)
        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=block,
                                             outermost=True, norm_layer=norm_layer)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)
|
class UnetSkipConnectionBlockWithResNet(nn.Module):
    """Unet submodule with a skip connection and a ResnetBlock on each side.
    X -------------------identity----------------------
    |-- downsampling -- |submodule| -- upsampling --|
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet submodule with skip connections.

        Parameters:
            outer_nc (int)     -- number of filters in the outer conv layer
            inner_nc (int)     -- number of filters in the inner conv layer
            input_nc (int)     -- number of input channels (defaults to outer_nc)
            submodule          -- previously constructed inner submodule
            outermost (bool)   -- whether this is the outermost block
            innermost (bool)   -- whether this is the innermost block
            norm_layer         -- normalization layer
            use_dropout (bool) -- whether to append a Dropout(0.5)
        """
        super().__init__()
        self.outermost = outermost
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc

        down_conv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
        # Residual refinement blocks on each side of the submodule.
        down_res = ResnetBlock(inner_nc, padding_type='reflect', norm_layer=nn.InstanceNorm2d,
                               use_dropout=False, use_bias=True)
        up_res = ResnetBlock(outer_nc, 'reflect', nn.InstanceNorm2d, False, True)
        down_act = nn.ELU()
        down_norm = norm_layer(inner_nc)
        up_act = nn.ELU()
        up_norm = norm_layer(outer_nc)

        if outermost:
            up_conv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
            model = [down_conv, down_act, down_res, down_act,
                     submodule,
                     up_act, up_conv, up_act, up_res]
        elif innermost:
            up_conv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            model = [down_act, down_conv, down_act, down_res,
                     up_act, up_conv, up_norm, up_act, up_res]
        else:
            up_conv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            model = [down_act, down_conv, down_norm, down_act, down_res,
                     submodule,
                     up_act, up_conv, up_norm, up_act, up_res]
            if use_dropout:
                model.append(nn.Dropout(0.5))
        self.model = nn.Sequential(*model)

    def forward(self, x):
        # The outermost block returns its output directly; inner blocks
        # concatenate the block input (skip connection) with their output.
        if self.outermost:
            return self.model(x)
        return torch.cat([x, self.model(x)], 1)
|
class UnetSkipConnectionBlock(nn.Module):
    """Unet submodule with a skip connection.
    X -------------------identity----------------------
    |-- downsampling -- |submodule| -- upsampling --|
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet submodule with skip connections.

        Parameters:
            outer_nc (int)     -- number of filters in the outer conv layer
            inner_nc (int)     -- number of filters in the inner conv layer
            input_nc (int)     -- number of input channels (defaults to outer_nc)
            submodule          -- previously constructed inner submodule
            outermost (bool)   -- whether this is the outermost block
            innermost (bool)   -- whether this is the innermost block
            norm_layer         -- normalization layer
            use_dropout (bool) -- whether to append a Dropout(0.5)
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm has no affine parameters by default, so keep conv biases.
        norm_cls = norm_layer.func if type(norm_layer) == functools.partial else norm_layer
        use_bias = norm_cls == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc

        down_conv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
        down_act = nn.ELU()
        down_norm = norm_layer(inner_nc)
        up_act = nn.ELU()
        up_norm = norm_layer(outer_nc)

        if outermost:
            up_conv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
            model = [down_conv, submodule, up_act, up_conv]
        elif innermost:
            up_conv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            model = [down_act, down_conv, up_act, up_conv, up_norm]
        else:
            up_conv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            model = [down_act, down_conv, down_norm, submodule, up_act, up_conv, up_norm]
            if use_dropout:
                model.append(nn.Dropout(0.5))
        self.model = nn.Sequential(*model)

    def forward(self, x):
        # Inner blocks concatenate input and output (the skip connection);
        # the outermost block returns its output directly.
        if self.outermost:
            return self.model(x)
        return torch.cat([x, self.model(x)], 1)
|
class NLayerDiscriminator(nn.Module):
    """Defines a PatchGAN discriminator."""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
        """Construct a PatchGAN discriminator.

        Parameters:
            input_nc (int) -- number of channels in input images
            ndf (int)      -- number of filters in the first conv layer
            n_layers (int) -- number of stride-2 conv layers
            norm_layer     -- normalization layer
        """
        super(NLayerDiscriminator, self).__init__()
        # BatchNorm carries its own affine bias, so convs skip theirs then.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func != nn.BatchNorm2d
        else:
            use_bias = norm_layer != nn.BatchNorm2d

        kernel, pad = 4, 1
        layers = [nn.Conv2d(input_nc, ndf, kernel_size=kernel, stride=2, padding=pad), nn.ELU()]
        mult = 1
        # Stride-2 pyramid, capping the channel multiplier at 8.
        for depth in range(1, n_layers):
            prev, mult = mult, min(2 ** depth, 8)
            layers += [nn.Conv2d(ndf * prev, ndf * mult, kernel_size=kernel, stride=2, padding=pad, bias=use_bias),
                       norm_layer(ndf * mult),
                       nn.ELU()]
        prev, mult = mult, min(2 ** n_layers, 8)
        layers += [nn.Conv2d(ndf * prev, ndf * mult, kernel_size=kernel, stride=1, padding=pad, bias=use_bias),
                   norm_layer(ndf * mult),
                   nn.ELU()]
        # One-channel prediction map: one logit per receptive-field patch.
        layers.append(nn.Conv2d(ndf * mult, 1, kernel_size=kernel, stride=1, padding=pad))
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)
|
class PixelDiscriminator(nn.Module):
    """Defines a 1x1 PatchGAN discriminator (pixelGAN)."""

    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
        """Construct a 1x1 PatchGAN discriminator.

        Parameters:
            input_nc (int) -- number of channels in input images
            ndf (int)      -- number of filters in the first conv layer
            norm_layer     -- normalization layer
        """
        super(PixelDiscriminator, self).__init__()
        norm_cls = norm_layer.func if type(norm_layer) == functools.partial else norm_layer
        use_bias = norm_cls != nn.InstanceNorm2d
        # All convolutions are 1x1: the decision is made per pixel.
        self.net = nn.Sequential(
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.ELU(),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.ELU(),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias),
        )

    def forward(self, input):
        """Standard forward."""
        return self.net(input)
|
class ConvResBlock(nn.Module):
    """Conv residual block with GroupNorm; optionally downsamples by 2.

    When `resize` is False the input itself is the skip connection, so
    in_channel must equal out_channel in that case; when True a stride-2
    projection conv is applied to the skip path instead.
    """

    def __init__(self, in_channel, out_channel, resize=False, act='relu'):
        super().__init__()
        self.resize = resize

        def make_act():
            # A fresh activation instance per call site; None for unknown names.
            factories = {'relu': lambda: nn.ReLU(inplace=True),
                         'softplus': nn.Softplus,
                         'elu': nn.ELU,
                         'leakyrelu': lambda: nn.LeakyReLU(0.2, inplace=True)}
            factory = factories.get(act)
            return factory() if factory is not None else None

        stride = 2 if resize else 1
        self.main = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, 3, stride=stride, padding=1),
            nn.GroupNorm(8, out_channel),
            make_act(),
            nn.Conv2d(out_channel, out_channel, 3, stride=1, padding=1),
            nn.GroupNorm(8, out_channel),
        )
        # Stride-2 skip projection (only used in forward when resizing).
        self.residual = nn.Conv2d(in_channel, out_channel, 3, stride=2, padding=1)
        self.final_act = make_act()

    def forward(self, inputs):
        h = self.main(inputs)
        if self.resize:
            h = h + self.residual(inputs)
        else:
            h = h + inputs
        return self.final_act(h)
|
class DeconvResBlock(nn.Module):
    """Transposed-conv residual block with GroupNorm; optionally upsamples by 2.

    When `resize` is False the input itself is the skip connection, so
    in_channel must equal out_channel in that case; when True a stride-2
    transposed-conv projection is applied to the skip path instead.
    """

    def __init__(self, in_channel, out_channel, resize=False, act='relu'):
        super().__init__()
        self.resize = resize

        def make_act():
            # A fresh activation instance per call site; None for unknown names.
            factories = {'relu': lambda: nn.ReLU(inplace=True),
                         'softplus': nn.Softplus,
                         'elu': nn.ELU,
                         'leakyrelu': lambda: nn.LeakyReLU(0.2, True)}
            factory = factories.get(act)
            return factory() if factory is not None else None

        layers = [nn.ConvTranspose2d(in_channel, out_channel, 3, stride=1, padding=1),
                  nn.GroupNorm(8, out_channel),
                  make_act()]
        if resize:
            # Second deconv performs the 2x upsampling.
            layers.append(nn.ConvTranspose2d(out_channel, out_channel, 3, stride=2, padding=1, output_padding=1))
        else:
            layers.append(nn.ConvTranspose2d(out_channel, out_channel, 3, stride=1, padding=1))
        layers.append(nn.GroupNorm(8, out_channel))
        self.main = nn.Sequential(*layers)
        # Stride-2 skip projection (only used in forward when resizing).
        self.residual = nn.ConvTranspose2d(in_channel, out_channel, 3, stride=2, padding=1, output_padding=1)
        self.final_act = make_act()

    def forward(self, inputs):
        h = self.main(inputs)
        if self.resize:
            h = h + self.residual(inputs)
        else:
            h = h + inputs
        return self.final_act(h)
|
class ResScore(nn.Module):
    """Score network: ConvResBlock encoder followed by a DeconvResBlock decoder."""

    def __init__(self, config):
        super().__init__()
        self.nef = config.model.nef
        self.ndf = config.model.ndf
        act = 'elu'
        nef, ndf = self.nef, self.ndf
        # Encoder: 3-channel input, two stride-2 reductions.
        self.convs = nn.Sequential(
            nn.Conv2d(3, nef, 3, 1, 1),
            ConvResBlock(nef, nef, act=act),
            ConvResBlock(nef, 2 * nef, resize=True, act=act),
            ConvResBlock(2 * nef, 2 * nef, act=act),
            ConvResBlock(2 * nef, 4 * nef, resize=True, act=act),
            ConvResBlock(4 * nef, 4 * nef, act=act),
        )
        # Decoder: mirrors the encoder back to 3 channels.
        self.deconvs = nn.Sequential(
            DeconvResBlock(4 * ndf, 4 * ndf, act=act),
            DeconvResBlock(4 * ndf, 2 * ndf, resize=True, act=act),
            DeconvResBlock(2 * ndf, 2 * ndf, act=act),
            DeconvResBlock(2 * ndf, ndf, resize=True, act=act),
            DeconvResBlock(ndf, ndf, act=act),
            nn.Conv2d(ndf, 3, 3, 1, 1),
        )

    def forward(self, x):
        # Map inputs from [0, 1] to [-1, 1] before scoring.
        x = 2 * x - 1.0
        return self.deconvs(self.convs(x))
|
class ResNetScore(nn.Module):
    """Resnet-based score network (same layout as ResnetGenerator but with a
    single downsampling stage and [-1, 1] input rescaling).

    Adapted from Justin Johnson's neural style transfer project
    (https://github.com/jcjohnson/fast-neural-style).
    """

    def __init__(self, config):
        """Build the network from `config` (data.channels, model.ngf)."""
        super().__init__()
        input_nc = output_nc = config.data.channels
        ngf = config.model.ngf * 2
        n_blocks = 6
        norm_layer = get_norm_layer('instance')
        use_dropout = False
        padding_type = 'reflect'
        assert n_blocks >= 0
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        layers = [nn.ReflectionPad2d(3),
                  nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                  norm_layer(ngf),
                  nn.ELU()]
        n_downsampling = 1
        for i in range(n_downsampling):
            ch = ngf * (2 ** i)
            layers += [nn.Conv2d(ch, ch * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                       norm_layer(ch * 2),
                       nn.ELU()]
        bottleneck = ngf * (2 ** n_downsampling)
        for _ in range(n_blocks):
            layers.append(ResnetBlock(bottleneck, padding_type=padding_type, norm_layer=norm_layer,
                                      use_dropout=use_dropout, use_bias=use_bias))
        for i in range(n_downsampling):
            ch = ngf * (2 ** (n_downsampling - i))
            layers += [nn.ConvTranspose2d(ch, int(ch / 2), kernel_size=3, stride=2, padding=1,
                                          output_padding=1, bias=use_bias),
                       norm_layer(int(ch / 2)),
                       nn.ELU()]
        layers += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Rescale input from [0, 1] to [-1, 1] and run the network."""
        input = 2 * input - 1.0
        return self.model(input)
|
class UNetResScore(nn.Module):
    """U-Net score network assembled from UnetSkipConnectionBlockWithResNet."""

    def __init__(self, config):
        """Build the U-Net recursively from the innermost block outward."""
        super().__init__()
        input_nc = output_nc = config.data.channels
        ngf = config.model.ngf
        self.config = config
        norm_layer = get_norm_layer('instance')
        block = UnetSkipConnectionBlockWithResNet(ngf * 8, ngf * 8, input_nc=None, submodule=None,
                                                  norm_layer=norm_layer, innermost=True)
        block = UnetSkipConnectionBlockWithResNet(ngf * 4, ngf * 8, input_nc=None, submodule=block,
                                                  norm_layer=norm_layer)
        block = UnetSkipConnectionBlockWithResNet(ngf * 2, ngf * 4, input_nc=None, submodule=block,
                                                  norm_layer=norm_layer)
        self.model = UnetSkipConnectionBlockWithResNet(output_nc, ngf * 2, input_nc=input_nc,
                                                       submodule=block, outermost=True,
                                                       norm_layer=norm_layer)

    def forward(self, input):
        """Rescale to [-1, 1] unless the data is already logit-transformed."""
        if not self.config.data.logit_transform:
            input = 2 * input - 1.0
        return self.model(input)
|
class UNetScore(nn.Module):
    """U-Net score network assembled from UnetSkipConnectionBlock modules."""

    def __init__(self, config):
        """Build the U-Net recursively, sized for 32x32 or 16x16 images.

        NOTE(review): only image_size 32 and 16 are handled; any other size
        leaves `unet_block` unbound and raises NameError below — confirm
        whether other sizes should be supported.
        """
        super().__init__()
        input_nc = output_nc = config.data.channels
        ngf = config.model.ngf
        self.config = config
        norm_layer = get_norm_layer('instance')
        if config.data.image_size == 32:
            # One extra ngf*8 level so the 32x32 input reaches a 1x1 bottleneck.
            unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None,
                                                 norm_layer=norm_layer, innermost=True)
            unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None,
                                                 submodule=unet_block, norm_layer=norm_layer)
        elif config.data.image_size == 16:
            unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None,
                                                 norm_layer=norm_layer, innermost=True)
        unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None,
                                             submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None,
                                             submodule=unet_block, norm_layer=norm_layer)
        self.model = UnetSkipConnectionBlock(output_nc, ngf * 2, input_nc=input_nc,
                                             submodule=unet_block, outermost=True,
                                             norm_layer=norm_layer)

    def forward(self, input):
        """Rescale to [-1, 1] unless the data is already logit-transformed."""
        if not self.config.data.logit_transform:
            input = 2 * input - 1.0
        return self.model(input)
|
class ResEnergy(nn.Module):
    """Energy network: ConvResBlock encoder reduced to one scalar per sample."""

    def __init__(self, config):
        super().__init__()
        self.nef = config.model.nef
        self.ndf = config.model.ndf
        act = 'softplus'
        nef = self.nef
        # Encoder over single-channel input with two stride-2 reductions.
        self.convs = nn.Sequential(
            nn.Conv2d(1, nef, 3, 1, 1),
            ConvResBlock(nef, nef, act=act),
            ConvResBlock(nef, 2 * nef, resize=True, act=act),
            ConvResBlock(2 * nef, 2 * nef, act=act),
            ConvResBlock(2 * nef, 4 * nef, resize=True, act=act),
            ConvResBlock(4 * nef, 4 * nef, act=act),
        )

    def forward(self, x):
        # Rescale [0, 1] input to [-1, 1], then average all features per sample.
        x = 2 * x - 1.0
        features = self.convs(x)
        return features.view(features.shape[0], -1).mean(dim=-1)
|
class MLPScore(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.main = nn.Sequential(nn.Linear((10 * 10), 1024), nn.LayerNorm(1024), nn.ELU(), nn.Linear(1024, 1024), nn.LayerNorm(1024), nn.ELU(), nn.Linear(1024, 512), nn.LayerNorm(512), nn.ELU(), nn.Linear(512, 100), nn.LayerNorm(100))
def forward(self, x):
x = x.view(x.shape[0], (- 1))
if (x.is_cuda and (self.config.training.ngpu > 1)):
score = nn.parallel.data_parallel(self.main, x, list(range(self.config.training.ngpu)))
else:
score = self.main(x)
return score.view(x.shape[0], 1, 10, 10)
|
class LargeScore(nn.Module):
    """Conv encoder/decoder score network with a large (16x16) first kernel
    and a fully connected refinement head.

    NOTE(review): the fc head requires the u_net output to flatten to
    channels*28*28, which with this conv stack corresponds to ~38x38 inputs —
    confirm the intended input resolution.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        nef = config.model.nef
        channels = config.data.channels
        self.u_net = nn.Sequential(
            nn.Conv2d(channels, nef, 16, stride=2, padding=2),
            nn.GroupNorm(4, nef), nn.ELU(),
            nn.Conv2d(nef, nef * 2, 4, stride=2, padding=1),
            nn.GroupNorm(4, nef * 2), nn.ELU(),
            nn.Conv2d(nef * 2, nef * 4, 5, stride=1, padding=0),
            nn.GroupNorm(4, nef * 4), nn.ELU(),
            nn.ConvTranspose2d(nef * 4, nef * 2, 5, stride=1, padding=0),
            nn.GroupNorm(4, nef * 2), nn.ELU(),
            nn.ConvTranspose2d(nef * 2, nef, 4, stride=2, padding=1),
            nn.GroupNorm(4, nef), nn.ELU(),
            nn.ConvTranspose2d(nef, channels, 4, stride=2, padding=1),
            nn.ELU(),
        )
        self.fc = nn.Sequential(
            nn.Linear(channels * 28 * 28, 1024),
            nn.LayerNorm(1024), nn.ELU(),
            nn.Linear(1024, channels * 28 * 28),
        )

    def forward(self, x):
        if x.is_cuda and self.config.training.ngpu > 1:
            score = nn.parallel.data_parallel(self.u_net, x, list(range(self.config.training.ngpu)))
        else:
            score = self.u_net(x)
        out = self.fc(score.view(x.shape[0], -1))
        return out.view(x.shape[0], self.config.data.channels, 28, 28)
|
class Score(nn.Module):
    """Conv encoder/decoder score network for (channels)x28x28 inputs, with a
    fully connected refinement head."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        nef = config.model.nef
        channels = config.data.channels
        self.u_net = nn.Sequential(
            nn.Conv2d(channels, nef, 4, stride=2, padding=1),
            nn.GroupNorm(4, nef), nn.ELU(),
            nn.Conv2d(nef, nef * 2, 4, stride=2, padding=1),
            nn.GroupNorm(4, nef * 2), nn.ELU(),
            nn.Conv2d(nef * 2, nef * 4, 5, stride=1, padding=0),
            nn.GroupNorm(4, nef * 4), nn.ELU(),
            nn.ConvTranspose2d(nef * 4, nef * 2, 5, stride=1, padding=0),
            nn.GroupNorm(4, nef * 2), nn.ELU(),
            nn.ConvTranspose2d(nef * 2, nef, 4, stride=2, padding=1),
            nn.GroupNorm(4, nef), nn.ELU(),
            nn.ConvTranspose2d(nef, channels, 4, stride=2, padding=1),
            nn.ELU(),
        )
        # The head assumes the u_net output flattens to channels*28*28.
        self.fc = nn.Sequential(
            nn.Linear(channels * 28 * 28, 1024),
            nn.LayerNorm(1024), nn.ELU(),
            nn.Linear(1024, channels * 28 * 28),
        )

    def forward(self, x):
        if x.is_cuda and self.config.training.ngpu > 1:
            score = nn.parallel.data_parallel(self.u_net, x, list(range(self.config.training.ngpu)))
        else:
            score = self.u_net(x)
        out = self.fc(score.view(x.shape[0], -1))
        return out.view(x.shape[0], self.config.data.channels, 28, 28)
|
class SmallScore(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
nef = (config.model.nef * 4)
self.u_net = nn.Sequential(nn.Conv2d(config.data.channels, nef, 4, stride=2, padding=1), nn.GroupNorm(4, nef), nn.ELU(), nn.Conv2d(nef, (nef * 2), 3, stride=1, padding=1), nn.GroupNorm(4, (nef * 2)), nn.ELU(), nn.ConvTranspose2d((nef * 2), nef, 3, stride=1, padding=1), nn.GroupNorm(4, nef), nn.ELU(), nn.ConvTranspose2d(nef, config.data.channels, 4, stride=2, padding=1), nn.ELU())
self.fc = nn.Sequential(nn.Linear((config.data.channels * (10 ** 2)), 256), nn.LayerNorm(256), nn.ELU(), nn.Linear(256, (config.data.channels * (10 ** 2))))
def forward(self, x):
if (x.is_cuda and (self.config.training.ngpu > 1)):
score = nn.parallel.data_parallel(self.u_net, x, list(range(self.config.training.ngpu)))
else:
score = self.u_net(x)
score = self.fc(score.view(x.shape[0], (- 1))).view(x.shape[0], self.config.data.channels, 10, 10)
return score
|
class BaselineRunner():
def __init__(self, args, config):
    # Keep the CLI arguments and parsed config for the other methods.
    self.args, self.config = args, config
def get_optimizer(self, parameters):
    """Build the optimizer named in config.optim.optimizer for `parameters`.

    Raises:
        NotImplementedError -- for optimizer names other than
        'Adam' | 'RMSProp' | 'SGD'.
    """
    cfg = self.config.optim
    name = cfg.optimizer
    if name == 'Adam':
        return optim.Adam(parameters, lr=cfg.lr, weight_decay=cfg.weight_decay,
                          betas=(cfg.beta1, 0.999), amsgrad=cfg.amsgrad)
    if name == 'RMSProp':
        return optim.RMSprop(parameters, lr=cfg.lr, weight_decay=cfg.weight_decay)
    if name == 'SGD':
        # NOTE(review): weight_decay from the config is not applied for SGD.
        return optim.SGD(parameters, lr=cfg.lr, momentum=0.9)
    raise NotImplementedError('Optimizer {} not understood.'.format(name))
def logit_transform(self, image, lam=1e-06):
image = (lam + ((1 - (2 * lam)) * image))
return (torch.log(image) - torch.log1p((- image)))
def train(self):
    """Train a RefineNetDilated score model with denoising score matching.

    Builds the dataset named in the config (CIFAR10 / MNIST / CELEBA),
    optionally resumes from 'checkpoint.pth', and optimizes
    `dsm_score_estimation` at a fixed noise level sigma=0.01.  Train/test
    losses go to tensorboard; model and optimizer states are snapshotted
    every `snapshot_freq` steps.  Returns 0 once `n_iters` steps are done.
    """
    # Data augmentation: optional horizontal flip for the training split only.
    if (self.config.data.random_flip is False):
        tran_transform = test_transform = transforms.Compose([transforms.Resize(self.config.data.image_size), transforms.ToTensor()])
    else:
        tran_transform = transforms.Compose([transforms.Resize(self.config.data.image_size), transforms.RandomHorizontalFlip(p=0.5), transforms.ToTensor()])
        test_transform = transforms.Compose([transforms.Resize(self.config.data.image_size), transforms.ToTensor()])
    if (self.config.data.dataset == 'CIFAR10'):
        dataset = CIFAR10(os.path.join(self.args.run, 'datasets', 'cifar10'), train=True, download=True, transform=tran_transform)
        test_dataset = CIFAR10(os.path.join(self.args.run, 'datasets', 'cifar10_test'), train=False, download=True, transform=test_transform)
    elif (self.config.data.dataset == 'MNIST'):
        dataset = MNIST(os.path.join(self.args.run, 'datasets', 'mnist'), train=True, download=True, transform=tran_transform)
        test_dataset = MNIST(os.path.join(self.args.run, 'datasets', 'mnist_test'), train=False, download=True, transform=test_transform)
    elif (self.config.data.dataset == 'CELEBA'):
        if self.config.data.random_flip:
            dataset = CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba'), split='train', transform=transforms.Compose([transforms.CenterCrop(140), transforms.Resize(self.config.data.image_size), transforms.RandomHorizontalFlip(), transforms.ToTensor()]), download=True)
        else:
            dataset = CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba'), split='train', transform=transforms.Compose([transforms.CenterCrop(140), transforms.Resize(self.config.data.image_size), transforms.ToTensor()]), download=True)
        test_dataset = CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba_test'), split='test', transform=transforms.Compose([transforms.CenterCrop(140), transforms.Resize(self.config.data.image_size), transforms.ToTensor()]), download=True)
    dataloader = DataLoader(dataset, batch_size=self.config.training.batch_size, shuffle=True, num_workers=4)
    test_loader = DataLoader(test_dataset, batch_size=self.config.training.batch_size, shuffle=True, num_workers=4, drop_last=True)
    test_iter = iter(test_loader)
    self.config.input_dim = ((self.config.data.image_size ** 2) * self.config.data.channels)
    # Start each run with a clean tensorboard directory.
    tb_path = os.path.join(self.args.run, 'tensorboard', self.args.doc)
    if os.path.exists(tb_path):
        shutil.rmtree(tb_path)
    tb_logger = tensorboardX.SummaryWriter(log_dir=tb_path)
    score = RefineNetDilated(self.config).to(self.config.device)
    score = torch.nn.DataParallel(score)
    optimizer = self.get_optimizer(score.parameters())
    if self.args.resume_training:
        # Checkpoint layout: states[0] = model state, states[1] = optimizer state.
        states = torch.load(os.path.join(self.args.log, 'checkpoint.pth'))
        score.load_state_dict(states[0])
        optimizer.load_state_dict(states[1])
    step = 0
    for epoch in range(self.config.training.n_epochs):
        for (i, (X, y)) in enumerate(dataloader):
            step += 1
            score.train()
            X = X.to(self.config.device)
            # Add uniform noise in [0, 1/256) — dequantization of 8-bit pixels.
            X = (((X / 256.0) * 255.0) + (torch.rand_like(X) / 256.0))
            if self.config.data.logit_transform:
                X = self.logit_transform(X)
            # Denoising score matching loss at a fixed noise level.
            loss = dsm_score_estimation(score, X, sigma=0.01)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            tb_logger.add_scalar('loss', loss, global_step=step)
            logging.info('step: {}, loss: {}'.format(step, loss.item()))
            if (step >= self.config.training.n_iters):
                return 0
            if ((step % 100) == 0):
                # Periodic held-out DSM loss on a single test batch.
                score.eval()
                try:
                    (test_X, test_y) = next(test_iter)
                except StopIteration:
                    # Restart the test iterator once it is exhausted.
                    test_iter = iter(test_loader)
                    (test_X, test_y) = next(test_iter)
                test_X = test_X.to(self.config.device)
                test_X = (((test_X / 256.0) * 255.0) + (torch.rand_like(test_X) / 256.0))
                if self.config.data.logit_transform:
                    test_X = self.logit_transform(test_X)
                with torch.no_grad():
                    test_dsm_loss = dsm_score_estimation(score, test_X, sigma=0.01)
                tb_logger.add_scalar('test_dsm_loss', test_dsm_loss, global_step=step)
            if ((step % self.config.training.snapshot_freq) == 0):
                # Keep both a step-tagged snapshot and a rolling 'latest'.
                states = [score.state_dict(), optimizer.state_dict()]
                torch.save(states, os.path.join(self.args.log, 'checkpoint_{}.pth'.format(step)))
                torch.save(states, os.path.join(self.args.log, 'checkpoint.pth'))
def Langevin_dynamics(self, x_mod, scorenet, n_steps=1000, step_lr=2e-05):
    """Sample with unadjusted Langevin dynamics driven by a score network.

    Args:
        x_mod: Initial batch of images to refine in place (functionally).
        scorenet: Callable returning the estimated score of its input.
        n_steps: Number of Langevin update steps.
        step_lr: Step size of each Langevin update.

    Returns:
        List of the clamped, CPU-resident iterates recorded before each update.
    """
    trajectory = []
    noise_std = np.sqrt(2 * step_lr)
    with torch.no_grad():
        for _ in range(n_steps):
            # Record the current state, clipped to the valid image range.
            trajectory.append(torch.clamp(x_mod, 0.0, 1.0).to('cpu'))
            # x_{t+1} = x_t + eps * score(x_t) + sqrt(2 * eps) * z,  z ~ N(0, I)
            noise = torch.randn_like(x_mod) * noise_std
            grad = scorenet(x_mod)
            x_mod = x_mod + step_lr * grad + noise
            print('modulus of grad components: mean {}, max {}'.format(grad.abs().mean(), grad.abs().max()))
    return trajectory
def test(self):
    """Load the latest checkpoint and generate samples with Langevin dynamics.

    Restores the score network from ``checkpoint.pth``, builds a batch of
    uniform-noise images matching the configured dataset's shape, runs
    Langevin dynamics, and saves every recorded iterate to
    ``self.args.image_folder`` as ``samples_{step}.pth``.
    """
    states = torch.load(os.path.join(self.args.log, 'checkpoint.pth'), map_location=self.config.device)
    score = RefineNetDilated(self.config).to(self.config.device)
    score = torch.nn.DataParallel(score)
    score.load_state_dict(states[0])
    if not os.path.exists(self.args.image_folder):
        os.makedirs(self.args.image_folder)
    score.eval()
    dataset_name = self.config.data.dataset
    if dataset_name in ('MNIST', 'FashionMNIST'):
        transform = transforms.Compose([transforms.Resize(self.config.data.image_size), transforms.ToTensor()])
        if dataset_name == 'MNIST':
            dataset = MNIST(os.path.join(self.args.run, 'datasets', 'mnist'), train=True, download=True, transform=transform)
        else:
            dataset = FashionMNIST(os.path.join(self.args.run, 'datasets', 'fmnist'), train=True, download=True, transform=transform)
        samples = self._uniform_noise_like_batch(dataset, batch_size=100)
    elif dataset_name == 'CELEBA':
        # NOTE(review): the dataset is instantiated only for its download side
        # effect; the original code fetched a batch here and immediately
        # discarded it, so the fetch has been dropped.
        CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba'), split='test', transform=transforms.Compose([transforms.CenterCrop(140), transforms.Resize(self.config.data.image_size), transforms.ToTensor()]), download=True)
        samples = torch.rand(100, 3, self.config.data.image_size, self.config.data.image_size, device=self.config.device)
    else:
        transform = transforms.Compose([transforms.Resize(self.config.data.image_size), transforms.ToTensor()])
        # Only CIFAR10 is handled here; any other dataset name leaves `dataset`
        # unbound (matching the original behavior).
        if dataset_name == 'CIFAR10':
            dataset = CIFAR10(os.path.join(self.args.run, 'datasets', 'cifar10'), train=True, download=True, transform=transform)
        samples = self._uniform_noise_like_batch(dataset, batch_size=100)
    self._save_langevin_samples(samples, score)

def _uniform_noise_like_batch(self, dataset, batch_size):
    """Draw one batch from `dataset` and replace it with U[0, 1) noise of the same shape and device."""
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    samples, _ = next(iter(dataloader))
    samples = samples.cuda()  # original code pins sampling to CUDA, not config.device
    return torch.rand_like(samples)

def _save_langevin_samples(self, samples, score):
    """Run Langevin dynamics from `samples` and dump each recorded iterate to disk."""
    all_samples = self.Langevin_dynamics(samples, score, 1000, 2e-05)
    for i, sample in enumerate(tqdm.tqdm(all_samples)):
        sample = sample.view(sample.size(0), self.config.data.channels, self.config.data.image_size, self.config.data.image_size)
        if self.config.data.logit_transform:
            # Samples live in logit space; map back to [0, 1] for viewing.
            sample = torch.sigmoid(sample)
        torch.save(sample, os.path.join(self.args.image_folder, 'samples_{}.pth'.format(i)))
|
class ScoreNetRunner():
    """Training harness for a score network via sliced (SSM) or denoising (DSM) score matching.

    Built from an ``args`` namespace (run/log paths, resume flag, doc name)
    and a ``config`` tree with ``data``, ``optim`` and ``training`` sections.
    """
    def __init__(self, args, config):
        # args: CLI namespace (run dir, log dir, resume_training flag, ...).
        # config: experiment configuration (data / optim / training sub-configs).
        self.args = args
        self.config = config
    def get_optimizer(self, parameters):
        """Instantiate the optimizer named by ``config.optim.optimizer`` over ``parameters``.

        Raises:
            NotImplementedError: if the optimizer name is not Adam/RMSProp/SGD.
        """
        if (self.config.optim.optimizer == 'Adam'):
            return optim.Adam(parameters, lr=self.config.optim.lr, weight_decay=self.config.optim.weight_decay, betas=(self.config.optim.beta1, 0.999))
        elif (self.config.optim.optimizer == 'RMSProp'):
            return optim.RMSprop(parameters, lr=self.config.optim.lr, weight_decay=self.config.optim.weight_decay)
        elif (self.config.optim.optimizer == 'SGD'):
            return optim.SGD(parameters, lr=self.config.optim.lr, momentum=0.9)
        else:
            raise NotImplementedError('Optimizer {} not understood.'.format(self.config.optim.optimizer))
    def logit_transform(self, image, lam=1e-06):
        """Map pixels from (0, 1) to logit space, shrinking by ``lam`` to avoid +/- inf."""
        image = (lam + ((1 - (2 * lam)) * image))
        return (torch.log(image) - torch.log1p((- image)))
    def train(self):
        """Run the score-matching training loop with periodic evaluation and checkpointing.

        Datasets: CIFAR10 uses the official train/test split; MNIST and CelebA
        are split deterministically with a fixed NumPy seed. Returns 0 once
        ``config.training.n_iters`` steps have been taken.
        """
        transform = transforms.Compose([transforms.Resize(self.config.data.image_size), transforms.ToTensor()])
        if (self.config.data.dataset == 'CIFAR10'):
            dataset = CIFAR10(os.path.join(self.args.run, 'datasets', 'cifar10'), train=True, download=True, transform=transform)
            test_dataset = CIFAR10(os.path.join(self.args.run, 'datasets', 'cifar10'), train=False, download=True, transform=transform)
        elif (self.config.data.dataset == 'MNIST'):
            dataset = MNIST(os.path.join(self.args.run, 'datasets', 'mnist'), train=True, download=True, transform=transform)
            # Deterministic 80/20 train/eval split: seed NumPy for the shuffle,
            # then restore the previous global RNG state.
            num_items = len(dataset)
            indices = list(range(num_items))
            random_state = np.random.get_state()
            np.random.seed(2019)
            np.random.shuffle(indices)
            np.random.set_state(random_state)
            (train_indices, test_indices) = (indices[:int((num_items * 0.8))], indices[int((num_items * 0.8)):])
            test_dataset = Subset(dataset, test_indices)
            dataset = Subset(dataset, train_indices)
        elif (self.config.data.dataset == 'CELEBA'):
            dataset = ImageFolder(root=os.path.join(self.args.run, 'datasets', 'celeba'), transform=transforms.Compose([transforms.CenterCrop(140), transforms.Resize(self.config.data.image_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
            # Deterministic split: 70% train, next 10% eval (remaining 20% unused here).
            num_items = len(dataset)
            indices = list(range(num_items))
            random_state = np.random.get_state()
            np.random.seed(2019)
            np.random.shuffle(indices)
            np.random.set_state(random_state)
            (train_indices, test_indices) = (indices[:int((num_items * 0.7))], indices[int((num_items * 0.7)):int((num_items * 0.8))])
            test_dataset = Subset(dataset, test_indices)
            dataset = Subset(dataset, train_indices)
        dataloader = DataLoader(dataset, batch_size=self.config.training.batch_size, shuffle=True, num_workers=4)
        test_loader = DataLoader(test_dataset, batch_size=self.config.training.batch_size, shuffle=True, num_workers=4)
        test_iter = iter(test_loader)
        self.config.input_dim = ((self.config.data.image_size ** 2) * self.config.data.channels)
        # Fresh TensorBoard run directory for this experiment name.
        tb_path = os.path.join(self.args.run, 'tensorboard', self.args.doc)
        if os.path.exists(tb_path):
            shutil.rmtree(tb_path)
        tb_logger = tensorboardX.SummaryWriter(log_dir=tb_path)
        score = ResScore(self.config).to(self.config.device)
        optimizer = self.get_optimizer(score.parameters())
        if self.args.resume_training:
            states = torch.load(os.path.join(self.args.log, 'checkpoint.pth'))
            score.load_state_dict(states[0])
            optimizer.load_state_dict(states[1])
        step = 0
        sigma = self.config.training.noise_std
        for epoch in range(self.config.training.n_epochs):
            for (i, (X, y)) in enumerate(dataloader):
                step += 1
                X = X.to(self.config.device)
                if self.config.data.logit_transform:
                    X = self.logit_transform(X)
                # NOTE(review): pass-through wrapper around the network;
                # presumably kept for parity with variants that rescale the score.
                scaled_score = (lambda x: score(x))
                if (self.config.training.algo == 'ssm'):
                    # Sliced score matching on noise-perturbed data.
                    X = (X + (torch.randn_like(X) * sigma))
                    (loss, *_) = sliced_score_estimation_vr(scaled_score, X.detach(), n_particles=1)
                elif (self.config.training.algo == 'dsm'):
                    # Denoising score matching.
                    loss = dsm_score_estimation(scaled_score, X, sigma=self.config.training.noise_std)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                tb_logger.add_scalar('loss', loss, global_step=step)
                tb_logger.add_scalar('sigma', sigma, global_step=step)
                logging.info('step: {}, loss: {}, sigma: {}'.format(step, loss.item(), sigma))
                if (step >= self.config.training.n_iters):
                    return 0
                if ((step % 100) == 0):
                    # Periodic held-out evaluation; recycle the iterator when exhausted.
                    try:
                        (test_X, test_y) = next(test_iter)
                    except StopIteration:
                        test_iter = iter(test_loader)
                        (test_X, test_y) = next(test_iter)
                    test_X = test_X.to(self.config.device)
                    if self.config.data.logit_transform:
                        test_X = self.logit_transform(test_X)
                    if (self.config.training.algo == 'ssm'):
                        test_X += (torch.randn_like(test_X) * self.config.training.noise_std)
                        (test_loss, *_) = sliced_score_estimation_vr(scaled_score, test_X.detach(), n_particles=1)
                    elif (self.config.training.algo == 'dsm'):
                        test_loss = dsm_score_estimation(scaled_score, test_X, sigma=self.config.training.noise_std)
                    tb_logger.add_scalar('test_loss', test_loss, global_step=step)
                if ((step % self.config.training.snapshot_freq) == 0):
                    # Snapshot both a step-stamped and a rolling checkpoint.
                    states = [score.state_dict(), optimizer.state_dict()]
                    torch.save(states, os.path.join(self.args.log, 'checkpoint_{}.pth'.format(step)))
                    torch.save(states, os.path.join(self.args.log, 'checkpoint.pth'))
                # NOTE(review): unreachable — the `step >= n_iters` check above
                # always returns before this equality can hold.
                if (step == self.config.training.n_iters):
                    return 0
|
class OnlineEvaluator(Callback):
    """Attaches a classifier to evaluate a specific representation from the model during training.

    Args:
        optimizer: Config to instantiate an optimizer and optionally a scheduler.
        classifier: Config to instantiate a classifier.
        input_name: Name of the representation to evaluate from the model outputs.
        precision: Precision for the classifier that must match the model, if :math:`16` use automatic mixed precision.

    Example::

        optimizer = {...}  # config to build an optimizer
        classifier = {...}  # config to build a classifier
        trainer = Trainer(callbacks=[OnlineEvaluator(optimizer, classifier)])
    """

    def __init__(self, optimizer: DictConfig, classifier: DictConfig, input_name: str='h', precision: int=32):
        super().__init__()
        self.input_name = input_name
        self.classifier = hydra.utils.instantiate(classifier)
        (self.optimizer, self.scheduler) = hydra.utils.instantiate(optimizer, model=self.classifier)
        self.precision = precision
        assert (precision in [16, 32])
        self.use_amp = (self.precision == 16)
        # GradScaler is a no-op when AMP is disabled.
        self.scaler = GradScaler(enabled=self.use_amp)
        # State captured by `load_state_dict`, applied lazily in `on_fit_start`.
        self._recovered_callback_state = None

    @property
    def learnable_params(self) -> List[Parameter]:
        """List of learnable parameters."""
        params = list(self.classifier.parameters())
        return params

    def on_fit_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
        """Move the classifier to the model device, wrap it for DDP and restore any checkpointed state."""
        self.classifier = self.classifier.to(pl_module.device)
        # Newer Lightning versions renamed the connector to a private attribute.
        accel = (trainer.accelerator_connector if hasattr(trainer, 'accelerator_connector') else trainer._accelerator_connector)
        if accel.is_distributed:
            if is_strategy_ddp(accel.strategy):
                from torch.nn.parallel import DistributedDataParallel as DDP
                self.classifier = DDP(self.classifier, device_ids=[pl_module.device])
            else:
                rank_zero_warn('Does not support this type of distributed accelerator. The online evaluator will not sync.')
        if (self._recovered_callback_state is not None):
            self.classifier.load_state_dict(self._recovered_callback_state['state_dict'])
            self.optimizer.load_state_dict(self._recovered_callback_state['optimizer_state'])
            self.scaler.load_state_dict(self._recovered_callback_state['scaler'])

    def on_train_batch_end(self, trainer: Trainer, pl_module: LightningModule, outputs: Sequence, batch: Sequence, batch_idx: int) -> None:
        """Train the attached classifier one step on detached representations and log metrics."""
        targets = batch['label']
        # Detach so classifier gradients never flow into the main model.
        representations = outputs[self.input_name].clone().detach()
        # Ignore unlabeled samples (label == -1).
        mask = (targets != (- 1))
        with autocast(enabled=self.use_amp):
            logits = self.classifier(representations[mask])
            loss = nn.functional.cross_entropy(logits, targets[mask])
            num_classes = trainer.datamodule.num_classes
            task = ('binary' if (num_classes <= 2) else 'multiclass')
            # BUGFIX: metrics must use the masked targets to match the masked
            # logits; the unmasked tensor had a different length whenever any
            # label was -1.
            online_acc_1 = accuracy(logits, targets[mask], num_classes=num_classes, task=task)
            online_acc_5 = accuracy(logits, targets[mask], num_classes=num_classes, task=task, top_k=5)
        self.scaler.scale(loss).backward()
        self.scaler.step(self.optimizer)
        self.scaler.update()
        self.optimizer.zero_grad()
        if (self.scheduler is not None):
            self.scheduler.step()
        pl_module.log('online/train_acc_1', online_acc_1, on_step=True, on_epoch=True, prog_bar=True)
        pl_module.log('online/train_acc_5', online_acc_5, on_step=True, on_epoch=True)
        pl_module.log('online/train_loss', loss, on_step=True, on_epoch=True)

    def on_validation_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
        """Switch the classifier to eval mode for validation."""
        self.classifier.eval()

    def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
        """Switch the classifier back to train mode after validation.

        BUGFIX: the hook was misspelled ``on__validation_epoch_end`` so
        Lightning never invoked it and the classifier stayed in eval mode
        for the rest of training.
        """
        self.classifier.train()

    def on_validation_batch_end(self, trainer: Trainer, pl_module: LightningModule, outputs: Sequence, batch: Sequence, batch_idx: int, dataloader_idx: int=0):
        """Evaluate the attached classifier on a validation batch and log metrics."""
        targets = batch['label']
        representations = outputs[self.input_name].clone()
        mask = (targets != (- 1))
        with autocast(enabled=self.use_amp):
            logits = self.classifier(representations[mask])
            loss = nn.functional.cross_entropy(logits, targets[mask])
            num_classes = trainer.datamodule.num_classes
            task = ('binary' if (num_classes <= 2) else 'multiclass')
            # BUGFIX: use masked targets to match the masked logits (see train hook).
            val_acc_1 = accuracy(logits, targets[mask], num_classes=num_classes, task=task)
            val_acc_5 = accuracy(logits, targets[mask], num_classes=num_classes, task=task, top_k=5)
        pl_module.log('online/val_acc_1', val_acc_1, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True)
        pl_module.log('online/val_acc_5', val_acc_5, on_step=False, on_epoch=True, sync_dist=True)
        pl_module.log('online/val_loss', loss, on_step=False, on_epoch=True, sync_dist=True)

    def state_dict(self) -> Dict[(str, Any)]:
        """Serialize classifier, optimizer and scaler state for checkpointing."""
        return {'state_dict': self.classifier.state_dict(), 'optimizer_state': self.optimizer.state_dict(), 'scaler': self.scaler.state_dict()}

    def load_state_dict(self, state_dict: Dict[(str, Any)]) -> None:
        """Stash recovered state; it is applied on `on_fit_start` once devices are known."""
        self._recovered_callback_state = state_dict
|
class BaseDataModule(LightningDataModule, ABC):
    """Abstract class that inherits from LightningDataModule to follow standardized preprocessing for all
    datamodules in eztorch.

    Args:
        datadir: Where to save/load the data.
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.

    .. warning::
        The loader subconfigurations must not contain 'batch_size' that is automatically computed from the 'global_batch_size' specified in the configuration.
    """

    def __init__(self, datadir: str, train: Optional[DictConfig]=None, val: Optional[DictConfig]=None, test: Optional[DictConfig]=None) -> None:
        super().__init__()
        self.datadir = Path(datadir)
        train = self._validate_train_config(train)
        val = self._validate_val_config(val)
        test = self._validate_test_config(test)
        self.train = train
        self.val = val
        self.test = test
        self.train_dataset = None
        self.val_dataset = None
        self.test_dataset = None
        # For splits with no configuration, fall back to the parent dataloader
        # implementation; otherwise resolve a per-split data directory that may
        # override the global one.
        if (train is None):
            self.train_dataloader = super().train_dataloader
        else:
            self.traindir = (self.train.dataset.datadir if (self.train.get('dataset') and self.train.dataset.get('datadir')) else self.datadir)
        if (val is None):
            self.val_dataloader = super().val_dataloader
        else:
            self.valdir = (self.val.dataset.datadir if (self.val.get('dataset') and self.val.dataset.get('datadir')) else self.datadir)
        if (test is None):
            self.test_dataloader = super().test_dataloader
        else:
            self.testdir = (self.test.dataset.datadir if (self.test.get('dataset') and self.test.dataset.get('datadir')) else self.datadir)

    def _validate_split_config(self, cfg: Optional[DictConfig], split: str) -> Optional[DictConfig]:
        """Shared validation for the train/val/test loader sub-configs.

        Moves ``loader.collate_fn`` into ``self.<split>_collate_fn`` and strips
        any ``loader.batch_size`` (the local batch size is derived from
        ``global_batch_size`` instead).
        """
        if (cfg is None):
            return cfg
        # Always define the attribute so the dataloader builders can read it.
        setattr(self, f'{split}_collate_fn', None)
        if cfg.get('loader'):
            if cfg.loader.get('collate_fn'):
                setattr(self, f'{split}_collate_fn', get_collate_fn(cfg.loader.collate_fn))
                cfg.loader.pop('collate_fn')
            if cfg.loader.get('batch_size'):
                cfg.loader.pop('batch_size')
                rank_zero_warn(f'Batch size has been removed from the {split} loader config because global_batch_size should be passed in the {split} config.')
        return cfg

    def _validate_train_config(self, cfg: Optional[DictConfig]):
        """Validate the training configuration."""
        return self._validate_split_config(cfg, 'train')

    def _validate_val_config(self, cfg: DictConfig):
        """Validate the validation configuration."""
        return self._validate_split_config(cfg, 'val')

    def _validate_test_config(self, cfg: DictConfig):
        """Validate the testing configuration."""
        return self._validate_split_config(cfg, 'test')

    @property
    @abstractmethod
    def num_classes(self) -> int:
        """Number of classes that should be instantiated by relevant subclasses."""
        return (- 1)

    @property
    def train_num_samples(self) -> int:
        """Number of samples in the training dataset."""
        return (len(self.train_dataset) if self.train_dataset else 0)

    @property
    def val_num_samples(self) -> int:
        """Number of samples in the validation dataset."""
        return (len(self.val_dataset) if self.val_dataset else 0)

    @property
    def test_num_samples(self) -> int:
        """Number of samples in the testing dataset."""
        return (len(self.test_dataset) if self.test_dataset else 0)

    @property
    def train_global_batch_size(self) -> int:
        """Batch size across all processes for the training data."""
        if (not self.train.get('global_batch_size')):
            raise AttributeError('global_batch_size should be defined in train datamodule config.')
        return self.train.global_batch_size

    @property
    def val_global_batch_size(self) -> int:
        """Batch size across all processes for the validation data."""
        if (not self.val.get('global_batch_size')):
            raise AttributeError('global_batch_size should be defined in val datamodule config.')
        return self.val.global_batch_size

    @property
    def test_global_batch_size(self) -> int:
        """Batch size across all processes for the testing data."""
        if (not self.test.get('global_batch_size')):
            raise AttributeError('global_batch_size should be defined in test datamodule config.')
        return self.test.global_batch_size

    @property
    def train_local_batch_size(self) -> int:
        """Batch size of current process for the training data."""
        if (self.trainer is not None):
            return get_local_batch_size_in_trainer(self.train_global_batch_size, self.trainer)
        else:
            return self.train_global_batch_size

    @property
    def val_local_batch_size(self) -> int:
        """Batch size of current process for the validation data."""
        if (self.trainer is not None):
            return get_local_batch_size_in_trainer(self.val_global_batch_size, self.trainer)
        else:
            return self.val_global_batch_size

    @property
    def test_local_batch_size(self) -> int:
        """Batch size of current process for the testing data."""
        if (self.trainer is not None):
            return get_local_batch_size_in_trainer(self.test_global_batch_size, self.trainer)
        else:
            return self.test_global_batch_size

    def train_dataloader(self) -> DataLoader:
        """Build the training dataloader from the train config and dataset."""
        if (self.train is None):
            raise RuntimeError('No passed training configuration so dataloader cannot be retrieved.')
        loader = DataLoader(self.train_dataset, batch_size=self.train_local_batch_size, collate_fn=self.train_collate_fn, **self.train.loader)
        return loader

    def val_dataloader(self) -> DataLoader:
        """Build the validation dataloader from the val config and dataset."""
        if (self.val is None):
            raise RuntimeError('No passed validation configuration so dataloader cannot be retrieved.')
        loader = DataLoader(self.val_dataset, batch_size=self.val_local_batch_size, collate_fn=self.val_collate_fn, **self.val.loader)
        return loader

    def test_dataloader(self) -> DataLoader:
        """Build the testing dataloader from the test config and dataset."""
        if (self.test is None):
            raise RuntimeError('No passed testing configuration so dataloader cannot be retrieved.')
        loader = DataLoader(self.test_dataset, batch_size=self.test_local_batch_size, collate_fn=self.test_collate_fn, **self.test.loader)
        return loader
|
class CIFARDataModule(BaseDataModule, ABC):
    """Base datamodule for the CIFAR datasets.

    Args:
        datadir: Where to save/load the data.
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
        num_classes_kept: Number of classes to use.
        split_train_ratio: If not ``None`` randomly split the train dataset in two with split_train_ratio ratio for train.
        seed_for_split: Seed for the split.
    """

    def __init__(self, datadir: str, train: Optional[DictConfig]=None, val: Optional[DictConfig]=None, test: Optional[DictConfig]=None, num_classes_kept: Optional[int]=None, split_train_ratio: Optional[float]=None, seed_for_split: int=42) -> None:
        super().__init__(datadir=datadir, train=train, val=val, test=test)
        self.num_classes_kept = num_classes_kept
        self.split_train_ratio = split_train_ratio
        self.seed_for_split = seed_for_split
        assert ((num_classes_kept is None) or (num_classes_kept <= self.num_classes))

    @abstractproperty
    def DATASET(self) -> VisionDataset:
        """Dataset class that should be defined by subclasses."""
        return None

    def prepare_data(self) -> None:
        """Download the dataset splits required by the configuration."""
        if (self.train is not None):
            self.DATASET(self.datadir, train=True, download=True, transform=PILToTensor())
        if ((self.val is not None) or (self.test is not None)):
            self.DATASET(self.datadir, train=False, download=True, transform=PILToTensor())

    def _keep_first_classes(self, dataset):
        """Return a Subset of `dataset` restricted to targets < num_classes_kept.

        `dataset` must expose ``source_dataset.targets`` (DictDataset wrapper).
        """
        targets = torch.tensor(dataset.source_dataset.targets)
        indices_to_keep = (targets < self.num_classes_kept)
        # BUGFIX: `nonzero()` returns an (k, 1) tensor, so Subset would index
        # the dataset with 1-element tensors; flatten to plain ints.
        return torch.utils.data.Subset(dataset, indices_to_keep.nonzero().flatten().tolist())

    def setup(self, stage: Optional[str]=None) -> None:
        """Instantiate transforms and datasets for the requested stage."""
        if (stage == 'fit'):
            if (self.train is None):
                raise RuntimeError('No training configuration has been passed.')
            self.train_transform = hydra.utils.instantiate(self.train.transform)
            rank_zero_info(f'Train transform: {self.train_transform}')
            if (self.split_train_ratio is None):
                # Standard split: train on the train set, validate on the test set.
                self.train_dataset = DictDataset(self.DATASET(self.datadir, train=True, download=False, transform=self.train_transform))
                if (self.val is not None):
                    self.val_transform = hydra.utils.instantiate(self.val.transform)
                    rank_zero_info(f'Val transform: {self.val_transform}')
                    self.val_dataset = DictDataset(self.DATASET(self.datadir, train=False, download=False, transform=self.val_transform))
            else:
                # Carve a validation set out of the train set with a fixed seed.
                train_dataset = self.DATASET(self.datadir, train=True, download=False, transform=self.train_transform)
                train_length = round((len(train_dataset) * self.split_train_ratio))
                val_length = (len(train_dataset) - train_length)
                (train_dataset, val_dataset) = random_split(train_dataset, [train_length, val_length], torch.Generator().manual_seed(self.seed_for_split))
                self.train_dataset = DictDataset(train_dataset)
                if (self.val is not None):
                    self.val_transform = hydra.utils.instantiate(self.val.transform)
                    rank_zero_info(f'Val transform: {self.val_transform}')
                    self.val_dataset = val_dataset
                    # NOTE(review): setting `.transform` on a random_split Subset
                    # does not change the wrapped dataset's transform, so the val
                    # split presumably still applies the train transform — confirm.
                    self.val_dataset.transform = self.val_transform
                    self.val_dataset = DictDataset(val_dataset)
            if (self.num_classes_kept is not None):
                self.train_dataset = self._keep_first_classes(self.train_dataset)
            if ((self.val is not None) and (self.num_classes_kept is not None)):
                self.val_dataset = self._keep_first_classes(self.val_dataset)
        elif (stage == 'test'):
            if (self.test is None):
                raise RuntimeError('No testing configuration has been passed.')
            self.test_transform = hydra.utils.instantiate(self.test.transform)
            rank_zero_info(f'Test transform: {self.test_transform}')
            self.test_dataset = DictDataset(self.DATASET(self.datadir, train=False, download=False, transform=self.test_transform))
            # BUGFIX: the original guarded on `self.test_dataset is not None`
            # (always true right after assignment) and then compared targets to
            # num_classes_kept even when it was None, raising a TypeError.
            if (self.num_classes_kept is not None):
                self.test_dataset = self._keep_first_classes(self.test_dataset)
|
class CIFAR10DataModule(CIFARDataModule):
    """Datamodule for the CIFAR10 dataset.

    Args:
        datadir: Where to save/load the data.
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
        num_classes_kept: Number of classes to use.
        split_train_ratio: If not ``None`` randomly split the train dataset in two with split_train_ratio ratio for train.
        seed_for_split: Seed for the split.

    Example::

        datamodule = CIFAR10DataModule(datadir)
    """

    def __init__(self, datadir: str, train: Optional[DictConfig]=None, val: Optional[DictConfig]=None, test: Optional[DictConfig]=None, num_classes_kept: Optional[int]=None, split_train_ratio: Optional[float]=None, seed_for_split: int=42) -> None:
        super().__init__(datadir=datadir, train=train, val=val, test=test, num_classes_kept=num_classes_kept, split_train_ratio=split_train_ratio, seed_for_split=seed_for_split)

    @property
    def DATASET(self) -> VisionDataset:
        """Underlying torchvision dataset class."""
        return CIFAR10

    @property
    def num_classes(self) -> int:
        """Number of classes exposed by this datamodule."""
        if self.num_classes_kept is not None:
            return self.num_classes_kept
        return 10
|
class CIFAR100DataModule(CIFARDataModule):
    """Datamodule for the CIFAR100 dataset.

    Args:
        datadir: Where to save/load the data.
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
        num_classes_kept: Number of classes to use.
        split_train_ratio: If not ``None`` randomly split the train dataset in two with split_train_ratio ratio for train.
        seed_for_split: Seed for the split.

    Example::

        datamodule = CIFAR100DataModule(datadir)
    """

    def __init__(self, datadir: str, train: Optional[DictConfig]=None, val: Optional[DictConfig]=None, test: Optional[DictConfig]=None, num_classes_kept: Optional[int]=None, split_train_ratio: Optional[float]=None, seed_for_split: int=42) -> None:
        super().__init__(datadir=datadir, train=train, val=val, test=test, num_classes_kept=num_classes_kept, split_train_ratio=split_train_ratio, seed_for_split=seed_for_split)

    @property
    def DATASET(self) -> VisionDataset:
        """Underlying torchvision dataset class."""
        return CIFAR100

    @property
    def num_classes(self) -> int:
        """Number of classes exposed by this datamodule."""
        if self.num_classes_kept is not None:
            return self.num_classes_kept
        return 100
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.