repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
MENET
|
MENET-master/light/configuration.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/configuration.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/29
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import argparse


def _str2bool(value):
    """Parse a command-line boolean.

    argparse's ``type=bool`` is a well-known trap: ``bool("False")`` is True,
    so any non-empty string on the command line enables the flag. This helper
    accepts the usual spellings of true/false explicitly and keeps existing
    invocations such as ``--horizontal_flip False`` working as intended.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if value.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("boolean value expected, got {!r}".format(value))


parser = argparse.ArgumentParser()
# Model specification
parser.add_argument("--in_channel", type=int, default=3)
parser.add_argument("--n_feats", type=int, default=32)
parser.add_argument("--num_of_down_scale", type=int, default=2)
parser.add_argument("--gen_resblocks", type=int, default=6)
parser.add_argument("--discrim_blocks", type=int, default=3)
# NOTE(review): the registry in net.py keys models with a "menet_" prefix
# (e.g. "menet_shallow_new_edge_lossbalance"); this default does not match any
# registry key and must be overridden on the command line — confirm intended.
parser.add_argument("--model_name", type=str, default="shallow_edge_lossbalance", help="deep_new_ca_edge_gram_gradbalance/deep_new_ca_edge_gram_lossbalance")
# Data specification
parser.add_argument('--original_image_dir', type=str, default="/dataset/cvpr2017_derain_dataset/testing_data",
                    help='training/testing image files base dir')
parser.add_argument('--sub_dir', type=str, default="Rain100L",
                    help='training_data{RainTrainL, RainTrainH}, testing_data{Rain100L,Rain100H}')
parser.add_argument('--blend_mode', type=str, default="linear", help='`linear` or `screen`')
parser.add_argument('--crop_size', type=int, default=224, help='')
# Was `type=bool`, which treated every non-empty string (including "False") as True.
parser.add_argument('--horizontal_flip', type=_str2bool, default=True, help='')
# Training or test specification
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--batch_size', type=int, default=4, help='')
parser.add_argument('--epochs', type=int, default=100, help='')
parser.add_argument('--decay_epochs', type=int, default=40, help='')
parser.add_argument('--decay_factor', type=float, default=1e-1, help='learning rate decay factor')
# Was `default=1e5` (a float): argparse does not convert defaults through
# `type=`, so the value stayed float and made `//` arithmetic downstream float.
parser.add_argument('--num_examples_per_epoch', type=int, default=100000,
                    help='Number of examples per epoch of training dataset')
parser.add_argument('--vgg_dir', type=str, default="/dataset/pretrained_model",
                    help='dir of vgg pre-trained params file')
parser.add_argument('--critic_updates', type=int, default=5,
                    help='Number of updates of critic')
parser.add_argument('--max_checkpoints_to_keep', type=int, default=1, help='')
parser.add_argument('--num_steps_per_display', type=int, default=10, help='')
parser.add_argument('--train_dir', type=str, default="/dataset/derain_h5", help=' h5py format dataset directory.')
parser.add_argument('--test_dir', type=str, default="/dataset/derain_h5", help='')
parser.add_argument('--data_filename', type=str, default="Rain100L.h5", help=' h5py format train/test dataset file name.')
parser.add_argument('--tensorboard', type=str, default="tensorboard", help='')
parser.add_argument('--model_dir', type=str, default="model_params", help='')
parser.add_argument('--gpu_id', type=str, default="0", help='')
parser.add_argument('--metric_dir', type=str, default="metric", help='')
parser.add_argument('--infer_in_dir', type=str, default="img/examples", help='')
parser.add_argument('--infer_out_dir', type=str, default="img/results", help='')
parser.add_argument('--scale_ratio', type=int, default=16, help='down sampling scale ratio, for inference image resize')
parser.add_argument('--ext', type=str, default=".png", help='`.jpg` or `.png`. In the inference stage, the extension of the picture')
args = parser.parse_args()
class ModelConfig(object):
    """Wrapper class for configuring model parameters.

    Snapshots every parsed command-line option from the module-level ``args``
    namespace onto the instance, one attribute per option name.
    """

    # Option names, listed in the same order the argparse options are declared.
    _OPTION_NAMES = (
        "in_channel", "n_feats", "num_of_down_scale", "gen_resblocks",
        "discrim_blocks", "model_name", "original_image_dir", "sub_dir",
        "blend_mode", "crop_size", "horizontal_flip", "lr", "batch_size",
        "epochs", "decay_epochs", "decay_factor", "num_examples_per_epoch",
        "vgg_dir", "critic_updates", "max_checkpoints_to_keep",
        "num_steps_per_display", "train_dir", "test_dir", "data_filename",
        "tensorboard", "model_dir", "gpu_id", "metric_dir", "infer_in_dir",
        "infer_out_dir", "scale_ratio", "ext",
    )

    def __init__(self):
        # Copy each option value from the parsed arguments onto this instance.
        for option in self._OPTION_NAMES:
            setattr(self, option, getattr(args, option))
# Module-level singleton consumed by the rest of the package
# (``from configuration import cfg``); instantiated once at import time.
cfg = ModelConfig()
if __name__ == '__main__':
    # Developer convenience: print ``self.x = args.x`` boilerplate lines so new
    # options can be pasted into ModelConfig quickly.
    for name in args.__dict__:
        print("self.{}=args.{}".format(name, name))
| 5,050
| 44.918182
| 157
|
py
|
MENET
|
MENET-master/light/vgg19.py
|
import inspect
import os
import time
import numpy as np
import tensorflow as tf
# VGG_MEAN = [103.939, 116.779, 123.68]
class Vgg19:
    """Pre-trained VGG-19 feature extractor loaded from a ``vgg19.npy`` weight file.

    All weights are materialized as ``tf.constant``, so the network is frozen
    (usable as a fixed perceptual-loss backbone).  Only the layers up to
    conv3_4 are built; the deeper conv/fc layers are intentionally omitted.
    """

    def __init__(self, vgg19_npy_path=None):
        """Load the weight dictionary and set up the channel-mean constant.

        :param vgg19_npy_path: directory containing ``vgg19.npy``, a direct
            path to the ``.npy`` file itself, or ``None`` to look for the file
            next to this module.
        """
        if vgg19_npy_path is None:
            # Default to the directory this module lives in.
            module_file = inspect.getfile(Vgg19)
            vgg19_npy_path = os.path.abspath(os.path.join(module_file, os.pardir))
        # Accept either a directory or the .npy file itself.  (The previous
        # version unconditionally joined "vgg19.npy" onto the path, which broke
        # the default branch by producing ".../vgg19.npy/vgg19.npy".)
        if not vgg19_npy_path.endswith(".npy"):
            vgg19_npy_path = os.path.join(vgg19_npy_path, "vgg19.npy")
        print(vgg19_npy_path)
        self.data_dict = np.load(vgg19_npy_path, encoding='latin1', allow_pickle=True).item()
        # Per-channel means (BGR order), shaped for broadcasting over NHWC batches.
        self.vgg_mean = tf.reshape(tf.convert_to_tensor([103.939, 116.779, 123.68], tf.float32), (1, 1, 1, 3))
        print("npy file loaded")

    def build(self, rgb):
        """
        load variable from npy to build the VGG
        :param rgb: rgb image [batch, height, width, 3] values scaled [0, 255]
        """
        start_time = time.time()
        print("build model started")
        # RGB -> BGR channel flip, then subtract the per-channel means.
        bgr = rgb[:, :, :, ::-1]
        bgr = bgr - self.vgg_mean
        self.conv1_1 = self.conv_layer(bgr, "conv1_1")
        self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
        self.pool1 = self.max_pool(self.conv1_2, 'pool1')
        self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
        self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
        self.pool2 = self.max_pool(self.conv2_2, 'pool2')
        self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
        self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
        self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
        self.conv3_4 = self.conv_layer(self.conv3_3, "conv3_4")
        # pool3 .. fc8/prob of the full VGG-19 are deliberately not built;
        # features up to conv3_4 are all that the callers consume.
        print(("build model finished: %ds" % (time.time() - start_time)))

    def avg_pool(self, bottom, name):
        """2x2 average pooling with stride 2."""
        return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def max_pool(self, bottom, name):
        """2x2 max pooling with stride 2."""
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def conv_layer(self, bottom, name):
        """Conv + bias + ReLU using the frozen weights stored under ``name``."""
        with tf.variable_scope(name):
            filt = self.get_conv_filter(name)
            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            conv_biases = self.get_bias(name)
            bias = tf.nn.bias_add(conv, conv_biases)
            relu = tf.nn.relu(bias)
            return relu

    def fc_layer(self, bottom, name):
        """Flatten ``bottom`` and apply the frozen fully-connected layer ``name``."""
        with tf.variable_scope(name):
            shape = bottom.get_shape().as_list()
            dim = 1
            for d in shape[1:]:
                dim *= d
            x = tf.reshape(bottom, [-1, dim])
            weights = self.get_fc_weight(name)
            biases = self.get_bias(name)
            # Fully connected layer. Note that the '+' operation automatically
            # broadcasts the biases.
            fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
            return fc

    def get_conv_filter(self, name):
        # data_dict[name] holds [filter, bias] pairs per layer name.
        return tf.constant(self.data_dict[name][0], name="filter")

    def get_bias(self, name):
        return tf.constant(self.data_dict[name][1], name="biases")

    def get_fc_weight(self, name):
        return tf.constant(self.data_dict[name][0], name="weights")
| 4,400
| 35.675
| 120
|
py
|
MENET
|
MENET-master/light/net.py
|
# -*- coding: utf-8 -*-
# @File : MENET/net.py
# @Info : @ TSMC-SIGGRAPH, 2019/8/10
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
from configuration import cfg
from template import menet_shallow_new, menet_shallow_new_ca
from template import menet_shallow_new_edge_lossbalance, menet_shallow_new_edge_gradbalance, menet_shallow_new_edge_fixed
from template import menet_shallow_new_edge_gram_gradbalance, menet_shallow_new_edge_gram_lossbalance
from template import menet_shallow_new_ca, menet_shallow_new_ca_edge_gram_gradbalance, menet_shallow_new_ca_edge_gram_lossbalance
from template import menet_deep_new_ca_edge_gram_gradbalance, menet_deep_new_ca_edge_gram_lossbalance
from template import menet_shallow_new_vgg_fixed
# Registry mapping a model_name string (cfg.model_name) to its model class.
models = {
    "menet_shallow_new": menet_shallow_new.ModelShallowNew,
    "menet_shallow_new_edge_fixed": menet_shallow_new_edge_fixed.ModelShallowNewEdgeFixed,
    "menet_shallow_new_edge_lossbalance": menet_shallow_new_edge_lossbalance.ModelShallowNewEdgeLossBalance,
    "menet_shallow_new_edge_gradbalance": menet_shallow_new_edge_gradbalance.ModelShallowNewEdgeGradBalance,
    "menet_shallow_new_edge_gram_lossbalance": menet_shallow_new_edge_gram_lossbalance.ModelShallowNewEdgeGramLossBalance,
    "menet_shallow_new_edge_gram_gradbalance": menet_shallow_new_edge_gram_gradbalance.ModelShallowNewEdgeGramGradBalance,
    "menet_shallow_new_ca": menet_shallow_new_ca.ModelShallowNewCa,
    "menet_shallow_new_ca_edge_gram_lossbalance": menet_shallow_new_ca_edge_gram_lossbalance.ModelShallowNewCaEdgeGramLossBalance,
    "menet_shallow_new_ca_edge_gram_gradbalance": menet_shallow_new_ca_edge_gram_gradbalance.ModelShallowNewCaEdgeGramGradBalance,
    "menet_deep_new_ca_edge_gram_lossbalance": menet_deep_new_ca_edge_gram_lossbalance.ModelDeepNewCaEdgeGramLossBalance,
    "menet_deep_new_ca_edge_gram_gradbalance": menet_deep_new_ca_edge_gram_gradbalance.ModelDeepNewCaEdgeGramGradBalance,
    "menet_shallow_new_vgg_fixed": menet_shallow_new_vgg_fixed.ModelShallowNewVGGFixed,
}
# Fail fast with an actionable message: the configuration default
# ("shallow_edge_lossbalance") is not a registry key, and a bare KeyError
# gives no hint about the valid names.
if cfg.model_name not in models:
    raise KeyError(
        "Unknown --model_name {!r}; expected one of: {}".format(
            cfg.model_name, ", ".join(sorted(models))))
Model = models[cfg.model_name]
| 2,110
| 54.552632
| 130
|
py
|
MENET
|
MENET-master/light/data_helper.py
|
# -*- coding: utf-8 -*-
# @File : derain_gradnorm_tf/data_helper.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/29
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
from random import shuffle
import h5py
import numpy as np
from configuration import cfg
# from matplotlib import pyplot as plt
def get_batch(filename="dataset/derain_h5/RainTrainL.h5", batch_size=cfg.batch_size, is_shuffle=True):
    """Yield (input, label) mini-batches from an HDF5 derain dataset.

    :param filename: path to an h5 file holding datasets "syn" (rainy input)
        and "bg" (clean background label), indexed along axis 0.
    :param batch_size: number of samples per yielded batch.
    :param is_shuffle: shuffle the sample order once per call.
    :yields: ``(batch_x, batch_y)`` arrays of ``batch_size`` samples each.

    Side effect: overwrites ``cfg.num_examples_per_epoch`` with the usable
    sample count (read elsewhere by the learning-rate schedule).
    """
    # `with` keeps the file open while the generator is live and guarantees the
    # handle is closed when the generator is exhausted or discarded (the
    # previous version never closed it).
    with h5py.File(filename, "r") as f:
        inputs = f["syn"]
        labels = f["bg"]
        num_samples = inputs.len()
        num_batches = num_samples // batch_size
        if num_samples % batch_size != 0:
            # Drop the ragged tail so every batch is exactly batch_size.
            num_samples = num_batches * batch_size
            inputs = inputs[:num_samples, ...]
            labels = labels[:num_samples, ...]
        cfg.num_examples_per_epoch = num_samples
        print("[get_batch] processing {} samples, batch_size {}, batches {}".format(num_samples, batch_size, num_batches))
        idx = np.arange(num_samples)
        if is_shuffle:
            # Shuffle indices only; materializing the whole dataset just to
            # shuffle it (e.g. np.take(inputs, idx, 0)) risks OOM.
            shuffle(idx)
        for i in range(num_batches):
            # h5py fancy indexing requires increasing indices, hence np.sort()
            # (avoids TypeError: Indexing elements must be in increasing order).
            sel = np.sort(idx[i * batch_size:i * batch_size + batch_size])
            yield inputs[sel, ...], labels[sel, ...]
if __name__ == '__main__':
    # Smoke test: pull a single batch from a local h5 file and stop.  The
    # commented matplotlib code visualizes the first four input/label pairs.
    for batch_x, batch_y in get_batch("dataset/RainTrainL.h5", 4):
        # for i in range(4):
        #     a = plt.subplot(2, 4, i + 1)
        #     a.imshow(batch_x[i])
        #     a.axis('off')
        # for i in range(4):
        #     a = plt.subplot(2, 4, i + 5)
        #     a.imshow(batch_y[i])
        #     a.axis('off')
        # plt.show()
        break
| 1,905
| 30.766667
| 118
|
py
|
MENET
|
MENET-master/light/train.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/train.py
# @Info : @ TSMC-SIGGRAPH, 2019/8/10
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import os
import numpy as np
import tensorflow as tf
from configuration import cfg
from data_helper import get_batch
from net import Model
# Select which GPU(s) TensorFlow may see, per the --gpu_id option.
os.environ["CUDA_VISIBLE_DEVICES"] = cfg.gpu_id
def main(_):
    """Train the configured derain model, checkpointing once per epoch.

    Resumes from the latest checkpoint under cfg.model_dir/<nickname> if one
    exists; batches come from the h5 file at cfg.train_dir/cfg.data_filename.
    """
    # build model
    model = Model("train")
    model.build()
    config = tf.ConfigProto()
    # Grow GPU memory on demand instead of reserving it all up front.
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(max_to_keep=cfg.max_checkpoints_to_keep)
        # Resume if a checkpoint exists; otherwise make sure the directory is there.
        if os.path.exists(os.path.join(cfg.model_dir, model.nickname, "checkpoint")):
            model_file = tf.train.latest_checkpoint(os.path.join(cfg.model_dir, model.nickname))
            saver.restore(sess, model_file)
        else:
            if not os.path.exists(os.path.join(cfg.model_dir, model.nickname)):
                os.makedirs(os.path.join(cfg.model_dir, model.nickname))
        # training loop
        for epoch in range(cfg.epochs):
            # iterate the whole dataset n epochs
            print("iterate the whole dataset {} epochs".format(cfg.epochs))
            for i, samples in enumerate(get_batch(os.path.join(cfg.train_dir, cfg.data_filename), cfg.batch_size, True)):
                batch_syn, batch_bg = samples
                step = tf.train.global_step(sess, model.global_step)
                batch_syn = np.asarray(batch_syn, "float32")
                batch_bg = np.asarray(batch_bg, "float32")
                feed_dict = {model.bg_img: batch_bg, model.syn_img: batch_syn}
                if step % cfg.num_steps_per_display == 0:
                    # Every num_steps_per_display steps also fetch the metrics for logging.
                    _, lr, total_loss, mse, ssim, psnr = sess.run([model.train_op, model.lr, model.total_loss, model.mse,
                                                                   model.ssim, model.psnr],
                                                                  feed_dict=feed_dict)
                    print("[{}/{}] lr: {:.8f}, total_loss: {:.6f}, mse: {:.6f}, ssim: {:.4f}, "
                          "psnr: {:.4f}".format(epoch, step, lr, total_loss, mse, ssim, psnr))
                else:
                    sess.run(model.train_op, feed_dict=feed_dict)
            # One checkpoint per finished epoch.
            saver.save(sess, os.path.join(cfg.model_dir, model.nickname, 'model.epoch-{}'.format(epoch)))
        # Final checkpoint once all epochs complete.
        saver.save(sess, os.path.join(cfg.model_dir, model.nickname, 'model.final-{}'.format(cfg.epochs)))
        print(" ------ Arriving at the end of data ------ ")
if __name__ == '__main__':
    # tf.app.run() parses TF's own flags and then invokes main(argv).
    tf.app.run()
| 2,689
| 40.384615
| 121
|
py
|
MENET
|
MENET-master/light/template/menet_shallow_new_edge_gradbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_shallow_edge_gradbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/13
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new
class ModelShallowNewEdgeGradBalance(menet_shallow_new.ModelShallowNew):
    """Shallow derain model trained with MSE + Sobel-edge losses, weighted by
    their relative gradient norms (GradNorm-style balancing).

    NOTE(review): in this class the Sobel edge loss is stored as
    ``content_loss`` (the gram variants store it as ``edge_loss``); kept as-is
    for compatibility with existing training/eval code.
    """

    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewEdgeGradBalance, self).__init__(mode)

    # loss_layer
    def build_loss(self):
        """Build the MSE and Sobel-edge losses plus SSIM/PSNR metrics."""
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # One Sobel pass over labels and predictions stacked along the batch
        # axis; the first cfg.batch_size entries belong to the labels.
        edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.content_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
                                                         predictions=edge_feat[cfg.batch_size:])
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + self.content_loss

    def build_optimizer(self):
        """Weight each loss by 1 - G_i/G (gradient-norm balance) and build train_op."""
        # Gradient norms are measured w.r.t. the last conv layer's variables only.
        tvars = tf.trainable_variables(scope="derain/layer5")
        mse_grads = tf.gradients(self.mse, tvars)
        G1 = tf.norm(mse_grads)
        closs_grads = tf.gradients(self.content_loss, tvars)
        G2 = tf.norm(closs_grads)
        G = G1 + G2
        # stop_gradient: the weights act as constants during backprop.
        w_1 = tf.stop_gradient(1. - G1 / G)  # num_tasks * (1 - task_i/tasks)
        w_2 = tf.stop_gradient(1. - G2 / G)
        self.total_loss = w_1 * self.mse + w_2 * self.content_loss
        # cfg.num_examples_per_epoch is overwritten by data_helper.get_batch at
        # run time; here it sizes the stepwise learning-rate decay.
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # Expose the decayed lr tensor directly instead of reading the
        # optimizer's private `_lr` attribute (fragile across TF versions).
        self.lr = lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 2,524
| 39.725806
| 104
|
py
|
MENET
|
MENET-master/light/template/menet_shallow_new_ca.py
|
# -*- coding: utf-8 -*-
# @File : light/net_shallow_new_ca.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/13
# @Desc : deep model (16 residual blocks), spatial pyramid attention
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from template import net_base
class ModelShallowNewCa(net_base.ModelBase):
    """Shallow derain network with a channel-attention block.

    (The file header's "deep model (16 residual blocks)" note appears stale:
    build_model stacks 8 residual blocks.)
    """
    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewCa, self).__init__(mode)
    def channel_attention_layer(self, name, x):
        # Squeeze/excitation-style gate: two space_to_depth downscales, each
        # followed by a 1x1 "squeeze" conv back to in_channel and a 3x3 conv;
        # a final conv feeds a global-average-pooled sigmoid gate that rescales
        # x per channel.  Returns x + x * gate (residual attention).
        with tf.variable_scope(name):
            in_channel = x.get_shape()[-1]
            down_scale1 = self.bn_relu(self.conv2(tf.space_to_depth(x, 2, name="att_desubpixel_1"), in_channel, 1, 1, name="squeeze_1"))
            down_scale1 = self.bn_relu(self.conv2(down_scale1, in_channel, 3, name="layer_1"))
            down_scale2 = self.bn_relu(self.conv2(tf.space_to_depth(down_scale1, 2, name="att_desubpixel_2"), in_channel, 1, 1, name="squeeze_2"))
            down_scale2 = self.bn_relu(self.conv2(down_scale2, in_channel, 3, name="layer_2"))
            net = self.bn(self.conv2(down_scale2, in_channel, 3, 1, name="excitation_1"))
            # Mean over H,W yields one sigmoid weight per channel (broadcast back).
            channel_feat = tf.nn.sigmoid(tf.reduce_mean(net, [1, 2], keepdims=True))
            return tf.add(x, tf.multiply(x, channel_feat))
    def build_model(self):
        # Pipeline: two space_to_depth downscales (x4 spatial reduction total),
        # channel attention, 8 residual blocks, then mirrored depth_to_space
        # upscales with skip connections from the encoder (net_1, net_2) and a
        # final residual add onto the rainy input.
        with tf.variable_scope("derain"):
            net = tf.space_to_depth(self.syn_img, 2, name="desubpixel_1")
            net_1 = self.bn(self.conv2(net, 16, 3, name="layer1"))
            net = tf.nn.relu(net_1)
            net = tf.space_to_depth(net, 2, name="desubpixel_2")
            net_2 = self.bn(self.conv2(net, 64, 3, name="layer2"))
            net = tf.nn.relu(net_2)
            net = self.channel_attention_layer("ca", net)
            for i in range(8):
                res = net
                net = self.bn_relu(self.conv2(net, 64, 3, 1, name='res_{}_a'.format(i)))
                net = self.bn(self.conv2(net, 64, 3, 1, name='res_{}_b'.format(i)))
                if i <7:
                    net = tf.nn.relu(tf.add(net, res))  # skip-connect
                else:
                    # Last block: residual add without a trailing ReLU.
                    net = tf.add(net, res)
            net = self.bn(self.conv2(tf.add(net_2, net), 64, 3, name="layer3"))
            net = tf.depth_to_space(net, 2, "pixel_shuffle_1")
            net = self.bn(self.conv2(tf.add(net_1, net), 16, 3, name="layer4"))
            net = self.conv2(net, 12, 3, name="layer5")
            net = tf.depth_to_space(net, 2, "pixel_shuffle_2")
            # The network predicts a correction that is added to the rainy
            # input, then clipped to the valid pixel range.
            bg_hat = tf.add(self.syn_img, net)
            self.output = tf.clip_by_value(bg_hat, 0.0, 255.0, name="output")  # BReLU
| 2,722
| 42.222222
| 146
|
py
|
MENET
|
MENET-master/light/template/menet_shallow_new_edge_gram_gradbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_deep_edge_gradbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/15
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new
# from vgg19 import Vgg19
class ModelShallowNewEdgeGramGradBalance(menet_shallow_new.ModelShallowNew):
    """Shallow derain model trained with MSE + Sobel-edge + Gram-texture
    losses, weighted by the relative gradient norms of each loss
    (GradNorm-style balancing)."""

    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewEdgeGramGradBalance, self).__init__(mode)
        self.edge_loss = None

    def texture_matching_loss(self, labels, predictions):
        """MSE between mean Gram statistics of 4x4 space_to_depth patches.

        space_to_depth(x, 4) folds each 4x4 spatial patch into the channel
        dimension (48 channels here), so the Gram matrix captures local
        texture statistics without a VGG feature extractor.  (An earlier
        VGG-pool1-based variant of this loss was removed.)
        """
        labels_reshape = tf.reshape(tf.space_to_depth(labels, 4), [cfg.batch_size, -1, 48])
        predictions_reshape = tf.reshape(tf.space_to_depth(predictions, 4), [cfg.batch_size, -1, 48])
        gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
        gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
        gram_labels = tf.reduce_mean(gram_labels, [1, 2])
        gram_predictions = tf.reduce_mean(gram_predictions, [1, 2])
        return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)

    # loss_layer
    def build_loss(self):
        """Build the MSE, edge and texture losses plus SSIM/PSNR metrics."""
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # One Sobel pass over labels and predictions stacked along the batch
        # axis; the first cfg.batch_size entries belong to the labels.
        edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.edge_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
                                                      predictions=edge_feat[cfg.batch_size:])
        self.content_loss = self.texture_matching_loss(labels=self.bg_img, predictions=self.output)
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + self.content_loss + self.edge_loss

    def build_optimizer(self):
        """Weight each loss by 1 - G_i/G (gradient-norm balance) and build train_op."""
        # Gradient norms are measured w.r.t. the last conv layer's variables only.
        tvars = tf.trainable_variables(scope="derain/layer5")
        mse_grads = tf.gradients(self.mse, tvars)
        G1 = tf.norm(mse_grads)
        closs_grads = tf.gradients(self.content_loss, tvars)
        G2 = tf.norm(closs_grads)
        edge_grads = tf.gradients(self.edge_loss, tvars)
        G3 = tf.norm(edge_grads)
        G = G1 + G2 + G3
        # stop_gradient: the weights act as constants during backprop.
        w_1 = tf.stop_gradient(1. - G1 / G)  # num_tasks * (1 - task_i/tasks)
        w_2 = tf.stop_gradient(1. - G2 / G)
        w_3 = tf.stop_gradient(1. - G3 / G)
        self.total_loss = w_1 * self.mse + w_2 * self.content_loss + w_3 * self.edge_loss
        # cfg.num_examples_per_epoch is overwritten by data_helper.get_batch at
        # run time; here it sizes the stepwise learning-rate decay.
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # Expose the decayed lr tensor directly instead of reading the
        # optimizer's private `_lr` attribute (fragile across TF versions).
        self.lr = lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 4,430
| 47.163043
| 105
|
py
|
MENET
|
MENET-master/light/template/menet_deep_new_ca_edge_gram_lossbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_deep_spa_edge_lossbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/15
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_deep_new_ca
class ModelDeepNewCaEdgeGramLossBalance(menet_deep_new_ca.ModelDeepNewCa):
    """Deep channel-attention derain model trained with MSE + Sobel-edge +
    Gram-texture losses, weighted by their relative loss magnitudes."""

    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelDeepNewCaEdgeGramLossBalance, self).__init__(mode)
        self.edge_loss = None

    def texture_matching_loss(self, labels, predictions):
        """MSE between mean Gram statistics of 4x4 space_to_depth patches.

        space_to_depth(x, 4) folds each 4x4 spatial patch into the channel
        dimension (48 channels here), so the Gram matrix captures local
        texture statistics without a VGG feature extractor.  (Earlier VGG- and
        extract_image_patches-based variants of this loss were removed.)
        """
        labels_reshape = tf.reshape(tf.space_to_depth(labels, 4), [cfg.batch_size, -1, 48])
        predictions_reshape = tf.reshape(tf.space_to_depth(predictions, 4), [cfg.batch_size, -1, 48])
        gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
        gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
        gram_labels = tf.reduce_mean(gram_labels, [1, 2])
        gram_predictions = tf.reduce_mean(gram_predictions, [1, 2])
        return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)

    # loss_layer
    def build_loss(self):
        """Build the MSE, edge and texture losses plus SSIM/PSNR metrics."""
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # One Sobel pass over labels and predictions stacked along the batch
        # axis; the first cfg.batch_size entries belong to the labels.
        edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.edge_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
                                                      predictions=edge_feat[cfg.batch_size:])
        self.content_loss = self.texture_matching_loss(labels=self.bg_img, predictions=self.output)
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + self.content_loss + self.edge_loss

    def build_optimizer(self):
        """Weight each loss by 1 - loss_i/total (loss balance) and build train_op."""
        # stop_gradient: the weights act as constants during backprop.
        w_1 = tf.stop_gradient(1. - self.mse / self.total_loss)
        w_2 = tf.stop_gradient(1. - self.content_loss / self.total_loss)
        w_3 = tf.stop_gradient(1. - self.edge_loss / self.total_loss)
        self.total_loss = w_1 * self.mse + w_2 * self.content_loss + w_3 * self.edge_loss
        # cfg.num_examples_per_epoch is overwritten by data_helper.get_batch at
        # run time; here it sizes the stepwise learning-rate decay.
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # Expose the decayed lr tensor directly instead of reading the
        # optimizer's private `_lr` attribute (fragile across TF versions).
        self.lr = lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 4,887
| 53.311111
| 145
|
py
|
MENET
|
MENET-master/light/template/menet_deep_new_ca_edge_gram_gradbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_deep_spa_edge_gradbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/15
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_deep_new_ca
class ModelDeepNewCaEdgeGramGradBalance(menet_deep_new_ca.ModelDeepNewCa):
    """Deep channel-attention derain model trained with MSE + Sobel-edge +
    Gram-texture losses, weighted by the relative gradient norms of each loss
    (GradNorm-style balancing)."""

    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelDeepNewCaEdgeGramGradBalance, self).__init__(mode)
        self.edge_loss = None

    def texture_matching_loss(self, labels, predictions):
        """MSE between mean Gram statistics of 4x4 space_to_depth patches.

        space_to_depth(x, 4) folds each 4x4 spatial patch into the channel
        dimension (48 channels here), so the Gram matrix captures local
        texture statistics without a VGG feature extractor.  (Earlier VGG- and
        extract_image_patches-based variants of this loss were removed.)
        """
        labels_reshape = tf.reshape(tf.space_to_depth(labels, 4), [cfg.batch_size, -1, 48])
        predictions_reshape = tf.reshape(tf.space_to_depth(predictions, 4), [cfg.batch_size, -1, 48])
        gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
        gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
        gram_labels = tf.reduce_mean(gram_labels, [1, 2])
        gram_predictions = tf.reduce_mean(gram_predictions, [1, 2])
        return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)

    # loss_layer
    def build_loss(self):
        """Build the MSE, edge and texture losses plus SSIM/PSNR metrics."""
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # One Sobel pass over labels and predictions stacked along the batch
        # axis; the first cfg.batch_size entries belong to the labels.
        edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.edge_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
                                                      predictions=edge_feat[cfg.batch_size:])
        self.content_loss = self.texture_matching_loss(labels=self.bg_img, predictions=self.output)
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + self.content_loss + self.edge_loss

    def build_optimizer(self):
        """Weight each loss by 1 - G_i/G (gradient-norm balance) and build train_op."""
        # Gradient norms are measured w.r.t. the last conv layer's variables only.
        tvars = tf.trainable_variables(scope="derain/layer5")
        mse_grads = tf.gradients(self.mse, tvars)
        G1 = tf.norm(mse_grads)
        closs_grads = tf.gradients(self.content_loss, tvars)
        G2 = tf.norm(closs_grads)
        edge_grads = tf.gradients(self.edge_loss, tvars)
        G3 = tf.norm(edge_grads)
        G = G1 + G2 + G3
        # stop_gradient: the weights act as constants during backprop.
        w_1 = tf.stop_gradient(1. - G1 / G)  # num_tasks * (1 - task_i/tasks)
        w_2 = tf.stop_gradient(1. - G2 / G)
        w_3 = tf.stop_gradient(1. - G3 / G)
        self.total_loss = w_1 * self.mse + w_2 * self.content_loss + w_3 * self.edge_loss
        # cfg.num_examples_per_epoch is overwritten by data_helper.get_batch at
        # run time; here it sizes the stepwise learning-rate decay.
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # Expose the decayed lr tensor directly instead of reading the
        # optimizer's private `_lr` attribute (fragile across TF versions).
        self.lr = lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 5,208
| 48.141509
| 145
|
py
|
MENET
|
MENET-master/light/template/menet_shallow_new_edge_lossbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_shallow_edge_lossbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/13
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new
class ModelShallowNewEdgeLossBalance(menet_shallow_new.ModelShallowNew):
    """Shallow de-rain model trained with MSE + Sobel-edge losses, where the
    two losses are re-weighted each step by their relative magnitudes."""

    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewEdgeLossBalance, self).__init__(mode)

    # loss_layer
    def build_loss(self):
        """Build MSE + Sobel-edge losses and SSIM/PSNR evaluation metrics."""
        # Pixel-wise reconstruction loss.
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # Sobel edges of label and prediction in one batched call; the first
        # half of the batch holds the labels, the second half the outputs.
        edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.content_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
                                                         predictions=edge_feat[cfg.batch_size:])
        # Evaluation metrics (images are in [0, 255]).
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + self.content_loss

    def build_optimizer(self):
        """Create the Adam training op with magnitude-balanced loss weights."""
        # The loss ratio for task i at time t: each loss is weighted by the
        # other loss's share of the total, damping the dominant term.
        w_1 = tf.stop_gradient(1. - self.mse / self.total_loss)
        w_2 = tf.stop_gradient(1. - self.content_loss / self.total_loss)
        self.total_loss = w_1 * self.mse + w_2 * self.content_loss
        # note: cfg.num_examples_per_epoch now is `None`
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # Expose the decayed lr tensor instead of the optimizer's private `_lr`.
        self.lr = lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 2,278
| 41.203704
| 104
|
py
|
MENET
|
MENET-master/light/template/menet_shallow_new_vgg_fixed.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_shallow_vgg_fixed.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/15
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new
from vgg19 import Vgg19
class ModelShallowNewVGGFixed(menet_shallow_new.ModelShallowNew):
    """Shallow de-rain model trained with MSE plus a fixed-weight VGG-19
    perceptual loss computed on conv3_4 activations."""

    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewVGGFixed, self).__init__(mode)

    # loss_layer
    def build_loss(self):
        """Build MSE + 1e-3 * perceptual loss and SSIM/PSNR metrics."""
        labels, outputs = self.bg_img, self.output
        self.mse = tf.losses.mean_squared_error(labels=labels, predictions=outputs)
        # Run labels and predictions through VGG-19 as a single batch, then
        # split the conv3_4 activations back into the two halves.
        perceptron = Vgg19(cfg.vgg_dir)
        perceptron.build(tf.concat([labels, outputs], axis=0))
        feats = perceptron.conv3_4
        self.content_loss = tf.losses.mean_squared_error(feats[:cfg.batch_size],
                                                         feats[cfg.batch_size:])
        # Evaluation metrics (images are in [0, 255]).
        self.ssim = tf.reduce_mean(tf.image.ssim(labels, outputs, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(labels, outputs, max_val=255.0))
        self.total_loss = self.mse + 1e-3 * self.content_loss
| 1,288
| 35.828571
| 93
|
py
|
MENET
|
MENET-master/light/template/menet_shallow_new_ca_edge_gram_gradbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_deep_spa_edge_gradbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/15
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new_ca
class ModelShallowNewCaEdgeGramGradBalance(menet_shallow_new_ca.ModelShallowNewCa):
    """Shallow channel-attention de-rain model trained with MSE + Gram-texture
    + Sobel-edge losses, balanced per step by the gradient norms measured at
    the last conv layer."""

    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewCaEdgeGramGradBalance, self).__init__(mode)
        self.edge_loss = None  # Sobel-edge MSE, filled in by build_loss()

    def texture_matching_loss(self, labels, predictions):
        """MSE between channel-averaged Gram matrices of 4x4 pixel blocks.

        space_to_depth(x, 4) folds each 4x4 patch of the 3-channel image into
        48 channels, so the Gram matrix captures local texture statistics
        independent of spatial position.

        :param labels: ground-truth images, [batch, H, W, 3]
        :param predictions: model outputs, [batch, H, W, 3]
        :return: scalar texture-matching loss
        """
        labels_reshape = tf.reshape(tf.space_to_depth(labels, 4), [cfg.batch_size, -1, 48])
        predictions_reshape = tf.reshape(tf.space_to_depth(predictions, 4), [cfg.batch_size, -1, 48])
        gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
        gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
        gram_labels = tf.reduce_mean(gram_labels, [1, 2])
        gram_predictions = tf.reduce_mean(gram_predictions, [1, 2])
        return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)

    # loss_layer
    def build_loss(self):
        """Build MSE + Sobel-edge + Gram-texture losses and SSIM/PSNR metrics."""
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # Sobel edges of label and prediction in one batched call; the first
        # half of the batch holds the labels, the second half the outputs.
        edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.edge_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
                                                      predictions=edge_feat[cfg.batch_size:])
        self.content_loss = self.texture_matching_loss(labels=self.bg_img, predictions=self.output)
        # Evaluation metrics (images are in [0, 255]).
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + self.content_loss + self.edge_loss

    def build_optimizer(self):
        """Create the training op with gradient-norm-balanced loss weights."""
        # Gradient norms of each loss at the final layer's variables.
        # NOTE(review): tf.norm over the tf.gradients list assumes the scope
        # holds a single variable (layer5 is a bias-free conv).
        tvars = tf.trainable_variables(scope="derain/layer5")
        mse_grads = tf.gradients(self.mse, tvars)
        G1 = tf.norm(mse_grads)
        closs_grads = tf.gradients(self.content_loss, tvars)
        G2 = tf.norm(closs_grads)
        edge_grads = tf.gradients(self.edge_loss, tvars)
        G3 = tf.norm(edge_grads)
        G = G1 + G2 + G3
        # Weights are constants w.r.t. backprop (stop_gradient).
        w_1 = tf.stop_gradient(1. - G1 / G)  # num_tasks * (1 - task_i/tasks)
        w_2 = tf.stop_gradient(1. - G2 / G)
        w_3 = tf.stop_gradient(1. - G3 / G)
        self.total_loss = w_1 * self.mse + w_2 * self.content_loss + w_3 * self.edge_loss
        # note: cfg.num_examples_per_epoch now is `None`
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # Expose the decayed lr tensor instead of the optimizer's private `_lr`.
        self.lr = lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 5,223
| 48.283019
| 145
|
py
|
MENET
|
MENET-master/light/template/net_base.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_base.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/13
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
class ModelBase(object):
    """Base class for the de-rain models.

    Provides input placeholders, shared conv / normalization helpers, the
    default MSE loss, the global step, and the Adam training op.
    """

    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        assert mode in ["train", "eval", "inference"]
        self.initializer = tf.initializers.variance_scaling(scale=1.0, mode="fan_in")
        self.mode = mode
        # A float32 tensor with shape [batch_size, height, width, channels].
        self.bg_img = None   # clean background image
        self.syn_img = None  # synthesis rainy image
        self.r_img = None    # rain layer
        # Outputs of de-rain model.
        self.output = None
        self.r_hat = None
        self.syn_hat = None
        # A float32 scalar tensor; the total loss for the trainer to optimize.
        self.total_loss = None
        self.mse = None
        self.content_loss = None
        self.contexture_loss = None
        self.ssim = None
        self.psnr = None
        self.lr = None
        # Training op and global step tensor.
        self.train_op = None
        self.global_step = None
        # Class name, used as the model's nickname.
        self.nickname = self.__class__.__name__

    def is_training(self):
        """returns true if the model is built for training mode."""
        return self.mode == "train"

    def build_inputs(self):
        """Input prefetching, preprocessing and batching.

        :return:
            self.images: A tensor of shape [batch_size, height, width, channels].
        """
        if self.mode == "inference":
            # In inference mode, images are fed via placeholders.
            self.syn_img = tf.placeholder(dtype=tf.float32, shape=[None, None, None, 3], name="image_feed")
            # No target of input rainy image in inference mode.
            self.bg_img = None
            self.r_img = None
        else:
            # In train/eval mode, fixed-size batches are fed (from h5py data).
            self.bg_img = tf.placeholder(tf.float32, shape=(cfg.batch_size, cfg.crop_size, cfg.crop_size, 3),
                                         name='bg')
            self.syn_img = tf.placeholder(tf.float32, shape=(cfg.batch_size, cfg.crop_size, cfg.crop_size, 3),
                                          name='syn')

    def conv2(self, inputs, filters, kernel_size, strides=1, dilation_rate=1, activation=None, padding="SAME", name=None):
        """Bias-free 2-D convolution with fan-in variance-scaling init,
        wrapped in its own variable scope (checkpoint names depend on it)."""
        with tf.variable_scope(name):
            assert type(strides) == int
            assert type(kernel_size) == int
            return tf.layers.conv2d(inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
                                    padding=padding, dilation_rate=dilation_rate, activation=activation,
                                    kernel_initializer=self.initializer, use_bias=False, name=name + "_conv")

    @staticmethod
    def instance_norm(inputs):
        """Per-sample, per-channel normalization over the spatial axes."""
        ins_mean, ins_sigma = tf.nn.moments(inputs, axes=[1, 2], keep_dims=True)
        return (inputs - ins_mean) / (tf.sqrt(ins_sigma + 1e-5))

    def bn(self, inputs):
        """Batch norm in the current mode (updates stats only in training)."""
        return tf.layers.batch_normalization(inputs=inputs, training=self.is_training())

    def bn_relu(self, inputs):
        """Batch norm followed by ReLU."""
        return tf.nn.relu(tf.layers.batch_normalization(inputs=inputs, training=self.is_training()))

    def bn_lrelu(self, inputs):
        """Batch norm followed by leaky ReLU."""
        return tf.nn.leaky_relu(tf.layers.batch_normalization(inputs=inputs, training=self.is_training()))

    def in_relu(self, inputs):
        """Instance norm followed by ReLU."""
        return tf.nn.relu(self.instance_norm(inputs))

    def in_lrelu(self, inputs):
        """Instance norm followed by leaky ReLU."""
        return tf.nn.leaky_relu(self.instance_norm(inputs))

    def build_model(self):
        """Subclasses build the network here and set self.output."""
        pass

    @staticmethod
    def tf_summary_image(name, img_tensor, img_size=cfg.crop_size):
        """Tile the first 4 images into a 2x2 grid and add an image summary."""
        v = tf.reshape(img_tensor[:4, :, :, :], [2, 2, img_size, img_size, 3])
        v = tf.transpose(v, [0, 2, 1, 3, 4])
        v = tf.reshape(v, [-1, 2 * img_size, 2 * img_size, 3])
        tf.summary.image(name, v)

    # loss_layer
    def build_loss(self):
        """Default loss: plain MSE, plus SSIM/PSNR metrics ([0, 255] images)."""
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse

    def setup_global_step(self):
        """Sets up the global step tensor."""
        global_step = tf.Variable(initial_value=0, trainable=False, name="global_step",
                                  collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
        self.global_step = global_step

    def build_optimizer(self):
        """Create the Adam training op with a staircase-decayed learning rate."""
        # note: cfg.num_examples_per_epoch now is `None`
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # Expose the decayed lr tensor instead of the optimizer's private `_lr`.
        self.lr = lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)

    def build(self):
        """Creates all ops for training and evaluation."""
        self.build_inputs()
        self.build_model()
        self.setup_global_step()
        if self.mode != "inference":
            self.build_loss()
            self.build_optimizer()
| 6,054
| 38.575163
| 122
|
py
|
MENET
|
MENET-master/light/template/menet_shallow_new.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_deep.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/13
# @Desc : deep model (16 residual blocks), spatial pyramid attention
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from template import net_base
class ModelShallowNew(net_base.ModelBase):
    """Shallow de-rain network: 8 residual blocks at 1/4 spatial resolution
    (via two space_to_depth steps) with a global residual over the input.

    NOTE(review): the two attention layers below are defined but not called
    in build_model() -- presumably used by subclasses; confirm before removal.
    """

    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNew, self).__init__(mode)

    def channel_attention_layer(self, name, x):
        # Squeeze spatial resolution twice (space_to_depth + 1x1 conv), then
        # derive one sigmoid gate per channel from the global average.
        with tf.variable_scope(name):
            in_channel = x.get_shape()[-1]
            down_scale1 = self.bn_relu(self.conv2(tf.space_to_depth(x, 2, name="att_desubpixel_1"), in_channel, 1, 1, name="squeeze_1"))
            down_scale1 = self.bn_relu(self.conv2(down_scale1, in_channel, 3, name="layer_1"))
            down_scale2 = self.bn_relu(self.conv2(tf.space_to_depth(down_scale1, 2, name="att_desubpixel_2"), in_channel, 1, 1, name="squeeze_2"))
            down_scale2 = self.bn_relu(self.conv2(down_scale2, in_channel, 3, name="layer_2"))
            net = self.bn(self.conv2(down_scale2, in_channel, 3, 1, name="excitation_1"))
            # Global average pooling -> per-channel attention weight in (0, 1).
            channel_feat = tf.nn.sigmoid(tf.reduce_mean(net, [1, 2], keepdims=True))
            return tf.multiply(x, channel_feat)

    def spatial_attention_layer(self, name, x):
        # Encoder-decoder over the feature map that produces a per-pixel
        # sigmoid mask; returns x * mask + mask-logits (residual add).
        with tf.variable_scope(name):
            in_channel = x.get_shape()[-1]
            down_scale1 = self.bn(self.conv2(tf.space_to_depth(x, 2, name="desubpixel_1"), in_channel, 1, 1, name="squeeze_1"))
            net = self.bn_relu(self.conv2(tf.nn.relu(down_scale1), in_channel, 3, name="layer_1"))
            down_scale2 = self.bn(self.conv2(tf.space_to_depth(net, 2, name="desubpixel_2"), in_channel, 1, 1, name="squeeze_2"))
            net = self.bn_relu(self.conv2(tf.nn.relu(down_scale2), in_channel, 3, name="res_1"))
            net = self.bn(self.conv2(net, in_channel, 3, name="res_2"))
            # Decoder: depth_to_space upsampling with skip adds from the
            # matching encoder levels.
            up_scale1 = self.bn_relu(self.conv2(tf.depth_to_space(tf.nn.relu(tf.add(down_scale2, net)), 2, "subpixel_1"), in_channel, 1, 1, name="excitation_1"))
            net = self.bn(self.conv2(up_scale1, in_channel, 3, name="layer_2"))
            up_scale2 = self.bn_relu(self.conv2(tf.depth_to_space(tf.nn.relu(tf.add(down_scale1, net)), 2, "subpixel_2"), in_channel, 1, 1, name="excitation_2"))
            net = self.bn(self.conv2(up_scale2, in_channel, 3, name="layer_3"))
            spatial_feat = tf.nn.sigmoid(net)
            return tf.add(tf.multiply(x, spatial_feat), net)

    def build_model(self):
        # Predict a residual image, add it to the rainy input, and clip to
        # the valid pixel range (acts as a bounded ReLU).
        with tf.variable_scope("derain"):
            # Two space_to_depth steps: run the trunk at 1/4 resolution.
            net = tf.space_to_depth(self.syn_img, 2, name="desubpixel_1")
            net_1 = self.bn(self.conv2(net, 16, 3, name="layer1"))
            net = tf.nn.relu(net_1)
            net = tf.space_to_depth(net, 2, name="desubpixel_2")
            net_2 = self.bn(self.conv2(net, 64, 3, name="layer2"))
            net = tf.nn.relu(net_2)
            # 8 residual blocks; the last one omits the trailing ReLU.
            for i in range(8):
                res = net
                net = self.bn_relu(self.conv2(net, 64, 3, 1, name='res_{}_a'.format(i)))
                net = self.bn(self.conv2(net, 64, 3, 1, name='res_{}_b'.format(i)))
                if i <7:
                    net = tf.nn.relu(tf.add(net, res ))  # skip-connect
                else:
                    net = tf.add(net, res)
            # Long skip connections back to net_2 / net_1, then upsample.
            net = self.bn(self.conv2(tf.add(net_2, net), 64, 3, name="layer3"))
            net = tf.depth_to_space(net, 2, "pixel_shuffle_1")
            net = self.bn(self.conv2(tf.add(net_1, net), 16, 3, name="layer4"))
            net = self.conv2(net, 12, 3, name="layer5")
            net = tf.depth_to_space(net, 2, "pixel_shuffle_2")
            bg_hat = tf.add(self.syn_img, net)
            self.output = tf.clip_by_value(bg_hat, 0.0, 255.0, name="output")  # BReLU
| 3,896
| 49.61039
| 161
|
py
|
MENET
|
MENET-master/light/template/menet_deep_new_ca.py
|
# -*- coding: utf-8 -*-
# @File : light/net_shallow_new_ca.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/13
# @Desc : deep model (16 residual blocks), spatial pyramid attention
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from template import net_base
class ModelDeepNewCa(net_base.ModelBase):
    """Deep de-rain network: 16 residual blocks at 1/4 spatial resolution,
    with one channel-attention layer before the residual stack and a global
    residual over the rainy input."""

    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelDeepNewCa, self).__init__(mode)

    def channel_attention_layer(self, name, x):
        # Squeeze spatial resolution twice (space_to_depth + 1x1 conv), then
        # derive one sigmoid gate per channel from the global average and
        # apply it residually: x + x * gate.
        with tf.variable_scope(name):
            in_channel = x.get_shape()[-1]
            down_scale1 = self.bn_relu(self.conv2(tf.space_to_depth(x, 2, name="att_desubpixel_1"), in_channel, 1, 1, name="squeeze_1"))
            down_scale1 = self.bn_relu(self.conv2(down_scale1, in_channel, 3, name="layer_1"))
            down_scale2 = self.bn_relu(self.conv2(tf.space_to_depth(down_scale1, 2, name="att_desubpixel_2"), in_channel, 1, 1, name="squeeze_2"))
            down_scale2 = self.bn_relu(self.conv2(down_scale2, in_channel, 3, name="layer_2"))
            net = self.bn(self.conv2(down_scale2, in_channel, 3, 1, name="excitation_1"))
            # Global average pooling -> per-channel attention weight in (0, 1).
            channel_feat = tf.nn.sigmoid(tf.reduce_mean(net, [1, 2], keepdims=True))
            return tf.add(x, tf.multiply(x, channel_feat))

    def build_model(self):
        # Predict a residual image, add it to the rainy input, and clip to
        # the valid pixel range (acts as a bounded ReLU).
        with tf.variable_scope("derain"):
            # Two space_to_depth steps: run the trunk at 1/4 resolution.
            net = tf.space_to_depth(self.syn_img, 2, name="desubpixel_1")
            net_1 = self.bn(self.conv2(net, 16, 3, name="layer1"))
            net = tf.nn.relu(net_1)
            net = tf.space_to_depth(net, 2, name="desubpixel_2")
            net_2 = self.bn(self.conv2(net, 64, 3, name="layer2"))
            net = tf.nn.relu(net_2)
            net = self.channel_attention_layer("ca", net)
            # 16 residual blocks; the last one omits the trailing ReLU.
            for i in range(16):
                res = net
                net = self.bn_relu(self.conv2(net, 64, 3, 1, name='res_{}_a'.format(i)))
                net = self.bn(self.conv2(net, 64, 3, 1, name='res_{}_b'.format(i)))
                if i <15:
                    net = tf.nn.relu(tf.add(net, res))  # skip-connect
                else:
                    net = tf.add(net, res)
            # Long skip connections back to net_2 / net_1, then upsample.
            net = self.bn(self.conv2(tf.add(net_2, net), 64, 3, name="layer3"))
            net = tf.depth_to_space(net, 2, "pixel_shuffle_1")
            net = self.bn(self.conv2(tf.add(net_1, net), 16, 3, name="layer4"))
            net = self.conv2(net, 12, 3, name="layer5")
            net = tf.depth_to_space(net, 2, "pixel_shuffle_2")
            bg_hat = tf.add(self.syn_img, net)
            self.output = tf.clip_by_value(bg_hat, 0.0, 255.0, name="output")  # BReLU
| 2,718
| 42.15873
| 146
|
py
|
MENET
|
MENET-master/light/template/menet_shallow_new_ca_edge_gram_lossbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_deep_spa_edge_lossbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/15
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new_ca
class ModelShallowNewCaEdgeGramLossBalance(menet_shallow_new_ca.ModelShallowNewCa):
    """Shallow channel-attention de-rain model trained with MSE + Gram-texture
    + Sobel-edge losses, re-weighted each step by their relative magnitudes."""

    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewCaEdgeGramLossBalance, self).__init__(mode)
        self.edge_loss = None  # Sobel-edge MSE, filled in by build_loss()

    def texture_matching_loss(self, labels, predictions):
        """MSE between channel-averaged Gram matrices of 4x4 pixel blocks.

        space_to_depth(x, 4) folds each 4x4 patch of the 3-channel image into
        48 channels, so the Gram matrix captures local texture statistics
        independent of spatial position.

        :param labels: ground-truth images, [batch, H, W, 3]
        :param predictions: model outputs, [batch, H, W, 3]
        :return: scalar texture-matching loss
        """
        labels_reshape = tf.reshape(tf.space_to_depth(labels, 4), [cfg.batch_size, -1, 48])
        predictions_reshape = tf.reshape(tf.space_to_depth(predictions, 4), [cfg.batch_size, -1, 48])
        gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
        gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
        gram_labels = tf.reduce_mean(gram_labels, [1, 2])
        gram_predictions = tf.reduce_mean(gram_predictions, [1, 2])
        return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)

    # loss_layer
    def build_loss(self):
        """Build MSE + Sobel-edge + Gram-texture losses and SSIM/PSNR metrics."""
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # Sobel edges of label and prediction in one batched call; the first
        # half of the batch holds the labels, the second half the outputs.
        edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.edge_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
                                                      predictions=edge_feat[cfg.batch_size:])
        self.content_loss = self.texture_matching_loss(labels=self.bg_img, predictions=self.output)
        # Evaluation metrics (images are in [0, 255]).
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + self.content_loss + self.edge_loss

    def build_optimizer(self):
        """Create the Adam training op with magnitude-balanced loss weights."""
        # The loss ratio for task i at time t: each loss is weighted by the
        # other losses' share of the total, damping the dominant term.
        w_1 = tf.stop_gradient(1. - self.mse / self.total_loss)
        w_2 = tf.stop_gradient(1. - self.content_loss / self.total_loss)
        w_3 = tf.stop_gradient(1. - self.edge_loss / self.total_loss)
        self.total_loss = w_1 * self.mse + w_2 * self.content_loss + w_3 * self.edge_loss
        # note: cfg.num_examples_per_epoch now is `None`
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # Expose the decayed lr tensor instead of the optimizer's private `_lr`.
        self.lr = lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 4,902
| 53.477778
| 145
|
py
|
MENET
|
MENET-master/light/template/menet_shallow_new_edge_fixed.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_shallow_edge_fixed.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/15
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new
class ModelShallowNewEdgeFixed(menet_shallow_new.ModelShallowNew):
def __init__(self, mode):
"""
:param mode: one of strings "train", "eval", "inference"
"""
super(ModelShallowNewEdgeFixed, self).__init__(mode)
# loss_layer
def build_loss(self):
# Compute losses.
self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
self.content_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
predictions=edge_feat[cfg.batch_size:])
self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
self.total_loss = self.mse + 1e-2 * self.content_loss
| 1,244
| 36.727273
| 96
|
py
|
MENET
|
MENET-master/light/template/menet_shallow_new_edge_gram_lossbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_deep_color_lossbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/26
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new
# from vgg19 import Vgg19
class ModelShallowNewEdgeGramLossBalance(menet_shallow_new.ModelShallowNew):
    """Shallow de-rain model trained with MSE + Gram-texture + Sobel-edge
    losses, re-weighted each step by their relative magnitudes."""

    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewEdgeGramLossBalance, self).__init__(mode)
        self.edge_loss = None  # Sobel-edge MSE, filled in by build_loss()

    def texture_matching_loss(self, labels, predictions):
        """MSE between channel-averaged Gram matrices of 4x4 pixel blocks.

        space_to_depth(x, 4) folds each 4x4 patch of the 3-channel image into
        48 channels, so the Gram matrix captures local texture statistics
        independent of spatial position.

        :param labels: ground-truth images, [batch, H, W, 3]
        :param predictions: model outputs, [batch, H, W, 3]
        :return: scalar texture-matching loss
        """
        labels_reshape = tf.reshape(tf.space_to_depth(labels, 4), [cfg.batch_size, -1, 48])
        predictions_reshape = tf.reshape(tf.space_to_depth(predictions, 4), [cfg.batch_size, -1, 48])
        gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
        gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
        gram_labels = tf.reduce_mean(gram_labels, [1, 2])
        gram_predictions = tf.reduce_mean(gram_predictions, [1, 2])
        return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)

    # loss_layer
    def build_loss(self):
        """Build MSE + Sobel-edge + Gram-texture losses and SSIM/PSNR metrics."""
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # Sobel edges of label and prediction in one batched call; the first
        # half of the batch holds the labels, the second half the outputs.
        edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.edge_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
                                                      predictions=edge_feat[cfg.batch_size:])
        self.content_loss = self.texture_matching_loss(labels=self.bg_img, predictions=self.output)
        # Evaluation metrics (images are in [0, 255]).
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + self.content_loss + self.edge_loss

    def build_optimizer(self):
        """Create the Adam training op with magnitude-balanced loss weights."""
        # The loss ratio for task i at time t: each loss is weighted by the
        # other losses' share of the total, damping the dominant term.
        w_1 = tf.stop_gradient(1. - self.mse / self.total_loss)
        w_2 = tf.stop_gradient(1. - self.content_loss / self.total_loss)
        w_3 = tf.stop_gradient(1. - self.edge_loss / self.total_loss)
        self.total_loss = w_1 * self.mse + w_2 * self.content_loss + w_3 * self.edge_loss
        # note: cfg.num_examples_per_epoch now is `None`
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # Expose the decayed lr tensor instead of the optimizer's private `_lr`.
        self.lr = lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 4,115
| 49.814815
| 105
|
py
|
MENET
|
MENET-master/light/utils/inference_wrapper.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/inference_wrapper.py
# @Info : @ TSMC-SIGGRAPH, 2018/7/12
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
from net import Model
from utils import inference_wrapper_base
class InferenceWrapper(inference_wrapper_base.InferenceWrapperBase):
    """Wrapper that builds the de-rain model in inference mode and runs
    single forward passes through it."""

    def __init__(self):
        super(InferenceWrapper, self).__init__()
        # Building the graph here also lets us record the model's class name.
        self.nickname = self.build_model().nickname

    def build_model(self):
        """Construct the inference graph and return the model object."""
        model = Model(mode="inference")
        model.build()
        return model

    def inference_step(self, sess, input_feed):
        """Feed one image batch and fetch the de-rained output tensor."""
        feed = {"image_feed:0": input_feed}
        return sess.run(fetches="derain/output:0", feed_dict=feed)
| 1,225
| 29.65
| 81
|
py
|
MENET
|
MENET-master/light/utils/inference_wrapper_base.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/inference_wrapper_base.py
# @Info : @ TSMC-SIGGRAPH, 2018/7/12
# @Desc : refer to google's im2txt
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import os.path
import tensorflow as tf
# pylint: disable=unused-argument
class InferenceWrapperBase(object):
    """Base wrapper class for performing inference with an image-to-text model."""

    def __init__(self):
        pass

    def build_model(self):
        """Build the inference model; subclasses must override.

        Returns:
            model: The model object.
        """
        tf.logging.fatal("Please implement build_model in subclass")

    def _create_restore_fn(self, checkpoint_path, saver):
        """Return a function restore_fn(sess) that loads model variables.

        Args:
            checkpoint_path: Checkpoint file or a directory containing one.
            saver: Saver used to restore variables from the checkpoint.

        Raises:
            ValueError: If the directory contains no checkpoint file.
        """
        if tf.gfile.IsDirectory(checkpoint_path):
            # Resolve a directory to its most recent checkpoint file.
            latest = tf.train.latest_checkpoint(checkpoint_path)
            if not latest:
                raise ValueError("No checkpoint file found in: %s" % checkpoint_path)
            checkpoint_path = latest

        def restore(sess):
            tf.logging.info("Loading model from checkpoint: %s", checkpoint_path)
            saver.restore(sess, checkpoint_path)
            tf.logging.info("Successfully loaded checkpoint: %s",
                            os.path.basename(checkpoint_path))
            print("Successfully loaded checkpoint: ", os.path.basename(checkpoint_path))

        return restore

    def build_graph_from_config(self, checkpoint_path):
        """Create a Saver and return the matching restore function.

        Args:
            checkpoint_path: Checkpoint file or a directory containing one.

        Returns:
            restore_fn: A function such that restore_fn(sess) loads model
            variables from the checkpoint file.
        """
        tf.logging.info("Building model.")
        # self.build_model() is invoked from the subclass __init__ so the
        # model's class name is available before the graph is restored.
        saver = tf.train.Saver()
        return self._create_restore_fn(checkpoint_path, saver)

    def inference_step(self, sess, input_feed):
        """Run one step of inference; subclasses must override.

        Args:
            sess: TensorFlow Session object.
            input_feed: A numpy array of shape [batch_size].
        """
        tf.logging.fatal("Please implement inference_step in subclass")
| 3,683
| 37.375
| 88
|
py
|
MENET
|
MENET-master/light/utils/__init__.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/__init__.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/29
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
| 202
| 24.375
| 81
|
py
|
MENET
|
MENET-master/light/utils/transforms.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/transforms.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/29
# @Desc : @ sumihui : refer to pytorch
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import numpy as np
from PIL import Image
class Compose(object):
    """Chain several transforms into one callable applied left to right.

    Args:
        transforms (list of ``Transform`` objects): transforms to compose.

    Example:
        >> transforms.Compose([
        >>     transforms.FiveCrop(10),
        >>     lambda crops: np.stack([transforms.ToArray(crop) for crop in crops])
        >> ])
    """
    def __init__(self, transforms):
        self.transforms = transforms
    def __call__(self, img):
        # Feed the output of each transform into the next one.
        result = img
        for transform in self.transforms:
            result = transform(result)
        return result
    def __repr__(self):
        body = ''.join('\n' + ' {0}'.format(t) for t in self.transforms)
        return self.__class__.__name__ + '(' + body + '\n)'
class FiveCrop(object):
    """Crop the given PIL Image into its four corners plus the central crop.

    .. Note::
        A tuple of images is returned, so the number of outputs may differ
        from what your Dataset yields per sample; see the example below for
        one way to handle that.

    Args:
        size (sequence or int): Desired output size of the crop. An ``int``
            produces a square ``(size, size)`` crop; otherwise pass ``(h, w)``.
        horizontal_flip (bool): If True, also return the five crops of the
            horizontally flipped image (ten crops total).

    Example:
        >> transform = Compose([
        >>     FiveCrop(size),  # this is a list of PIL Images
        >>     lambda crops: np.stack([transforms.ToArray(crop) for crop in crops])  # returns a 4D ndarray
        >> ])
        >> # In your test loop you can do the following:
        >> input, target = batch  # input is a 5d tensor, target is 2d
        >> bs, ncrops, h, w, c = input.size()
        >> result = model(input.reshape(-1, h, w, c))  # fuse batch size and ncrops
    """
    def __init__(self, size, horizontal_flip=False):
        if isinstance(size, int):
            self.size = (size, size)
        else:
            assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
            self.size = size
        self.horizontal_flip = horizontal_flip
    def __call__(self, img):
        """Apply five-crop (and, optionally, the flipped five-crop) to *img*.

        :param img: (PIL Image). Image to be cropped.
        :return: tuple of cropped PIL Images.
        """
        if not isinstance(img, Image.Image):
            raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
        crops = self.five_crop(img)
        if not self.horizontal_flip:
            return crops
        mirrored = img.transpose(Image.FLIP_LEFT_RIGHT)
        return crops + self.five_crop(mirrored)
    def __repr__(self):
        return '{}(size={})'.format(self.__class__.__name__, self.size)
    def five_crop(self, img):
        """Return the four corner crops and the center crop of *img*.

        Returns:
            tuple: (tl, tr, bl, br, center) — top left, top right, bottom
            left, bottom right and center crop.

        Raises:
            ValueError: if the requested crop is larger than the image.
        """
        w, h = img.size
        crop_h, crop_w = self.size
        if crop_w > w or crop_h > h:
            raise ValueError("Requested crop size {} is bigger than input size {}".format(self.size,
                                                                                          (h, w)))
        corner_boxes = [(0, 0, crop_w, crop_h),
                        (w - crop_w, 0, w, crop_h),
                        (0, h - crop_h, crop_w, h),
                        (w - crop_w, h - crop_h, w, h)]
        tl, tr, bl, br = (img.crop(box) for box in corner_boxes)
        return (tl, tr, bl, br, self.center_crop(img))
    def center_crop(self, img):
        """Crop the central ``self.size`` region of *img*.

        :param img: PIL Image.
        :return: PIL Image: Cropped image.
        """
        w, h = img.size
        th, tw = self.size  # Height/Width of the cropped image.
        top = int(round((h - th) / 2.))   # Upper pixel coordinate.
        left = int(round((w - tw) / 2.))  # Left pixel coordinate.
        return img.crop((left, top, left + tw, top + th))
class ToArray(object):
    """Convert a ``PIL Image`` to ``numpy.ndarray``.

    Converts a PIL Image (H x W x C) in the range [0, 255] to a
    ``numpy.ndarray`` of shape (H x W x C) and dtype ``uint8``, keeping the
    original [0, 255] range. Note: unlike torchvision's ``ToTensor``, NO
    rescaling to [0.0, 1.0] is performed.
    """
    def __call__(self, pic):
        """
        Args:
            pic (PIL Image): Image to be converted to numpy.ndarray.
        Returns:
            numpy.ndarray: Converted ``uint8`` image, values in [0, 255].
        """
        return np.asarray(pic, "uint8")  # note: 2019/05/29 uint8
    def __repr__(self):
        return self.__class__.__name__ + '()'
| 5,204
| 33.932886
| 106
|
py
|
MENET
|
MENET-master/heavy/inference.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/inference.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/30
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import os
from datetime import datetime
import cv2
import numpy as np
import tensorflow as tf
from configuration import cfg
from utils import inference_wrapper
os.environ["CUDA_VISIBLE_DEVICES"] = cfg.gpu_id # only /gpu:gpu_id is visible
def main(_):
    """Run de-rain inference over every ``cfg.ext`` image in ``cfg.infer_in_dir``.

    Restores the checkpoint for the configured model, resizes each input so
    both sides are multiples of ``cfg.scale_ratio``, runs one inference step,
    resizes the result back to the original resolution and writes it into
    ``cfg.infer_out_dir``.
    """
    # Build the inference graph.
    g = tf.Graph()
    with g.as_default():
        model = inference_wrapper.InferenceWrapper()
        restore_fn = model.build_graph_from_config(os.path.join(cfg.model_dir, model.nickname))
    g.finalize()
    print("Restore model from directory: {}".format(os.path.join(cfg.model_dir, model.nickname)))
    # Collect all input image paths with the configured extension.
    filenames = list(filter(lambda x: x.endswith(cfg.ext), os.listdir(cfg.infer_in_dir)))
    filenames = [os.path.join(cfg.infer_in_dir, filename) for filename in filenames]
    print("Running de-rain infer on %d files from directory: %s" % (len(filenames), cfg.infer_in_dir))
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(graph=g, config=config) as sess:
        # Load the model from checkpoint.
        restore_fn(sess)
        if not os.path.exists(cfg.infer_out_dir):
            os.makedirs(cfg.infer_out_dir)
        for i, filename in enumerate(filenames):
            bgr = cv2.imread(filename)
            h, w = bgr.shape[:2]
            if w % cfg.scale_ratio != 0 or h % cfg.scale_ratio != 0:
                # Grow each side to the next multiple of scale_ratio so the
                # model's input-size constraint is met (see cfg.scale_ratio).
                aw = (cfg.scale_ratio - w % cfg.scale_ratio) % cfg.scale_ratio
                ah = (cfg.scale_ratio - h % cfg.scale_ratio) % cfg.scale_ratio
                bgr = cv2.resize(bgr, (w + aw, h + ah), interpolation=cv2.INTER_CUBIC)
            # OpenCV loads BGR; the model is fed a single-image RGB float32 batch.
            rgb_array = np.expand_dims(np.asarray(bgr[..., ::-1], "float32"), 0)
            rgb_array = model.inference_step(sess=sess, input_feed=rgb_array)[0]
            basename = os.path.basename(filename).split(".")[0]
            # Back to BGR and the original resolution for cv2.imwrite.
            b_output = cv2.resize(rgb_array[..., ::-1], (w, h), interpolation=cv2.INTER_CUBIC)
            print(basename, b_output.shape, np.max(b_output),np.min(b_output),np.mean(b_output))
            cv2.imwrite(os.path.join(cfg.infer_out_dir,
                                     "{}@{}_{}.png".format(basename, model.nickname, datetime.now().date())), b_output)
if __name__ == "__main__":
    tf.app.run()
| 2,444
| 37.809524
| 119
|
py
|
MENET
|
MENET-master/heavy/build_h5_dataset.py
|
# -*- coding: utf-8 -*-
# @File : derain_gradnorm_tf/build_h5_dataset.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/29
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import os
from random import shuffle
import h5py
import numpy as np
from PIL import Image
from configuration import cfg
from utils import transforms
class DataLoader(object):
    """Indexable collection of (rainy, background, rain-streak) crop stacks.

    File naming convention inside ``image_dir``: clean labels start with
    ``norain``; the rainy input replaces that prefix with ``rain`` (linear
    blend) or ``screenrainy`` (screen blend); the rain-streak layer replaces
    it with ``rainstreak``.
    """
    def __init__(self, image_dir, crop_size=64, blend_mode="linear", horizontal_flip=False):
        """
        :param image_dir (str): path of the images
        :param crop_size (int or tuple): crop size, default is 64
        :param blend_mode (str): pick one of two, `screen` or `linear`, represents image composition type
        :param horizontal_flip (bool): Whether use horizontal flipping or not
        """
        # super(DataLoader, self).__init__()
        # 1. initialize file path or a list of file names.
        assert blend_mode in ["screen", "linear"]
        self.blend_mode = blend_mode
        self.data_path = image_dir
        self.all_filenames = os.listdir(self.data_path)
        # Ground-truth (clean) images are the ones prefixed with "norain".
        self.label_filenames = list(filter(lambda filename: filename.startswith("norain"), self.all_filenames))
        self.num_files = len(self.label_filenames)
        print("[DataLoader] preprocess {} files on dir `{}`".format(self.num_files, self.data_path))
        # Five crops per image (ten with flipping), stacked into one ndarray.
        self.transform = transforms.Compose([transforms.FiveCrop(crop_size, horizontal_flip),  # tuple (tl, tr, bl, br, center)
                                             lambda crops: np.stack([transforms.ToArray()(crop) for crop in crops])])
    def __getitem__(self, item):
        """Return a dict {'syn': rainy, 'bg': clean, 'r': streak}, each value
        a stacked ndarray of crops (after ``self.transform``)."""
        # 1. read one data from file (e.g. using PIL.Image.open).
        # 2. Preprocess the data (e.g. Transform).
        # 3. Return a data pair (e.g. image and label).
        if self.blend_mode == "screen":
            input_image = Image.open(os.path.join(self.data_path,
                                                  self.label_filenames[item].replace("norain", "screenrainy")))
        else:
            input_image = Image.open(os.path.join(self.data_path,
                                                  self.label_filenames[item].replace("norain", "rain")))
        label_image = Image.open(os.path.join(self.data_path, self.label_filenames[item]))
        noise_image = Image.open(os.path.join(self.data_path, self.label_filenames[item].replace("norain", "rainstreak")))
        sample = {'syn': input_image, 'bg': label_image, 'r': noise_image}
        if self.transform:
            sample['syn'] = self.transform(sample['syn'])
            sample['bg'] = self.transform(sample['bg'])
            sample['r'] = self.transform(sample['r'])
        return sample
    def __len__(self):
        # the total size of dataset.(number of samples)
        return self.num_files
def save2h5(save_path="temp.h5", image_dir="/dataset/cvpr2017_derain_dataset/training_data/RainTrainL",
            crop_size=224, blend_mode="linear", horizontal_flip=False):
    """Crop every sample produced by ``DataLoader`` and store it in one HDF5 file.

    The file contains three gzip-compressed datasets: ``syn`` (rainy input,
    3 channels), ``bg`` (clean background, 3 channels) and ``r`` (rain-streak
    layer, 1 channel). Samples are shuffled once before writing.

    Args:
        save_path (str): output ``.h5`` path; parent directories are created
            on demand.
        image_dir (str): directory holding the ``norain*``/``rain*`` images.
        crop_size (int or tuple): crop size forwarded to ``DataLoader``.
        blend_mode (str): ``linear`` or ``screen`` composition type.
        horizontal_flip (bool): also keep horizontally flipped crops.
    """
    dataloader = DataLoader(image_dir, crop_size, blend_mode, horizontal_flip)
    img_pair = []
    for samples in dataloader:
        # The streak layer is single channel; give it an explicit channel axis
        # so it can be concatenated with the 3-channel input/label crops.
        samples['r'] = np.expand_dims(samples['r'], -1)
        img_pair.append(np.concatenate([samples['syn'], samples['bg'], samples['r']], -1))
    img_pair_ndarray = np.concatenate(img_pair, 0)
    # Shuffle once so sequential batch reads later are already randomized.
    idx = np.arange(img_pair_ndarray.shape[0])
    shuffle(idx)
    img_pair_ndarray = np.take(img_pair_ndarray, idx, 0)
    # Channels 0-2: rainy input; 3-5: background; 6: streak.
    input_ndarray, label_ndarray, noise_ndarray = np.split(img_pair_ndarray, [3, 6], -1)
    dirname = os.path.dirname(save_path)
    # Bug fix: `os.mkdir(dirname)` raised FileNotFoundError when `save_path`
    # had no directory component (dirname == "") or a nested, not-yet-created
    # parent; makedirs(..., exist_ok=True) handles both (and races).
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    # Use a context manager so the file handle is released even on error.
    with h5py.File(save_path, 'w') as f:
        f.create_dataset("syn", data=input_ndarray, compression="gzip")
        f.create_dataset("bg", data=label_ndarray, compression="gzip")
        f.create_dataset("r", data=noise_ndarray, compression="gzip")
if __name__ == '__main__':
    img_dir = os.path.join(cfg.original_image_dir, cfg.sub_dir)
    save2h5("{}.h5".format(os.path.join(cfg.test_dir, cfg.sub_dir)), img_dir, cfg.crop_size, "linear",
            cfg.horizontal_flip)
| 4,172
| 42.926316
| 127
|
py
|
MENET
|
MENET-master/heavy/validation.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/validation.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/30
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import os
import platform
from datetime import datetime
from time import time
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from configuration import cfg
from data_helper import get_batch
from net import Model
os.environ["CUDA_VISIBLE_DEVICES"] = cfg.gpu_id
def main(_):
    """Evaluate the configured model on an HDF5 test set.

    Restores the latest checkpoint for ``cfg.model_name``, accumulates
    MSE/SSIM/PSNR and per-batch timing over the whole test file, and appends
    the averaged metrics plus environment info to
    ``cfg.metric_dir``/metrics.txt.
    """
    # build model
    model = Model("eval")
    model.build()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(max_to_keep=cfg.max_checkpoints_to_keep)
        if os.path.exists(os.path.join(cfg.model_dir, model.nickname, "checkpoint")):
            model_file = tf.train.latest_checkpoint(os.path.join(cfg.model_dir, model.nickname))
            saver.restore(sess, model_file)
        else:
            # No trained weights for this model: evaluating would be meaningless.
            exit()
        ssim_list = list()
        psnr_list = list()
        mse_list = list()
        time_list = list()
        for batch_syn, batch_bg in tqdm(get_batch(os.path.join(cfg.test_dir, cfg.data_filename), cfg.batch_size)):
            batch_syn = np.asarray(batch_syn, "float32")
            batch_bg = np.asarray(batch_bg, "float32")
            feed_dict = {model.bg_img: batch_bg, model.syn_img: batch_syn}
            start = time()
            mse, ssim, psnr = sess.run([model.mse, model.ssim, model.psnr], feed_dict=feed_dict)
            end = time()
            ssim_list.append(ssim)
            psnr_list.append(psnr)
            mse_list.append(mse)
            time_list.append(end - start)
        avg_ssim = np.mean(ssim_list)
        avg_psnr = np.mean(psnr_list)
        avg_mse = np.mean(mse_list)
        # Each timed run processed a whole batch -> divide for per-image latency.
        avg_time = np.mean(time_list) / cfg.batch_size
        if not os.path.exists(cfg.metric_dir):
            os.makedirs(cfg.metric_dir)
        # Append (mode 'a') so successive evaluation runs accumulate in one log.
        with open(os.path.join(cfg.metric_dir, 'metrics.txt'), 'a') as f:
            f.write("os:\t{}\t\t\tdate:\t{}\n".format(platform.system(), datetime.now()))
            f.write("model:\t{}\t\timage_size:\t{}\n".format(model.nickname, cfg.crop_size))
            f.write("data:\t{}\t\tgpu_id:\t{}\n".format(cfg.data_filename, cfg.gpu_id))
            f.write("speed:\t{:.8f} s/item\tmse:\t{:.8f}\n".format(avg_time, avg_mse))
            f.write("ssim:\t{:.8f}\t\tpsnr:\t{:.8f}\n\n".format(avg_ssim, avg_psnr))
        print(" ------ Arriving at the end of data ------ ")
if __name__ == '__main__':
    tf.app.run()
| 2,654
| 31.777778
| 114
|
py
|
MENET
|
MENET-master/heavy/configuration.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/configuration.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/29
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import argparse


def _str2bool(value):
    """Parse a boolean command-line flag.

    ``type=bool`` is an argparse trap: ``bool("False")`` is ``True`` because
    every non-empty string is truthy. This converter accepts the common
    spellings of true/false and rejects anything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected, got {!r}".format(value))


parser = argparse.ArgumentParser()
# Model specification
parser.add_argument("--in_channel", type=int, default=3)
parser.add_argument("--n_feats", type=int, default=32)
parser.add_argument("--num_of_down_scale", type=int, default=2)
parser.add_argument("--gen_resblocks", type=int, default=6)
parser.add_argument("--discrim_blocks", type=int, default=3)
parser.add_argument("--model_name", type=str, default="shallow_edge_lossbalance", help="deep_new_ca_edge_gram_gradbalance/deep_new_ca_edge_gram_lossbalance")
# Data specification
parser.add_argument('--original_image_dir', type=str, default="/dataset/cvpr2017_derain_dataset/testing_data",
                    help='training/testing image files base dir')
parser.add_argument('--sub_dir', type=str, default="Rain100L",
                    help='training_data{RainTrainL, RainTrainH}, testing_data{Rain100L,Rain100H}')
parser.add_argument('--blend_mode', type=str, default="linear", help='`linear` or `screen`')
parser.add_argument('--crop_size', type=int, default=224, help='')
# Bug fix: `type=bool` turned any non-empty string (including "False") into True.
parser.add_argument('--horizontal_flip', type=_str2bool, default=True, help='')
# Training or test specification
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--batch_size', type=int, default=4, help='')
parser.add_argument('--epochs', type=int, default=100, help='')
parser.add_argument('--decay_epochs', type=int, default=40, help='')
parser.add_argument('--decay_factor', type=float, default=1e-1, help='learning rate decay factor')
# Bug fix: default was the float 1e5 although the declared type is int
# (argparse does not pass defaults through `type`).
parser.add_argument('--num_examples_per_epoch', type=int, default=100000,
                    help='Number of examples per epoch of training dataset')
parser.add_argument('--vgg_dir', type=str, default="/dataset/pretrained_model",
                    help='dir of vgg pre-trained params file')
parser.add_argument('--critic_updates', type=int, default=5,
                    help='Number of updates of critic')
parser.add_argument('--max_checkpoints_to_keep', type=int, default=1, help='')
parser.add_argument('--num_steps_per_display', type=int, default=10, help='')
parser.add_argument('--train_dir', type=str, default="/dataset/derain_h5", help=' h5py format dataset directory.')
parser.add_argument('--test_dir', type=str, default="/dataset/derain_h5", help='')
parser.add_argument('--data_filename', type=str, default="Rain100L.h5", help=' h5py format train/test dataset file name.')
parser.add_argument('--tensorboard', type=str, default="tensorboard", help='')
parser.add_argument('--model_dir', type=str, default="model_params", help='')
parser.add_argument('--gpu_id', type=str, default="0", help='')
parser.add_argument('--metric_dir', type=str, default="metric", help='')
parser.add_argument('--infer_in_dir', type=str, default="img/examples", help='')
parser.add_argument('--infer_out_dir', type=str, default="img/results", help='')
parser.add_argument('--scale_ratio', type=int, default=16, help='down sampling scale ratio, for inference image resize')
parser.add_argument('--ext', type=str, default=".png", help='`.jpg` or `.png`. In the inference stage, the extension of the picture')
args = parser.parse_args()
class ModelConfig(object):
    """Wrapper class for configuring model parameters.

    Mirrors every parsed command-line argument from the module-level ``args``
    namespace onto the instance, so ``cfg.<name>`` equals ``args.<name>``.
    """
    def __init__(self):
        # Copy each argparse attribute onto this instance verbatim.
        for attr_name, attr_value in vars(args).items():
            setattr(self, attr_name, attr_value)
cfg = ModelConfig()
if __name__ == '__main__':
    # Developer helper: print the assignment lines for a hand-written __init__.
    for name in args.__dict__:
        print("self.{}=args.{}".format(name, name))
| 5,050
| 44.918182
| 157
|
py
|
MENET
|
MENET-master/heavy/vgg19.py
|
import inspect
import os
import time
import numpy as np
import tensorflow as tf
# VGG_MEAN = [103.939, 116.779, 123.68]
class Vgg19:
    """Truncated VGG-19 (conv1_1 .. conv3_4) built from pre-trained ``vgg19.npy``.

    Every weight is loaded as a ``tf.constant``, so the network is frozen and
    contributes no trainable variables; it is used as a fixed feature
    extractor for perceptual losses.
    """
    def __init__(self, vgg19_npy_path=None):
        """
        :param vgg19_npy_path: directory that contains ``vgg19.npy``.
            Defaults to the directory of this source file.
        """
        if vgg19_npy_path is None:
            # Default to the directory this module lives in.
            path = inspect.getfile(Vgg19)
            vgg19_npy_path = os.path.abspath(os.path.join(path, os.pardir))
            # Bug fix: the old code appended "vgg19.npy" to this default path,
            # which was then joined with "vgg19.npy" AGAIN in np.load below,
            # producing ".../vgg19.npy/vgg19.npy" and a guaranteed failure.
            print(vgg19_npy_path)
        self.data_dict = np.load(os.path.join(vgg19_npy_path, "vgg19.npy"), encoding='latin1', allow_pickle=True).item()
        # ImageNet per-channel means in BGR order, shaped for NHWC broadcasting.
        self.vgg_mean = tf.reshape(tf.convert_to_tensor([103.939, 116.779, 123.68], tf.float32), (1, 1, 1, 3))
        print("npy file loaded")
    def build(self, rgb):
        """
        load variable from npy to build the VGG

        :param rgb: rgb image [batch, height, width, 3] values scaled [0, 255]
        """
        start_time = time.time()
        print("build model started")
        # RGB -> BGR (VGG's training layout), then subtract the channel means.
        bgr = rgb[:, :, :, ::-1]
        bgr = bgr - self.vgg_mean
        self.conv1_1 = self.conv_layer(bgr, "conv1_1")
        self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
        self.pool1 = self.max_pool(self.conv1_2, 'pool1')
        self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
        self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
        self.pool2 = self.max_pool(self.conv2_2, 'pool2')
        self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
        self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
        self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
        self.conv3_4 = self.conv_layer(self.conv3_3, "conv3_4")
        # Deeper layers (pool3 .. fc8/prob) are intentionally omitted: only
        # the low/mid-level features up to conv3_4 are needed for the losses.
        print(("build model finished: %ds" % (time.time() - start_time)))
    def avg_pool(self, bottom, name):
        """2x2 average pooling with stride 2 (SAME padding)."""
        return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
    def max_pool(self, bottom, name):
        """2x2 max pooling with stride 2 (SAME padding)."""
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
    def conv_layer(self, bottom, name):
        """3x3 conv + bias + ReLU using the frozen pre-trained weights for `name`."""
        with tf.variable_scope(name):
            filt = self.get_conv_filter(name)
            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            conv_biases = self.get_bias(name)
            bias = tf.nn.bias_add(conv, conv_biases)
            relu = tf.nn.relu(bias)
            return relu
    def fc_layer(self, bottom, name):
        """Fully-connected layer over the flattened input (unused in the
        truncated build, kept for completeness)."""
        with tf.variable_scope(name):
            shape = bottom.get_shape().as_list()
            dim = 1
            for d in shape[1:]:
                dim *= d
            x = tf.reshape(bottom, [-1, dim])
            weights = self.get_fc_weight(name)
            biases = self.get_bias(name)
            # Fully connected layer. Note that the '+' operation automatically
            # broadcasts the biases.
            fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
            return fc
    def get_conv_filter(self, name):
        # data_dict[name][0] holds the conv kernel for that layer.
        return tf.constant(self.data_dict[name][0], name="filter")
    def get_bias(self, name):
        # data_dict[name][1] holds the bias vector for that layer.
        return tf.constant(self.data_dict[name][1], name="biases")
    def get_fc_weight(self, name):
        return tf.constant(self.data_dict[name][0], name="weights")
| 4,400
| 35.675
| 120
|
py
|
MENET
|
MENET-master/heavy/net.py
|
# -*- coding: utf-8 -*-
# @File : MENET/net.py
# @Info : @ TSMC-SIGGRAPH, 2019/8/10
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
from configuration import cfg
from template import menet_shallow_new, menet_shallow_new_ca
from template import menet_shallow_new_edge_lossbalance, menet_shallow_new_edge_gradbalance, menet_shallow_new_edge_fixed
from template import menet_shallow_new_edge_gram_gradbalance, menet_shallow_new_edge_gram_lossbalance
# Bug fix: menet_shallow_new_ca was imported twice (also on the line above
# with the gram variants); the duplicate has been removed.
from template import menet_shallow_new_ca_edge_gram_gradbalance, menet_shallow_new_ca_edge_gram_lossbalance
from template import menet_deep_new_ca_edge_gram_gradbalance, menet_deep_new_ca_edge_gram_lossbalance
from template import menet_shallow_new_vgg_fixed

# Registry mapping the configuration name (cfg.model_name) to the concrete
# model class used throughout train/validation/inference.
models = {
    "menet_shallow_new": menet_shallow_new.ModelShallowNew,
    "menet_shallow_new_edge_fixed": menet_shallow_new_edge_fixed.ModelShallowNewEdgeFixed,
    "menet_shallow_new_edge_lossbalance": menet_shallow_new_edge_lossbalance.ModelShallowNewEdgeLossBalance,
    "menet_shallow_new_edge_gradbalance": menet_shallow_new_edge_gradbalance.ModelShallowNewEdgeGradBalance,
    "menet_shallow_new_edge_gram_lossbalance": menet_shallow_new_edge_gram_lossbalance.ModelShallowNewEdgeGramLossBalance,
    "menet_shallow_new_edge_gram_gradbalance": menet_shallow_new_edge_gram_gradbalance.ModelShallowNewEdgeGramGradBalance,
    "menet_shallow_new_ca": menet_shallow_new_ca.ModelShallowNewCa,
    "menet_shallow_new_ca_edge_gram_lossbalance": menet_shallow_new_ca_edge_gram_lossbalance.ModelShallowNewCaEdgeGramLossBalance,
    "menet_shallow_new_ca_edge_gram_gradbalance": menet_shallow_new_ca_edge_gram_gradbalance.ModelShallowNewCaEdgeGramGradBalance,
    "menet_deep_new_ca_edge_gram_lossbalance": menet_deep_new_ca_edge_gram_lossbalance.ModelDeepNewCaEdgeGramLossBalance,
    "menet_deep_new_ca_edge_gram_gradbalance": menet_deep_new_ca_edge_gram_gradbalance.ModelDeepNewCaEdgeGramGradBalance,
    "menet_shallow_new_vgg_fixed": menet_shallow_new_vgg_fixed.ModelShallowNewVGGFixed,
}
# The model class selected by the current configuration.
Model = models[cfg.model_name]
| 2,110
| 54.552632
| 130
|
py
|
MENET
|
MENET-master/heavy/data_helper.py
|
# -*- coding: utf-8 -*-
# @File : derain_gradnorm_tf/data_helper.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/29
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
from random import shuffle
import h5py
import numpy as np
from configuration import cfg
# from matplotlib import pyplot as plt
def get_batch(filename="dataset/derain_h5/RainTrainL.h5", batch_size=cfg.batch_size, is_shuffle=True):
    """Yield (rainy, background) batches from an HDF5 dataset file.

    Args:
        filename (str): path of the ``.h5`` file containing the ``syn``
            (rainy input) and ``bg`` (clean background) datasets.
        batch_size (int): samples per yielded batch; trailing samples that do
            not fill a whole batch are dropped.
        is_shuffle (bool): shuffle the sample order once per call.

    Yields:
        (batch_x, batch_y): ndarrays of rainy inputs and clean labels.

    Side effect: sets ``cfg.num_examples_per_epoch`` to the number of usable
    samples so the training LR schedule can use the real dataset size.
    """
    # Bug fix: the file handle was never closed; the `with` block releases it
    # once the generator is exhausted (or garbage-collected after abandonment).
    with h5py.File(filename, "r") as f:
        inputs = f["syn"]
        labels = f["bg"]
        num_samples = inputs.len()
        num_batches = num_samples // batch_size
        if num_samples % batch_size != 0:
            # Drop the remainder so every yielded batch is full.
            num_samples = num_batches * batch_size
            inputs = inputs[:num_samples, ...]
            labels = labels[:num_samples, ...]
        cfg.num_examples_per_epoch = num_samples
        print("[get_batch] processing {} samples, batch_size {}, batches {}".format(num_samples, batch_size, num_batches))
        idx = np.arange(num_samples)
        if is_shuffle:
            shuffle(idx)
        # Note: to avoid OOM, do not materialize the whole shuffled array
        # (e.g. np.take(inputs, idx, 0)); index each batch lazily instead.
        for i in range(num_batches):
            # np.sort() avoids h5py's "Indexing elements must be in increasing order" TypeError.
            batch_x = inputs[np.sort(idx[i * batch_size:i * batch_size + batch_size]), ...]
            batch_y = labels[np.sort(idx[i * batch_size:i * batch_size + batch_size]), ...]
            yield batch_x, batch_y
if __name__ == '__main__':
    for batch_x, batch_y in get_batch("dataset/derain_h5/Rain100L.h5", 4):
        print("test data helper", batch_x.shape, batch_y.shape)
        break
| 1,977
| 31.42623
| 118
|
py
|
MENET
|
MENET-master/heavy/train.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/train.py
# @Info : @ TSMC-SIGGRAPH, 2019/8/10
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import os
import numpy as np
import tensorflow as tf
from configuration import cfg
from data_helper import get_batch
from net import Model
os.environ["CUDA_VISIBLE_DEVICES"] = cfg.gpu_id
def main(_):
    """Train the configured model, resuming from the latest checkpoint if any.

    Checkpoints are written under ``cfg.model_dir/<model.nickname>`` once per
    epoch and once more after the final epoch.
    """
    # build model
    model = Model("train")
    model.build()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(max_to_keep=cfg.max_checkpoints_to_keep)
        if os.path.exists(os.path.join(cfg.model_dir, model.nickname, "checkpoint")):
            # Resume from the most recent checkpoint for this model.
            model_file = tf.train.latest_checkpoint(os.path.join(cfg.model_dir, model.nickname))
            saver.restore(sess, model_file)
        else:
            # Fresh run: make sure the checkpoint directory exists.
            if not os.path.exists(os.path.join(cfg.model_dir, model.nickname)):
                os.makedirs(os.path.join(cfg.model_dir, model.nickname))
        # training loop
        for epoch in range(cfg.epochs):
            # iterate the whole dataset n epochs
            print("iterate the whole dataset {} epochs".format(cfg.epochs))
            for i, samples in enumerate(get_batch(os.path.join(cfg.train_dir, cfg.data_filename), cfg.batch_size, True)):
                batch_syn, batch_bg = samples
                step = tf.train.global_step(sess, model.global_step)
                batch_syn = np.asarray(batch_syn, "float32")
                batch_bg = np.asarray(batch_bg, "float32")
                feed_dict = {model.bg_img: batch_bg, model.syn_img: batch_syn}
                if step % cfg.num_steps_per_display == 0:
                    # Periodically fetch metrics alongside the train op for logging.
                    _, lr, total_loss, mse, ssim, psnr = sess.run([model.train_op, model.lr, model.total_loss, model.mse,
                                                                   model.ssim, model.psnr],
                                                                  feed_dict=feed_dict)
                    print("[{}/{}] lr: {:.8f}, total_loss: {:.6f}, mse: {:.6f}, ssim: {:.4f}, "
                          "psnr: {:.4f}".format(epoch, step, lr, total_loss, mse, ssim, psnr))
                else:
                    sess.run(model.train_op, feed_dict=feed_dict)
            saver.save(sess, os.path.join(cfg.model_dir, model.nickname, 'model.epoch-{}'.format(epoch)))
        saver.save(sess, os.path.join(cfg.model_dir, model.nickname, 'model.final-{}'.format(cfg.epochs)))
        print(" ------ Arriving at the end of data ------ ")
if __name__ == '__main__':
    tf.app.run()
| 2,689
| 40.384615
| 121
|
py
|
MENET
|
MENET-master/heavy/template/menet_shallow_new_edge_gradbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_shallow_edge_gradbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/13
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new
class ModelShallowNewEdgeGradBalance(menet_shallow_new.ModelShallowNew):
    """Shallow derain model trained with pixel MSE + Sobel-edge MSE, where the
    two loss terms are re-weighted by the gradient norms they induce on the
    final derain layer (a GradNorm-style balance)."""
    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewEdgeGradBalance, self).__init__(mode)
    # loss_layer
    def build_loss(self):
        """Compute pixel MSE, Sobel-edge MSE, and the SSIM/PSNR metrics."""
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # Run Sobel on labels and predictions in one concatenated batch, then split.
        edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.content_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
                                                         predictions=edge_feat[cfg.batch_size:])
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        # Unweighted sum; build_optimizer replaces it with a balanced version.
        self.total_loss = self.mse + self.content_loss
    def build_optimizer(self):
        """Re-weight the losses by gradient norm and build the Adam train op."""
        # the loss ratio for task i at time t: each loss's weight is
        # 1 - G_i / (G_1 + G_2), so the loss with the larger gradient on the
        # final layer gets the smaller weight (stop_gradient keeps the
        # weights out of the backward pass).
        tvars = tf.trainable_variables(scope="derain/layer5")
        mse_grads = tf.gradients(self.mse, tvars)
        G1 = tf.norm(mse_grads)
        closs_grads = tf.gradients(self.content_loss, tvars)
        G2 = tf.norm(closs_grads)
        G = G1 + G2
        w_1 = tf.stop_gradient(1. - G1 / G)  # num_tasks * (1 - task_i/tasks)
        w_2 = tf.stop_gradient(1. - G2 / G)
        self.total_loss = w_1 * self.mse + w_2 * self.content_loss
        # note: cfg.num_examples_per_epoch is refreshed by data_helper.get_batch
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # Bug fix: expose the decayed LR tensor directly instead of reaching
        # into the optimizer's private `_lr` attribute (same tensor, public).
        self.lr = lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 2,524
| 39.725806
| 104
|
py
|
MENET
|
MENET-master/heavy/template/menet_shallow_new_ca.py
|
# -*- coding: utf-8 -*-
# @File : light/net_shallow_new_ca.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/13
# @Desc : deep model (16 residual blocks), spatial pyramid attention
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from template import net_base
class ModelShallowNewCa(net_base.ModelBase):
    """Derain model with a pyramid-style channel-attention block.

    The network downsamples twice with ``space_to_depth``, runs 8 residual
    blocks at the low resolution, upsamples back with ``depth_to_space`` and
    predicts a residual that is added to the rainy input.
    """
    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewCa, self).__init__(mode)
    def channel_attention_layer(self, name, x):
        """Channel attention: downsample twice, excite, gate, residual-add.

        The excited features are averaged over H and W and squashed with a
        sigmoid to yield one gate per channel; ``x`` is then scaled by the
        gates and added back to itself (residual attention).
        """
        with tf.variable_scope(name):
            in_channel = x.get_shape()[-1]
            # Two space_to_depth stages shrink the spatial extent; the 1x1
            # "squeeze" convs restore the channel count after each rearrange.
            down_scale1 = self.bn_relu(self.conv2(tf.space_to_depth(x, 2, name="att_desubpixel_1"), in_channel, 1, 1, name="squeeze_1"))
            down_scale1 = self.bn_relu(self.conv2(down_scale1, in_channel, 3, name="layer_1"))
            down_scale2 = self.bn_relu(self.conv2(tf.space_to_depth(down_scale1, 2, name="att_desubpixel_2"), in_channel, 1, 1, name="squeeze_2"))
            down_scale2 = self.bn_relu(self.conv2(down_scale2, in_channel, 3, name="layer_2"))
            net = self.bn(self.conv2(down_scale2, in_channel, 3, 1, name="excitation_1"))
            # Global average over H,W -> one sigmoid gate per channel.
            channel_feat = tf.nn.sigmoid(tf.reduce_mean(net, [1, 2], keepdims=True))
            return tf.add(x, tf.multiply(x, channel_feat))
    def build_model(self):
        """Assemble the full derain graph; sets ``self.output``."""
        with tf.variable_scope("derain"):
            # Encoder: two space_to_depth downsamplings (net_1 / net_2 are
            # kept for the long skip connections in the decoder).
            net = tf.space_to_depth(self.syn_img, 2, name="desubpixel_1")
            net_1 = self.bn(self.conv2(net, 16, 3, name="layer1"))
            net = tf.nn.relu(net_1)
            net = tf.space_to_depth(net, 2, name="desubpixel_2")
            net_2 = self.bn(self.conv2(net, 64, 3, name="layer2"))
            net = tf.nn.relu(net_2)
            net = self.channel_attention_layer("ca", net)
            # 8 residual blocks; the last one omits the trailing ReLU.
            for i in range(8):
                res = net
                net = self.bn_relu(self.conv2(net, 64, 3, 1, name='res_{}_a'.format(i)))
                net = self.bn(self.conv2(net, 64, 3, 1, name='res_{}_b'.format(i)))
                if i <7:
                    net = tf.nn.relu(tf.add(net, res))  # skip-connect
                else:
                    net = tf.add(net, res)
            # Decoder: fuse encoder features via long skips, upsample back.
            net = self.bn(self.conv2(tf.add(net_2, net), 64, 3, name="layer3"))
            net = tf.depth_to_space(net, 2, "pixel_shuffle_1")
            net = self.bn(self.conv2(tf.add(net_1, net), 16, 3, name="layer4"))
            net = self.conv2(net, 12, 3, name="layer5")
            net = tf.depth_to_space(net, 2, "pixel_shuffle_2")
            # The network predicts a residual that is added to the rainy input.
            bg_hat = tf.add(self.syn_img, net)
            self.output = tf.clip_by_value(bg_hat, 0.0, 255.0, name="output")  # BReLU
| 2,722
| 42.222222
| 146
|
py
|
MENET
|
MENET-master/heavy/template/menet_shallow_new_edge_gram_gradbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_deep_edge_gradbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/15
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new
# from vgg19 import Vgg19
class ModelShallowNewEdgeGramGradBalance(menet_shallow_new.ModelShallowNew):
    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewEdgeGramGradBalance, self).__init__(mode)
        # Placeholder; populated by build_loss with the Sobel-edge MSE tensor.
        self.edge_loss = None
# def texture_matching_loss(self):
# perceptron = Vgg19(cfg.vgg_dir)
# perceptron.build(tf.concat([self.bg_img, self.output], axis=0))
# labels_reshape = tf.reshape(perceptron.pool1[:cfg.batch_size], [cfg.batch_size, -1, 64])
# predictions_reshape = tf.reshape(perceptron.pool1[cfg.batch_size:], [cfg.batch_size, -1, 64])
# gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
# gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
# gram_labels = tf.reduce_mean(gram_labels, [1,2])
# gram_predictions = tf.reduce_mean(gram_predictions, [1,2])
# # texture_matching_loss
# return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)
def texture_matching_loss(self, labels, predictions):
labels_reshape = tf.reshape(tf.space_to_depth(labels, 4), [cfg.batch_size, -1, 48])
predictions_reshape = tf.reshape(tf.space_to_depth(predictions, 4), [cfg.batch_size, -1, 48])
gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
gram_labels = tf.reduce_mean(gram_labels, [1,2])
gram_predictions = tf.reduce_mean(gram_predictions, [1,2])
# texture_matching_loss
return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)
# loss_layer
def build_loss(self):
# Compute losses.
self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
self.edge_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
predictions=edge_feat[cfg.batch_size:])
# self.content_loss = self.texture_matching_loss()
self.content_loss = self.texture_matching_loss(labels=self.bg_img, predictions=self.output)
self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
self.total_loss = self.mse + self.content_loss + self.edge_loss
def build_optimizer(self):
# the loss ratio for task i at time t
tvars = tf.trainable_variables(scope="derain/layer5")
mse_grads = tf.gradients(self.mse, tvars)
G1 = tf.norm(mse_grads)
closs_grads = tf.gradients(self.content_loss, tvars)
G2 = tf.norm(closs_grads)
edge_grads = tf.gradients(self.edge_loss, tvars)
G3 = tf.norm(edge_grads)
G = G1 + G2 + G3
w_1 = tf.stop_gradient(1. - G1 / G) # num_tasks * (1 - task_i/tasks)
w_2 = tf.stop_gradient(1. - G2 / G)
w_3 = tf.stop_gradient(1. - G3 / G)
self.total_loss = w_1 * self.mse + w_2 * self.content_loss + w_3 * self.edge_loss
# note: cfg.num_examples_per_epoch now is `None`
lr = tf.train.exponential_decay(cfg.lr,
self.global_step,
cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
cfg.decay_factor,
staircase=True)
optimizer = tf.train.AdamOptimizer(lr)
self.lr = optimizer._lr
# note: you must use the control dependency to update the BN parameters.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 4,430
| 47.163043
| 105
|
py
|
MENET
|
MENET-master/heavy/template/menet_deep_new_ca_edge_gram_lossbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_deep_spa_edge_lossbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/15
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_deep_new_ca
class ModelDeepNewCaEdgeGramLossBalance(menet_deep_new_ca.ModelDeepNewCa):
    """Deep channel-attention de-rain model trained with pixel MSE, Gram-matrix
    texture loss, and Sobel-edge loss; per-step weights come from each loss's
    share of the total loss (loss-ratio balancing)."""
    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelDeepNewCaEdgeGramLossBalance, self).__init__(mode)
        # Sobel-edge MSE term; populated by build_loss().
        self.edge_loss = None
    # def texture_matching_loss(self):
    #     perceptron = Vgg19(cfg.vgg_dir)
    #     perceptron.build(tf.concat([self.bg_img, self.output], axis=0))
    #     labels_reshape = tf.reshape(perceptron.pool1[:cfg.batch_size], [cfg.batch_size, -1, 64])
    #     predictions_reshape = tf.reshape(perceptron.pool1[cfg.batch_size:], [cfg.batch_size, -1, 64])
    #     gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
    #     gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
    #     gram_labels = tf.reduce_mean(gram_labels, [1,2])
    #     gram_predictions = tf.reduce_mean(gram_predictions, [1,2])
    #     # texture_matching_loss
    #     return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)
    def texture_matching_loss(self, labels, predictions):
        """Gram-matrix texture loss.

        `space_to_depth(x, 4)` folds each 4x4 RGB patch into 48 channels; a
        48x48 Gram matrix is built per image, then averaged over both Gram axes
        to one scalar per image, and the MSE between label/prediction scalars
        is returned.
        """
        labels_reshape = tf.reshape(tf.space_to_depth(labels, 4), [cfg.batch_size, -1, 48])
        predictions_reshape = tf.reshape(tf.space_to_depth(predictions, 4), [cfg.batch_size, -1, 48])
        gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
        gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
        gram_labels = tf.reduce_mean(gram_labels, [1,2])
        gram_predictions = tf.reduce_mean(gram_predictions, [1,2])
        # texture_matching_loss
        return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)
    # def texture_matching_loss(self, labels, predictions):
    #     labels_reshape = tf.reshape(tf.extract_image_patches(labels,[1,5,5,1],[1,1,1,1],[1,1,1,1], "SAME"), [cfg.batch_size, -1, 75])
    #     predictions_reshape = tf.reshape(tf.extract_image_patches(predictions,[1,5,5,1],[1,1,1,1],[1,1,1,1], "SAME"), [cfg.batch_size, -1, 75])
    #     gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
    #     gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
    #     gram_labels = tf.reduce_mean(gram_labels, [1,2])
    #     gram_predictions = tf.reduce_mean(gram_predictions, [1,2])
    #     # texture_matching_loss
    #     return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)
    # loss_layer
    def build_loss(self):
        """Builds mse / edge_loss / content_loss plus SSIM and PSNR metrics;
        total_loss here is the unweighted sum (re-weighted in build_optimizer)."""
        # Compute losses.
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # One sobel_edges call over the concatenated batch; split back below.
        edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.edge_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
                                                      predictions=edge_feat[cfg.batch_size:])
        # self.content_loss = self.texture_matching_loss()
        self.content_loss = self.texture_matching_loss(labels=self.bg_img, predictions=self.output)
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + self.content_loss + self.edge_loss
    def build_optimizer(self):
        """Adam with exponentially decayed LR; each loss weight is
        1 - loss_i/total_loss, held constant per step via stop_gradient."""
        # the loss ratio for task i at time t
        w_1 = tf.stop_gradient(1. - self.mse / self.total_loss)
        w_2 = tf.stop_gradient(1. - self.content_loss / self.total_loss)
        w_3 = tf.stop_gradient(1. - self.edge_loss / self.total_loss)
        self.total_loss = w_1 * self.mse + w_2 * self.content_loss + w_3 * self.edge_loss
        # note: cfg.num_examples_per_epoch now is `None`
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # NOTE(review): reads Adam's private attribute; tied to this TF1 version.
        self.lr = optimizer._lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 4,887
| 53.311111
| 145
|
py
|
MENET
|
MENET-master/heavy/template/menet_deep_new_ca_edge_gram_gradbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_deep_spa_edge_gradbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/15
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_deep_new_ca
class ModelDeepNewCaEdgeGramGradBalance(menet_deep_new_ca.ModelDeepNewCa):
    """Deep channel-attention de-rain model trained with pixel MSE, Gram-matrix
    texture loss, and Sobel-edge loss; per-step weights are derived from the
    gradient norms at the last conv layer (GradNorm-style balancing)."""
    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelDeepNewCaEdgeGramGradBalance, self).__init__(mode)
        # Sobel-edge MSE term; populated by build_loss().
        self.edge_loss = None
    # def texture_matching_loss(self):
    #     perceptron = Vgg19(cfg.vgg_dir)
    #     perceptron.build(tf.concat([self.bg_img, self.output], axis=0))
    #     labels_reshape = tf.reshape(perceptron.pool1[:cfg.batch_size], [cfg.batch_size, -1, 64])
    #     predictions_reshape = tf.reshape(perceptron.pool1[cfg.batch_size:], [cfg.batch_size, -1, 64])
    #     gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
    #     gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
    #     gram_labels = tf.reduce_mean(gram_labels, [1,2])
    #     gram_predictions = tf.reduce_mean(gram_predictions, [1,2])
    #     # texture_matching_loss
    #     return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)
    def texture_matching_loss(self, labels, predictions):
        """Gram-matrix texture loss.

        `space_to_depth(x, 4)` folds each 4x4 RGB patch into 48 channels; a
        48x48 Gram matrix is built per image, then averaged over both Gram axes
        to one scalar per image, and the MSE between label/prediction scalars
        is returned.
        """
        labels_reshape = tf.reshape(tf.space_to_depth(labels, 4), [cfg.batch_size, -1, 48])
        predictions_reshape = tf.reshape(tf.space_to_depth(predictions, 4), [cfg.batch_size, -1, 48])
        gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
        gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
        gram_labels = tf.reduce_mean(gram_labels, [1,2])
        gram_predictions = tf.reduce_mean(gram_predictions, [1,2])
        # texture_matching_loss
        return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)
    # def texture_matching_loss(self, labels, predictions):
    #     labels_reshape = tf.reshape(tf.extract_image_patches(labels,[1,5,5,1],[1,1,1,1],[1,1,1,1], "SAME"), [cfg.batch_size, -1, 75])
    #     predictions_reshape = tf.reshape(tf.extract_image_patches(predictions,[1,5,5,1],[1,1,1,1],[1,1,1,1], "SAME"), [cfg.batch_size, -1, 75])
    #     gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
    #     gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
    #     gram_labels = tf.reduce_mean(gram_labels, [1,2])
    #     gram_predictions = tf.reduce_mean(gram_predictions, [1,2])
    #     # texture_matching_loss
    #     return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)
    # loss_layer
    def build_loss(self):
        """Builds mse / edge_loss / content_loss plus SSIM and PSNR metrics;
        total_loss here is the unweighted sum (re-weighted in build_optimizer)."""
        # Compute losses.
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # One sobel_edges call over the concatenated batch; split back below.
        edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.edge_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
                                                      predictions=edge_feat[cfg.batch_size:])
        # self.content_loss = self.texture_matching_loss()
        self.content_loss = self.texture_matching_loss(labels=self.bg_img, predictions=self.output)
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + self.content_loss + self.edge_loss
    def build_optimizer(self):
        """Adam with exponentially decayed LR; loss weights are 1 - G_i/G where
        G_i is each loss's gradient norm at the "derain/layer5" variables."""
        # the loss ratio for task i at time t
        tvars = tf.trainable_variables(scope="derain/layer5")
        # NOTE(review): tf.norm over the list returned by tf.gradients only
        # works if it converts to a single tensor (i.e. one variable in scope);
        # tf.global_norm would be the shape-agnostic choice — confirm.
        mse_grads = tf.gradients(self.mse, tvars)
        G1 = tf.norm(mse_grads)
        closs_grads = tf.gradients(self.content_loss, tvars)
        G2 = tf.norm(closs_grads)
        edge_grads = tf.gradients(self.edge_loss, tvars)
        G3 = tf.norm(edge_grads)
        G = G1 + G2 + G3
        # stop_gradient: the weights are treated as constants per step.
        w_1 = tf.stop_gradient(1. - G1 / G)  # num_tasks * (1 - task_i/tasks)
        w_2 = tf.stop_gradient(1. - G2 / G)
        w_3 = tf.stop_gradient(1. - G3 / G)
        self.total_loss = w_1 * self.mse + w_2 * self.content_loss + w_3 * self.edge_loss
        # note: cfg.num_examples_per_epoch now is `None`
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # NOTE(review): reads Adam's private attribute; tied to this TF1 version.
        self.lr = optimizer._lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 5,208
| 48.141509
| 145
|
py
|
MENET
|
MENET-master/heavy/template/menet_shallow_new_edge_lossbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_shallow_edge_lossbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/13
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new
class ModelShallowNewEdgeLossBalance(menet_shallow_new.ModelShallowNew):
    """Shallow de-rain model trained with pixel MSE plus a Sobel-edge loss
    (stored in `content_loss` here), balanced per step by loss ratios."""
    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewEdgeLossBalance, self).__init__(mode)
    # loss_layer
    def build_loss(self):
        """Builds mse and the Sobel-edge MSE (kept in self.content_loss) plus
        SSIM/PSNR metrics; total_loss is re-weighted in build_optimizer."""
        # Compute losses.
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # One sobel_edges call over the concatenated batch; split back below.
        edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.content_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
                                                         predictions=edge_feat[cfg.batch_size:])
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + self.content_loss
    def build_optimizer(self):
        """Adam with exponentially decayed LR; each loss weight is
        1 - loss_i/total_loss, held constant per step via stop_gradient."""
        # the loss ratio for task i at time t
        w_1 = tf.stop_gradient(1. - self.mse / self.total_loss)
        w_2 = tf.stop_gradient(1. - self.content_loss / self.total_loss)
        self.total_loss = w_1 * self.mse + w_2 * self.content_loss
        # note: cfg.num_examples_per_epoch now is `None`
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # NOTE(review): reads Adam's private attribute; tied to this TF1 version.
        self.lr = optimizer._lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 2,278
| 41.203704
| 104
|
py
|
MENET
|
MENET-master/heavy/template/menet_shallow_new_vgg_fixed.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_shallow_vgg_fixed.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/15
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new
from vgg19 import Vgg19
class ModelShallowNewVGGFixed(menet_shallow_new.ModelShallowNew):
    """Shallow de-rain model trained with pixel MSE plus a VGG19 perceptual
    loss (conv3_4 feature MSE) at a fixed 1e-3 weight."""
    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewVGGFixed, self).__init__(mode)
    # loss_layer
    def build_loss(self):
        """Builds mse, VGG conv3_4 perceptual loss, SSIM/PSNR metrics, and a
        fixed-weight total loss (mse + 1e-3 * content_loss)."""
        # Compute losses.
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # Run labels and predictions through VGG19 as one batch, split below.
        perceptron = Vgg19(cfg.vgg_dir)
        perceptron.build(tf.concat([self.bg_img, self.output], axis=0))
        self.content_loss = tf.losses.mean_squared_error(perceptron.conv3_4[:cfg.batch_size],
                                                         perceptron.conv3_4[cfg.batch_size:])
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + 1e-3 * self.content_loss
| 1,288
| 35.828571
| 93
|
py
|
MENET
|
MENET-master/heavy/template/menet_shallow_new_ca_edge_gram_gradbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_deep_spa_edge_gradbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/15
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new_ca
class ModelShallowNewCaEdgeGramGradBalance(menet_shallow_new_ca.ModelShallowNewCa):
    """Shallow channel-attention de-rain model trained with pixel MSE,
    Gram-matrix texture loss, and Sobel-edge loss; per-step weights are derived
    from gradient norms at the last conv layer (GradNorm-style balancing)."""
    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewCaEdgeGramGradBalance, self).__init__(mode)
        # Sobel-edge MSE term; populated by build_loss().
        self.edge_loss = None
    # def texture_matching_loss(self):
    #     perceptron = Vgg19(cfg.vgg_dir)
    #     perceptron.build(tf.concat([self.bg_img, self.output], axis=0))
    #     labels_reshape = tf.reshape(perceptron.pool1[:cfg.batch_size], [cfg.batch_size, -1, 64])
    #     predictions_reshape = tf.reshape(perceptron.pool1[cfg.batch_size:], [cfg.batch_size, -1, 64])
    #     gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
    #     gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
    #     gram_labels = tf.reduce_mean(gram_labels, [1,2])
    #     gram_predictions = tf.reduce_mean(gram_predictions, [1,2])
    #     # texture_matching_loss
    #     return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)
    def texture_matching_loss(self, labels, predictions):
        """Gram-matrix texture loss.

        `space_to_depth(x, 4)` folds each 4x4 RGB patch into 48 channels; a
        48x48 Gram matrix is built per image, then averaged over both Gram axes
        to one scalar per image, and the MSE between label/prediction scalars
        is returned.
        """
        labels_reshape = tf.reshape(tf.space_to_depth(labels, 4), [cfg.batch_size, -1, 48])
        predictions_reshape = tf.reshape(tf.space_to_depth(predictions, 4), [cfg.batch_size, -1, 48])
        gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
        gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
        gram_labels = tf.reduce_mean(gram_labels, [1,2])
        gram_predictions = tf.reduce_mean(gram_predictions, [1,2])
        # texture_matching_loss
        return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)
    # def texture_matching_loss(self, labels, predictions):
    #     labels_reshape = tf.reshape(tf.extract_image_patches(labels,[1,5,5,1],[1,1,1,1],[1,1,1,1], "SAME"), [cfg.batch_size, -1, 75])
    #     predictions_reshape = tf.reshape(tf.extract_image_patches(predictions,[1,5,5,1],[1,1,1,1],[1,1,1,1], "SAME"), [cfg.batch_size, -1, 75])
    #     gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
    #     gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
    #     gram_labels = tf.reduce_mean(gram_labels, [1,2])
    #     gram_predictions = tf.reduce_mean(gram_predictions, [1,2])
    #     # texture_matching_loss
    #     return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)
    # loss_layer
    def build_loss(self):
        """Builds mse / edge_loss / content_loss plus SSIM and PSNR metrics;
        total_loss here is the unweighted sum (re-weighted in build_optimizer)."""
        # Compute losses.
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # One sobel_edges call over the concatenated batch; split back below.
        edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.edge_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
                                                      predictions=edge_feat[cfg.batch_size:])
        # self.content_loss = self.texture_matching_loss()
        self.content_loss = self.texture_matching_loss(labels=self.bg_img, predictions=self.output)
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + self.content_loss + self.edge_loss
    def build_optimizer(self):
        """Adam with exponentially decayed LR; loss weights are 1 - G_i/G where
        G_i is each loss's gradient norm at the "derain/layer5" variables."""
        # the loss ratio for task i at time t
        tvars = tf.trainable_variables(scope="derain/layer5")
        # NOTE(review): tf.norm over the list returned by tf.gradients only
        # works if it converts to a single tensor (i.e. one variable in scope);
        # tf.global_norm would be the shape-agnostic choice — confirm.
        mse_grads = tf.gradients(self.mse, tvars)
        G1 = tf.norm(mse_grads)
        closs_grads = tf.gradients(self.content_loss, tvars)
        G2 = tf.norm(closs_grads)
        edge_grads = tf.gradients(self.edge_loss, tvars)
        G3 = tf.norm(edge_grads)
        G = G1 + G2 + G3
        # stop_gradient: the weights are treated as constants per step.
        w_1 = tf.stop_gradient(1. - G1 / G)  # num_tasks * (1 - task_i/tasks)
        w_2 = tf.stop_gradient(1. - G2 / G)
        w_3 = tf.stop_gradient(1. - G3 / G)
        self.total_loss = w_1 * self.mse + w_2 * self.content_loss + w_3 * self.edge_loss
        # note: cfg.num_examples_per_epoch now is `None`
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # NOTE(review): reads Adam's private attribute; tied to this TF1 version.
        self.lr = optimizer._lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 5,223
| 48.283019
| 145
|
py
|
MENET
|
MENET-master/heavy/template/net_base.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_base.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/13
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
class ModelBase(object):
    """Base class for the de-rain models: input placeholders, conv/norm layer
    helpers, default MSE loss, Adam optimizer wiring, and the build() driver.
    Subclasses override build_model() and usually build_loss()/build_optimizer()."""
    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        assert mode in ["train", "eval", "inference"]
        self.initializer = tf.initializers.variance_scaling(scale=1.0, mode="fan_in")
        self.mode = mode
        # A float32 tensor with shape [batch_size, height, width, channels].
        self.bg_img = None  # clean background image
        self.syn_img = None  # synthesis rainy image
        self.r_img = None  # rain layer
        # Outputs of de-rain model
        self.output = None
        self.r_hat = None
        self.syn_hat = None
        # A float32 scalar tensor; the total loss for the trainer to optimize.
        self.total_loss = None
        self.mse = None
        self.content_loss = None
        self.contexture_loss = None
        self.ssim = None
        self.psnr = None
        self.lr = None
        # optimizer
        self.train_op = None
        # Global step tensor.
        self.global_step = None
        # class name (used as a run nickname, e.g. for logging/checkpoints)
        self.nickname = self.__class__.__name__
    def is_training(self):
        """returns true if the model is built for training mode."""
        return self.mode == "train"
    def build_inputs(self):
        """Input prefetching, preprocessing and batching.
        :return:
            self.images: A tensor of shape [batch_size, height, width, channels].
        """
        if self.mode == "inference":
            # # In inference mode, images are fed via placeholders.
            self.syn_img = tf.placeholder(dtype=tf.float32, shape=[None, None, None, 3], name="image_feed")
            # No target of input rainy image in inference mode.
            self.bg_img = None
            self.r_img = None
        else:
            # from h5py get batch-data
            self.bg_img = tf.placeholder(tf.float32, shape=(cfg.batch_size, cfg.crop_size, cfg.crop_size, 3),
                                         name='bg')
            # self.r_img = tf.placeholder(tf.float32, shape=(cfg.batch_size, cfg.crop_size, cfg.crop_size, 1), name='r')
            self.syn_img = tf.placeholder(tf.float32, shape=(cfg.batch_size, cfg.crop_size, cfg.crop_size, 3),
                                          name='syn')
    def conv2(self, inputs, filters, kernel_size, strides=1, dilation_rate=1, activation=None, padding="SAME", name=None):
        """2-D conv without bias, variance-scaling init, wrapped in its own
        variable scope; the op itself is named `<name>_conv`."""
        with tf.variable_scope(name):
            assert type(strides) == int
            assert type(kernel_size) == int
            return tf.layers.conv2d(inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
                                    padding=padding, dilation_rate=dilation_rate, activation=activation,
                                    kernel_initializer=self.initializer, use_bias=False, name=name + "_conv")
    @staticmethod
    def instance_norm(inputs):
        """Per-image, per-channel normalization over the spatial axes (H, W)."""
        ins_mean, ins_sigma = tf.nn.moments(inputs, axes=[1, 2], keep_dims=True)
        return (inputs - ins_mean) / (tf.sqrt(ins_sigma + 1e-5))
    def bn(self, inputs):
        """Batch norm (train/eval behavior follows self.mode)."""
        return tf.layers.batch_normalization(inputs=inputs, training=self.is_training())
    def bn_relu(self, inputs):
        """Batch norm followed by ReLU."""
        return tf.nn.relu(tf.layers.batch_normalization(inputs=inputs, training=self.is_training()))
    def bn_lrelu(self, inputs):
        """Batch norm followed by leaky ReLU."""
        return tf.nn.leaky_relu(tf.layers.batch_normalization(inputs=inputs, training=self.is_training()))
    def in_relu(self, inputs):
        """Instance norm followed by ReLU."""
        return tf.nn.relu(self.instance_norm(inputs))
    def in_lrelu(self, inputs):
        """Instance norm followed by leaky ReLU."""
        return tf.nn.leaky_relu(self.instance_norm(inputs))
    def build_model(self):
        """Hook: subclasses construct the network and set self.output."""
        pass
    @staticmethod
    def tf_summary_image(name, img_tensor, img_size=cfg.crop_size):
        """Tiles the first 4 images into a 2x2 grid for a tf.summary.image
        (assumes the batch holds at least 4 square images of img_size)."""
        v = tf.reshape(img_tensor[:4, :, :, :], [2, 2, img_size, img_size, 3])
        v = tf.transpose(v, [0, 2, 1, 3, 4])
        v = tf.reshape(v, [-1, 2 * img_size, 2 * img_size, 3])
        tf.summary.image(name, v)
    # loss_layer
    def build_loss(self):
        """Default loss: pixel MSE only, with SSIM/PSNR metrics."""
        # Compute losses.
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse
    def setup_global_step(self):
        """Sets up the global step tensor."""
        global_step = tf.Variable(initial_value=0, trainable=False, name="global_step",
                                  collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
        self.global_step = global_step
    def build_optimizer(self):
        """Adam on total_loss with staircase exponential LR decay; BN update
        ops are forced to run before each train step."""
        # note: cfg.num_examples_per_epoch now is `None`
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # NOTE(review): reads Adam's private attribute; tied to this TF1 version.
        self.lr = optimizer._lr
        #optimizer = tf.train.MomentumOptimizer(lr,0.9)
        #self.lr = optimizer._learning_rate
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
    def build(self):
        """Creates all ops for training and evaluation."""
        self.build_inputs()
        self.build_model()
        self.setup_global_step()
        if self.mode != "inference":
            self.build_loss()
            self.build_optimizer()
| 6,054
| 38.575163
| 122
|
py
|
MENET
|
MENET-master/heavy/template/menet_shallow_new.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_deep.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/13
# @Desc : deep model (16 residual blocks), spatial pyramid attention
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from template import net_base
class ModelShallowNew(net_base.ModelBase):
    """Shallow residual de-rain network: two space_to_depth downscales, 8
    residual blocks at 64 channels, two depth_to_space upscales, and a global
    residual connection from the rainy input to the output."""
    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNew, self).__init__(mode)
    def channel_attention_layer(self, name, x):
        """Channel attention: two conv/downscale stages, then a sigmoid gate
        from the spatial mean; returns x scaled channel-wise.
        NOTE: not called by this class's build_model (available to subclasses)."""
        with tf.variable_scope(name):
            in_channel = x.get_shape()[-1]
            down_scale1 = self.bn_relu(self.conv2(tf.space_to_depth(x, 2, name="att_desubpixel_1"), in_channel, 1, 1, name="squeeze_1"))
            down_scale1 = self.bn_relu(self.conv2(down_scale1, in_channel, 3, name="layer_1"))
            down_scale2 = self.bn_relu(self.conv2(tf.space_to_depth(down_scale1, 2, name="att_desubpixel_2"), in_channel, 1, 1, name="squeeze_2"))
            down_scale2 = self.bn_relu(self.conv2(down_scale2, in_channel, 3, name="layer_2"))
            net = self.bn(self.conv2(down_scale2, in_channel, 3, 1, name="excitation_1"))
            # Global average pool -> sigmoid: one gate value per channel.
            channel_feat = tf.nn.sigmoid(tf.reduce_mean(net, [1, 2], keepdims=True))
            return tf.multiply(x, channel_feat)
    def spatial_attention_layer(self, name, x):
        """Spatial attention with an encoder-decoder (two down-/up-scales and
        skip adds); returns x * sigmoid(map) + map.
        NOTE: not called by this class's build_model (available to subclasses)."""
        with tf.variable_scope(name):
            in_channel = x.get_shape()[-1]
            down_scale1 = self.bn(self.conv2(tf.space_to_depth(x, 2, name="desubpixel_1"), in_channel, 1, 1, name="squeeze_1"))
            net = self.bn_relu(self.conv2(tf.nn.relu(down_scale1), in_channel, 3, name="layer_1"))
            down_scale2 = self.bn(self.conv2(tf.space_to_depth(net, 2, name="desubpixel_2"), in_channel, 1, 1, name="squeeze_2"))
            net = self.bn_relu(self.conv2(tf.nn.relu(down_scale2), in_channel, 3, name="res_1"))
            net = self.bn(self.conv2(net, in_channel, 3, name="res_2"))
            up_scale1 = self.bn_relu(self.conv2(tf.depth_to_space(tf.nn.relu(tf.add(down_scale2, net)), 2, "subpixel_1"), in_channel, 1, 1, name="excitation_1"))
            net = self.bn(self.conv2(up_scale1, in_channel, 3, name="layer_2"))
            up_scale2 = self.bn_relu(self.conv2(tf.depth_to_space(tf.nn.relu(tf.add(down_scale1, net)), 2, "subpixel_2"), in_channel, 1, 1, name="excitation_2"))
            net = self.bn(self.conv2(up_scale2, in_channel, 3, name="layer_3"))
            spatial_feat = tf.nn.sigmoid(net)
            return tf.add(tf.multiply(x, spatial_feat), net)
    def build_model(self):
        """Builds the derain graph: predicts a residual added to syn_img and
        clips the result to [0, 255]."""
        with tf.variable_scope("derain"):
            net = tf.space_to_depth(self.syn_img, 2, name="desubpixel_1")
            net_1 = self.bn(self.conv2(net, 16, 3, name="layer1"))
            net = tf.nn.relu(net_1)
            net = tf.space_to_depth(net, 2, name="desubpixel_2")
            net_2 = self.bn(self.conv2(net, 64, 3, name="layer2"))
            net = tf.nn.relu(net_2)
            # 8 residual blocks; the last one skips the final ReLU.
            for i in range(8):
                res = net
                net = self.bn_relu(self.conv2(net, 64, 3, 1, name='res_{}_a'.format(i)))
                net = self.bn(self.conv2(net, 64, 3, 1, name='res_{}_b'.format(i)))
                if i <7:
                    net = tf.nn.relu(tf.add(net, res )) # skip-connect
                else:
                    net = tf.add(net, res)
            # Long skips from net_2/net_1 around the residual trunk.
            net = self.bn(self.conv2(tf.add(net_2, net), 64, 3, name="layer3"))
            net = tf.depth_to_space(net, 2, "pixel_shuffle_1")
            net = self.bn(self.conv2(tf.add(net_1, net), 16, 3, name="layer4"))
            net = self.conv2(net, 12, 3, name="layer5")
            net = tf.depth_to_space(net, 2, "pixel_shuffle_2")
            # Global residual: network output is a correction to the rainy input.
            bg_hat = tf.add(self.syn_img, net)
            self.output = tf.clip_by_value(bg_hat, 0.0, 255.0, name="output")  # BReLU
| 3,896
| 49.61039
| 161
|
py
|
MENET
|
MENET-master/heavy/template/menet_deep_new_ca.py
|
# -*- coding: utf-8 -*-
# @File : light/net_shallow_new_ca.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/13
# @Desc : deep model (16 residual blocks), spatial pyramid attention
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from template import net_base
class ModelDeepNewCa(net_base.ModelBase):
    """Deep residual de-rain network: two space_to_depth downscales, a channel
    attention layer, 16 residual blocks at 64 channels, two depth_to_space
    upscales, and a global residual connection from the rainy input."""
    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelDeepNewCa, self).__init__(mode)
    def channel_attention_layer(self, name, x):
        """Channel attention: two conv/downscale stages, then a sigmoid gate
        from the spatial mean; returns x + x * gate (residual form)."""
        with tf.variable_scope(name):
            in_channel = x.get_shape()[-1]
            down_scale1 = self.bn_relu(self.conv2(tf.space_to_depth(x, 2, name="att_desubpixel_1"), in_channel, 1, 1, name="squeeze_1"))
            down_scale1 = self.bn_relu(self.conv2(down_scale1, in_channel, 3, name="layer_1"))
            down_scale2 = self.bn_relu(self.conv2(tf.space_to_depth(down_scale1, 2, name="att_desubpixel_2"), in_channel, 1, 1, name="squeeze_2"))
            down_scale2 = self.bn_relu(self.conv2(down_scale2, in_channel, 3, name="layer_2"))
            net = self.bn(self.conv2(down_scale2, in_channel, 3, 1, name="excitation_1"))
            # Global average pool -> sigmoid: one gate value per channel.
            channel_feat = tf.nn.sigmoid(tf.reduce_mean(net, [1, 2], keepdims=True))
            return tf.add(x, tf.multiply(x, channel_feat))
    def build_model(self):
        """Builds the derain graph: predicts a residual added to syn_img and
        clips the result to [0, 255]."""
        with tf.variable_scope("derain"):
            net = tf.space_to_depth(self.syn_img, 2, name="desubpixel_1")
            net_1 = self.bn(self.conv2(net, 16, 3, name="layer1"))
            net = tf.nn.relu(net_1)
            net = tf.space_to_depth(net, 2, name="desubpixel_2")
            net_2 = self.bn(self.conv2(net, 64, 3, name="layer2"))
            net = tf.nn.relu(net_2)
            net = self.channel_attention_layer("ca", net)
            # 16 residual blocks; the last one skips the final ReLU.
            for i in range(16):
                res = net
                net = self.bn_relu(self.conv2(net, 64, 3, 1, name='res_{}_a'.format(i)))
                net = self.bn(self.conv2(net, 64, 3, 1, name='res_{}_b'.format(i)))
                if i <15:
                    net = tf.nn.relu(tf.add(net, res))  # skip-connect
                else:
                    net = tf.add(net, res)
            # Long skips from net_2/net_1 around the residual trunk.
            net = self.bn(self.conv2(tf.add(net_2, net), 64, 3, name="layer3"))
            net = tf.depth_to_space(net, 2, "pixel_shuffle_1")
            net = self.bn(self.conv2(tf.add(net_1, net), 16, 3, name="layer4"))
            net = self.conv2(net, 12, 3, name="layer5")
            net = tf.depth_to_space(net, 2, "pixel_shuffle_2")
            # Global residual: network output is a correction to the rainy input.
            bg_hat = tf.add(self.syn_img, net)
            self.output = tf.clip_by_value(bg_hat, 0.0, 255.0, name="output")  # BReLU
| 2,718
| 42.15873
| 146
|
py
|
MENET
|
MENET-master/heavy/template/menet_shallow_new_ca_edge_gram_lossbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_deep_spa_edge_lossbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/15
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new_ca
class ModelShallowNewCaEdgeGramLossBalance(menet_shallow_new_ca.ModelShallowNewCa):
    """Shallow channel-attention de-rain model trained with pixel MSE,
    Gram-matrix texture loss, and Sobel-edge loss; per-step weights come from
    each loss's share of the total loss (loss-ratio balancing)."""
    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewCaEdgeGramLossBalance, self).__init__(mode)
        # Sobel-edge MSE term; populated by build_loss().
        self.edge_loss = None
    # def texture_matching_loss(self):
    #     perceptron = Vgg19(cfg.vgg_dir)
    #     perceptron.build(tf.concat([self.bg_img, self.output], axis=0))
    #     labels_reshape = tf.reshape(perceptron.pool1[:cfg.batch_size], [cfg.batch_size, -1, 64])
    #     predictions_reshape = tf.reshape(perceptron.pool1[cfg.batch_size:], [cfg.batch_size, -1, 64])
    #     gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
    #     gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
    #     gram_labels = tf.reduce_mean(gram_labels, [1,2])
    #     gram_predictions = tf.reduce_mean(gram_predictions, [1,2])
    #     # texture_matching_loss
    #     return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)
    def texture_matching_loss(self, labels, predictions):
        """Gram-matrix texture loss.

        `space_to_depth(x, 4)` folds each 4x4 RGB patch into 48 channels; a
        48x48 Gram matrix is built per image, then averaged over both Gram axes
        to one scalar per image, and the MSE between label/prediction scalars
        is returned.
        """
        labels_reshape = tf.reshape(tf.space_to_depth(labels, 4), [cfg.batch_size, -1, 48])
        predictions_reshape = tf.reshape(tf.space_to_depth(predictions, 4), [cfg.batch_size, -1, 48])
        gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
        gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
        gram_labels = tf.reduce_mean(gram_labels, [1,2])
        gram_predictions = tf.reduce_mean(gram_predictions, [1,2])
        # texture_matching_loss
        return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)
    # def texture_matching_loss(self, labels, predictions):
    #     labels_reshape = tf.reshape(tf.extract_image_patches(labels,[1,5,5,1],[1,1,1,1],[1,1,1,1], "SAME"), [cfg.batch_size, -1, 75])
    #     predictions_reshape = tf.reshape(tf.extract_image_patches(predictions,[1,5,5,1],[1,1,1,1],[1,1,1,1], "SAME"), [cfg.batch_size, -1, 75])
    #     gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
    #     gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
    #     gram_labels = tf.reduce_mean(gram_labels, [1,2])
    #     gram_predictions = tf.reduce_mean(gram_predictions, [1,2])
    #     # texture_matching_loss
    #     return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)
    # loss_layer
    def build_loss(self):
        """Builds mse / edge_loss / content_loss plus SSIM and PSNR metrics;
        total_loss here is the unweighted sum (re-weighted in build_optimizer)."""
        # Compute losses.
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # One sobel_edges call over the concatenated batch; split back below.
        edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.edge_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
                                                      predictions=edge_feat[cfg.batch_size:])
        # self.content_loss = self.texture_matching_loss()
        self.content_loss = self.texture_matching_loss(labels=self.bg_img, predictions=self.output)
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + self.content_loss + self.edge_loss
    def build_optimizer(self):
        """Adam with exponentially decayed LR; each loss weight is
        1 - loss_i/total_loss, held constant per step via stop_gradient."""
        # the loss ratio for task i at time t
        w_1 = tf.stop_gradient(1. - self.mse / self.total_loss)
        w_2 = tf.stop_gradient(1. - self.content_loss / self.total_loss)
        w_3 = tf.stop_gradient(1. - self.edge_loss / self.total_loss)
        self.total_loss = w_1 * self.mse + w_2 * self.content_loss + w_3 * self.edge_loss
        # note: cfg.num_examples_per_epoch now is `None`
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        cfg.num_examples_per_epoch // cfg.batch_size * cfg.decay_epochs,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # NOTE(review): reads Adam's private attribute; tied to this TF1 version.
        self.lr = optimizer._lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 4,902
| 53.477778
| 145
|
py
|
MENET
|
MENET-master/heavy/template/menet_shallow_new_edge_fixed.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_shallow_edge_fixed.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/15
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new
class ModelShallowNewEdgeFixed(menet_shallow_new.ModelShallowNew):
    """Shallow derain model whose loss adds a fixed-weight Sobel-edge term."""
    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewEdgeFixed, self).__init__(mode)
    # loss_layer
    def build_loss(self):
        """Combine pixel MSE with a 1e-2-weighted Sobel-edge MSE."""
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # One Sobel pass over the stacked (labels, predictions) batch.
        sobel = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.content_loss = tf.losses.mean_squared_error(
            labels=sobel[:cfg.batch_size], predictions=sobel[cfg.batch_size:])
        # Evaluation metrics (images assumed in [0, 255] given max_val).
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + 1e-2 * self.content_loss
| 1,244
| 36.727273
| 96
|
py
|
MENET
|
MENET-master/heavy/template/menet_shallow_new_edge_gram_lossbalance.py
|
# -*- coding: utf-8 -*-
# @File : derain_feqe_tf/net_deep_color_lossbalance.py
# @Info : @ TSMC-SIGGRAPH, 2019/11/26
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import tensorflow as tf
from configuration import cfg
from template import menet_shallow_new
# from vgg19 import Vgg19
class ModelShallowNewEdgeGramLossBalance(menet_shallow_new.ModelShallowNew):
    """Shallow derain model trained with MSE + Gram-texture + Sobel-edge
    losses, re-balanced each step by their share of the total loss."""
    def __init__(self, mode):
        """
        :param mode: one of strings "train", "eval", "inference"
        """
        super(ModelShallowNewEdgeGramLossBalance, self).__init__(mode)
        self.edge_loss = None
    def texture_matching_loss(self, labels, predictions):
        """Gram-matrix texture loss over 4x4 space-to-depth patch features."""
        labels_reshape = tf.reshape(tf.space_to_depth(labels, 4), [cfg.batch_size, -1, 48])
        predictions_reshape = tf.reshape(tf.space_to_depth(predictions, 4), [cfg.batch_size, -1, 48])
        gram_labels = tf.matmul(tf.transpose(labels_reshape, [0, 2, 1]), labels_reshape)
        gram_predictions = tf.matmul(tf.transpose(predictions_reshape, [0, 2, 1]), predictions_reshape)
        # Collapse each Gram matrix to a single scalar per sample before the MSE.
        gram_labels = tf.reduce_mean(gram_labels, [1, 2])
        gram_predictions = tf.reduce_mean(gram_predictions, [1, 2])
        return tf.losses.mean_squared_error(labels=gram_labels, predictions=gram_predictions)
    # loss_layer
    def build_loss(self):
        """Compute MSE, Sobel-edge and Gram-texture losses plus SSIM/PSNR."""
        self.mse = tf.losses.mean_squared_error(labels=self.bg_img, predictions=self.output)
        # One Sobel pass over the stacked (labels, predictions) batch.
        edge_feat = tf.image.sobel_edges(tf.concat([self.bg_img, self.output], axis=0))
        self.edge_loss = tf.losses.mean_squared_error(labels=edge_feat[:cfg.batch_size],
                                                      predictions=edge_feat[cfg.batch_size:])
        self.content_loss = self.texture_matching_loss(labels=self.bg_img, predictions=self.output)
        # Evaluation metrics (images assumed in [0, 255] given max_val).
        self.ssim = tf.reduce_mean(tf.image.ssim(self.bg_img, self.output, max_val=255.0))
        self.psnr = tf.reduce_mean(tf.image.psnr(self.bg_img, self.output, max_val=255.0))
        self.total_loss = self.mse + self.content_loss + self.edge_loss
    def build_optimizer(self):
        """Create the loss-balanced Adam training op with exponential LR decay."""
        # The loss ratio for task i at time t; stop_gradient makes the weights
        # act as per-step constants.
        w_1 = tf.stop_gradient(1. - self.mse / self.total_loss)
        w_2 = tf.stop_gradient(1. - self.content_loss / self.total_loss)
        w_3 = tf.stop_gradient(1. - self.edge_loss / self.total_loss)
        self.total_loss = w_1 * self.mse + w_2 * self.content_loss + w_3 * self.edge_loss
        # cfg.num_examples_per_epoch may be a float (argparse default 1e5 is
        # not coerced by type=int); cast explicitly so decay_steps is an int.
        decay_steps = int(cfg.num_examples_per_epoch) // cfg.batch_size * cfg.decay_epochs
        lr = tf.train.exponential_decay(cfg.lr,
                                        self.global_step,
                                        decay_steps,
                                        cfg.decay_factor,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)
        # Expose the decayed learning-rate tensor directly instead of reaching
        # into the optimizer's private `_lr` attribute (same value, stable API).
        self.lr = lr
        # note: you must use the control dependency to update the BN parameters.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.total_loss, self.global_step)
| 4,115
| 49.814815
| 105
|
py
|
MENET
|
MENET-master/heavy/utils/inference_wrapper.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/inference_wrapper.py
# @Info : @ TSMC-SIGGRAPH, 2018/7/12
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
from net import Model
from utils import inference_wrapper_base
class InferenceWrapper(inference_wrapper_base.InferenceWrapperBase):
    """Concrete wrapper that builds the derain model and runs inference."""
    def __init__(self):
        super(InferenceWrapper, self).__init__()
        # Build eagerly so the model's nickname is available to callers.
        self.nickname = self.build_model().nickname
    def build_model(self):
        """Construct the inference-mode graph and return the model object."""
        derain_model = Model(mode="inference")
        derain_model.build()
        return derain_model
    def inference_step(self, sess, input_feed):
        """Run one forward pass.

        :param sess: TensorFlow Session object.
        :param input_feed: numpy image array fed into "image_feed:0".
        :return: the fetched "derain/output:0" array.
        """
        return sess.run(fetches="derain/output:0",
                        feed_dict={"image_feed:0": input_feed})
| 1,225
| 29.65
| 81
|
py
|
MENET
|
MENET-master/heavy/utils/inference_wrapper_base.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/inference_wrapper_base.py
# @Info : @ TSMC-SIGGRAPH, 2018/7/12
# @Desc : refer to google's im2txt
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import os.path
import tensorflow as tf
# pylint: disable=unused-argument
class InferenceWrapperBase(object):
    """Base wrapper class for performing inference with a trained model.

    Adapted from Google's im2txt inference wrapper; subclasses implement
    ``build_model`` and ``inference_step``, while this base provides the
    checkpoint-restoration plumbing.
    """
    def __init__(self):
        pass
    def build_model(self):
        """Builds the model for inference.
        Returns:
          model: The model object.
        """
        tf.logging.fatal("Please implement build_model in subclass")
    def _create_restore_fn(self, checkpoint_path, saver):
        """Creates a function that restores a model from checkpoint.
        Args:
          checkpoint_path: Checkpoint file or a directory containing a checkpoint
            file.
          saver: Saver for restoring variables from the checkpoint file.
        Returns:
          restore_fn: A function such that restore_fn(sess) loads model variables
            from the checkpoint file.
        Raises:
          ValueError: If checkpoint_path does not refer to a checkpoint file or a
            directory containing a checkpoint file.
        """
        if tf.gfile.IsDirectory(checkpoint_path):
            # Resolve a directory to its most recent checkpoint file.
            checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
            if not checkpoint_path:
                raise ValueError("No checkpoint file found in: %s" % checkpoint_path)
        def _restore_fn(sess):
            tf.logging.info("Loading model from checkpoint: %s", checkpoint_path)
            saver.restore(sess, checkpoint_path)
            tf.logging.info("Successfully loaded checkpoint: %s",
                            os.path.basename(checkpoint_path))
            print("Successfully loaded checkpoint: ", os.path.basename(checkpoint_path))
        return _restore_fn
    def build_graph_from_config(self, checkpoint_path):
        """Builds the inference graph's restore function.
        Args:
          checkpoint_path: Checkpoint file or a directory containing a checkpoint
            file.
        Returns:
          restore_fn: A function such that restore_fn(sess) loads model variables
            from the checkpoint file.
        """
        tf.logging.info("Building model.")
        # self.build_model() # move to inference_warpper.__init__ for get class name
        saver = tf.train.Saver()
        return self._create_restore_fn(checkpoint_path, saver)
    # def inference_step(self, sess, input_feed, img_size_feed):
    #     """Runs one step of inference.
    #     Args:
    #       sess: TensorFlow Session object.
    #       input_feed: A numpy array of shape [batch_size].
    #       img_size_feed: A list of image height and width
    #     Returns:
    #       rain_layer: A numpy array of shape [N,H,W,C].
    #       background_layer: A numpy array of shape [N,H,W,C].
    #
    #     """
    #     tf.logging.fatal("Please implement inference_step in subclass")
    def inference_step(self, sess, input_feed):
        """Runs one step of inference. Must be overridden by subclasses.
        Args:
          sess: TensorFlow Session object.
          input_feed: A numpy array of shape [batch_size].
        Returns:
          The subclass-defined inference output (e.g. derained image array).
        """
        tf.logging.fatal("Please implement inference_step in subclass")
| 3,683
| 37.375
| 88
|
py
|
MENET
|
MENET-master/heavy/utils/__init__.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/__init__.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/29
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
| 202
| 24.375
| 81
|
py
|
MENET
|
MENET-master/heavy/utils/transforms.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/transforms.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/29
# @Desc : @ sumihui : refer to pytorch
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import numpy as np
from PIL import Image
class Compose(object):
    """Chain several transforms so they are applied in sequence.

    Args:
        transforms (list of ``Transform`` objects): transforms to compose.

    Example:
        >> transforms.Compose([
        >> transforms.FiveCrop(10),
        >> lambda crops: np.stack([transforms.ToArray(crop) for crop in crops])
        >> ])
    """
    def __init__(self, transforms):
        self.transforms = transforms
    def __call__(self, img):
        result = img
        for transform in self.transforms:
            result = transform(result)
        return result
    def __repr__(self):
        body = ''.join('\n {0}'.format(t) for t in self.transforms)
        return '{0}({1}\n)'.format(self.__class__.__name__, body)
class FiveCrop(object):
    """Crop the given PIL Image into four corners and the central crop.

    .. Note::
        This transform returns a tuple of images and there may be a mismatch
        in the number of inputs and targets your Dataset returns. See below
        for an example of how to deal with this.

    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            ``int`` instead of sequence like (h, w), a square crop of size
            (size, size) is made.
        horizontal_flip (bool): If True, also return the five crops of the
            horizontally flipped image (ten crops in total).

    Example:
        >> transform = Compose([
        >> FiveCrop(size), # this is a list of PIL Images
        >> lambda crops: np.stack([transforms.ToArray(crop) for crop in crops]) # returns a 4D ndarray
        >> ])
    """
    def __init__(self, size, horizontal_flip=False):
        # Normalize `size` to an (h, w) tuple exactly once (the original code
        # assigned self.size before the branch as well — a dead store).
        if isinstance(size, int):
            self.size = (size, size)
        else:
            assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
            self.size = size
        self.horizontal_flip = horizontal_flip
    def __call__(self, img):
        """
        :param img: (PIL Image). Image to be cropped.
        :return: tuple of 5 (or 10 with horizontal_flip) cropped PIL Images.
        """
        if not isinstance(img, Image.Image):
            raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
        crops = self.five_crop(img)
        if self.horizontal_flip:
            flipped = img.transpose(Image.FLIP_LEFT_RIGHT)
            crops = crops + self.five_crop(flipped)
        return crops
    def __repr__(self):
        return self.__class__.__name__ + '(size={0})'.format(self.size)
    def five_crop(self, img):
        """Crop ``img`` into its four corners plus the central crop.

        Returns:
            tuple: (tl, tr, bl, br, center) corresponding to top left,
            top right, bottom left, bottom right and center crops.

        Raises:
            ValueError: if the requested crop is larger than the image.
        """
        w, h = img.size
        crop_h, crop_w = self.size
        if crop_w > w or crop_h > h:
            raise ValueError("Requested crop size {} is bigger than input size {}".format(self.size,
                                                                                          (h, w)))
        tl = img.crop((0, 0, crop_w, crop_h))
        tr = img.crop((w - crop_w, 0, w, crop_h))
        bl = img.crop((0, h - crop_h, crop_w, h))
        br = img.crop((w - crop_w, h - crop_h, w, h))
        center = self.center_crop(img)
        return (tl, tr, bl, br, center)
    def center_crop(self, img):
        """Return the central (h, w) crop of ``img`` (a PIL Image)."""
        w, h = img.size
        th, tw = self.size  # Height/Width of the cropped image.
        i = int(round((h - th) / 2.))  # Upper pixel coordinate.
        j = int(round((w - tw) / 2.))  # Left pixel coordinate.
        return img.crop((j, i, j + tw, i + th))
class ToArray(object):
    """Convert a ``PIL Image`` to a ``numpy.ndarray``.

    A PIL Image (H x W x C) with values in [0, 255] becomes a uint8
    ``numpy.ndarray`` of the same shape (no rescaling is performed).
    """
    def __call__(self, pic):
        """
        Args:
            pic (PIL Image): Image to be converted to numpy.ndarray.
        Returns:
            numpy.ndarray: Converted image (dtype uint8).
        """
        # note: 2019/05/29 uint8
        return np.asarray(pic, "uint8")
    def __repr__(self):
        return '{0}()'.format(self.__class__.__name__)
| 5,204
| 33.932886
| 106
|
py
|
dMod
|
dMod-master/PEtabTests/0007/0007.py
|
from petabtests import *
from petab.C import *
import petab
import pandas as pd
# PEtab test case 0007: two observables with different observable
# transformations (lin for obs_a, log10 for obs_b).
test_id = 7
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['c0'],
}).set_index([CONDITION_ID])
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_b'],
    SIMULATION_CONDITION_ID: ['c0', 'c0'],
    TIME: [10, 10],
    MEASUREMENT: [0.2, 0.8]
})
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_b'],
    OBSERVABLE_FORMULA: ['A', 'B'],
    OBSERVABLE_TRANSFORMATION: [LIN, LOG10],
    NOISE_FORMULA: [0.5, 0.6]
}).set_index([OBSERVABLE_ID])
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['a0', 'b0', 'k1', 'k2'],
    PARAMETER_SCALE: [LIN] * 4,
    LOWER_BOUND: [0] * 4,
    UPPER_BOUND: [10] * 4,
    NOMINAL_VALUE: [1, 0, 0.8, 0.6],
    ESTIMATE: [1] * 4,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
# Reference simulation at t=10 with nominal parameters (a0=1, b0=0,
# k1=0.8, k2=0.6); analytical_a/_b come from the petabtests star import.
simulation_df[SIMULATION] = [
    analytical_a(10, 1, 0, 0.8, 0.6),
    analytical_b(10, 1, 0, 0.8, 0.6),
]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 1,785
| 24.15493
| 78
|
py
|
dMod
|
dMod-master/PEtabTests/0009/0009.py
|
from petabtests import *
from petab.C import *
import petab
import pandas as pd
# PEtab test case 0009: preequilibration — measurements reference a
# preequilibration condition (k1=0.3) before simulating under c0 (k1=0.8).
test_id = 9
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['preeq_c0', 'c0'],
    'k1': [0.3, 0.8],
}).set_index([CONDITION_ID])
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_a'],
    PREEQUILIBRATION_CONDITION_ID: ['preeq_c0', 'preeq_c0'],
    SIMULATION_CONDITION_ID: ['c0', 'c0'],
    TIME: [1, 10],
    MEASUREMENT: [0.7, 0.1]
})
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a'],
    OBSERVABLE_FORMULA: ['A'],
    NOISE_FORMULA: [0.5]
}).set_index([OBSERVABLE_ID])
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['a0', 'b0', 'k2'],
    PARAMETER_SCALE: [LIN] * 3,
    LOWER_BOUND: [0] * 3,
    UPPER_BOUND: [10] * 3,
    NOMINAL_VALUE: [1, 0, 0.6],
    ESTIMATE: [1] * 3,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
# simulate for far time point as steady state
# (t=1000 is used as a numerical stand-in for the true steady state)
steady_state_a = analytical_a(1000, 1, 0, 0.3, 0.6)
steady_state_b = analytical_b(1000, 1, 0, 0.3, 0.6)
# use steady state as initial state
simulation_df[SIMULATION] = [
    analytical_a(t, steady_state_a, steady_state_b, 0.8, 0.6)
    for t in simulation_df[TIME]]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 2,008
| 25.786667
| 78
|
py
|
dMod
|
dMod-master/PEtabTests/0016/0016.py
|
from petabtests import *
from petab.C import *
import petab
import pandas as pd
# PEtab test case 0016: like 0007 but with the natural-log observable
# transformation (lin for obs_a, log for obs_b).
test_id = 16
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['c0'],
}).set_index([CONDITION_ID])
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_b'],
    SIMULATION_CONDITION_ID: ['c0', 'c0'],
    TIME: [10, 10],
    MEASUREMENT: [0.2, 0.8]
})
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_b'],
    OBSERVABLE_FORMULA: ['A', 'B'],
    OBSERVABLE_TRANSFORMATION: [LIN, LOG],
    NOISE_FORMULA: [0.5, 0.7]
}).set_index([OBSERVABLE_ID])
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['a0', 'b0', 'k1', 'k2'],
    PARAMETER_SCALE: [LIN] * 4,
    LOWER_BOUND: [0] * 4,
    UPPER_BOUND: [10] * 4,
    NOMINAL_VALUE: [1, 0, 0.8, 0.6],
    ESTIMATE: [1] * 4,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
# Reference simulation at t=10 with nominal parameters.
simulation_df[SIMULATION] = [
    analytical_a(10, 1, 0, 0.8, 0.6),
    analytical_b(10, 1, 0, 0.8, 0.6),
]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 1,784
| 24.140845
| 78
|
py
|
dMod
|
dMod-master/PEtabTests/0008/0008.py
|
from petabtests import *
from petab.C import *
import petab
import pandas as pd
# PEtab test case 0008: replicate measurements — two different
# measurements of obs_a at the same time point t=10.
test_id = 8
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['c0'],
}).set_index([CONDITION_ID])
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_a', 'obs_a'],
    SIMULATION_CONDITION_ID: ['c0', 'c0', 'c0'],
    TIME: [0, 10, 10],
    MEASUREMENT: [0.7, 0.1, 0.2]
})
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a'],
    OBSERVABLE_FORMULA: ['A'],
    NOISE_FORMULA: [0.5]
}).set_index([OBSERVABLE_ID])
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['a0', 'b0', 'k1', 'k2'],
    PARAMETER_SCALE: [LIN] * 4,
    LOWER_BOUND: [0] * 4,
    UPPER_BOUND: [10] * 4,
    NOMINAL_VALUE: [1, 0, 0.8, 0.6],
    ESTIMATE: [1] * 4,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
# One simulated value per measurement row (replicates share the same value).
simulation_df[SIMULATION] = [analytical_a(t, 1, 0, 0.8, 0.6)
                             for t in simulation_df[TIME]]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 1,756
| 24.838235
| 78
|
py
|
dMod
|
dMod-master/PEtabTests/0015/0015.py
|
from petabtests import *
from petab.C import *
import petab
import pandas as pd
# PEtab test case 0015: estimated noise parameter — the noise formula uses
# the placeholder noiseParameter1_obs_a, mapped via the measurement table's
# noiseParameters column to the estimated parameter `noise` (nominal 5).
test_id = 15
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['c0'],
}).set_index([CONDITION_ID])
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_a'],
    SIMULATION_CONDITION_ID: ['c0', 'c0'],
    TIME: [0, 10],
    MEASUREMENT: [0.7, 0.1],
    NOISE_PARAMETERS: ['noise', 'noise']
})
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a'],
    OBSERVABLE_FORMULA: ['A'],
    NOISE_FORMULA: ['noiseParameter1_obs_a']
}).set_index([OBSERVABLE_ID])
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['a0', 'b0', 'k1', 'k2', 'noise'],
    PARAMETER_SCALE: [LIN] * 5,
    LOWER_BOUND: [0] * 5,
    UPPER_BOUND: [10] * 5,
    NOMINAL_VALUE: [1, 0, 0.8, 0.6, 5],
    ESTIMATE: [1] * 5,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
simulation_df[SIMULATION] = [analytical_a(t, 1, 0, 0.8, 0.6)
                             for t in simulation_df[TIME]]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 1,807
| 25.202899
| 78
|
py
|
dMod
|
dMod-master/PEtabTests/0014/0014.py
|
from petabtests import *
from petab.C import *
import petab
import pandas as pd
# PEtab test case 0014: multiple numeric noise parameters — the
# semicolon-separated '0.5;2' fills noiseParameter1/2_obs_a, which the
# noise formula sums.
test_id = 14
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['c0'],
}).set_index([CONDITION_ID])
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_a'],
    SIMULATION_CONDITION_ID: ['c0', 'c0'],
    TIME: [0, 10],
    MEASUREMENT: [0.7, 0.1],
    NOISE_PARAMETERS: ['0.5;2', '0.5;2']
})
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a'],
    OBSERVABLE_FORMULA: ['A'],
    NOISE_FORMULA: ['noiseParameter1_obs_a + noiseParameter2_obs_a']
}).set_index([OBSERVABLE_ID])
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['a0', 'b0', 'k1', 'k2'],
    PARAMETER_SCALE: [LIN] * 4,
    LOWER_BOUND: [0] * 4,
    UPPER_BOUND: [10] * 4,
    NOMINAL_VALUE: [1, 0, 0.8, 0.6],
    ESTIMATE: [1] * 4,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
simulation_df[SIMULATION] = [analytical_a(t, 1, 0, 0.8, 0.6)
                             for t in simulation_df[TIME]]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 1,819
| 25.376812
| 78
|
py
|
dMod
|
dMod-master/PEtabTests/0005/0005.py
|
from petabtests import *
from petab.C import *
import petab
import pandas as pd
# PEtab test case 0005: condition-specific observable parameters — the
# condition table maps `offset_A` to a different estimated parameter per
# condition; uses a modified SBML model.
test_id = 5
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['c0', 'c1'],
    'offset_A': ['offset_A_c0', 'offset_A_c1'],
}).set_index([CONDITION_ID])
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_a'],
    SIMULATION_CONDITION_ID: ['c0', 'c1'],
    TIME: [10, 10],
    MEASUREMENT: [2.1, 3.2]
})
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a'],
    OBSERVABLE_FORMULA: ['A + offset_A'],
    NOISE_FORMULA: [1]
}).set_index([OBSERVABLE_ID])
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['a0', 'b0', 'k1', 'k2', 'offset_A_c0', 'offset_A_c1'],
    PARAMETER_SCALE: [LIN] * 6,
    LOWER_BOUND: [0] * 6,
    UPPER_BOUND: [10] * 6,
    NOMINAL_VALUE: [1, 0, 0.8, 0.6, 2, 3],
    ESTIMATE: [1] * 6,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df],
              sbml_files=['conversion_modified.xml'])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
# Same dynamics under both conditions; only the offset (2 vs 3) differs.
simulation_df[SIMULATION] = [analytical_a(10, 1, 0, 0.8, 0.6) + offset
                             for offset in [2, 3]]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 1,888
| 25.985714
| 78
|
py
|
dMod
|
dMod-master/PEtabTests/0010/0010.py
|
from petabtests import *
from petab.C import *
import petab
import pandas as pd
# PEtab test case 0010: preequilibration with a condition-table override of
# both a parameter (k1) and a species initial value (B=0); only k2 is
# estimated. Uses a modified SBML model.
test_id = 10
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['preeq_c0', 'c0'],
    'k1': [0.3, 0.8],
    'B': [0, 0],
}).set_index([CONDITION_ID])
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_a'],
    PREEQUILIBRATION_CONDITION_ID: ['preeq_c0', 'preeq_c0'],
    SIMULATION_CONDITION_ID: ['c0', 'c0'],
    TIME: [1, 10],
    MEASUREMENT: [0.7, 0.1]
})
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a'],
    OBSERVABLE_FORMULA: ['A'],
    NOISE_FORMULA: [0.5]
}).set_index([OBSERVABLE_ID])
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['k2'],
    PARAMETER_SCALE: [LIN],
    LOWER_BOUND: [0],
    UPPER_BOUND: [10],
    NOMINAL_VALUE: [0.6],
    ESTIMATE: [1],
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df],
              sbml_files=['conversion_modified.xml'])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
# simulate for far time point as steady state
# (t=1000 is used as a numerical stand-in for the true steady state)
steady_state_a = analytical_a(1000, 1, 0, 0.3, 0.6)
# use steady state as initial state
# (B is reset to 0 by the simulation condition, hence the 0 argument)
simulation_df[SIMULATION] = [
    analytical_a(t, steady_state_a, 0, 0.8, 0.6)
    for t in simulation_df[TIME]]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 1,981
| 25.078947
| 78
|
py
|
dMod
|
dMod-master/PEtabTests/0004/0004.py
|
from petabtests import *
from petab.C import *
import petab
import pandas as pd
# PEtab test case 0004: observable with estimated scaling and offset
# parameters (obs_a = scaling_A * A + offset_A).
test_id = 4
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['c0'],
}).set_index([CONDITION_ID])
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_a'],
    SIMULATION_CONDITION_ID: ['c0', 'c0'],
    TIME: [0, 10],
    MEASUREMENT: [0.7, 0.1],
})
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a'],
    OBSERVABLE_FORMULA: ['scaling_A * A + offset_A'],
    NOISE_FORMULA: [1]
}).set_index([OBSERVABLE_ID])
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['a0', 'b0', 'k1', 'k2', 'scaling_A', 'offset_A'],
    PARAMETER_SCALE: [LIN] * 6,
    LOWER_BOUND: [0] * 6,
    UPPER_BOUND: [10] * 6,
    NOMINAL_VALUE: [1, 0, 0.8, 0.6, 0.5, 2],
    ESTIMATE: [1] * 6,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
# Apply the nominal scaling (0.5) and offset (2) to the analytical solution.
simulation_df[SIMULATION] = [0.5 * analytical_a(t, 1, 0, 0.8, 0.6) + 2
                             for t in simulation_df[TIME]]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 1,797
| 25.441176
| 78
|
py
|
dMod
|
dMod-master/PEtabTests/0012/0012.py
|
from petabtests import *
from petab.C import *
import petab
import pandas as pd
# PEtab test case 0012: condition-table override of a compartment size.
test_id = 12
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['c0'],
    'compartment': [3],
}).set_index([CONDITION_ID])
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_a'],
    SIMULATION_CONDITION_ID: ['c0', 'c0'],
    TIME: [0, 10],
    MEASUREMENT: [0.7, 0.1]
})
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a'],
    OBSERVABLE_FORMULA: ['A'],
    NOISE_FORMULA: [0.5]
}).set_index([OBSERVABLE_ID])
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['k1', 'k2'],
    PARAMETER_SCALE: [LIN] * 2,
    LOWER_BOUND: [0] * 2,
    UPPER_BOUND: [10] * 2,
    NOMINAL_VALUE: [0.8, 0.6],
    ESTIMATE: [1] * 2,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
# in the model, concentrations are used, which do not depend on the
# compartment size, so that the species values should stay the same
# (initial values 1, 1 presumably come from the SBML model defaults —
# they are not set in the parameter/condition tables here)
simulation_df[SIMULATION] = [analytical_a(t, 1, 1, 0.8, 0.6)
                             for t in simulation_df[TIME]]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 1,876
| 25.43662
| 78
|
py
|
dMod
|
dMod-master/PEtabTests/0011/0011.py
|
# PEtab test-suite case 0011: the condition table overrides the model
# parameter B with a numeric value (B = 2); uses a modified SBML model.
from petabtests import *
from petab.C import *
import petab
import pandas as pd
test_id = 11
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
# Single condition c0 fixing model parameter B to 2.
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['c0'],
    'B': [2]
}).set_index([CONDITION_ID])
# Two measurements of obs_a at t = 0 and t = 10.
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_a'],
    SIMULATION_CONDITION_ID: ['c0', 'c0'],
    TIME: [0, 10],
    MEASUREMENT: [0.7, 0.1]
})
# obs_a observes species A directly, constant Gaussian noise sd = 0.5.
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a'],
    OBSERVABLE_FORMULA: ['A'],
    NOISE_FORMULA: [0.5]
}).set_index([OBSERVABLE_ID])
# Rate parameters k1, k2 on linear scale, both estimated.
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['k1', 'k2'],
    PARAMETER_SCALE: [LIN] * 2,
    LOWER_BOUND: [0] * 2,
    UPPER_BOUND: [10] * 2,
    NOMINAL_VALUE: [0.8, 0.6],
    ESTIMATE: [1] * 2,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df],
              sbml_files=['conversion_modified.xml'])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
# Reference trajectory with B overridden to 2 (second argument).
simulation_df[SIMULATION] = [analytical_a(t, 1, 2, 0.8, 0.6)
                             for t in simulation_df[TIME]]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 1,782
| 24.471429
| 78
|
py
|
dMod
|
dMod-master/PEtabTests/0002/0002.py
|
# PEtab test-suite case 0002: two simulation conditions overriding the
# initial value a0 of species A (0.8 in c0, 0.9 in c1).
from petabtests import *
from petab.C import *
import petab
import pandas as pd
test_id = 2
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
# Two conditions with different initial values for A.
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['c0', 'c1'],
    'a0': [0.8, 0.9]
}).set_index([CONDITION_ID])
# Two measurements per condition, at t = 0 and t = 10.
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a'] * 4,
    SIMULATION_CONDITION_ID: ['c0', 'c0', 'c1', 'c1'],
    TIME: [0, 10, 0, 10],
    MEASUREMENT: [0.7, 0.1, 0.8, 0.2]
})
# obs_a observes species A directly, unit noise.
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a'],
    OBSERVABLE_FORMULA: ['A'],
    NOISE_FORMULA: [1]
}).set_index([OBSERVABLE_ID])
# b0 plus the rate parameters; a0 comes from the condition table.
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['b0', 'k1', 'k2'],
    PARAMETER_SCALE: [LIN] * 3,
    LOWER_BOUND: [0] * 3,
    UPPER_BOUND: [10] * 3,
    NOMINAL_VALUE: [0, 0.8, 0.6],
    ESTIMATE: [1] * 3,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
# Concatenate the per-condition reference trajectories (a0 = 0.8, then 0.9).
simulation_df[SIMULATION] = [*[analytical_a(t, 0.8, 0, 0.8, 0.6)
                               for t in [0, 10]],
                             *[analytical_a(t, 0.9, 0, 0.8, 0.6)
                               for t in [0, 10]]]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 1,882
| 25.521127
| 78
|
py
|
dMod
|
dMod-master/PEtabTests/0001/0001.py
|
# PEtab test-suite case 0001: the basic problem — one condition, one
# observable, all model parameters taken from the parameter table.
from petabtests import *
from petab.C import *
import petab
import pandas as pd
test_id = 1
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
# Single condition with no overrides.
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['c0'],
}).set_index([CONDITION_ID])
# Two measurements of obs_a at t = 0 and t = 10.
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_a'],
    SIMULATION_CONDITION_ID: ['c0', 'c0'],
    TIME: [0, 10],
    MEASUREMENT: [0.7, 0.1]
})
# obs_a observes species A directly, constant Gaussian noise sd = 0.5.
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a'],
    OBSERVABLE_FORMULA: ['A'],
    NOISE_FORMULA: [0.5]
}).set_index([OBSERVABLE_ID])
# Initial values a0, b0 and rates k1, k2, all estimated on linear scale.
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['a0', 'b0', 'k1', 'k2'],
    PARAMETER_SCALE: [LIN] * 4,
    LOWER_BOUND: [0] * 4,
    UPPER_BOUND: [10] * 4,
    NOMINAL_VALUE: [1, 0, 0.8, 0.6],
    ESTIMATE: [1] * 4,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
# Reference trajectory at the nominal parameter values.
simulation_df[SIMULATION] = [analytical_a(t, 1, 0, 0.8, 0.6)
                             for t in simulation_df[TIME]]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 1,732
| 24.485294
| 78
|
py
|
dMod
|
dMod-master/PEtabTests/0003/0003.py
|
# PEtab test-suite case 0003: observable parameters — the observable
# formula uses a scale and offset supplied per measurement via the
# observableParameters column ('0.5;2' -> scale 0.5, offset 2).
from petabtests import *
from petab.C import *
import petab
import pandas as pd
test_id = 3
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
# Single condition with no overrides.
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['c0'],
}).set_index([CONDITION_ID])
# Each measurement carries its observable-parameter overrides.
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_a'],
    SIMULATION_CONDITION_ID: ['c0', 'c0'],
    TIME: [0, 10],
    MEASUREMENT: [0.7, 0.1],
    OBSERVABLE_PARAMETERS: ['0.5;2', '0.5;2']
})
# Affine observable: scale * A + offset, placeholders filled per row.
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a'],
    OBSERVABLE_FORMULA: ['observableParameter1_obs_a * A + '
                         'observableParameter2_obs_a'],
    NOISE_FORMULA: [0.5]
}).set_index([OBSERVABLE_ID])
# Initial values a0, b0 and rates k1, k2, all estimated.
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['a0', 'b0', 'k1', 'k2'],
    PARAMETER_SCALE: [LIN] * 4,
    LOWER_BOUND: [0] * 4,
    UPPER_BOUND: [10] * 4,
    NOMINAL_VALUE: [1, 0, 0.8, 0.6],
    ESTIMATE: [1] * 4,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
# Apply the same affine transform to the analytical trajectory.
simulation_df[SIMULATION] = [0.5 * analytical_a(t, 1, 0, 0.8, 0.6) + 2
                             for t in simulation_df[TIME]]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 1,875
| 25.8
| 78
|
py
|
dMod
|
dMod-master/PEtabTests/0006/0006.py
|
# PEtab test-suite case 0006: time-point-specific numeric observable
# parameters — each measurement supplies its own scaling factor (10 at
# t=0, 15 at t=10) for the observable formula.
from petabtests import *
from petab.C import *
import petab
import pandas as pd
test_id = 6
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
# Single condition with no overrides.
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['c0'],
}).set_index([CONDITION_ID])
# Per-measurement numeric overrides for the observable scaling.
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_a'],
    SIMULATION_CONDITION_ID: ['c0', 'c0'],
    TIME: [0, 10],
    MEASUREMENT: [0.7, 0.1],
    OBSERVABLE_PARAMETERS: [10, 15]
})
# Scaled observable: placeholder filled per measurement row.
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a'],
    OBSERVABLE_FORMULA: ['observableParameter1_obs_a * A'],
    NOISE_FORMULA: [1]
}).set_index([OBSERVABLE_ID])
# Initial values a0, b0 and rates k1, k2, all estimated.
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['a0', 'b0', 'k1', 'k2'],
    PARAMETER_SCALE: [LIN] * 4,
    LOWER_BOUND: [0] * 4,
    UPPER_BOUND: [10] * 4,
    NOMINAL_VALUE: [1, 0, 0.8, 0.6],
    ESTIMATE: [1] * 4,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
# Each reference point uses its own scaling factor (10, then 15).
simulation_df[SIMULATION] = [10 * analytical_a(0, 1, 0, 0.8, 0.6),
                             15 * analytical_a(10, 1, 0, 0.8, 0.6)]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 1,811
| 25.26087
| 78
|
py
|
dMod
|
dMod-master/PEtabTests/0013/0013.py
|
# PEtab test-suite case 0013: the condition table maps model parameter B
# to the estimated parameter 'par' (nominal value 7); uses a modified
# SBML model.
from petabtests import *
from petab.C import *
import petab
import pandas as pd
test_id = 13
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
# Condition c0 overrides B with the parameter-table entry 'par'.
condition_df = pd.DataFrame(data={
    CONDITION_ID: ['c0'],
    'B': ['par'],
}).set_index([CONDITION_ID])
# Two measurements of obs_a at t = 0 and t = 10.
measurement_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a', 'obs_a'],
    SIMULATION_CONDITION_ID: ['c0', 'c0'],
    TIME: [0, 10],
    MEASUREMENT: [0.7, 0.1]
})
# obs_a observes species A directly, constant Gaussian noise sd = 0.5.
observable_df = pd.DataFrame(data={
    OBSERVABLE_ID: ['obs_a'],
    OBSERVABLE_FORMULA: ['A'],
    NOISE_FORMULA: [0.5]
}).set_index([OBSERVABLE_ID])
# Rates k1, k2 plus 'par', which feeds B via the condition table.
parameter_df = pd.DataFrame(data={
    PARAMETER_ID: ['k1', 'k2', 'par'],
    PARAMETER_SCALE: [LIN] * 3,
    LOWER_BOUND: [0] * 3,
    UPPER_BOUND: [10] * 3,
    NOMINAL_VALUE: [0.8, 0.6, 7],
    ESTIMATE: [1] * 3,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
              parameter_df=parameter_df,
              condition_dfs=[condition_df],
              observable_dfs=[observable_df],
              measurement_dfs=[measurement_df],
              sbml_files=['conversion_modified.xml'])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
    columns={MEASUREMENT: SIMULATION})
# B is overridden by parameter 'par' (nominal value 7), hence the second
# argument 7 in the analytical reference trajectory below.
simulation_df[SIMULATION] = [analytical_a(t, 1, 7, 0.8, 0.6)
                             for t in simulation_df[TIME]]
chi2 = petab.calculate_chi2(
    measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
    measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
               chi2=chi2,
               llh=llh,
               simulation_dfs=[simulation_df])
| 1,934
| 25.875
| 78
|
py
|
dMod
|
dMod-master/inst/code/readData.py
|
# Author: Benjamin Merkt, Physikalisches Institut, Universitaet Freiburg
import csv
import sys
import sympy as spy
#from sympy.parsing.sympy_parser import parse_expr
from sympy.parsing.sympy_tokenize import tokenize
# try/except necessary for R interface (imports automatically and does not find other files)
try:
from functions import *
except:
pass
def readModel(fileName, delimT):
    """Read a reaction-network definition from a delimited text file.

    The first row holds (after two leading columns) the state/variable
    names; each following row holds a flux expression in column 1 and the
    stoichiometric coefficients for each state in the remaining columns.

    Returns (variables, parameters, flux vector as sympy Matrix,
    stoichiometry matrix with one row per state).

    NOTE(review): Python-2 only as written ('rb' csv mode, reader.next());
    would need open(..., newline='') and next(reader) on Python 3.
    """
    # 't' is used as a portable stand-in for a tab delimiter.
    if delimT == 't':
        delim = '\t'
    else:
        delim = delimT
    variables = []
    parameters = []
    flows = []
    stoichiometry = []
    # Module-level cursor shared with the tokenizer's read() callback below.
    global l; l = -1
    with open(fileName, 'rb') as defFile:
        reader = csv.reader(defFile, delimiter=delim, quoting=csv.QUOTE_NONE)
        # Header row: columns 2.. are the state names (quotes stripped).
        row = reader.next()
        for i in range(2,len(row)):
            row[i] = row[i].replace('"','')
            variables.append(giveVar(row[i]))
        lines = 0
        stoichiometryList = []
        for row in reader:
            # Column 1 is the flux expression; '^' becomes Python's '**'.
            row[1] = row[1].replace('"','')
            row[1] = row[1].replace('^','**')
            flows.append(row[1])
            lines += 1
            for i in range(2,len(row)):
                # Empty cells (also after quote-stripping) count as 0.
                if row[i] == '': num = 0
                else:
                    row[i] = row[i].replace('"','')
                    if row[i] == '':
                        row[i] = 0
                    num = int(row[i])
                stoichiometryList.append(num)
    # Rows of the file are fluxes; transpose so rows become states.
    stoichiometryT = spy.Matrix(lines,len(variables),stoichiometryList)
    stoichiometry = stoichiometryT.transpose()
    def read():
        # Feed one flux string per call to the tokenizer.
        global l
        l += 1
        if l >= len(flows): raise StopIteration
        else: return flows[l]
    def useToken(key, value, Coord1, Coord2, fullLine):
        # Token key 1 == NAME: collect every identifier as a candidate
        # parameter; state names are removed again below.
        if key == 1:
            parameters.append(giveVar(value))
    tokenize(read,useToken)#get parameters from flows
    parameters = sorted(list(set(parameters)), key=spy.default_sort_key)
    for entry in variables:
        if entry in parameters:
            parameters.remove(entry)
    for f in range(len(flows)):
        flows[f] = giveParsed(flows[f])
    return variables, parameters, spy.Matrix(len(flows),1,flows), stoichiometry
def readEquations(equationSource):
    """Parse 'name = expression' equations from a file path or a list.

    *equationSource* is either a filename (Python-2 ``basestring``) or a
    list of equation strings.  Returns (variables, functions, parameters):
    left-hand-side symbols, parsed right-hand-side expressions, and the
    sorted set of identifiers appearing only on right-hand sides.
    """
    if isinstance(equationSource, basestring):
        eq_file = open(equationSource,'r')
        def read():
            # One raw line per tokenizer call; quotes/commas stripped.
            line = eq_file.readline()
            line = line.replace('"','').replace(',','')
            return line
    else:
        # List input: emulate file reading with a module-level cursor.
        global l
        l = 0
        def read():
            global l
            if l == len(equationSource): raise StopIteration
            line = equationSource[l]
            line = line.replace('"','').replace(',','').strip()
            l += 1
            return line + '\n'
    # Shared tokenizer state; newLine marks "next NAME is a LHS symbol".
    global newLine; newLine = True
    global variables; variables = []
    global functions; functions = []
    global parameters; parameters = []
    def useToken(key, value, Coord1, Coord2, fullLine):
        # NOTE(review): 'obsFunctions' in this global declaration looks
        # stale — the list actually appended to is 'functions' (mutated,
        # never rebound, so it still works) — confirm before refactoring.
        global newLine, variables, obsFunctions, parameters
        if key == 1: #1: NAME 2: NUMBER 51: OP 4: NEWLINE 0: ENDMARKER
            if newLine == True:
                # First NAME on a line: LHS symbol; parse everything
                # right of '=' as its defining expression.
                variables.append(giveVar(value))
                functions.append(giveParsed(fullLine[(fullLine.find('=')+1):len(fullLine)]))
            else:
                parameters.append(giveVar(value))
            newLine = False
        elif key == 4:
            newLine = True
    tokenize(read,useToken)
    # Deduplicate, order deterministically, drop LHS symbols.
    parameters = sorted(list(set(parameters)), key=spy.default_sort_key)
    for entry in variables:
        if entry in parameters:
            parameters.remove(entry)
    return variables, functions, parameters
def readObservation(observation_path, variables, parameters):
    """Parse the observation functions and collect their parameters.

    Returns the observable names, their parsed formulas, and the dynamic
    parameters extended by the genuinely new observation parameters
    (dynamic states and dynamic parameters are filtered out).
    """
    observables, obsFunctions, obsParameters = readEquations(observation_path)
    # Keep only symbols that are neither dynamic states nor dynamic
    # parameters; first-appearance order is preserved.
    known = list(variables) + list(parameters)
    obsParameters = [sym for sym in obsParameters if sym not in known]
    return observables, obsFunctions, parameters+obsParameters
def readInitialValues(initial_path, variables, parameters):
    """Parse initial-value equations and align them with *variables*.

    Returns (initFunctionsOrdered, parameters + initParameters): one
    initial expression per entry of *variables* (in that order) and the
    parameter list extended by any newly introduced initial-value
    parameters (``<state>_0`` for unconstrained states).
    """
    initVars, initFunctions, initParameters = readEquations(initial_path)
    o = len(initVars)
    m = len(variables)
    #remove variables and other parameters for initParameters
    i = 0
    while i < len(initParameters):
        if initParameters[i] in variables+parameters:
            initParameters.pop(i)
        else:
            i += 1
    #if variabel not restricted, introduce inital value parameter
    for i in range(o):
        if initVars[i] == initFunctions[i]:
            initFunctions[i] = giveVar(str(initVars[i])+'_0')
            initParameters.append(initFunctions[i])
    #subsitute dependence of other variables
    # Iterate substitution until a fixed point; bail out after 100 rounds
    # to guard against cyclic definitions.
    substituted = True
    counter = 0
    while substituted:
        substituted = False
        for k in range(o):
            for j in range(o):
                if initFunctions[k].has(initVars[j]):
                    initFunctions[k] = initFunctions[k].subs(initVars[j],initFunctions[j])
                    substituted = True
        counter += 1
        if counter > 100:
            raise(UserWarning('There seems to be an infinite recursion in the initial value functions'))
    #order varaibels according to equations
    initFunctionsOrdered = [0]*m
    for i in range(m):
        try: initFunctionsOrdered[i] = initFunctions[initVars.index(variables[i])]
        except ValueError: #if not contained introduce new unconstrained parameter
            initFunctionsOrdered[i] = giveVar(str(variables[i])+'_0')
            initParameters.append(initFunctionsOrdered[i])
    return initFunctionsOrdered, parameters+initParameters
def readPredictions(prediction_path, variables, parameters,):
    """Parse prediction functions; fail if they introduce new symbols.

    Prediction expressions may only reference known dynamic states and
    parameters.  Raises UserWarning listing any unknown identifiers.
    """
    predictions, predFunctions, predParameters = readEquations(prediction_path)
    # Collect the symbols that are not already known to the model.
    known = variables+parameters
    unknown = [sym for sym in predParameters if sym not in known]
    if unknown:
        raise(UserWarning('Error: New parameters occured in predictions: ' + str(unknown)))
    return predictions, predFunctions
| 5,411
| 25.271845
| 95
|
py
|
dMod
|
dMod-master/inst/code/AlyssaPetit_ver1.1.py
|
# AlyssaPetit version 1.1
# Use with python 3.x
import numpy
import sympy
from sympy import Matrix, simplify, expand, solve
from numpy import shape, zeros, concatenate
from numpy.linalg import matrix_rank
from sympy.parsing.sympy_parser import parse_expr
from sympy.matrices import *
from sympy.matrices import matrix_multiply_elementwise
import csv
import random
from random import shuffle
def LCS(s1, s2):
    """Return the longest common contiguous substring of s1 and s2.

    Classic dynamic-programming table scan; on ties the match ending
    earliest in s1 wins (strictly-greater update), as in the original.
    """
    table = [[0] * (len(s2) + 1) for _ in range(len(s1) + 1)]
    best_len = 0
    best_end = 0
    for i in range(1, len(s1) + 1):
        for j in range(1, len(s2) + 1):
            if s1[i - 1] != s2[j - 1]:
                table[i][j] = 0
                continue
            table[i][j] = table[i - 1][j - 1] + 1
            if table[i][j] > best_len:
                best_len = table[i][j]
                best_end = i
    return s1[best_end - best_len: best_end]
def SolveSymbLES(A,b):
    # Solve the symbolic linear system A*x = b via Cramer's rule.
    # Returns [] when A is singular (the caller treats that as "retry with
    # another row selection"); otherwise a list of simplified sympy
    # expressions, one entry per unknown.
    dim=shape(A)[0]
    Asave=A[:]
    Asave=Matrix(dim, dim, Asave)
    #printmatrix(Asave)
    #print(b)
    determinant=Asave.det()
    if(determinant==0):
        #print('Determinant of LCL-calculation is zero! Try to specify LCLs yourself!')
        return([])
    result=[]
    for i in range(dim):
        # Cramer's rule: replace column i with b and take the determinant
        # ratio to obtain unknown i.
        A=Matrix(dim,dim,Asave)
        A.col_del(i)
        A=A.col_insert(i,b)
        result.append(simplify(A.det()/determinant))
    return(result)
def CutStringListatSymbol(liste, symbol):
    """Split every string in *liste* at *symbol* and flatten the result.

    Strings not containing the symbol are passed through unchanged; the
    relative order of pieces follows the input order.
    """
    pieces = []
    for entry in liste:
        pieces.extend(entry.split(symbol) if symbol in entry else [entry])
    return pieces
def FillwithRanNum(M):
    # Substitute every symbolic parameter appearing in matrix M with a
    # fresh random number, returning a numeric matrix of the same shape.
    # Used to probe linear (in)dependence of columns numerically.
    dimx=len(M.row(0))
    dimy=len(M.col(0))
    ranM=zeros(dimy, dimx)
    parlist=[]
    ranlist=[]
    # Collect the textual form of every non-zero entry (sign stripped).
    for i in M[:]:
        if(i!=0):
            if(str(i)[0]=='-'):
                parlist.append(str(i)[1:])
            else:
                parlist.append(str(i))
    parlist=list(set(parlist))
    # Break composite expressions into atomic tokens at operators/parens.
    for symbol in [' - ', ' + ', '*', '/', '(',')']:
        parlist=CutStringListatSymbol(parlist,symbol)
    parlist=list(set(parlist))
    # Keep only genuine symbols; pair each with a random value.
    temp=[]
    for i in parlist:
        if(i!=''):
            if(not is_number(i)):
                temp.append(i)
                ranlist.append(random.random())
    parlist=temp
    # Substitute symbol-by-symbol into every non-zero entry.
    for i in range(dimy):
        for j in range(dimx):
            ranM[i,j]=M[i,j]
            if(ranM[i,j]!=0):
                for p in range(len(parlist)):
                    ranM[i,j]=ranM[i,j].subs(parse_expr(parlist[p]),ranlist[p])
    return(ranM)
def FindLinDep(M, tol=1e-12):
    # Detect linearly dependent columns of the symbolic matrix M by
    # randomizing its parameters and running a QR decomposition: a
    # (near-)zero diagonal entry of R marks a dependent column index.
    ranM=FillwithRanNum(M)
    Q,R=numpy.linalg.qr(ranM)
    # Clamp numerical noise below *tol* to exact zero.
    for i in range(shape(R)[0]):
        for j in range(shape(R)[1]):
            if(abs(R[i,j]) < tol):
                R[i,j]=0.0
    LinDepList=[]
    for i in range(shape(R)[0]):
        if(R[i][i]==0):
            LinDepList.append(i)
    return(LinDepList)
def FindLCL(M, X):
    # Find linear conserved laws (LCLs) of the system with coefficient
    # matrix M over the states X.  Repeatedly: locate a linearly dependent
    # column, solve for its combination coefficients, and record the
    # conserved quantity as a human-readable 'expr = total<Name>' string.
    # Returns (list of LCL strings, indices of the removed state rows);
    # ([], 0) or (0, 0) on failure.
    LCL=[]
    LinDepList=FindLinDep(M)
    i=0
    counter=0
    deleted_rows=[]
    states=Matrix(X[:])
    while(LinDepList!=[]):
        i=LinDepList[0]
        testM=FillwithRanNum(M)
        # Transitively gather the rows/columns coupled to column i.
        rowliste=list(numpy.nonzero(testM[:,i])[0])
        colliste=[i]
        for z in range(i):
            for k in rowliste:
                for j in range(i):
                    jliste=list(numpy.nonzero(testM[:,j])[0])
                    if(k in jliste):
                        rowliste=rowliste+jliste
                        colliste=colliste+[j]
            rowliste=list(set(rowliste))
            colliste=list(set(colliste))
        rowliste.sort()
        colliste.sort()
        colliste.pop()
        rowlisteTry=rowliste[0:(len(colliste))]
        vec=SolveSymbLES(M[rowlisteTry,colliste],M[rowlisteTry,i])
        # Singular subsystem: retry with shuffled row selections.
        shufflecounter=0
        while(vec==[] and shufflecounter < 100):
            shuffle(rowliste)
            shufflecounter=shufflecounter+1
            rowlisteTry=rowliste[0:(len(colliste))]
            vec=SolveSymbLES(M[rowlisteTry,colliste],M[rowlisteTry,i])
        if(shufflecounter==100):
            print('Problems while finding conserved quantities!',flush=True)
            return(0,0)
        counter=counter+1
        try:
            # Assemble -sum(vec_v * state_v); together with state i this
            # is the conserved combination.
            mat=[states[l] for l in colliste]
            test=parse_expr('0')
            for v in range(0,len(vec)):
                test=test-parse_expr(str(vec[v]))*parse_expr(str(mat[v]))
        except:
            return([],0)
        # Derive a name for the total from the longest common substring
        # of the participating terms; fall back to a running number.
        partStr=str(test)+' + '+str(states[i])
        partStr=partStr.split(' + ')
        partStr2=[]
        for index in range(len(partStr)):
            partStr2=partStr2+partStr[index].split('-')
        partStr=partStr2
        if(len(partStr) > 1):
            CLString=LCS(str(partStr[0]),str(partStr[1]))
            for ps in range(2,len(partStr)):
                CLString=LCS(CLString,str(partStr[ps]))
        else:
            CLString=str(partStr[0])
        if(CLString==''):
            CLString=str(counter)
        LCL.append(str(test)+' + '+str(states[i])+' = '+'total'+CLString)
        # Drop the resolved column/state and look for further dependences.
        M.col_del(i)
        states.row_del(i)
        deleted_rows.append(i+counter-1)
        LinDepList=FindLinDep(M)
    return(LCL, deleted_rows)
def printmatrix(M):
    # Pretty-print a sympy matrix with numbered, width-aligned columns and
    # numbered rows.  Purely diagnostic output; returns an empty tuple.
    lengths=[]
    # Column widths: widest string representation in each column.
    for i in range(len(M.row(0))):
        lengths.append(0)
        for j in range(len(M.col(0))):
            lengths[i]=max(lengths[i],len(str(M.col(i)[j])))
    string=''.ljust(5)
    string2=''.ljust(5)
    # Header row of column indices and a dashed separator line.
    for j in range(len(M.row(0))):
        string=string+(str(j)).ljust(lengths[j]+2)
        for k in range(lengths[j]+2):
            string2=string2+('-')
    print(string)
    print(string2)
    for i in range(len(M.col(0))):
        string=str(i).ljust(4) + '['
        for j in range(len(M.row(0))):
            if(j==len(M.row(0))-1):
                string=string+str(M.row(i)[j]).ljust(lengths[j])
            else:
                string=string+(str(M.row(i)[j])+', ').ljust(lengths[j]+2)
        print(string+']',flush=True)
    return()
def printgraph(G):
    """Print one 'node: [successors]' line per entry of the graph dict G.

    Diagnostic output only; returns an empty tuple like printmatrix.

    Bug fix: the original passed ``flush==True`` — an equality comparison
    against the *undefined* name ``flush`` — as a positional argument,
    which raised NameError for any non-empty graph.  The intended keyword
    argument is ``flush=True``.
    """
    for el in G:
        print(el+': '+str(G[el]), flush=True)
    return()
def is_number(s):
    """Return True when *s* parses as a float, False otherwise.

    Only ValueError is swallowed; other errors (e.g. TypeError for
    non-string, non-numeric input) propagate, as before.
    """
    try:
        float(s)
    except ValueError:
        return False
    return True
def checkNegRows(M):
    # Return the indices of rows of M that contain no positive entry
    # (states with outflux only).  Degenerate/empty matrices yield [].
    NegRows=[]
    if((M==Matrix(0,0,[])) | (M==Matrix(0,1,[])) | (M==Matrix(1,0,[]))):
        return(NegRows)
    else:
        for i in range(len(M.col(0))):
            foundPos=False
            for j in range(len(M.row(i))):
                if(M[i,j]>0):
                    foundPos=True
            if(foundPos==False):
                NegRows.append(i)
        return(NegRows)
def checkPosRows(M):
    # Return the indices of rows of M that contain no negative entry
    # (states with influx only).  Degenerate/empty matrices yield [].
    PosRows=[]
    if((M==Matrix(0,0,[])) | (M==Matrix(0,1,[])) | (M==Matrix(1,0,[]))):
        return(PosRows)
    else:
        for i in range(len(M.col(0))):
            foundNeg=False
            for j in range(len(M.row(i))):
                if(M[i,j]<0):
                    foundNeg=True
            if(foundNeg==False):
                PosRows.append(i)
        return(PosRows)
def DetermineGraphStructure(SM, F, X, neglect):
    # Build the steady-state dependency graph: for each ODE (SM*F)[i],
    # record which states its steady-state solution depends on.  A state
    # depends on itself only when its own ODE is nonlinear in it.
    # States in *neglect* always receive a self-edge so they are treated
    # as unsolvable.  Returns {state-name: [dependency names]}.
    graph={}
    for i in range(len(SM*F)):
        liste=[]
        for j in range(len(X)):
            # subs(X[j],1) changes the expression iff X[j] occurs in it.
            if((SM*F)[i]!=((SM*F)[i]).subs(X[j],1)):
                if(j==i):
                    # Separate the X[i]-free part; a remaining X[i] in the
                    # quotient means the ODE is nonlinear in X[i].
                    In=((SM*F)[i]).subs(X[j],0)
                    Out=simplify(((SM*F)[i]-In)/X[j])
                    if(Out!=Out.subs(X[j],1)):
                        liste.append(str(X[j]))
                else:
                    liste.append(str(X[j]))
            else:
                if(j==i):
                    liste.append(str(X[j]))
        graph[str(X[i])]=liste
    #print(graph)
    # Force a self-edge for every neglected state present in X.
    for el in neglect:
        if(parse_expr(el) in X):
            if not el in graph:
                graph[el]=[el]
            else:
                if(el not in graph[el]):
                    graph[el].append(el)
    return(graph)
def FindCycle(graph, X):
    # Return the first cycle found by starting a depth-first search from
    # each state in X (searching for a path back to the start node), or
    # None when the dependency graph is acyclic.
    for el in X:
        cycle=find_cycle(graph, str(el), str(el), path=[])
        if(cycle!=None):
            return(cycle)
    return(None)
def find_cycle(graph, start, end, path=None):
    """Depth-first search for a path from *start* back to *end*.

    *graph* maps node name -> list of successor names.  Returns the node
    list of the path including the closing *end* node, or None when no
    cycle through *end* is reachable.

    Fixes over the original: the mutable default argument ``path=[]`` is
    replaced by the None-sentinel idiom, ``not start in graph`` becomes
    ``start not in graph``, and the bitwise ``&`` on booleans becomes
    ``and``.  Traversal order and results are unchanged.
    """
    path = ([] if path is None else path) + [start]
    if start not in graph:
        return None
    # A non-trivial return to the start node closes the cycle.
    if (start == end) and (path != [start]):
        return path
    for node in graph[start]:
        if node == end:
            return path + [end]
        if node not in path:
            newpath = find_cycle(graph, node, end, path)
            if newpath:
                return newpath
    return None
def GetBestPair(cycle, SM, fluxpars, X, LCLs, neglect):
    # Choose which (state, flux-parameter) pair to use for breaking the
    # given dependency cycle.  Returns (type, state, flux, signChanged):
    #   type 0  - state is covered by a conserved quantity (no flux needed)
    #   type 1  - one-flux state whose flux parameter appears once
    #   type 2  - multi-flux state, flux parameter appears once
    #   type 3  - general case
    #   type -1 - cycle cannot be removed (caller aborts).
    # States covered by an LCL take absolute priority.
    for state in cycle:
        for LCL in LCLs:
            ls=parse_expr(LCL.split(' = ')[0])
            if(ls.subs(parse_expr(state),1)!=ls):
                return(0, state, None, False)
    # Rank cycle states by the smaller of their in-/out-flux counts.
    dimList=[]
    signList=[]
    for state in cycle:
        dim, sign = GetDimension(state, X, SM, True)
        signList.append(sign)
        dimList.append(dim)
    #minOfDimList=min(dimList)
    beststate=None
    bestflux=None
    besttype=-1
    n2beat=1000
    signChanged=False
    min2beat=max(dimList)+1
    for i in range(len(dimList)):
        if(dimList[i] < min2beat):
            min2beat=dimList[i]
            sign=signList[i]
            appearList=[]
            #print(sign)
            # Candidate flux parameters on the cheaper (in or out) side.
            if(sign=="minus"):
                fluxpars2use=GetNegFluxParameters(SM, fluxpars, X, cycle[i])
            else:
                fluxpars2use=GetPosFluxParameters(SM, fluxpars, X, cycle[i])
            abort_flux=False
            for fp in fluxpars2use:
                if(str(fp) not in neglect):
                    appearList.append(GetAppearances(fp, fluxpars, SM))
                else:
                    abort_flux=True
            if(abort_flux):
                ##### Change sign
                # A neglected parameter blocks this side; retry with the
                # opposite flux direction.
                print("Sign changed!",flush=True)
                signChanged=True
                if((sign=="minus" and not signChanged) or (sign=="plus" and signChanged)):
                    fluxpars2use=GetNegFluxParameters(SM, fluxpars, X, cycle[i])
                else:
                    fluxpars2use=GetPosFluxParameters(SM, fluxpars, X, cycle[i])
                abort_flux=False
                for fp in fluxpars2use:
                    if(str(fp) not in neglect):
                        appearList.append(GetAppearances(fp, fluxpars, SM))
                    else:
                        abort_flux=True
            # Prefer the candidate whose flux parameters appear least
            # often across the whole stoichiometry.
            if(sum(appearList) < n2beat and not abort_flux):
                n2beat=sum(appearList)
                beststate=cycle[i]
                if((sign=="minus" and not signChanged) or (sign=="plus" and signChanged)):
                    bestflux=GetNegFluxParameters(SM, fluxpars, X, cycle[i])[0]
                else:
                    bestflux=GetPosFluxParameters(SM, fluxpars, X, cycle[i])[0]
                if(min2beat==1 and max(appearList)==1):
                    besttype=1
                else:
                    if(max(appearList)==1 and min2beat>1):
                        besttype=2
                    else:
                        besttype=3
    return(besttype, beststate, bestflux, signChanged)
def GetNegFluxParameters(SM, fluxpars, X, node):
    # Flux parameters of all fluxes that *consume* the given state, i.e.
    # columns with a negative stoichiometric entry in the state's row.
    row=list(X).index(parse_expr(node))
    liste=[]
    for i in range(len(SM.row(row))):
        if(SM.row(row)[i]<0):
            liste.append(fluxpars[i])
    return(liste)
def GetPosFluxParameters(SM, fluxpars, X, node):
    # Flux parameters of all fluxes that *produce* the given state, i.e.
    # columns with a positive stoichiometric entry in the state's row.
    row=list(X).index(parse_expr(node))
    liste=[]
    for i in range(len(SM.row(row))):
        if(SM.row(row)[i]>0):
            liste.append(fluxpars[i])
    return(liste)
def GetType(node, fp, fluxpars, LCLs):
    # Classify how a (state, flux parameter) pair can be eliminated:
    # 0 = via a conserved quantity, 1 = unique flux of a one-flux state,
    # 2 = unique flux of a multi-flux state, 3 = general case.
    # NOTE(review): GetAppearances and GetDimension are called here with
    # fewer arguments than their definitions require (SM resp. X/SM are
    # missing), so these branches would raise TypeError if reached — this
    # function appears stale or dead; confirm before relying on it.
    for LCL in LCLs:
        ls=parse_expr(LCL.split(' = ')[0])
        if(ls.subs(parse_expr(node),1)!=ls):
            return(0)
    if(GetAppearances(fp, fluxpars)==1):
        if(GetDimension(node)==1):
            return(1)
        else:
            return(2)
    else:
        return(3)
def GetAppearances(fp, fluxpars, SM):
    # Count how many stoichiometry entries are affected by flux parameter
    # fp: non-zero entries summed over every column whose flux carries fp.
    anz=0
    cols = [i for i, x in enumerate(fluxpars) if x == fp]
    #col=list(fluxpars).index(fp)
    for i in cols:
        for j in range(len(SM.col(i))):
            if(SM.col(i)[j]!=0):
                anz=anz+1
    return(anz)
def GetDimension(node, X, SM, getSign=False):
    # Count the consuming (negative) and producing (positive) fluxes of
    # the given state.  Without getSign, return the smaller count; with
    # getSign, return (count, "minus"|"plus") for the cheaper side, ties
    # broken by the fewer total stoichiometry appearances.
    row=list(X).index(parse_expr(node))
    anzminus=0
    anzappearminus=0
    for i in range(len(SM.row(row))):
        if(SM.row(row)[i]<0):
            anzappearminus=anzappearminus+CountNZE(SM.col(i))
            anzminus=anzminus+1
    anzplus=0
    anzappearplus=0
    for i in range(len(SM.row(row))):
        if(SM.row(row)[i]>0):
            anzappearplus=anzappearplus+CountNZE(SM.col(i))
            anzplus=anzplus+1
    if(not getSign):
        return(min(anzminus, anzplus))
    else:
        if(anzminus<anzplus or (anzminus==anzplus and anzappearminus<anzappearplus)):
            return(anzminus, "minus")
        else:
            return(anzplus, "plus")
def GetOutfluxes(node, X, SM, F, fluxpars):
    # Collect the fluxes consuming the given state.  Returns (list of the
    # individual out-fluxes as positive expressions, their sum, and the
    # corresponding flux parameters).
    row=list(X).index(parse_expr(node))
    outsum=0
    out=[]
    fps=[]
    for i in range(len(SM.row(row))):
        if(SM.row(row)[i]<0):
            # Negate so out-fluxes are reported with positive sign.
            outsum=outsum-SM.row(row)[i]*F[i]
            out.append(-SM.row(row)[i]*F[i])
            fps.append(fluxpars[i])
    return(out, outsum, fps)
def GetInfluxes(node, X, SM, F, fluxpars):
    # Collect the fluxes producing the given state.  Returns (list of the
    # individual in-fluxes, their sum, and the corresponding flux
    # parameters).  Mirror image of GetOutfluxes.
    row=list(X).index(parse_expr(node))
    outsum=0
    out=[]
    fps=[]
    for i in range(len(SM.row(row))):
        if(SM.row(row)[i]>0):
            outsum=outsum+SM.row(row)[i]*F[i]
            out.append(SM.row(row)[i]*F[i])
            fps.append(fluxpars[i])
    return(out, outsum, fps)
def FindNodeToSolve(graph):
    """Return the first node with an empty successor list, or None.

    Iteration follows the dict's insertion order, as in the original.
    """
    empties = (node for node, successors in graph.items() if successors == [])
    return next(empties, None)
def CountNZE(V):
    """Count the non-zero entries of the vector/iterable V."""
    return sum(1 for entry in V if entry != 0)
def Sparsify(M, level, sparseIter):
    # Reduce the number of non-zero entries of matrix M by replacing
    # columns with rank-preserving integer combinations of later columns.
    # *level* (1/2/3) is how many other columns may be combined at once;
    # recursion repeats until a fixed point or 10 iterations.  Candidate
    # coefficients are drawn from {1, 2, -1, -2, 0}; a replacement is
    # accepted only if it is non-zero, strictly sparser, and keeps rank.
    oldM=M.copy()
    if(level==3):
        # Combine each column with up to three later columns.
        ncol=len(M.row(0))
        print('0 columns of '+str(ncol) +' done',flush=True)
        for i in range(ncol):
            icol=M.col(i)
            tobeat=CountNZE(M.col(i))
            for j in range(ncol):
                if(i<j):
                    for factor_j in [1,2,-1,-2,0]:
                        for k in range(ncol):
                            if(i<k and j<k):
                                for factor_k in [1,2,-1,-2,0]:
                                    for l in range(ncol):
                                        if(i<l and j<l and k<l):
                                            for factor_l in [1,2,-1,-2,0]:
                                                test=icol+factor_j*M.col(j)+factor_k*M.col(k)+factor_l*M.col(l)
                                                if(tobeat > CountNZE(test)):
                                                    Mtest=M.copy()
                                                    Mtest.col_del(i)
                                                    Mtest=Mtest.col_insert(i,test)
                                                    if(CountNZE(test)!=0 and M.rank()==Mtest.rank()):
                                                        M=Mtest.copy()
                                                        tobeat=CountNZE(test)
                                                        #print(str(i)+'+'+str(factor_j)+'*'+str(j)+'+'+str(factor_k)+'*'+str(k)+'+'+str(factor_l)+'*'+str(l)+' '+str(tobeat))
            print(str(i+1)+' columns of '+str(ncol) +' done',flush=True)
    if(level==2):
        # Combine each column with up to two later columns.
        ncol=len(M.row(0))
        for i in range(ncol):
            icol=M.col(i)
            tobeat=CountNZE(M.col(i))
            for j in range(ncol):
                if(i<j):
                    for factor_j in [1,2,-1,-2,0]:
                        for k in range(ncol):
                            if(i<k and j<k):
                                for factor_k in [1,2,-1,-2,0]:
                                    test=icol+factor_j*M.col(j)+factor_k*M.col(k)
                                    if(tobeat > CountNZE(test)):
                                        Mtest=M.copy()
                                        Mtest.col_del(i)
                                        Mtest=Mtest.col_insert(i,test)
                                        if(CountNZE(test)!=0 and M.rank()==Mtest.rank()):
                                            M=Mtest.copy()
                                            tobeat=CountNZE(test)
                                            #print(str(i)+'+'+str(factor_j)+'*'+str(j)+'+'+str(factor_k)+'*'+str(k))
            #sys.stdout.write('\rdone %d' %i)
            #sys.stdout.flush()
            #print('\r'+str(i+1)+' columns of '+str(ncol) +' done\r')
    if(level==1):
        # Combine each column with one later column only.
        ncol=len(M.row(0))
        for i in range(ncol):
            icol=M.col(i)
            tobeat=CountNZE(M.col(i))
            for j in range(ncol):
                if(i<j):
                    for factor_j in [1,2,-1,-2,0]:
                        test=icol+factor_j*M.col(j)
                        if(tobeat > CountNZE(test)):
                            Mtest=M.copy()
                            Mtest.col_del(i)
                            Mtest=Mtest.col_insert(i,test)
                            if(CountNZE(test)!=0 and M.rank()==Mtest.rank()):
                                M=Mtest.copy()
                                tobeat=CountNZE(test)
    # Recurse while progress is made, capped at 10 iterations.
    if(oldM!=M and sparseIter<10):
        oldM=M.copy()
        print("Sparsify with level", level,", Iteration ",sparseIter, " of maximal 10",flush=True)
        return(Sparsify(M,level, sparseIter=sparseIter+1))
    else:
        return(M)
def Alyssa(filename,
injections=[],
givenCQs=[],
neglect=[],
sparsifyLevel = 2,
outputFormat='R',
testSteady='T'):
filename=str(filename)
file=csv.reader(open(filename), delimiter=',')
print('Reading csv-file ...',flush=True)
L=[]
nrrow=0
nrcol=0
for row in file:
nrrow=nrrow+1
nrcol=len(row)
L.append(row)
nrspecies=nrcol-2
##### Remove injections
counter=0
for i in range(1,len(L)):
if(L[i-counter][1] in injections):
L.remove(L[i-counter])
counter=counter+1
##### Define flux vector F
F=[]
for i in range(1,len(L)):
F.append(L[i][1])
#print(F)
F[i-1]=F[i-1].replace('^','**')
F[i-1]=parse_expr(F[i-1])
for inj in injections:
F[i-1]=F[i-1].subs(parse_expr(inj),0)
F=Matrix(F)
#print(F)
##### Define state vector X
X=[]
X=L[0][2:]
for i in range(len(X)):
X[i]=parse_expr(X[i])
X=Matrix(X)
#print(X)
Xo=X.copy()
##### Define stoichiometry matrix SM
SM=[]
for i in range(len(L)-1):
SM.append(L[i+1][2:])
for i in range(len(SM)):
for j in range(len(SM[0])):
if (SM[i][j]==''):
SM[i][j]='0'
SM[i][j]=parse_expr(SM[i][j])
SM=Matrix(SM)
SM=SM.T
SMorig=SM.copy()
##### Check for zero fluxes
icounter=0
jcounter=0
for i in range(len(F)):
if(F[i-icounter]==0):
F.row_del(i-icounter)
for j in range(len(SM.col(i-icounter))):
if(SM[j-jcounter,i-icounter]!=0):
#UsedRC.append(X[j-jcounter])
X.row_del(j-jcounter)
SM.row_del(j-jcounter)
SMorig.row_del(j-jcounter)
jcounter=jcounter+1
SM.col_del(i-icounter)
SMorig.col_del(i-icounter)
icounter=icounter+1
print('Removed '+str(icounter)+' fluxes that are a priori zero!',flush=True)
nrspecies=nrspecies-icounter
#printmatrix(SM)
#print(F)
#print(X)
#print(UsedRC)
#####Check if some species are zero and remove them from the system
zeroStates=[]
NegRows=checkNegRows(SM)
PosRows=checkPosRows(SM)
#print(PosRows)
#print(NegRows)
while((NegRows!=[]) | (PosRows!=[])):
#print(PosRows)
#print(NegRows)
if(NegRows!=[]):
row=NegRows[0]
zeroStates.append(X[row])
counter=0
for i in range(len(F)):
if(F[i-counter].subs(X[row],1)!=F[i-counter] and F[i-counter].subs(X[row],0)==0):
F.row_del(i-counter)
SM.col_del(i-counter)
counter=counter+1
else:
if(F[i-counter].subs(X[row],1)!=F[i-counter] and F[i-counter].subs(X[row],0)!=0):
F[i-counter]=F[i-counter].subs(X[row],0)
X.row_del(row)
SM.row_del(row)
else:
row=PosRows[0]
zeroFluxes=[]
for j in range(len(SM.row(row))):
if(SM.row(row)[j]!=0):
zeroFluxes.append(F[j])
for k in zeroFluxes:
StateinFlux=[]
for state in X:
if(k.subs(state,1)!=k):
StateinFlux.append(state)
if(len(StateinFlux)==1):
zeroStates.append(StateinFlux[0])
row=list(X).index(StateinFlux[0])
counter=0
for i in range(len(F)):
if(F[i-counter].subs(X[row],1)!=F[i-counter]):
if(F[i-counter].subs(X[row],0)==0):
F.row_del(i-counter)
SM.col_del(i-counter)
else:
F[i-counter]=F[i-counter].subs(X[row],0)
counter=counter+1
#printmatrix(SM)
NegRows=checkNegRows(SM)
PosRows=checkPosRows(SM)
#printmatrix(SM)
#print(F)
#print(X)
nrspecies=nrspecies-len(zeroStates)
if(nrspecies==0):
print('All states are zero!',flush=True)
return(0)
else:
if(zeroStates==[]):
print('No states found that are a priori zero!',flush=True)
else:
print('These states are zero:',flush=True)
for state in zeroStates:
print('\t'+str(state),flush=True)
nrspecies=nrspecies+len(zeroStates)
##### Identify linearities, bilinearities and multilinearities
Xsquared=[]
for i in range(len(X)):
Xsquared.append(X[i]*X[i])
Xsquared=Matrix(Xsquared)
BLList=[]
MLList=[]
for i in range(len(SM*F)):
LHS=str(expand((SM*F)[i]))
LHS=LHS.replace(' ','')
LHS=LHS.replace('-','+')
LHS=LHS.replace('**2','tothepowerof2')
LHS=LHS.replace('**3','tothepowerof3')
exprList=LHS.split('+')
for expr in exprList:
VarList=expr.split('*')
counter=0
factors=[]
for j in range(len(X)):
anz=0
if(str(X[j]) in VarList):
anz=1
factors.append(X[j])
if((str(X[j])+'tothepowerof2') in VarList):
anz=2
factors.append(X[j])
factors.append(X[j])
if((str(X[j])+'tothepowerof3') in VarList):
anz=3
factors.append(X[j])
factors.append(X[j])
factors.append(X[j])
counter=counter+anz
if(counter==2):
string=''
for l in range(len(factors)):
if(l==len(factors)-1):
string=string+str(factors[l])
else:
string=string+str(factors[l])+'*'
if(not(string in BLList)):
BLList.append(string)
if(counter>2):
string=''
for l in range(len(factors)):
if(l==len(factors)-1):
string=string+str(factors[l])
else:
string=string+str(factors[l])+'*'
if(not(string in MLList)):
MLList.append(string)
COPlusLIPlusBL=[]
for i in range(len(SM*F)):
COPlusLIPlusBL.append((SM*F)[i])
for j in range(len(MLList)):
ToSubs=expand((SM*F)[i]).coeff(MLList[j])
COPlusLIPlusBL[i]=expand(COPlusLIPlusBL[i]-ToSubs*parse_expr(MLList[j]))
COPlusLI=[]
for i in range(len(COPlusLIPlusBL)):
COPlusLI.append(COPlusLIPlusBL[i])
for j in range(len(BLList)):
ToSubs=expand((COPlusLIPlusBL)[i]).coeff(BLList[j])
COPlusLI[i]=expand(COPlusLI[i]-ToSubs*parse_expr(BLList[j]))
##### C*X contains linear terms
C=zeros(len(COPlusLI),len(X))
for i in range(len(COPlusLI)):
for j in range(len(X)):
C[i*len(X)+j]=expand((COPlusLI)[i]).coeff(X[j])
##### ML contains multilinearities
ML=expand(Matrix(SM*F)-Matrix(COPlusLIPlusBL))
##### BL contains bilinearities
BL=expand(Matrix(COPlusLIPlusBL)-Matrix(COPlusLI))
#### CM is coefficient matrix of linearities
CM=C
#####CMBL gives coefficient matrix of bilinearities
CMBL=[]
if(BLList!=[]):
for i in range(len(BLList)):
CVBL=[]
for k in range(len(BL)):
CVBL.append(BL[k].coeff(BLList[i]))
CMBL.append(CVBL)
else:
CVBL=[]
for k in range(len(BL)):
CVBL.append(0)
CMBL.append(CVBL)
CMBL=Matrix(CMBL).T
#####CMML gives coefficient matrix of multilinearities
#####Summarize multilinearities and bilinearities
if(MLList!=[]):
CMML=[]
for i in range(len(MLList)):
CVML=[]
for k in range(len(ML)):
CVML.append(expand(ML[k]).coeff(MLList[i]))
CMML.append(CVML)
CMML=Matrix(CMML).T
BLList=BLList+MLList
CMBL=Matrix(concatenate((CMBL,CMML),axis=1))
for i in range(len(BLList)):
BLList[i]=parse_expr(BLList[i])
if(BLList!=[]):
CMbig=Matrix(concatenate((CM,CMBL),axis=1))
else:
CMbig=Matrix(CM)
#### Save ODE equations for testing solutions at the end
print('Rank of SM is '+str(SM.rank()) + '!',flush=True)
SMorig=SM.copy()
ODE=SMorig*F
#### Get Flux Parameters
fluxpars=[]
for flux in F:
if(flux.args!=()):
foundFluxpar=False
for el in flux.args:
if(not foundFluxpar and el not in X and not is_number(str(el))):
if(flux.subs(el, 0)==0):
fluxpars.append(el)
foundFluxpar=True
else:
fluxpars.append(flux)
##### Increase Sparsity of stoichiometry matrix SM
print('Sparsify stoichiometry matrix with sparsify-level '+str(sparsifyLevel)+'!',flush=True)
newSM=(Sparsify(SM.T, level=sparsifyLevel, sparseIter=1)).T
if(newSM!=SM):
print("Sparsified!",flush=True)
SM=newSM
#### Find conserved quantities
#printmatrix(CMbig)
#print(X)
if(givenCQs==[]):
print('\nFinding conserved quantities ...',flush=True)
LCLs, rowsToDel=FindLCL(CMbig.transpose(), X)
else:
print('\nI took the given conserved quantities!',flush=True)
LCLs=givenCQs
if(LCLs!=[]):
print(LCLs,flush=True)
else:
print('System has no conserved quantities!',flush=True)
#### Define graph structure
print('\nDefine graph structure ...\n',flush=True)
SSgraph=DetermineGraphStructure(SM, F, X, neglect)
#printgraph(SSgraph)
#print(fluxpars)
#### Check for Cycles
cycle=FindCycle(SSgraph, X)
#### Remove cycles step by step
gesnew=0
eqOut=[]
while(cycle!=None):
print('Removing cycle '+str(counter),flush=True)
#printmatrix(SM)
#print(F)
minType, state2Rem, fp2Rem, signChanged = GetBestPair(cycle, SM, fluxpars, X, LCLs, neglect)
#print(cycle,flush=True)
#print(state2Rem)
#print(fp2Rem)
#print(minType)
if(minType==-1):
print(" The cycle",flush=True)
print(" "+str(cycle),flush=True)
print(" cannot be removed. Set more parameters free or enable steady-state expressions with minus signs. The latter is not yet provided by the tool.",flush=True)
return(0)
if(minType==0):
for LCL in LCLs:
ls=parse_expr(LCL.split(' = ')[0])
if(ls.subs(parse_expr(state2Rem),1)!=ls):
LCL2Rem=LCL
LCLs.remove(LCL2Rem)
index=list(X).index(parse_expr(state2Rem))
eqOut.append(state2Rem+' = '+state2Rem)
print(' '+str(state2Rem)+' --> '+'Done by CQ',flush=True)
if(minType==1):
index=list(X).index(parse_expr(state2Rem))
eq=(SM*F)[index]
sol=solve(eq, fp2Rem, simplify=False)[0]
eqOut.append(str(fp2Rem)+' = '+str(sol))
print(' '+str(state2Rem)+' --> '+str(fp2Rem),flush=True)
if(minType==2):
anz, sign=GetDimension(state2Rem, X, SM, getSign=True)
index=list(X).index(parse_expr(state2Rem))
negs, sumnegs, negfps=GetOutfluxes(state2Rem, X, SM, F, fluxpars)
poss, sumposs, posfps=GetInfluxes(state2Rem, X, SM, F, fluxpars)
if(anz==1):
print("Error in Type Determination. Please report this bug!",flush=True)
return(0)
else:
nenner=1
for j in range(anz):
if(j>0):
nenner=nenner+parse_expr('r_'+state2Rem+'_'+str(j))
trafoList=[]
if((sign=="minus" and not signChanged) or (sign=="plus" and signChanged)):
for j in range(len(negs)):
flux=negs[j]
fp=negfps[j]
prefactor=flux/fp
if(j==0):
trafoList.append(str(fp)+' = ('+str(sumposs)+')*1/('+str(nenner)+')*1/('+str(prefactor)+')')
else:
gesnew=gesnew+1
trafoList.append(str(fp)+' = ('+str(sumposs)+')*'+'r_'+state2Rem+'_'+str(j)+'/('+str(nenner)+')*1/('+str(prefactor)+')')
print(' '+str(state2Rem)+' --> '+str(negfps),flush=True)
else:
for j in range(len(poss)):
flux=poss[j]
fp=posfps[j]
prefactor=flux/fp
if(j==0):
trafoList.append(str(fp)+' = ('+str(sumnegs)+')*1/('+str(nenner)+')*1/('+str(prefactor)+')')
else:
gesnew=gesnew+1
trafoList.append(str(fp)+' = ('+str(sumnegs)+')*'+'r_'+state2Rem+'_'+str(j)+'/('+str(nenner)+')*1/('+str(prefactor)+')')
print(' '+str(state2Rem)+' --> '+str(posfps),flush=True)
for eq in trafoList:
eqOut.append(eq)
if(minType==3):
anz, sign=GetDimension(state2Rem, X, SM, getSign=True)
index=list(X).index(parse_expr(state2Rem))
negs, sumnegs, negfps=GetOutfluxes(state2Rem, X, SM, F, fluxpars)
poss, sumposs, posfps=GetInfluxes(state2Rem, X, SM, F, fluxpars)
if(anz==1):
if((sign=="minus" and not signChanged) or (sign=="plus" and signChanged)):
fp2Rem=negfps[0]
flux=negs[0]
else:
fp2Rem=posfps[0]
flux=poss[0]
eq=(SM*F)[index]
sol=solve(eq, fp2Rem, simplify=False)[0]
eqOut.append(str(fp2Rem)+' = '+str(sol))
FsearchFlux = matrix_multiply_elementwise(abs(SM[index,:]),F.T)
colindex=list(FsearchFlux).index(flux)
for row2repl in range(len(SM.col(0))):
if(SM[row2repl,colindex]!=0 and row2repl!=index):
SM=SM.row_insert(row2repl,SM.row(row2repl)-(SM[row2repl,colindex]/SM[index,colindex])*SM.row(index))
SM.row_del(row2repl+1)
#print('HELP',flush=True)
else:
nenner=1
for j in range(anz):
if(j>0):
nenner=nenner+parse_expr('r_'+state2Rem+'_'+str(j))
trafoList=[]
if((sign=="minus" and not signChanged) or (sign=="plus" and signChanged)):
for j in range(len(negs)):
flux=negs[j]
fp=negfps[j]
prefactor=flux/fp
if(j==0):
trafoList.append(str(fp)+' = ('+str(sumposs)+')*1/('+str(nenner)+')*1/('+str(prefactor)+')')
else:
gesnew=gesnew+1
trafoList.append(str(fp)+' = ('+str(sumposs)+')*'+'r_'+state2Rem+'_'+str(j)+'/('+str(nenner)+')*1/('+str(prefactor)+')')
FsearchFlux = matrix_multiply_elementwise(abs(SM[index,:]),F.T)
colindex=list(FsearchFlux).index(flux)
for k in range(len(posfps)):
SM=SM.col_insert(len(SM.row(0)),SM.col(colindex))
F=F.row_insert(len(F),Matrix(1,1,[poss[k]/nenner]))
fluxpars.append(posfps[k])
SM.col_del(colindex)
F.row_del(colindex)
fluxpars.__delitem__(colindex)
print(' '+str(state2Rem)+' --> '+str(negfps),flush=True)
else:
for j in range(len(poss)):
flux=poss[j]
fp=posfps[j]
prefactor=flux/fp
if(j==0):
trafoList.append(str(fp)+' = ('+str(sumnegs)+')*1/('+str(nenner)+')*1/('+str(prefactor)+')')
else:
gesnew=gesnew+1
trafoList.append(str(fp)+' = ('+str(sumnegs)+')*'+'r_'+state2Rem+'_'+str(j)+'/('+str(nenner)+')*1/('+str(prefactor)+')')
FsearchFlux = matrix_multiply_elementwise(abs(SM[index,:]),F.T)
colindex=list(FsearchFlux).index(flux)
for k in range(len(negfps)):
SM=SM.col_insert(len(SM.row(0)),SM.col(colindex))
F=F.row_insert(len(F),Matrix(1,1,[negs[k]/nenner]))
fluxpars.append(negfps[k])
SM.col_del(colindex)
F.row_del(colindex)
fluxpars.__delitem__(colindex)
print(' '+str(state2Rem)+' --> '+str(posfps),flush=True)
for eq in trafoList:
eqOut.append(eq)
X.row_del(index)
SM.row_del(index)
SSgraph=DetermineGraphStructure(SM, F, X, neglect)
#print(X)
#printgraph(SSgraph)
cycle=FindCycle(SSgraph, X)
counter=counter+1
print('There is no cycle in the system!\n',flush=True)
#### Solve remaining equations
eqOut.reverse()
print('Solving remaining equations ...\n',flush=True)
while(SSgraph!={}):
#print(SSgraph)
node=FindNodeToSolve(SSgraph)
#print(node)
index=list(X).index(parse_expr(node))
#print((SM*F)[index])
sol=solve((SM*F)[index],parse_expr(node), simplify=True)
#print(sol)
eqOut.insert(0,node+' = '+str(sol[0]))
for f in range(len(F)):
F[f]=F[f].subs(parse_expr(node), sol[0])
#print(node+' = '+str(sol[0]))
X.row_del(index)
SM.row_del(index)
SSgraph=DetermineGraphStructure(SM, F, X, neglect=[])
#### Test Solution
if(testSteady=='T'):
print('Testing Steady State...\n',flush=True)
NonSteady=False
#print(eqOut)
#print(ODE)
#print(SM*F)
for i in range(len(ODE)):
expr=parse_expr(str(ODE[i]))
for j in range(len(zeroStates)):
zeroState=zeroStates[j]
expr=expr.subs(zeroState, 0)
#print(len(eqOut))
for j in range(len(eqOut)):
ls, rs = eqOut[-(j+1)].split('=')
#print(ls)
ls=parse_expr(ls)
#print(rs)
rs=parse_expr(rs)
expr=expr.subs(ls, rs)
#print(simplify(expr))
expr=simplify(expr)
#print(expr)
if(expr!=0):
print(' Equation '+str(ODE[i]),flush=True)
print(' results:'+str(expr),flush=True)
NonSteady=True
if(NonSteady):
print('Solution is wrong!\n',flush=True)
else:
print('Solution is correct!\n',flush=True)
elif(testSteady=='F'):
print('Skipping the Testing of Steady State...\n',flush=True)
else:
print('Skipping the Testing of Steady State...\n',flush=True)
#### Print Equations
print('I obtained the following equations:\n',flush=True)
if(outputFormat=='M'):
for state in zeroStates:
print('\tinit_'+str(state)+' "0"'+'\n',flush=True)
eqOutReturn=[]
for i in range(len(eqOut)):
ls, rs = eqOut[i].split('=')
ls=parse_expr(ls)
rs=parse_expr(rs)
for j in range(i,len(eqOut)):
ls2, rs2 = eqOut[j].split('=')
rs2=parse_expr(rs2)
rs2=rs2.subs(ls,rs)
eqOut[j]=str(ls2)+'='+str(rs2)
for state in Xo:
ls=ls.subs(state, parse_expr('init_'+str(state)))
rs=rs.subs(state, parse_expr('init_'+str(state)))
eqOut[i]=str(ls)+' "'+str(rs)+'"'
for i in range(len(eqOut)):
eqOut[i]=eqOut[i].replace('**','^')
for eq in eqOut:
print('\t'+eq+'\n',flush=True)
eqOutReturn.append(eq)
else:
for state in zeroStates:
print('\t'+str(state)+' = 0'+'\n',flush=True)
eqOutReturn=[]
for eq in eqOut:
ls, rs = eq.split(' = ')
print('\t'+ls+' = "'+rs+'",'+'\n',flush=True)
eqOutReturn.append(ls+'='+rs)
print('Number of Species: '+str(nrspecies),flush=True)
print('Number of Equations: '+str(len(eqOut)+len(zeroStates)),flush=True)
print('Number of new introduced variables: '+str(gesnew),flush=True)
return(eqOutReturn)
| 39,483
| 35.491682
| 176
|
py
|
dMod
|
dMod-master/inst/code/functions_obs.py
|
from sympy import *
from sympy.parsing.sympy_parser import parse_expr
try:
import readline
readlineAvailable = True
except:
readlineAvailable = False
# Reserved sympy symbols used throughout this module: the Lie-transformation
# parameter and the time variable.
var('epsilon')
var('t')
#returns a matrix of base vectors of the null space given a matrix in rref
#the base vectors are in the columns of the matrix
def nullSpace(matrix, pivots):
    """Return a basis of the null space of a matrix given in rref.

    `matrix` must be in reduced row echelon form; `pivots` lists the pivot
    column indices reported by sympy's rref(). The base vectors are the
    columns of the returned sympy matrix.
    """
    m = matrix.cols
    notPivots = []
    solutions = zeros(m, m-len(pivots))
    # Walk the columns from right to left: each free (non-pivot) column gets
    # a unit entry in its own basis vector, pivot rows receive the negated
    # rref coefficients of the free columns.
    i, k, l = m-1, 0, matrix.rows-1
    while i >= 0:
        if i in pivots:
            for h in range(len(notPivots)):
                solutions[i,h] = - matrix[l,notPivots[h]]
            l -= 1
        else:
            notPivots.append(i)
            solutions[i,k] = 1
            k += 1
        i -= 1
    return solutions
#from stoichiometry matrix, calculate conserved quantities
def conservedQuantities(stoichiometry):
    """Compute conserved quantities of a stoichiometry matrix.

    The conserved-quantity base vectors are the columns of the returned
    sympy matrix (left null space of `stoichiometry`).
    """
    # Row-reduce the transposed stoichiometry matrix; its row space is
    # spanned by the first len(pivots) rows of the rref.
    reduced, pivots = stoichiometry.transpose().rref()
    reduced = reduced[0:len(pivots), :]
    # The null space of that reduced system yields the conserved quantities.
    return nullSpace(reduced, pivots)
#enables use of simplify in R
def simplifyWrapper(expr):
    """Simplify an expression string (or a list of them) via sympy's together().

    R-style '.' characters in identifiers and '^' powers are temporarily
    mapped to sympy-safe tokens and restored afterwards. Lists are updated
    in place and returned.
    """
    def _process(s):
        s = s.replace('.', '_6174823504_').replace('^', '**')
        s = str(together(parse_expr(s)))
        return s.replace('_6174823504_', '.').replace('**', '^')
    if type(expr) == type([]):
        for e in range(len(expr)):
            expr[e] = _process(expr[e])
        return expr
    else:
        return _process(expr)
| 1,562
| 23.421875
| 74
|
py
|
dMod
|
dMod-master/inst/code/functions.py
|
# Author: Benjamin Merkt, Physikalisches Institut, Universitaet Freiburg
import sys
import time
import numpy as np
import sympy as spy
from sympy.parsing.sympy_parser import parse_expr
# try/except necessary for R interface (imports automatically and does not find other files)
try:
from polyClass import *
except:
pass
# readline might not be available
try:
import readline
readlineAvailable = True
except:
readlineAvailable = False
# Unique suffix appended to the reserved letters Q, C, O, S, I, N, E so they
# can be used safely inside sympy symbol names (see giveVar / giveParsed).
extension_str = '_93502158393064762'
# wrapper on spy.var for renaming QCOSINE variables
def giveVar(expr):
    """spy.var wrapper that renames the reserved QCOSINE characters.

    Raises UserWarning when the reserved transformation parameter
    'epsilon' is requested.
    """
    if expr == 'epsilon':
        raise(UserWarning("Transformation parameter 'epsilon' not allowed in any input"))
    # Append the unique suffix to every reserved letter before symbol creation.
    for letter in 'QCOSINE':
        expr = expr.replace(letter, letter + extension_str)
    return spy.var(expr)
# wrapper on sympy.parsing.sympy_parser.parse_expr for renaming QCOSINE variables
def giveParsed(expr):
    """parse_expr wrapper that renames the reserved QCOSINE characters first."""
    for letter in 'QCOSINE':
        expr = expr.replace(letter, letter + extension_str)
    return parse_expr(expr)
# recursive function to construct a multidimensional polynomial
# vars: variables, i: position in vars, p: degree left for other variables
# summand: current monom under construction, poly: full polynomial
# num: umber of coefficients, k: ansatz for which variable, rs: list of coefficiets
def giveDegree(vars, i, p, summand, poly, num, k, rs):
    """Recursively build all monomials of total degree p over vars[i:].

    vars: variables; i: current position in vars; p: degree left to
    distribute; summand: monomial built so far; poly: polynomial
    accumulated so far; num: running coefficient counter; k: index of the
    variable this ansatz belongs to (used in the coefficient name);
    rs: list collecting the created coefficient symbols.
    Returns the extended polynomial and the updated coefficient count.
    """
    if i == len(vars)-1:
        # Last variable absorbs the remaining degree; create one coefficient.
        rs.append(giveVar('r_'+str(vars[k])+'_'+str(num)))
        poly += rs[-1]*summand*vars[i]**p
        return poly, num+1
    else:
        # Give degree j to vars[i] and recurse for the remaining variables.
        for j in range(p+1):
            poly, num = giveDegree(vars, i+1, p-j, summand*vars[i]**j, poly, num, k, rs)
        return poly, num
# make infinitesimal ansatz
def makeAnsatz(ansatz, allVariables, m, q, pMax, fixed):
    """Construct the infinitesimal ansatz polynomials and their derivatives.

    ansatz: 'uni' (univariate), 'par' (parameter ansatz) or 'multi'
    (multivariate); allVariables: dynamic variables, inputs and parameters
    concatenated; m: number of dynamic variables; q: number of inputs;
    pMax: maximal polynomial degree; fixed: variables whose infinitesimal
    is forced to zero.
    Returns (infis, diffInfis, rs): ansatz expressions, their partial
    derivatives and the list of created coefficient symbols.
    """
    n = len(allVariables)
    if ansatz == 'uni':
        #construct polynomial
        rs = []
        infis = []
        for k in range(n):
            infis.append(spy.sympify(0))
            if allVariables[k] in fixed: continue #if in fixed, ansatz is 0
            for p in range(pMax+1):
                rs.append(giveVar('r_'+str(allVariables[k])+'_'+str(p)))
                infis[-1] += rs[-1] * allVariables[k]**p
        #calculate derivatives
        # Only the diagonal derivatives are needed for the 'uni' ansatz.
        diffInfis = [[0]*n]
        for i in range(n):
            diffInfis[0][i] = spy.diff(infis[i],allVariables[i])
    elif ansatz == 'par':
        rs = []
        infis = []
        for k in range(n):
            infis.append(spy.sympify(0))
            if allVariables[k] in fixed: continue #if in fixed, ansatz is 0
            num = 0
            for p in range(pMax+1): #for every degree for 0 to pMax
                vari = allVariables[m+q:] #all parameters
                if k < (m+q): #if ansatz is not for a
                    vari.append(allVariables[k])
                    kp = len(vari)-1
                else:
                    kp = k-(m+q)
                degree, num = giveDegree(vari, 0, p, 1, 0, num, kp, rs)
                infis[-1] += degree
        #calculate derivatives
        diffInfis = [[0]*n]
        for i in range(n):
            diffInfis[0][i] = spy.diff(infis[i],allVariables[i])
    elif ansatz == 'multi':
        rs = []
        infis = []
        for k in range(n):
            infis.append(spy.sympify(0))
            if allVariables[k] in fixed: continue #if in fixed, ansatz is 0
            num = 0
            for p in range(pMax+1): #for every degree for 0 to pMax
                if k < m: #if ansatz is for a dynamic variable
                    vari = allVariables[:m] + allVariables[m+q:]
                    kp = k
                elif k < m+q: #if ansatz is for an input
                    vari = allVariables[:]
                    kp = k
                else: #if ansatz is for a parameter
                    vari = allVariables[m+q:] #all parameters
                    kp = k-(m+q)
                degree, num = giveDegree(vari, 0, p, 1, 0, num, kp, rs)
                infis[-1] += degree
        #calculate derivatives
        # Full Jacobian of the infinitesimals is required for 'multi'.
        diffInfis = [0]*n
        for i in range(n):
            diffInfis[i] = [0]*n
        for i in range(n):
            for j in range(n):
                diffInfis[i][j] = spy.diff(infis[i],allVariables[j])
    return infis, diffInfis, rs
def transformExprToPoly(diff, i, infis, queue, allVariables, rs):
    """Convert one infinitesimal (or derivative) to an Apoly and enqueue it.

    diff: True when `infis` holds derivative entries (then i is a 2-tuple
    index), False for plain infinitesimals (i is an int). The result is put
    on `queue` together with diff and i so the consumer can place it.
    """
    if diff:
        queue.put((Apoly(infis[i[0]][i[1]], allVariables, rs), diff, i))
    else:
        queue.put((Apoly(infis[i], allVariables, rs), diff, i))
def transformInfisToPoly(infis, diffInfis, allVariables, rs, nProc, ansatz):
    """Convert all infinitesimals and their derivatives into Apoly objects.

    Work is distributed over up to nProc worker processes (sequential when
    nProc <= 1); results come back through a multiprocessing Queue tagged
    with their position. Returns (infisPoly, diffInfisPoly).
    """
    if nProc > 1:
        from multiprocessing import Queue, Process
    else:
        from multiprocessing import Queue
    n = len(allVariables)
    k = len(diffInfis)
    ns = 0
    queue = Queue()
    ### start the transformation for the first equations
    while ns < min([n+k*n, nProc]):
        if ns < n:
            if nProc > 1: p = Process(target=transformExprToPoly, args=(False, ns, infis, queue, allVariables, rs))
            else: transformExprToPoly(False, ns, infis, queue, allVariables, rs)
        else:
            # Jobs beyond the first n are derivative entries; map the flat
            # job index to a (row, column) pair of diffInfis.
            if ansatz == 'multi': i = divmod(ns-n,n)
            else: i = (0, ns-n)
            if nProc > 1: p = Process(target=transformExprToPoly, args=(True, i, diffInfis, queue, allVariables, rs))
            else: transformExprToPoly(True, i, diffInfis, queue, allVariables, rs)
        if nProc > 1: p.start()
        ns += 1
    sys.stdout.write("\rPreparing equations...0%")
    sys.stdout.flush()
    ### wait till a process has finished and start the transformation for a new equation
    infisPoly = [0]*n
    diffInfisPoly = [0]*k
    for i in range(k):
        diffInfisPoly[i] = [0]*n
    finished = 0
    while ns < n+k*n:
        #if mp:
        poly, diff, i = queue.get()
        if diff: diffInfisPoly[i[0]][i[1]] = poly
        else: infisPoly[i] = poly
        finished += 1
        if ns < n:
            if nProc > 1: p = Process(target=transformExprToPoly, args=(False, ns, infis, queue, allVariables, rs))
            else: transformExprToPoly(False, ns, infis, queue, allVariables, rs)
        else:
            if ansatz == 'multi': i = divmod(ns-n,n)
            else: i = (0, ns-n)
            if nProc > 1: p = Process(target=transformExprToPoly, args=(True, i, diffInfis, queue, allVariables, rs))
            else: transformExprToPoly(True, i, diffInfis, queue, allVariables, rs)
        if nProc > 1: p.start()
        ns += 1
        prog = int(float(finished)/(n+k*n)*100)
        sys.stdout.write("\rPreparing equations...%d%%" %prog)
        sys.stdout.flush()
    ### wait for all processes to finish
    while finished < n+k*n:
        poly, diff, i = queue.get()
        if diff: diffInfisPoly[i[0]][i[1]] = poly
        else: infisPoly[i] = poly
        finished += 1
        prog = int(float(finished)/(n+k*n)*100)
        sys.stdout.write("\rPreparing equations...%d%%" %prog)
        sys.stdout.flush()
    return infisPoly, diffInfisPoly
### calculate rref from a upper triangular matrix
def getrref(rSystem):
    """Reduce an (almost) upper triangular numpy matrix to rref.

    Returns (R, pivots): R holds only the pivot rows of the reduced
    matrix, pivots lists the pivot column indices.
    Note: rSystem is modified in place.
    """
    pivots = []
    pivotLines = []
    i = -1
    # `range` instead of the Python-2-only `xrange`: works on both versions
    # and matches the rest of this module.
    for j in range(rSystem.shape[1]):
        if rSystem[j,j] == 0:
            # Diagonal entry is zero: first try to reuse a pivot row above ...
            k = 1
            while j-k > i:
                if rSystem[j-k,j] != 0:
                    i = j-k
                    break
                k += 1
            else:
                # ... otherwise swap in a non-pivot line with a usable entry;
                # if none exists, column j has no pivot and is skipped.
                k = i-1
                while k >= 0:
                    if rSystem[k,j] != 0 and (not k in pivotLines):
                        rSystem[[j,k],:] = rSystem[[k,j],:]
                        i = j
                        break
                    k -= 1
                else: continue
        else:
            i = j
        pivots.append(j)
        pivotLines.append(i)
        # Normalise the pivot row and eliminate the column in the rows above.
        coeff = rSystem[i,j]
        rSystem[i,:] = rSystem[i,:]/coeff
        for k in range(i):
            coeff = rSystem[k,j]
            if coeff != 0:
                rSystem[k,:] = rSystem[k,:] - coeff*rSystem[i,:]
    return rSystem[pivotLines,:], pivots
### returns a matrix of base vectors of the null space given a matrix in rref
### the base vectors are the columns of the matrix
def nullSpace(matrix, pivots):
    """Return null-space basis vectors (as columns) of a numpy matrix in rref."""
    nCols = matrix.shape[1]
    freeCols = []
    basis = np.zeros((nCols, nCols - len(pivots)))
    row = matrix.shape[0] - 1
    nFree = 0
    # Scan columns from right to left: free columns contribute a unit entry,
    # pivot columns pick up the negated rref coefficients of the free ones.
    for col in reversed(range(nCols)):
        if col in pivots:
            for h, fc in enumerate(freeCols):
                basis[col, h] = -matrix[row, fc]
            row -= 1
        else:
            freeCols.append(col)
            basis[col, nFree] = 1
            nFree += 1
    return basis
def checkForCommonFactor(infisTmp, allVariables, m):
    """Return True if all non-zero infinitesimals share a common factor.

    Numeric factors, the first m variables (dynamic states) and additive
    terms are ignored as candidates; power factors are reduced to their
    base. The type() comparisons below distinguish sympy Add / Symbol /
    Pow nodes by comparing against sample expressions built from epsilon.
    """
    spy.var('epsilon')
    #extract all factors from first infinitesimal
    for i in range(len(allVariables)):
        if infisTmp[i] != 0:
            fac = spy.factor(infisTmp[i])
            if type(fac) == type(epsilon+1):
                factors = [infisTmp[i]]
            elif type(fac) == type(epsilon):
                factors = [fac]
            else:
                factors = list(fac.args)
            break
    # Filter the candidate factors of the first infinitesimal.
    i = 0
    while i < len(factors):
        if factors[i].is_number:
            factors.pop(i)
        elif factors[i] in allVariables[:m]:
            factors.pop(i)
        elif type(factors[i]) == type(epsilon+1):
            factors.pop(i)
        elif type(factors[i]) == type(epsilon**2):
            if type(factors[i].args[0]) != type(epsilon+1):
                # Keep the base of the power as the candidate factor.
                factors[i] = factors[i].args[0]
                i += 1
            else:
                factors.pop(i)
        else:
            i += 1
    #check which of the factors is in all other infinitesimals
    for i in range(1,len(infisTmp)):
        if infisTmp[i] == 0: continue
        fac = spy.factor(infisTmp[i])
        if type(fac) == type(epsilon+1):
            factorsTmp = [fac]
        elif type(fac) == type(epsilon):
            factorsTmp = [fac]
        else:
            factorsTmp = list(fac.args)
        j = 0
        while j < len(factors):
            # Filter this infinitesimal's factors with the same rules.
            k = 0
            while k < len(factorsTmp):
                if factorsTmp[k].is_number:
                    factorsTmp.pop(k)
                elif factorsTmp[k] in allVariables[:m]:
                    factorsTmp.pop(k)
                elif type(factorsTmp[k]) == type(epsilon+1):
                    factorsTmp.pop(k)
                elif type(factorsTmp[k]) == type(epsilon**2):
                    if type(factorsTmp[k].args[0]) != type(epsilon+1):
                        factorsTmp[k] = factorsTmp[k].args[0]
                        k += 1
                    else:
                        factorsTmp.pop(k)
                else:
                    k += 1
            if factors[j] in factorsTmp:
                j += 1
                continue
            else:
                factors.pop(j)
        if len(factors) != 0:
            continue #if potential common factors are left, try next ifinitesimal
        else:
            break #otherwise treat next solution
    if len(factors) == 0:
        return False
    else:
        return True
### determine known transformations from infinitesimals
def buildTransformation(infis, allVariables):
    """Determine known transformations from a set of infinitesimals.

    Returns (transformations, string): per-variable transformation
    expressions (or '-?-' when unrecognised) and a summary string of the
    transformation types found.
    """
    n = len(allVariables)
    spy.var('epsilon')
    transformations = [0]*n
    tType = [False]*6 #0: unknown, 1: scaling, 2: translation, 3: MM-like, 4: p>2, 5: generalized translation
    for i in range(n):
        if infis[i] == 0:
            transformations[i] = allVariables[i]
        else:
            poly = spy.Poly(infis[i], allVariables).as_dict()
            # list() so indexing works on Python 3, where keys()/values()
            # return views; on Python 2 this is a no-op copy.
            monomials = list(poly.keys())
            coefs = list(poly.values())
            if len(monomials) == 1:
                p = None
                for j in range(n):
                    if monomials[0][j] != 0:
                        if j == i and p == None: # p Symmetry
                            p = monomials[0][i]
                        elif p == None and monomials[0][j] == 1: #
                            p = -1-j
                        else:
                            transformations[i] = '-?-'
                            tType[0] = True
                            break
                else:
                    # for-else: runs only when no break occurred above.
                    if p == None: # translation
                        transformations[i] = allVariables[i] + epsilon*coefs[0]
                        tType[2] = True
                    elif p <= 0: #
                        transformations[i] = allVariables[i] + epsilon*coefs[0] * allVariables[-p-1]
                        tType[5] = True
                    elif p == 1: # scaling
                        transformations[i] = spy.exp(epsilon*coefs[0])*allVariables[i]
                        tType[1] = True
                    else: # p Symmetry
                        transformations[i] = spy.simplify(allVariables[i]/(1-(p-1)*epsilon*allVariables[i]**(p-1))**(spy.sympify(1)/(p-1)))
                        if p == 2: tType[3] = True
                        else: tType[4] = True
            else:
                transformations[i] = '-?-'
                tType[0] = True
    # Assemble the human-readable type summary.
    string = 'Type: '
    if tType[0]: string += 'unknown, '
    if tType[1]: string += 'scaling, '
    if tType[2]: string += 'translation, '
    if tType[3]: string += 'MM-like, '
    if tType[4]: string += 'p>2, '
    if tType[5]: string += 'gen. tanslation, '
    string = string[0:(len(string)-2)]
    return transformations, string
### print found transformations
def printTransformations(infisAll, allVariables):
    """Pretty-print all found symmetry transformations as an aligned table.

    Each solution in infisAll is simplified, converted via
    buildTransformation and printed as variable / infinitesimal /
    transformation columns.
    """
    n = len(infisAll[0])
    length1 = 8
    length2 = 13
    length3 = 14
    transformations = [0]*len(infisAll)
    types = [0]*len(infisAll)
    outputs = []
    for l in range(len(infisAll)):
        for i in range(n):
            infisAll[l][i] = spy.nsimplify(infisAll[l][i])
        transformations[l], types[l] = buildTransformation(infisAll[l], allVariables)
        outputs.append([])
        for i in range(n):
            if infisAll[l][i] != 0:
                # get stuff for output line
                outputs[-1].append(\
                    [str(allVariables[i]), str(infisAll[l][i]), str(transformations[l][i])])
                # remove string extension
                for v in ['Q', 'C', 'O', 'S', 'I', 'N', 'E']:
                    outputs[-1][-1][0] = outputs[-1][-1][0].replace(v + extension_str, v)
                    outputs[-1][-1][1] = outputs[-1][-1][1].replace(v + extension_str, v)
                    outputs[-1][-1][2] = outputs[-1][-1][2].replace(v + extension_str, v)
                # search for longest string (to size the table columns)
                if len(outputs[-1][-1][0]) > length1:
                    length1 = len(outputs[-1][-1][0])
                if len(outputs[-1][-1][1]) > length2:
                    length2 = len(outputs[-1][-1][1])
                if len(outputs[-1][-1][2]) > length3:
                    length3 = len(outputs[-1][-1][2])
    # print all stuff -- parenthesized print calls are valid on both
    # Python 2 (statement with a single parenthesized argument) and 3.
    print(('{0:'+str(length1)+'s} : ').format('variable')
          + ('{0:'+str(length2)+'s} : ').format('infinitesimal')
          + str('transformation'))
    for l in range(len(infisAll)):
        print('-'*(length1+length2+length3+6))
        print('#' + str(l+1) + ': ' + types[l])
        for lst in outputs[l]:
            print(('{0:'+str(length1)+'s} : ').format(lst[0])
                  + ('{0:'+str(length2)+'s} : ').format(str(lst[1]))
                  + str(lst[2]))
| 12,635
| 26.832599
| 121
|
py
|
dMod
|
dMod-master/inst/code/sbmlAmiciDmod.py
|
#!/usr/bin/env python3
#
# (c) INCOME Hackathon 2018, Bernried, Daniel^2
#
import sys
import numpy as np
import json
try:
import amici.sbml_import
except:
from amici import sbml_import
def symengineMatrixToNumpy(x, astype='float'):
    """Convert a symengine-style matrix (anything with .shape) to a numpy array."""
    arr = np.array(x)
    arr = arr.reshape(x.shape)
    return arr.astype(astype)
def getModelJSON(sbml_file_name):
    """Import an SBML model via AMICI and return its description as a JSON string.

    The JSON contains the stoichiometric matrix, flux vector, parameter
    index, state/parameter names, initial values and the observables
    extracted from assignment rules named 'observable_*' (excluding
    '*_sigma').
    """
    importer = amici.sbml_import.SbmlImporter(sbml_file_name, check_validity=False)
    observables = amici.sbml_import.assignmentRules2observables(importer.sbml,
                                                                filter_function=lambda variableId:
                                                                variableId.getId().startswith('observable_') and not
                                                                variableId.getId().endswith('_sigma'))
    importer.processSBML()
    # importer.computeModelEquations()
    # NOTE(review): S is computed but not used below (dataPy serialises the
    # symengine matrix directly) -- possibly leftover; confirm before removal.
    S = symengineMatrixToNumpy(importer.stoichiometricMatrix)
    dataPy = {
        'S': importer.stoichiometricMatrix.tolist(),
        'v': [str(x) for x in importer.fluxVector],
        'p': importer.parameterIndex,
        'stateNames': symengineMatrixToNumpy(importer.symbols['species']['sym'], astype='str').tolist(),
        'parameterNames': symengineMatrixToNumpy(importer.symbols['parameter']['sym'], astype='str').tolist(),
        'x0': symengineMatrixToNumpy(importer.speciesInitial, astype='str').tolist(),
        "observables": observables
    }
    data = json.dumps(dataPy)
    return data
# Command-line entry point: convert an SBML file to the JSON model
# description and either write it to OUTFILE or print it to stdout.
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('Usage: %s SBML-FILE-NAME [OUTFILE]' % __file__)
        sys.exit(1)
    sbml_file_name = sys.argv[1]
    output = getModelJSON(sbml_file_name)
    # Optional second argument selects a destination file for the JSON.
    if len(sys.argv) > 2:
        outfile = sys.argv[2]
        with open(outfile, "w") as f:
            f.write(output)
    else:
        print(output)
| 1,730
| 28.844828
| 110
|
py
|
dMod
|
dMod-master/inst/code/AlyssaPetit_ver1.0.py
|
# AlyssaPetit version 1.0
# Use with python 3.x
import numpy
import sympy
from sympy import Matrix, simplify, expand, solve
from numpy import shape, zeros, concatenate
from numpy.linalg import matrix_rank
from sympy.parsing.sympy_parser import parse_expr
from sympy.matrices import *
from sympy.matrices import matrix_multiply_elementwise
import csv
import random
from random import shuffle
def LCS(s1, s2):
    """Return the longest common substring of s1 and s2 (dynamic programming)."""
    table = [[0] * (len(s2) + 1) for _ in range(len(s1) + 1)]
    best, bestEnd = 0, 0
    for x in range(1, len(s1) + 1):
        for y in range(1, len(s2) + 1):
            if s1[x - 1] != s2[y - 1]:
                table[x][y] = 0
                continue
            # Extend the common substring ending at (x-1, y-1).
            table[x][y] = table[x - 1][y - 1] + 1
            if table[x][y] > best:
                best = table[x][y]
                bestEnd = x
    return s1[bestEnd - best: bestEnd]
def SolveSymbLES(A,b):
    """Solve the symbolic linear system A*x = b via Cramer's rule.

    A is a square sympy matrix, b a column vector. Returns the solution
    as a list of simplified expressions, or [] when A is singular.
    """
    dim=shape(A)[0]
    # Keep a pristine copy: A is rebuilt and mutated inside the loop below.
    Asave=A[:]
    Asave=Matrix(dim, dim, Asave)
    #printmatrix(Asave)
    #print(b)
    determinant=Asave.det()
    if(determinant==0):
        #print('Determinant of LCL-calculation is zero! Try to specify LCLs yourself!')
        return([])
    result=[]
    for i in range(dim):
        # Cramer's rule: replace column i by b and take the determinant ratio.
        A=Matrix(dim,dim,Asave)
        A.col_del(i)
        A=A.col_insert(i,b)
        result.append(simplify(A.det()/determinant))
    return(result)
def CutStringListatSymbol(liste, symbol):
    """Split every string in `liste` at `symbol` and return the flattened list."""
    pieces = []
    for el in liste:
        # Strings not containing the symbol pass through unchanged,
        # matching the original if/else branches.
        pieces.extend(el.split(symbol) if symbol in el else [el])
    return pieces
def FillwithRanNum(M):
    """Substitute every symbolic parameter in M by a random number.

    Parameter names are extracted by string-splitting the entries at
    arithmetic symbols; each distinct name gets one random value, so
    identical parameters stay consistent across entries.
    Returns the numeric matrix (original M is unchanged).
    """
    dimx=len(M.row(0))
    dimy=len(M.col(0))
    ranM=zeros(dimy, dimx)
    parlist=[]
    ranlist=[]
    # Collect entry strings (with a leading '-' stripped).
    for i in M[:]:
        if(i!=0):
            if(str(i)[0]=='-'):
                parlist.append(str(i)[1:])
            else:
                parlist.append(str(i))
    parlist=list(set(parlist))
    # Break the entry strings into tokens at arithmetic symbols.
    for symbol in [' - ', ' + ', '*', '/', '(',')']:
        parlist=CutStringListatSymbol(parlist,symbol)
    parlist=list(set(parlist))
    # Keep only genuine parameter names and draw one random value each.
    temp=[]
    for i in parlist:
        if(i!=''):
            if(not is_number(i)):
                temp.append(i)
                ranlist.append(random.random())
    parlist=temp
    # Substitute the random values entry by entry.
    for i in range(dimy):
        for j in range(dimx):
            ranM[i,j]=M[i,j]
            if(ranM[i,j]!=0):
                for p in range(len(parlist)):
                    ranM[i,j]=ranM[i,j].subs(parse_expr(parlist[p]),ranlist[p])
    return(ranM)
def FindLinDep(M, tol=1e-12):
    """Find linearly dependent columns of symbolic matrix M numerically.

    M is filled with random numbers and QR-decomposed; columns whose
    diagonal entry of R vanishes (below tol) are reported as dependent.
    Returns the list of their indices.
    """
    ranM=FillwithRanNum(M)
    Q,R=numpy.linalg.qr(ranM)
    # Zero out numerical noise below the tolerance.
    for i in range(shape(R)[0]):
        for j in range(shape(R)[1]):
            if(abs(R[i,j]) < tol):
                R[i,j]=0.0
    LinDepList=[]
    for i in range(shape(R)[0]):
        if(R[i][i]==0):
            LinDepList.append(i)
    return(LinDepList)
def FindLCL(M, X):
    """Derive linear combinations (conserved quantities) from matrix M.

    For every linearly dependent column a symbolic linear combination of
    the states X is solved for and formatted as '<expr> = total<name>',
    where <name> is the longest common substring of the involved state
    names. Dependent columns/states are removed from M as they are found.
    Returns (list of LCL strings, list of deleted row indices); returns
    ([],0) or (0,0) on failure.
    """
    LCL=[]
    LinDepList=FindLinDep(M)
    i=0
    counter=0
    deleted_rows=[]
    states=Matrix(X[:])
    while(LinDepList!=[]):
        i=LinDepList[0]
        testM=FillwithRanNum(M)
        # Collect the rows/columns coupled to dependent column i.
        rowliste=list(numpy.nonzero(testM[:,i])[0])
        colliste=[i]
        for z in range(i):
            for k in rowliste:
                for j in range(i):
                    jliste=list(numpy.nonzero(testM[:,j])[0])
                    if(k in jliste):
                        rowliste=rowliste+jliste
                        colliste=colliste+[j]
            rowliste=list(set(rowliste))
            colliste=list(set(colliste))
        rowliste.sort()
        colliste.sort()
        colliste.pop()
        rowlisteTry=rowliste[0:(len(colliste))]
        vec=SolveSymbLES(M[rowlisteTry,colliste],M[rowlisteTry,i])
        # The chosen row subset may give a singular system; retry with
        # shuffled rows up to 100 times.
        shufflecounter=0
        while(vec==[] and shufflecounter < 100):
            shuffle(rowliste)
            shufflecounter=shufflecounter+1
            rowlisteTry=rowliste[0:(len(colliste))]
            vec=SolveSymbLES(M[rowlisteTry,colliste],M[rowlisteTry,i])
        if(shufflecounter==100):
            print('Problems while finding conserved quantities!')
            return(0,0)
        counter=counter+1
        # Build the symbolic linear combination from the solution vector.
        try:
            mat=[states[l] for l in colliste]
            test=parse_expr('0')
            for v in range(0,len(vec)):
                test=test-parse_expr(str(vec[v]))*parse_expr(str(mat[v]))
        except:
            return([],0)
        # Name the conserved total by the longest common substring of the
        # participating state names (fall back to a counter).
        partStr=str(test)+' + '+str(states[i])
        partStr=partStr.split(' + ')
        partStr2=[]
        for index in range(len(partStr)):
            partStr2=partStr2+partStr[index].split('-')
        partStr=partStr2
        if(len(partStr) > 1):
            CLString=LCS(str(partStr[0]),str(partStr[1]))
            for ps in range(2,len(partStr)):
                CLString=LCS(CLString,str(partStr[ps]))
        else:
            CLString=str(partStr[0])
        if(CLString==''):
            CLString=str(counter)
        LCL.append(str(test)+' + '+str(states[i])+' = '+'total'+CLString)
        # Remove the resolved column/state and look for further dependencies.
        M.col_del(i)
        states.row_del(i)
        deleted_rows.append(i+counter-1)
        LinDepList=FindLinDep(M)
    return(LCL, deleted_rows)
def printmatrix(M):
    """Pretty-print a sympy matrix with aligned, index-labelled columns."""
    # Determine the widest entry of each column for alignment.
    lengths=[]
    for i in range(len(M.row(0))):
        lengths.append(0)
        for j in range(len(M.col(0))):
            lengths[i]=max(lengths[i],len(str(M.col(i)[j])))
    # Header row with column indices and a separator line.
    string=''.ljust(5)
    string2=''.ljust(5)
    for j in range(len(M.row(0))):
        string=string+(str(j)).ljust(lengths[j]+2)
        for k in range(lengths[j]+2):
            string2=string2+('-')
    print(string)
    print(string2)
    # One line per row, prefixed by the row index.
    for i in range(len(M.col(0))):
        string=str(i).ljust(4) + '['
        for j in range(len(M.row(0))):
            if(j==len(M.row(0))-1):
                string=string+str(M.row(i)[j]).ljust(lengths[j])
            else:
                string=string+(str(M.row(i)[j])+', ').ljust(lengths[j]+2)
        print(string+']')
    return()
def printgraph(G):
    """Print each node of graph G together with its adjacency list."""
    for node, neighbours in G.items():
        print(node + ': ' + str(neighbours))
    return ()
def is_number(s):
    """Return True if s can be parsed as a float, else False."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def checkNegRows(M):
    """Return the indices of rows of M that contain no positive entry."""
    # Degenerate empty matrices have no rows to inspect.
    if (M == Matrix(0, 0, [])) | (M == Matrix(0, 1, [])) | (M == Matrix(1, 0, [])):
        return []
    rows = []
    for i in range(len(M.col(0))):
        hasPositive = False
        for j in range(len(M.row(i))):
            if M[i, j] > 0:
                hasPositive = True
        if not hasPositive:
            rows.append(i)
    return rows
def checkPosRows(M):
    """Return the indices of rows of M that contain no negative entry."""
    # Degenerate empty matrices have no rows to inspect.
    if (M == Matrix(0, 0, [])) | (M == Matrix(0, 1, [])) | (M == Matrix(1, 0, [])):
        return []
    rows = []
    for i in range(len(M.col(0))):
        hasNegative = False
        for j in range(len(M.row(i))):
            if M[i, j] < 0:
                hasNegative = True
        if not hasNegative:
            rows.append(i)
    return rows
def DetermineGraphStructure(SM, F, X, neglect):
    """Build the steady-state dependency graph of the ODE system SM*F.

    Each state maps to the list of states its steady-state equation
    depends on. A state depends on itself only if it appears nonlinearly
    in its own equation (i.e. still present after dividing out one power).
    States listed in `neglect` always receive a self-edge.
    """
    graph={}
    for i in range(len(SM*F)):
        liste=[]
        for j in range(len(X)):
            # Equation i depends on X[j] if substituting X[j]=1 changes it.
            if((SM*F)[i]!=((SM*F)[i]).subs(X[j],1)):
                if(j==i):
                    # Split off the part without X[i] and check whether the
                    # remainder is still X[i]-dependent after one division.
                    In=((SM*F)[i]).subs(X[j],0)
                    Out=simplify(((SM*F)[i]-In)/X[j])
                    if(Out!=Out.subs(X[j],1)):
                        liste.append(str(X[j]))
                else:
                    liste.append(str(X[j]))
            else:
                if(j==i):
                    liste.append(str(X[j]))
        graph[str(X[i])]=liste
    #print(graph)
    # Force a self-edge for every neglected state still present in X.
    for el in neglect:
        if(parse_expr(el) in X):
            if not el in graph:
                graph[el]=[el]
            else:
                if(el not in graph[el]):
                    graph[el].append(el)
    return(graph)
def FindCycle(graph, X):
    """Return the first cycle found through any state in X, or None."""
    for state in X:
        found = find_cycle(graph, str(state), str(state), path=[])
        if found is not None:
            return found
    return None
def find_cycle(graph, start, end, path=None):
    """Depth-first search for a path from start back to end (a cycle).

    Returns the node list of the cycle (with end at both tips when
    start == end), or None when no such path exists.
    """
    # None sentinel instead of a mutable default list (classic pitfall);
    # callers passing path=[] explicitly behave exactly as before.
    if path is None:
        path = []
    path = path + [start]
    if not start in graph:
        return None
    if ((start == end) & (path != [start])):
        return path
    for node in graph[start]:
        if node == end: return (path + [end])
        if node not in path:
            # Recurse only into unvisited nodes to avoid infinite loops.
            newpath = find_cycle(graph, node, end, path)
            if newpath: return newpath
    return None
def GetBestPair(cycle, SM, fluxpars, X, LCLs, neglect):
    """Choose the best (state, flux parameter) pair to break a cycle.

    Returns (type, state, fluxParameter, signChanged):
      type 0 -- state is covered by a conserved quantity (no flux needed),
      type 1 -- unique flux, dimension 1,
      type 2 -- unique flux appearances, dimension > 1,
      type 3 -- general case,
      type -1 -- no admissible pair found.
    """
    # A state appearing in a conserved quantity can always be removed via it.
    for state in cycle:
        for LCL in LCLs:
            ls=parse_expr(LCL.split(' = ')[0])
            if(ls.subs(parse_expr(state),1)!=ls):
                return(0, state, None, False)
    # Rank cycle states by their dimension (number of in- or out-fluxes).
    dimList=[]
    signList=[]
    for state in cycle:
        dim, sign = GetDimension(state, X, SM, True)
        signList.append(sign)
        dimList.append(dim)
    #minOfDimList=min(dimList)
    beststate=None
    bestflux=None
    besttype=-1
    n2beat=1000
    signChanged=False
    min2beat=max(dimList)+1
    for i in range(len(dimList)):
        if(dimList[i] < min2beat):
            min2beat=dimList[i]
            sign=signList[i]
            appearList=[]
            #print(sign)
            if(sign=="minus"):
                fluxpars2use=GetNegFluxParameters(SM, fluxpars, X, cycle[i])
            else:
                fluxpars2use=GetPosFluxParameters(SM, fluxpars, X, cycle[i])
            abort_flux=False
            for fp in fluxpars2use:
                if(str(fp) not in neglect):
                    appearList.append(GetAppearances(fp, fluxpars, SM))
                else:
                    abort_flux=True
            if(abort_flux):
                ##### Change sign
                # A neglected flux parameter blocks this side; retry with
                # the opposite flux direction.
                print("Sign changed!")
                signChanged=True
                if((sign=="minus" and not signChanged) or (sign=="plus" and signChanged)):
                    fluxpars2use=GetNegFluxParameters(SM, fluxpars, X, cycle[i])
                else:
                    fluxpars2use=GetPosFluxParameters(SM, fluxpars, X, cycle[i])
                abort_flux=False
                for fp in fluxpars2use:
                    if(str(fp) not in neglect):
                        appearList.append(GetAppearances(fp, fluxpars, SM))
                    else:
                        abort_flux=True
            # Prefer the candidate whose fluxes appear least often overall.
            if(sum(appearList) < n2beat and not abort_flux):
                n2beat=sum(appearList)
                beststate=cycle[i]
                if((sign=="minus" and not signChanged) or (sign=="plus" and signChanged)):
                    bestflux=GetNegFluxParameters(SM, fluxpars, X, cycle[i])[0]
                else:
                    bestflux=GetPosFluxParameters(SM, fluxpars, X, cycle[i])[0]
                if(min2beat==1 and max(appearList)==1):
                    besttype=1
                else:
                    if(max(appearList)==1 and min2beat>1):
                        besttype=2
                    else:
                        besttype=3
    return(besttype, beststate, bestflux, signChanged)
def GetNegFluxParameters(SM, fluxpars, X, node):
    """Flux parameters of all fluxes consuming `node` (negative SM entries)."""
    row = list(X).index(parse_expr(node))
    entries = SM.row(row)
    return [fluxpars[i] for i in range(len(entries)) if entries[i] < 0]
def GetPosFluxParameters(SM, fluxpars, X, node):
    """Flux parameters of all fluxes producing `node` (positive SM entries)."""
    row = list(X).index(parse_expr(node))
    entries = SM.row(row)
    return [fluxpars[i] for i in range(len(entries)) if entries[i] > 0]
def GetType(node, fp, fluxpars, LCLs):
    """Classify how `node` can be removed: 0 when covered by a conserved
    quantity, otherwise 1-3 based on flux appearances and dimension.

    NOTE(review): the calls below pass fewer arguments than
    GetAppearances(fp, fluxpars, SM) and GetDimension(node, X, SM) are
    defined with elsewhere in this file, so reaching them would raise a
    TypeError -- this function looks stale/unused; confirm before use.
    """
    for LCL in LCLs:
        ls=parse_expr(LCL.split(' = ')[0])
        if(ls.subs(parse_expr(node),1)!=ls):
            return(0)
    if(GetAppearances(fp, fluxpars)==1):
        if(GetDimension(node)==1):
            return(1)
        else:
            return(2)
    else:
        return(3)
def GetAppearances(fp, fluxpars, SM):
    """Count non-zero stoichiometry entries of all fluxes with parameter fp."""
    anz=0
    # All flux columns whose flux parameter equals fp.
    cols = [i for i, x in enumerate(fluxpars) if x == fp]
    #col=list(fluxpars).index(fp)
    for i in cols:
        for j in range(len(SM.col(i))):
            if(SM.col(i)[j]!=0):
                anz=anz+1
    return(anz)
def GetDimension(node, X, SM, getSign=False):
    """Count the in-/out-fluxes of `node` in the stoichiometry matrix.

    Without getSign the smaller of the two counts is returned. With
    getSign the count of the preferred side plus its label ("minus" for
    out-fluxes, "plus" for in-fluxes) is returned; ties are broken by the
    total number of stoichiometry appearances of the involved fluxes.
    """
    row=list(X).index(parse_expr(node))
    # Out-fluxes: negative stoichiometry entries of this state's row.
    anzminus=0
    anzappearminus=0
    for i in range(len(SM.row(row))):
        if(SM.row(row)[i]<0):
            anzappearminus=anzappearminus+CountNZE(SM.col(i))
            anzminus=anzminus+1
    # In-fluxes: positive stoichiometry entries.
    anzplus=0
    anzappearplus=0
    for i in range(len(SM.row(row))):
        if(SM.row(row)[i]>0):
            anzappearplus=anzappearplus+CountNZE(SM.col(i))
            anzplus=anzplus+1
    if(not getSign):
        return(min(anzminus, anzplus))
    else:
        if(anzminus<anzplus or (anzminus==anzplus and anzappearminus<anzappearplus)):
            return(anzminus, "minus")
        else:
            return(anzplus, "plus")
def GetOutfluxes(node, X, SM, F, fluxpars):
    """Collect all fluxes that drain `node`.

    Returns (fluxes, total, params): the list of (positively signed)
    outflux expressions, their sum, and the matching flux parameters.
    """
    rowIdx = list(X).index(parse_expr(node))
    rowEntries = SM.row(rowIdx)
    fluxes = []
    params = []
    total = 0
    for j in range(len(rowEntries)):
        if rowEntries[j] < 0:
            term = -rowEntries[j] * F[j]
            total = total + term
            fluxes.append(term)
            params.append(fluxpars[j])
    return (fluxes, total, params)
def GetInfluxes(node, X, SM, F, fluxpars):
    """Collect all fluxes that feed `node`.

    Returns (fluxes, total, params): the list of influx expressions,
    their sum, and the matching flux parameters.
    """
    rowIdx = list(X).index(parse_expr(node))
    rowEntries = SM.row(rowIdx)
    fluxes = []
    params = []
    total = 0
    for j in range(len(rowEntries)):
        if rowEntries[j] > 0:
            term = rowEntries[j] * F[j]
            total = total + term
            fluxes.append(term)
            params.append(fluxpars[j])
    return (fluxes, total, params)
def FindNodeToSolve(graph):
    """Return the first node of `graph` whose dependency list is the empty
    list, or None if every node still has unresolved dependencies."""
    return next((node for node, deps in graph.items() if deps == []), None)
def CountNZE(V):
    """Count the entries of `V` that are not equal to zero."""
    return sum(1 for entry in V if entry != 0)
def Sparsify(M, level, sparseIter):
    """Try to reduce the number of nonzero entries of matrix M by replacing
    columns with integer linear combinations of up to `level` other columns
    (factors from {1, 2, -1, -2, 0}), keeping the rank of M unchanged.

    Recurses (up to 10 iterations) as long as an improvement was found.
    `sparseIter` is the current iteration counter.

    NOTE(review): `icol` is captured before M is modified and is not
    refreshed after a successful replacement of column i, so later
    candidate combinations within the same i still use the original
    column — verify this is intended.
    """
    oldM=M.copy()
    if(level==3):
        ncol=len(M.row(0))
        print('0 columns of '+str(ncol) +' done')
        for i in range(ncol):
            icol=M.col(i)
            # current number of nonzero entries of column i; any candidate
            # combination must strictly beat this
            tobeat=CountNZE(M.col(i))
            for j in range(ncol):
                if(i<j):
                    for factor_j in [1,2,-1,-2,0]:
                        for k in range(ncol):
                            if(i<k and j<k):
                                for factor_k in [1,2,-1,-2,0]:
                                    for l in range(ncol):
                                        if(i<l and j<l and k<l):
                                            for factor_l in [1,2,-1,-2,0]:
                                                test=icol+factor_j*M.col(j)+factor_k*M.col(k)+factor_l*M.col(l)
                                                if(tobeat > CountNZE(test)):
                                                    Mtest=M.copy()
                                                    Mtest.col_del(i)
                                                    Mtest=Mtest.col_insert(i,test)
                                                    # accept only non-trivial, rank-preserving replacements
                                                    if(CountNZE(test)!=0 and M.rank()==Mtest.rank()):
                                                        M=Mtest.copy()
                                                        tobeat=CountNZE(test)
                                                        #print(str(i)+'+'+str(factor_j)+'*'+str(j)+'+'+str(factor_k)+'*'+str(k)+'+'+str(factor_l)+'*'+str(l)+' '+str(tobeat))
            print(str(i+1)+' columns of '+str(ncol) +' done')
    if(level==2):
        ncol=len(M.row(0))
        for i in range(ncol):
            icol=M.col(i)
            tobeat=CountNZE(M.col(i))
            for j in range(ncol):
                if(i<j):
                    for factor_j in [1,2,-1,-2,0]:
                        for k in range(ncol):
                            if(i<k and j<k):
                                for factor_k in [1,2,-1,-2,0]:
                                    test=icol+factor_j*M.col(j)+factor_k*M.col(k)
                                    if(tobeat > CountNZE(test)):
                                        Mtest=M.copy()
                                        Mtest.col_del(i)
                                        Mtest=Mtest.col_insert(i,test)
                                        if(CountNZE(test)!=0 and M.rank()==Mtest.rank()):
                                            M=Mtest.copy()
                                            tobeat=CountNZE(test)
                                            #print(str(i)+'+'+str(factor_j)+'*'+str(j)+'+'+str(factor_k)+'*'+str(k))
                                            #sys.stdout.write('\rdone %d' %i)
                                            #sys.stdout.flush()
            #print('\r'+str(i+1)+' columns of '+str(ncol) +' done\r')
    if(level==1):
        ncol=len(M.row(0))
        for i in range(ncol):
            icol=M.col(i)
            tobeat=CountNZE(M.col(i))
            for j in range(ncol):
                if(i<j):
                    for factor_j in [1,2,-1,-2,0]:
                        test=icol+factor_j*M.col(j)
                        if(tobeat > CountNZE(test)):
                            Mtest=M.copy()
                            Mtest.col_del(i)
                            Mtest=Mtest.col_insert(i,test)
                            if(CountNZE(test)!=0 and M.rank()==Mtest.rank()):
                                M=Mtest.copy()
                                tobeat=CountNZE(test)
    # iterate until a fixed point is reached or 10 iterations are exhausted
    if(oldM!=M and sparseIter<10):
        oldM=M.copy()
        print("Sparsify with level", level,", Iteration ",sparseIter, " of maximal 10")
        return(Sparsify(M,level, sparseIter=sparseIter+1))
    else:
        return(M)
def Alyssa(filename,
           injections=[],
           givenCQs=[],
           neglect=[],
           sparsifyLevel = 2,
           outputFormat='R'):
    """Derive analytical steady-state constraints for a reaction network.

    Reads the network from the csv file `filename` (first row: state names
    from column 3 on; each further row: a flux expression in column 2 and
    its stoichiometric coefficients from column 3 on), removes a priori
    zero fluxes/states, sparsifies the stoichiometry matrix, eliminates
    cycles in the steady-state dependency graph and solves the remaining
    equations symbolically. The resulting substitutions are tested against
    the original ODEs before being returned.

    Parameters:
        filename       -- path of the csv network definition
        injections     -- flux parameters of injections, set to zero
        givenCQs       -- conserved quantities as 'lhs = rhs' strings; if
                          empty they are computed from the system
        neglect        -- state names that must not be used for solving
        sparsifyLevel  -- 1..3, how many columns may be combined by Sparsify
        outputFormat   -- 'R' (default) or 'M' for Matlab-style output

    Returns the list of steady-state equations as strings, or 0 on failure.

    NOTE(review): the mutable default arguments ([]) are shared between
    calls; callers should not mutate them.
    """
    filename=str(filename)
    file=csv.reader(open(filename), delimiter=',')
    print('Reading csv-file ...')
    L=[]
    nrrow=0
    nrcol=0
    for row in file:
        nrrow=nrrow+1
        nrcol=len(row)
        L.append(row)
    nrspecies=nrcol-2
    ##### Remove injections
    # `counter` compensates indices for rows removed while iterating
    counter=0
    for i in range(1,len(L)):
        if(L[i-counter][1] in injections):
            L.remove(L[i-counter])
            counter=counter+1
    ##### Define flux vector F
    F=[]
    for i in range(1,len(L)):
        F.append(L[i][1])
        #print(F)
        F[i-1]=F[i-1].replace('^','**')
        F[i-1]=parse_expr(F[i-1])
        for inj in injections:
            F[i-1]=F[i-1].subs(parse_expr(inj),0)
    F=Matrix(F)
    #print(F)
    ##### Define state vector X
    X=[]
    X=L[0][2:]
    for i in range(len(X)):
        X[i]=parse_expr(X[i])
    X=Matrix(X)
    #print(X)
    Xo=X.copy()
    ##### Define stoichiometry matrix SM
    SM=[]
    for i in range(len(L)-1):
        SM.append(L[i+1][2:])
    for i in range(len(SM)):
        for j in range(len(SM[0])):
            if (SM[i][j]==''):
                SM[i][j]='0'
            SM[i][j]=parse_expr(SM[i][j])
    SM=Matrix(SM)
    SM=SM.T
    SMorig=SM.copy()
    ##### Check for zero fluxes
    icounter=0
    jcounter=0
    for i in range(len(F)):
        if(F[i-icounter]==0):
            F.row_del(i-icounter)
            for j in range(len(SM.col(i-icounter))):
                if(SM[j-jcounter,i-icounter]!=0):
                    #UsedRC.append(X[j-jcounter])
                    X.row_del(j-jcounter)
                    SM.row_del(j-jcounter)
                    SMorig.row_del(j-jcounter)
                    jcounter=jcounter+1
            SM.col_del(i-icounter)
            SMorig.col_del(i-icounter)
            icounter=icounter+1
    print('Removed '+str(icounter)+' fluxes that are a priori zero!')
    nrspecies=nrspecies-icounter
    #printmatrix(SM)
    #print(F)
    #print(X)
    #print(UsedRC)
    #####Check if some species are zero and remove them from the system
    zeroStates=[]
    NegRows=checkNegRows(SM)
    PosRows=checkPosRows(SM)
    #print(PosRows)
    #print(NegRows)
    while((NegRows!=[]) | (PosRows!=[])):
        #print(PosRows)
        #print(NegRows)
        if(NegRows!=[]):
            # a state with only negative entries can only decay -> it is zero
            row=NegRows[0]
            zeroStates.append(X[row])
            counter=0
            for i in range(len(F)):
                if(F[i-counter].subs(X[row],1)!=F[i-counter] and F[i-counter].subs(X[row],0)==0):
                    F.row_del(i-counter)
                    SM.col_del(i-counter)
                    counter=counter+1
                else:
                    if(F[i-counter].subs(X[row],1)!=F[i-counter] and F[i-counter].subs(X[row],0)!=0):
                        F[i-counter]=F[i-counter].subs(X[row],0)
            X.row_del(row)
            SM.row_del(row)
        else:
            # a state with only positive entries forces its feeding fluxes to zero
            row=PosRows[0]
            zeroFluxes=[]
            for j in range(len(SM.row(row))):
                if(SM.row(row)[j]!=0):
                    zeroFluxes.append(F[j])
            for k in zeroFluxes:
                StateinFlux=[]
                for state in X:
                    if(k.subs(state,1)!=k):
                        StateinFlux.append(state)
                if(len(StateinFlux)==1):
                    zeroStates.append(StateinFlux[0])
                    row=list(X).index(StateinFlux[0])
                    counter=0
                    for i in range(len(F)):
                        if(F[i-counter].subs(X[row],1)!=F[i-counter]):
                            if(F[i-counter].subs(X[row],0)==0):
                                F.row_del(i-counter)
                                SM.col_del(i-counter)
                            else:
                                F[i-counter]=F[i-counter].subs(X[row],0)
                            counter=counter+1
        #printmatrix(SM)
        NegRows=checkNegRows(SM)
        PosRows=checkPosRows(SM)
    #printmatrix(SM)
    #print(F)
    #print(X)
    nrspecies=nrspecies-len(zeroStates)
    if(nrspecies==0):
        print('All states are zero!')
        return(0)
    else:
        if(zeroStates==[]):
            print('No states found that are a priori zero!')
        else:
            print('These states are zero:')
            for state in zeroStates:
                print('\t'+str(state))
    nrspecies=nrspecies+len(zeroStates)
    ##### Identify linearities, bilinearities and multilinearities
    Xsquared=[]
    for i in range(len(X)):
        Xsquared.append(X[i]*X[i])
    Xsquared=Matrix(Xsquared)
    BLList=[]
    MLList=[]
    for i in range(len(SM*F)):
        # string-rewrite each ODE right-hand side so that its additive
        # terms can be scanned for products of states
        LHS=str(expand((SM*F)[i]))
        LHS=LHS.replace(' ','')
        LHS=LHS.replace('-','+')
        LHS=LHS.replace('**2','tothepowerof2')
        LHS=LHS.replace('**3','tothepowerof3')
        exprList=LHS.split('+')
        for expr in exprList:
            VarList=expr.split('*')
            counter=0
            factors=[]
            for j in range(len(X)):
                anz=0
                if(str(X[j]) in VarList):
                    anz=1
                    factors.append(X[j])
                if((str(X[j])+'tothepowerof2') in VarList):
                    anz=2
                    factors.append(X[j])
                    factors.append(X[j])
                if((str(X[j])+'tothepowerof3') in VarList):
                    anz=3
                    factors.append(X[j])
                    factors.append(X[j])
                    factors.append(X[j])
                counter=counter+anz
            if(counter==2):
                string=''
                for l in range(len(factors)):
                    if(l==len(factors)-1):
                        string=string+str(factors[l])
                    else:
                        string=string+str(factors[l])+'*'
                if(not(string in BLList)):
                    BLList.append(string)
            if(counter>2):
                string=''
                for l in range(len(factors)):
                    if(l==len(factors)-1):
                        string=string+str(factors[l])
                    else:
                        string=string+str(factors[l])+'*'
                if(not(string in MLList)):
                    MLList.append(string)
    # subtract multilinear terms -> constant + linear + bilinear parts
    COPlusLIPlusBL=[]
    for i in range(len(SM*F)):
        COPlusLIPlusBL.append((SM*F)[i])
        for j in range(len(MLList)):
            ToSubs=expand((SM*F)[i]).coeff(MLList[j])
            COPlusLIPlusBL[i]=expand(COPlusLIPlusBL[i]-ToSubs*parse_expr(MLList[j]))
    # subtract bilinear terms -> constant + linear parts
    COPlusLI=[]
    for i in range(len(COPlusLIPlusBL)):
        COPlusLI.append(COPlusLIPlusBL[i])
        for j in range(len(BLList)):
            ToSubs=expand((COPlusLIPlusBL)[i]).coeff(BLList[j])
            COPlusLI[i]=expand(COPlusLI[i]-ToSubs*parse_expr(BLList[j]))
    ##### C*X contains linear terms
    C=zeros(len(COPlusLI),len(X))
    for i in range(len(COPlusLI)):
        for j in range(len(X)):
            C[i*len(X)+j]=expand((COPlusLI)[i]).coeff(X[j])
    ##### ML contains multilinearities
    ML=expand(Matrix(SM*F)-Matrix(COPlusLIPlusBL))
    ##### BL contains bilinearities
    BL=expand(Matrix(COPlusLIPlusBL)-Matrix(COPlusLI))
    #### CM is coefficient matrix of linearities
    CM=C
    #####CMBL gives coefficient matrix of bilinearities
    CMBL=[]
    if(BLList!=[]):
        for i in range(len(BLList)):
            CVBL=[]
            for k in range(len(BL)):
                CVBL.append(BL[k].coeff(BLList[i]))
            CMBL.append(CVBL)
    else:
        CVBL=[]
        for k in range(len(BL)):
            CVBL.append(0)
        CMBL.append(CVBL)
    CMBL=Matrix(CMBL).T
    #####CMML gives coefficient matrix of multilinearities
    #####Summarize multilinearities and bilinearities
    if(MLList!=[]):
        CMML=[]
        for i in range(len(MLList)):
            CVML=[]
            for k in range(len(ML)):
                CVML.append(expand(ML[k]).coeff(MLList[i]))
            CMML.append(CVML)
        CMML=Matrix(CMML).T
        BLList=BLList+MLList
        CMBL=Matrix(concatenate((CMBL,CMML),axis=1))
    for i in range(len(BLList)):
        BLList[i]=parse_expr(BLList[i])
    if(BLList!=[]):
        CMbig=Matrix(concatenate((CM,CMBL),axis=1))
    else:
        CMbig=Matrix(CM)
    #### Save ODE equations for testing solutions at the end
    print('Rank of SM is '+str(SM.rank()) + '!')
    SMorig=SM.copy()
    ODE=SMorig*F
    #### Get Flux Parameters
    # for every flux take the first multiplicative symbol that is not a
    # state and whose removal nullifies the flux
    fluxpars=[]
    for flux in F:
        if(flux.args!=()):
            foundFluxpar=False
            for el in flux.args:
                if(not foundFluxpar and el not in X and not is_number(str(el))):
                    if(flux.subs(el, 0)==0):
                        fluxpars.append(el)
                        foundFluxpar=True
        else:
            fluxpars.append(flux)
    ##### Increase Sparsity of stoichiometry matrix SM
    print('Sparsify stoichiometry matrix with sparsify-level '+str(sparsifyLevel)+'!')
    newSM=(Sparsify(SM.T, level=sparsifyLevel, sparseIter=1)).T
    if(newSM!=SM):
        print("Sparsified!")
        SM=newSM
    #### Find conserved quantities
    #printmatrix(CMbig)
    #print(X)
    if(givenCQs==[]):
        print('\nFinding conserved quantities ...')
        LCLs, rowsToDel=FindLCL(CMbig.transpose(), X)
    else:
        print('\nI took the given conserved quantities!')
        LCLs=givenCQs
    if(LCLs!=[]):
        print(LCLs)
    else:
        print('System has no conserved quantities!')
    #### Define graph structure
    print('\nDefine graph structure ...\n')
    SSgraph=DetermineGraphStructure(SM, F, X, neglect)
    #printgraph(SSgraph)
    #print(fluxpars)
    #### Check for Cycles
    cycle=FindCycle(SSgraph, X)
    #### Remove cycles step by step
    # gesnew counts the newly introduced ratio parameters r_<state>_<j>
    gesnew=0
    eqOut=[]
    while(cycle!=None):
        print('Removing cycle '+str(counter))
        #printmatrix(SM)
        #print(F)
        # minType: -1 unsolvable, 0 solve via conserved quantity,
        # 1 solve for a single flux parameter, 2/3 split the in-/outfluxes
        minType, state2Rem, fp2Rem, signChanged = GetBestPair(cycle, SM, fluxpars, X, LCLs, neglect)
        #print(cycle)
        #print(state2Rem)
        #print(fp2Rem)
        #print(minType)
        if(minType==-1):
            print(" The cycle")
            print(" "+str(cycle))
            print(" cannot be removed. Set more parameters free or enable steady-state expressions with minus signs. The latter is not yet provided by the tool.")
            return(0)
        if(minType==0):
            for LCL in LCLs:
                ls=parse_expr(LCL.split(' = ')[0])
                if(ls.subs(parse_expr(state2Rem),1)!=ls):
                    LCL2Rem=LCL
            LCLs.remove(LCL2Rem)
            index=list(X).index(parse_expr(state2Rem))
            eqOut.append(state2Rem+' = '+state2Rem)
            print(' '+str(state2Rem)+' --> '+'Done by CQ')
        if(minType==1):
            index=list(X).index(parse_expr(state2Rem))
            eq=(SM*F)[index]
            sol=solve(eq, fp2Rem, simplify=False)[0]
            eqOut.append(str(fp2Rem)+' = '+str(sol))
            print(' '+str(state2Rem)+' --> '+str(fp2Rem))
        if(minType==2):
            anz, sign=GetDimension(state2Rem, X, SM, getSign=True)
            index=list(X).index(parse_expr(state2Rem))
            negs, sumnegs, negfps=GetOutfluxes(state2Rem, X, SM, F, fluxpars)
            poss, sumposs, posfps=GetInfluxes(state2Rem, X, SM, F, fluxpars)
            if(anz==1):
                print("Error in Type Determination. Please report this bug!")
                return(0)
            else:
                # nenner = 1 + r_1 + r_2 + ... distributes the opposite-side
                # sum over the anz fluxes on the chosen side
                nenner=1
                for j in range(anz):
                    if(j>0):
                        nenner=nenner+parse_expr('r_'+state2Rem+'_'+str(j))
                trafoList=[]
                if((sign=="minus" and not signChanged) or (sign=="plus" and signChanged)):
                    for j in range(len(negs)):
                        flux=negs[j]
                        fp=negfps[j]
                        prefactor=flux/fp
                        if(j==0):
                            trafoList.append(str(fp)+' = ('+str(sumposs)+')*1/('+str(nenner)+')*1/('+str(prefactor)+')')
                        else:
                            gesnew=gesnew+1
                            trafoList.append(str(fp)+' = ('+str(sumposs)+')*'+'r_'+state2Rem+'_'+str(j)+'/('+str(nenner)+')*1/('+str(prefactor)+')')
                    print(' '+str(state2Rem)+' --> '+str(negfps))
                else:
                    for j in range(len(poss)):
                        flux=poss[j]
                        fp=posfps[j]
                        prefactor=flux/fp
                        if(j==0):
                            trafoList.append(str(fp)+' = ('+str(sumnegs)+')*1/('+str(nenner)+')*1/('+str(prefactor)+')')
                        else:
                            gesnew=gesnew+1
                            trafoList.append(str(fp)+' = ('+str(sumnegs)+')*'+'r_'+state2Rem+'_'+str(j)+'/('+str(nenner)+')*1/('+str(prefactor)+')')
                    print(' '+str(state2Rem)+' --> '+str(posfps))
                for eq in trafoList:
                    eqOut.append(eq)
        if(minType==3):
            anz, sign=GetDimension(state2Rem, X, SM, getSign=True)
            index=list(X).index(parse_expr(state2Rem))
            negs, sumnegs, negfps=GetOutfluxes(state2Rem, X, SM, F, fluxpars)
            poss, sumposs, posfps=GetInfluxes(state2Rem, X, SM, F, fluxpars)
            if(anz==1):
                if((sign=="minus" and not signChanged) or (sign=="plus" and signChanged)):
                    fp2Rem=negfps[0]
                    flux=negs[0]
                else:
                    fp2Rem=posfps[0]
                    flux=poss[0]
                eq=(SM*F)[index]
                sol=solve(eq, fp2Rem, simplify=False)[0]
                eqOut.append(str(fp2Rem)+' = '+str(sol))
                # eliminate the solved flux from all other rows by Gaussian
                # elimination on the stoichiometry matrix
                FsearchFlux = matrix_multiply_elementwise(abs(SM[index,:]),F.T)
                colindex=list(FsearchFlux).index(flux)
                for row2repl in range(len(SM.col(0))):
                    if(SM[row2repl,colindex]!=0 and row2repl!=index):
                        SM=SM.row_insert(row2repl,SM.row(row2repl)-(SM[row2repl,colindex]/SM[index,colindex])*SM.row(index))
                        SM.row_del(row2repl+1)
            else:
                nenner=1
                for j in range(anz):
                    if(j>0):
                        nenner=nenner+parse_expr('r_'+state2Rem+'_'+str(j))
                trafoList=[]
                if((sign=="minus" and not signChanged) or (sign=="plus" and signChanged)):
                    for j in range(len(negs)):
                        flux=negs[j]
                        fp=negfps[j]
                        prefactor=flux/fp
                        if(j==0):
                            trafoList.append(str(fp)+' = ('+str(sumposs)+')*1/('+str(nenner)+')*1/('+str(prefactor)+')')
                        else:
                            gesnew=gesnew+1
                            trafoList.append(str(fp)+' = ('+str(sumposs)+')*'+'r_'+state2Rem+'_'+str(j)+'/('+str(nenner)+')*1/('+str(prefactor)+')')
                        # replace the column of this outflux by the scaled influx columns
                        FsearchFlux = matrix_multiply_elementwise(abs(SM[index,:]),F.T)
                        colindex=list(FsearchFlux).index(flux)
                        for k in range(len(posfps)):
                            SM=SM.col_insert(len(SM.row(0)),SM.col(colindex))
                            F=F.row_insert(len(F),Matrix(1,1,[poss[k]/nenner]))
                            fluxpars.append(posfps[k])
                        SM.col_del(colindex)
                        F.row_del(colindex)
                        fluxpars.__delitem__(colindex)
                    print(' '+str(state2Rem)+' --> '+str(negfps))
                else:
                    for j in range(len(poss)):
                        flux=poss[j]
                        fp=posfps[j]
                        prefactor=flux/fp
                        if(j==0):
                            trafoList.append(str(fp)+' = ('+str(sumnegs)+')*1/('+str(nenner)+')*1/('+str(prefactor)+')')
                        else:
                            gesnew=gesnew+1
                            trafoList.append(str(fp)+' = ('+str(sumnegs)+')*'+'r_'+state2Rem+'_'+str(j)+'/('+str(nenner)+')*1/('+str(prefactor)+')')
                        FsearchFlux = matrix_multiply_elementwise(abs(SM[index,:]),F.T)
                        colindex=list(FsearchFlux).index(flux)
                        for k in range(len(negfps)):
                            SM=SM.col_insert(len(SM.row(0)),SM.col(colindex))
                            F=F.row_insert(len(F),Matrix(1,1,[negs[k]/nenner]))
                            fluxpars.append(negfps[k])
                        SM.col_del(colindex)
                        F.row_del(colindex)
                        fluxpars.__delitem__(colindex)
                    print(' '+str(state2Rem)+' --> '+str(posfps))
                for eq in trafoList:
                    eqOut.append(eq)
        X.row_del(index)
        SM.row_del(index)
        SSgraph=DetermineGraphStructure(SM, F, X, neglect)
        #print(X)
        #printgraph(SSgraph)
        cycle=FindCycle(SSgraph, X)
        counter=counter+1
    print('There is no cycle in the system!\n')
    #### Solve remaining equations
    eqOut.reverse()
    print('Solving remaining equations ...\n')
    while(SSgraph!={}):
        #print(SSgraph)
        node=FindNodeToSolve(SSgraph)
        #print(node)
        index=list(X).index(parse_expr(node))
        #print((SM*F)[index])
        sol=solve((SM*F)[index],parse_expr(node), simplify=True)
        #print(sol)
        eqOut.insert(0,node+' = '+str(sol[0]))
        for f in range(len(F)):
            F[f]=F[f].subs(parse_expr(node), sol[0])
        #print(node+' = '+str(sol[0]))
        X.row_del(index)
        SM.row_del(index)
        SSgraph=DetermineGraphStructure(SM, F, X, neglect=[])
    #### Test Solution
    print('Testing Steady State...\n')
    NonSteady=False
    #print(eqOut)
    #print(ODE)
    #print(SM*F)
    for i in range(len(ODE)):
        expr=parse_expr(str(ODE[i]))
        for j in range(len(zeroStates)):
            zeroState=zeroStates[j]
            expr=expr.subs(zeroState, 0)
        #print(len(eqOut))
        for j in range(len(eqOut)):
            ls, rs = eqOut[-(j+1)].split('=')
            #print(ls)
            ls=parse_expr(ls)
            #print(rs)
            rs=parse_expr(rs)
            expr=expr.subs(ls, rs)
            #print(simplify(expr))
        expr=simplify(expr)
        #print(expr)
        if(expr!=0):
            print(' Equation '+str(ODE[i]))
            print(' results:'+str(expr))
            NonSteady=True
    if(NonSteady):
        print('Solution is wrong!\n')
    else:
        print('Solution is correct!\n')
    #### Print Equations
    print('I obtained the following equations:\n')
    if(outputFormat=='M'):
        for state in zeroStates:
            print('\tinit_'+str(state)+' "0"'+'\n')
        eqOutReturn=[]
        for i in range(len(eqOut)):
            ls, rs = eqOut[i].split('=')
            ls=parse_expr(ls)
            rs=parse_expr(rs)
            for j in range(i,len(eqOut)):
                ls2, rs2 = eqOut[j].split('=')
                rs2=parse_expr(rs2)
                rs2=rs2.subs(ls,rs)
                eqOut[j]=str(ls2)+'='+str(rs2)
            for state in Xo:
                ls=ls.subs(state, parse_expr('init_'+str(state)))
                rs=rs.subs(state, parse_expr('init_'+str(state)))
            eqOut[i]=str(ls)+' "'+str(rs)+'"'
        for i in range(len(eqOut)):
            eqOut[i]=eqOut[i].replace('**','^')
        for eq in eqOut:
            print('\t'+eq+'\n')
            eqOutReturn.append(eq)
    else:
        for state in zeroStates:
            print('\t'+str(state)+' = 0'+'\n')
        eqOutReturn=[]
        for eq in eqOut:
            ls, rs = eq.split(' = ')
            print('\t'+ls+' = "'+rs+'",'+'\n')
            eqOutReturn.append(ls+'='+rs)
    print('Number of Species: '+str(nrspecies))
    print('Number of Equations: '+str(len(eqOut)+len(zeroStates)))
    print('Number of new introduced variables: '+str(gesnew))
    return(eqOutReturn)
| 38,516
| 34.963585
| 176
|
py
|
dMod
|
dMod-master/inst/code/checkPredictions.py
|
# Author: Benjamin Merkt, Physikalisches Institut, Universitaet Freiburg
import sys
import sympy as spy
# try/except necessary for R interface (imports automatically and does not find other files)
try:
from functions import extension_str
except:
pass
def checkPredictions(predictions, predFunctions, infisAll, allVariables):
n = len(allVariables)
print '\nChecking predictions:'
printStrings = []
for i in range(len(predictions)):
printStrings.append([])
admits = True
for j in range(len(infisAll)):
infiPred = 0
for k in range(n):
if infisAll[j][k] != 0:
infiPred += infisAll[j][k] * spy.diff(predFunctions[i], allVariables[k])
infiPred = spy.simplify(infiPred)
if infiPred != 0:
admits = False
p = spy.Wild('p',exclude=[0])
c = spy.Wild('c')
if infiPred.match(c*predFunctions[i]**p) != None:
matches = infiPred.match(c*predFunctions[i]**p)
printStrings[i].append([\
str(predictions[i]),
'#'+str(j+1),
str((c*predictions[i]**p).subs(c,matches[c]).subs(p,matches[p]))])
elif infiPred.match(c*(-1*predFunctions[i])**p) != None:
matches = infiPred.match(c*(-1*predFunctions[i])**p)
printStrings[i].append([\
str(predictions[i]),
'#'+str(j+1),
str((c*(-1)**p*predictions[i]**p).subs(c,matches[c]).subs(p,matches[p]))])
else:
printStrings[i].append([str(predictions[i]), '#'+str(j+1), str(infiPred)])
if admits:
printStrings[i] = True
length0 = 10
length1 = 10
length2 = 13
for i in range(len(printStrings)):
tmp = str(predictions[i])
for v in ['Q', 'C', 'O', 'S', 'I', 'N', 'E']:
tmp = tmp.replace(v + extension_str, v)
if length0 <= len(tmp):
length0 = len(tmp)
if printStrings[i] == True: continue
for j in range(len(printStrings[i])):
for v in ['Q', 'C', 'O', 'S', 'I', 'N', 'E']:
printStrings[i][j][0] = printStrings[i][j][0].replace(v + extension_str, v)
printStrings[i][j][2] = printStrings[i][j][2].replace(v + extension_str, v)
if length1 <= len(printStrings[i][j][1]):
length1 = len(printStrings[i][j][1])
if length2 <= len(printStrings[i][j][2]):
length2 = len(printStrings[i][j][2])
print ('{0:'+str(length0)+'s} : ').format('prediction') \
+ ('{0:'+str(length1)+'s} : ').format('symmetry')\
+ str('infinitesimal')
for i in range(len(predictions)):
print '-'*(length0+length1+length2+6)
if printStrings[i] == True:
print ('{0:'+str(length0)+'s} : ').format(tmp) \
+ ('{0:'+str(length1)+'s} : ').format('admits all')\
+ ('{0:'+str(length2)+'s}').format(' - ')
continue
print ('{0:'+str(length0)+'s} : ').format(printStrings[i][0][0]) \
+ ('{0:'+str(length1)+'s} : ').format(printStrings[i][0][1])\
+ str(printStrings[i][0][2])
for j in range(1,len(printStrings[i])):
print ('{0:'+str(length0)+'s} : ').format('') \
+ ('{0:'+str(length1)+'s} : ').format(printStrings[i][j][1])\
+ str(printStrings[i][j][2])
| 2,982
| 31.78022
| 92
|
py
|
dMod
|
dMod-master/inst/code/extendObservation.py
|
from sympy import *
from sympy.parsing.sympy_parser import *
from sympy.parsing.sympy_tokenize import *
def getObservation(observation, variables, stoichiometry, flows, conserved):
    """Extend a (partial) observation to a full, invertible coordinate change.

    observation   -- list of strings 'observable = expression'
    variables     -- list of state-variable name strings (parsed in place)
    stoichiometry -- flat list, reshaped to a len/m x m matrix
    flows         -- list of flux-expression strings (parsed in place)
    conserved     -- if truthy, conserved quantities are used to extend the
                     observation before adding trivial 1-rows

    Returns (all observables, original observables, observation functions,
    transformed ODE right-hand sides, inversion of the observation).

    NOTE(review): Python 2 module (print statements, integer division in
    len(stoichiometry)/m); relies on module-level globals for the
    tokenizer callbacks and on conservedQuantities() defined elsewhere.
    """
    m = len(variables)
    inversion = [0]*m
    stoichiometry = Matrix(len(stoichiometry)/m,m,stoichiometry)
    stoichiometry = stoichiometry.transpose()
    for i in range(len(flows)):
        flows[i] = flows[i].replace('^','**')
        flows[i] = parse_expr(flows[i])
    diffEquations = list(stoichiometry*Matrix(len(flows),1,flows))
    for v in range(m):
        variables[v] = parse_expr(variables[v])
    #extract observation function from read string
    obsFunctions = []
    for o in range(len(observation)):
        observation[o] = str(observation[o])
        obsFunctions.append(parse_expr(observation[o][(observation[o].find('=')+1):len(observation[o])]))
    #extract observables and parameters from read observation
    # the tokenizer callbacks below communicate through these globals:
    # the first NAME token of each line is the observable, later NAME
    # tokens are parameters
    global newLine, l , observables, parameters
    newLine = True
    observables = []
    parameters = []
    global l
    l = -1
    def read():
        # feed the observation strings line by line to the tokenizer
        global l
        l += 1
        if l < len(observation):
            return observation[l] + '\n'
        else:
            raise StopIteration
    def useToken(key, value, Coord1, Coord2, fullLine):
        global newLine, observables, obsFunctions, parameters
        if key == 1: #1: NAME 2: NUMBER 51: OP 4: NEWLINE 0: ENDMARKER
            var(value)
            if newLine == True:
                observables.append(parse_expr(value))
            else:
                parameters.append(parse_expr(value))
            newLine = False
        elif key == 4:
            newLine = True
    tokenize(read, useToken)
    variablesMatrix = Matrix(m,1,variables)
    h = len(observables)
    #calculate conserved quantities
    conservedBase = conservedQuantities(stoichiometry) #base vectors in columns of matrix
    conservedBase = conservedBase.transpose()
    #calculate jacobian of observation
    jacobian = zeros(h, m)
    for i in range(h):
        for j in range(m):
            jacobian[i,j] = diff(obsFunctions[i], variables[j])
    #check if observation functions are linearly dependent
    _, pivots = jacobian.rref(simplify = True)
    if len(pivots) < h:
        print 'Error: Observation functions are linearly dependent.'
    #first extend with conserved quantities
    if conserved:
        for i in range(conservedBase.rows):
            # keep the conserved row only if it is independent of the rows so far
            jacobianTemp = jacobian.col_join(conservedBase[i,:])
            _, pivots = jacobianTemp.rref(simplify = True)
            if len(pivots) < jacobianTemp.rows:
                continue
            else:
                jacobian = jacobianTemp
                observables.append(parse_expr('CONST'+str(i+1)))
    #finally extend with ones
    s = len(observables)
    l = jacobian.rows
    jacobian = jacobian.col_join(zeros(m-jacobian.rows, m))
    for i in range(m):
        if not i in pivots:
            # add a trivial observable <state>OBS for each unobserved direction
            jacobian[l, i] = 1
            l += 1
            observables.append(parse_expr(str(variables[i])+'OBS'))
            inversion[i] = observables[-1]
    obsFunctions = obsFunctions + list(jacobian[h:,:]*variablesMatrix)
    #substitute trivial inversions in observation functions
    obsFunctionsTemp = obsFunctions[:]
    for i in range(s):
        for j in range(s,m):
            obsFunctionsTemp[i] = obsFunctionsTemp[i].subs(obsFunctionsTemp[j], observables[j])
    #invert nontrivial part
    vars = []
    for i in range(len(inversion)):
        if inversion[i] == 0:
            vars.append(variables[i])
    eq = []
    for i in range(s):
        eq.append(obsFunctionsTemp[i]-observables[i])
    result = solve(eq, vars, dict=True)
    if len(result)>1:
        print 'Warning: Inversion of observation not unique'
    result = result[0]
    for v in vars:
        inversion[variables.index(v)] = result[v]
    #build new differential equations
    newDiffEquations = list(jacobian*Matrix(m,1,diffEquations))
    for i in range(m):
        for j in range(m):
            newDiffEquations[i] = newDiffEquations[i].subs(variables[j], inversion[j])
        newDiffEquations[i] = simplify(newDiffEquations[i])
    #convert output to strings
    for i in range(m):
        observables[i] = str(observables[i])
        obsFunctions[i] = str(obsFunctions[i])
        newDiffEquations[i] = str(newDiffEquations[i])
        newDiffEquations[i] = newDiffEquations[i].replace('**','^')
        inversion[i] = str(inversion[i])
    return observables, observables[:h], obsFunctions, newDiffEquations, inversion
| 4,017
| 27.7
| 99
|
py
|
dMod
|
dMod-master/inst/code/polyClass.py
|
# Author: Benjamin Merkt, Physikalisches Institut, Universitaet Freiburg
import sympy as spy
import numpy as np
from copy import deepcopy
### efficient class for polynomial calculations
class Apoly:
    """Lightweight polynomial representation for fast manipulation.

    A polynomial in `variables` is stored as two parallel lists:
      coefs -- one coefficient per monomial; a sympy expression, or, if
               `rs` is given, a numpy vector with the coefficient of each
               ansatz parameter in `rs` (coefficients are assumed linear
               in the rs)
      exps  -- one numpy integer exponent vector per monomial, indexed
               like `vars`

    NOTE(review): Python 2 module — uses xrange and indexes
    dict.values()/dict.keys(); it will not run unmodified under Python 3.
    """
    def __init__(self, expr, variables, rs):
        # expr=None constructs the empty (zero) polynomial
        if expr is None:
            self.coefs = []
            self.exps = []
            self.vars = variables
            self.rs = rs
        else:
            poly = spy.Poly(expr, variables).as_dict()
            #extract coefficients from polynomial
            if rs is None:
                self.coefs = poly.values()
            else:
                coefsTmp = poly.values()
                self.coefs = [0]*len(coefsTmp)
                for i in xrange(len(coefsTmp)):
                    self.coefs[i] = np.zeros(len(rs))
                    for j, r in enumerate(rs):
                        if coefsTmp[i].has(r):
                            # coefficient is linear in r, so d/dr extracts it
                            self.coefs[i][j] = spy.diff(coefsTmp[i], r)
            #extract exponents from polynomial
            self.exps = poly.keys()
            for i in xrange(len(self.exps)):
                self.exps[i] = np.array(self.exps[i])
            self.vars = variables
            self.rs = rs
    def __repr__(self):
        return str(self.coefs) + '\n' + str(self.exps)
    def __str__(self):
        return str(self.coefs) + '\n' + str(self.exps)
    ### return a copy of self
    def getCopy(self):
        """Return a deep copy of the polynomial (vars/rs stay shared)."""
        newPoly = Apoly(None,self.vars, self.rs)
        newPoly.coefs = deepcopy(self.coefs)
        newPoly.exps = deepcopy(self.exps)
        return newPoly
    ### add a second polynomial
    ### self is overwritten with result
    def add(self, otherPoly):
        """In-place addition: merge otherPoly's monomials into self."""
        for i in xrange(len(otherPoly.exps)):
            for j in xrange(len(self.exps)):
                if np.array_equal(otherPoly.exps[i], self.exps[j]):
                    self.coefs[j] = self.coefs[j] + otherPoly.coefs[i]
                    # drop the monomial if its coefficient cancelled to zero
                    if not np.any(self.coefs[j]):
                        self.coefs.pop(j)
                        self.exps.pop(j)
                    break
            else:
                # monomial not present yet -> append it
                self.coefs.append(otherPoly.coefs[i])
                self.exps.append(otherPoly.exps[i])
    ### substract a second polynomial
    ### self is overwritten with result
    def sub(self, otherPoly):
        """In-place subtraction: subtract otherPoly's monomials from self."""
        for i in xrange(len(otherPoly.exps)):
            for j in xrange(len(self.exps)):
                if np.array_equal(otherPoly.exps[i], self.exps[j]):
                    self.coefs[j] = self.coefs[j] - otherPoly.coefs[i]
                    # drop the monomial if its coefficient cancelled to zero
                    if not np.any(self.coefs[j]):
                        self.coefs.pop(j)
                        self.exps.pop(j)
                    break
            else:
                self.coefs.append(-1*otherPoly.coefs[i])
                self.exps.append(otherPoly.exps[i])
    ### multiply with a second polynomial
    ### a new Apoly is created and returned. self remains unchanged
    def mul(self, otherPoly):
        """Return self*otherPoly as a new Apoly.

        Works only if at most one of the factors carries rs-valued
        (vector) coefficients, as noted below.
        """
        newPoly = Apoly(None, self.vars, self.rs)
        newPoly.coefs = [0]*(len(self.coefs)*len(otherPoly.coefs))
        newPoly.exps = [0]*(len(self.coefs)*len(otherPoly.coefs))
        k = 0
        for i in xrange(len(otherPoly.exps)):
            for j in xrange(len(self.exps)):
                newPoly.coefs[k] = otherPoly.coefs[i] * self.coefs[j] #works only because only one poly has rs
                newPoly.exps[k] = otherPoly.exps[i] + self.exps[j]
                k += 1
        # merge duplicate monomials produced by the distribution above
        i = 0
        while i < len(newPoly.coefs):
            j = i+1
            while j <len(newPoly.coefs):
                if np.array_equal(newPoly.exps[i], newPoly.exps[j]):
                    newPoly.exps.pop(j)
                    newPoly.coefs[i] = newPoly.coefs[i] + newPoly.coefs.pop(j)
                else:
                    j += 1
            i += 1
        return newPoly
    ### differentiate the polynomial
    ### a new Apoly is created and returned. self remains unchanged
    def diff(self, j):
        """Return the partial derivative with respect to variable index j."""
        newPoly = self.getCopy()
        i = 0
        while i < len(newPoly.exps):
            if newPoly.exps[i][j] != 0:
                # power rule: multiply by the exponent, then decrement it
                newPoly.coefs[i] = newPoly.coefs[i]*newPoly.exps[i][j]
                newPoly.exps[i][j] -= 1
                i += 1
            else:
                # monomial does not depend on variable j -> derivative is zero
                newPoly.coefs.pop(i)
                newPoly.exps.pop(i)
        return newPoly
    ### transform polynomial to regular sympy expression
    def as_expr(self):
        """Convert back to a plain sympy expression."""
        expr = 0
        for i in range(len(self.coefs)):
            fact = 1
            for j in range(len(self.vars)):
                fact = fact*self.vars[j]**self.exps[i][j]
            if self.rs is None:
                expr += self.coefs[i]*fact
            else:
                # rebuild the symbolic coefficient from the rs-vector
                coef = 0
                for j in range(len(self.rs)):
                    coef += self.rs[j]*self.coefs[i][j]
                expr += coef*fact
        return spy.nsimplify(expr)
| 3,831
| 27.176471
| 98
|
py
|
dMod
|
dMod-master/inst/code/buildSystem.py
|
# Author: Benjamin Merkt, Physikalisches Institut, Universitaet Freiburg
import sys
import sympy as spy
import numpy as np
from multiprocessing import Queue, Queue, Process
# try/except necessary for R interface (imports automatically and does not find other files)
try:
from functions import *
from polyClass import *
except:
pass
### calculate conditions for a differential equation
def doEquation(k, numerators, denominators, derivativesNum, infis,
               diffInfis, allVariables, rs, ansatz, queue):
    """Assemble the linear conditions on the ansatz parameters `rs` arising
    from the k-th differential equation and put them on `queue` as a
    numpy matrix (one row per polynomial coefficient that must vanish)."""
    nVars = len(allVariables)
    nEqns = len(numerators)
    poly = Apoly(None, allVariables, rs)
    if ansatz in ('uni', 'par'):
        # single common denominator: condition on the k-th equation only
        poly.add(diffInfis[0][k].mul(denominators[k]).mul(numerators[k]))
        for idx in range(nVars):
            poly.sub(infis[idx].mul(derivativesNum[k][idx]))
    elif ansatz == 'multi':
        # multiply through by all the other denominators to clear fractions
        for j in range(nEqns):
            term = diffInfis[k][j].mul(denominators[k]).mul(numerators[j])
            for other in range(nEqns):
                if other != j:
                    term = term.mul(denominators[other])
            poly.add(term)
        for idx in range(nVars):
            term = infis[idx].mul(derivativesNum[k][idx])
            for other in range(nEqns):
                if other != k:
                    term = term.mul(denominators[other])
            poly.sub(term)
    # every coefficient of the polynomial has to vanish -> one LGS row each
    lgs = np.empty([len(poly.coefs), len(rs)])
    for row, coef in enumerate(poly.coefs):
        lgs[row, :] = coef
    queue.put(lgs)
### calculate conditions for an observation equation
def doObsEquation(k, obsDerivativesNum, infis, allVariables, rs, queue):
    """Assemble the linear conditions on the ansatz parameters `rs` arising
    from the k-th observation equation and put them on `queue`."""
    # Lie derivative of the k-th observation function
    poly = Apoly(None, allVariables, rs)
    for idx in range(len(allVariables)):
        poly.add(infis[idx].mul(obsDerivativesNum[k][idx]))
    # every coefficient of the polynomial has to vanish -> one LGS row each
    lgs = np.empty([len(poly.coefs), len(rs)])
    for row, coef in enumerate(poly.coefs):
        lgs[row, :] = coef
    queue.put(lgs)
### calculate conditions for an initial equation
def doInitEquation(k, initDenominators, initDerivativesNum,
                   initFunctions, infis, allVariables, rs, queue):
    """Assemble the linear conditions on the ansatz parameters `rs` arising
    from the k-th initial-value equation and put them on `queue`."""
    nVars = len(allVariables)
    nInit = len(initFunctions)
    # NOTE(review): the denominator is multiplied in twice here — verify
    # this matches the intended formulation.
    poly = infis[k].mul(initDenominators[k]).mul(initDenominators[k])
    for idx in range(nVars):
        poly.sub(infis[idx].mul(initDerivativesNum[k][idx]))
    # substitute the initial functions into the condition
    expr = poly.as_expr()
    for idx in range(nInit):
        if expr.has(allVariables[idx]):
            expr = expr.subs(allVariables[idx], initFunctions[idx])
    # every coefficient of the polynomial has to vanish -> one LGS row each
    poly = Apoly(expr, allVariables, rs)
    lgs = np.empty([len(poly.coefs), len(rs)])
    for row, coef in enumerate(poly.coefs):
        lgs[row, :] = coef
    queue.put(lgs)
def buildSystem(numerators, denominators, derivativesNum, obsDerivativesNum,
                initDenominators, initDerivativesNum, initFunctions,
                infis, diffInfis, allVariables, rs, nProc, ansatz):
    """Build the full linear system on the ansatz parameters `rs`.

    Spawns up to nProc workers (ODE, observation and initial-value
    conditions, in that order); each worker puts one numpy condition
    matrix on the queue. Returns all condition rows stacked into a single
    numpy matrix with len(rs) columns.

    Refactored: the three-way worker dispatch used to be duplicated in
    both scheduling loops; it is now a single local helper.
    """
    if nProc>1:
        from multiprocessing import Queue, Process
    else:
        from multiprocessing import Queue
    m = len(numerators)
    h = len(obsDerivativesNum)
    o = len(initFunctions)
    queue = Queue()

    def startEquation(ns):
        # Dispatch equation number ns to the matching worker: ODE conditions
        # first, then observation conditions, then initial-value conditions.
        # With nProc>1 the worker runs in its own process, otherwise inline.
        if ns < m:
            target = doEquation
            args = (ns, numerators, denominators, derivativesNum, infis,
                    diffInfis, allVariables, rs, ansatz, queue)
        elif ns < m+h:
            target = doObsEquation
            args = (ns-m, obsDerivativesNum, infis, allVariables, rs, queue)
        else:
            target = doInitEquation
            args = (ns-m-h, initDenominators, initDerivativesNum,
                    initFunctions, infis, allVariables, rs, queue)
        if nProc > 1:
            Process(target=target, args=args).start()
        else:
            target(*args)

    ### start the calculations for the first equations
    ns = 0
    while ns < min([m+h+o, nProc]):
        startEquation(ns)
        ns += 1
    sys.stdout.write("\rBuilding system...0%")
    sys.stdout.flush()
    ### wait till a process has finished and start the calculation for a new equation
    lgsList = []
    lgsSize = 0
    finished = 0
    while ns < m+h+o:
        lgs = queue.get()
        startEquation(ns)
        ns += 1
        lgsList.append(lgs)
        lgsSize += lgs.shape[0]
        finished += 1
        prog = int(float(finished)/(m+h+o)*100)
        sys.stdout.write("\rBuilding system...%d%%" %prog)
        sys.stdout.flush()
    ### wait for all processes to finish
    while finished < m+h+o:
        lgs = queue.get()
        lgsList.append(lgs)
        lgsSize += lgs.shape[0]
        finished += 1
        prog = int(float(finished)/(m+h+o)*100)
        sys.stdout.write("\rBuilding system...%d%%" %prog)
        sys.stdout.flush()
    sys.stdout.write("\nCombining system...")
    sys.stdout.flush()
    ### combine all conditions into one matrix
    rSystem = np.empty([lgsSize, len(rs)])
    pos = 0
    for lgs in lgsList:
        rSystem[pos:(pos+lgs.shape[0]), :] = lgs
        pos += lgs.shape[0]
    return rSystem
| 5,837
| 30.728261
| 112
|
py
|
dMod
|
dMod-master/inst/code/quasiSteadyStates.py
|
from sympy import *
from numpy import concatenate
from numpy.linalg import matrix_rank
from sympy.parsing.sympy_parser import *
import csv
def SolveSymbLES(A, b):
    """Solve the square symbolic linear system A*x = b via Cramer's rule.

    Returns the solution as a list of simplified sympy expressions, or an
    empty list when the coefficient matrix is singular (determinant == 0).
    """
    n = shape(A)[0]
    coeff = Matrix(n, n, A[:])
    det_coeff = coeff.det()
    if det_coeff == 0:
        return []
    solution = []
    for col in range(n):
        # Replace column `col` by the right-hand side and take the determinant.
        modified = Matrix(n, n, coeff)
        modified.col_del(col)
        modified = modified.col_insert(col, b)
        solution.append(simplify(modified.det() / det_coeff))
    return solution
def difftotal(expr, diffby, diffmap):
    """Take the total derivative of *expr* with respect to *diffby*.

    ``diffmap`` maps each symbol to its derivative with respect to
    ``diffby``, e.g.::

        theta, t, theta_dot = symbols("theta t theta_dot")
        difftotal(cos(theta), t, {theta: theta_dot})
        # returns -theta_dot*sin(theta)
    """
    # Replace all symbols in the diffmap by a functional form so that diff()
    # sees their implicit dependence on diffby.
    fnexpr = expr.subs({s: s(diffby) for s in diffmap})
    # Do the differentiation
    diffexpr = diff(fnexpr, diffby)
    # Replace the Derivatives with the variables in diffmap.
    # FIX: dict.iteritems() exists only on Python 2; items() behaves
    # identically there and also works on Python 3.
    derivmap = {Derivative(v(diffby), diffby): dv
                for v, dv in diffmap.items()}
    finaldiff = diffexpr.subs(derivmap)
    # Replace the functional forms with their original form
    return finaldiff.subs({s(diffby): s for s in diffmap})
def getIndOfParticipatingSpecies(SM, F, X, fastreact):
    """Return the row indices (species) with a non-zero stoichiometry entry
    in any flux whose string form mentions one of the fast rate names.

    `X` is accepted for interface compatibility but not used here.
    """
    rows = []
    for flux in F:
        flux_str = str(flux)
        for rate in fastreact:
            if rate in flux_str:
                # Column of SM belonging to this flux.
                column = SM.col(list(F).index(flux))
                for row, entry in enumerate(column):
                    if entry != 0 and row not in rows:
                        rows.append(row)
    return rows
def getIndOfFastReactions(F, fastreact):
    """Return the indices of fluxes in F whose string form mentions any of
    the fast rate names, without duplicates, in fast-rate order."""
    indices = []
    for rate in fastreact:
        for idx, flux in enumerate(F):
            if rate in str(flux) and idx not in indices:
                indices.append(idx)
    return indices
def findMin(vec):
    """Return the smallest absolute value in `vec` that exceeds 1e-8.

    Falls back to the largest magnitude when no smaller entry qualifies.
    `vec` must support element-wise negation (e.g. a numpy array).
    """
    best = max(max(vec), max(-vec))
    for value in vec:
        magnitude = abs(value)
        if magnitude < best and magnitude > 1e-8:
            best = magnitude
    return best
def nullZ(A):
    """Return a basis of the null space of A as a column-stacked matrix.

    Built from the reduced row echelon form: identity rows for the free
    columns, completed with the negated pivot-row entries. Returns None
    when A has full column rank (no null space).
    """
    rref_mat, pivot_cols = A.rref()
    n_cols = len(A.row(0))
    rank = len(pivot_cols)
    # Columns without a pivot correspond to the free variables.
    free_cols = [j for j in range(n_cols) if j not in pivot_cols]
    if n_cols > rank:
        basis = eye(n_cols - rank)
        if rank > 0:
            basis = concatenate((-rref_mat[pivot_cols, free_cols], basis), axis=0)
        return basis
def QSS(filename,
        fastreact=None,
        state2Remove=None,
        SM=False,
        X=None,
        F=None,
        outputFormat='R'):
    """Reduce a reaction network by a quasi-steady-state approximation of
    the given fast reactions.

    Parameters
    ----------
    filename : str or None
        Path of a model csv file (column 1: reaction, column 2: flux
        expression, remaining columns: stoichiometry per state). If None,
        the network is taken from ``SM``/``X``/``F`` instead.
    fastreact : list of str, optional
        Rate-constant names of the reactions treated as fast.
    state2Remove : list of str, optional
        States to eliminate; chosen automatically from the pivot columns of
        the fast stoichiometry matrix when empty.
    SM : bool or matrix, optional
        When ``filename`` is None and ``SM`` is truthy, the stoichiometry
        matrix is read from ``smatrix.csv`` in the working directory.
    X, F : list of str, optional
        State names and flux expressions (used when ``filename`` is None).
    outputFormat : str, optional
        Kept for interface compatibility; not used in this function.

    Returns
    -------
    list of str
        Reduced right-hand sides (``"name_dot = expr"``), followed by the
        eliminated state names and their count; ``[]`` when no solution of
        the determining system was found.
    """
    # Normalize the former mutable default arguments (=[]); behaviour is
    # unchanged for callers that passed lists explicitly.
    fastreact = [] if fastreact is None else fastreact
    state2Remove = [] if state2Remove is None else state2Remove
    X = [] if X is None else X
    F = [] if F is None else F

    if filename is None:
        print('Use specified stoichiometry matrix ...')
    else:
        filename = str(filename)
        print('Reading csv-file ...')
        L = []
        nrrow = 0
        nrcol = 0
        # `with` closes the file handle (the original left it open).
        with open(filename) as modelfile:
            for row in csv.reader(modelfile, delimiter=','):
                nrrow = nrrow + 1
                nrcol = len(row)
                L.append(row)

    ##### Define flux vector F
    if filename is not None:
        F = []
        for i in range(1, len(L)):
            F.append(L[i][1])
            F[i-1] = F[i-1].replace('^', '**')  # csv files use ^ for powers
            F[i-1] = parse_expr(F[i-1])
        F = Matrix(F)
    else:
        if F != []:
            flist = []
            for f in F:
                flist.append(parse_expr(f))
            F = Matrix(flist)
        else:
            # NOTE: execution continues with an empty F and will likely fail
            # later; kept for backwards compatibility with existing callers.
            print("You have to specify a flux vector or a model file!")

    ##### Define state vector X
    if filename is not None:
        X = L[0][2:]
        for i in range(len(X)):
            X[i] = parse_expr(X[i])
        X = Matrix(X)
    else:
        if X != []:
            xlist = []
            for x in X:
                xlist.append(parse_expr(x))
            X = Matrix(xlist)
        else:
            print("You have to specify a state vector or a model file!")

    ##### Define stoichiometry matrix SM (states x reactions after .T)
    if filename is not None:
        SM = []
        for i in range(len(L) - 1):
            SM.append(L[i+1][2:])
        for i in range(len(SM)):
            for j in range(len(SM[0])):
                if SM[i][j] == '':
                    SM[i][j] = '0'  # empty cell means "does not participate"
                SM[i][j] = parse_expr(SM[i][j])
        SM = Matrix(SM)
        SM = SM.T
    else:
        if SM:
            L = []
            nrrow = 0
            nrcol = 0
            with open("smatrix.csv") as smfile:
                for row in csv.reader(smfile, delimiter=','):
                    nrrow = nrrow + 1
                    nrcol = len(row)
                    L.append(row)
            SM = []
            for i in range(len(L) - 1):
                SM.append(L[i+1][:])
            for i in range(len(SM)):
                for j in range(len(SM[0])):
                    if SM[i][j] == 'NA':
                        SM[i][j] = '0'
                    SM[i][j] = parse_expr(SM[i][j])
            SM = Matrix(SM)
            SM = SM.T
        else:
            print("You have to specify a stoichiometry matrix or a model file.")

    print('Simplifying System ...')
    # Species touched by a fast reaction, and the indices of the fast fluxes.
    PS = getIndOfParticipatingSpecies(SM, F, X, fastreact)
    index_list = getIndOfFastReactions(F, fastreact)
    frsymb_list = [parse_expr(fastreact[i]) for i in range(len(fastreact))]
    mapping = {}
    variables = []
    for el in X:
        if list(X).index(el) in PS:
            mapping[el] = parse_expr(str(el) + '_dot')
    FF = Matrix([F[i] for i in index_list])   # fast fluxes
    SMF = SM[PS, index_list]                  # fast stoichiometry submatrix
    SMStimesFS = SM[PS, :]*F - SMF*FF         # slow part of the rhs
    for i in index_list:
        F[i] = parse_expr('F_' + str(i))
    eqs = []
    for ps in PS:
        eqs.append(parse_expr(str(X[ps]) + '_dot') - parse_expr(str(X[ps]) + '_tilde') - SMStimesFS[list(PS).index(ps)])
        variables.append(parse_expr(str(X[ps]) + '_dot'))
        variables.append(parse_expr(str(X[ps]) + '_tilde'))
    if max(SMF.shape) > matrix_rank(SMF):
        # Rank deficiency: conserved quantities of the fast subsystem add
        # algebraic relations between the _tilde variables.
        ns = nullZ(SMF.T)
        for i in range(ns.shape[1]):
            eq = 0
            factor = 1/findMin(ns[:, i])  # rescale to avoid tiny coefficients
            for ps in PS:
                eq = eq + ns[:, i][list(PS).index(ps)]*factor*parse_expr(str(X[ps]) + '_tilde')
            eqs.append(eq)
    t = symbols("t")
    # In quasi steady state the total time derivative of the fast part vanishes.
    fastEqDiff_list = [difftotal((SMF*FF)[i], t, mapping) for i in range(len(PS))]
    eqs = eqs + fastEqDiff_list
    sol = solve(eqs, variables)
    if sol == []:
        print("Did not find a solution for the equation system.")
        return []
    if state2Remove == []:
        # Pick the states to eliminate from the pivot columns of SMF.
        varfast = [X[ps] for ps in PS]
        pivcol = SMF.rref()[1]
        varfast = [varfast[i] for i in pivcol]
        state2Remove = [str(v) for v in varfast]
    else:
        if matrix_rank(SMF) == len(state2Remove):
            varfast = [parse_expr(state) for state in state2Remove]
        else:
            print("Rank of the fast stoichiometry matrix equals {}. Please specify {} states to remove from the system!".format(matrix_rank(SMF), matrix_rank(SMF)))
            return []
    solfast = solve(SMF*FF, varfast)
    if not isinstance(solfast, list):
        # solve() returned a dict; keep keys and values in matching order.
        # FIX: list() makes the keys indexable on Python 3 as well.
        varfast = list(solfast.keys())
        solfast = [solfast[el] for el in varfast]
    else:
        if isinstance(solfast[0], tuple):
            liste = []
            for i in range(len(solfast[0])):
                liste.append(solfast[0][i])
            solfast = liste
    ausgabe = []
    for var in variables:
        if 'tilde' not in str(var) and str(var).split('_dot')[0] not in state2Remove:
            term = sol[var]
            for el in range(len(varfast)):
                term = term.subs(varfast[el], solfast[el])
            for i in range(1, len(frsymb_list)):
                # Express every fast rate through the first one times a ratio r_*.
                term = term.subs(frsymb_list[i], frsymb_list[0]*parse_expr('r_' + str(frsymb_list[i]) + '_' + str(frsymb_list[0])))
            term = simplify(term)
            ausgabe.append(str(var) + ' = ' + str(term))
    print('Use the following observation functions!')
    for el in range(len(varfast)):
        term = solfast[el]
        for i in range(1, len(frsymb_list)):
            term = term.subs(frsymb_list[i], frsymb_list[0]*parse_expr('r_' + str(frsymb_list[i]) + '_' + str(frsymb_list[0])))
        term = simplify(term)
        print(' ' + str(varfast[el]) + ' = ' + str(term))
    for i in range(len(varfast)):
        ausgabe.append(str(varfast[i]))
    ausgabe.append(str(len(varfast)))
    print("Done")
    return ausgabe
| 9,482
| 30.929293
| 165
|
py
|
dMod
|
dMod-master/inst/code/symmetryDetection.py
|
# Author: Benjamin Merkt, Physikalisches Institut, Universitaet Freiburg
# Version: 0.11
import sys
import argparse
import time
import sympy as spy
import scipy.linalg
# try/except necessary for R interface which imports automatically after loading
try:
from readData import *
from functions import *
from buildSystem import *
from polyClass import *
from checkPredictions import *
except:
pass
# Wall-clock reference taken at import; symmetryDetection() reports the total
# runtime against it.
t0 = time.time()
# Pre-declare the sympy symbol `epsilon` (presumably used by the imported
# ansatz/polynomial helpers — TODO confirm).
spy.var('epsilon')
def symmetryDetection(allVariables, diffEquations, observables, obsFunctions, initFunctions,
                      predictions, predFunctions, ansatz = 'uni', pMax = 2, inputs = [],
                      fixed = [], parallel = 1, allTrafos = False):
    """Search for infinitesimal symmetry transformations of an ODE model.

    Pipeline: build an infinitesimal ansatz, assemble the linear determining
    system from the ODEs, observation and initial-value functions, solve it
    (LU + rref + null space), print the resulting transformations and
    optionally check them against prediction functions.

    NOTE: this module uses Python 2 print statements (see below).

    allVariables   -- all symbols: states + inputs + parameters
    diffEquations  -- right-hand sides of the ODEs (length m)
    observables / obsFunctions -- observation symbols and expressions (length h)
    initFunctions  -- initial-value expressions (may be empty)
    predictions / predFunctions -- prediction symbols/expressions, or False
    ansatz         -- 'uni', 'par' or 'multi'
    pMax           -- maximal power used in the infinitesimal generator
    inputs, fixed  -- input variables / variables considered fixed
    parallel       -- maximal number of worker processes
    allTrafos      -- if True, keep transformations with common parameter factors
    """
    n = len(allVariables)
    m = len(diffEquations)
    h = len(observables)

    ###########################################################################################
    #############################    prepare equations    ####################################
    ###########################################################################################
    sys.stdout.write('Preparing equations...')
    sys.stdout.flush()

    # make infinitesimal ansatz
    infis, diffInfis, rs = makeAnsatz(ansatz, allVariables, m, len(inputs), pMax, fixed)

    # and convert to polynomial
    infis, diffInfis = transformInfisToPoly(infis, diffInfis, allVariables, rs, parallel, ansatz)

    ### extract numerator and denominator of equations
    # differential equations: each rhs is split as numerator/denominator
    numerators = [0]*m
    denominators = [0]*m
    for k in range(m):
        rational = spy.together(diffEquations[k])
        numerators[k] = Apoly(spy.numer(rational), allVariables, None)
        denominators[k] = Apoly(spy.denom(rational), allVariables, None)

    # observation functions (note: "Denominatros" typo is kept — it is used
    # consistently throughout this module)
    obsNumerators = [0]*h
    obsDenominatros = [0]*h
    for k in range(h):
        rational = spy.together(obsFunctions[k])
        obsNumerators[k] = Apoly(spy.numer(rational), allVariables, None)
        obsDenominatros[k] = Apoly(spy.denom(rational), allVariables, None)

    # initial functions
    if len(initFunctions) != 0:
        initNumerators = [0]*m
        initDenominatros = [0]*m
        for k in range(m):
            rational = spy.together(initFunctions[k])
            initNumerators[k] = Apoly(spy.numer(rational), allVariables, None)
            initDenominatros[k] = Apoly(spy.denom(rational), allVariables, None)
    else:
        initNumerators = []
        initDenominatros = []

    ### calculate numerator of derivatives of equations
    # quotient rule: d(num/den) has numerator num'*den - num*den'
    # differential equatioins
    derivativesNum = [0]*m
    for i in range(m):
        derivativesNum[i] = [0]*n
    for k in range(m):
        for l in range(n):
            derivativesNum[k][l] = Apoly(None, allVariables, None)
            derivativesNum[k][l].add(numerators[k].diff(l).mul(denominators[k]))
            derivativesNum[k][l].sub(numerators[k].mul(denominators[k].diff(l)))

    # observation functions
    obsDerivativesNum = [0]*h
    for i in range(h):
        obsDerivativesNum[i] = [0]*n
    for k in range(h):
        for l in range(n):
            obsDerivativesNum[k][l] = Apoly(None, allVariables, None)
            obsDerivativesNum[k][l].add(obsNumerators[k].diff(l).mul(obsDenominatros[k]))
            obsDerivativesNum[k][l].sub(obsNumerators[k].mul(obsDenominatros[k].diff(l)))

    # initial functions
    if len(initFunctions) != 0:
        initDerivativesNum = [0]*len(initFunctions)
        for i in range(m):
            initDerivativesNum[i] = [0]*n
        for k in range(m):
            for l in range(n):
                initDerivativesNum[k][l] = Apoly(None, allVariables, None)
                initDerivativesNum[k][l].add(initNumerators[k].diff(l).mul(initDenominatros[k]))
                initDerivativesNum[k][l].sub(initNumerators[k].mul(initDenominatros[k].diff(l)))
    else:
        initDerivativesNum = []

    sys.stdout.write('\rPreparing equations...done\n')
    sys.stdout.flush()

    ###########################################################################################
    ############################    build linear system    ###################################
    ###########################################################################################
    sys.stdout.write('\nBuilding system...')
    sys.stdout.flush()

    rSystem = buildSystem(numerators, denominators, derivativesNum, obsDerivativesNum,
                          initDenominatros, initDerivativesNum, initFunctions,
                          infis, diffInfis, allVariables, rs, parallel, ansatz)

    sys.stdout.write('done\n')
    sys.stdout.flush()

    ###########################################################################################
    ##############################    solve system    ########################################
    ###########################################################################################
    sys.stdout.write('\nSolving system of size ' + str(rSystem.shape[0]) + 'x' +\
                     str(rSystem.shape[1]) + '...')
    sys.stdout.flush()

    # get LU decomposition from scipy (keep only the upper-triangular factor)
    rSystem = scipy.linalg.lu(rSystem, permute_l=True)[1]

    # calculate reduced row echelon form
    rSystem, pivots = getrref(rSystem)

    sys.stdout.write('done\n')
    sys.stdout.flush()

    ###########################################################################################
    #############################    process results    ######################################
    ###########################################################################################
    sys.stdout.write('\nProcessing results...')
    sys.stdout.flush()

    # calculate solution space
    sys.stdout.write('\n  calculating solution space')
    sys.stdout.flush()
    baseMatrix = nullSpace(rSystem, pivots)

    # substitute solutions into infinitesimals
    # (and remove the ones with common parameter factors)
    sys.stdout.write('\n  substituting solutions')
    sys.stdout.flush()
    infisAll = []
    for l in range(baseMatrix.shape[1]):
        infisTmp = [0]*n
        for i in range(len(allVariables)):
            infisTmp[i] = infis[i].getCopy()
            infisTmp[i].rs = baseMatrix[:,l]
            infisTmp[i] = infisTmp[i].as_expr()
        if allTrafos:
            infisAll.append(infisTmp)
        else:
            if not checkForCommonFactor(infisTmp, allVariables, m):
                infisAll.append(infisTmp)

    print ''
    sys.stdout.write('done\n')
    sys.stdout.flush()

    # print transformations
    print '\n\n' + str(len(infisAll)) + ' transformation(s) found:'
    if len(infisAll) != 0: printTransformations(infisAll, allVariables)

    ###########################################################################################
    ############################    check predictions    #####################################
    ###########################################################################################
    if predictions != False:
        checkPredictions(predictions, predFunctions, infisAll, allVariables)

    print time.strftime('\nTotal time: %Hh:%Mm:%Ss', time.gmtime(time.time()-t0))
def main():
    """Command-line entry point: parse arguments, read the model, observation
    and optional initial/prediction files, then run symmetryDetection()."""
    # check if run with arguments (i.e. from terminal)
    try:
        sys.argv[0]
    except:
        return

    parser = argparse.ArgumentParser(usage='%(prog)s model_path observation_path [prediction_path] [options]', description='Detect symmetries in systems of ODEs.')
    parser.add_argument('model_path', help = 'model csv-file with path')
    parser.add_argument('observation_path', help = 'observation txt-file with path')
    parser.add_argument('prediction_path', nargs='?', default=False,
                        help = 'prediction txt-file with path (optional)')
    parser.add_argument('-I','--initial', nargs = 1, default=[False],
                        help = 'initial values txt-file with path')
    parser.add_argument('-d','--delim', nargs = 1, default = [','],
                        help = 'delimiter used in the model csv (default = ,)')
    parser.add_argument('-a','--ansatz', choices=['uni', 'par', 'multi'], default = 'uni',
                        help='ansatz made for infinitesimals (default = uni)')
    parser.add_argument('-p','--pMax', nargs = 1, default = [2], type = int,
                        help = 'maximal power used in the infinitesimal generator (default = 2)')
    parser.add_argument('-i','--input', nargs = '+', default = [],
                        help = 'input variables')
    parser.add_argument('-f','--fixed', nargs = '+', default = [],
                        help = 'variables to consider fixed')
    parser.add_argument('-P','--parallel', nargs = 1, default=[1],
                        help = 'maximal number of processes (default = 1)')
    parser.add_argument('-A','--allTrafos', action='store_true', default=False,
                        help = 'do not remove transformations with common parameter factors')

    args = parser.parse_args()

    inputs = args.input
    #read and print input and fixed variables
    if len(inputs) != 0:
        s = 'Input variables: '
        for v in range(len(inputs)):
            s = s + str(inputs[v]) + ', '
            # convert the name string into a sympy variable in place
            inputs[v] = giveVar(inputs[v])
        sys.stdout.write(s[0:len(s)-2] + '\n')
        sys.stdout.flush()

    fixed = args.fixed
    if len(fixed) != 0:
        s = 'Fixed variables: '
        for v in range(len(fixed)):
            s = s + str(fixed[v]) + ', '
            fixed[v] = giveVar(fixed[v])
        sys.stdout.write(s[0:len(s)-2] + '\n')
        sys.stdout.flush()

    ###########################################################################################
    ##########################    read data from files    ####################################
    ###########################################################################################
    sys.stdout.write('\nReading files...')
    sys.stdout.flush()

    # read model
    variables, parameters, flows, stoichiometry = readModel(args.model_path, args.delim[0])

    # read observation
    observables, obsFunctions, parameters = readObservation(args.observation_path, variables,
                                                            parameters)

    # read initial values
    if args.initial[0] != False:
        initFunctions, parameters = readInitialValues(args.initial[0], variables, parameters)
    else:
        initFunctions = []

    # read predictions
    if args.prediction_path != False:
        predictions, predFunctions = readPredictions(args.prediction_path, variables, parameters)
    else:
        predictions, predFunctions = False, False

    # remove inputs from parameters
    for par in inputs:
        if par in parameters:
            parameters.remove(par)

    #define some stuff
    diffEquations = stoichiometry * flows
    allVariables = variables + inputs + parameters
    sys.stdout.write('done\n')
    sys.stdout.flush()

    symmetryDetection(allVariables, diffEquations, observables, obsFunctions, initFunctions,
                      predictions, predFunctions, args.ansatz, args.pMax[0], args.input,
                      args.fixed, int(args.parallel[0]), args.allTrafos)
def symmetryDetectiondMod(model, observation, prediction, initial, ansatz, pMax, inputs, fixed,
                          parallel, allTrafos):
    """Entry point used by the dMod R interface.

    Each of model/observation/prediction/initial/fixed/inputs may arrive as
    None, a single string or a list of strings; the first part of this
    function normalizes all of them to lists before reading the equations.

    NOTE: uses `basestring`, i.e. this module targets Python 2.
    """
    if model == None:
        model = []
    elif isinstance(model, basestring):
        model = [model]
    if observation == None:
        observation = []
    elif isinstance(observation, basestring):
        observation = [observation]
    if prediction == None:
        prediction = []
    elif isinstance(prediction, basestring):
        prediction = [prediction]
    if initial == None:
        initial = []
    elif isinstance(initial, basestring):
        initial = [initial]
    if fixed == None:
        fixed = []
    elif isinstance(fixed, basestring):
        fixed = [str(fixed)]
    if len(fixed) != 0:
        s = 'Fixed variables: '
        for v in range(len(fixed)):
            s = s + str(fixed[v]) + ', '
            # convert the name string into a sympy variable in place
            fixed[v] = giveVar(fixed[v])
        sys.stdout.write(s[0:len(s)-2] + '\n')
        sys.stdout.flush()
    if inputs == None:
        inputs = []
    elif isinstance(inputs, basestring):
        inputs = [str(inputs)]
    if len(inputs) != 0:
        s = 'Input variables: '
        for v in range(len(inputs)):
            s = s + str(inputs[v]) + ', '
            inputs[v] = giveVar(inputs[v])
        sys.stdout.write(s[0:len(s)-2] + '\n')
        sys.stdout.flush()

    sys.stdout.write('\nReading input...')
    sys.stdout.flush()

    # read model
    variables, diffEquations, parameters = readEquations(model)

    # read observation
    observables, obsFunctions, parameters = readObservation(observation, variables, parameters)

    # read initial values
    if len(initial) != 0:
        initFunctions, parameters = readInitialValues(initial, variables, parameters)
    else:
        initFunctions = []

    # read predictions
    if len(prediction) != 0:
        predictions, predFunctions = readPredictions(prediction, variables, parameters)
    else:
        predictions, predFunctions = False, False

    # remove inputs from parameters
    for par in inputs:
        if par in parameters:
            parameters.remove(par)

    #define some stuff
    allVariables = variables + inputs + parameters
    sys.stdout.write('done\n')
    sys.stdout.flush()

    symmetryDetection(allVariables, diffEquations, observables, obsFunctions, initFunctions,
                      predictions, predFunctions, ansatz, pMax, inputs, fixed, parallel, allTrafos)
# Only run the command-line interface when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 12,218
| 32.476712
| 160
|
py
|
ARFlow
|
ARFlow-master/inference.py
|
import imageio
import argparse
import numpy as np
import matplotlib.pyplot as plt
import torch
from easydict import EasyDict
from torchvision import transforms
from transforms import sep_transforms
from utils.flow_utils import flow_to_image, resize_flow
from utils.torch_utils import restore_model
from models.pwclite import PWCLite
class TestHelper():
    """Load a pretrained PWCLite model and run it on a list of images.

    `cfg` is a dict with keys 'model' (network config), 'pretrained_model'
    (checkpoint path) and 'test_shape' (inference resolution).
    """

    def __init__(self, cfg):
        self.cfg = EasyDict(cfg)
        if torch.cuda.is_available():
            self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")
        self.model = self.init_model()
        # Resize to the test shape, convert HWC arrays to tensors and
        # normalize pixel values into [0, 1].
        self.input_transform = transforms.Compose([
            sep_transforms.Zoom(*self.cfg.test_shape),
            sep_transforms.ArrayToTensor(),
            transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]),
        ])

    def init_model(self):
        """Build PWCLite, restore the checkpoint and switch to eval mode."""
        net = PWCLite(self.cfg.model).to(self.device)
        net = restore_model(net, self.cfg.pretrained_model)
        net.eval()
        return net

    def run(self, imgs):
        """Run the network on a list of images; returns its result dict."""
        batched = []
        for img in imgs:
            batched.append(self.input_transform(img).unsqueeze(0))
        pair = torch.cat(batched, 1).to(self.device)
        return self.model(pair)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', default='checkpoints/KITTI15/pwclite_ar.tar')
    parser.add_argument('-s', '--test_shape', default=[384, 640], type=int, nargs=2)
    parser.add_argument('-i', '--img_list', nargs='+',
                        default=['examples/img1.png', 'examples/img2.png'])
    args = parser.parse_args()

    cfg = {
        'model': {
            'upsample': True,
            'n_frames': len(args.img_list),
            'reduce_dense': True
        },
        'pretrained_model': args.model,
        'test_shape': args.test_shape,
    }
    ts = TestHelper(cfg)

    # Load the input frames as float arrays; remember the original size.
    imgs = [imageio.imread(img).astype(np.float32) for img in args.img_list]
    h, w = imgs[0].shape[:2]

    # Forward flow prediction at the network's test shape...
    flow_12 = ts.run(imgs)['flows_fw'][0]

    # ...then resized back to the original image resolution (HxWx2 array).
    flow_12 = resize_flow(flow_12, (h, w))
    np_flow_12 = flow_12[0].detach().cpu().numpy().transpose([1, 2, 0])

    # Visualize the flow field as a color image.
    vis_flow = flow_to_image(np_flow_12)

    fig = plt.figure()
    plt.imshow(vis_flow)
    plt.show()
| 2,310
| 29.813333
| 90
|
py
|
ARFlow
|
ARFlow-master/logger.py
|
import logging
import logging.config
import logging.handlers
from path import Path
def init_logger(level='INFO', log_dir='./', log_name='main_logger', filename='main.log'):
    """Create the project logger.

    Attaches a rotating file handler (20 MB per file, 5 backups) with a
    detailed format and a plain console handler, sets the log level, and
    emits a start message.
    """
    logger = logging.getLogger(log_name)

    # File output: timestamp, level, logger name and code location.
    file_handler = logging.handlers.RotatingFileHandler(
        Path(log_dir) / filename, 'w', 20 * 1024 * 1024, 5)
    file_fmt = logging.Formatter('%(asctime)s %(levelname)5s - %(name)s '
                                 '[%(filename)s line %(lineno)d] - %(message)s',
                                 datefmt='%m-%d %H:%M:%S')
    file_handler.setFormatter(file_fmt)
    logger.addHandler(file_handler)

    # Console output: level and message only.
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
    logger.addHandler(console_handler)

    logger.setLevel(level)
    logger.info("Start training")
    return logger
| 860
| 29.75
| 89
|
py
|
ARFlow
|
ARFlow-master/basic_train.py
|
import torch
from utils.torch_utils import init_seed
from datasets.get_dataset import get_dataset
from models.get_model import get_model
from losses.get_loss import get_loss
from trainer.get_trainer import get_trainer
def main(cfg, _log):
    """Build the data loaders, model, loss and trainer from the config and
    start training.

    cfg  -- EasyDict experiment configuration (seed, train, model, loss, trainer, ...)
    _log -- logger created by init_logger()
    """
    init_seed(cfg.seed)

    _log.info("=> fetching img pairs.")
    train_set, valid_set = get_dataset(cfg)

    _log.info('{} samples found, {} train samples and {} test samples '.format(
        len(valid_set) + len(train_set),
        len(train_set),
        len(valid_set)))

    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=cfg.train.batch_size,
        num_workers=cfg.train.workers, pin_memory=True, shuffle=True)

    # Validation batches are capped so evaluation fits in memory.
    max_test_batch = 4
    if type(valid_set) is torch.utils.data.ConcatDataset:
        # One loader per sub-dataset so metrics can be reported separately.
        valid_loader = [torch.utils.data.DataLoader(
            s, batch_size=min(max_test_batch, cfg.train.batch_size),
            num_workers=min(4, cfg.train.workers),
            pin_memory=True, shuffle=False) for s in valid_set.datasets]
        valid_size = sum([len(l) for l in valid_loader])
    else:
        valid_loader = torch.utils.data.DataLoader(
            valid_set, batch_size=min(max_test_batch, cfg.train.batch_size),
            num_workers=min(4, cfg.train.workers),
            pin_memory=True, shuffle=False)
        valid_size = len(valid_loader)

    # epoch_size/valid_size == 0 means "use the full loader length";
    # otherwise clamp both to what the loaders actually provide.
    if cfg.train.epoch_size == 0:
        cfg.train.epoch_size = len(train_loader)
    if cfg.train.valid_size == 0:
        cfg.train.valid_size = valid_size
    cfg.train.epoch_size = min(cfg.train.epoch_size, len(train_loader))
    cfg.train.valid_size = min(cfg.train.valid_size, valid_size)

    model = get_model(cfg.model)
    loss = get_loss(cfg.loss)
    trainer = get_trainer(cfg.trainer)(
        train_loader, valid_loader, model, loss, _log, cfg.save_root, cfg.train)

    trainer.train()
| 1,854
| 34.673077
| 80
|
py
|
ARFlow
|
ARFlow-master/train.py
|
import json
import pprint
import datetime
import argparse
from path import Path
from easydict import EasyDict
import basic_train
from logger import init_logger
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', default='configs/sintel_ft.json')
    parser.add_argument('-e', '--evaluate', action='store_true')
    parser.add_argument('-m', '--model', default=None)
    parser.add_argument('--n_gpu', type=int, default=1)
    args = parser.parse_args()

    # Load the experiment configuration from JSON.
    with open(args.config) as f:
        cfg = EasyDict(json.load(f))

    if args.evaluate:
        # Evaluation mode: a single validation pass, no training iterations.
        cfg.train.update({
            'epochs': 1,
            'epoch_size': -1,
            'valid_size': 0,
            'workers': 1,
            'val_epoch_size': 1,
        })

    if args.model is not None:
        cfg.train.pretrained_model = args.model
    cfg.train.n_gpu = args.n_gpu

    # store files day by day: outputs/checkpoints/<yymmdd>/<hhmmss>
    curr_time = datetime.datetime.now().strftime("%y%m%d%H%M%S")
    cfg.save_root = Path('./outputs/checkpoints') / curr_time[:6] / curr_time[6:]
    cfg.save_root.makedirs_p()

    # init logger
    _log = init_logger(log_dir=cfg.save_root, filename=curr_time[6:] + '.log')
    _log.info('=> will save everything to {}'.format(cfg.save_root))

    # show configurations
    cfg_str = pprint.pformat(cfg)
    _log.info('=> configurations \n ' + cfg_str)

    basic_train.main(cfg, _log)
| 1,418
| 27.38
| 81
|
py
|
ARFlow
|
ARFlow-master/trainer/base_trainer.py
|
import torch
import numpy as np
from abc import abstractmethod
from tensorboardX import SummaryWriter
from utils.torch_utils import bias_parameters, weight_parameters, \
load_checkpoint, save_checkpoint, AdamW
class BaseTrainer:
    """
    Base class for all trainers.

    Owns the data loaders, device placement, model initialization, the
    optimizer and checkpointing; subclasses implement `_run_one_epoch` and
    `_validate_with_gt`.
    """

    def __init__(self, train_loader, valid_loader, model, loss_func,
                 _log, save_root, config):
        # config: dict-like with n_gpu, pretrained_model, optimizer settings, ...
        self._log = _log
        self.cfg = config
        self.save_root = save_root
        self.summary_writer = SummaryWriter(str(save_root))
        self.train_loader, self.valid_loader = train_loader, valid_loader
        self.device, self.device_ids = self._prepare_device(config['n_gpu'])
        self.model = self._init_model(model)
        self.optimizer = self._create_optimizer()
        self.loss_func = loss_func
        self.best_error = np.inf  # best validation error seen so far
        self.i_epoch = 0          # epoch counter (advanced by subclasses)
        self.i_iter = 0           # global iteration counter

    @abstractmethod
    def _run_one_epoch(self):
        """Train for one epoch; implemented by subclasses."""
        ...

    @abstractmethod
    def _validate_with_gt(self):
        """Evaluate against ground truth; returns (errors, error_names)."""
        ...

    def train(self):
        """Main loop: run epochs, validating every `val_epoch_size` epochs."""
        for epoch in range(self.cfg.epoch_num):
            self._run_one_epoch()

            if self.i_epoch % self.cfg.val_epoch_size == 0:
                errors, error_names = self._validate_with_gt()
                valid_res = ' '.join(
                    '{}: {:.2f}'.format(*t) for t in zip(error_names, errors))
                self._log.info(' * Epoch {} '.format(self.i_epoch) + valid_res)

    def _init_model(self, model):
        """Move the model to the device, optionally load pretrained weights,
        and wrap it in DataParallel."""
        model = model.to(self.device)
        if self.cfg.pretrained_model:
            self._log.info("=> using pre-trained weights {}.".format(
                self.cfg.pretrained_model))
            epoch, weights = load_checkpoint(self.cfg.pretrained_model)

            from collections import OrderedDict
            new_weights = OrderedDict()
            model_keys = list(model.state_dict().keys())
            weight_keys = list(weights.keys())
            # NOTE(review): checkpoint keys are paired with model keys by
            # POSITION, not by name — this assumes both list parameters in
            # the same order. Confirm when changing either architecture.
            for a, b in zip(model_keys, weight_keys):
                new_weights[a] = weights[b]
            weights = new_weights
            model.load_state_dict(weights)
        else:
            self._log.info("=> Train from scratch.")
            model.init_weights()
        model = torch.nn.DataParallel(model, device_ids=self.device_ids)
        return model

    def _create_optimizer(self):
        """Build an Adam or AdamW optimizer with separate weight-decay values
        for bias and weight parameters."""
        self._log.info('=> setting Adam solver')
        param_groups = [
            {'params': bias_parameters(self.model.module),
             'weight_decay': self.cfg.bias_decay},
            {'params': weight_parameters(self.model.module),
             'weight_decay': self.cfg.weight_decay}]

        if self.cfg.optim == 'adamw':
            optimizer = AdamW(param_groups, self.cfg.lr,
                              betas=(self.cfg.momentum, self.cfg.beta))
        elif self.cfg.optim == 'adam':
            optimizer = torch.optim.Adam(param_groups, self.cfg.lr,
                                         betas=(self.cfg.momentum, self.cfg.beta),
                                         eps=1e-7)
        else:
            raise NotImplementedError(self.cfg.optim)
        return optimizer

    def _prepare_device(self, n_gpu_use):
        """
        setup GPU device if available, move model into configured device.
        Returns (device, device_ids); requests exceeding the available GPU
        count are clamped with a warning.
        """
        n_gpu = torch.cuda.device_count()
        if n_gpu_use > 0 and n_gpu == 0:
            self._log.warning("Warning: There\'s no GPU available on this machine,"
                              "training will be performed on CPU.")
            n_gpu_use = 0
        if n_gpu_use > n_gpu:
            self._log.warning(
                "Warning: The number of GPU\'s configured to use is {}, "
                "but only {} are available.".format(n_gpu_use, n_gpu))
            n_gpu_use = n_gpu
        device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')
        list_ids = list(range(n_gpu_use))
        return device, list_ids

    def save_model(self, error, name):
        """Persist a checkpoint, flagging it as best when `error` improves on
        the best validation error seen so far."""
        is_best = error < self.best_error

        if is_best:
            self.best_error = error

        models = {'epoch': self.i_epoch,
                  'state_dict': self.model.module.state_dict()}

        save_checkpoint(self.save_root, models, name, is_best)
| 4,244
| 34.672269
| 83
|
py
|
ARFlow
|
ARFlow-master/trainer/kitti_trainer_ar.py
|
import time
import torch
import numpy as np
from copy import deepcopy
from .base_trainer import BaseTrainer
from utils.flow_utils import load_flow, evaluate_flow
from utils.misc_utils import AverageMeter
from transforms.ar_transforms.sp_transfroms import RandomAffineFlow
from transforms.ar_transforms.oc_transforms import run_slic_pt, random_crop
class TrainFramework(BaseTrainer):
    def __init__(self, train_loader, valid_loader, model, loss_func,
                 _log, save_root, config):
        # Generic setup (device, model init, optimizer, counters) is done by
        # BaseTrainer.
        super(TrainFramework, self).__init__(
            train_loader, valid_loader, model, loss_func, _log, save_root, config)
        # Spatial transform used to build the augmented second-pass samples
        # for the self-supervised (AR) losses.
        self.sp_transform = RandomAffineFlow(
            self.cfg.st_cfg, addnoise=self.cfg.st_cfg.add_noise).to(self.device)
    def _run_one_epoch(self):
        """Train for one epoch.

        Per batch: (1) photometric/smoothness loss on the original pair;
        (2) optional appearance/spatial-transform pass (`run_atst`) whose
        prediction is supervised by the transformed first-pass flow;
        (3) optional occlusion-hallucination pass (`run_ot`) on cropped,
        optionally SLIC-noised images. Gradients are scaled by 1024 before
        backward and un-scaled afterwards.
        """
        am_batch_time = AverageMeter()
        am_data_time = AverageMeter()

        key_meter_names = ['Loss', 'l_ph', 'l_sm', 'flow_mean', 'l_atst', 'l_ot']
        key_meters = AverageMeter(i=len(key_meter_names), precision=4)

        self.model.train()
        end = time.time()

        # Optional two-stage schedule: switch loss settings at a given epoch.
        if 'stage1' in self.cfg:
            if self.i_epoch == self.cfg.stage1.epoch:
                self.loss_func.cfg.update(self.cfg.stage1.loss)

        for i_step, data in enumerate(self.train_loader):
            if i_step > self.cfg.epoch_size:
                break
            # read data to device
            img1, img2 = data['img1'].to(self.device), data['img2'].to(self.device)
            img_pair = torch.cat([img1, img2], 1)

            # measure data loading time
            am_data_time.update(time.time() - end)

            # run 1st pass
            res_dict = self.model(img_pair, with_bk=True)
            flows_12, flows_21 = res_dict['flows_fw'], res_dict['flows_bw']
            flows = [torch.cat([flo12, flo21], 1) for flo12, flo21 in
                     zip(flows_12, flows_21)]
            loss, l_ph, l_sm, flow_mean = self.loss_func(flows, img_pair)

            # First-pass flow is detached: it acts as the (fixed) teacher
            # signal for the augmentation passes below.
            flow_ori = res_dict['flows_fw'][0].detach()

            if self.cfg.run_atst:
                img1, img2 = data['img1_ph'].to(self.device), data['img2_ph'].to(
                    self.device)
                # construct augment sample
                noc_ori = self.loss_func.pyramid_occu_mask1[0]  # non-occluded region
                s = {'imgs': [img1, img2], 'flows_f': [flow_ori], 'masks_f': [noc_ori]}
                st_res = self.sp_transform(deepcopy(s)) if self.cfg.run_st else deepcopy(s)
                flow_t, noc_t = st_res['flows_f'][0], st_res['masks_f'][0]

                # run 2nd pass
                img_pair = torch.cat(st_res['imgs'], 1)
                flow_t_pred = self.model(img_pair, with_bk=False)['flows_fw'][0]

                if not self.cfg.mask_st:
                    noc_t = torch.ones_like(noc_t)
                # Robust (Charbonnier-style) penalty, averaged over the
                # non-occluded region only.
                l_atst = ((flow_t_pred - flow_t).abs() + self.cfg.ar_eps) ** self.cfg.ar_q
                l_atst = (l_atst * noc_t).mean() / (noc_t.mean() + 1e-7)

                loss += self.cfg.w_ar * l_atst
            else:
                l_atst = torch.zeros_like(loss)

            if self.cfg.run_ot:
                # NOTE(review): `noc_ori` is only defined inside the
                # `run_atst` branch above; `run_ot` without `run_atst`
                # would raise NameError — confirm configs enable both.
                img1, img2 = data['img1_ph'].to(self.device), data['img2_ph'].to(
                    self.device)
                # run 3rd pass
                img_pair = torch.cat([img1, img2], 1)

                # random crop images
                img_pair, flow_t, occ_t = random_crop(img_pair, flow_ori, 1 - noc_ori,
                                                      self.cfg.ot_size)

                # slic 200, random select 8~16 segments to replace with noise
                if self.cfg.ot_slic:
                    img2 = img_pair[:, 3:]
                    seg_mask = run_slic_pt(img2, n_seg=200,
                                           compact=self.cfg.ot_compact, rd_select=[8, 16],
                                           fast=self.cfg.ot_fast).type_as(img2)  # Nx1xHxW
                    noise = torch.rand(img2.size()).type_as(img2)
                    img2 = img2 * (1 - seg_mask) + noise * seg_mask
                    img_pair[:, 3:] = img2

                flow_t_pred = self.model(img_pair, with_bk=False)['flows_fw'][0]

                noc_t = 1 - occ_t
                l_ot = ((flow_t_pred - flow_t).abs() + self.cfg.ar_eps) ** self.cfg.ar_q
                l_ot = (l_ot * noc_t).mean() / (noc_t.mean() + 1e-7)

                loss += self.cfg.w_ar * l_ot
            else:
                l_ot = torch.zeros_like(loss)

            # update meters
            key_meters.update(
                [loss.item(), l_ph.item(), l_sm.item(), flow_mean.item(),
                 l_atst.item(), l_ot.item()],
                img_pair.size(0))

            # compute gradient and do optimization step
            self.optimizer.zero_grad()
            # loss.backward()

            # Loss is scaled by 1024 before backward and the gradients are
            # divided by 1024 afterwards; the resulting update equals a
            # plain backward.
            scaled_loss = 1024. * loss
            scaled_loss.backward()

            for param in [p for p in self.model.parameters() if p.requires_grad]:
                param.grad.data.mul_(1. / 1024)

            self.optimizer.step()

            # measure elapsed time
            am_batch_time.update(time.time() - end)
            end = time.time()

            if self.i_iter % self.cfg.record_freq == 0:
                for v, name in zip(key_meters.val, key_meter_names):
                    self.summary_writer.add_scalar('Train_' + name, v, self.i_iter)

            if self.i_iter % self.cfg.print_freq == 0:
                istr = '{}:{:04d}/{:04d}'.format(
                    self.i_epoch, i_step, self.cfg.epoch_size) + \
                       ' Time {} Data {}'.format(am_batch_time, am_data_time) + \
                       ' Info {}'.format(key_meters)
                self._log.info(istr)

            self.i_iter += 1
        self.i_epoch += 1
@torch.no_grad()
def _validate_with_gt(self):
    """Evaluate the current model on the KITTI validation loader(s).

    Computes EPE / non-occluded EPE / occluded EPE / F1-all against the
    ground-truth flow maps, logs them to the summary writer, and
    checkpoints the model once training has passed ``cfg.save_iter``.

    Returns:
        (all_error_avgs, all_error_names): flat lists of averaged metric
        values and their per-loader names (suffixed with the loader index).
    """
    batch_time = AverageMeter()
    # Normalize to a list so single- and multi-loader setups share one path.
    if type(self.valid_loader) is not list:
        self.valid_loader = [self.valid_loader]
    # only use the first GPU to run validation, multiple GPUs might raise error.
    # https://github.com/Eromera/erfnet_pytorch/issues/2#issuecomment-486142360
    # Unwrap DataParallel for validation; it is re-wrapped at the end.
    self.model = self.model.module
    self.model.eval()
    end = time.time()
    all_error_names = []
    all_error_avgs = []
    n_step = 0
    for i_set, loader in enumerate(self.valid_loader):
        error_names = ['EPE', 'E_noc', 'E_occ', 'F1_all']
        error_meters = AverageMeter(i=len(error_names))
        for i_step, data in enumerate(loader):
            img1, img2 = data['img1'], data['img2']
            img_pair = torch.cat([img1, img2], 1).to(self.device)
            # Load GT flow plus occluded / non-occluded validity masks from disk.
            res = list(map(load_flow, data['flow_occ']))
            gt_flows, occ_masks = [r[0] for r in res], [r[1] for r in res]
            res = list(map(load_flow, data['flow_noc']))
            _, noc_masks = [r[0] for r in res], [r[1] for r in res]
            # Pack flow + both masks into one HxWx4 array per sample,
            # the layout expected by evaluate_flow.
            gt_flows = [np.concatenate([flow, occ_mask, noc_mask], axis=2) for
                        flow, occ_mask, noc_mask in
                        zip(gt_flows, occ_masks, noc_masks)]
            # compute output
            flows = self.model(img_pair)['flows_fw']
            # NCHW -> NHWC for the numpy-based evaluation helper.
            pred_flows = flows[0].detach().cpu().numpy().transpose([0, 2, 3, 1])
            es = evaluate_flow(gt_flows, pred_flows)
            error_meters.update([l.item() for l in es], img_pair.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i_step % self.cfg.print_freq == 0 or i_step == len(loader) - 1:
                self._log.info('Test: {0}[{1}/{2}]\t Time {3}\t '.format(
                    i_set, i_step, self.cfg.valid_size, batch_time) + ' '.join(
                    map('{:.2f}'.format, error_meters.avg)))
            # Cap validation length for speed.
            if i_step > self.cfg.valid_size:
                break
        n_step += len(loader)
        # write error to tf board.
        for value, name in zip(error_meters.avg, error_names):
            self.summary_writer.add_scalar(
                'Valid_{}_{}'.format(name, i_set), value, self.i_epoch)
        all_error_avgs.extend(error_meters.avg)
        all_error_names.extend(['{}_{}'.format(name, i_set) for name in error_names])
    # Restore multi-GPU wrapper for subsequent training epochs.
    self.model = torch.nn.DataParallel(self.model, device_ids=self.device_ids)
    # In order to reduce the space occupied during debugging,
    # only the model with more than cfg.save_iter iterations will be saved.
    if self.i_iter > self.cfg.save_iter:
        self.save_model(all_error_avgs[0], name='KITTI_Flow')
    return all_error_avgs, all_error_names
| 8,755
| 40.49763
| 91
|
py
|
ARFlow
|
ARFlow-master/trainer/sintel_trainer.py
|
import time
import torch
from .base_trainer import BaseTrainer
from utils.flow_utils import evaluate_flow
from utils.misc_utils import AverageMeter
class TrainFramework(BaseTrainer):
    """Unsupervised optical-flow trainer for the MPI-Sintel dataset.

    Trains with a photometric + smoothness loss over forward and backward
    flows and validates against Sintel ground-truth flow (EPE).
    """
    def __init__(self, train_loader, valid_loader, model, loss_func,
                 _log, save_root, config):
        super(TrainFramework, self).__init__(
            train_loader, valid_loader, model, loss_func, _log, save_root, config)
    def _run_one_epoch(self):
        """Run one training epoch over ``self.train_loader``."""
        am_batch_time = AverageMeter()
        am_data_time = AverageMeter()
        key_meter_names = ['Loss', 'l_ph', 'l_sm', 'flow_mean']
        key_meters = AverageMeter(i=len(key_meter_names), precision=4)
        self.model.train()
        end = time.time()
        # Two-stage schedules swap in the stage-1 loss config at the given epoch.
        if 'stage1' in self.cfg:
            if self.i_epoch == self.cfg.stage1.epoch:
                self.loss_func.cfg.update(self.cfg.stage1.loss)
        for i_step, data in enumerate(self.train_loader):
            if i_step > self.cfg.epoch_size:
                break
            # read data to device
            img1, img2 = data['img1'], data['img2']
            img_pair = torch.cat([img1, img2], 1).to(self.device)
            # measure data loading time
            am_data_time.update(time.time() - end)
            # compute output: forward and backward flow pyramids in one pass
            res_dict = self.model(img_pair, with_bk=True)
            flows_12, flows_21 = res_dict['flows_fw'], res_dict['flows_bw']
            # Stack fw/bw flows per pyramid level for the bidirectional loss.
            flows = [torch.cat([flo12, flo21], 1) for flo12, flo21 in
                     zip(flows_12, flows_21)]
            loss, l_ph, l_sm, flow_mean = self.loss_func(flows, img_pair)
            # update meters
            key_meters.update([loss.item(), l_ph.item(), l_sm.item(), flow_mean.item()],
                              img_pair.size(0))
            # compute gradient and do optimization step
            self.optimizer.zero_grad()
            # loss.backward()
            # Manual loss scaling: scale up before backward, scale the
            # gradients back down — presumably for numerical stability of
            # small gradients; verify against the training config.
            scaled_loss = 1024. * loss
            scaled_loss.backward()
            for param in [p for p in self.model.parameters() if p.requires_grad]:
                param.grad.data.mul_(1. / 1024)
            self.optimizer.step()
            # measure elapsed time
            am_batch_time.update(time.time() - end)
            end = time.time()
            if self.i_iter % self.cfg.record_freq == 0:
                for v, name in zip(key_meters.val, key_meter_names):
                    self.summary_writer.add_scalar('Train_' + name, v, self.i_iter)
            if self.i_iter % self.cfg.print_freq == 0:
                istr = '{}:{:04d}/{:04d}'.format(
                    self.i_epoch, i_step, self.cfg.epoch_size) + \
                       ' Time {} Data {}'.format(am_batch_time, am_data_time) + \
                       ' Info {}'.format(key_meters)
                self._log.info(istr)
            self.i_iter += 1
        self.i_epoch += 1
    @torch.no_grad()
    def _validate_with_gt(self):
        """Evaluate EPE on the Sintel validation loader(s); checkpoint the model.

        Returns:
            (all_error_avgs, all_error_names): averaged metric values and
            their per-loader names.
        """
        batch_time = AverageMeter()
        # Normalize to a list so single- and multi-loader setups share one path.
        if type(self.valid_loader) is not list:
            self.valid_loader = [self.valid_loader]
        # only use the first GPU to run validation, multiple GPUs might raise error.
        # https://github.com/Eromera/erfnet_pytorch/issues/2#issuecomment-486142360
        self.model = self.model.module
        self.model.eval()
        end = time.time()
        all_error_names = []
        all_error_avgs = []
        n_step = 0
        for i_set, loader in enumerate(self.valid_loader):
            error_names = ['EPE']
            error_meters = AverageMeter(i=len(error_names))
            for i_step, data in enumerate(loader):
                img1, img2 = data['img1'], data['img2']
                img_pair = torch.cat([img1, img2], 1).to(self.device)
                # NCHW -> NHWC for the numpy-based evaluation helper.
                gt_flows = data['target']['flow'].numpy().transpose([0, 2, 3, 1])
                # compute output
                flows = self.model(img_pair)['flows_fw']
                pred_flows = flows[0].detach().cpu().numpy().transpose([0, 2, 3, 1])
                es = evaluate_flow(gt_flows, pred_flows)
                error_meters.update([l.item() for l in es], img_pair.size(0))
                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()
                if i_step % self.cfg.print_freq == 0 or i_step == len(loader) - 1:
                    self._log.info('Test: {0}[{1}/{2}]\t Time {3}\t '.format(
                        i_set, i_step, self.cfg.valid_size, batch_time) + ' '.join(
                        map('{:.2f}'.format, error_meters.avg)))
                if i_step > self.cfg.valid_size:
                    break
            n_step += len(loader)
            # write error to tf board.
            for value, name in zip(error_meters.avg, error_names):
                self.summary_writer.add_scalar(
                    'Valid_{}_{}'.format(name, i_set), value, self.i_epoch)
            all_error_avgs.extend(error_meters.avg)
            all_error_names.extend(['{}_{}'.format(name, i_set) for name in error_names])
        # Restore multi-GPU wrapper for subsequent training epochs.
        self.model = torch.nn.DataParallel(self.model, device_ids=self.device_ids)
        # In order to reduce the space occupied during debugging,
        # only the model with more than cfg.save_iter iterations will be saved.
        if self.i_iter > self.cfg.save_iter:
            # Selection metric: sum of clean + final EPE (the two loaders).
            self.save_model(all_error_avgs[0] + all_error_avgs[1], name='Sintel')
        return all_error_avgs, all_error_names
| 5,445
| 37.9
| 89
|
py
|
ARFlow
|
ARFlow-master/trainer/kitti_trainer.py
|
import time
import torch
import numpy as np
from .base_trainer import BaseTrainer
from utils.flow_utils import load_flow, evaluate_flow
from utils.misc_utils import AverageMeter
class TrainFramework(BaseTrainer):
    """Unsupervised optical-flow trainer for the KITTI dataset.

    Training mirrors the Sintel trainer (photometric + smoothness loss on
    bidirectional flows); validation uses KITTI's occ/noc ground truth.
    """
    def __init__(self, train_loader, valid_loader, model, loss_func,
                 _log, save_root, config):
        super(TrainFramework, self).__init__(
            train_loader, valid_loader, model, loss_func, _log, save_root, config)
    def _run_one_epoch(self):
        """Run one training epoch over ``self.train_loader``."""
        am_batch_time = AverageMeter()
        am_data_time = AverageMeter()
        key_meter_names = ['Loss', 'l_ph', 'l_sm', 'flow_mean']
        key_meters = AverageMeter(i=len(key_meter_names), precision=4)
        self.model.train()
        end = time.time()
        # Two-stage schedules swap in the stage-1 loss config at the given epoch.
        if 'stage1' in self.cfg:
            if self.i_epoch == self.cfg.stage1.epoch:
                self.loss_func.cfg.update(self.cfg.stage1.loss)
        for i_step, data in enumerate(self.train_loader):
            if i_step > self.cfg.epoch_size:
                break
            # read data to device
            img1, img2 = data['img1'], data['img2']
            img_pair = torch.cat([img1, img2], 1).to(self.device)
            # measure data loading time
            am_data_time.update(time.time() - end)
            # compute output: forward and backward flow pyramids in one pass
            res_dict = self.model(img_pair, with_bk=True)
            flows_12, flows_21 = res_dict['flows_fw'], res_dict['flows_bw']
            # Stack fw/bw flows per pyramid level for the bidirectional loss.
            flows = [torch.cat([flo12, flo21], 1) for flo12, flo21 in
                     zip(flows_12, flows_21)]
            loss, l_ph, l_sm, flow_mean = self.loss_func(flows, img_pair)
            # update meters
            key_meters.update([loss.item(), l_ph.item(), l_sm.item(), flow_mean.item()],
                              img_pair.size(0))
            # compute gradient and do optimization step
            self.optimizer.zero_grad()
            # loss.backward()
            # Manual loss scaling: scale up before backward, scale gradients
            # back down — presumably for numerical stability; verify against
            # the training config.
            scaled_loss = 1024. * loss
            scaled_loss.backward()
            for param in [p for p in self.model.parameters() if p.requires_grad]:
                param.grad.data.mul_(1. / 1024)
            self.optimizer.step()
            # measure elapsed time
            am_batch_time.update(time.time() - end)
            end = time.time()
            if self.i_iter % self.cfg.record_freq == 0:
                for v, name in zip(key_meters.val, key_meter_names):
                    self.summary_writer.add_scalar('Train_' + name, v, self.i_iter)
            if self.i_iter % self.cfg.print_freq == 0:
                istr = '{}:{:04d}/{:04d}'.format(
                    self.i_epoch, i_step, self.cfg.epoch_size) + \
                       ' Time {} Data {}'.format(am_batch_time, am_data_time) + \
                       ' Info {}'.format(key_meters)
                self._log.info(istr)
            self.i_iter += 1
        self.i_epoch += 1
    @torch.no_grad()
    def _validate_with_gt(self):
        """Evaluate EPE / E_noc / E_occ / F1_all on KITTI ground truth.

        Returns:
            (all_error_avgs, all_error_names): averaged metric values and
            their per-loader names.
        """
        batch_time = AverageMeter()
        # Normalize to a list so single- and multi-loader setups share one path.
        if type(self.valid_loader) is not list:
            self.valid_loader = [self.valid_loader]
        # only use the first GPU to run validation, multiple GPUs might raise error.
        # https://github.com/Eromera/erfnet_pytorch/issues/2#issuecomment-486142360
        self.model = self.model.module
        self.model.eval()
        end = time.time()
        all_error_names = []
        all_error_avgs = []
        n_step = 0
        for i_set, loader in enumerate(self.valid_loader):
            error_names = ['EPE', 'E_noc', 'E_occ', 'F1_all']
            error_meters = AverageMeter(i=len(error_names))
            for i_step, data in enumerate(loader):
                img1, img2 = data['img1'], data['img2']
                img_pair = torch.cat([img1, img2], 1).to(self.device)
                # Load GT flow plus occluded / non-occluded validity masks.
                res = list(map(load_flow, data['flow_occ']))
                gt_flows, occ_masks = [r[0] for r in res], [r[1] for r in res]
                res = list(map(load_flow, data['flow_noc']))
                _, noc_masks = [r[0] for r in res], [r[1] for r in res]
                # Pack flow + both masks into one HxWx4 array per sample.
                gt_flows = [np.concatenate([flow, occ_mask, noc_mask], axis=2) for
                            flow, occ_mask, noc_mask in
                            zip(gt_flows, occ_masks, noc_masks)]
                # compute output
                flows = self.model(img_pair)['flows_fw']
                pred_flows = flows[0].detach().cpu().numpy().transpose([0, 2, 3, 1])
                es = evaluate_flow(gt_flows, pred_flows)
                error_meters.update([l.item() for l in es], img_pair.size(0))
                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()
                if i_step % self.cfg.print_freq == 0 or i_step == len(loader) - 1:
                    self._log.info('Test: {0}[{1}/{2}]\t Time {3}\t '.format(
                        i_set, i_step, self.cfg.valid_size, batch_time) + ' '.join(
                        map('{:.2f}'.format, error_meters.avg)))
                if i_step > self.cfg.valid_size:
                    break
            n_step += len(loader)
            # write error to tf board.
            for value, name in zip(error_meters.avg, error_names):
                self.summary_writer.add_scalar(
                    'Valid_{}_{}'.format(name, i_set), value, self.i_epoch)
            all_error_avgs.extend(error_meters.avg)
            all_error_names.extend(['{}_{}'.format(name, i_set) for name in error_names])
        # Restore multi-GPU wrapper for subsequent training epochs.
        self.model = torch.nn.DataParallel(self.model, device_ids=self.device_ids)
        # In order to reduce the space occupied during debugging,
        # only the model with more than cfg.save_iter iterations will be saved.
        if self.i_iter > self.cfg.save_iter:
            self.save_model(all_error_avgs[0], name='KITTI_Flow')
        return all_error_avgs, all_error_names
| 5,884
| 38.496644
| 89
|
py
|
ARFlow
|
ARFlow-master/trainer/sintel_trainer_ar.py
|
import time
import torch
from copy import deepcopy
from .base_trainer import BaseTrainer
from utils.flow_utils import evaluate_flow
from utils.misc_utils import AverageMeter
from transforms.ar_transforms.sp_transfroms import RandomAffineFlow
from transforms.ar_transforms.oc_transforms import run_slic_pt, random_crop
class TrainFramework(BaseTrainer):
    """Sintel trainer with augmentation-regularization (AR).

    Each step runs up to three forward passes:
      1. the plain unsupervised pass (photometric + smoothness loss),
      2. an appearance/spatial-transform pass (``run_atst``) supervised by
         the first pass's flow, and
      3. an occlusion-hallucination pass (``run_ot``) where superpixel
         regions of the second image are replaced by noise.
    """
    def __init__(self, train_loader, valid_loader, model, loss_func,
                 _log, save_root, config):
        super(TrainFramework, self).__init__(
            train_loader, valid_loader, model, loss_func, _log, save_root, config)
        # Spatial transform used to synthesize the 2nd-pass (augmented) sample.
        self.sp_transform = RandomAffineFlow(
            self.cfg.st_cfg, addnoise=self.cfg.st_cfg.add_noise).to(self.device)
    def _run_one_epoch(self):
        """Run one AR training epoch over ``self.train_loader``."""
        am_batch_time = AverageMeter()
        am_data_time = AverageMeter()
        key_meter_names = ['Loss', 'l_ph', 'l_sm', 'flow_mean', 'l_atst', 'l_ot']
        key_meters = AverageMeter(i=len(key_meter_names), precision=4)
        self.model.train()
        end = time.time()
        # Two-stage schedules swap in the stage-1 loss config at the given epoch.
        if 'stage1' in self.cfg:
            if self.i_epoch == self.cfg.stage1.epoch:
                self.loss_func.cfg.update(self.cfg.stage1.loss)
        for i_step, data in enumerate(self.train_loader):
            if i_step > self.cfg.epoch_size:
                break
            # read data to device
            img1, img2 = data['img1'].to(self.device), data['img2'].to(self.device)
            img_pair = torch.cat([img1, img2], 1)
            # measure data loading time
            am_data_time.update(time.time() - end)
            # run 1st pass: plain unsupervised loss on the original pair
            res_dict = self.model(img_pair, with_bk=True)
            flows_12, flows_21 = res_dict['flows_fw'], res_dict['flows_bw']
            flows = [torch.cat([flo12, flo21], 1) for flo12, flo21 in
                     zip(flows_12, flows_21)]
            loss, l_ph, l_sm, flow_mean = self.loss_func(flows, img_pair)
            # Detached 1st-pass flow acts as the pseudo label for passes 2 and 3.
            flow_ori = res_dict['flows_fw'][0].detach()
            if self.cfg.run_atst:
                # Photometrically-augmented copies of the input images.
                img1, img2 = data['img1_ph'].to(self.device), data['img2_ph'].to(
                    self.device)
                # construct augment sample
                noc_ori = self.loss_func.pyramid_occu_mask1[0]  # non-occluded region
                s = {'imgs': [img1, img2], 'flows_f': [flow_ori], 'masks_f': [noc_ori]}
                # Apply the spatial transform only when run_st is enabled.
                st_res = self.sp_transform(deepcopy(s)) if self.cfg.run_st else deepcopy(s)
                flow_t, noc_t = st_res['flows_f'][0], st_res['masks_f'][0]
                # run 2nd pass
                img_pair = torch.cat(st_res['imgs'], 1)
                flow_t_pred = self.model(img_pair, with_bk=False)['flows_fw'][0]
                # Optionally supervise everywhere instead of only non-occluded pixels.
                if not self.cfg.mask_st:
                    noc_t = torch.ones_like(noc_t)
                # Robust (Charbonnier-style) penalty, averaged over the valid mask.
                l_atst = ((flow_t_pred - flow_t).abs() + self.cfg.ar_eps) ** self.cfg.ar_q
                l_atst = (l_atst * noc_t).mean() / (noc_t.mean() + 1e-7)
                loss += self.cfg.w_ar * l_atst
            else:
                l_atst = torch.zeros_like(loss)
            if self.cfg.run_ot:
                img1, img2 = data['img1_ph'].to(self.device), data['img2_ph'].to(
                    self.device)
                # run 3rd pass
                img_pair = torch.cat([img1, img2], 1)
                # random crop images
                img_pair, flow_t, occ_t = random_crop(img_pair, flow_ori, 1 - noc_ori,
                                                      self.cfg.ot_size)
                # slic 200, random select 8~16
                if self.cfg.ot_slic:
                    img2 = img_pair[:, 3:]
                    # Hallucinate occlusions: replace randomly chosen
                    # superpixels of the second image with noise.
                    seg_mask = run_slic_pt(img2, n_seg=200,
                                           compact=self.cfg.ot_compact, rd_select=[8, 16],
                                           fast=self.cfg.ot_fast).type_as(img2)  # Nx1xHxW
                    noise = torch.rand(img2.size()).type_as(img2)
                    img2 = img2 * (1 - seg_mask) + noise * seg_mask
                    img_pair[:, 3:] = img2
                flow_t_pred = self.model(img_pair, with_bk=False)['flows_fw'][0]
                noc_t = 1 - occ_t
                l_ot = ((flow_t_pred - flow_t).abs() + self.cfg.ar_eps) ** self.cfg.ar_q
                l_ot = (l_ot * noc_t).mean() / (noc_t.mean() + 1e-7)
                loss += self.cfg.w_ar * l_ot
            else:
                l_ot = torch.zeros_like(loss)
            # update meters
            key_meters.update(
                [loss.item(), l_ph.item(), l_sm.item(), flow_mean.item(),
                 l_atst.item(), l_ot.item()],
                img_pair.size(0))
            # compute gradient and do optimization step
            self.optimizer.zero_grad()
            # loss.backward()
            # Manual loss scaling: scale up before backward, scale gradients
            # back down — presumably for numerical stability; verify against
            # the training config.
            scaled_loss = 1024. * loss
            scaled_loss.backward()
            for param in [p for p in self.model.parameters() if p.requires_grad]:
                param.grad.data.mul_(1. / 1024)
            self.optimizer.step()
            # measure elapsed time
            am_batch_time.update(time.time() - end)
            end = time.time()
            if self.i_iter % self.cfg.record_freq == 0:
                for v, name in zip(key_meters.val, key_meter_names):
                    self.summary_writer.add_scalar('Train_' + name, v, self.i_iter)
            if self.i_iter % self.cfg.print_freq == 0:
                istr = '{}:{:04d}/{:04d}'.format(
                    self.i_epoch, i_step, self.cfg.epoch_size) + \
                       ' Time {} Data {}'.format(am_batch_time, am_data_time) + \
                       ' Info {}'.format(key_meters)
                self._log.info(istr)
            self.i_iter += 1
        self.i_epoch += 1
    @torch.no_grad()
    def _validate_with_gt(self):
        """Evaluate EPE on the Sintel validation loader(s); checkpoint the model.

        Returns:
            (all_error_avgs, all_error_names): averaged metric values and
            their per-loader names.
        """
        batch_time = AverageMeter()
        # Normalize to a list so single- and multi-loader setups share one path.
        if type(self.valid_loader) is not list:
            self.valid_loader = [self.valid_loader]
        # only use the first GPU to run validation, multiple GPUs might raise error.
        # https://github.com/Eromera/erfnet_pytorch/issues/2#issuecomment-486142360
        self.model = self.model.module
        self.model.eval()
        end = time.time()
        all_error_names = []
        all_error_avgs = []
        n_step = 0
        for i_set, loader in enumerate(self.valid_loader):
            error_names = ['EPE']
            error_meters = AverageMeter(i=len(error_names))
            for i_step, data in enumerate(loader):
                img1, img2 = data['img1'], data['img2']
                img_pair = torch.cat([img1, img2], 1).to(self.device)
                # NCHW -> NHWC for the numpy-based evaluation helper.
                gt_flows = data['target']['flow'].numpy().transpose([0, 2, 3, 1])
                # compute output
                flows = self.model(img_pair)['flows_fw']
                pred_flows = flows[0].detach().cpu().numpy().transpose([0, 2, 3, 1])
                es = evaluate_flow(gt_flows, pred_flows)
                error_meters.update([l.item() for l in es], img_pair.size(0))
                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()
                if i_step % self.cfg.print_freq == 0 or i_step == len(loader) - 1:
                    self._log.info('Test: {0}[{1}/{2}]\t Time {3}\t '.format(
                        i_set, i_step, self.cfg.valid_size, batch_time) + ' '.join(
                        map('{:.2f}'.format, error_meters.avg)))
                if i_step > self.cfg.valid_size:
                    break
            n_step += len(loader)
            # write error to tf board.
            for value, name in zip(error_meters.avg, error_names):
                self.summary_writer.add_scalar(
                    'Valid_{}_{}'.format(name, i_set), value, self.i_epoch)
            all_error_avgs.extend(error_meters.avg)
            all_error_names.extend(['{}_{}'.format(name, i_set) for name in error_names])
        # Restore multi-GPU wrapper for subsequent training epochs.
        self.model = torch.nn.DataParallel(self.model, device_ids=self.device_ids)
        # In order to reduce the space occupied during debugging,
        # only the model with more than cfg.save_iter iterations will be saved.
        if self.i_iter > self.cfg.save_iter:
            # Selection metric: sum of clean + final EPE (the two loaders).
            self.save_model(all_error_avgs[0] + all_error_avgs[1], name='Sintel')
        return all_error_avgs, all_error_names
| 8,316
| 40.173267
| 91
|
py
|
ARFlow
|
ARFlow-master/trainer/get_trainer.py
|
from . import sintel_trainer, sintel_trainer_ar
from . import kitti_trainer, kitti_trainer_ar
def get_trainer(name):
    """Look up the TrainFramework class registered under ``name``.

    Args:
        name: one of 'Sintel', 'Sintel_AR', 'KITTI', 'KITTI_AR'.

    Returns:
        The matching trainer class (not an instance).

    Raises:
        NotImplementedError: if ``name`` is not a known trainer.
    """
    if name == 'Sintel':
        return sintel_trainer.TrainFramework
    if name == 'Sintel_AR':
        return sintel_trainer_ar.TrainFramework
    if name == 'KITTI':
        return kitti_trainer.TrainFramework
    if name == 'KITTI_AR':
        return kitti_trainer_ar.TrainFramework
    raise NotImplementedError(name)
| 530
| 28.5
| 57
|
py
|
ARFlow
|
ARFlow-master/models/pwclite.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.warp_utils import flow_warp
from .correlation_package.correlation import Correlation
# from .correlation_native import Correlation
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True):
    """Build a 2D convolution block with size-preserving ('same') padding.

    Fixes the duplicated ``nn.Conv2d`` construction: the convolution is now
    built once and the activation appended conditionally.

    Args:
        in_planes: number of input channels.
        out_planes: number of output channels.
        kernel_size: square kernel size.
        stride: convolution stride.
        dilation: dilation factor; padding ``((k - 1) * d) // 2`` keeps the
            spatial size unchanged for stride 1.
        isReLU: if True, append a ``LeakyReLU(0.1, inplace=True)``.

    Returns:
        An ``nn.Sequential`` containing the convolution and, optionally,
        the activation.
    """
    layers = [nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                        dilation=dilation,
                        padding=((kernel_size - 1) * dilation) // 2, bias=True)]
    if isReLU:
        layers.append(nn.LeakyReLU(0.1, inplace=True))
    return nn.Sequential(*layers)
class FeatureExtractor(nn.Module):
    """Feature pyramid extractor: one stride-2 stage per channel transition.

    Fixes: the loop variable in ``forward`` shadowed the module-level
    ``conv`` factory function; it is renamed. The unused ``enumerate``
    index in ``__init__`` is removed.
    """
    def __init__(self, num_chs):
        """Args:
            num_chs: list of channel counts; stage i maps num_chs[i] ->
                num_chs[i+1] at half resolution.
        """
        super(FeatureExtractor, self).__init__()
        self.num_chs = num_chs
        self.convs = nn.ModuleList()
        for ch_in, ch_out in zip(num_chs[:-1], num_chs[1:]):
            layer = nn.Sequential(
                conv(ch_in, ch_out, stride=2),
                conv(ch_out, ch_out)
            )
            self.convs.append(layer)
    def forward(self, x):
        """Return the feature pyramid ordered coarsest level first."""
        feature_pyramid = []
        for stage in self.convs:
            x = stage(x)
            feature_pyramid.append(x)
        return feature_pyramid[::-1]
class FlowEstimatorDense(nn.Module):
    """Densely-connected flow decoder.

    Each conv's input is the concatenation of the original input and all
    previous conv outputs, hence the growing input widths
    (128 + 128 + 96 + 64 + 32 = 448 extra channels at the end).
    """
    def __init__(self, ch_in):
        super(FlowEstimatorDense, self).__init__()
        self.conv1 = conv(ch_in, 128)
        self.conv2 = conv(ch_in + 128, 128)
        self.conv3 = conv(ch_in + 256, 96)
        self.conv4 = conv(ch_in + 352, 64)
        self.conv5 = conv(ch_in + 416, 32)
        # Width of the intermediate feature returned alongside the flow;
        # consumed by ContextNetwork in PWCLite.
        self.feat_dim = ch_in + 448
        # Final 2-channel flow head, no activation.
        self.conv_last = conv(ch_in + 448, 2, isReLU=False)
    def forward(self, x):
        """Return (dense intermediate feature, predicted flow)."""
        x1 = torch.cat([self.conv1(x), x], dim=1)
        x2 = torch.cat([self.conv2(x1), x1], dim=1)
        x3 = torch.cat([self.conv3(x2), x2], dim=1)
        x4 = torch.cat([self.conv4(x3), x3], dim=1)
        x5 = torch.cat([self.conv5(x4), x4], dim=1)
        x_out = self.conv_last(x5)
        return x5, x_out
class FlowEstimatorReduce(nn.Module):
    # can reduce 25% of training time.
    """Lighter flow decoder: each conv sees only the two previous outputs
    instead of the full dense concatenation used by FlowEstimatorDense."""
    def __init__(self, ch_in):
        super(FlowEstimatorReduce, self).__init__()
        self.conv1 = conv(ch_in, 128)
        self.conv2 = conv(128, 128)
        self.conv3 = conv(128 + 128, 96)
        self.conv4 = conv(128 + 96, 64)
        self.conv5 = conv(96 + 64, 32)
        # Width of the intermediate feature returned alongside the flow;
        # consumed by ContextNetwork in PWCLite.
        self.feat_dim = 32
        # Final 2-channel flow head, no activation.
        self.predict_flow = conv(64 + 32, 2, isReLU=False)
    def forward(self, x):
        """Return (intermediate feature x5, predicted flow)."""
        x1 = self.conv1(x)
        x2 = self.conv2(x1)
        x3 = self.conv3(torch.cat([x1, x2], dim=1))
        x4 = self.conv4(torch.cat([x2, x3], dim=1))
        x5 = self.conv5(torch.cat([x3, x4], dim=1))
        flow = self.predict_flow(torch.cat([x4, x5], dim=1))
        return x5, flow
class ContextNetwork(nn.Module):
    """Flow refinement network with progressively dilated convolutions
    (dilations 1,2,4,8,16,1) ending in a 2-channel residual-flow head."""
    def __init__(self, ch_in):
        super(ContextNetwork, self).__init__()
        self.convs = nn.Sequential(
            conv(ch_in, 128, 3, 1, 1),
            conv(128, 128, 3, 1, 2),
            conv(128, 128, 3, 1, 4),
            conv(128, 96, 3, 1, 8),
            conv(96, 64, 3, 1, 16),
            conv(64, 32, 3, 1, 1),
            conv(32, 2, isReLU=False)
        )
    def forward(self, x):
        """Return a 2-channel flow refinement for the given features."""
        return self.convs(x)
class PWCLite(nn.Module):
    """Lightweight PWC-Net variant with decoders shared across pyramid levels.

    Accepts 2, 3, or 5 frames stacked along the channel axis (3 channels per
    frame) and returns per-level flow pyramids, finest level first.
    """
    def __init__(self, cfg):
        super(PWCLite, self).__init__()
        self.search_range = 4
        self.num_chs = [3, 16, 32, 64, 96, 128, 192]
        self.output_level = 4
        self.num_levels = 7
        self.leakyRELU = nn.LeakyReLU(0.1, inplace=True)
        self.feature_pyramid_extractor = FeatureExtractor(self.num_chs)
        self.upsample = cfg.upsample
        self.n_frames = cfg.n_frames
        self.reduce_dense = cfg.reduce_dense
        self.corr = Correlation(pad_size=self.search_range, kernel_size=1,
                                max_displacement=self.search_range, stride1=1,
                                stride2=1, corr_multiply=1)
        # (2 * 4 + 1)^2 = 81 correlation channels per frame pair.
        self.dim_corr = (self.search_range * 2 + 1) ** 2
        # 32 reduced pyramid channels + (corr + 2 flow) per neighbor frame.
        self.num_ch_in = 32 + (self.dim_corr + 2) * (self.n_frames - 1)
        if self.reduce_dense:
            self.flow_estimators = FlowEstimatorReduce(self.num_ch_in)
        else:
            self.flow_estimators = FlowEstimatorDense(self.num_ch_in)
        self.context_networks = ContextNetwork(
            (self.flow_estimators.feat_dim + 2) * (self.n_frames - 1))
        # Per-level 1x1 convs reducing pyramid features to 32 channels
        # (ordered coarsest level first, matching the extractor output).
        self.conv_1x1 = nn.ModuleList([conv(192, 32, kernel_size=1, stride=1, dilation=1),
                                       conv(128, 32, kernel_size=1, stride=1, dilation=1),
                                       conv(96, 32, kernel_size=1, stride=1, dilation=1),
                                       conv(64, 32, kernel_size=1, stride=1, dilation=1),
                                       conv(32, 32, kernel_size=1, stride=1, dilation=1)])
    def num_parameters(self):
        """Return the number of trainable parameter elements."""
        return sum(
            [p.data.nelement() if p.requires_grad else 0 for p in self.parameters()])
    def init_weights(self):
        """Kaiming-initialize all conv layers and zero their biases.
        Bug fix: the original iterated ``self.named_modules()``, which yields
        ``(name, module)`` tuples, so the ``isinstance`` checks never matched
        and this method silently initialized nothing. Iterate the modules
        themselves instead.
        """
        for layer in self.modules():
            if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(layer.weight)
                if layer.bias is not None:
                    nn.init.constant_(layer.bias, 0)
    def forward_2_frames(self, x1_pyramid, x2_pyramid):
        """Coarse-to-fine flow estimation between two feature pyramids.
        Returns the flow pyramid, finest level first.
        """
        # outputs
        flows = []
        # init with a zero flow at the coarsest level
        b_size, _, h_x1, w_x1 = x1_pyramid[0].size()
        init_dtype = x1_pyramid[0].dtype
        init_device = x1_pyramid[0].device
        flow = torch.zeros(b_size, 2, h_x1, w_x1, dtype=init_dtype,
                           device=init_device).float()
        for l, (x1, x2) in enumerate(zip(x1_pyramid, x2_pyramid)):
            # warping: at the coarsest level the flow is zero, skip the warp
            if l == 0:
                x2_warp = x2
            else:
                # upsample flow to this level (values doubled with resolution)
                flow = F.interpolate(flow * 2, scale_factor=2,
                                     mode='bilinear', align_corners=True)
                x2_warp = flow_warp(x2, flow)
            # correlation cost volume between x1 and the warped x2
            out_corr = self.corr(x1, x2_warp)
            out_corr_relu = self.leakyRELU(out_corr)
            # concat and estimate residual flow
            x1_1by1 = self.conv_1x1[l](x1)
            x_intm, flow_res = self.flow_estimators(
                torch.cat([out_corr_relu, x1_1by1, flow], dim=1))
            flow = flow + flow_res
            # context-network refinement
            flow_fine = self.context_networks(torch.cat([x_intm, flow], dim=1))
            flow = flow + flow_fine
            flows.append(flow)
            # stop decoding above the configured output level
            if l == self.output_level:
                break
        if self.upsample:
            # bring all levels up by 4x to (near) input resolution
            flows = [F.interpolate(flow * 4, scale_factor=4,
                                   mode='bilinear', align_corners=True) for flow in flows]
        return flows[::-1]
    def forward_3_frames(self, x0_pyramid, x1_pyramid, x2_pyramid):
        """Joint estimation of backward (1->0) and forward (1->2) flow.
        The 4-channel flow tensor holds [flow_10 | flow_12]; each direction's
        estimate is conditioned on the other via the swapped feature lists.
        Returns (flows_10, flows_12), each finest level first.
        """
        # outputs
        flows = []
        # init with zero flows at the coarsest level
        b_size, _, h_x1, w_x1 = x1_pyramid[0].size()
        init_dtype = x1_pyramid[0].dtype
        init_device = x1_pyramid[0].device
        flow = torch.zeros(b_size, 4, h_x1, w_x1, dtype=init_dtype,
                           device=init_device).float()
        for l, (x0, x1, x2) in enumerate(zip(x0_pyramid, x1_pyramid, x2_pyramid)):
            # warping
            if l == 0:
                x0_warp = x0
                x2_warp = x2
            else:
                flow = F.interpolate(flow * 2, scale_factor=2,
                                     mode='bilinear', align_corners=True)
                x0_warp = flow_warp(x0, flow[:, :2])
                x2_warp = flow_warp(x2, flow[:, 2:])
            # correlation with both neighbors
            corr_10, corr_12 = self.corr(x1, x0_warp), self.corr(x1, x2_warp)
            corr_relu_10, corr_relu_12 = self.leakyRELU(corr_10), self.leakyRELU(corr_12)
            # concat and estimate flow; each direction also sees the negated
            # opposite-direction flow as a motion prior
            x1_1by1 = self.conv_1x1[l](x1)
            feat_10 = [x1_1by1, corr_relu_10, corr_relu_12, flow[:, :2], -flow[:, 2:]]
            feat_12 = [x1_1by1, corr_relu_12, corr_relu_10, flow[:, 2:], -flow[:, :2]]
            x_intm_10, flow_res_10 = self.flow_estimators(torch.cat(feat_10, dim=1))
            x_intm_12, flow_res_12 = self.flow_estimators(torch.cat(feat_12, dim=1))
            flow_res = torch.cat([flow_res_10, flow_res_12], dim=1)
            flow = flow + flow_res
            # context-network refinement, again cross-conditioned
            feat_10 = [x_intm_10, x_intm_12, flow[:, :2], -flow[:, 2:]]
            feat_12 = [x_intm_12, x_intm_10, flow[:, 2:], -flow[:, :2]]
            flow_res_10 = self.context_networks(torch.cat(feat_10, dim=1))
            flow_res_12 = self.context_networks(torch.cat(feat_12, dim=1))
            flow_res = torch.cat([flow_res_10, flow_res_12], dim=1)
            flow = flow + flow_res
            flows.append(flow)
            if l == self.output_level:
                break
        if self.upsample:
            flows = [F.interpolate(flow * 4, scale_factor=4,
                                   mode='bilinear', align_corners=True) for flow in flows]
        flows_10 = [flo[:, :2] for flo in flows[::-1]]
        flows_12 = [flo[:, 2:] for flo in flows[::-1]]
        return flows_10, flows_12
    def forward(self, x, with_bk=False):
        """Run flow estimation on 2, 3, or 5 channel-stacked frames.
        Args:
            x: (B, 3 * n_frames, H, W) stacked RGB frames.
            with_bk: also compute backward flows where applicable.
        Returns:
            dict with 'flows_fw' (and 'flows_bw' when requested/available).
        Raises:
            NotImplementedError: for unsupported frame counts.
        """
        # Integer division: the channel axis holds n_frames RGB images.
        n_frames = x.size(1) // 3
        imgs = [x[:, 3 * i: 3 * i + 3] for i in range(n_frames)]
        # Per-frame pyramids; the raw image is appended as the finest "level".
        x = [self.feature_pyramid_extractor(img) + [img] for img in imgs]
        res_dict = {}
        if n_frames == 2:
            res_dict['flows_fw'] = self.forward_2_frames(x[0], x[1])
            if with_bk:
                res_dict['flows_bw'] = self.forward_2_frames(x[1], x[0])
        elif n_frames == 3:
            flows_10, flows_12 = self.forward_3_frames(x[0], x[1], x[2])
            res_dict['flows_fw'], res_dict['flows_bw'] = flows_12, flows_10
        elif n_frames == 5:
            flows_10, flows_12 = self.forward_3_frames(x[0], x[1], x[2])
            flows_21, flows_23 = self.forward_3_frames(x[1], x[2], x[3])
            res_dict['flows_fw'] = [flows_12, flows_23]
            if with_bk:
                flows_32, flows_34 = self.forward_3_frames(x[2], x[3], x[4])
                res_dict['flows_bw'] = [flows_21, flows_32]
        else:
            raise NotImplementedError
        return res_dict
| 10,680
| 36.742049
| 90
|
py
|
ARFlow
|
ARFlow-master/models/correlation_native.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Correlation(nn.Module):
    """Pure-PyTorch correlation (cost volume) layer.

    Reference implementation matching the CUDA correlation op for
    kernel_size=1, stride 1: for every displacement (dy, dx) within
    ``max_displacement`` it averages the channel-wise product of ``x1``
    with the shifted ``x2``.
    """
    def __init__(self, max_displacement=4, *args, **kwargs):
        # Extra args are accepted (and ignored) for signature compatibility
        # with the CUDA Correlation module.
        super(Correlation, self).__init__()
        self.max_displacement = max_displacement
        self.output_dim = 2 * self.max_displacement + 1
        self.pad_size = self.max_displacement
    def forward(self, x1, x2):
        """Return an (N, output_dim**2, H, W) cost volume."""
        _, _, H, W = x1.size()
        x2_padded = F.pad(x2, [self.pad_size] * 4)
        offsets = range(self.output_dim)
        cost_volume = [
            torch.mean(x1 * x2_padded[:, :, dy:(dy + H), dx:(dx + W)], 1, keepdim=True)
            for dy in offsets
            for dx in offsets
        ]
        return torch.cat(cost_volume, 1)
if __name__ == '__main__':
    # Benchmark / consistency check: compare this pure-PyTorch correlation
    # against the custom CUDA kernel on random inputs of varying sizes.
    import time
    import random
    from correlation_package.correlation import Correlation as Correlation_cuda
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    corr1 = Correlation(max_displacement=4, kernel_size=1, stride1=1,
                        stride2=1, corr_multiply=1).to(device)
    corr2 = Correlation_cuda(pad_size=4, kernel_size=1, max_displacement=4, stride1=1,
                             stride2=1, corr_multiply=1)
    t1_sum = 0
    t2_sum = 0
    for i in range(50):
        # Randomized shapes per trial.
        C = random.choice([128, 256])
        H = random.choice([128, 256])  # , 512
        W = random.choice([64, 128])  # , 256
        x1 = torch.randn(4, C, H, W, requires_grad=True).to(device)
        x2 = torch.randn(4, C, H, W).to(device)
        # Time the CUDA op (forward, then backward).
        end = time.time()
        y2 = corr2(x1, x2)
        t2_f = time.time() - end
        end = time.time()
        y2.sum().backward()
        t2_b = time.time() - end
        # Time the PyTorch reference (forward, then backward).
        end = time.time()
        y1 = corr1(x1, x2)
        t1_f = time.time() - end
        end = time.time()
        y1.sum().backward()
        t1_b = time.time() - end
        # Both implementations must agree numerically.
        assert torch.allclose(y1, y2, atol=1e-7)
        print('Forward: cuda: {:.3f}ms, pytorch: {:.3f}ms'.format(t1_f * 100, t2_f * 100))
        print(
            'Backward: cuda: {:.3f}ms, pytorch: {:.3f}ms'.format(t1_b * 100, t2_b * 100))
        # Skip the first few warm-up iterations when accumulating totals.
        if i < 3:
            continue
        t1_sum += t1_b + t1_f
        t2_sum += t2_b + t2_f
    print('cuda: {:.3f}s, pytorch: {:.3f}s'.format(t1_sum, t2_sum))
...
| 2,336
| 28.961538
| 90
|
py
|
ARFlow
|
ARFlow-master/models/get_model.py
|
from .pwclite import PWCLite
def get_model(cfg):
    """Instantiate the flow network selected by ``cfg.type``.

    Args:
        cfg: model config; ``cfg.type`` names the architecture.

    Returns:
        The constructed model.

    Raises:
        NotImplementedError: if ``cfg.type`` is not a known model.
    """
    if cfg.type != 'pwclite':
        raise NotImplementedError(cfg.type)
    return PWCLite(cfg)
| 180
| 19.111111
| 43
|
py
|
ARFlow
|
ARFlow-master/models/correlation_package/correlation.py
|
import torch
from torch.nn.modules.module import Module
from torch.autograd import Function
import correlation_cuda
class CorrelationFunction(Function):
    """Autograd binding for the custom CUDA correlation kernel.

    NOTE(review): this is a legacy *instance-style* ``autograd.Function``
    (stateful ``__init__`` plus non-static forward/backward). Modern PyTorch
    requires static ``forward(ctx, ...)``/``backward(ctx, ...)`` methods —
    confirm the pinned PyTorch version before upgrading.
    """
    def __init__(self, pad_size=3, kernel_size=3, max_displacement=20, stride1=1, stride2=2, corr_multiply=1):
        super(CorrelationFunction, self).__init__()
        self.pad_size = pad_size
        self.kernel_size = kernel_size
        self.max_displacement = max_displacement
        self.stride1 = stride1
        self.stride2 = stride2
        self.corr_multiply = corr_multiply
        # self.out_channel = ((max_displacement/stride2)*2 + 1) * ((max_displacement/stride2)*2 + 1)
    def forward(self, input1, input2):
        """Run the CUDA forward kernel; returns the cost volume."""
        # Inputs are saved for the backward pass.
        self.save_for_backward(input1, input2)
        with torch.cuda.device_of(input1):
            # Scratch buffers allocated on the same device as the inputs;
            # the kernel resizes/fills them.
            rbot1 = input1.new()
            rbot2 = input2.new()
            output = input1.new()
            correlation_cuda.forward(input1, input2, rbot1, rbot2, output,
                self.pad_size, self.kernel_size, self.max_displacement,self.stride1, self.stride2, self.corr_multiply)
        return output
    def backward(self, grad_output):
        """Run the CUDA backward kernel; returns gradients w.r.t. both inputs."""
        input1, input2 = self.saved_tensors
        with torch.cuda.device_of(input1):
            rbot1 = input1.new()
            rbot2 = input2.new()
            grad_input1 = input1.new()
            grad_input2 = input2.new()
            correlation_cuda.backward(input1, input2, rbot1, rbot2, grad_output, grad_input1, grad_input2,
                self.pad_size, self.kernel_size, self.max_displacement,self.stride1, self.stride2, self.corr_multiply)
        return grad_input1, grad_input2
class Correlation(Module):
    """nn.Module wrapper around the CUDA correlation op.

    Stores the kernel parameters and builds a fresh ``CorrelationFunction``
    per forward call (required by the legacy instance-style Function above).
    """
    def __init__(self, pad_size=0, kernel_size=0, max_displacement=0, stride1=1, stride2=2, corr_multiply=1):
        super(Correlation, self).__init__()
        self.pad_size = pad_size
        self.kernel_size = kernel_size
        self.max_displacement = max_displacement
        self.stride1 = stride1
        self.stride2 = stride2
        self.corr_multiply = corr_multiply
    def forward(self, input1, input2):
        """Return the correlation cost volume of ``input1`` and ``input2``."""
        result = CorrelationFunction(self.pad_size, self.kernel_size, self.max_displacement, self.stride1, self.stride2, self.corr_multiply)(input1, input2)
        return result
| 2,265
| 34.968254
| 156
|
py
|
ARFlow
|
ARFlow-master/models/correlation_package/setup.py
|
#!/usr/bin/env python3
import os
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
# Host C++ compiler flags.
cxx_args = ['-std=c++11']
# Device code generation targets (Maxwell through Pascal) and host compiler
# override for nvcc.
nvcc_args = [
    '-gencode', 'arch=compute_50,code=sm_50',
    '-gencode', 'arch=compute_52,code=sm_52',
    '-gencode', 'arch=compute_60,code=sm_60',
    '-gencode', 'arch=compute_61,code=sm_61',
    '-gencode', 'arch=compute_61,code=compute_61',
    '-ccbin', '/usr/bin/gcc'
]
# Build the `correlation_cuda` extension from the C++ binding and the CUDA
# kernel. NOTE(review): 'cuda-path' is not a standard extra_compile_args key
# for CUDAExtension — confirm it has any effect with this toolchain.
setup(
    name='correlation_cuda',
    ext_modules=[
        CUDAExtension('correlation_cuda', [
            'correlation_cuda.cc',
            'correlation_cuda_kernel.cu'
        ], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args, 'cuda-path': ['/usr/local/cuda-9.0']})
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
| 813
| 26.133333
| 105
|
py
|
ARFlow
|
ARFlow-master/models/correlation_package/__init__.py
| 0
| 0
| 0
|
py
|
|
ARFlow
|
ARFlow-master/datasets/get_dataset.py
|
import copy
from torchvision import transforms
from torch.utils.data import ConcatDataset
from transforms.co_transforms import get_co_transforms
from transforms.ar_transforms.ap_transforms import get_ap_transforms
from transforms import sep_transforms
from datasets.flow_datasets import SintelRaw, Sintel
from datasets.flow_datasets import KITTIRawFile, KITTIFlow, KITTIFlowMV
def _sintel_valid_set(cfg, input_transform):
    """Concatenated Sintel validation set over both renderings (clean + final).

    Frames are zoomed to cfg.test_shape; flow targets become tensors.
    """
    valid_input_transform = copy.deepcopy(input_transform)
    valid_input_transform.transforms.insert(0, sep_transforms.Zoom(*cfg.test_shape))
    valid_sets = [Sintel(cfg.root_sintel, n_frames=cfg.val_n_frames, type=render,
                         split='training', subsplit=cfg.val_subsplit,
                         transform=valid_input_transform,
                         target_transform={'flow': sep_transforms.ArrayToTensor()})
                  for render in ('clean', 'final')]
    return ConcatDataset(valid_sets)


def _kitti_valid_set(cfg, input_transform):
    """Concatenated KITTI validation set over the 2015 and 2012 flow benchmarks."""
    valid_input_transform = copy.deepcopy(input_transform)
    valid_input_transform.transforms.insert(0, sep_transforms.Zoom(*cfg.test_shape))
    valid_sets = [KITTIFlow(root, n_frames=cfg.val_n_frames,
                            transform=valid_input_transform)
                  for root in (cfg.root_kitti15, cfg.root_kitti12)]
    return ConcatDataset(valid_sets)


def get_dataset(all_cfg):
    """Build (train_set, valid_set) from all_cfg.data.type.

    Supported types: 'Sintel_Flow', 'Sintel_Raw', 'KITTI_Raw', 'KITTI_MV'.
    Raises NotImplementedError for any other type.
    """
    cfg = all_cfg.data
    # Shared base pipeline: to-tensor + scale pixel values into [0, 1].
    input_transform = transforms.Compose([
        sep_transforms.ArrayToTensor(),
        transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]),
    ])
    co_transform = get_co_transforms(aug_args=all_cfg.data_aug)
    if cfg.type == 'Sintel_Flow':
        # Unsupervised training on both renderings; no flow targets at train time.
        ap_transform = get_ap_transforms(cfg.at_cfg) if cfg.run_at else None
        train_sets = [Sintel(cfg.root_sintel, n_frames=cfg.train_n_frames, type=render,
                             split='training', subsplit=cfg.train_subsplit,
                             with_flow=False,
                             ap_transform=ap_transform,
                             transform=input_transform,
                             co_transform=co_transform)
                      for render in ('clean', 'final')]
        train_set = ConcatDataset(train_sets)
        valid_set = _sintel_valid_set(cfg, input_transform)
    elif cfg.type == 'Sintel_Raw':
        train_set = SintelRaw(cfg.root_sintel_raw, n_frames=cfg.train_n_frames,
                              transform=input_transform, co_transform=co_transform)
        valid_set = _sintel_valid_set(cfg, input_transform)
    elif cfg.type == 'KITTI_Raw':
        train_input_transform = copy.deepcopy(input_transform)
        train_input_transform.transforms.insert(0, sep_transforms.Zoom(*cfg.train_shape))
        ap_transform = get_ap_transforms(cfg.at_cfg) if cfg.run_at else None
        train_set = KITTIRawFile(
            cfg.root,
            cfg.train_file,
            cfg.train_n_frames,
            transform=train_input_transform,
            ap_transform=ap_transform,
            co_transform=co_transform  # no target here
        )
        valid_set = _kitti_valid_set(cfg, input_transform)
    elif cfg.type == 'KITTI_MV':
        train_input_transform = copy.deepcopy(input_transform)
        train_input_transform.transforms.insert(0, sep_transforms.Zoom(*cfg.train_shape))
        root_flow = cfg.root_kitti15 if cfg.train_15 else cfg.root_kitti12
        ap_transform = get_ap_transforms(cfg.at_cfg) if cfg.run_at else None
        train_set = KITTIFlowMV(
            root_flow,
            cfg.train_n_frames,
            transform=train_input_transform,
            ap_transform=ap_transform,
            co_transform=co_transform  # no target here
        )
        valid_set = _kitti_valid_set(cfg, input_transform)
    else:
        raise NotImplementedError(cfg.type)
    return train_set, valid_set
| 5,969
| 47.536585
| 89
|
py
|
ARFlow
|
ARFlow-master/datasets/flow_datasets.py
|
import imageio
import numpy as np
import random
from path import Path
from abc import abstractmethod, ABCMeta
from torch.utils.data import Dataset
from utils.flow_utils import load_flow
class ImgSeqDataset(Dataset, metaclass=ABCMeta):
    """Abstract base for datasets that yield short image sequences.

    Subclasses implement `collect_samples()` and return a list of dicts with
    key 'imgs' (n_frames image paths relative to `root`) and, optionally,
    'flow' and/or 'mask' target file paths.
    """
    def __init__(self, root, n_frames, input_transform=None, co_transform=None,
                 target_transform=None, ap_transform=None):
        # root: dataset base dir; n_frames: images per sample.
        # input_transform: per-image transform applied after co_transform.
        # co_transform: joint transform over (images, target).
        # ap_transform: appearance/photometric transform producing 'imgX_ph' copies.
        # target_transform: dict of per-target-key transforms.
        self.root = Path(root)
        self.n_frames = n_frames
        self.input_transform = input_transform
        self.co_transform = co_transform
        self.ap_transform = ap_transform
        self.target_transform = target_transform
        # Samples are enumerated once, up front.
        self.samples = self.collect_samples()
    @abstractmethod
    def collect_samples(self):
        # Subclasses return the list of sample dicts described in the class docstring.
        pass
    def _load_sample(self, s):
        """Read images as float32 arrays plus any flow/mask targets for one sample."""
        images = s['imgs']
        images = [imageio.imread(self.root / p).astype(np.float32) for p in images]
        target = {}
        if 'flow' in s:
            target['flow'] = load_flow(self.root / s['flow'])
        if 'mask' in s:
            # 0~255 HxWx1
            mask = imageio.imread(self.root / s['mask']).astype(np.float32) / 255.
            if len(mask.shape) == 3:
                mask = mask[:, :, 0]
            target['mask'] = np.expand_dims(mask, -1)
        return images, target
    def __len__(self):
        return len(self.samples)
    def __getitem__(self, idx):
        """Return a dict with keys 'img1'..'imgN', optional 'imgX_ph' copies, and 'target'."""
        images, target = self._load_sample(self.samples[idx])
        if self.co_transform is not None:
            # In unsupervised learning, there is no need to change target with image
            images, _ = self.co_transform(images, {})
        if self.input_transform is not None:
            images = [self.input_transform(i) for i in images]
        data = {'img{}'.format(i + 1): p for i, p in enumerate(images)}
        if self.ap_transform is not None:
            # clone() keeps the un-augmented images intact alongside the '_ph' copies.
            imgs_ph = self.ap_transform(
                [data['img{}'.format(i + 1)].clone() for i in range(self.n_frames)])
            for i in range(self.n_frames):
                data['img{}_ph'.format(i + 1)] = imgs_ph[i]
        if self.target_transform is not None:
            for key in self.target_transform.keys():
                target[key] = self.target_transform[key](target[key])
        data['target'] = target
        return data
class SintelRaw(ImgSeqDataset):
    """Raw Sintel movie frames for unsupervised training (no targets).

    Every scene directory contributes all length-n_frames sliding windows of
    its sorted PNG frames.
    """
    def __init__(self, root, n_frames=2, transform=None, co_transform=None):
        super(SintelRaw, self).__init__(root, n_frames, input_transform=transform,
                                        co_transform=co_transform)
    def collect_samples(self):
        """Enumerate sliding windows of consecutive frames in each scene."""
        samples = []
        for scene in self.root.dirs():
            frames = scene.files('*.png')
            frames.sort()
            last_start = len(frames) - self.n_frames
            for start in range(last_start + 1):
                window = frames[start:start + self.n_frames]
                samples.append({'imgs': [self.root.relpathto(p) for p in window]})
        return samples
class Sintel(ImgSeqDataset):
    """MPI-Sintel dataset (clean or final rendering), optionally with flow targets.

    `subsplit` selects an unofficial train/val partition of the official
    training scenes ('trainval' keeps everything).
    """
    def __init__(self, root, n_frames=2, type='clean', split='training',
                 subsplit='trainval', with_flow=True, ap_transform=None,
                 transform=None, target_transform=None, co_transform=None, ):
        # type: rendering pass ('clean' or 'final'); split: 'training'/'test' dir.
        self.dataset_type = type
        self.with_flow = with_flow
        self.split = split
        self.subsplit = subsplit
        self.training_scene = ['alley_1', 'ambush_4', 'ambush_6', 'ambush_7', 'bamboo_2',
                               'bandage_2', 'cave_2', 'market_2', 'market_5', 'shaman_2',
                               'sleeping_2', 'temple_3']  # Unofficial train-val split
        root = Path(root) / split
        super(Sintel, self).__init__(root, n_frames, input_transform=transform,
                                     target_transform=target_transform,
                                     co_transform=co_transform, ap_transform=ap_transform)
    def collect_samples(self):
        """Enumerate samples by walking the .flo files; skips incomplete ones."""
        img_dir = self.root / Path(self.dataset_type)
        flow_dir = self.root / 'flow'
        assert img_dir.isdir() and flow_dir.isdir()
        samples = []
        for flow_map in sorted((self.root / flow_dir).glob('*/*.flo')):
            info = flow_map.splitall()
            scene, filename = info[-2:]
            # Frame id is encoded in the file name: frame_XXXX.flo
            fid = int(filename[-8:-4])
            if self.split == 'training' and self.subsplit != 'trainval':
                # Apply the unofficial train/val partition.
                if self.subsplit == 'train' and scene not in self.training_scene:
                    continue
                if self.subsplit == 'val' and scene in self.training_scene:
                    continue
            s = {'imgs': [img_dir / scene / 'frame_{:04d}.png'.format(fid + i) for i in
                          range(self.n_frames)]}
            try:
                assert all([p.isfile() for p in s['imgs']])
                if self.with_flow:
                    if self.n_frames == 3:
                        # for img1 img2 img3, only flow_23 will be evaluated
                        s['flow'] = flow_dir / scene / 'frame_{:04d}.flo'.format(fid + 1)
                    elif self.n_frames == 2:
                        # for img1 img2, flow_12 will be evaluated
                        s['flow'] = flow_dir / scene / 'frame_{:04d}.flo'.format(fid)
                    else:
                        raise NotImplementedError(
                            'n_frames {} with flow or mask'.format(self.n_frames))
                if self.with_flow:
                    assert s['flow'].isfile()
            except AssertionError:
                # Missing image or flow file: report and drop the sample.
                print('Incomplete sample for: {}'.format(s['imgs'][0]))
                continue
            samples.append(s)
        return samples
class KITTIRawFile(ImgSeqDataset):
    """KITTI raw sequences listed in a split file.

    Each line of `sp_file` holds at least n_frames whitespace-separated image
    paths (relative to `root`) forming one training sample.
    """
    def __init__(self, root, sp_file, n_frames=2, ap_transform=None,
                 transform=None, target_transform=None, co_transform=None):
        self.sp_file = sp_file
        super(KITTIRawFile, self).__init__(root, n_frames,
                                           input_transform=transform,
                                           target_transform=target_transform,
                                           co_transform=co_transform,
                                           ap_transform=ap_transform)
    def collect_samples(self):
        """Parse the split file; one 'imgs' sample per line."""
        with open(self.sp_file, 'r') as f:
            lines = f.readlines()
        samples = []
        for line in lines:
            tokens = line.split()
            samples.append({'imgs': [tokens[i] for i in range(self.n_frames)]})
        return samples
class KITTIFlowMV(ImgSeqDataset):
    """
    This dataset is used for unsupervised training only
    """
    def __init__(self, root, n_frames=2,
                 transform=None, co_transform=None, ap_transform=None, ):
        super(KITTIFlowMV, self).__init__(root, n_frames,
                                          input_transform=transform,
                                          co_transform=co_transform,
                                          ap_transform=ap_transform)
    def collect_samples(self):
        """Collect multi-view sequences from both cameras, skipping GT frames.

        Windows that touch frames 09-12 are dropped, presumably to keep the
        frames with ground-truth flow (the _10/_11 pair plus neighbours) out
        of unsupervised training — TODO confirm against the training script.
        """
        flow_occ_dir = 'flow_' + 'occ'
        assert (self.root / flow_occ_dir).isdir()
        # image_2 / image_3 are the left / right color cameras.
        img_l_dir, img_r_dir = 'image_2', 'image_3'
        assert (self.root / img_l_dir).isdir() and (self.root / img_r_dir).isdir()
        samples = []
        for flow_map in sorted((self.root / flow_occ_dir).glob('*.png')):
            flow_map = flow_map.basename()
            # Strip the '_10.png' suffix to get the scene id.
            root_filename = flow_map[:-7]
            for img_dir in [img_l_dir, img_r_dir]:
                img_list = (self.root / img_dir).files('*{}*.png'.format(root_filename))
                img_list.sort()
                for st in range(0, len(img_list) - self.n_frames + 1):
                    seq = img_list[st:st + self.n_frames]
                    sample = {}
                    sample['imgs'] = []
                    for i, file in enumerate(seq):
                        # Frame number is the 2 digits before '.png'.
                        frame_id = int(file[-6:-4])
                        if 12 >= frame_id >= 9:
                            break
                        sample['imgs'].append(self.root.relpathto(file))
                    # Only keep windows that were not cut short by the break.
                    if len(sample['imgs']) == self.n_frames:
                        samples.append(sample)
        return samples
class KITTIFlow(ImgSeqDataset):
    """
    This dataset is used for validation only, so all target (flow) files are
    returned as file paths and no transform is applied to targets.
    """
    def __init__(self, root, n_frames=2, transform=None):
        super(KITTIFlow, self).__init__(root, n_frames, input_transform=transform)
    def __getitem__(self, idx):
        """Return transformed images plus flow file paths and raw image metadata."""
        s = self.samples[idx]
        # img 1 2 for 2 frames, img 0 1 2 for 3 frames.
        st = 1 if self.n_frames == 2 else 0
        ed = st + self.n_frames
        imgs = [s['img{}'.format(i)] for i in range(st, ed)]
        inputs = [imageio.imread(self.root / p).astype(np.float32) for p in imgs]
        raw_size = inputs[0].shape[:2]
        # Targets are passed through as absolute file paths (no transform).
        data = {
            'flow_occ': self.root / s['flow_occ'],
            'flow_noc': self.root / s['flow_noc'],
        }
        data.update({  # for test set
            'im_shape': raw_size,
            'img1_path': self.root / s['img1'],
        })
        if self.input_transform is not None:
            inputs = [self.input_transform(i) for i in inputs]
        data.update({'img{}'.format(i + 1): inputs[i] for i in range(self.n_frames)})
        return data
    def collect_samples(self):
        '''Will search in training folder for folders 'flow_noc' or 'flow_occ'
        and 'colored_0' (KITTI 2012) or 'image_2' (KITTI 2015) '''
        flow_occ_dir = 'flow_' + 'occ'
        flow_noc_dir = 'flow_' + 'noc'
        assert (self.root / flow_occ_dir).isdir()
        img_dir = 'image_2'
        assert (self.root / img_dir).isdir()
        samples = []
        for flow_map in sorted((self.root / flow_occ_dir).glob('*.png')):
            flow_map = flow_map.basename()
            # Scene id = flow file name minus the '_10.png' suffix.
            root_filename = flow_map[:-7]
            flow_occ_map = flow_occ_dir + '/' + flow_map
            flow_noc_map = flow_noc_dir + '/' + flow_map
            s = {'flow_occ': flow_occ_map, 'flow_noc': flow_noc_map}
            # GT flow is defined between frames _10 and _11.
            img1 = img_dir + '/' + root_filename + '_10.png'
            img2 = img_dir + '/' + root_filename + '_11.png'
            assert (self.root / img1).isfile() and (self.root / img2).isfile()
            s.update({'img1': img1, 'img2': img2})
            if self.n_frames == 3:
                # Three-frame mode additionally needs the preceding frame _09.
                img0 = img_dir + '/' + root_filename + '_09.png'
                assert (self.root / img0).isfile()
                s.update({'img0': img0})
            samples.append(s)
        return samples
| 10,692
| 38.3125
| 90
|
py
|
ARFlow
|
ARFlow-master/utils/misc_utils.py
|
import collections
def update_dict(orig_dict, new_dict):
    """Recursively merge `new_dict` into `orig_dict` (in place) and return it.

    Nested mappings are merged key-by-key; any other value overwrites the
    existing entry.
    """
    # `collections.Mapping` was removed in Python 3.10; the abc module is the
    # supported home for the ABC.
    from collections.abc import Mapping
    for key, val in new_dict.items():
        if isinstance(val, Mapping):
            orig_dict[key] = update_dict(orig_dict.get(key, {}), val)
        else:
            orig_dict[key] = val
    return orig_dict
class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self, i=1, precision=3, names=None):
        # i: number of tracked quantities; precision: digits used by __repr__;
        # names: optional per-meter labels (length must equal i when given).
        self.meters = i
        self.precision = precision
        self.reset(self.meters)
        self.names = names
        if names is not None:
            assert self.meters == len(self.names)
        else:
            self.names = [''] * self.meters
    def reset(self, i):
        """Zero current value, running sum, average and count for `i` meters."""
        self.val = [0] * i
        self.avg = [0] * i
        self.sum = [0] * i
        self.count = [0] * i
    def update(self, val, n=1):
        """Record new value(s) `val` weighted by sample count(s) `n`.

        `val` and `n` may be scalars (broadcast to all meters) or lists of
        length `self.meters`.
        """
        if not isinstance(val, list):
            val = [val]
        if not isinstance(n, list):
            n = [n] * self.meters
        assert (len(val) == self.meters and len(n) == self.meters)
        for i in range(self.meters):
            self.count[i] += n[i]
        for i, v in enumerate(val):
            self.val[i] = v
            self.sum[i] += v * n[i]
            self.avg[i] = self.sum[i] / self.count[i]
    def __repr__(self):
        # "name current (name average)" per meter, formatted to `precision` digits.
        val = ' '.join(['{} {:.{}f}'.format(n, v, self.precision) for n, v in
                        zip(self.names, self.val)])
        avg = ' '.join(['{} {:.{}f}'.format(n, a, self.precision) for n, a in
                        zip(self.names, self.avg)])
        return '{} ({})'.format(val, avg)
| 1,626
| 30.288462
| 77
|
py
|
ARFlow
|
ARFlow-master/utils/warp_utils.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import inspect
def mesh_grid(B, H, W):
    """Return a B x 2 x H x W tensor of integer pixel coordinates (x, y)."""
    xs = torch.arange(0, W).view(1, 1, W).expand(B, H, W)  # BHW, x varies along W
    ys = torch.arange(0, H).view(1, H, 1).expand(B, H, W)  # BHW, y varies along H
    return torch.stack([xs, ys], dim=1)  # B2HW
def norm_grid(v_grid):
    """Normalize a B2HW pixel-coordinate grid to [-1, 1] and return BHW2.

    Output layout matches what F.grid_sample expects (x, y in the last dim).
    """
    _, _, H, W = v_grid.size()
    normed = torch.zeros_like(v_grid)
    normed[:, 0, :, :] = 2.0 * v_grid[:, 0, :, :] / (W - 1) - 1.0  # x: [0, W-1] -> [-1, 1]
    normed[:, 1, :, :] = 2.0 * v_grid[:, 1, :, :] / (H - 1) - 1.0  # y: [0, H-1] -> [-1, 1]
    return normed.permute(0, 2, 3, 1)  # BHW2
def get_corresponding_map(data):
    """
    :param data: unnormalized coordinates Bx2xHxW
    :return: Bx1xHxW

    Splats a weight of 1 from every source pixel onto its (sub-pixel) target
    location using bilinear weights over the 4 surrounding integer pixels.
    The result counts how much "mass" lands on each target pixel; pixels that
    receive (almost) none are candidates for occlusion.
    """
    B, _, H, W = data.size()
    # x = data[:, 0, :, :].view(B, -1).clamp(0, W - 1)  # BxN (N=H*W)
    # y = data[:, 1, :, :].view(B, -1).clamp(0, H - 1)
    x = data[:, 0, :, :].view(B, -1)  # BxN (N=H*W)
    y = data[:, 1, :, :].view(B, -1)
    # invalid = (x < 0) | (x > W - 1) | (y < 0) | (y > H - 1)  # BxN
    # invalid = invalid.repeat([1, 4])
    # NOTE: naming is inverted w.r.t. convention here — x1/y1 hold the FLOOR
    # and x0/y0 hold floor+1 (the ceil). The *_floor/*_ceil names are correct.
    x1 = torch.floor(x)
    x_floor = x1.clamp(0, W - 1)
    y1 = torch.floor(y)
    y_floor = y1.clamp(0, H - 1)
    x0 = x1 + 1
    x_ceil = x0.clamp(0, W - 1)
    y0 = y1 + 1
    y_ceil = y0.clamp(0, H - 1)
    # A corner is out of bounds iff clamping changed it.
    x_ceil_out = x0 != x_ceil
    y_ceil_out = y0 != y_ceil
    x_floor_out = x1 != x_floor
    y_floor_out = y1 != y_floor
    # One invalid flag per corner, in the same order as `indices`/`values` below.
    invalid = torch.cat([x_ceil_out | y_ceil_out,
                         x_ceil_out | y_floor_out,
                         x_floor_out | y_ceil_out,
                         x_floor_out | y_floor_out], dim=1)
    # encode coordinates, since the scatter function can only index along one axis
    corresponding_map = torch.zeros(B, H * W).type_as(data)
    indices = torch.cat([x_ceil + y_ceil * W,
                         x_ceil + y_floor * W,
                         x_floor + y_ceil * W,
                         x_floor + y_floor * W], 1).long()  # BxN (N=4*H*W)
    # Bilinear weights: each corner gets (1 - |dx|) * (1 - |dy|).
    values = torch.cat([(1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_ceil)),
                        (1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_floor)),
                        (1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_ceil)),
                        (1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_floor))],
                       1)
    # values = torch.ones_like(values)
    # Out-of-bounds corners contribute nothing.
    values[invalid] = 0
    corresponding_map.scatter_add_(1, indices, values)
    # decode coordinates
    corresponding_map = corresponding_map.view(B, H, W)
    return corresponding_map.unsqueeze(1)
def flow_warp(x, flow12, pad='border', mode='bilinear'):
    """Backward-warp `x` (BCHW) by `flow12` (B2HW) via grid_sample.

    Returns a reconstruction of frame 1 sampled from `x` at positions
    displaced by the flow.
    """
    B, _, H, W = x.size()
    sample_grid = norm_grid(mesh_grid(B, H, W).type_as(x) + flow12)  # BHW2 in [-1, 1]
    kwargs = {'mode': mode, 'padding_mode': pad}
    # Older torch versions have no align_corners argument; pass it only when present.
    if 'align_corners' in inspect.getfullargspec(torch.nn.functional.grid_sample).args:
        kwargs['align_corners'] = True
    return nn.functional.grid_sample(x, sample_grid, **kwargs)
def get_occu_mask_bidirection(flow12, flow21, scale=0.01, bias=0.5):
    """Forward-backward consistency occlusion mask (1.0 = occluded), Bx1xHxW.

    A pixel is occluded when |flow12 + warped(flow21)|^2 exceeds a threshold
    that grows with the flow magnitudes: scale * (|f12|^2 + |f21_w|^2) + bias.
    """
    flow21_warped = flow_warp(flow21, flow12, pad='zeros')
    mag12 = (flow12 * flow12).sum(1, keepdim=True)
    mag21 = (flow21_warped * flow21_warped).sum(1, keepdim=True)
    occ_thresh = scale * (mag12 + mag21) + bias
    fb_diff = flow12 + flow21_warped
    occluded = (fb_diff * fb_diff).sum(1, keepdim=True) > occ_thresh
    return occluded.float()
def get_occu_mask_backward(flow21, th=0.2):
    """Occlusion mask from backward-flow splat density (1.0 = occluded), Bx1xHxW.

    Pixels in frame 1 that receive (almost) no mass when frame-2 pixels are
    splatted through flow21 have no correspondence and are marked occluded.
    """
    B, _, H, W = flow21.size()
    target_coords = mesh_grid(B, H, W).type_as(flow21) + flow21  # B2HW
    density = get_corresponding_map(target_coords)  # Bx1xHxW
    occluded = density.clamp(min=0., max=1.) < th
    return occluded.float()
| 3,850
| 33.079646
| 106
|
py
|
ARFlow
|
ARFlow-master/utils/flow_utils.py
|
import torch
import cv2
import numpy as np
from matplotlib.colors import hsv_to_rgb
def load_flow(path):
    """Load optical flow from a file path (str).

    - '.png' (KITTI 16-bit): returns (flow HxWx2 float32, validity mask HxWx1).
    - '.flo' (Middlebury): returns flow HxWx2 float32.
    """
    if path.endswith('.png'):
        # for KITTI which uses 16bit PNG images
        # see 'https://github.com/ClementPinard/FlowNetPytorch/blob/master/datasets/KITTI.py'
        # The -1 is here to specify not to change the image depth (16bit), and is compatible
        # with both OpenCV2 and OpenCV3
        flo_file = cv2.imread(path, -1)
        flo_img = flo_file[:, :, 2:0:-1].astype(np.float32)  # (u, v) from (B, G, R) order
        invalid = (flo_file[:, :, 0] == 0)  # mask
        # KITTI encodes flow as uint16: value = flow * 64 + 32768.
        flo_img = flo_img - 32768
        flo_img = flo_img / 64
        # Avoid exact zeros in valid pixels so they are distinguishable from invalid ones.
        flo_img[np.abs(flo_img) < 1e-10] = 1e-10
        flo_img[invalid, :] = 0
        return flo_img, np.expand_dims(flo_file[:, :, 0], 2)
    else:
        with open(path, 'rb') as f:
            magic = np.fromfile(f, np.float32, count=1)
            assert (202021.25 == magic), 'Magic number incorrect. Invalid .flo file'
            # .flo header stores width first, then height. The original code
            # read these into variables named `h` and `w` (swapped) and
            # compensated in np.resize(data, (w, h, 2)); use honest names.
            width = int(np.fromfile(f, np.int32, count=1)[0])
            height = int(np.fromfile(f, np.int32, count=1)[0])
            data = np.fromfile(f, np.float32, count=2 * width * height)
            # reshape (rather than np.resize) raises on truncated files
            # instead of silently recycling data.
            data2D = data.reshape(height, width, 2)
            return data2D
def flow_to_image(flow, max_flow=256):
    """Render a HxWx2 flow field as an RGB uint8 image.

    Hue encodes direction, saturation encodes magnitude (clipped at
    max_flow / n); value dims only where saturation saturates.
    """
    if max_flow is None:
        max_flow = np.max(flow)
    else:
        max_flow = max(max_flow, 1.)
    n = 8
    u = flow[:, :, 0]
    v = flow[:, :, 1]
    mag = np.sqrt(np.square(u) + np.square(v))
    angle = np.arctan2(v, u)
    hue = np.mod(angle / (2 * np.pi) + 1, 1)          # direction -> [0, 1)
    sat = np.clip(mag * n / max_flow, a_min=0, a_max=1)  # magnitude -> [0, 1]
    val = np.clip(n - sat, a_min=0, a_max=1)
    rgb = hsv_to_rgb(np.stack([hue, sat, val], 2))
    return (rgb * 255).astype(np.uint8)
def resize_flow(flow, new_shape):
    """Bilinearly resize a B2HW flow field to `new_shape` (h, w).

    Flow vectors are rescaled so displacements stay consistent with the new
    resolution.
    """
    _, _, old_h, old_w = flow.shape
    new_h, new_w = new_shape
    resized = torch.nn.functional.interpolate(flow, (new_h, new_w),
                                              mode='bilinear', align_corners=True)
    # Undo the implicit change of units: one pixel at the old size is
    # new/old pixels at the new size.
    resized[:, 0] /= old_w / float(new_w)
    resized[:, 1] /= old_h / float(new_h)
    return resized
def evaluate_flow(gt_flows, pred_flows, moving_masks=None):
    # credit "undepthflow/eval/evaluate_flow.py"
    """Average EPE (and KITTI-style error rates) over a batch of flow fields.

    gt_flows: list of HxWx2 arrays (Sintel-style) or HxWx4 arrays (KITTI:
        u, v, occluded-validity, non-occluded-validity).
    pred_flows: list of predicted flows; resized/rescaled to each GT's size.
    moving_masks: optional per-sample masks splitting the metrics into
        moving / static regions (KITTI-style GT only).
    Returns [EPE] for 2-channel GT, or [EPE, EPE_noc, EPE_occ, Fl-rate]
    (+ [EPE_move, EPE_static] when moving_masks is given) for 4-channel GT.
    """
    def calculate_error_rate(epe_map, gt_flow, mask):
        # KITTI "Fl" outlier rate (%): EPE > 3 px AND > 5% of the GT magnitude.
        bad_pixels = np.logical_and(
            epe_map * mask > 3,
            epe_map * mask / np.maximum(
                np.sqrt(np.sum(np.square(gt_flow), axis=2)), 1e-10) > 0.05)
        return bad_pixels.sum() / mask.sum() * 100.
    error, error_noc, error_occ, error_move, error_static, error_rate = \
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    error_move_rate, error_static_rate = 0.0, 0.0
    B = len(gt_flows)
    for gt_flow, pred_flow, i in zip(gt_flows, pred_flows, range(B)):
        H, W = gt_flow.shape[:2]
        h, w = pred_flow.shape[:2]
        # Rescale vectors to GT resolution, then resample the field itself.
        pred_flow = np.copy(pred_flow)
        pred_flow[:, :, 0] = pred_flow[:, :, 0] / w * W
        pred_flow[:, :, 1] = pred_flow[:, :, 1] / h * H
        flo_pred = cv2.resize(pred_flow, (W, H), interpolation=cv2.INTER_LINEAR)
        # Per-pixel end-point error.
        epe_map = np.sqrt(
            np.sum(np.square(flo_pred[:, :, :2] - gt_flow[:, :, :2]),
                   axis=2))
        if gt_flow.shape[-1] == 2:
            # Dense GT (e.g. Sintel): plain mean EPE.
            error += np.mean(epe_map)
        elif gt_flow.shape[-1] == 4:
            # Sparse KITTI GT: channel 2 = valid (occ), channel 3 = valid (noc).
            error += np.sum(epe_map * gt_flow[:, :, 2]) / np.sum(gt_flow[:, :, 2])
            noc_mask = gt_flow[:, :, -1]
            error_noc += np.sum(epe_map * noc_mask) / np.sum(noc_mask)
            # Occluded-only pixels = valid(occ) minus valid(noc).
            error_occ += np.sum(epe_map * (gt_flow[:, :, 2] - noc_mask)) / max(
                np.sum(gt_flow[:, :, 2] - noc_mask), 1.0)
            error_rate += calculate_error_rate(epe_map, gt_flow[:, :, 0:2],
                                               gt_flow[:, :, 2])
            if moving_masks is not None:
                move_mask = moving_masks[i]
                error_move_rate += calculate_error_rate(
                    epe_map, gt_flow[:, :, 0:2], gt_flow[:, :, 2] * move_mask)
                error_static_rate += calculate_error_rate(
                    epe_map, gt_flow[:, :, 0:2],
                    gt_flow[:, :, 2] * (1.0 - move_mask))
                error_move += np.sum(epe_map * gt_flow[:, :, 2] *
                                     move_mask) / np.sum(gt_flow[:, :, 2] *
                                                         move_mask)
                error_static += np.sum(epe_map * gt_flow[:, :, 2] * (
                        1.0 - move_mask)) / np.sum(gt_flow[:, :, 2] *
                                                   (1.0 - move_mask))
    if gt_flows[0].shape[-1] == 4:
        res = [error / B, error_noc / B, error_occ / B, error_rate / B]
        if moving_masks is not None:
            res += [error_move / B, error_static / B]
        return res
    else:
        return [error / B]
| 4,870
| 38.601626
| 93
|
py
|
ARFlow
|
ARFlow-master/utils/torch_utils.py
|
import torch
import shutil
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import numbers
import random
import math
from torch.optim import Optimizer
def init_seed(seed):
    """Seed every RNG used by the project (python, numpy, torch CPU + CUDA)."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
def weight_parameters(module):
    """All parameters of `module` whose registered name contains 'weight'."""
    selected = []
    for name, param in module.named_parameters():
        if 'weight' in name:
            selected.append(param)
    return selected
def bias_parameters(module):
    """All parameters of `module` whose registered name contains 'bias'."""
    return [param for name, param in module.named_parameters() if name.find('bias') >= 0]
def load_checkpoint(model_path):
    """Load a checkpoint file and return (epoch, state_dict).

    `epoch` is None when the file carries no 'epoch' entry; the state dict is
    taken from the 'state_dict' key when present, otherwise the whole file is
    treated as a raw state dict.
    """
    weights = torch.load(model_path)
    epoch = weights.pop('epoch') if 'epoch' in weights else None
    state_dict = weights['state_dict'] if 'state_dict' in weights else weights
    return epoch, state_dict
def save_checkpoint(save_path, states, file_prefixes, is_best, filename='ckpt.pth.tar'):
    """Save one checkpoint (or several, zipped with prefixes) under `save_path`.

    Each state is written to '<prefix>_<filename>'; when `is_best`, it is also
    copied to '<prefix>_model_best.pth.tar'. `file_prefixes` may be a single
    string (one checkpoint) or an iterable matched pairwise with `states`.
    """
    def _save_one(state, prefix):
        target = save_path / '{}_{}'.format(prefix, filename)
        torch.save(state, target)
        if is_best:
            shutil.copyfile(target,
                            save_path / '{}_model_best.pth.tar'.format(prefix))
    if isinstance(file_prefixes, str):
        _save_one(states, file_prefixes)
    else:
        for prefix, state in zip(file_prefixes, states):
            _save_one(state, prefix)
def restore_model(model, pretrained_file):
    """Load `pretrained_file` into `model`, tolerating partial key overlap.

    Keys missing from the checkpoint keep the model's current values; extra
    checkpoint keys are reported and dropped. Returns the (mutated) model.
    """
    epoch, weights = load_checkpoint(pretrained_file)
    model_keys = set(model.state_dict().keys())
    weight_keys = set(weights.keys())
    # load weights by name
    weights_not_in_model = sorted(list(weight_keys - model_keys))
    model_not_in_weights = sorted(list(model_keys - weight_keys))
    if len(model_not_in_weights):
        print('Warning: There are weights in model but not in pre-trained.')
        for key in (model_not_in_weights):
            print(key)
            # Backfill with the model's current value so load_state_dict succeeds.
            weights[key] = model.state_dict()[key]
    if len(weights_not_in_model):
        print('Warning: There are pre-trained weights not in model.')
        for key in (weights_not_in_model):
            print(key)
        from collections import OrderedDict
        new_weights = OrderedDict()
        # Keep only the keys the model actually has.
        for key in model_keys:
            new_weights[key] = weights[key]
        weights = new_weights
    model.load_state_dict(weights)
    return model
class AdamW(Optimizer):
    """Implements AdamW algorithm.
    It has been proposed in `Fixing Weight Decay Regularization in Adam`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
    .. Fixing Weight Decay Regularization in Adam:
    https://arxiv.org/abs/1711.05101
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay)
        super(AdamW, self).__init__(params, defaults)
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        'AdamW does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # according to the paper, this penalty should come after the bias correction
                # if group['weight_decay'] != 0:
                #     grad = grad.add(group['weight_decay'], p.data)
                # Decay the first and second moment running average coefficient.
                # NOTE: the legacy Tensor.add_(scalar, tensor) / addcmul_(scalar, t1, t2)
                # positional-scalar overloads were removed in modern PyTorch;
                # the alpha=/value= keyword forms below compute the same update.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
                if group['weight_decay'] != 0:
                    # Decoupled weight decay: p <- p * (1 - weight_decay).
                    p.data.mul_(1 - group['weight_decay'])
        return loss
| 5,676
| 34.04321
| 102
|
py
|
ARFlow
|
ARFlow-master/transforms/co_transforms.py
|
import numbers
import random
import numpy as np
# from scipy.misc import imresize
from skimage.transform import resize as imresize
import scipy.ndimage as ndimage
def get_co_transforms(aug_args):
    """Build the joint (images + target) augmentation pipeline from config flags.

    Order is fixed: crop, then horizontal flip, then temporal swap.
    """
    chosen = []
    if aug_args.crop:
        chosen.append(RandomCrop(aug_args.para_crop))
    if aug_args.hflip:
        chosen.append(RandomHorizontalFlip())
    if aug_args.swap:
        chosen.append(RandomSwap())
    return Compose(chosen)
class Compose(object):
    """Apply a sequence of co-transforms to an (input, target) pair in order."""
    def __init__(self, co_transforms):
        self.co_transforms = co_transforms
    def __call__(self, input, target):
        pair = (input, target)
        for transform in self.co_transforms:
            pair = transform(*pair)
        return pair
class RandomCrop(object):
    """Crops the given arrays at one shared random location.

    `size` may be a (target_height, target_width) tuple or a single number,
    in which case a square (size, size) crop is taken. The same window is
    applied to every input image and to 'mask'/'flow' targets.
    """
    def __init__(self, size):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
    def __call__(self, inputs, target):
        h, w, _ = inputs[0].shape
        th, tw = self.size
        # Nothing to do when the crop covers the whole image.
        if w == tw and h == th:
            return inputs, target
        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        window = (slice(y1, y1 + th), slice(x1, x1 + tw))
        inputs = [img[window] for img in inputs]
        for key in ('mask', 'flow'):
            if key in target:
                target[key] = target[key][window]
        return inputs, target
class RandomSwap(object):
    """With probability 0.5, reverse the temporal order of inputs (and masks).

    Raises NotImplementedError if a swap is drawn while a 'flow' target is
    present, since flow cannot simply be reversed.
    """
    def __call__(self, inputs, target):
        if random.random() >= 0.5:
            return inputs, target
        inputs = inputs[::-1]
        if 'mask' in target:
            target['mask'] = target['mask'][::-1]
        if 'flow' in target:
            raise NotImplementedError("swap cannot apply to flow")
        return inputs, target
class RandomHorizontalFlip(object):
    """With probability 0.5, mirror inputs left-right.

    Masks are flipped alongside; flow fields additionally negate the
    u-component so the vectors stay consistent with the flipped images.
    """
    def __call__(self, inputs, target):
        if random.random() >= 0.5:
            return inputs, target
        inputs = [np.copy(np.fliplr(im)) for im in inputs]
        if 'mask' in target:
            target['mask'] = [np.copy(np.fliplr(mask)) for mask in target['mask']]
        if 'flow' in target:
            for i, flo in enumerate(target['flow']):
                mirrored = np.copy(np.fliplr(flo))
                mirrored[:, :, 0] *= -1  # horizontal displacement changes sign
                target['flow'][i] = mirrored
        return inputs, target
| 2,709
| 32.04878
| 86
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.