code stringlengths 17 6.64M |
|---|
def test_agent_supertypes_in_env_1():
    """Supertypes given as Supertype instances attach to agents and resample on reset."""
    network = ph.Network([MockStrategicAgent('a1'), MockStrategicAgent('a2')])
    sampler_a, sampler_b = MockSampler(0), MockSampler(10)
    supertypes = {
        'a1': MockStrategicAgent.Supertype(type_value=sampler_a),
        'a2': MockStrategicAgent.Supertype(type_value=sampler_b),
    }
    env = ph.PhantomEnv(1, network, agent_supertypes=supertypes)
    # Both samplers must be registered with the env.
    assert set(env._samplers) == {sampler_a, sampler_b}
    assert env.agents['a1'].supertype == supertypes['a1']
    assert env.agents['a1'].type == MockStrategicAgent.Supertype(1)
    assert env.agents['a2'].supertype == supertypes['a2']
    assert env.agents['a2'].type == MockStrategicAgent.Supertype(11)
    assert env.agents['a1'].supertype.type_value == sampler_a
    assert env.agents['a2'].supertype.type_value == sampler_b
    # Resetting draws a fresh sample from each registered sampler.
    env.reset()
    assert env.agents['a1'].type == MockStrategicAgent.Supertype(2)
    assert env.agents['a2'].type == MockStrategicAgent.Supertype(12)
|
def test_agent_supertypes_in_env_2():
    """Supertypes may also be given as plain field->sampler dicts."""
    network = ph.Network([MockStrategicAgent('a1'), MockStrategicAgent('a2')])
    sampler_a, sampler_b = MockSampler(0), MockSampler(10)
    env = ph.PhantomEnv(
        1,
        network,
        agent_supertypes={'a1': {'type_value': sampler_a}, 'a2': {'type_value': sampler_b}},
    )
    # Dicts are promoted to Supertype instances and their samplers registered.
    assert set(env._samplers) == {sampler_a, sampler_b}
    assert env.agents['a1'].type == MockStrategicAgent.Supertype(1)
    assert env.agents['a1'].supertype == MockStrategicAgent.Supertype(type_value=sampler_a)
    assert env.agents['a2'].type == MockStrategicAgent.Supertype(11)
    assert env.agents['a2'].supertype == MockStrategicAgent.Supertype(type_value=sampler_b)
    assert env.agents['a1'].supertype.type_value == sampler_a
    assert env.agents['a2'].supertype.type_value == sampler_b
    # Resetting draws a fresh sample from each registered sampler.
    env.reset()
    assert env.agents['a1'].type == MockStrategicAgent.Supertype(2)
    assert env.agents['a2'].type == MockStrategicAgent.Supertype(12)
|
def test_agent_supertypes_in_env_bad():
    """Supertype dicts with unknown or missing fields must raise at env construction."""
    network = ph.Network([MockStrategicAgent('a1'), MockStrategicAgent('a2')])
    bad_supertypes = {'a1': {'wrong': 1.0}, 'a2': {}}
    with pytest.raises(Exception):
        ph.PhantomEnv(1, network, agent_supertypes=bad_supertypes)
|
def test_env_supertype_in_env_1():
    """An env-level Supertype instance registers its sampler; env_type appears on reset."""
    sampler = MockSampler(0)
    supertype = MockEnv.Supertype(type_value=sampler)
    env = MockEnv(env_supertype=supertype)
    assert set(env._samplers) == {sampler}
    # No type has been sampled before the first reset.
    assert env.env_type is None
    assert env.env_supertype == MockEnv.Supertype(sampler)
    env.reset()
    assert env.env_type == MockEnv.Supertype(2)
|
def test_env_supertype_in_env_2():
    """An env-level supertype given as a dict behaves like a Supertype instance."""
    sampler = MockSampler(0)
    env = MockEnv(env_supertype={'type_value': sampler})
    assert set(env._samplers) == {sampler}
    # No type has been sampled before the first reset.
    assert env.env_type is None
    assert env.env_supertype == MockEnv.Supertype(type_value=sampler)
    env.reset()
    assert env.env_type == MockEnv.Supertype(2)
|
def test_env_supertype_in_env_bad():
    """An env supertype dict with an unknown field must raise at construction."""
    with pytest.raises(Exception):
        MockEnv(env_supertype={'xxx': 0.0})
|
def test_env_type_passed_to_agent():
    """The env's sampled supertype value must reach agents via the env view each step."""
    class MockAgent(ph.Agent):
        def __init__(self, *args, num_steps=None, **kwargs):
            super().__init__(*args, **kwargs)
            self.num_steps = num_steps
            self.param = 0.0
        def generate_messages(self, ctx):
            # Capture whatever supertype value the env exposed on its view.
            self.param = ctx.env_view.supertype_param
    class MockEnv(ph.PhantomEnv):
        @dataclass
        class Supertype(ph.Supertype):
            param: float = 0.0
        @dataclass(frozen=True)
        class View(ph.EnvView):
            # Extra field carrying the sampled env type's param to agents.
            supertype_param: float
        def view(self, agent_views):
            # Forward the sampled env type's param on the view sent to agents.
            return self.View(self.current_step, (self.current_step / self.num_steps), self.env_type.param)
        def __init__(self, **kwargs):
            network = ph.StochasticNetwork([MockAgent('a1')])
            super().__init__(num_steps=10, network=network, **kwargs)
    env = MockEnv(env_supertype=MockEnv.Supertype(MockSampler(0.0)))
    # NOTE(review): expected values 2.0/3.0 imply MockSampler increments its value
    # on each sample (matching the supertype tests above) -- confirm in MockSampler.
    env.reset()
    env.step({})
    assert (env['a1'].param == 2.0)
    env.reset()
    env.step({})
    assert (env['a1'].param == 3.0)
|
def test_telemetry(tmpdir):
    """Print logging must not touch disk; file logging must record the reset plus every step."""
    # Enable all print options -- this mode must not create any file.
    ph.telemetry.logger.configure_print_logging(print_actions=True, print_observations=True, print_rewards=True, print_terminations=True, print_truncations=True, print_infos=True, print_messages=True, metrics={'step': ph.metrics.SimpleEnvMetric('current_step')})
    env = MockEnv()
    env.reset()
    for _ in range(5):
        env.step({})
    assert (ph.telemetry.logger._current_episode is None)
    assert (not os.path.isfile(tmpdir.join('log.json')))
    # Switch to file logging and rerun an identical episode.
    ph.telemetry.logger.configure_print_logging(enable=False)
    ph.telemetry.logger.configure_file_logging(file_path=tmpdir.join('log.json'), metrics={'step': ph.metrics.SimpleEnvMetric('current_step')})
    env = MockEnv()
    env.reset()
    for _ in range(5):
        env.step({})
    assert os.path.isfile(tmpdir.join('log.json'))
    data = json.load(open(tmpdir.join('log.json'), 'r'))
    assert (set(data.keys()) == {'start', 'steps'})
    # Six entries: the reset entry plus five steps.
    assert (len(data['steps']) == 6)
    # The reset entry carries no actions/rewards; real steps carry the full key set.
    assert (set(data['steps'][0]) == {'messages', 'metrics', 'observations'})
    assert (set(data['steps'][1]) == {'actions', 'terminations', 'truncations', 'infos', 'messages', 'metrics', 'observations', 'rewards'})
    # Disable file logging so later tests are unaffected by this global state.
    ph.telemetry.logger.configure_file_logging(file_path=None)
|
def test_uniform_range():
    """UniformRange yields stepped values in [start, end), excluding the endpoint."""
    rng = ph.utils.ranges.UniformRange(start=0.0, end=10.0, step=1.0)
    expected = np.arange(10)
    assert (rng.values() == expected).all()
|
def test_linspace_range():
    """LinspaceRange yields n evenly spaced values including both endpoints."""
    rng = ph.utils.ranges.LinspaceRange(start=0.0, end=10.0, n=11)
    expected = np.linspace(0.0, 10.0, 11)
    assert (rng.values() == expected).all()
|
def test_unit_array_uniform_range():
    """UnitArrayUniformRange wraps each stepped value in a length-1 array."""
    rng = ph.utils.ranges.UnitArrayUniformRange(start=0.0, end=10.0, step=1.0)
    expected = [np.array([i]) for i in range(10)]
    assert rng.values() == expected
|
def test_unit_array_linspace_range():
    """UnitArrayLinspaceRange wraps each linspace value in a length-1 array."""
    rng = ph.utils.ranges.UnitArrayLinspaceRange(start=0.0, end=10.0, n=11)
    expected = [np.array([i]) for i in range(11)]
    assert rng.values() == expected
|
def test_rllib_train_rollout(tmpdir):
    """End-to-end: train a PPO policy, roll it out several ways, and evaluate it on an obs grid."""
    # Train two iterations, checkpointing at the end so LATEST exists.
    ph.utils.rllib.train(algorithm='PPO', env_class=MockEnv, policies={'mock_policy': MockStrategicAgent}, rllib_config={'disable_env_checking': True, 'num_rollout_workers': 1}, iterations=2, checkpoint_freq=2, results_dir=tmpdir)
    # Rollout without env_class (recovered from the checkpoint) and without workers.
    results = ph.utils.rllib.rollout(directory=f'{tmpdir}/LATEST', num_repeats=3, num_workers=0)
    assert (len(list(results)) == 3)
    results = ph.utils.rllib.rollout(directory=f'{tmpdir}/LATEST', env_class=MockEnv, num_repeats=3, num_workers=1)
    results = list(results)
    assert (len(results) == 3)
    # All repeats must produce identical action sequences for the agent.
    assert np.all((results[0].actions_for_agent('a1') == results[1].actions_for_agent('a1') == results[2].actions_for_agent('a1')))
    # Exercise the export helpers on the collected rollouts.
    ph.utils.rollout.rollouts_to_dataframe(results, avg_over_repeats=False)
    with open(f'{tmpdir}/rollouts.json', 'w') as f:
        ph.utils.rollout.rollouts_to_jsonl(results, f)
    # A custom policy mapping overrides the trained policy; the assert below
    # implies MockPolicy always selects action 1.
    results = ph.utils.rllib.rollout(directory=f'{tmpdir}/LATEST', env_class=MockEnv, custom_policy_mapping={'a1': MockPolicy}, num_repeats=1, num_workers=1)
    assert (list(results)[0].actions_for_agent('a1') == [1, 1, 1, 1, 1])
    # Evaluate the trained policy over a 3-point observation grid, greedily...
    results = ph.utils.rllib.evaluate_policy(directory=f'{tmpdir}/LATEST', obs=[ph.utils.ranges.LinspaceRange(0.0, 1.0, 3, name='r')], policy_id='mock_policy', explore=False)
    results = list(results)
    assert (results[0][0] == {'r': 0.0})
    assert (results[1][0] == {'r': 0.5})
    assert (results[2][0] == {'r': 1.0})
    assert (results[0][1][0] == 0.0)
    assert (results[1][1][0] == 0.5)
    assert (results[2][1][0] == 1.0)
    # ...and again with exploration enabled; the observation half of each result
    # is unchanged either way.
    results = ph.utils.rllib.evaluate_policy(directory=f'{tmpdir}/LATEST', obs=[ph.utils.ranges.LinspaceRange(0.0, 1.0, 3, name='r')], policy_id='mock_policy', explore=True)
    results = list(results)
    assert (results[0][0] == {'r': 0.0})
    assert (results[1][0] == {'r': 0.5})
    assert (results[2][0] == {'r': 1.0})
    assert (results[0][1][0] == 0.0)
    assert (results[1][1][0] == 0.5)
    assert (results[2][1][0] == 1.0)
|
def test_rllib_rollout_vectorized_fsm_env(tmpdir):
    """Batched policy inference must match unbatched for a single-stage FSM env,
    and must raise for a multi-stage env."""
    # Single-stage FSM: every episode follows the same stage sequence.
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockStrategicAgent('A')]
            network = ph.Network(agents)
            super().__init__(num_steps=1, network=network, initial_stage='StageA')
        @ph.FSMStage(stage_id='StageA', acting_agents=['A'], next_stages=['StageA'])
        def handle(self):
            return 'StageA'
    ph.utils.rllib.train(algorithm='PPO', env_class=Env, policies={'mock_policy': MockStrategicAgent}, rllib_config={'disable_env_checking': True, 'num_rollout_workers': 1}, iterations=2, checkpoint_freq=2, results_dir=tmpdir)
    # Inference batch size must not change rollout results.
    results1 = ph.utils.rllib.rollout(directory=f'{tmpdir}/LATEST', num_repeats=3, num_workers=1, policy_inference_batch_size=1)
    results2 = ph.utils.rllib.rollout(directory=f'{tmpdir}/LATEST', num_repeats=3, num_workers=1, policy_inference_batch_size=3)
    assert (list(results1) == list(results2))
    # Multi-stage FSM: stages alternate, so episodes in a batch can diverge.
    class Env2(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockStrategicAgent('A')]
            network = ph.Network(agents)
            super().__init__(num_steps=1, network=network, initial_stage='StageA')
        @ph.FSMStage(stage_id='StageA', acting_agents=['A'], next_stages=['StageA', 'StageB'])
        def handleA(self):
            return 'StageB'
        @ph.FSMStage(stage_id='StageB', acting_agents=['A'], next_stages=['StageA', 'StageB'])
        def handleB(self):
            return 'StageA'
    ph.utils.rllib.train(algorithm='PPO', env_class=Env2, policies={'mock_policy': MockStrategicAgent}, rllib_config={'disable_env_checking': True, 'num_rollout_workers': 1}, iterations=2, checkpoint_freq=1, results_dir=tmpdir)
    # Vectorised (batch > 1) rollout of a multi-stage FSM env is rejected.
    with pytest.raises(ValueError):
        list(ph.utils.rllib.rollout(directory=f'{tmpdir}/LATEST', num_repeats=3, num_workers=1, policy_inference_batch_size=3))
|
def test_rllib_rollout_bad():
    """rollout() rejects non-positive repeat counts and negative worker counts."""
    with pytest.raises(AssertionError):
        list(ph.utils.rllib.rollout(directory='', env_class=MockEnv, num_repeats=0))
    with pytest.raises(AssertionError):
        list(ph.utils.rllib.rollout(directory='', env_class=MockEnv, num_workers=-1))
|
def test_rllib_train_no_checkpoint(tmpdir):
    """checkpoint_freq=0 disables checkpointing entirely."""
    algo = ph.utils.rllib.train(
        algorithm='PPO',
        env_class=MockEnv,
        policies={'mock_policy': MockStrategicAgent},
        rllib_config={'disable_env_checking': True, 'num_rollout_workers': 1},
        iterations=1,
        checkpoint_freq=0,
        results_dir=tmpdir,
    )
    assert not Path(algo.logdir, 'checkpoint_000001').exists()
|
def test_rllib_train_not_set_checkpoint_freq(tmpdir):
    """With checkpoint_freq=None, a checkpoint is still written at the final iteration."""
    algo = ph.utils.rllib.train(
        algorithm='PPO',
        env_class=MockEnv,
        policies={'mock_policy': MockStrategicAgent},
        rllib_config={'disable_env_checking': True, 'num_rollout_workers': 1},
        iterations=2,
        checkpoint_freq=None,
        results_dir=tmpdir,
    )
    assert Path(algo.logdir, 'checkpoint_000002').exists()
|
def test_rollout_class():
    """Each per-agent accessor on Rollout must read its own Step field and honour drop_nones."""
    rollout = ph.utils.rollout.Rollout(rollout_id=0, repeat_id=0, env_config={}, rollout_params={}, steps=[ph.utils.rollout.Step(i=0, observations={'agent': {'obs': 1}}, rewards={'agent': 1.0}, terminations={'agent': False}, truncations={'agent': False}, infos={'agent': {'info': 1}}, actions={'agent': {'action': 1}}, messages=None, stage=None), ph.utils.rollout.Step(i=0, observations={}, rewards={'agent': None}, terminations={}, truncations={}, infos={}, actions={}, messages=None, stage=None), ph.utils.rollout.Step(i=0, observations={}, rewards={}, terminations={}, truncations={}, infos={}, actions={}, messages=None, stage=None)], metrics={})
    obs = rollout.observations_for_agent('agent', drop_nones=False)
    assert (obs == [{'obs': 1}, None, None])
    obs = rollout.observations_for_agent('agent', drop_nones=True)
    assert (obs == [{'obs': 1}])
    rewards = rollout.rewards_for_agent('agent', drop_nones=False)
    assert (rewards == [1.0, None, None])
    rewards = rollout.rewards_for_agent('agent', drop_nones=True)
    assert (rewards == [1.0])
    # BUG FIX: the original copy-pasted rewards_for_agent for all the accessors
    # below, so terminations/truncations/infos/actions were never exercised.
    # Each accessor is now called with expected values matching its Step field.
    terminations = rollout.terminations_for_agent('agent', drop_nones=False)
    assert (terminations == [False, None, None])
    terminations = rollout.terminations_for_agent('agent', drop_nones=True)
    assert (terminations == [False])
    truncations = rollout.truncations_for_agent('agent', drop_nones=False)
    assert (truncations == [False, None, None])
    truncations = rollout.truncations_for_agent('agent', drop_nones=True)
    assert (truncations == [False])
    infos = rollout.infos_for_agent('agent', drop_nones=False)
    assert (infos == [{'info': 1}, None, None])
    infos = rollout.infos_for_agent('agent', drop_nones=True)
    assert (infos == [{'info': 1}])
    actions = rollout.actions_for_agent('agent', drop_nones=False)
    assert (actions == [{'action': 1}, None, None])
    actions = rollout.actions_for_agent('agent', drop_nones=True)
    assert (actions == [{'action': 1}])
|
@pytest.fixture
def float_sampler():
    """Fresh default-constructed UniformFloatSampler for each test."""
    return UniformFloatSampler()
|
@pytest.fixture
def int_sampler():
    """Fresh default-constructed UniformIntSampler for each test."""
    return UniformIntSampler()
|
def test_comparison_with_float(float_sampler):
    """A float sampler compares against raw floats via its last sampled value."""
    float_sampler._value = float_sampler.sample()
    # Sampled value lies inside the default [0, 1] bounds.
    assert float_sampler <= 1.0
    assert float_sampler >= 0.0
    # Equality/inequality delegate to the stored value.
    assert float_sampler == float_sampler._value
    assert float_sampler != float_sampler._value + 0.1
|
def test_comparison_with_int(int_sampler):
    """An int sampler compares against raw ints via its last sampled value."""
    int_sampler._value = int_sampler.sample()
    # Default bounds only allow 0 or 1.
    assert int_sampler == 0 or int_sampler == 1
    assert int_sampler == int_sampler._value
    assert int_sampler != int_sampler._value + 1
|
def test_comparison_with_sampler(float_sampler):
    """Two distinct sampler objects never compare equal, even when values match."""
    float_sampler._value = 0.5
    other = UniformFloatSampler()
    other._value = 0.5
    assert not (float_sampler == other)
    assert float_sampler != other
|
def test_iterable():
    """Container membership of samplers uses identity, not value equality."""
    samplers = [UniformFloatSampler() for _ in range(3)]
    for sampler in samplers:
        sampler._value = 0.5
    # Despite equal values, the third sampler is not "in" a list of the others.
    assert samplers[2] not in samplers[:2]
|
def test_lambda_sampler():
    """LambdaSampler forwards its stored args/kwargs to the callable on every sample."""
    def _add(a_, b_=0):
        return a_ + b_
    with_kwarg = LambdaSampler(_add, 5, b_=1)
    # Repeated sampling is deterministic for a pure callable.
    assert with_kwarg.sample() == 6
    assert with_kwarg.sample() == 6
    without_kwarg = LambdaSampler(_add, 5)
    assert without_kwarg.sample() == 5
    assert without_kwarg.sample() == 5
|
def test_asserts():
    """Every sampler type must reject bounds with low > high."""
    for build in (
        lambda: UniformFloatSampler(high=0.0, low=1.0),
        lambda: UniformIntSampler(high=0, low=1),
        lambda: UniformArraySampler(high=0.0, low=1.0),
    ):
        with pytest.raises(AssertionError):
            build()
|
def Deconv(inputs, f_dim_in, dim, net, batch_size, f_dim_out=None, stride=2):
    """4x4x4 transposed conv for the generator.

    By default the channel count is halved while the spatial size grows to `dim`.
    """
    out_channels = int(f_dim_in / 2) if f_dim_out is None else f_dim_out
    return tl.layers.DeConv3dLayer(
        inputs,
        shape=[4, 4, 4, out_channels, f_dim_in],
        output_shape=[batch_size, dim, dim, dim, out_channels],
        strides=[1, stride, stride, stride, 1],
        W_init=tf.random_normal_initializer(stddev=0.02),
        act=tf.identity,
        name='g/net_' + net + '/deconv',
    )
|
def Conv3D(inputs, f_dim_out, net, f_dim_in=None, batch_norm=False, is_train=True):
    """4x4x4 stride-2 conv for the discriminator, with optional batch norm.

    The input channel count defaults to half the output channel count.
    """
    in_channels = int(f_dim_out / 2) if f_dim_in is None else f_dim_in
    conv = tl.layers.Conv3dLayer(
        inputs,
        shape=[4, 4, 4, in_channels, f_dim_out],
        W_init=tf.random_normal_initializer(stddev=0.02),
        strides=[1, 2, 2, 2, 1],
        name='d/net_' + net + '/conv',
    )
    if not batch_norm:
        return conv
    return tl.layers.BatchNormLayer(conv, is_train=is_train, name='d/net_' + net + '/batch_norm')
|
def generator_64(inputs, is_train=True, reuse=False, batch_size=128, sig=False):
    """Build the 64^3-voxel generator: a dense projection to a 4^3 x 512 volume,
    then four 4x4x4 transposed convs, each doubling resolution and (mostly)
    halving channels, down to a single channel at 64^3.

    Returns (final_layer, output_tensor); output is sigmoid- or tanh-squashed.
    """
    # Target resolution and the intermediate resolution of each deconv stage.
    (output_size, half, forth, eighth, sixteenth) = (64, 32, 16, 8, 4)
    gf_dim = 512  # channel count at the lowest (4^3) resolution
    with tf.variable_scope('gen', reuse=reuse) as vs:
        net_0 = tl.layers.InputLayer(inputs, name='g/net_0/in')
        # Project the input to a flat 4*4*4*gf_dim vector, then reshape to a volume.
        net_1 = tl.layers.DenseLayer(net_0, n_units=(((gf_dim * sixteenth) * sixteenth) * sixteenth), W_init=tf.random_normal_initializer(stddev=0.02), act=tf.identity, name='g/net_1/dense')
        net_1 = tl.layers.ReshapeLayer(net_1, shape=[(- 1), sixteenth, sixteenth, sixteenth, gf_dim], name='g/net_1/reshape')
        net_1 = tl.layers.BatchNormLayer(net_1, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_1/batch_norm')
        net_1.outputs = tf.nn.relu(net_1.outputs, name='g/net_1/relu')
        # Deconv -> batch norm -> ReLU blocks, doubling resolution each time.
        net_2 = Deconv(net_1, gf_dim, eighth, '2', batch_size)
        net_2 = tl.layers.BatchNormLayer(net_2, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_2/batch_norm')
        net_2.outputs = tf.nn.relu(net_2.outputs, name='g/net_2/relu')
        net_3 = Deconv(net_2, int((gf_dim / 2)), forth, '3', batch_size)
        net_3 = tl.layers.BatchNormLayer(net_3, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_3/batch_norm')
        net_3.outputs = tf.nn.relu(net_3.outputs, name='g/net_3/relu')
        net_4 = Deconv(net_3, int((gf_dim / 4)), half, '4', batch_size)
        net_4 = tl.layers.BatchNormLayer(net_4, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_4/batch_norm')
        net_4.outputs = tf.nn.relu(net_4.outputs, name='g/net_4/relu')
        # Final stage outputs a single channel; drop the channel axis afterwards.
        net_5 = Deconv(net_4, int((gf_dim / 8)), output_size, '5', batch_size, f_dim_out=1)
        net_5.outputs = tf.reshape(net_5.outputs, [batch_size, output_size, output_size, output_size])
        # Squash to [0, 1] (sigmoid) or [-1, 1] (tanh).
        if sig:
            net_5.outputs = tf.nn.sigmoid(net_5.outputs)
        else:
            net_5.outputs = tf.nn.tanh(net_5.outputs)
        return (net_5, net_5.outputs)
|
def discriminator(inputs, output_size, sig=False, is_train=True, reuse=False, batch_size=128, output_units=1):
    """Build the voxel discriminator: four strided 4x4x4 convs then a dense head.

    Returns (final_layer, logits) or (final_layer, sigmoid(logits)) when sig=True.
    """
    # Add a trailing channel axis expected by the 3-D conv layers.
    inputs = tf.reshape(inputs, [batch_size, output_size, output_size, output_size, 1])
    df_dim = output_size  # base channel count tied to input resolution
    with tf.variable_scope('dis', reuse=reuse) as vs:
        net_0 = tl.layers.InputLayer(inputs, name='d/net_0/in')
        # Conv -> leaky ReLU blocks; channels double each stage, first stage
        # skips batch norm.
        net_1 = Conv3D(net_0, df_dim, '1', f_dim_in=1, batch_norm=False)
        net_1.outputs = tf.nn.leaky_relu(net_1.outputs, alpha=0.2, name='d/net_1/lrelu')
        net_2 = Conv3D(net_1, int((df_dim * 2)), '2', batch_norm=True, is_train=is_train)
        net_2.outputs = tf.nn.leaky_relu(net_2.outputs, alpha=0.2, name='d/net_2/lrelu')
        net_3 = Conv3D(net_2, int((df_dim * 4)), '3', batch_norm=True, is_train=is_train)
        net_3.outputs = tf.nn.leaky_relu(net_3.outputs, alpha=0.2, name='d/net_3/lrelu')
        net_4 = Conv3D(net_3, int((df_dim * 8)), '4', batch_norm=True, is_train=is_train)
        net_4.outputs = tf.nn.leaky_relu(net_4.outputs, alpha=0.2, name='d/net_4/lrelu')
        # Flatten and project to the requested number of output units.
        net_5 = FlattenLayer(net_4, name='d/net_5/flatten')
        net_5 = tl.layers.DenseLayer(net_5, n_units=output_units, act=tf.identity, W_init=tf.random_normal_initializer(stddev=0.02), name='d/net_5/dense')
        if sig:
            return (net_5, tf.nn.sigmoid(net_5.outputs))
        else:
            return (net_5, net_5.outputs)
|
def make_inputs_raw(file_batch):
    """Load a batch of raw 64^3 uint8 voxel files into one stacked array.

    Returns (models, start_time) where start_time is taken after loading.
    """
    voxel_dtype = np.dtype((np.uint8, (64, 64, 64)))
    loaded = [np.fromfile(path, dtype=voxel_dtype).reshape((64, 64, 64)) for path in file_batch]
    models = np.array(loaded)
    return (models, time.time())
|
def load_networks(checkpoint_dir, sess, net_g, net_d, epoch=''):
    """Restore generator/discriminator weights from .npz checkpoints.

    A non-empty `epoch` selects an epoch-suffixed checkpoint pair; both files
    must exist or nothing is loaded.
    """
    print('[*] Loading checkpoints...')
    suffix = ('_' + epoch) if len(epoch) >= 1 else epoch
    g_path = os.path.join(checkpoint_dir, 'net_g' + suffix + '.npz')
    d_path = os.path.join(checkpoint_dir, 'net_d' + suffix + '.npz')
    if os.path.exists(g_path) and os.path.exists(d_path):
        g_params = tl.files.load_npz(name=g_path)
        d_params = tl.files.load_npz(name=d_path)
        tl.files.assign_params(sess, g_params, net_g)
        tl.files.assign_params(sess, d_params, net_d)
        print('[*] Loading Generator and Discriminator checkpoints SUCCESS!')
    else:
        print('[!] Loading checkpoints failed!')
|
def save_networks(checkpoint_dir, sess, net_g, net_d, epoch):
    """Save generator/discriminator weights, both as 'latest' and epoch-tagged files."""
    print('[*] Saving checkpoints...')
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    # Latest checkpoints first, then the epoch-numbered copies.
    targets = [
        (net_g, 'net_g.npz'),
        (net_d, 'net_d.npz'),
        (net_g, 'net_g_%d.npz' % epoch),
        (net_d, 'net_d_%d.npz' % epoch),
    ]
    for network, filename in targets:
        tl.files.save_npz(network.all_params, name=os.path.join(checkpoint_dir, filename), sess=sess)
    print('[*] Saving checkpoints SUCCESS!')
|
def save_voxels(save_dir, models, epock):
    """Persist the first voxel model of the batch as `<save_dir><epock>.npy`."""
    print('Saving the model')
    np.save(save_dir + str(epock), models[0])
|
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    """Smooth (and optionally differentiate) `y` with a Savitzky-Golay filter.

    Fits a polynomial of degree `order` in a sliding window of `window_size`
    points (a positive odd number) and evaluates the `deriv`-th derivative at
    the window centre. The signal is mirror-padded at both ends so the output
    has the same length as `y`.

    Raises:
        ValueError: if window_size or order cannot be cast to int.
        TypeError: if window_size is not a positive odd number or is too small
            for the requested polynomial order.
    """
    from math import factorial
    try:
        # BUG FIX: np.int was removed in NumPy 1.24 -- use the builtin int.
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError:
        raise ValueError('window_size and order have to be of type int')
    if (window_size % 2) != 1 or window_size < 1:
        raise TypeError('window_size size must be a positive odd number')
    if window_size < (order + 2):
        raise TypeError('window_size is too small for the polynomials order')
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # Vandermonde design matrix of the local fit; row `deriv` of its
    # pseudo-inverse gives the convolution coefficients. np.mat is deprecated,
    # so use a plain ndarray (pinv(b)[deriv] equals pinv(np.mat(b)).A[deriv]).
    b = np.array([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * (rate ** deriv) * factorial(deriv)
    # Mirror-pad both ends to suppress boundary artefacts.
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
|
def render_graphs(save_dir, epoch, track_g_loss, track_d_loss, epoch_arr):
    """Plot G/D loss curves to `<save_dir>/plots/<epoch>.png`.

    No plot is produced until more than 51 loss samples have accumulated.
    """
    plots_dir = save_dir + '/plots/'
    if not os.path.exists(plots_dir):
        os.makedirs(plots_dir)
    if len(track_d_loss) > 51:
        plt.plot(epoch_arr, track_d_loss, color='blue', alpha=0.5)
        plt.plot(epoch_arr, track_g_loss, color='red', alpha=0.5)
        plt.legend(("Discriminator's loss", "Generator's loss"), loc='upper right')
        plt.title('64-3D-GAN')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.grid(True)
        plt.savefig(plots_dir + str(epoch) + '.png')
        plt.clf()
|
def save_values(save_dir, track_g_loss, track_d_loss, epoch_arr):
    """Dump loss histories and epoch indices to `<save_dir>/plots/` as .npy files."""
    for filename, data in (
        ('track_g_loss', track_g_loss),
        ('track_d_loss', track_d_loss),
        ('epochs', epoch_arr),
    ):
        np.save(save_dir + '/plots/' + filename, data)
|
def load_values(save_dir):
    """Load the [g_loss, d_loss, epochs] lists saved by `save_values`."""
    names = ('track_g_loss', 'track_d_loss', 'epochs')
    return [list(np.load(save_dir + '/plots/' + name + '.npy')) for name in names]
|
def cal_acc(zeros, ones):
    """Discriminator accuracy: fake scores should fall below 0.5, real ones above.

    NaN scores are never counted correct but still appear in the denominator.
    """
    correct = 0.0
    for score in zeros:
        if not np.isnan(score[0]) and score[0] < 0.5:
            correct += 1.0
    for score in ones:
        if not np.isnan(score[0]) and score[0] > 0.5:
            correct += 1.0
    accuracy = correct / float(len(zeros) + len(ones))
    print('The accuracy of the discriminator is: ' + str(accuracy))
    return accuracy
|
def Deconv(inputs, f_dim_in, dim, net, batch_size, f_dim_out=None, stride=2):
    """4x4x4 transposed conv for the generator; channels halve by default."""
    if f_dim_out is None:
        f_dim_out = int(f_dim_in / 2)
    layer_name = 'g/net_' + net + '/deconv'
    return tl.layers.DeConv3dLayer(
        inputs,
        shape=[4, 4, 4, f_dim_out, f_dim_in],
        output_shape=[batch_size, dim, dim, dim, f_dim_out],
        strides=[1, stride, stride, stride, 1],
        W_init=tf.random_normal_initializer(stddev=0.02),
        act=tf.identity,
        name=layer_name,
    )
|
def Conv3D(inputs, f_dim_out, net, f_dim_in=None, batch_norm=False, is_train=True):
    """4x4x4 stride-2 conv for the discriminator, with optional batch norm.

    The input channel count defaults to half the output channel count.
    """
    if f_dim_in is None:
        # BUG FIX: f_dim_out / 2 is a float under Python 3 and would put a
        # non-integer channel count into the conv shape; cast to int as the
        # other Conv3D definitions in this file do.
        f_dim_in = int(f_dim_out / 2)
    layer = tl.layers.Conv3dLayer(inputs, shape=[4, 4, 4, f_dim_in, f_dim_out], W_init=tf.random_normal_initializer(stddev=0.02), strides=[1, 2, 2, 2, 1], name=(('d/net_' + net) + '/conv'))
    if batch_norm:
        return tl.layers.BatchNormLayer(layer, is_train=is_train, name=(('d/net_' + net) + '/batch_norm'))
    else:
        return layer
|
def generator_64(inputs, is_train=True, reuse=False, batch_size=128, sig=False):
    """Build the 64^3-voxel generator: a dense projection to a 4^3 x 512 volume,
    then four 4x4x4 transposed convs up to a single channel at 64^3.

    Returns (final_layer, output_tensor); output is sigmoid- or tanh-squashed.
    """
    # Target resolution and the intermediate resolution of each deconv stage.
    (output_size, half, forth, eighth, sixteenth) = (64, 32, 16, 8, 4)
    gf_dim = 512  # channel count at the lowest (4^3) resolution
    with tf.variable_scope('gen', reuse=reuse) as vs:
        net_0 = tl.layers.InputLayer(inputs, name='g/net_0/in')
        # Project the input to a flat 4*4*4*gf_dim vector, then reshape to a volume.
        net_1 = tl.layers.DenseLayer(net_0, n_units=(((gf_dim * sixteenth) * sixteenth) * sixteenth), W_init=tf.random_normal_initializer(stddev=0.02), act=tf.identity, name='g/net_1/dense')
        net_1 = tl.layers.ReshapeLayer(net_1, shape=[(- 1), sixteenth, sixteenth, sixteenth, gf_dim], name='g/net_1/reshape')
        net_1 = tl.layers.BatchNormLayer(net_1, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_1/batch_norm')
        net_1.outputs = tf.nn.relu(net_1.outputs, name='g/net_1/relu')
        net_2 = Deconv(net_1, gf_dim, eighth, '2', batch_size)
        net_2 = tl.layers.BatchNormLayer(net_2, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_2/batch_norm')
        net_2.outputs = tf.nn.relu(net_2.outputs, name='g/net_2/relu')
        # BUG FIX: gf_dim / 2, / 4 and / 8 are floats under Python 3 and would
        # flow into Deconv's integer shape lists; cast to int as the other
        # generator_64 definitions in this file do.
        net_3 = Deconv(net_2, int(gf_dim / 2), forth, '3', batch_size)
        net_3 = tl.layers.BatchNormLayer(net_3, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_3/batch_norm')
        net_3.outputs = tf.nn.relu(net_3.outputs, name='g/net_3/relu')
        net_4 = Deconv(net_3, int(gf_dim / 4), half, '4', batch_size)
        net_4 = tl.layers.BatchNormLayer(net_4, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_4/batch_norm')
        net_4.outputs = tf.nn.relu(net_4.outputs, name='g/net_4/relu')
        # Final stage outputs a single channel; drop the channel axis afterwards.
        net_5 = Deconv(net_4, int(gf_dim / 8), output_size, '5', batch_size, f_dim_out=1)
        net_5.outputs = tf.reshape(net_5.outputs, [batch_size, output_size, output_size, output_size])
        # Squash to [0, 1] (sigmoid) or [-1, 1] (tanh).
        if sig:
            net_5.outputs = tf.nn.sigmoid(net_5.outputs)
        else:
            net_5.outputs = tf.nn.tanh(net_5.outputs)
        return (net_5, net_5.outputs)
|
def discriminator(inputs, output_size, improved=False, sig=False, is_train=True, reuse=False, batch_size=128, output_units=1):
    """Build the voxel discriminator: four strided 4x4x4 convs then a dense head.

    With improved=True, batch norm is disabled on every conv stage -- presumably
    for the improved-WGAN (gradient penalty) training mode, which is
    incompatible with batch norm; TODO confirm against the training code.

    Returns (final_layer, logits) or (final_layer, sigmoid(logits)) when sig=True.
    """
    # Add a trailing channel axis expected by the 3-D conv layers.
    inputs = tf.reshape(inputs, [batch_size, output_size, output_size, output_size, 1])
    df_dim = output_size  # base channel count tied to input resolution
    with tf.variable_scope('dis', reuse=reuse) as vs:
        net_0 = tl.layers.InputLayer(inputs, name='d/net_0/in')
        net_1 = Conv3D(net_0, df_dim, '1', f_dim_in=1, batch_norm=False)
        net_1.outputs = tf.nn.leaky_relu(net_1.outputs, alpha=0.2, name='d/net_1/lrelu')
        # Channels double each stage; batch norm only in the non-improved mode.
        net_2 = Conv3D(net_1, (df_dim * 2), '2', batch_norm=(not improved), is_train=is_train)
        net_2.outputs = tf.nn.leaky_relu(net_2.outputs, alpha=0.2, name='d/net_2/lrelu')
        net_3 = Conv3D(net_2, (df_dim * 4), '3', batch_norm=(not improved), is_train=is_train)
        net_3.outputs = tf.nn.leaky_relu(net_3.outputs, alpha=0.2, name='d/net_3/lrelu')
        net_4 = Conv3D(net_3, (df_dim * 8), '4', batch_norm=(not improved), is_train=is_train)
        net_4.outputs = tf.nn.leaky_relu(net_4.outputs, alpha=0.2, name='d/net_4/lrelu')
        # Flatten and project to the requested number of output units.
        net_5 = FlattenLayer(net_4, name='d/net_5/flatten')
        net_5 = tl.layers.DenseLayer(net_5, n_units=output_units, act=tf.identity, W_init=tf.random_normal_initializer(stddev=0.02), name='d/net_5/dense')
        if sig:
            return (net_5, tf.nn.sigmoid(net_5.outputs))
        else:
            return (net_5, net_5.outputs)
|
def make_inputs_raw(file_batch):
    """Load a batch of raw 64^3 uint8 voxel files.

    Returns (models, start_time): the models stacked into one ndarray and a
    timestamp taken after loading.
    """
    dt = np.dtype((np.uint8, (64, 64, 64)))
    models = [np.fromfile(f, dtype=dt).reshape((64, 64, 64)) for f in file_batch]
    # FIX: stack into a single ndarray for consistency with the sibling
    # make_inputs_raw definitions in this file; previously a plain Python
    # list of arrays was returned.
    models = np.array(models)
    start_time = time.time()
    return (models, start_time)
|
def load_networks(checkpoint_dir, sess, net_g, net_d, epoch=''):
    """Restore generator/discriminator weights from a pair of .npz checkpoints.

    `epoch` (when non-empty) selects epoch-suffixed files; if either file is
    missing, nothing is loaded.
    """
    print('[*] Loading checkpoints...')
    if len(epoch) >= 1:
        epoch = '_' + epoch
    paths = {
        'g': os.path.join(checkpoint_dir, 'net_g' + epoch + '.npz'),
        'd': os.path.join(checkpoint_dir, 'net_d' + epoch + '.npz'),
    }
    if not (os.path.exists(paths['g']) and os.path.exists(paths['d'])):
        print('[!] Loading checkpoints failed!')
    else:
        loaded_g = tl.files.load_npz(name=paths['g'])
        loaded_d = tl.files.load_npz(name=paths['d'])
        tl.files.assign_params(sess, loaded_g, net_g)
        tl.files.assign_params(sess, loaded_d, net_d)
        print('[*] Loading Generator and Discriminator checkpoints SUCCESS!')
|
def save_networks(checkpoint_dir, sess, net_g, net_d, epoch):
    """Save G and D weights as both 'latest' and epoch-numbered .npz files."""
    print('[*] Saving checkpoints...')
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    latest_g = os.path.join(checkpoint_dir, 'net_g.npz')
    latest_d = os.path.join(checkpoint_dir, 'net_d.npz')
    tagged_g = os.path.join(checkpoint_dir, 'net_g_%d.npz' % epoch)
    tagged_d = os.path.join(checkpoint_dir, 'net_d_%d.npz' % epoch)
    tl.files.save_npz(net_g.all_params, name=latest_g, sess=sess)
    tl.files.save_npz(net_d.all_params, name=latest_d, sess=sess)
    tl.files.save_npz(net_g.all_params, name=tagged_g, sess=sess)
    tl.files.save_npz(net_d.all_params, name=tagged_d, sess=sess)
    print('[*] Saving checkpoints SUCCESS!')
|
def save_voxels(save_dir, models, epock):
    """Write the batch's first voxel model to `<save_dir><epock>.npy`."""
    print('Saving the model')
    target = save_dir + str(epock)
    np.save(target, models[0])
|
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    """Smooth (and optionally differentiate) `y` with a Savitzky-Golay filter.

    Fits a polynomial of degree `order` in a sliding window of `window_size`
    points (a positive odd number) and evaluates the `deriv`-th derivative at
    the window centre. The signal is mirror-padded at both ends so the output
    has the same length as `y`.

    Raises:
        ValueError: if window_size or order cannot be cast to int.
        TypeError: if window_size is not a positive odd number or is too small
            for the requested polynomial order.
    """
    from math import factorial
    try:
        # BUG FIX: np.int was removed in NumPy 1.24 -- use the builtin int.
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError:
        raise ValueError('window_size and order have to be of type int')
    if (window_size % 2) != 1 or window_size < 1:
        raise TypeError('window_size size must be a positive odd number')
    if window_size < (order + 2):
        raise TypeError('window_size is too small for the polynomials order')
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # Vandermonde design matrix of the local fit; row `deriv` of its
    # pseudo-inverse gives the convolution coefficients. np.mat is deprecated,
    # so use a plain ndarray (pinv(b)[deriv] equals pinv(np.mat(b)).A[deriv]).
    b = np.array([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * (rate ** deriv) * factorial(deriv)
    # Mirror-pad both ends to suppress boundary artefacts.
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
|
def render_graphs(save_dir, epoch, track_d_loss_iter, track_d_loss, epoch_arr):
    """Plot raw and Savitzky-Golay-smoothed D loss to `<save_dir>/plots/<epoch>.png`.

    No plot is produced until more than 51 loss samples have accumulated
    (the smoother needs a 51-point window).
    """
    plots_dir = save_dir + '/plots/'
    if not os.path.exists(plots_dir):
        os.makedirs(plots_dir)
    if len(track_d_loss) > 51:
        smoothed = savitzky_golay(track_d_loss, 51, 3)
        plt.plot(epoch_arr, track_d_loss)
        plt.plot(epoch_arr, smoothed, color='red')
        plt.legend(("Discriminator's loss", 'Savitzky–Golay'), loc='upper right')
        plt.title('64-3D-IWGAN')
        plt.xlabel('Epoch')
        plt.ylabel("Discriminator's loss")
        plt.grid(True)
        plt.savefig(plots_dir + str(epoch) + '.png')
        plt.clf()
|
def save_values(save_dir, track_d_loss_iter, track_d_loss, epoch_arr):
    """Dump D-loss iteration indices, values, and epochs to `<save_dir>/plots/`."""
    for filename, data in (
        ('track_d_loss_iter', track_d_loss_iter),
        ('track_d_loss', track_d_loss),
        ('epochs', epoch_arr),
    ):
        np.save(save_dir + '/plots/' + filename, data)
|
def load_values(save_dir, valid=False):
    """Load [iter_indices, d_loss, epochs, last_iter_index] saved by `save_values`.

    The `valid` flag is accepted for interface compatibility but unused here.
    """
    iters = list(np.load(save_dir + '/plots/track_d_loss_iter.npy'))
    losses = list(np.load(save_dir + '/plots/track_d_loss.npy'))
    epochs = list(np.load(save_dir + '/plots/epochs.npy'))
    return [iters, losses, epochs, iters[-1]]
|
def Deconv(inputs, f_dim_in, dim, net, batch_size, f_dim_out=None, stride=2):
    """4x4x4 transposed conv for the generator, halving channels unless
    an explicit output channel count is given."""
    channels_out = f_dim_out if f_dim_out is not None else int(f_dim_in / 2)
    return tl.layers.DeConv3dLayer(
        inputs,
        shape=[4, 4, 4, channels_out, f_dim_in],
        output_shape=[batch_size, dim, dim, dim, channels_out],
        strides=[1, stride, stride, stride, 1],
        W_init=tf.random_normal_initializer(stddev=0.02),
        act=tf.identity,
        name='g/net_' + net + '/deconv',
    )
|
def Conv3D(inputs, f_dim_out, net, f_dim_in=None, batch_norm=False, is_train=True):
    """4x4x4 stride-2 conv for the discriminator, optionally batch-normed.

    Input channels default to half the output channel count.
    """
    channels_in = int(f_dim_out / 2) if f_dim_in is None else f_dim_in
    conv = tl.layers.Conv3dLayer(
        inputs,
        shape=[4, 4, 4, channels_in, f_dim_out],
        W_init=tf.random_normal_initializer(stddev=0.02),
        strides=[1, 2, 2, 2, 1],
        name='d/net_' + net + '/conv',
    )
    if not batch_norm:
        return conv
    return tl.layers.BatchNormLayer(conv, is_train=is_train, name='d/net_' + net + '/batch_norm')
|
def generator_64(inputs, is_train=True, reuse=False, batch_size=128, sig=False):
    """Build the 64^3-voxel generator: dense projection to a 4^3 x 512 volume,
    then four 4x4x4 transposed convs up to a single channel at 64^3.

    NOTE(review): unlike the other generator_64 variants in this file, there is
    no tanh branch -- when sig is False the raw (unsquashed) deconv output is
    returned. Confirm this is intentional for this training variant.

    Returns (final_layer, output_tensor).
    """
    # Target resolution and the intermediate resolution of each deconv stage.
    (output_size, half, forth, eighth, sixteenth) = (64, 32, 16, 8, 4)
    gf_dim = 512  # channel count at the lowest (4^3) resolution
    with tf.variable_scope('gen', reuse=reuse) as vs:
        net_0 = tl.layers.InputLayer(inputs, name='g/net_0/in')
        # Project the input to a flat 4*4*4*gf_dim vector, then reshape to a volume.
        net_1 = tl.layers.DenseLayer(net_0, n_units=(((gf_dim * sixteenth) * sixteenth) * sixteenth), W_init=tf.random_normal_initializer(stddev=0.02), act=tf.identity, name='g/net_1/dense')
        net_1 = tl.layers.ReshapeLayer(net_1, shape=[(- 1), sixteenth, sixteenth, sixteenth, gf_dim], name='g/net_1/reshape')
        net_1 = tl.layers.BatchNormLayer(net_1, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_1/batch_norm')
        net_1.outputs = tf.nn.relu(net_1.outputs, name='g/net_1/relu')
        # Deconv -> batch norm -> ReLU blocks, doubling resolution each time.
        net_2 = Deconv(net_1, gf_dim, eighth, '2', batch_size)
        net_2 = tl.layers.BatchNormLayer(net_2, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_2/batch_norm')
        net_2.outputs = tf.nn.relu(net_2.outputs, name='g/net_2/relu')
        net_3 = Deconv(net_2, int((gf_dim / 2)), forth, '3', batch_size)
        net_3 = tl.layers.BatchNormLayer(net_3, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_3/batch_norm')
        net_3.outputs = tf.nn.relu(net_3.outputs, name='g/net_3/relu')
        net_4 = Deconv(net_3, int((gf_dim / 4)), half, '4', batch_size)
        net_4 = tl.layers.BatchNormLayer(net_4, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_4/batch_norm')
        net_4.outputs = tf.nn.relu(net_4.outputs, name='g/net_4/relu')
        # Final stage outputs a single channel; drop the channel axis afterwards.
        net_5 = Deconv(net_4, int((gf_dim / 8)), output_size, '5', batch_size, f_dim_out=1)
        net_5.outputs = tf.reshape(net_5.outputs, [batch_size, output_size, output_size, output_size])
        # Optional sigmoid squash to [0, 1]; otherwise raw output (see NOTE above).
        if sig:
            net_5.outputs = tf.nn.sigmoid(net_5.outputs)
        return (net_5, net_5.outputs)
|
def discriminator(inputs, output_size, sig=False, is_train=True, reuse=False, batch_size=128, output_units=1):
    """Build the 3-D convolutional discriminator graph (TF1 + TensorLayer).

    Four Conv3D stages with leaky-ReLU activations (batch-norm on every
    stage except the first), then a flatten and a dense layer producing
    `output_units` logits.

    Args:
        inputs: voxel batch; reshaped to [batch_size, s, s, s, 1] where
            s = output_size.
        output_size: spatial edge length of the input voxels; also reused
            as the base filter count df_dim.
        sig: if True, return sigmoid probabilities as the second element
            instead of raw logits.
        is_train: train/inference switch for the batch-norm layers.
        reuse: reuse the 'dis' variable scope.
        batch_size: static batch size for the input reshape.
        output_units: width of the final dense layer (1 for a plain GAN critic).

    Returns:
        (net_5, out): the final TensorLayer layer and either
        sigmoid(logits) (sig=True) or the raw logit tensor.
    """
    inputs = tf.reshape(inputs, [batch_size, output_size, output_size, output_size, 1])
    df_dim = output_size  # base filter count; doubled at each stage below
    with tf.variable_scope('dis', reuse=reuse) as vs:
        net_0 = tl.layers.InputLayer(inputs, name='d/net_0/in')
        # First stage deliberately skips batch-norm (batch_norm=False).
        net_1 = Conv3D(net_0, df_dim, '1', f_dim_in=1, batch_norm=False)
        net_1.outputs = tf.nn.leaky_relu(net_1.outputs, alpha=0.2, name='d/net_1/lrelu')
        net_2 = Conv3D(net_1, int((df_dim * 2)), '2', batch_norm=True, is_train=is_train)
        net_2.outputs = tf.nn.leaky_relu(net_2.outputs, alpha=0.2, name='d/net_2/lrelu')
        net_3 = Conv3D(net_2, int((df_dim * 4)), '3', batch_norm=True, is_train=is_train)
        net_3.outputs = tf.nn.leaky_relu(net_3.outputs, alpha=0.2, name='d/net_3/lrelu')
        net_4 = Conv3D(net_3, int((df_dim * 8)), '4', batch_norm=True, is_train=is_train)
        net_4.outputs = tf.nn.leaky_relu(net_4.outputs, alpha=0.2, name='d/net_4/lrelu')
        net_5 = FlattenLayer(net_4, name='d/net_5/flatten')
        net_5 = tl.layers.DenseLayer(net_5, n_units=output_units, act=tf.identity, W_init=tf.random_normal_initializer(stddev=0.02), name='d/net_5/dense')
        if sig:
            return (net_5, tf.nn.sigmoid(net_5.outputs))
        else:
            return (net_5, net_5.outputs)
|
def make_inputs_raw(file_batch):
    """Load a batch of raw 64^3 uint8 voxel files as one float32 array.

    Args:
        file_batch: iterable of paths to raw binary files, each holding
            exactly 64*64*64 unsigned bytes.

    Returns:
        (models, start_time): models of shape (len(file_batch), 64, 64, 64)
        and dtype float32, plus a wall-clock timestamp taken just before
        returning.
    """
    voxel_dtype = np.dtype((np.uint8, (64, 64, 64)))
    volumes = []
    for path in file_batch:
        volumes.append(np.fromfile(path, dtype=voxel_dtype).reshape((64, 64, 64)))
    batch = np.array(volumes).astype(np.float32)
    start_time = time.time()
    return (batch, start_time)
|
def load_networks(checkpoint_dir, sess, net_g, net_d, epoch=''):
    """Restore generator and discriminator weights from .npz checkpoints.

    With a non-empty `epoch`, loads 'net_g_<epoch>.npz'/'net_d_<epoch>.npz';
    otherwise the 'latest' files 'net_g.npz'/'net_d.npz'. Prints a failure
    message (without raising) when either file is missing.
    """
    print('[*] Loading checkpoints...')
    tag = epoch
    if (len(tag) >= 1):
        tag = ('_' + tag)
    g_path = os.path.join(checkpoint_dir, (('net_g' + tag) + '.npz'))
    d_path = os.path.join(checkpoint_dir, (('net_d' + tag) + '.npz'))
    if (os.path.exists(g_path) and os.path.exists(d_path)):
        tl.files.assign_params(sess, tl.files.load_npz(name=g_path), net_g)
        tl.files.assign_params(sess, tl.files.load_npz(name=d_path), net_d)
        print('[*] Loading Generator and Discriminator checkpoints SUCCESS!')
    else:
        print('[!] Loading checkpoints failed!')
|
def save_networks(checkpoint_dir, sess, net_g, net_d, epoch):
    """Save G/D parameters both as 'latest' and as per-epoch .npz files.

    Creates checkpoint_dir if needed. Writes net_g.npz, net_d.npz and the
    epoch-stamped net_g_<epoch>.npz / net_d_<epoch>.npz.
    """
    print('[*] Saving checkpoints...')
    if (not os.path.exists(checkpoint_dir)):
        os.makedirs(checkpoint_dir)
    # (network, filename) pairs, written in the original order:
    # latest G, latest D, epoch-stamped G, epoch-stamped D.
    targets = [
        (net_g, 'net_g.npz'),
        (net_d, 'net_d.npz'),
        (net_g, ('net_g_%d.npz' % epoch)),
        (net_d, ('net_d_%d.npz' % epoch)),
    ]
    for (net, fname) in targets:
        tl.files.save_npz(net.all_params, name=os.path.join(checkpoint_dir, fname), sess=sess)
    print('[*] Saving checkpoints SUCCESS!')
|
def save_voxels(save_dir, models, epock):
    """Save the first voxel model of the batch to '<save_dir><epock>.npy'.

    Note: `save_dir` is used as a raw path prefix (np.save appends '.npy'),
    so it should normally end with a separator.
    """
    print('Saving the model')
    out_prefix = (save_dir + str(epock))
    np.save(out_prefix, models[0])
|
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    """Smooth (and optionally differentiate) *y* with a Savitzky-Golay filter.

    Fits a polynomial of the given order over a sliding window by linear
    least squares and evaluates its value (deriv=0) or its deriv-th
    derivative at the window centre. The signal is padded at both ends
    with values mirrored around the end points, so the output has the
    same length as the input.

    Fixes vs. the original: `np.int` (removed in NumPy >= 1.24) replaced
    with `int`, and the deprecated `np.mat`/`.A` matrix machinery replaced
    with plain arrays; `y` is also coerced with np.asarray so plain lists
    are accepted.

    Args:
        y: 1-D array-like signal.
        window_size: odd positive int, length of the sliding window.
        order: polynomial order; must satisfy window_size >= order + 2.
        deriv: order of the derivative to compute (0 = plain smoothing).
        rate: sample-spacing scale applied to derivatives.

    Returns:
        np.ndarray with the same length as y.

    Raises:
        ValueError: if window_size/order cannot be converted to int.
        TypeError: if window_size is even/non-positive or too small for
            the requested polynomial order (kept as TypeError for
            backward compatibility with existing callers).
    """
    from math import factorial
    try:
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError:
        raise ValueError('window_size and order have to be of type int')
    if (((window_size % 2) != 1) or (window_size < 1)):
        raise TypeError('window_size size must be a positive odd number')
    if (window_size < (order + 2)):
        raise TypeError('window_size is too small for the polynomials order')
    y = np.asarray(y)
    order_range = range(order + 1)
    half_window = ((window_size - 1) // 2)
    # Vandermonde design matrix of the window offsets; least-squares fit
    # coefficients come from its pseudo-inverse row `deriv`.
    b = np.array([[(k ** i) for i in order_range] for k in range((- half_window), (half_window + 1))])
    m = ((np.linalg.pinv(b)[deriv] * (rate ** deriv)) * factorial(deriv))
    # Mirror-pad both ends so the 'valid' convolution covers the full signal.
    firstvals = (y[0] - np.abs((y[1:(half_window + 1)][::(- 1)] - y[0])))
    lastvals = (y[(- 1)] + np.abs((y[((- half_window) - 1):(- 1)][::(- 1)] - y[(- 1)])))
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::(- 1)], y, mode='valid')
|
def render_graphs(save_dir, epoch, track_g_loss, track_d_loss, epoch_arr):
    """Plot the G/D loss curves to '<save_dir>/plots/<epoch>.png'.

    Raw curves are always drawn; Savitzky-Golay-smoothed curves (window 51,
    order 3) are added once more than 51 points have been collected.

    Bug fix: the original referenced smoothed_d_loss/smoothed_g_loss
    unconditionally, raising NameError whenever len(track_d_loss) <= 51;
    the smoothed curves (and their legend entries) are now conditional.

    Relies on the module-level `plt`, `args` and the sibling
    `savitzky_golay` helper.
    """
    plots_dir = (save_dir + '/plots/')
    if (not os.path.exists(plots_dir)):
        os.makedirs(plots_dir)
    have_smoothed = (len(track_d_loss) > 51)  # SG window is 51 points
    if have_smoothed:
        smoothed_d_loss = savitzky_golay(track_d_loss, 51, 3)
        smoothed_g_loss = savitzky_golay(track_g_loss, 51, 3)
    # Keep the original plot order so legend labels match:
    # raw D, smoothed D, raw G, smoothed G.
    plt.plot(epoch_arr, track_d_loss, color='cornflowerblue', alpha=0.5)
    if have_smoothed:
        plt.plot(epoch_arr, smoothed_d_loss, color='navy', alpha=0.5)
    plt.plot(epoch_arr, track_g_loss, color='indianred', alpha=0.5)
    if have_smoothed:
        plt.plot(epoch_arr, smoothed_g_loss, color='crimson', alpha=0.5)
    if have_smoothed:
        labels = ("Discriminator's loss", 'D-loss (Savitzky–Golay)', "Generator's loss", 'G-loss (Savitzky–Golay)')
    else:
        labels = ("Discriminator's loss", "Generator's loss")
    plt.legend(labels, loc='upper right')
    plt.title(('64-3D-RSGAN [lrG=%.5f, lrD=%.5f]' % (args.generator_learning_rate, args.discriminator_learning_rate)))
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.grid(True)
    plt.savefig(((plots_dir + str(epoch)) + '.png'))
    plt.clf()
|
def save_values(save_dir, track_g_loss, track_d_loss, epoch_arr):
    """Persist the loss and epoch histories as .npy files under <save_dir>/plots/."""
    plots_prefix = (save_dir + '/plots/')
    for (fname, values) in (('track_g_loss', track_g_loss), ('track_d_loss', track_d_loss), ('epochs', epoch_arr)):
        np.save((plots_prefix + fname), values)
|
def load_values(save_dir):
    """Load the histories written by save_values.

    Returns:
        [g_loss_list, d_loss_list, epoch_list] — each .npy file converted
        back to a plain Python list.
    """
    names = ('track_g_loss', 'track_d_loss', 'epochs')
    return [list(np.load((((save_dir + '/plots/') + name) + '.npy'))) for name in names]
|
def continue_download(is40=False):
    """Interactively confirm a large dataset download.

    Prompts for ModelNet10 by default, or ModelNet40 when is40 is True,
    and loops until the answer starts with 'y' (returns True) or 'n'
    (returns False).

    Bug fix: an empty reply used to crash with IndexError on reply[0];
    it is now treated like any other unrecognised answer and re-prompts.
    """
    queryStr = 'The ModelNet10.zip file is over 450 MB. Proceed to download (y/n): '
    if is40:
        queryStr = 'The ModelNet40.tar file is 2 GB and over 9 GB uncompressed. Proceed to download (y/n): '
    while True:
        reply = str(input(queryStr)).lower().strip()
        # startswith is empty-safe, unlike reply[0].
        if reply.startswith('y'):
            return True
        if reply.startswith('n'):
            return False
        print('please reply with y or n')
|
def query_dataset():
    """Interactively choose between ModelNet10 ('1' -> True) and the
    aligned ModelNet40 subset ('2' -> False), re-prompting otherwise.

    Bug fix: an empty reply used to crash with IndexError on reply[0];
    it now simply re-prompts.
    """
    while True:
        reply = str(input('Choose dataset, ModelNet10 (1) or manually aligned subset of the ModelNet40 (2):')).lower().strip()
        # startswith is empty-safe, unlike reply[0].
        if reply.startswith('1'):
            return True
        if reply.startswith('2'):
            return False
        print('please reply with 1 or 2')
|
def camPosToQuaternion(cx, cy, cz):
    """Build a camera-orientation quaternion from a camera position.

    The position is normalised, then a rotation of acos(cy) about the
    axis (-cz, 0, cx) is combined with fixed sqrt(2)/2 terms (presumably
    a 90-degree pre-rotation for the renderer's camera convention —
    TODO confirm against the render script).

    Returns:
        (q1, q2, q3, q4) tuple.
    """
    norm = math.sqrt((cx * cx) + (cy * cy) + (cz * cz))
    (cx, cy, cz) = ((cx / norm), (cy / norm), (cz / norm))
    (w1, w2, w3) = ((- cz), 0, cx)
    angle = math.acos(cy)
    half = (math.sqrt(2) / 2)
    c = math.cos(angle / 2)
    d = math.sin(angle / 2)
    return (
        ((half * c) - ((half * d) * w1)),
        ((half * c) + ((half * d) * w1)),
        (((half * d) * w2) + ((half * d) * w3)),
        ((((- half) * d) * w2) + ((half * d) * w3)),
    )
|
def quaternionFromYawPitchRoll(yaw, pitch, roll):
    """Convert Euler angles (radians) to a quaternion (q1, q2, q3, q4)."""
    half_angles = ((yaw / 2.0), (pitch / 2.0), (roll / 2.0))
    (c1, c2, c3) = [math.cos(a) for a in half_angles]
    (s1, s2, s3) = [math.sin(a) for a in half_angles]
    return (
        (((c1 * c2) * c3) + ((s1 * s2) * s3)),
        (((c1 * c2) * s3) - ((s1 * s2) * c3)),
        (((c1 * s2) * c3) + ((s1 * c2) * s3)),
        (((s1 * c2) * c3) - ((c1 * s2) * s3)),
    )
|
def camRotQuaternion(cx, cy, cz, theta):
    """Quaternion for a rotation of `theta` degrees about the axis opposite
    to the (normalised) direction (cx, cy, cz).

    Returns:
        (q1, q2, q3, q4) tuple.
    """
    rad = math.radians(theta)
    norm = math.sqrt((cx * cx) + (cy * cy) + (cz * cz))
    axis = (((- cx) / norm), ((- cy) / norm), ((- cz) / norm))
    half_sin = math.sin(rad * 0.5)
    return (
        math.cos(rad * 0.5),
        ((- axis[0]) * half_sin),
        ((- axis[1]) * half_sin),
        ((- axis[2]) * half_sin),
    )
|
def quaternionProduct(qx, qy):
    """Hamilton product qx * qy of two quaternions given as (w, x, y, z) 4-tuples."""
    (a, b, c, d) = (qx[0], qx[1], qx[2], qx[3])
    (e, f, g, h) = (qy[0], qy[1], qy[2], qy[3])
    return (
        ((((a * e) - (b * f)) - (c * g)) - (d * h)),
        ((((a * f) + (b * e)) + (c * h)) - (d * g)),
        ((((a * g) - (b * h)) + (c * e)) + (d * f)),
        ((((a * h) + (b * g)) - (c * f)) + (d * e)),
    )
|
def obj_centened_camera_pos(dist, azimuth_deg, elevation_deg):
    """Spherical camera coordinates (distance, azimuth/elevation in degrees)
    to Cartesian (x, y, z)."""
    phi = math.radians(float(elevation_deg))
    theta = math.radians(float(azimuth_deg))
    return (
        ((dist * math.cos(theta)) * math.cos(phi)),
        ((dist * math.sin(theta)) * math.cos(phi)),
        (dist * math.sin(phi)),
    )
|
def dataloader_msrvtt_train(args, tokenizer):
    """Build the MSR-VTT training DataLoader.

    Uses a DistributedSampler when a torch.distributed process group is
    available; otherwise falls back to plain shuffling.

    Returns:
        (dataloader, dataset_length, sampler_or_None)
    """
    dataset = MSRVTTDataset(subset='train', anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    except:
        sampler = None  # not running under torch.distributed
    loader = DataLoader(
        dataset,
        batch_size=(args.batch_size // args.world_size),
        num_workers=args.workers,
        pin_memory=False,
        shuffle=(sampler is None),
        sampler=sampler,
        drop_last=True,
    )
    return (loader, len(dataset), sampler)
|
def dataloader_msrvtt_test(args, tokenizer, subset='test'):
    """Build the MSR-VTT evaluation DataLoader (no shuffling, keep last batch).

    Returns:
        (dataloader, dataset_length)
    """
    dataset = MSRVTTDataset(subset=subset, anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    except:
        sampler = None  # not running under torch.distributed
    loader = DataLoader(
        dataset,
        batch_size=(args.batch_size_val // args.world_size),
        num_workers=args.workers,
        shuffle=False,
        sampler=sampler,
        drop_last=False,
    )
    return (loader, len(dataset))
|
def dataloader_activity_train(args, tokenizer):
    """Build the ActivityNet training DataLoader.

    Consistency fix: like dataloader_msrvtt_train, the DistributedSampler
    construction is now guarded so this also works outside a
    torch.distributed process group — the existing
    `shuffle=(train_sampler is None)` already anticipated a None sampler.

    Returns:
        (dataloader, dataset_length, sampler_or_None)
    """
    activity_dataset = ActivityNetDataset(subset='train', data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(activity_dataset)
    except:
        train_sampler = None  # not running under torch.distributed
    dataloader = DataLoader(activity_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(activity_dataset), train_sampler)
|
def dataloader_activity_test(args, tokenizer, subset='test'):
    """Build the ActivityNet evaluation DataLoader (no shuffling, keep last batch).

    Returns:
        (dataloader, dataset_length)
    """
    dataset = ActivityNetDataset(subset=subset, data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    except:
        sampler = None  # not running under torch.distributed
    loader = DataLoader(
        dataset,
        batch_size=(args.batch_size_val // args.world_size),
        num_workers=args.workers,
        shuffle=False,
        sampler=sampler,
        drop_last=False,
    )
    return (loader, len(dataset))
|
def dataloader_didemo_train(args, tokenizer):
    """Build the DiDeMo training DataLoader.

    Consistency fix: like dataloader_msrvtt_train, the DistributedSampler
    construction is now guarded so this also works outside a
    torch.distributed process group — the existing
    `shuffle=(train_sampler is None)` already anticipated a None sampler.

    Returns:
        (dataloader, dataset_length, sampler_or_None)
    """
    didemo_dataset = DiDeMoDataset(subset='train', data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(didemo_dataset)
    except:
        train_sampler = None  # not running under torch.distributed
    dataloader = DataLoader(didemo_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(didemo_dataset), train_sampler)
|
def dataloader_didemo_test(args, tokenizer, subset='test'):
    """Build the DiDeMo evaluation DataLoader (no shuffling, keep last batch).

    Returns:
        (dataloader, dataset_length)
    """
    dataset = DiDeMoDataset(subset=subset, data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    except:
        sampler = None  # not running under torch.distributed
    loader = DataLoader(
        dataset,
        batch_size=(args.batch_size_val // args.world_size),
        num_workers=args.workers,
        shuffle=False,
        sampler=sampler,
        drop_last=False,
    )
    return (loader, len(dataset))
|
def dataloader_lsmdc_train(args, tokenizer):
    """Build the LSMDC training DataLoader.

    Consistency fix: like dataloader_msrvtt_train, the DistributedSampler
    construction is now guarded so this also works outside a
    torch.distributed process group — the existing
    `shuffle=(train_sampler is None)` already anticipated a None sampler.

    Returns:
        (dataloader, dataset_length, sampler_or_None)
    """
    lsmdc_dataset = LsmdcDataset(subset='train', anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(lsmdc_dataset)
    except:
        train_sampler = None  # not running under torch.distributed
    dataloader = DataLoader(lsmdc_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(lsmdc_dataset), train_sampler)
|
def dataloader_lsmdc_test(args, tokenizer, subset='test'):
    """Build the LSMDC evaluation DataLoader (no shuffling, keep last batch).

    Returns:
        (dataloader, dataset_length)
    """
    dataset = LsmdcDataset(subset=subset, anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    except:
        sampler = None  # not running under torch.distributed
    loader = DataLoader(
        dataset,
        batch_size=(args.batch_size_val // args.world_size),
        num_workers=args.workers,
        shuffle=False,
        sampler=sampler,
        drop_last=False,
    )
    return (loader, len(dataset))
|
def dataloader_msvd_train(args, tokenizer):
    """Build the MSVD training DataLoader.

    Consistency fix: like dataloader_msrvtt_train, the DistributedSampler
    construction is now guarded so this also works outside a
    torch.distributed process group — the existing
    `shuffle=(train_sampler is None)` already anticipated a None sampler.

    Returns:
        (dataloader, dataset_length, sampler_or_None)
    """
    msvd_dataset = MsvdDataset(subset='train', anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(msvd_dataset)
    except:
        train_sampler = None  # not running under torch.distributed
    dataloader = DataLoader(msvd_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(msvd_dataset), train_sampler)
|
def dataloader_msvd_test(args, tokenizer, subset='test'):
    """Build the MSVD evaluation DataLoader (single-process: no sampler,
    full batch_size_val, no shuffling).

    Returns:
        (dataloader, dataset_length)
    """
    dataset = MsvdDataset(subset=subset, anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    loader = DataLoader(
        dataset,
        batch_size=args.batch_size_val,
        num_workers=args.workers,
        shuffle=False,
        drop_last=False,
    )
    return (loader, len(dataset))
|
class LsmdcDataset(RetrievalDataset):
    """LSMDC video-text retrieval dataset, driven by the LSMDC16 annotation csvs."""
    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32, max_frames=12, video_framerate=1, image_resolution=224, mode='all', config=None):
        super(LsmdcDataset, self).__init__(subset, anno_path, video_path, tokenizer, max_words, max_frames, video_framerate, image_resolution, mode, config=config)
        pass
    def _get_anns(self, subset='train'):
        """Parse the LSMDC16 tab-separated annotation file for self.subset.

        Each annotation line holds: clip_id, start_aligned, end_aligned,
        start_extracted, end_extracted, sentence.

        Returns:
            video_dict: OrderedDict video_id -> video file path, built by
                walking self.video_path.
            sentences_dict: OrderedDict index -> (clip_id, (sentence, None, None)),
                restricted to clips whose video file was actually found.
        """
        video_json_path_dict = {}
        video_json_path_dict['train'] = os.path.join(self.anno_path, 'LSMDC16_annos_training.csv')
        video_json_path_dict['train_test'] = os.path.join(self.anno_path, 'LSMDC16_annos_val.csv')
        video_json_path_dict['val'] = os.path.join(self.anno_path, 'LSMDC16_annos_val.csv')
        # NOTE: 'publictect' is the filename as shipped with LSMDC16 —
        # do not "fix" the typo here.
        video_json_path_dict['test'] = os.path.join(self.anno_path, 'LSMDC16_challenge_1000_publictect.csv')
        video_id_list = []
        caption_dict = {}
        with open(video_json_path_dict[self.subset], 'r') as fp:
            for line in fp:
                line = line.strip()
                line_split = line.split('\t')
                assert (len(line_split) == 6)
                (clip_id, start_aligned, end_aligned, start_extracted, end_extracted, sentence) = line_split
                # Hard-coded list of clips skipped for captions (presumably
                # missing or corrupt in the video release — TODO confirm).
                if (clip_id not in ['0017_Pianist_00.23.28.872-00.23.34.843', '0017_Pianist_00.30.36.767-00.30.38.009', '3064_SPARKLE_2012_01.41.07.000-01.41.11.793', '3087_WE_BOUGHT_A_ZOO_01.37.34.502-01.37.39.361', '3044_KNOCKED_UP_00.45.19.000-00.45.23.549', '3023_DISTRICT_9_01.12.44.778-01.12.48.729']):
                    caption_dict[len(caption_dict)] = (clip_id, (sentence, None, None))
                if (clip_id not in video_id_list):
                    video_id_list.append(clip_id)
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        # Map every annotated clip id to its actual file on disk.
        for (root, dub_dir, video_files) in os.walk(self.video_path):
            for video_file in video_files:
                video_id_ = '.'.join(video_file.split('.')[:(- 1)])
                if (video_id_ not in video_id_list):
                    continue
                file_path_ = os.path.join(root, video_file)
                video_dict[video_id_] = file_path_
        # Keep only captions whose clip has a located video file.
        for (clip_id, sentence) in caption_dict.values():
            if (clip_id not in video_dict):
                continue
            sentences_dict[len(sentences_dict)] = (clip_id, sentence)
        unique_sentence = set([v[1][0] for v in sentences_dict.values()])
        print('[{}] Unique sentence is {} , all num is {}'.format(subset, len(unique_sentence), len(sentences_dict)))
        return (video_dict, sentences_dict)
|
class MSRVTTDataset(RetrievalDataset):
    """MSR-VTT video-text retrieval dataset (9k train split / JSFUSION 1k test)."""
    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32, max_frames=12, video_framerate=1, image_resolution=224, mode='all', config=None):
        super(MSRVTTDataset, self).__init__(subset, anno_path, video_path, tokenizer, max_words, max_frames, video_framerate, image_resolution, mode, config=config)
        pass
    def _get_anns(self, subset='train'):
        """Collect MSR-VTT annotations for the given subset.

        Returns:
            video_dict: OrderedDict video_id -> '<video_path>/<video_id>.mp4'.
            sentences_dict: OrderedDict index -> (video_id, (caption, None, None)).
        """
        # Split csvs: train uses the 9k list; val and test both use the
        # JSFUSION 1k list.
        csv_path = {'train': join(self.anno_path, 'MSRVTT_train.9k.csv'), 'val': join(self.anno_path, 'MSRVTT_JSFUSION_test.csv'), 'test': join(self.anno_path, 'MSRVTT_JSFUSION_test.csv')}[subset]
        if exists(csv_path):
            csv = pd.read_csv(csv_path)
        else:
            raise FileNotFoundError
        video_id_list = list(csv['video_id'].values)
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        if (subset == 'train'):
            # Training captions come from the full MSRVTT_data.json,
            # filtered down to the video ids listed in the split csv.
            anno_path = join(self.anno_path, 'MSRVTT_data.json')
            data = json.load(open(anno_path, 'r'))
            for itm in data['sentences']:
                if (itm['video_id'] in video_id_list):
                    sentences_dict[len(sentences_dict)] = (itm['video_id'], (itm['caption'], None, None))
                    video_dict[itm['video_id']] = join(self.video_path, '{}.mp4'.format(itm['video_id']))
        else:
            # Val/test: one caption per csv row.
            for (_, itm) in csv.iterrows():
                sentences_dict[len(sentences_dict)] = (itm['video_id'], (itm['sentence'], None, None))
                video_dict[itm['video_id']] = join(self.video_path, '{}.mp4'.format(itm['video_id']))
        unique_sentence = set([v[1][0] for v in sentences_dict.values()])
        print('[{}] Unique sentence is {} , all num is {}'.format(subset, len(unique_sentence), len(sentences_dict)))
        return (video_dict, sentences_dict)
|
class MsvdDataset(RetrievalDataset):
    """MSVD video-text retrieval dataset (multiple captions per video)."""
    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32, max_frames=12, video_framerate=1, image_resolution=224, mode='all', config=None):
        super(MsvdDataset, self).__init__(subset, anno_path, video_path, tokenizer, max_words, max_frames, video_framerate, image_resolution, mode, config=config)
        pass
    def _get_anns(self, subset='train'):
        """Collect MSVD annotations for the given subset.

        Reads the split's video-id list and the pickled raw-captions dict,
        then pairs every caption with its video.

        Returns:
            video_dict: OrderedDict video_id -> file path found under
                self.video_path.
            sentences_dict: OrderedDict index -> (video_id, (caption, None, None)).
        """
        self.sample_len = 0
        # cut_off_points records the last sentence index of each video —
        # presumably so the evaluation can regroup the multiple captions
        # per video (see multi_sentence_per_video) — TODO confirm.
        self.cut_off_points = []
        self.multi_sentence_per_video = True
        video_id_path_dict = {}
        video_id_path_dict['train'] = os.path.join(self.anno_path, 'train_list.txt')
        video_id_path_dict['val'] = os.path.join(self.anno_path, 'val_list.txt')
        video_id_path_dict['test'] = os.path.join(self.anno_path, 'test_list.txt')
        caption_file = os.path.join(self.anno_path, 'raw-captions.pkl')
        with open(video_id_path_dict[subset], 'r') as fp:
            video_ids = [itm.strip() for itm in fp.readlines()]
        with open(caption_file, 'rb') as f:
            captions = pickle.load(f)
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        # Locate the actual media file for every id in the split.
        for (root, dub_dir, video_files) in os.walk(self.video_path):
            for video_file in video_files:
                video_id_ = '.'.join(video_file.split('.')[:(- 1)])
                if (video_id_ not in video_ids):
                    continue
                file_path_ = os.path.join(root, video_file)
                video_dict[video_id_] = file_path_
        # One sentences_dict entry per caption; caption tokens are joined
        # back into a plain sentence string.
        for video_id in video_ids:
            assert (video_id in captions)
            for cap in captions[video_id]:
                cap_txt = ' '.join(cap)
                sentences_dict[len(sentences_dict)] = (video_id, (cap_txt, None, None))
            self.cut_off_points.append((len(sentences_dict) - 1))
        if ((subset == 'val') or (subset == 'test')):
            # Bookkeeping consumed by the evaluation path.
            self.sentence_num = len(sentences_dict)
            self.video_num = len(video_ids)
            assert (len(self.cut_off_points) == self.video_num)
            print('For {}, sentence number: {}'.format(subset, self.sentence_num))
            print('For {}, video number: {}'.format(subset, self.video_num))
        print('Video number: {}'.format(len(video_dict)))
        print('Total Paire: {}'.format(len(sentences_dict)))
        self.sample_len = len(sentences_dict)
        return (video_dict, sentences_dict)
|
def _interpolation(kwargs):
    """Pop the 'resample' option from kwargs, defaulting to bilinear.

    A list/tuple of methods means "pick one at random" (interpolation
    jitter for augmentation).
    """
    method = kwargs.pop('resample', Image.BILINEAR)
    if isinstance(method, (list, tuple)):
        return random.choice(method)
    return method
|
def _check_args_tf(kwargs):
    """Normalise PIL transform kwargs in place.

    Drops 'fillcolor' when the installed PIL predates 5.0 (which added
    it), and resolves 'resample' via _interpolation.
    """
    if (('fillcolor' in kwargs) and (_PIL_VER < (5, 0))):
        del kwargs['fillcolor']
    kwargs['resample'] = _interpolation(kwargs)
|
def shear_x(img, factor, **kwargs):
    """Shear the image horizontally by `factor` via an affine transform."""
    _check_args_tf(kwargs)
    matrix = (1, factor, 0, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
|
def shear_y(img, factor, **kwargs):
    """Shear the image vertically by `factor` via an affine transform."""
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, factor, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
|
def translate_x_rel(img, pct, **kwargs):
    """Translate the image horizontally by `pct` of its width."""
    offset = (pct * img.size[0])
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, offset, 0, 1, 0), **kwargs)
|
def translate_y_rel(img, pct, **kwargs):
    """Translate the image vertically by `pct` of its height."""
    offset = (pct * img.size[1])
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, offset), **kwargs)
|
def translate_x_abs(img, pixels, **kwargs):
    """Translate the image horizontally by an absolute number of pixels."""
    _check_args_tf(kwargs)
    matrix = (1, 0, pixels, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
|
def translate_y_abs(img, pixels, **kwargs):
    """Translate the image vertically by an absolute number of pixels."""
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, 0, 1, pixels)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
|
def rotate(img, degrees, **kwargs):
    """Rotate the image by `degrees`, dispatching on the installed PIL version.

    PIL >= 5.2 supports the needed kwargs on Image.rotate directly;
    PIL 5.0-5.1 gets a hand-built affine matrix (rotation about the image
    centre) applied through Image.transform; older PIL falls back to a
    plain rotate with only the resample option.
    """
    _check_args_tf(kwargs)
    if (_PIL_VER >= (5, 2)):
        return img.rotate(degrees, **kwargs)
    elif (_PIL_VER >= (5, 0)):
        (w, h) = img.size
        post_trans = (0, 0)
        rotn_center = ((w / 2.0), (h / 2.0))
        # PIL's transform matrix maps output coords back to input coords,
        # hence the negated angle.
        angle = (- math.radians(degrees))
        matrix = [round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0, round((- math.sin(angle)), 15), round(math.cos(angle), 15), 0.0]
        def transform(x, y, matrix):
            # Apply the affine matrix (a, b, c, d, e, f) to the point (x, y).
            (a, b, c, d, e, f) = matrix
            return ((((a * x) + (b * y)) + c), (((d * x) + (e * y)) + f))
        # Translate so the rotation pivots about the image centre.
        (matrix[2], matrix[5]) = transform(((- rotn_center[0]) - post_trans[0]), ((- rotn_center[1]) - post_trans[1]), matrix)
        matrix[2] += rotn_center[0]
        matrix[5] += rotn_center[1]
        return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
    else:
        return img.rotate(degrees, resample=kwargs['resample'])
|
def auto_contrast(img, **__):
    """Maximise image contrast via ImageOps.autocontrast; extra kwargs ignored."""
    return ImageOps.autocontrast(img)
|
def invert(img, **__):
    """Invert (negate) the image via ImageOps.invert; extra kwargs ignored."""
    return ImageOps.invert(img)
|
def equalize(img, **__):
    """Equalize the image histogram via ImageOps.equalize; extra kwargs ignored."""
    return ImageOps.equalize(img)
|
def solarize(img, thresh, **__):
    """Invert all pixel values above `thresh` (ImageOps.solarize); extra kwargs ignored."""
    return ImageOps.solarize(img, thresh)
|
def solarize_add(img, add, thresh=128, **__):
    """Add `add` to every pixel value below `thresh`, clamping at 255.

    Applied via a lookup table; only 'L' and 'RGB' images are modified,
    any other mode is returned unchanged. Extra kwargs are ignored.
    """
    lut = [min(255, (i + add)) if (i < thresh) else i for i in range(256)]
    if (img.mode not in ('L', 'RGB')):
        return img
    if (img.mode == 'RGB'):
        lut = (lut * 3)  # replicate the table for each channel
    return img.point(lut)
|
def posterize(img, bits_to_keep, **__):
    """Reduce the image to `bits_to_keep` bits per channel; >= 8 is a no-op.

    Extra kwargs are ignored.
    """
    if (bits_to_keep >= 8):
        return img
    return ImageOps.posterize(img, bits_to_keep)
|
def contrast(img, factor, **__):
    """Adjust image contrast by `factor` (1.0 = unchanged); extra kwargs ignored."""
    return ImageEnhance.Contrast(img).enhance(factor)
|
def color(img, factor, **__):
    """Adjust color saturation by `factor` (1.0 = unchanged); extra kwargs ignored."""
    return ImageEnhance.Color(img).enhance(factor)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.