code stringlengths 17 6.64M |
|---|
@provides('realnvp')
def realnvp(dataset, model, use_baseline):
    """Flat RealNVP configuration with coupler nets shared across layers."""
    return {
        'schema_type': 'flat-realnvp',
        'num_density_layers': 20,
        'coupler_shared_nets': True,
        'coupler_hidden_channels': [1024, 1024],
        'st_nets': [100, 100],
        'p_nets': [100, 100],
        'q_nets': [100, 100],
    }
|
@provides('sos')
def sos(dataset, model, use_baseline):
    """SOS flow configuration (baseline only)."""
    assert use_baseline, 'A CIF version of this config has not yet been tested'
    return {
        'schema_type': 'sos',
        'num_density_layers': 8,
        'g_hidden_channels': [200, 200],
        'num_polynomials_per_layer': 5,
        'polynomial_degree': 4,
        'lr': 0.001,
        'opt': 'sgd',
    }
|
@provides('nsf-ar')
def nsf(dataset, model, use_baseline):
    """Autoregressive neural spline flow config, tuned per tabular dataset."""
    common = {
        'schema_type': 'nsf',
        'autoregressive': True,
        'num_density_layers': 10,
        'tail_bound': 3,
        'batch_norm': False,
        'opt': 'adam',
        'lr_schedule': 'cosine',
        'weight_decay': 0.0,
        'early_stopping': False,
        'max_grad_norm': 5,
        'valid_batch_size': 5000,
        'test_batch_size': 5000,
        'epochs_per_test': 5,
    }

    # Per-dataset (dropout probability, dataset size) for the large datasets.
    large_datasets = {
        'power': (0.0, 1615917),
        'gas': (0.1, 852174),
        'hepmass': (0.2, 315123),
        'bsds300': (0.2, 1000000),
    }

    if dataset in large_datasets:
        dropout, dset_size = large_datasets[dataset]
        batch_size = 512
        train_steps = 400000
        config = {
            'lr': 0.0005,
            'num_hidden_layers': 2,
            'num_hidden_channels': 512 if dataset == 'bsds300' else 256,
            'num_bins': 8,
            'dropout_probability': dropout,
            'st_nets': [100] * 3,
            'p_nets': [200] * 3,
            'q_nets': [10] * 2,
        }
    elif dataset == 'miniboone':
        dset_size = 29556
        batch_size = 64
        train_steps = 250000
        config = {
            'lr': 0.0003,
            'num_hidden_layers': 1,
            'num_hidden_channels': 64,
            'num_bins': 4,
            'dropout_probability': 0.2,
            'st_nets': [25] * 3,
            'p_nets': [50] * 3,
            'q_nets': [10] * 2,
        }
    else:
        assert False, f'Invalid dataset {dataset}'

    # Convert a step budget into an epoch budget, rounding to the nearest epoch.
    steps_per_epoch = dset_size // batch_size
    epochs = int(train_steps / steps_per_epoch + 0.5)

    return {**common, **config, 'max_epochs': epochs, 'train_batch_size': batch_size}
|
@base
def config(dataset, use_baseline):
    """Base configuration shared by all models; CIF runs add cond-affine layers."""
    return {
        'num_u_channels': 1,
        'use_cond_affine': not use_baseline,
        'pure_cond_affine': False,

        'dequantize': False,
        'batch_norm': False,
        'act_norm': False,

        'max_epochs': 2000,
        'max_grad_norm': None,
        'early_stopping': True,
        'max_bad_valid_epochs': 250,

        'train_batch_size': 1000,
        'valid_batch_size': 1000,
        'test_batch_size': 10000,

        'opt': 'adam',
        'lr': 0.001,
        'lr_schedule': 'none',
        'weight_decay': 0.0,

        'epochs_per_test': 5,

        'train_objective': 'iwae',
        'num_train_importance_samples': 1,
        'num_valid_importance_samples': 10,
        'num_test_importance_samples': 100,
    }
|
@provides('resflow')
def resflow(dataset, model, use_baseline):
    """Flat residual flow configuration."""
    config = {
        'schema_type': 'flat-resflow',
        'num_density_layers': 10,
        'hidden_channels': [128] * 4,
        'lipschitz_constant': 0.9,
        'max_train_lipschitz_iters': 5,
        'max_test_lipschitz_iters': 200,
        'lipschitz_tolerance': None,
        'reduce_memory': True,
        'st_nets': [10] * 2,
        'p_nets': [10] * 2,
        'q_nets': [10] * 2,
        'max_epochs': 20000,
        'max_bad_valid_epochs': 20000,
    }
    # CIF runs evaluate with a smaller test batch.
    if not use_baseline:
        config['test_batch_size'] = 1000
    return config
|
@provides('affine')
def affine(dataset, model, use_baseline):
    """Stacked affine baseline (no CIF variant)."""
    assert use_baseline, 'Must use baseline model for this config'
    return {
        'schema_type': 'affine',
        'num_density_layers': 10,
    }
|
@provides('maf')
def maf(dataset, model, use_baseline):
    """MAF configuration; the CIF variant uses fewer density layers."""
    num_layers = 20 if use_baseline else 5
    return {
        'schema_type': 'maf',
        'num_density_layers': num_layers,
        'ar_map_hidden_channels': [50] * 4,
        'st_nets': [10] * 2,
        'p_nets': [50] * 4,
        'q_nets': [50] * 4,
    }
|
@provides('maf-grid')
def maf_grid(dataset, model, use_baseline):
    """MAF grid search over small vs large hidden-net sizes."""
    num_layers = 20 if use_baseline else 5
    return {
        'schema_type': 'maf',
        'num_density_layers': num_layers,
        'ar_map_hidden_channels': GridParams([10] * 2, [50] * 4),
        'num_u_channels': 2,
        'st_nets': GridParams([10] * 2, [50] * 4),
        'p_nets': [10] * 2,
        'q_nets': [50] * 4,
    }
|
@provides('cond-affine-shallow-grid', 'cond-affine-deep-grid')
def cond_affine_grid(dataset, model, use_baseline):
    """Pure cond-affine grid: deep uses 5 layers with thin nets, shallow 1 layer
    with nets 5x deeper, keeping total capacity comparable."""
    assert not use_baseline, 'Cannot use baseline model for this config'

    if 'deep' in model:
        num_layers, net_factor = 5, 1
    else:
        num_layers, net_factor = 1, 5

    return {
        'schema_type': 'cond-affine',
        'num_density_layers': num_layers,
        'num_u_channels': 2,
        'st_nets': GridParams([10] * 2 * net_factor, [50] * 4 * net_factor),
        'p_nets': [10] * 2 * net_factor,
        'q_nets': [50] * 4 * net_factor,
    }
|
@provides('dlgm-deep', 'dlgm-shallow')
def dlgm_deep(dataset, model, use_baseline):
    """DLGM config: reuse the cond-affine grid, but fix s to a constant and t
    to the identity instead of learned st-nets."""
    assert not use_baseline, 'Cannot use baseline model for this config'

    variant = 'cond-affine-deep-grid' if 'deep' in model else 'cond-affine-shallow-grid'
    config = cond_affine_grid(dataset=dataset, model=variant, use_baseline=False)

    # Replace the learned st-couplers with fixed maps.
    del config['st_nets']
    config['s_nets'] = 'fixed-constant'
    config['t_nets'] = 'identity'

    return config
|
@provides('realnvp')
def realnvp(dataset, model, use_baseline):
    """Small flat RealNVP configuration with a conditional affine layer."""
    return {
        'schema_type': 'flat-realnvp',
        'num_density_layers': 1,
        'coupler_shared_nets': True,
        'coupler_hidden_channels': [10, 10],
        'use_cond_affine': True,
        'st_nets': [10, 10],
        'p_nets': [10, 10],
        'q_nets': [10, 10],
    }
|
@provides('sos')
def sos(dataset, model, use_baseline):
    """Small SOS flow configuration; CIF variant uses one fewer layer."""
    num_layers = 3 if use_baseline else 2
    return {
        'schema_type': 'sos',
        'num_density_layers': num_layers,
        'g_hidden_channels': [40] * 2,
        'num_polynomials_per_layer': 2,
        'polynomial_degree': 4,
        'st_nets': [40] * 2,
        'p_nets': [40] * 4,
        'q_nets': [40] * 4,
    }
|
@provides('planar')
def planar(dataset, model, use_baseline):
    """Planar flow configuration (cond-affine layers disabled)."""
    return {
        'schema_type': 'planar',
        'num_density_layers': 10,
        'use_cond_affine': False,
        'cond_hidden_channels': [10] * 2,
        'p_nets': [50] * 4,
        'q_nets': [10] * 2,
    }
|
@provides('nsf-ar')
def nsf(dataset, model, use_baseline):
    """Small autoregressive neural spline flow configuration."""
    return {
        'schema_type': 'nsf',
        'autoregressive': True,
        'use_linear': False,
        'max_grad_norm': 5,
        'num_density_layers': 5,
        'num_bins': 8,
        'num_hidden_channels': 256,
        'num_hidden_layers': 2,
        'tail_bound': 3,
        'dropout_probability': 0.0,
        'lr_schedule': 'cosine',
        'lr': 0.0005,
        'max_epochs': 1000,
        'st_nets': [10] * 2,
        'p_nets': [10] * 2,
        'q_nets': [10] * 2,
    }
|
@provides('bnaf')
def bnaf(dataset, model, use_baseline):
    """Block neural autoregressive flow configuration."""
    hidden_factor = 50 if use_baseline else 45
    return {
        'schema_type': 'bnaf',
        'num_density_layers': 1,
        'num_hidden_layers': 2,
        'hidden_channels_factor': hidden_factor,
        'activation': 'soft-leaky-relu',
        'st_nets': [24] * 2,
        'p_nets': [24] * 3,
        'q_nets': [24] * 3,
        'test_batch_size': 1000,
    }
|
@provides('ffjord')
def ffjord(dataset, model, use_baseline):
    """FFJORD configuration — currently disabled on purpose."""
    raise NotImplementedError('Currently broken; require changes in experiment.py')
    # Unreachable: kept so the intended config survives until experiment.py is fixed.
    return {
        'schema_type': 'ffjord',
        'num_density_layers': 1,
        'hidden_channels': [64] * 3,
        'numerical_tolerance': 1e-05,
        'st_nets': [24] * 2,
        'p_nets': [24] * 3,
        'q_nets': [24] * 3,
    }
|
def parse_config_arg(key_value):
    """Parse a `key=value` command-line override into a `(key, value)` pair.

    The value is interpreted as a Python literal when possible (numbers,
    lists, booleans, ...); otherwise it is kept as the raw string.

    Raises:
        AssertionError: if the argument has no '=' or an empty key/value.
    """
    assert '=' in key_value, "Must specify config items with format `key=value'"

    k, v = key_value.split('=', maxsplit=1)

    assert k, "Config item can't have empty key"
    assert v, "Config item can't have empty value"

    try:
        v = ast.literal_eval(v)
    except (ValueError, SyntaxError):
        # Not a valid Python literal (e.g. a bare string such as `adam`, or
        # malformed syntax such as `[1,`): keep the raw string. The original
        # only caught ValueError, so malformed literals raised SyntaxError
        # instead of falling back to a string.
        pass

    return (k, v)
|
def test_cif_realnvp_config():
    """The CIF multiscale RealNVP config for MNIST must match this snapshot."""
    expected = {
        'schema_type': 'multiscale-realnvp',
        'use_cond_affine': True,
        'pure_cond_affine': False,
        'g_hidden_channels': [64, 64, 64, 64],
        'num_u_channels': 1,
        'st_nets': [8, 8],
        'p_nets': [64, 64],
        'q_nets': [64, 64],
        'early_stopping': True,
        'train_batch_size': 100,
        'valid_batch_size': 500,
        'test_batch_size': 500,
        'opt': 'adam',
        'lr': 0.0001,
        'weight_decay': 0.0,
        'logit_tf_lambda': 1e-06,
        'logit_tf_scale': 256,
        'dequantize': True,
        'act_norm': False,
        'batch_norm': True,
        'batch_norm_apply_affine': False,
        'batch_norm_use_running_averages': True,
        'batch_norm_momentum': 0.1,
        'lr_schedule': 'none',
        'max_bad_valid_epochs': 50,
        'max_grad_norm': None,
        'max_epochs': 1000,
        'epochs_per_test': 1,
        'train_objective': 'iwae',
        'num_train_importance_samples': 1,
        'num_valid_importance_samples': 5,
        'num_test_importance_samples': 10,
    }
    actual = get_config(dataset='mnist', model='realnvp', use_baseline=False)
    assert actual == expected
|
def test_baseline_realnvp_config():
    """The baseline multiscale RealNVP config for MNIST must match this snapshot."""
    expected = {
        'schema_type': 'multiscale-realnvp',
        'use_cond_affine': False,
        'pure_cond_affine': False,
        'g_hidden_channels': [64, 64, 64, 64, 64, 64, 64, 64],
        'num_u_channels': 0,
        'early_stopping': True,
        'train_batch_size': 100,
        'valid_batch_size': 500,
        'test_batch_size': 500,
        'opt': 'adam',
        'lr': 0.0001,
        'weight_decay': 0.0,
        'logit_tf_lambda': 1e-06,
        'logit_tf_scale': 256,
        'dequantize': True,
        'act_norm': False,
        'batch_norm': True,
        'batch_norm_apply_affine': True,
        'batch_norm_use_running_averages': True,
        'batch_norm_momentum': 0.1,
        'lr_schedule': 'none',
        'max_bad_valid_epochs': 50,
        'max_grad_norm': None,
        'max_epochs': 1000,
        'epochs_per_test': 1,
        'train_objective': 'iwae',
        'num_train_importance_samples': 1,
        'num_valid_importance_samples': 1,
        'num_test_importance_samples': 1,
    }
    actual = get_config(dataset='mnist', model='realnvp', use_baseline=True)
    assert actual == expected
|
class TestDiagonalGaussianDensity(unittest.TestCase):
    """Checks DiagonalGaussianDensity against scipy's multivariate normal."""

    def setUp(self):
        # Multi-axis event shape, to exercise flattening inside the density.
        self.shape = (10, 4, 2)
        self.mean = torch.rand(self.shape)
        # 1 + rand**2 keeps every stddev in [1, 2), strictly positive.
        self.stddev = (1 + (torch.rand(self.shape) ** 2))
        self.density = DiagonalGaussianDensity(self.mean, self.stddev, num_fixed_samples=64)
        # Reference density: scipy multivariate normal over the flattened
        # event, with the per-dimension variances as a diagonal covariance.
        flat_mean = self.mean.flatten().numpy()
        flat_vars = (self.stddev ** 2).flatten().numpy()
        self.scipy_density = stats.multivariate_normal(mean=flat_mean, cov=flat_vars)

    def test_elbo(self):
        # With a single importance sample, log-w should equal the exact log-density.
        batch_size = 1000
        num_importance_samples = 1
        noise = torch.rand(batch_size, *self.shape)
        with torch.no_grad():
            log_prob = self.density.elbo(noise, num_importance_samples=num_importance_samples)['log-w']
        flat_noise = noise.flatten(start_dim=1).numpy()
        scipy_log_prob = self.scipy_density.logpdf(flat_noise).reshape(batch_size, 1, num_importance_samples)
        self.assertEqual(log_prob.shape, scipy_log_prob.shape)
        self.assertLessEqual(abs((log_prob.numpy() - scipy_log_prob).max()), 0.0001)

    def test_samples(self):
        # Empirical moments of drawn samples must match the true Gaussian moments.
        num_samples = 100000
        samples = self.density.sample(num_samples)
        self.assertEqual(samples.shape, (num_samples, *self.shape))
        flat_samples = samples.flatten(start_dim=1)
        flat_mean = self.mean.flatten()
        flat_stddev = self.stddev.flatten()
        self._assert_moments_accurate(flat_samples, flat_mean, flat_stddev)

    def _assert_moments_accurate(self, flat_samples, flat_mean, flat_stddev):
        # Compares the first `num_moments` raw moments per dimension against
        # scipy's exact values, tolerating a small fraction of misses due to
        # sampling noise.
        num_moments = 4
        eps = 0.5
        (_, dim) = flat_samples.shape
        tot_errors = 0
        tot_trials = 0
        for m in range(1, (num_moments + 1)):
            # Empirical m-th raw moment of each dimension.
            moments = torch.mean((flat_samples ** m), dim=0)
            for i in range(dim):
                tot_trials += 1
                ground_truth = stats.norm.moment(m, loc=flat_mean[i], scale=flat_stddev[i])
                if ((ground_truth - moments[i]).abs() > eps):
                    tot_errors += 1
        # At most 5% of (moment, dimension) checks may exceed the tolerance.
        self.assertLess((tot_errors / tot_trials), 0.05)
|
class TestDiagonalGaussianConditionalDensity(unittest.TestCase):
    """Checks DiagonalGaussianConditionalDensity against scipy's normal."""

    def setUp(self):
        dim = 25
        cond_dim = 15
        self.shape = (dim,)
        self.cond_shape = (cond_dim,)
        # The MLP maps the conditioning input to 2*dim channels, which the
        # coupler chunks into a shift (mean) and a log-scale (log-stddev).
        self.mean_log_std_map = ChunkedSharedCoupler(shift_log_scale_net=get_mlp(num_input_channels=cond_dim, hidden_channels=[10, 10, 10], num_output_channels=(2 * dim), activation=nn.Tanh))
        self.density = DiagonalGaussianConditionalDensity(self.mean_log_std_map)

    def test_log_prob(self):
        # log-prob must equal the sum of per-dimension scipy normal log-pdfs.
        batch_size = 100
        inputs = torch.rand(batch_size, *self.shape)
        cond_inputs = torch.rand(batch_size, *self.cond_shape)
        with torch.no_grad():
            log_prob = self.density.log_prob(inputs, cond_inputs)['log-prob']
            mean_log_std = self.mean_log_std_map(cond_inputs)
        means = mean_log_std['shift']
        stds = torch.exp(mean_log_std['log-scale'])
        scipy_log_probs = stats.norm.logpdf(inputs, loc=means, scale=stds)
        # Independent dimensions: the joint log-prob is the sum over the event.
        scipy_log_prob = scipy_log_probs.reshape((batch_size, (- 1))).sum(axis=1, keepdims=True)
        self.assertLessEqual(abs((log_prob.numpy() - scipy_log_prob).max()), 0.0001)

    def test_samples(self):
        # Empirical raw moments of conditional samples must match scipy's.
        batch_size = 10
        num_samples = 10000
        num_moments = 2
        cond_inputs = torch.rand(batch_size, *self.cond_shape)
        with torch.no_grad():
            # repeat_interleave yields num_samples contiguous copies per input.
            result = self.density.sample(cond_inputs.repeat_interleave(num_samples, dim=0))
            mean_log_std = self.mean_log_std_map(cond_inputs)
        samples = result['sample']
        self.assertEqual(samples.shape, ((batch_size * num_samples), *self.shape))
        samples = samples.view(batch_size, num_samples, *self.shape)
        means = mean_log_std['shift'].flatten()
        stds = torch.exp(mean_log_std['log-scale']).flatten()
        for m in range(1, (num_moments + 1)):
            # Average over the sample axis to estimate the m-th raw moment.
            moments = torch.mean((samples ** m), dim=1)
            ground_truth = torch.empty_like(moments)
            for (i, x) in enumerate(moments.flatten()):
                ground_truth.view((- 1))[i] = stats.norm.moment(m, loc=means[i], scale=stds[i])
            errs = (moments - ground_truth).abs()
            # Loose bound per entry, tighter bound on the average error.
            self.assertLess(errs.max(), 0.5)
            self.assertLess(errs.mean(), 0.05)
|
class TestCIFDensity(unittest.TestCase):
    """Shape checks for the CIF density's ELBO output."""

    def test_log_prob_format(self):
        batch_size = 1000
        x_dim = 40
        u_dim = 15
        num_importance_samples = 5
        input_shape = (x_dim,)

        prior = DiagonalGaussianDensity(
            mean=torch.zeros(input_shape),
            stddev=torch.ones(input_shape))

        p_u_density = self._u_density(u_dim, x_dim)

        coupler_config = {
            'independent_nets': False,
            'shift_log_scale_net': {
                'type': 'mlp',
                'hidden_channels': [40, 30],
                'activation': 'tanh',
            },
        }
        bijection = ConditionalAffineBijection(
            x_shape=input_shape,
            coupler=get_coupler(
                input_shape=(u_dim,),
                num_channels_per_output=x_dim,
                config=coupler_config))

        q_u_density = self._u_density(u_dim, x_dim)

        density = CIFDensity(
            prior=prior,
            p_u_density=p_u_density,
            bijection=bijection,
            q_u_density=q_u_density)

        x = torch.rand(batch_size, *input_shape)
        elbo = density.elbo(x, num_importance_samples=num_importance_samples)['log-w']

        # One log-weight per (input, importance sample) pair.
        self.assertEqual(elbo.shape, (batch_size, num_importance_samples, 1))

    def _u_density(self, u_dim, x_dim):
        # Diagonal Gaussian over u, conditioned on x through a small MLP that
        # outputs a chunked shift/log-scale pair.
        net = get_mlp(
            num_input_channels=x_dim,
            hidden_channels=[10, 10, 10],
            num_output_channels=(2 * u_dim),
            activation=nn.Tanh)
        return DiagonalGaussianConditionalDensity(
            coupler=ChunkedSharedCoupler(shift_log_scale_net=net))
|
class TestConcreteConditionalDensity(unittest.TestCase):
    """Basic sampling checks for ConcreteConditionalDensity."""

    def setUp(self):
        self.shape = (25,)
        self.cond_shape = (5,)
        # Random positive temperature for the Concrete distribution.
        self.lam = torch.exp(torch.rand(1))
        self.log_alpha_map = get_mlp(
            num_input_channels=np.prod(self.cond_shape),
            hidden_channels=[10, 10, 10],
            num_output_channels=np.prod(self.shape),
            activation=nn.Tanh)
        self.density = ConcreteConditionalDensity(self.log_alpha_map, self.lam)

    def test_samples(self):
        # Sampling must preserve the (batch * repeats, *event) shape.
        batch_size = 10
        num_samples = 10000
        cond_inputs = torch.rand(batch_size, *self.cond_shape)
        repeated = cond_inputs.repeat_interleave(num_samples, dim=0)
        with torch.no_grad():
            samples = self.density.sample(repeated)['sample']
        self.assertEqual(samples.shape, ((batch_size * num_samples), *self.shape))
|
def load_schema(name):
    """Load the reference schema stored at tests/schemas/<name>.json."""
    schema_path = Path('tests') / 'schemas' / f'{name}.json'
    with open(schema_path, 'r') as f:
        return json.load(f)
|
def test_baseline_multiscale_realnvp_schema():
    """Baseline multiscale RealNVP schema must match the stored snapshot."""
    baseline_config = get_config(dataset='mnist', model='realnvp', use_baseline=True)
    expected = load_schema('realnvp_schema')
    assert get_schema(baseline_config) == expected
|
def test_cif_multiscale_realnvp_schema():
    """CIF multiscale RealNVP schema must match the stored snapshot."""
    cif_config = get_config(dataset='mnist', model='realnvp', use_baseline=False)
    expected = load_schema('cif_realnvp_schema')
    assert get_schema(cif_config) == expected
|
def main(args=None):
    """Entry point: build the run config (from CLI or a dict) and run training.

    Args:
        args: Optional dict of argument overrides. When None, arguments are
            parsed from the command line.
    """
    if (args is None):
        parser = argparse.ArgumentParser()
        parser.add_argument('--game', type=str)
        parser.add_argument('--config', type=str, default='default')
        parser.add_argument('--seed', type=int, default=None)
        parser.add_argument('--device', type=str, default='cuda')
        parser.add_argument('--buffer_device', type=str, default=None)
        parser.add_argument('--cpu_p', type=float, default=0.5)
        parser.add_argument('--wandb', type=str, default='disabled')
        parser.add_argument('--project', type=str, default=None)
        parser.add_argument('--group', type=str, default=None)
        parser.add_argument('--save', action='store_true', default=False)
        args = parser.parse_args()
    else:
        # Allow programmatic invocation with a plain dict of arguments.
        args = argparse.Namespace(**args)
    # Seed all RNGs only when a seed was explicitly requested.
    if (args.seed is not None):
        seed = args.seed
        random.seed(seed)
        torch.manual_seed(seed)
        np.random.seed(seed)
    # Let cuDNN auto-tune kernels (non-deterministic but faster).
    torch.backends.cudnn.benchmark = True
    if __debug__:
        print('Running in debug mode, consider using the -O python flag to improve performance')
    wandb.require(experiment='service')
    # The replay buffer lives on the model device unless overridden.
    buffer_device = (args.buffer_device if (args.buffer_device is not None) else args.device)
    config = deepcopy(CONFIGS[args.config])
    config.update({'game': args.game, 'seed': args.seed, 'model_device': args.device, 'buffer_device': buffer_device, 'cpu_p': args.cpu_p, 'save': args.save})
    # wandb.config is taken as the source of truth (sweeps may override values).
    wandb.init(config=config, project=args.project, group=args.group, mode=args.wandb)
    config = dict(wandb.config)
    trainer = Trainer(config)
    trainer.print_stats()
    try:
        trainer.run()
    finally:
        # Always release trainer resources, even if training raises.
        trainer.close()
|
def update_metrics(metrics, new_metrics, prefix=None):
    """Validate entries of `new_metrics` and merge them into `metrics` in place.

    Plain ints/floats are stored as-is; tensors must be grad-free scalars
    (0-d or shape (1,)) and are cloned before storing. Keys optionally get
    `prefix` prepended. Returns the updated `metrics` dict.
    """
    def _checked(key, value):
        if isinstance(value, (int, float)):
            return value
        assert torch.is_tensor(value), key
        assert not value.requires_grad, key
        assert value.ndim == 0 or value.shape == (1,), key
        # Clone so later in-place edits to the source tensor don't leak in.
        return value.clone()

    for key, value in new_metrics.items():
        out_key = key if prefix is None else f'{prefix}{key}'
        metrics[out_key] = _checked(key, value)
    return metrics
|
def combine_metrics(metrics, prefix=None):
    """Merge a list of metric dicts into one; `prefix` is an optional parallel
    list of key prefixes, one per dict."""
    result = {}
    if prefix is None:
        for metric_dict in metrics:
            update_metrics(result, metric_dict)
        return result
    for metric_dict, key_prefix in zip(metrics, prefix):
        update_metrics(result, metric_dict, key_prefix)
    return result
|
def mean_metrics(metrics_history, except_keys=None):
    """Average each metric across a list of metric dicts.

    Keys in `except_keys` and WBValue entries are not averaged; their last
    seen value wins. A history of length 0 or 1 short-circuits.
    """
    if not metrics_history:
        return {}
    if len(metrics_history) == 1:
        return metrics_history[0]

    skip = set(except_keys) if except_keys is not None else set()
    result = {}
    value_history = collections.defaultdict(list)

    for metrics in metrics_history:
        for key, value in metrics.items():
            if key in skip or isinstance(value, WBValue):
                result[key] = value
            else:
                value_history[key].append(value)

    for key, values in value_history.items():
        result[key] = compute_mean(values)
    return result
|
class MetricsSummarizer():
    """Accumulates metric dicts and averages them on demand."""

    def __init__(self, except_keys=None):
        # Keys listed here are passed through unaveraged by summarize().
        self.except_keys = set(except_keys) if except_keys is not None else set()
        self.metrics_history = []

    def append(self, metrics):
        """Record one metrics dict."""
        self.metrics_history.append(metrics)

    def summarize(self):
        """Return the mean of all recorded metrics and clear the history."""
        summary = mean_metrics(self.metrics_history, except_keys=self.except_keys)
        self.metrics_history = []
        return summary
|
def compute_mean(values):
    """Return the mean of `values` as a float tensor.

    Accepts either a tensor (averaged over all elements) or a tuple/list of
    numbers/tensors, which are stacked (detached) before averaging.

    Raises:
        ValueError: if `values` is neither a tensor nor a tuple/list.
    """
    if torch.is_tensor(values):
        return values.float().mean()
    if isinstance(values, (tuple, list)):
        return torch.stack([torch.as_tensor(x).detach() for x in values]).float().mean()
    # Fix: the original raised a bare ValueError() with no message, which is
    # unhelpful when debugging a bad metrics entry.
    raise ValueError(f'Cannot compute mean of values of type {type(values).__name__}')
|
def random_choice(n, num_samples, replacement=False, device=None):
    """Sample `num_samples` indices from range(n), with or without replacement."""
    if replacement:
        return torch.randint(0, n, (num_samples,), device=device)
    # Uniform weights + multinomial draws indices without replacement.
    uniform_weights = torch.ones(n, device=device)
    return torch.multinomial(uniform_weights, num_samples, replacement=False)
|
def windows(x, window_size, window_stride=1):
    """Slide a window of length `window_size` over dim 1 of `x`.

    Input (B, T, ...) -> output (B, num_windows, window_size, ...): the
    window (time) axis is placed directly after the window index.
    """
    unfolded = x.unfold(1, window_size, window_stride)
    # unfold appends the window axis last; move it to position 2.
    order = list(range(unfolded.ndim - 1))
    order.insert(2, unfolded.ndim - 1)
    return unfolded.permute(order)
|
def same_batch_shape(tensors, ndim=2):
    """True iff all tensors share their first `ndim` dimensions."""
    assert all(t.ndim >= ndim for t in tensors)
    reference = tensors[0].shape[:ndim]
    return all(t.shape[:ndim] == reference for t in tensors[1:])
|
def same_batch_shape_time_offset(a, b, offset):
    """True iff `a` and `b` share the batch dim and a's time dim exceeds b's by `offset`."""
    assert a.ndim >= 2 and b.ndim >= 2
    batch_a, time_a = a.shape[:2]
    return batch_a == b.shape[0] and time_a == b.shape[1] + offset
|
def check_no_grad(*tensors):
    """True iff every given tensor is None or does not require grad."""
    for t in tensors:
        if t is not None and t.requires_grad:
            return False
    return True
|
class AdamOptim():
    """Thin wrapper around torch.optim.Adam with optional gradient clipping."""

    def __init__(self, parameters, lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, grad_clip=0):
        # Materialize the parameter iterable: it is consumed by Adam and
        # reused later for clipping.
        self.parameters = list(parameters)
        self.grad_clip = grad_clip
        self.optimizer = optim.Adam(
            self.parameters, lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)

    def step(self, loss):
        """Backpropagate `loss` and take one optimizer step (clipping if enabled)."""
        self.optimizer.zero_grad()
        loss.backward()
        # grad_clip == 0 disables clipping entirely.
        if self.grad_clip > 0:
            nn.utils.clip_grad_norm_(self.parameters, self.grad_clip)
        self.optimizer.step()
|
def create_reward_transform(transform_type):
    """Return a reward-transform function handling both tensors and scalars.

    Supported types: 'tanh', 'clip' (to [-1, 1]), and 'none'/None (identity).

    Raises:
        ValueError: for any other transform type.
    """
    if transform_type == 'tanh':
        def transform(r):
            return torch.tanh(r) if torch.is_tensor(r) else math.tanh(r)
    elif transform_type == 'clip':
        def transform(r):
            if torch.is_tensor(r):
                return torch.clip(r, -1, 1)
            return np.clip(r, -1, 1)
    elif transform_type in ('none', None):
        def transform(r):
            return r
    else:
        raise ValueError(transform_type)
    return transform
|
def preprocess_atari_obs(obs, device=None):
    """Convert an Atari observation into a float tensor scaled to [0, 1]."""
    # LazyFrames must be materialized before tensor conversion.
    if isinstance(obs, gym.wrappers.LazyFrames):
        obs = np.array(obs)
    scaled = torch.as_tensor(obs, device=device).float()
    return scaled / 255.0
|
def create_atari_env(game, noop_max=30, frame_skip=4, frame_stack=4, frame_size=84, episodic_lives=True, grayscale=True, time_limit=27000):
    """Build an ALE Atari env with the standard DQN preprocessing stack.

    Args:
        game: Human-readable game name (converted to a ROM id).
        noop_max: Max random no-ops at episode start (0 disables them).
        frame_skip: Number of frames each action is repeated for.
        frame_stack: Number of consecutive frames stacked per observation.
        frame_size: Side length of the resized (square) observation.
        episodic_lives: Whether to signal termination on each life loss.
        grayscale: Whether observations are converted to grayscale.
        time_limit: Max steps per episode.
    """
    # frameskip=1 + repeat_action_probability=0: frame skipping is delegated
    # to the AtariPreprocessing wrapper, deterministically.
    env = AtariEnv(rom_name_to_id(game), frameskip=1, repeat_action_probability=0.0)
    env.spec = gym.spec((game + 'NoFrameskip-v4'))
    has_fire_action = (env.get_action_meanings()[1] == 'FIRE')
    # Games with a FIRE action get their start no-ops from NoopStart below
    # (applied after FireAfterLifeLoss), so preprocessing no-ops are disabled.
    env = gym.wrappers.AtariPreprocessing(env, noop_max=(0 if has_fire_action else noop_max), frame_skip=frame_skip, screen_size=frame_size, terminal_on_life_loss=False, grayscale_obs=grayscale)
    if has_fire_action:
        env = FireAfterLifeLoss(env)
        if (noop_max > 0):
            env = NoopStart(env, noop_max)
    if episodic_lives:
        env = EpisodicLives(env)
    env = gym.wrappers.FrameStack(env, frame_stack)
    env = gym.wrappers.TimeLimit(env, max_episode_steps=time_limit)
    return env
|
def create_vector_env(num_envs, env_fn):
    """Create a vectorized env: sync for a single env, async otherwise."""
    if num_envs == 1:
        # A single env does not need subprocess workers.
        return gym.vector.SyncVectorEnv([env_fn])
    return gym.vector.AsyncVectorEnv([env_fn for _ in range(num_envs)])
|
def compute_atari_hns(game, agent_score):
    """Human-normalized score (in percent) for an Atari game:
    0 = random play, 100 = human baseline."""
    random_score = atari_random_scores[game]
    human_score = atari_human_scores[game]
    normalized = (agent_score - random_score) / (human_score - random_score)
    return normalized * 100.0
|
class EpisodicLives(gym.Wrapper):
    """Signals episode termination on every life loss, while only truly
    resetting the underlying env once the real game episode is over.

    Value bootstrapping then stops at each life loss, but game state is
    preserved between these pseudo-episodes.
    """

    def __init__(self, env):
        super().__init__(env)
        # Direct ALE handle for querying the current life count.
        self.ale = env.unwrapped.ale
        self.lives = 0
        self.was_real_done = True

    def reset(self, seed=None, options=None):
        # Truly reset only after a real game over, or when forced via
        # options={'force': True}; otherwise advance one no-op step so play
        # continues with the next life.
        if (self.was_real_done or ((options is not None) and options.get('force', False))):
            (obs, info) = self.env.reset(seed=seed, options=options)
        else:
            (obs, _, _, _, info) = self.env.step(0)
        self.lives = self.ale.lives()
        return (obs, info)

    def step(self, action):
        (obs, reward, terminated, truncated, info) = self.env.step(action)
        self.was_real_done = (terminated or truncated)
        lives = self.ale.lives()
        # Report termination on a life loss, but only while lives remain:
        # lives == 0 means the env already signalled the real episode end.
        if ((lives < self.lives) and (lives > 0)):
            terminated = True
        self.lives = lives
        return (obs, reward, terminated, truncated, info)
|
class NoAutoReset(gym.Wrapper):
    """Makes reset() return the most recent terminal observation instead of
    restarting; a real reset happens only before the first episode or when
    forced via options={'force': True}."""

    def __init__(self, env):
        super().__init__(env)
        self.final_observation = None
        self.final_info = None

    def reset(self, seed=None, options=None):
        forced = (options is not None) and options.get('force', False)
        if forced or self.final_observation is None:
            return self.env.reset(seed=seed, options=options)
        # Replay the stored terminal transition.
        return self.final_observation, self.final_info

    def step(self, action):
        obs, reward, terminated, truncated, info = self.env.step(action)
        if terminated or truncated:
            # Remember the terminal transition so reset() can hand it back.
            self.final_observation = obs
            self.final_info = info
        return obs, reward, terminated, truncated, info
|
class FireAfterLifeLoss(gym.Wrapper):
    """Presses FIRE automatically on reset and after each life loss, for games
    that require FIRE to actually start playing."""

    def __init__(self, env):
        super().__init__(env)
        unwrapped = env.unwrapped
        action_meanings = unwrapped.get_action_meanings()
        # The wrapper only makes sense for games whose action 1 is FIRE.
        assert (action_meanings[1] == 'FIRE')
        assert (len(action_meanings) >= 3)
        # Direct ALE handle for querying the current life count.
        self.ale = unwrapped.ale
        self.lives = 0

    def reset(self, seed=None, options=None):
        (obs, info) = self.env.reset(seed=seed, options=options)
        # Press FIRE (action 1), then action 2, re-resetting if either step
        # happens to end the episode.
        (obs, _, terminated, truncated, _) = self.env.step(1)
        if (terminated or truncated):
            (obs, info) = self.env.reset(seed=seed, options=options)
        (obs, _, terminated, truncated, _) = self.env.step(2)
        if (terminated or truncated):
            (obs, info) = self.env.reset(seed=seed, options=options)
        self.lives = self.ale.lives()
        return (obs, info)

    def step(self, action):
        (obs, reward, terminated, truncated, info) = self.env.step(action)
        lives = self.ale.lives()
        if (lives < self.lives):
            # A life was just lost: press FIRE to resume play, merging the
            # extra transition's reward/termination flags into this step.
            (obs, reward2, terminated2, truncated2, info2) = self.env.step(1)
            reward += reward2
            terminated = (terminated or terminated2)
            truncated = (truncated or truncated2)
            info.update(info2)
        self.lives = lives
        return (obs, reward, terminated, truncated, info)
|
class NoopStart(gym.Wrapper):
    """Executes a random number of no-op actions after each reset, so episodes
    begin from varied initial states."""

    def __init__(self, env, noop_max):
        super().__init__(env)
        self.noop_max = noop_max

    def reset(self, seed=None, options=None):
        obs, reset_info = self.env.reset(seed=seed, options=options)
        if self.noop_max > 0:
            noops = self.env.unwrapped.np_random.integers(1, self.noop_max + 1)
        else:
            noops = 0
        for _ in range(noops):
            obs, _, terminated, truncated, step_info = self.env.step(0)
            reset_info.update(step_info)
            if terminated or truncated:
                # The episode ended during the no-ops: start over.
                obs, reset_info = self.env.reset(seed=seed, options=options)
        return obs, reset_info
|
@torch.no_grad()
def make_grid(tensor, nrow, padding, pad_value=0):
    """Arrange a batch of images (N, C, H, W) into one padded grid image.

    `padding` is a (vertical, horizontal) pair applied around every cell;
    `pad_value` fills the gaps. Returns a (C, grid_H, grid_W) tensor.
    """
    nmaps = tensor.size(0)
    xmaps = min(nrow, nmaps)
    # Enough rows to fit every image.
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height = int(tensor.size(2) + padding[0])
    width = int(tensor.size(3) + padding[1])
    num_channels = tensor.size(1)

    grid = tensor.new_full(
        (num_channels, height * ymaps + padding[0], width * xmaps + padding[1]),
        pad_value)

    k = 0
    for y in range(ymaps):
        for x in range(xmaps):
            if k >= nmaps:
                break
            # Copy image k into its (y, x) cell, skipping the padding border.
            cell = grid.narrow(1, y * height + padding[0], height - padding[0])
            cell = cell.narrow(2, x * width + padding[1], width - padding[1])
            cell.copy_(tensor[k])
            k += 1
    return grid
|
def to_image(tensor):
    """Convert a (C, H, W) float tensor in [0, 1] to an RGB PIL image."""
    from PIL import Image
    # Scale to [0, 255], round via +0.5, clamp, and move channels last.
    arr = tensor.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8)
    if arr.shape[2] == 1:
        # Single-channel data must be 2-D for PIL to treat it as grayscale.
        arr = arr.squeeze(2)
    return Image.fromarray(arr.numpy()).convert('RGB')
|
def download_file_from_google_drive(id, destination):
    """Download a (possibly large) Google Drive file to `destination`."""
    URL = 'https://docs.google.com/uc?export=download'
    session = requests.Session()
    response = session.get(URL, params={'id': id}, stream=True)

    # Large files trigger a confirmation page; re-request with the token
    # from the warning cookie if one was issued.
    token = get_confirm_token(response)
    if token:
        response = session.get(URL, params={'id': id, 'confirm': token}, stream=True)

    save_response_content(response, destination)
|
def get_confirm_token(response):
    """Return the value of Google Drive's download-warning cookie, or None."""
    return next(
        (value for key, value in response.cookies.items()
         if key.startswith('download_warning')),
        None)
|
def save_response_content(response, destination):
    """Stream `response` to `destination`, skipping keep-alive (empty) chunks."""
    CHUNK_SIZE = 32768
    with open(destination, 'wb') as f:
        f.writelines(
            chunk for chunk in response.iter_content(CHUNK_SIZE) if chunk)
|
def download_pretrained_model():
    """Fetch the default pretrained model archive and unpack it in place."""
    if not os.path.isdir(PRETRAINED_MODEL_DIR):
        os.mkdir(PRETRAINED_MODEL_DIR)
    destination = os.path.join(PRETRAINED_MODEL_DIR, 'default_model.zip')
    download_file_from_google_drive(FILE_ID, destination)
    with zipfile.ZipFile(destination, 'r') as zip_ref:
        zip_ref.extractall(PRETRAINED_MODEL_DIR)
|
def add_arguments(parser):
'Helper function to fill the parser object.\n\n Args:\n parser: Parser object\n Returns:\n None\n '
parser.add_argument('-m', '--model', help='which model?', default='NoisyGRUSeq2SeqWithFeatures', type=str)
parser.add_argument('-i', '--input_pipeline', default='InputPipelineWithFeatures', type=str)
parser.add_argument('--input_sequence_key', default='random_smiles', type=str)
parser.add_argument('--output_sequence_key', default='canonical_smiles', type=str)
parser.add_argument('-c', '--cell_size', help='hidden layers of cell. multiple numbers for multi layer rnn', nargs='+', default=[128], type=int)
parser.add_argument('-e', '--emb_size', help='size of bottleneck layer', default=128, type=int)
parser.add_argument('-l', '--learning_rate', default=0.0005, type=float)
parser.add_argument('-s', '--save_dir', help='path to save and log files', default=os.path.join(DEFAULT_DATA_DIR, 'default_model'), type=str)
parser.add_argument('-d', '--device', help='number of cuda visible devise', default='-1', type=str)
parser.add_argument('-gmf', '--gpu_mem_frac', default=1.0, type=float)
parser.add_argument('-n', '--num_steps', help='number of train steps', default=250000, type=int)
parser.add_argument('--summary_freq', help='save model and log translation accuracy', default=1000, type=int)
parser.add_argument('--inference_freq', help='log qsar modelling performance', default=5000, type=int)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--one_hot_embedding', default=False, type=bool)
parser.add_argument('--char_embedding_size', default=32, type=int)
parser.add_argument('--train_file', default='../data/pretrain_dataset.tfrecords', type=str)
parser.add_argument('--val_file', default='../data/pretrain_dataset_val.tfrecords', type=str)
parser.add_argument('--infer_file', default='../data/val_dataset_preprocessed3.csv', type=str)
parser.add_argument('--allow_soft_placement', default=True, type=bool)
parser.add_argument('--cpu_threads', default=5, type=int)
parser.add_argument('--overwrite_saves', default=False, type=bool)
parser.add_argument('--input_dropout', default=0.0, type=float)
parser.add_argument('--emb_noise', default=0.0, type=float)
parser.add_argument('-ks', '--kernel_size', nargs='+', default=[2], type=int)
parser.add_argument('-chs', '--conv_hidden_size', nargs='+', default=[128], type=int)
parser.add_argument('--reverse_decoding', default=False, type=bool)
parser.add_argument('--buffer_size', default=10000, type=int)
parser.add_argument('--lr_decay', default=True, type=bool)
parser.add_argument('--lr_decay_frequency', default=50000, type=int)
parser.add_argument('--lr_decay_factor', default=0.9, type=float)
parser.add_argument('--num_buckets', default=8.0, type=float)
parser.add_argument('--min_bucket_length', default=20.0, type=float)
parser.add_argument('--max_bucket_length', default=60.0, type=float)
parser.add_argument('--num_features', default=7, type=int)
parser.add_argument('--save_hparams', default=True, type=bool)
parser.add_argument('--hparams_from_file', default=False, type=bool)
parser.add_argument('--hparams_file_name', default=None, type=str)
parser.add_argument('--rand_input_swap', default=False, type=bool)
parser.add_argument('--infer_input', default='random', type=str)
parser.add_argument('--emb_activation', default='tanh', type=str)
parser.add_argument('--div_loss_scale', default=1.0, type=float)
parser.add_argument('--div_loss_rate', default=0.9, type=float)
|
def create_hparams(flags):
    """Create training hparams from parsed command-line flags.

    The vocabulary file paths are always pinned to DEFAULT_DATA_DIR, even
    when the remaining hyperparameters are loaded from a JSON file.
    """
    hparams = tf.contrib.training.HParams(model=flags.model, input_pipeline=flags.input_pipeline, input_sequence_key=flags.input_sequence_key, output_sequence_key=flags.output_sequence_key, cell_size=flags.cell_size, emb_size=flags.emb_size, save_dir=flags.save_dir, device=flags.device, lr=flags.learning_rate, gpu_mem_frac=flags.gpu_mem_frac, num_steps=flags.num_steps, summary_freq=flags.summary_freq, inference_freq=flags.inference_freq, batch_size=flags.batch_size, one_hot_embedding=flags.one_hot_embedding, char_embedding_size=flags.char_embedding_size, train_file=flags.train_file, val_file=flags.val_file, infer_file=flags.infer_file, allow_soft_placement=flags.allow_soft_placement, cpu_threads=flags.cpu_threads, overwrite_saves=flags.overwrite_saves, input_dropout=flags.input_dropout, emb_noise=flags.emb_noise, conv_hidden_size=flags.conv_hidden_size, kernel_size=flags.kernel_size, reverse_decoding=flags.reverse_decoding, buffer_size=flags.buffer_size, lr_decay=flags.lr_decay, lr_decay_frequency=flags.lr_decay_frequency, lr_decay_factor=flags.lr_decay_factor, num_buckets=flags.num_buckets, min_bucket_length=flags.min_bucket_length, max_bucket_length=flags.max_bucket_length, num_features=flags.num_features, rand_input_swap=flags.rand_input_swap, infer_input=flags.infer_input, emb_activation=flags.emb_activation, div_loss_scale=flags.div_loss_scale, div_loss_rate=flags.div_loss_rate)
    hparams.add_hparam('encode_vocabulary_file', os.path.join(DEFAULT_DATA_DIR, 'indices_char.npy'))
    hparams.add_hparam('decode_vocabulary_file', os.path.join(DEFAULT_DATA_DIR, 'indices_char.npy'))
    hparams_file_name = flags.hparams_file_name
    if (hparams_file_name is None):
        # Default location: alongside the model checkpoints.
        hparams_file_name = os.path.join(hparams.save_dir, 'hparams.json')
    if flags.hparams_from_file:
        # Presumably cleared so the JSON value can replace the flag-provided
        # cell_size list — TODO confirm.
        hparams.cell_size = list()
        # NOTE(review): HParams.parse_json normally expects a JSON string,
        # but json.load returns a dict here — verify this works with the
        # tf.contrib version in use.
        hparams = hparams.parse_json(json.load(open(hparams_file_name)))
        # Re-pin the vocabulary files after loading from JSON.
        hparams.set_hparam('encode_vocabulary_file', os.path.join(DEFAULT_DATA_DIR, 'indices_char.npy'))
        hparams.set_hparam('decode_vocabulary_file', os.path.join(DEFAULT_DATA_DIR, 'indices_char.npy'))
    return hparams
|
def sequence2embedding(model, hparams, seq_list):
    """Run the encoder half of the translation model on a list of sequences.

    Args:
        model: Named tuple holding graph, model and session objects.
        hparams: Hyperparameter object.
        seq_list: List of sequences to encode.
    Returns:
        Numpy array with the embedding (molecular descriptor) of each input sequence.
    """
    batches = []
    with model.graph.as_default():
        pipeline = InputPipelineInferEncode(seq_list, hparams)
        pipeline.initilize()
        model.model.restore(model.sess)
        while True:
            try:
                seqs, lengths = pipeline.get_next()
            except StopIteration:
                break
            batches.append(model.model.seq2emb(model.sess, seqs, lengths))
    return np.concatenate(batches)
|
def embedding2sequence(model, hparams, embedding, num_top=1, maximum_iterations=1000):
    """Run the decoder half of the translation model on an embedding array.

    Args:
        model: Named tuple holding graph, model and session objects.
        hparams: Hyperparameter object.
        embedding: Array of shape (num_samples, num_features).
        num_top: Number of most probable sequences returned per sample.
        maximum_iterations: Cap on the number of decoding steps.
    Returns:
        List of decoded sequences, or the bare string when exactly one
        sequence was decoded.
    """
    seq_list = []
    with model.graph.as_default():
        input_pipeline = InputPipelineInferDecode(embedding, hparams)
        input_pipeline.initilize()
        model.model.restore(model.sess)
        while True:
            try:
                emb = input_pipeline.get_next()
            except StopIteration:
                break
            seq = model.model.emb2seq(model.sess, emb, num_top, maximum_iterations)
            if (num_top == 1):
                # Single beam result per sample: unwrap the inner one-element lists.
                seq = [s[0] for s in seq]
            seq_list.extend(seq)
    # Bug fix: the original tested `isinstance(seq_list, str)` on the list
    # itself (always False) joined with bitwise '&'; test the sole element.
    if (len(seq_list) == 1) and isinstance(seq_list[0], str):
        return seq_list[0]
    return seq_list
|
class InferenceModel(object):
    """Handles encoding/decoding inference with a trained translation model."""

    def __init__(self, model_dir=_default_model_dir, use_gpu=True, batch_size=256, gpu_mem_frac=0.1, beam_width=10, num_top=1, maximum_iterations=1000, cpu_threads=5, emb_activation=None):
        """Constructor for the inference model.

        Args:
            model_dir: Path to the model directory.
            use_gpu: Flag for GPU usage.
            batch_size: Number of samples to process per step.
            gpu_mem_frac: If GPU is used, what memory fraction should be used?
            beam_width: Width of the window used for the beam search decoder.
            num_top: Number of most probable sequences output by the beam search decoder.
            maximum_iterations: Cap on the number of decoding steps.
            cpu_threads: Number of CPU threads for session parallelism.
            emb_activation: Activation function used in the bottleneck layer.
                NOTE(review): accepted but never used below (hparams are loaded
                from file) — confirm whether it should be forwarded.
        Returns:
            None
        """
        self.num_top = num_top
        self.use_gpu = use_gpu
        # Build a default flag namespace, then load the hparams JSON stored in
        # the model directory on top of it (hparams_from_file=True).
        parser = argparse.ArgumentParser()
        add_arguments(parser)
        flags = parser.parse_args([])
        flags.hparams_from_file = True
        flags.save_dir = model_dir
        self.hparams = create_hparams(flags)
        # Inference-time overrides of the stored hyperparameters.
        self.hparams.set_hparam('save_dir', model_dir)
        self.hparams.set_hparam('batch_size', batch_size)
        self.hparams.set_hparam('gpu_mem_frac', gpu_mem_frac)
        self.hparams.add_hparam('beam_width', beam_width)
        self.hparams.set_hparam('cpu_threads', cpu_threads)
        # Separate graphs/sessions for the encoder and decoder halves.
        (self.encode_model, self.decode_model) = build_models(self.hparams, modes=['ENCODE', 'DECODE'])
        self.maximum_iterations = maximum_iterations

    def seq_to_emb(self, seq):
        """Calculate the embedding (molecular descriptor) for input sequence(s).

        Args:
            seq: Single sequence or list of sequences to encode.
        Returns:
            Numpy array with the embedding of the input sequence(s).
        """
        if isinstance(seq, str):
            seq = [seq]
        if self.use_gpu:
            emb = sequence2embedding(self.encode_model, self.hparams, seq)
        else:
            # Pin the encoder to CPU when GPU use is disabled.
            with tf.device('/cpu:0'):
                emb = sequence2embedding(self.encode_model, self.hparams, seq)
        return emb

    def emb_to_seq(self, embedding):
        """Decode sequence(s) from one or multiple embeddings.

        Args:
            embedding: Array with shape (n_samples, num_features), or a single
                1-D embedding which is promoted to a batch of one.
        Returns:
            Decoded sequence(s).
        """
        if (embedding.ndim == 1):
            embedding = np.expand_dims(embedding, 0)
        if self.use_gpu:
            seq = embedding2sequence(self.decode_model, self.hparams, embedding, self.num_top, self.maximum_iterations)
        else:
            with tf.device('/cpu:0'):
                seq = embedding2sequence(self.decode_model, self.hparams, embedding, self.num_top, self.maximum_iterations)
        # NOTE(review): applied twice — first unwraps a singleton list; the
        # second would unwrap a length-1 *string* to its only character.
        # Confirm the double unwrap is intended.
        if (len(seq) == 1):
            seq = seq[0]
        if (len(seq) == 1):
            seq = seq[0]
        return seq
|
class InferenceServer():
    """ZMQ-based inference service.

    A queue device listens on the frontend port for client requests and
    forwards them to worker processes connected on the backend port; each
    worker owns one InferenceModel. Clients use seq_to_emb / emb_to_seq,
    which serialize requests as JSON tuples (encode_flag, payload).
    """

    def __init__(self, model_dir=_default_model_dir, num_servers=1, port_frontend='5559', port_backend='5560', batch_size=256, gpu_mem_frac=0.3, beam_width=10, num_top=1, maximum_iterations=1000, use_running=False):
        """Spawn the queue device and worker processes unless use_running is set.

        Args:
            model_dir: Path to the model directory.
            num_servers: Number of worker processes to spawn.
            port_frontend: TCP port clients connect to.
            port_backend: TCP port workers connect to.
            batch_size: Number of samples per inference step.
            gpu_mem_frac: GPU memory fraction per worker.
            beam_width: Width of the beam search decoder window.
            num_top: Number of most probable sequences per sample.
                NOTE(review): stored but not forwarded to InferenceModel in
                _init_server — workers use the InferenceModel default; confirm.
            maximum_iterations: Cap on the number of decoding steps.
            use_running: If True, act only as a client of an already-running server.
        """
        self.model_dir = model_dir
        self.port_frontend = port_frontend
        self.port_backend = port_backend
        self.batch_size = batch_size
        self.gpu_mem_frac = gpu_mem_frac
        self.beam_width = beam_width
        self.maximum_iterations = maximum_iterations
        self.num_top = num_top
        if (not use_running):
            self.gpus = os.environ.get('CUDA_VISIBLE_DEVICES').split(',')
            mp.Process(target=self._init_device).start()
            for i in range(num_servers):
                # Round-robin worker processes over the visible GPUs.
                os.environ['CUDA_VISIBLE_DEVICES'] = self.gpus[(i % len(self.gpus))]
                mp.Process(target=self._init_server).start()

    def _init_device(self):
        """Run the ZMQ queue device bridging frontend (clients) and backend (workers)."""
        context = zmq.Context(1)
        frontend = context.socket(zmq.XREP)
        backend = context.socket(zmq.XREQ)
        try:
            frontend.bind(('tcp://*:%s' % self.port_frontend))
            backend.bind(('tcp://*:%s' % self.port_backend))
            zmq.device(zmq.QUEUE, frontend, backend)
        except Exception:
            print('bringing down zmq device')
        finally:
            # Bug fix: cleanup now runs inside finally, so sockets and context
            # are released even when bind() or device() raises (the original
            # had `finally: pass` and ran cleanup after the try statement).
            frontend.close()
            backend.close()
            context.term()

    def _init_server(self):
        """Worker loop: build an InferenceModel and answer requests forever."""
        infer_model = InferenceModel(model_dir=self.model_dir, gpu_mem_frac=self.gpu_mem_frac, use_gpu=True, batch_size=self.batch_size, beam_width=self.beam_width, maximum_iterations=self.maximum_iterations)
        context = zmq.Context()
        socket = context.socket(zmq.REP)
        socket.connect(('tcp://localhost:%s' % self.port_backend))
        print('Server running on GPU ', os.environ['CUDA_VISIBLE_DEVICES'])
        while True:
            inp = json.loads(socket.recv())
            # First tuple element: truthy -> encode request, falsy -> decode request.
            if inp[0]:
                embeddings = infer_model.seq_to_emb(inp[1])
                socket.send_string(json.dumps(embeddings.tolist()))
            else:
                smiles = infer_model.emb_to_seq(np.array(inp[1]))
                socket.send_string(json.dumps(smiles))

    def seq_to_emb(self, smiles):
        """Client call: encode SMILES via the server.

        Args:
            smiles: SMILES string or list of SMILES strings.
        Returns:
            Numpy array with the embedding(s).
        """
        context = zmq.Context()
        socket = context.socket(zmq.REQ)
        socket.connect(('tcp://localhost:%s' % self.port_frontend))
        socket.send_string(json.dumps((1, smiles)))
        emb = np.array(json.loads(socket.recv()))
        return emb

    def emb_to_seq(self, emb):
        """Client call: decode an embedding array via the server.

        Args:
            emb: Numpy array with the embedding(s).
        Returns:
            Decoded sequence(s).
        """
        context = zmq.Context()
        socket = context.socket(zmq.REQ)
        socket.connect(('tcp://localhost:%s' % self.port_frontend))
        socket.send_string(json.dumps((0, emb.tolist())))
        seq = json.loads(socket.recv())
        return seq
|
class InputPipeline():
    """Base input pipeline. Iterates a tf-record file to produce batched
    inputs for training/evaluating the translation model.

    Attributes:
        mode: The mode the model is supposed to run in (e.g. TRAIN).
        batch_size: Number of samples per batch.
        buffer_size: Number of elements in the shuffle buffer.
        input_sequence_key: Identifier of the input-sequence feature in the tf-record file.
        output_sequence_key: Identifier of the output-sequence feature in the tf-record file.
        encode_vocabulary: Dict mapping input tokens to integer ids.
        decode_vocabulary: Dict mapping output tokens to integer ids.
        num_buckets: Number of buckets for batching sequences of similar length.
        min_bucket_lenght: Sequences below this length share the first bucket.
        max_bucket_lenght: Sequences above this length share the last bucket.
        regex_pattern_input: Regex used to tokenize the input sequence.
        regex_pattern_output: Regex used to tokenize the output sequence.
    """

    def __init__(self, mode, hparams):
        """Set up vocabularies, tokenizer regexes, bucketing bounds and file paths.

        Args:
            mode: The mode the model is supposed to run in (e.g. TRAIN).
            hparams: Hyperparameters defined in file or flags.
        Returns:
            None
        """
        self.mode = mode
        self.batch_size = hparams.batch_size
        self.buffer_size = hparams.buffer_size
        self.input_sequence_key = hparams.input_sequence_key
        self.output_sequence_key = hparams.output_sequence_key
        if (self.mode == 'TRAIN'):
            self.file = hparams.train_file
        else:
            # Non-training modes always read canonical SMILES from the validation file.
            self.input_sequence_key = 'canonical_smiles'
            self.file = hparams.val_file
        # Vocabulary files store {id: token}; invert to {token: id} for encoding.
        self.encode_vocabulary = {v: k for (k, v) in np.load(hparams.encode_vocabulary_file, allow_pickle=True).item().items()}
        self.decode_vocabulary = {v: k for (k, v) in np.load(hparams.decode_vocabulary_file, allow_pickle=True).item().items()}
        self.num_buckets = hparams.num_buckets
        self.min_bucket_lenght = hparams.min_bucket_length
        self.max_bucket_lenght = hparams.max_bucket_length
        # Choose the tokenizer regex from the feature key name.
        if ('inchi' in self.input_sequence_key):
            self.regex_pattern_input = REGEX_INCHI
        elif ('smiles' in self.input_sequence_key):
            self.regex_pattern_input = REGEX_SML
        else:
            raise ValueError('Could not understand the input typ. SMILES or INCHI?')
        if ('inchi' in self.output_sequence_key):
            self.regex_pattern_output = REGEX_INCHI
        elif ('smiles' in self.output_sequence_key):
            self.regex_pattern_output = REGEX_SML
        else:
            raise ValueError('Could not understand the output typ. SMILES or INCHI?')

    def make_dataset_and_iterator(self):
        """Build the TFRecordDataset pipeline and an initializable iterator."""
        self.dataset = tf.data.TFRecordDataset(self.file)
        if (self.mode == 'TRAIN'):
            self.dataset = self.dataset.repeat()
        self.dataset = self.dataset.map(self._parse_element, num_parallel_calls=32)
        # Tokenization runs in Python (numpy + regex) via tf.py_func.
        self.dataset = self.dataset.map((lambda element: tf.py_func(self._process_element, [element[self.input_sequence_key], element[self.output_sequence_key]], [tf.int32, tf.int32, tf.int32, tf.int32])), num_parallel_calls=32)
        # Group sequences of similar input length into the same padded batch;
        # sequences are padded with the '</s>' id, lengths with 0.
        self.dataset = self.dataset.apply(tf.contrib.data.group_by_window(key_func=(lambda in_seq, out_seq, in_len, out_len: self._length_bucket(in_len)), reduce_func=(lambda key, ds: self._pad_batch(ds, self.batch_size, ([None], [None], [1], [1]), (self.encode_vocabulary['</s>'], self.decode_vocabulary['</s>'], 0, 0))), window_size=self.batch_size))
        if (self.mode == 'TRAIN'):
            # NOTE(review): shuffling happens after batching, so whole batches
            # (not individual samples) are shuffled — confirm intended.
            self.dataset = self.dataset.shuffle(buffer_size=self.buffer_size)
        self.iterator = self.dataset.make_initializable_iterator()

    def _parse_element(self, example_proto):
        """Parse one serialized tf.Example into its string features."""
        feature_dict = {self.input_sequence_key: tf.FixedLenFeature([], tf.string), self.output_sequence_key: tf.FixedLenFeature([], tf.string)}
        parsed_features = tf.parse_single_example(example_proto, feature_dict)
        element = {name: parsed_features[name] for name in list(feature_dict.keys())}
        return element

    def _process_element(self, input_seq, output_seq):
        """Tokenize input and output sequences and pad with start/stop tokens.

        Args:
            input_seq: Input sequence (ASCII-encoded bytes).
            output_seq: Target sequence (ASCII-encoded bytes).
        Returns:
            Tuple of int32 arrays: input token ids, output token ids,
            input length, output length.
        """
        input_seq = input_seq.decode('ascii')
        output_seq = output_seq.decode('ascii')
        input_seq = np.array(self._char_to_idx(input_seq, self.regex_pattern_input, self.encode_vocabulary)).astype(np.int32)
        output_seq = np.array(self._char_to_idx(output_seq, self.regex_pattern_output, self.decode_vocabulary)).astype(np.int32)
        input_seq = self._pad_start_end_token(input_seq, self.encode_vocabulary)
        output_seq = self._pad_start_end_token(output_seq, self.decode_vocabulary)
        input_seq_len = np.array([len(input_seq)]).astype(np.int32)
        output_seq_len = np.array([len(output_seq)]).astype(np.int32)
        return (input_seq, output_seq, input_seq_len, output_seq_len)

    def _char_to_idx(self, seq, regex_pattern, vocabulary):
        """Tokenize seq with regex_pattern and map each token to its vocabulary id.

        Args:
            seq: Sequence to tokenize.
            regex_pattern: Expression to tokenize the sequence with.
            vocabulary: Dict mapping tokens to integer ids.
        Returns:
            List of token ids.
        """
        char_list = re.findall(regex_pattern, seq)
        return [vocabulary[char_list[j]] for j in range(len(char_list))]

    def _pad_start_end_token(self, seq, vocabulary):
        """Wrap a tokenized sequence with the '<s>' start and '</s>' stop ids."""
        seq = np.concatenate([np.array([vocabulary['<s>']]), seq, np.array([vocabulary['</s>']])]).astype(np.int32)
        return seq

    def _length_bucket(self, length):
        """Map a sequence length to a bucket id in [0, num_buckets + 1].

        The range [min_bucket_lenght, max_bucket_lenght] is split into
        num_buckets equal-width buckets; shorter/longer sequences are clipped
        into the first/last bucket.
        """
        length = tf.cast(length, tf.float32)
        num_buckets = tf.cast(self.num_buckets, tf.float32)
        # Width of one bucket in sequence-length units.
        cast_value = ((self.max_bucket_lenght - self.min_bucket_lenght) / num_buckets)
        minimum = (self.min_bucket_lenght / cast_value)
        bucket_id = (((length / cast_value) - minimum) + 1)
        bucket_id = tf.cast(tf.clip_by_value(bucket_id, 0, (self.num_buckets + 1)), tf.int64)
        return bucket_id

    def _pad_batch(self, ds, batch_size, padded_shapes, padded_values):
        """Pad elements in a window to a common shape and batch them (remainder dropped)."""
        return ds.padded_batch(batch_size, padded_shapes=padded_shapes, padding_values=padded_values, drop_remainder=True)
|
class InputPipelineWithFeatures(InputPipeline):
    """Input pipeline that additionally yields per-molecule feature vectors.

    Behaves like InputPipeline but parses a 'mol_features' float feature of
    length hparams.num_features from each tf-record and appends it to every
    element tuple.
    """

    def __init__(self, mode, hparams):
        """Set up the base pipeline plus the molecular-feature key and size.

        Args:
            mode: The mode the model is supposed to run in (e.g. TRAIN).
            hparams: Hyperparameters defined in file or flags.
        Returns:
            None
        """
        super().__init__(mode, hparams)
        self.features_key = 'mol_features'
        self.num_features = hparams.num_features

    def make_dataset_and_iterator(self):
        """Build the TFRecordDataset pipeline and an initializable iterator."""
        self.dataset = tf.data.TFRecordDataset(self.file)
        self.dataset = self.dataset.map(self._parse_element, num_parallel_calls=32)
        if (self.mode == 'TRAIN'):
            self.dataset = self.dataset.repeat()
        # Tokenization runs in Python via tf.py_func; the feature vector is
        # passed through unchanged as float32.
        self.dataset = self.dataset.map((lambda element: tf.py_func(self._process_element, [element[self.input_sequence_key], element[self.output_sequence_key], element[self.features_key]], [tf.int32, tf.int32, tf.int32, tf.int32, tf.float32])), num_parallel_calls=32)
        # Bucket by input length; sequences pad with '</s>' ids, features with 0.0.
        self.dataset = self.dataset.apply(tf.contrib.data.group_by_window(key_func=(lambda in_seq, out_seq, in_len, out_len, feat: self._length_bucket(in_len)), reduce_func=(lambda key, ds: self._pad_batch(ds, self.batch_size, ([None], [None], [1], [1], [self.num_features]), (self.encode_vocabulary['</s>'], self.decode_vocabulary['</s>'], 0, 0, 0.0))), window_size=self.batch_size))
        if (self.mode == 'TRAIN'):
            # NOTE(review): shuffles whole batches, not individual samples.
            self.dataset = self.dataset.shuffle(buffer_size=self.buffer_size)
        self.iterator = self.dataset.make_initializable_iterator()

    def _parse_element(self, example_proto):
        """Parse one serialized tf.Example into its string and feature columns."""
        feature_dict = {self.input_sequence_key: tf.FixedLenFeature([], tf.string), self.output_sequence_key: tf.FixedLenFeature([], tf.string), self.features_key: tf.FixedLenFeature([self.num_features], tf.float32)}
        parsed_features = tf.parse_single_example(example_proto, feature_dict)
        element = {name: parsed_features[name] for name in list(feature_dict.keys())}
        return element

    def _process_element(self, input_seq, output_seq, features):
        """Tokenize and pad both sequences; pass the feature vector through.

        Args:
            input_seq: Input sequence (ASCII-encoded bytes).
            output_seq: Target sequence (ASCII-encoded bytes).
            features: Molecular feature vector (float32).
        Returns:
            Tuple: input token ids, output token ids, input length,
            output length, features.
        """
        input_seq = input_seq.decode('ascii')
        output_seq = output_seq.decode('ascii')
        input_seq = np.array(self._char_to_idx(input_seq, self.regex_pattern_input, self.encode_vocabulary)).astype(np.int32)
        output_seq = np.array(self._char_to_idx(output_seq, self.regex_pattern_output, self.decode_vocabulary)).astype(np.int32)
        input_seq = self._pad_start_end_token(input_seq, self.encode_vocabulary)
        output_seq = self._pad_start_end_token(output_seq, self.decode_vocabulary)
        input_seq_len = np.array([len(input_seq)]).astype(np.int32)
        output_seq_len = np.array([len(output_seq)]).astype(np.int32)
        return (input_seq, output_seq, input_seq_len, output_seq_len, features)
|
class InputPipelineInferEncode():
    """Python generator over a list of sequences, feeding the encoder at
    inference time.

    Attributes:
        seq_list: List of sequences to iterate over.
        batch_size: Number of samples yielded per iterator call.
        encode_vocabulary: Dict mapping input tokens to integer ids.
        input_sequence_key: Identifier of the input-sequence feature.
        regex_pattern_input: Regex used to tokenize the input sequence.
    """

    def __init__(self, seq_list, hparams):
        """Set up the vocabulary and tokenizer regex.

        Args:
            seq_list: List of sequences to iterate over.
            hparams: Hyperparameters defined in file or flags.
        Returns:
            None
        """
        self.seq_list = seq_list
        self.batch_size = hparams.batch_size
        # Vocabulary file stores {id: token}; invert to {token: id}.
        self.encode_vocabulary = {v: k for (k, v) in np.load(hparams.encode_vocabulary_file, allow_pickle=True).item().items()}
        self.input_sequence_key = hparams.input_sequence_key
        if ('inchi' in self.input_sequence_key):
            self.regex_pattern_input = REGEX_INCHI
        elif ('smiles' in self.input_sequence_key):
            self.regex_pattern_input = REGEX_SML
        else:
            raise ValueError('Could not understand the input typ. SMILES or INCHI?')

    def _input_generator(self):
        """Yield (seq_batch, seq_len_batch), padding each batch to its longest sequence."""
        total = len(self.seq_list)
        for start in range(0, total, self.batch_size):
            samples = [self._seq_to_idx(seq) for seq in self.seq_list[start:min((start + self.batch_size), total)]]
            seq_len_batch = np.array([len(entry) for entry in samples])
            max_length = seq_len_batch.max()
            # Pad shorter sequences with the '</s>' stop-token id.
            pad_id = self.encode_vocabulary['</s>']
            seq_batch = np.concatenate([np.expand_dims(np.append(seq, np.array(([pad_id] * (max_length - len(seq))))), 0) for seq in samples]).astype(np.int32)
            (yield (seq_batch, seq_len_batch))

    def initilize(self):
        """Create (or reset) the underlying generator."""
        self.generator = self._input_generator()

    def get_next(self):
        """Return the next batch; raises StopIteration when exhausted."""
        return next(self.generator)

    def _char_to_idx(self, seq):
        """Tokenize seq with the input regex and map each token to its id.

        Args:
            seq: Sequence to tokenize.
        Returns:
            List of token ids.
        """
        # Idiom fix: iterate the tokens directly instead of indexing via
        # range(len(...)).
        return [self.encode_vocabulary[token] for token in re.findall(self.regex_pattern_input, seq)]

    def _seq_to_idx(self, seq):
        """Tokenize a sequence and wrap it with the '<s>' / '</s>' token ids.

        Args:
            seq: Sequence to tokenize.
        Returns:
            int32 array of token ids including start and stop tokens.
        """
        seq = np.concatenate([np.array([self.encode_vocabulary['<s>']]), np.array(self._char_to_idx(seq)).astype(np.int32), np.array([self.encode_vocabulary['</s>']])]).astype(np.int32)
        return seq
|
class InputPipelineInferDecode():
    """Python generator over an embedding array, feeding the decoder at
    inference time.

    Attributes:
        embedding: Array with embeddings (n_samples x n_features).
        batch_size: Number of samples yielded per iterator call.
    """

    def __init__(self, embedding, hparams):
        """Store the embedding array and the batch size.

        Args:
            embedding: Array with embeddings (n_samples x n_features).
            hparams: Hyperparameters defined in file or flags.
        Returns:
            None
        """
        self.embedding = embedding
        self.batch_size = hparams.batch_size

    def _input_generator(self):
        """Yield consecutive slices of at most batch_size rows."""
        total = len(self.embedding)
        for start in range(0, total, self.batch_size):
            yield self.embedding[start:min(start + self.batch_size, total)]

    def initilize(self):
        """Create (or reset) the underlying generator."""
        self.generator = self._input_generator()

    def get_next(self):
        """Return the next batch; raises StopIteration when exhausted."""
        return next(self.generator)
|
def build_models(hparams, modes=['TRAIN', 'EVAL', 'ENCODE']):
    """Build a translation model for one or several modes.

    Args:
        hparams: Hyperparameters defined in file or flags.
        modes: Single mode string or list of modes (e.g. TRAIN, EVAL,
            ENCODE, DECODE).
    Returns:
        A single model, or a tuple of models when modes is a list.
    """
    model_cls = getattr(models, hparams.model)
    pipeline_cls = getattr(input_pipeline, hparams.input_pipeline)
    if not isinstance(modes, list):
        return create_model(modes, model_cls, pipeline_cls, hparams)
    return tuple(create_model(mode, model_cls, pipeline_cls, hparams) for mode in modes)
|
def create_model(mode, model_creator, input_pipeline_creator, hparams):
    """Build one translation model (graph, model, session) for a given mode.

    Args:
        mode: The mode the model is supposed to run in (TRAIN, EVAL, ENCODE, DECODE).
        model_creator: Model class (e.g. NoisyGRUSeq2SeqWithFeatures).
        input_pipeline_creator: Input pipeline class (e.g. InputPipelineWithFeatures).
        hparams: Hyperparameters defined in file or flags.
    Returns:
        Named tuple with a graph, model and session object.
    """
    sess_config = tf.ConfigProto(allow_soft_placement=hparams.allow_soft_placement, gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=hparams.gpu_mem_frac), inter_op_parallelism_threads=hparams.cpu_threads, intra_op_parallelism_threads=hparams.cpu_threads)
    tf.reset_default_graph()
    # Each model lives in its own graph with its own session.
    graph = tf.Graph()
    with graph.as_default():
        if (mode in ['TRAIN', 'EVAL']):
            # Training/eval read from the tf-record input pipeline...
            input_pipe = input_pipeline_creator(mode, hparams)
            input_pipe.make_dataset_and_iterator()
            iterator = input_pipe.iterator
        else:
            # ...while ENCODE/DECODE models are fed directly at inference time.
            iterator = None
        model = model_creator(mode=mode, iterator=iterator, hparams=hparams)
        model.build_graph()
    sess = tf.Session(graph=graph, config=sess_config)
    return Model(graph=graph, model=model, sess=sess)
|
def add_arguments(parser):
    """Register the command-line arguments for descriptor extraction.

    Args:
        parser: argparse.ArgumentParser instance to fill.
    Returns:
        None
    """
    parser.add_argument('-i', '--input', type=str,
                        help='input file. Either .smi or .csv file.')
    parser.add_argument('-o', '--output', type=str,
                        help='output .csv file with a descriptor for each SMILES per row.')
    parser.add_argument('--smiles_header', type=str, default='smiles',
                        help='if .csv, specify the name of the SMILES column header here.')
    # --preprocess / --no-preprocess toggle a single boolean (default: on).
    parser.add_argument('--preprocess', dest='preprocess', action='store_true')
    parser.add_argument('--no-preprocess', dest='preprocess', action='store_false')
    parser.set_defaults(preprocess=True)
    parser.add_argument('--model_dir', type=str, default=_default_model_dir)
    # GPU usage is off unless --use_gpu is given.
    parser.add_argument('--use_gpu', dest='gpu', action='store_true')
    parser.set_defaults(gpu=False)
    parser.add_argument('--device', type=str, default='2')
    parser.add_argument('--cpu_threads', type=int, default=5)
    parser.add_argument('--batch_size', type=int, default=512)
|
def read_input(file):
    """Read the provided file into a pandas DataFrame.

    Args:
        file: Path to a .smi or .csv file.
    Returns:
        pandas DataFrame with the file contents.
    Raises:
        ValueError: If file is neither a .smi nor a .csv file.
    """
    if file.endswith('.csv'):
        return pd.read_csv(file)
    if file.endswith('.smi'):
        # .smi files are headerless and tab-separated: SMILES then an id column.
        return pd.read_table(file, header=None).rename({0: FLAGS.smiles_header, 1: 'EXTREG'}, axis=1)
    raise ValueError('use .csv or .smi format...')
|
def main(unused_argv):
    """Extract continuous data-driven descriptors (CDDD) for a file of SMILES."""
    if FLAGS.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.device)
    model_dir = FLAGS.model_dir
    file = FLAGS.input
    df = read_input(file)
    if FLAGS.preprocess:
        print('start preprocessing SMILES...')
        df['new_smiles'] = df[FLAGS.smiles_header].map(preprocess_smiles)
        # Rows whose SMILES could not be preprocessed become NaN; they are
        # excluded from descriptor calculation but kept in the output frame.
        sml_list = df[(~ df.new_smiles.isna())].new_smiles.tolist()
        print('finished preprocessing SMILES!')
    else:
        sml_list = df[FLAGS.smiles_header].tolist()
    print('start calculating descriptors...')
    infer_model = InferenceModel(model_dir=model_dir, use_gpu=FLAGS.gpu, batch_size=FLAGS.batch_size, cpu_threads=FLAGS.cpu_threads)
    descriptors = infer_model.seq_to_emb(sml_list)
    print(('finished calculating descriptors! %d out of %d input SMILES could be interpreted' % (len(sml_list), len(df))))
    # Join descriptors back on the surviving row index. Column names assume a
    # 512-dimensional embedding (cddd_1..cddd_512) — matches the default model;
    # NOTE(review): confirm for custom model sizes.
    if FLAGS.preprocess:
        df = df.join(pd.DataFrame(descriptors, index=df[(~ df.new_smiles.isna())].index, columns=[('cddd_' + str((i + 1))) for i in range(512)]))
    else:
        df = df.join(pd.DataFrame(descriptors, index=df.index, columns=[('cddd_' + str((i + 1))) for i in range(512)]))
    print('writing descriptors to file...')
    df.to_csv(FLAGS.output)
|
def main_wrapper():
    """Parse CLI flags into the module-level FLAGS and hand control to tf.app.run."""
    global FLAGS
    arg_parser = argparse.ArgumentParser()
    add_arguments(arg_parser)
    FLAGS, unparsed = arg_parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
def train_loop(train_model, eval_model, encoder_model, hparams):
    """Main loop: train, periodically checkpoint/evaluate, and spawn QSAR evals.

    Args:
        train_model: The model used for training.
        eval_model: The model used for evaluating translation accuracy.
        encoder_model: The model used for evaluating QSAR modeling performance.
        hparams: Hyperparameters defined in file or flags.
    Returns:
        None
    """
    qsar_process = []
    with train_model.graph.as_default():
        train_model.sess.run(train_model.model.iterator.initializer)
        step = train_model.model.initilize(train_model.sess, overwrite_saves=hparams.overwrite_saves)
        hparams_file_name = FLAGS.hparams_file_name
        if (hparams_file_name is None):
            hparams_file_name = os.path.join(hparams.save_dir, 'hparams.json')
        with open(hparams_file_name, 'w') as outfile:
            # to_json() already returns a JSON string; json.dump wraps it in an
            # extra encoding layer, which create_hparams undoes via json.load
            # before hparams.parse_json — the round-trip is consistent.
            json.dump(hparams.to_json(), outfile)
    while (step < hparams.num_steps):
        with train_model.graph.as_default():
            step = train_model.model.train(train_model.sess)
        if ((step % hparams.summary_freq) == 0):
            # Checkpoint first, then evaluate reconstruction from the checkpoint.
            with train_model.graph.as_default():
                train_model.model.save(train_model.sess)
            with eval_model.graph.as_default():
                eval_model.model.restore(eval_model.sess)
                eval_model.sess.run(eval_model.model.iterator.initializer)
                eval_reconstruct(eval_model, step, hparams)
        if ((step % hparams.inference_freq) == 0):
            # QSAR evaluation runs in separate processes so training continues.
            with encoder_model.graph.as_default():
                qsar_process.append(parallel_eval_qsar(encoder_model, step, hparams))
    # Wait for all spawned QSAR evaluation processes before returning.
    for process in qsar_process:
        process.join()
|
def main(unused_argv):
    """Entry point: build train/eval/encode models and run the training loop."""
    hparams = create_hparams(FLAGS)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(hparams.device)
    train_model, eval_model, encode_model = build_models(hparams)
    train_loop(train_model, eval_model, encode_model, hparams)
|
def add_arguments(parser):
    """Register the command-line arguments for the QSAR evaluation script.

    Args:
        parser: argparse.ArgumentParser instance to fill.
    Returns:
        None
    """
    parser.add_argument('--model_dir', type=str, default=_default_model_dir)
    # GPU usage is off unless --use_gpu is given.
    parser.add_argument('--use_gpu', dest='gpu', action='store_true')
    parser.set_defaults(gpu=False)
    parser.add_argument('--device', type=str, default='0')
    parser.add_argument('--cpu_threads', type=int, default=5)
|
def main(unused_argv):
    """Benchmark the learned descriptors on two QSAR tasks:
    Ames mutagenicity (SVC, accuracy) and lipophilicity (SVR, r2)."""
    if FLAGS.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.device)
        print('use gpu {}'.format(str(FLAGS.device)))
    else:
        # -1 hides all GPUs, forcing CPU execution.
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    model_dir = FLAGS.model_dir
    infer_model = InferenceModel(model_dir, use_gpu=FLAGS.gpu, cpu_threads=FLAGS.cpu_threads)
    ames_df = pd.read_csv('ames.csv')
    ames_smls = ames_df.smiles.tolist()
    ames_labels = ames_df.label.values
    ames_fold = ames_df.fold.values
    print('Extracting molecular desscriptors for Ames')
    ames_emb = infer_model.seq_to_emb(ames_smls)
    # NOTE(review): mean()/std() are scalars over the whole array, so this is
    # a global (not per-feature) standardization — confirm intended.
    ames_emb = ((ames_emb - ames_emb.mean()) / ames_emb.std())
    lipo_df = pd.read_csv('lipo.csv')
    lipo_smls = lipo_df.smiles.tolist()
    lipo_labels = lipo_df.label.values
    lipo_fold = lipo_df.fold.values
    print('Extracting molecular desscriptors for Lipophilicity')
    lipo_emb = infer_model.seq_to_emb(lipo_smls)
    lipo_emb = ((lipo_emb - lipo_emb.mean()) / lipo_emb.std())
    print('Running SVM on Ames mutagenicity...')
    clf = SVC(C=5.0)
    # NOTE(review): the fold array is passed positionally as `groups`;
    # newer scikit-learn requires cross_val_score(..., groups=ames_fold).
    result = cross_val_score(clf, ames_emb, ames_labels, ames_fold, cv=LeaveOneGroupOut(), n_jobs=5)
    print(('Ames mutagenicity accuracy: %0.3f +/- %0.3f' % (np.mean(result), np.std(result))))
    print('Running SVM on Lipophilicity...')
    clf = SVR(C=5.0)
    result = cross_val_score(clf, lipo_emb, lipo_labels, lipo_fold, cv=LeaveOneGroupOut(), n_jobs=5)
    print(('Lipophilicity r2: %0.3f +/- %0.3f' % (np.mean(result), np.std(result))))
|
class MyDistributedDataParallel(LightningDistributedDataParallel):
    """DDP wrapper that sends the whole batch to one device instead of scattering.

    NOTE(review): assumes forward is invoked as (batch, batch_idx) — the second
    positional input is repackaged as the 'batch_idx' keyword; confirm against
    the Lightning caller.
    """

    def scatter(self, inputs, kwargs, device_ids):
        # Pass batch_idx through kwargs; move the batch itself to the first
        # (and only) device assigned to this process.
        kwargs['batch_idx'] = inputs[1]
        kwargs = (kwargs,)
        inputs = ((inputs[0].to(torch.device('cuda:{}'.format(device_ids[0]))),),)
        return (inputs, kwargs)
|
class MyDDP(DDPPlugin):
    """Lightning DDP plugin that wraps the model in MyDistributedDataParallel
    with find_unused_parameters=True (needed when some parameters receive no
    gradient in a step)."""

    def configure_ddp(self):
        self.model = MyDistributedDataParallel(self.model, device_ids=self.determine_ddp_device_ids(), find_unused_parameters=True)
|
class GeometricGraphDataset(Dataset):
    """Dataset yielding random geometric graphs with node counts drawn
    uniformly from [n_min, n_max)."""

    def __init__(self, n_min=12, n_max=20, samples_per_epoch=100000, **kwargs):
        super().__init__()
        self.n_min = n_min
        self.n_max = n_max
        self.samples_per_epoch = samples_per_epoch

    def __len__(self):
        return self.samples_per_epoch

    def __getitem__(self, idx):
        # idx is ignored: every access draws a fresh random graph.
        num_nodes = np.random.randint(low=self.n_min, high=self.n_max)
        return random_geometric_graph(n=num_nodes, radius=0.5)
|
class RegularGraphDataset(Dataset):
    """Dataset yielding random 4-regular graphs with node counts drawn
    uniformly from [n_min, n_max)."""

    def __init__(self, n_min=12, n_max=20, samples_per_epoch=100000, **kwargs):
        super().__init__()
        self.n_min = n_min
        self.n_max = n_max
        self.samples_per_epoch = samples_per_epoch

    def __len__(self):
        return self.samples_per_epoch

    def __getitem__(self, idx):
        # idx is ignored: every access draws a fresh random graph.
        num_nodes = np.random.randint(low=self.n_min, high=self.n_max)
        return random_regular_graph(n=num_nodes, d=4)
|
class BarabasiAlbertGraphDataset(Dataset):
    """Dataset yielding Barabasi-Albert graphs with sampled node count n in
    [n_min, n_max) and attachment parameter m in [m_min, m_max)."""

    def __init__(self, n_min=12, n_max=20, m_min=1, m_max=5, samples_per_epoch=100000, **kwargs):
        super().__init__()
        self.n_min = n_min
        self.n_max = n_max
        self.m_min = m_min
        self.m_max = m_max
        self.samples_per_epoch = samples_per_epoch

    def __len__(self):
        return self.samples_per_epoch

    def _sample_params(self):
        """Sample (n, m); a degenerate range returns its fixed bound, since
        np.random.randint requires low < high."""
        if (self.n_min == self.n_max):
            # Bug fix: the original assigned n = self.m_min here (copy-paste
            # from the m branch), producing graphs with the wrong node count.
            n = self.n_min
        else:
            n = np.random.randint(low=self.n_min, high=self.n_max)
        if (self.m_min == self.m_max):
            m = self.m_min
        else:
            m = np.random.randint(low=self.m_min, high=self.m_max)
        return (n, m)

    def __getitem__(self, idx):
        # idx is ignored: every access draws a fresh random graph.
        (n, m) = self._sample_params()
        return barabasi_albert_graph(n, m)
|
class BinomialGraphDataset(Dataset):
    """Dataset yielding G(n, p) binomial (Erdos-Renyi) graphs with node count
    n sampled from [n_min, n_max) and edge probability p from [p_min, p_max].

    When pyg is True the networkx graph is converted to a torch-geometric
    Data object via from_networkx.
    """

    def __init__(self, n_min=12, n_max=20, p_min=0.4, p_max=0.6, samples_per_epoch=100000, pyg=False, **kwargs):
        super().__init__()
        self.n_min = n_min
        self.n_max = n_max
        self.p_min = p_min
        self.p_max = p_max
        self.samples_per_epoch = samples_per_epoch
        self.pyg = pyg

    def __len__(self):
        return self.samples_per_epoch

    def get_largest_subgraph(self, g):
        """Return the largest connected component of g, relabeled 0..k-1."""
        g = g.subgraph(sorted(nx.connected_components(g), key=len, reverse=True)[0])
        g = nx.convert_node_labels_to_integers(g, first_label=0)
        return g

    def _sample_n(self):
        """Sample the node count; a degenerate range returns the fixed bound
        (np.random.randint requires low < high)."""
        if (self.n_min == self.n_max):
            return self.n_min
        return np.random.randint(low=self.n_min, high=self.n_max)

    def _sample_p(self):
        """Sample the edge probability uniformly from [p_min, p_max].

        Bug fix: the original first drew np.random.randint on the float bounds
        (a ValueError for e.g. the 0.4/0.6 defaults) before overwriting the
        value with np.random.uniform; the dead raising draw is removed.
        """
        if (self.p_min == self.p_max):
            return self.p_min
        return np.random.uniform(low=self.p_min, high=self.p_max)

    def __getitem__(self, idx):
        # idx is ignored: every access draws a fresh random graph.
        g = binomial_graph(self._sample_n(), self._sample_p())
        if self.pyg:
            g = from_networkx(g)
        return g
|
class RandomGraphDataset(Dataset):
    """Dataset whose samples come from a randomly chosen graph family.

    Delegates graph construction to GraphGenerator, which picks a random
    networkx generator per call; only the node count is controlled here.
    """

    def __init__(self, n_min=12, n_max=20, samples_per_epoch=100000, **kwargs):
        super().__init__()
        self.n_min = n_min
        self.n_max = n_max
        self.samples_per_epoch = samples_per_epoch
        self.graph_generator = GraphGenerator()

    def __len__(self):
        # an "epoch" is simply a fixed number of freshly sampled graphs
        return self.samples_per_epoch

    def __getitem__(self, idx):
        num_nodes = np.random.randint(low=self.n_min, high=self.n_max)
        return self.graph_generator(num_nodes)
|
class PyGRandomGraphDataset(RandomGraphDataset):
    """RandomGraphDataset variant that yields torch_geometric objects instead of networkx graphs."""

    def __getitem__(self, idx):
        num_nodes = np.random.randint(low=self.n_min, high=self.n_max)
        graph = from_networkx(self.graph_generator(num_nodes))
        # drop node positions (set by e.g. geometric generators) — only the
        # topology is used downstream
        if graph.pos is not None:
            del graph.pos
        return graph
|
class DenseGraphBatch(Data):
    """Dense (padded) minibatch of graphs.

    Holds node features of shape (B, N_max, 1), one-hot shortest-path-distance
    edge features of shape (B, N_max, N_max, 6), and a boolean mask of shape
    (B, N_max) marking real (non-padding) nodes.
    """

    def __init__(self, node_features, edge_features, mask, **kwargs):
        # NOTE(review): Data.__init__ is intentionally not called here
        # (original behavior) — attributes are set directly on the instance.
        self.node_features = node_features
        self.edge_features = edge_features
        self.mask = mask
        for key, item in kwargs.items():
            setattr(self, key, item)

    @classmethod
    def from_sparse_graph_list(cls, data_list, labels=False):
        """Collate networkx graphs (or (graph, label) pairs when labels=True) into one padded batch.

        Each graph is padded with isolated nodes up to the batch-wide maximum
        node count; pairwise distances are clamped to 5 and one-hot encoded
        into 6 bins. Node counts are returned as the 'properties' tensor.
        """
        if labels:
            max_num_nodes = max(graph.number_of_nodes() for graph, label in data_list)
        else:
            max_num_nodes = max(graph.number_of_nodes() for graph in data_list)
        node_features = []
        edge_features = []
        mask = []
        y = []
        props = []
        for data in data_list:
            if labels:
                graph, label = data
                y.append(label)
            else:
                graph = data
            num_nodes = graph.number_of_nodes()
            props.append(torch.Tensor([num_nodes]))
            # pad the graph with isolated nodes up to the batch maximum
            graph.add_nodes_from(range(num_nodes, max_num_nodes))
            nf = torch.ones(max_num_nodes, 1)
            node_features.append(nf.unsqueeze(0))
            # all-pairs shortest-path distance matrix, clipped to 5, one-hot (6 bins)
            dm = torch.from_numpy(floyd_warshall_numpy(graph)).long()
            dm = torch.clamp(dm, 0, 5).unsqueeze(-1)
            padded_size = dm.size(1)
            dm = torch.zeros((padded_size, padded_size, 6)).type_as(dm).scatter_(2, dm, 1).float()
            edge_features.append(dm)
            # BUG FIX: the original reassigned num_nodes = dm.size(1) (the padded
            # size) before building the mask, so the mask was always all-True.
            # Use the graph's original node count so padding nodes are masked out.
            mask.append((torch.arange(max_num_nodes) < num_nodes).unsqueeze(0))
        node_features = torch.cat(node_features, dim=0)
        edge_features = torch.stack(edge_features, dim=0)
        mask = torch.cat(mask, dim=0)
        props = torch.cat(props, dim=0)
        batch = cls(node_features=node_features, edge_features=edge_features, mask=mask, properties=props)
        if labels:
            batch.y = torch.Tensor(y)
        return batch

    def __repr__(self):
        repr_list = ['{}={}'.format(key, list(value.shape)) for key, value in self.__dict__.items()]
        return 'DenseGraphBatch({})'.format(', '.join(repr_list))
|
class DenseGraphDataLoader(torch.utils.data.DataLoader):
    """DataLoader that collates sparse networkx graphs into DenseGraphBatch objects."""

    def __init__(self, dataset, batch_size=1, shuffle=False, labels=False, **kwargs):
        # closure over `labels` so the collate function knows whether items
        # are bare graphs or (graph, label) pairs
        def collate(data_list):
            return DenseGraphBatch.from_sparse_graph_list(data_list, labels)
        super().__init__(dataset, batch_size, shuffle, collate_fn=collate, **kwargs)
|
class GraphDataModule(pl.LightningDataModule):
    """LightningDataModule serving randomly generated graphs of a chosen family.

    Training data is re-sampled every epoch (samples_per_epoch items);
    validation uses a fixed-size sample of 4096 graphs. When
    distributed_sampler is True a non-shuffling DistributedSampler is attached
    (shuffling is unnecessary since every item is freshly sampled).
    """

    def __init__(self, graph_family, graph_kwargs=None, samples_per_epoch=100000, batch_size=32, distributed_sampler=True, num_workers=1):
        super().__init__()
        if graph_kwargs is None:
            graph_kwargs = {}
        self.graph_family = graph_family
        self.graph_kwargs = graph_kwargs
        self.samples_per_epoch = samples_per_epoch
        self.num_workers = num_workers
        self.batch_size = batch_size
        self.distributed_sampler = distributed_sampler
        self.train_dataset = None
        self.eval_dataset = None
        self.train_sampler = None
        self.eval_sampler = None

    def make_dataset(self, samples_per_epoch):
        """Instantiate the dataset class matching self.graph_family."""
        dataset_classes = {
            'binomial': BinomialGraphDataset,
            'barabasi_albert': BarabasiAlbertGraphDataset,
            'regular': RegularGraphDataset,
            'geometric': GeometricGraphDataset,
            'all': RandomGraphDataset,
        }
        if self.graph_family not in dataset_classes:
            raise NotImplementedError
        # BUG FIX: graph_kwargs is now forwarded to every family. The original
        # 'geometric' and 'all' branches dropped it, so CLI-provided
        # n_min/n_max were silently ignored for those families. All dataset
        # classes accept **kwargs and ignore keys they do not use.
        return dataset_classes[self.graph_family](samples_per_epoch=samples_per_epoch, **self.graph_kwargs)

    def train_dataloader(self):
        self.train_dataset = self.make_dataset(samples_per_epoch=self.samples_per_epoch)
        if self.distributed_sampler:
            train_sampler = DistributedSampler(dataset=self.train_dataset, shuffle=False)
        else:
            train_sampler = None
        return DenseGraphDataLoader(dataset=self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, sampler=train_sampler)

    def val_dataloader(self):
        self.eval_dataset = self.make_dataset(samples_per_epoch=4096)
        if self.distributed_sampler:
            eval_sampler = DistributedSampler(dataset=self.eval_dataset, shuffle=False)
        else:
            eval_sampler = None
        return DenseGraphDataLoader(dataset=self.eval_dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, sampler=eval_sampler)
|
def binomial_ego_graph(n, p):
    """Sample a G(n, p) graph and return the ego network of node 0, relabeled 0..k-1."""
    sampled = binomial_graph(n, p)
    ego = ego_graph(sampled, 0)
    return nx.convert_node_labels_to_integers(ego, first_label=0)
|
class GraphGenerator(object):
    """Samples graphs from a family of networkx generators with randomized parameters.

    Each graph_params entry maps a family name to its generator function plus
    how to draw its keyword arguments: 'kwargs' are fixed values,
    'kwargs_int_ranges' are inclusive integer ranges, and
    'kwargs_float_ranges' are uniform float ranges.
    """

    def __init__(self):
        # BUG FIX: in the original source, the 'binominal_ego' entry had been
        # garbled into the key of the following entry as a literal string.
        # The dict is restored with 'binominal_ego' as a proper entry.
        self.graph_params = {
            'binominal': {
                'func': binomial_graph,
                'kwargs_float_ranges': {'p': (0.2, 0.6)},
            },
            'binominal_ego': {
                'func': binomial_ego_graph,
                'kwargs_float_ranges': {'p': (0.2, 0.6)},
            },
            'newman_watts_strogatz': {
                'func': newman_watts_strogatz_graph,
                'kwargs_int_ranges': {'k': (2, 6)},
                'kwargs_float_ranges': {'p': (0.2, 0.6)},
            },
            'watts_strogatz': {
                'func': watts_strogatz_graph,
                'kwargs_int_ranges': {'k': (2, 6)},
                'kwargs_float_ranges': {'p': (0.2, 0.6)},
            },
            'random_regular': {
                'func': random_regular_graph,
                'kwargs_int_ranges': {'d': (3, 6)},
            },
            'barabasi_albert': {
                'func': barabasi_albert_graph,
                'kwargs_int_ranges': {'m': (1, 6)},
            },
            'dual_barabasi_albert': {
                'func': dual_barabasi_albert_graph,
                'kwargs_int_ranges': {'m1': (1, 6), 'm2': (1, 6)},
                'kwargs_float_ranges': {'p': (0.1, 0.9)},
            },
            'extended_barabasi_albert': {
                'func': extended_barabasi_albert_graph,
                'kwargs_int_ranges': {'m': (1, 6)},
                'kwargs_float_ranges': {'p': (0.1, 0.49), 'q': (0.1, 0.49)},
            },
            'powerlaw_cluster': {
                'func': powerlaw_cluster_graph,
                'kwargs_int_ranges': {'m': (1, 6)},
                'kwargs_float_ranges': {'p': (0.1, 0.9)},
            },
            'random_powerlaw_tree': {
                'func': random_powerlaw_tree,
                'kwargs': {'gamma': 3, 'tries': 1000},
            },
            'random_geometric': {
                'func': random_geometric_graph,
                'kwargs_float_ranges': {'p': (0.4, 0.5)},
                'kwargs': {'radius': 1},
            },
        }
        self.graph_types = list(self.graph_params.keys())

    def __call__(self, n, graph_type=None):
        """Sample one graph with n nodes; a random family is used if graph_type is None."""
        if graph_type is None:
            graph_type = random.choice(self.graph_types)
        params = self.graph_params[graph_type]
        kwargs = {}
        if 'kwargs' in params:
            kwargs = {**params['kwargs']}
        if 'kwargs_int_ranges' in params:
            for key, rng in params['kwargs_int_ranges'].items():
                # +1 makes the upper bound inclusive
                kwargs[key] = np.random.randint(rng[0], rng[1] + 1)
        if 'kwargs_float_ranges' in params:
            for key, rng in params['kwargs_float_ranges'].items():
                kwargs[key] = np.random.uniform(rng[0], rng[1])
        if graph_type == 'random_regular':
            # random_regular_graph requires n * d to be even
            if (n * kwargs['d']) % 2 != 0:
                n -= 1
        try:
            g = params['func'](n=n, **kwargs)
        except nx.exception.NetworkXError:
            # some parameter combinations are infeasible; resample with a
            # (likely different) random family
            g = self(n)
        return g
|
class EvalRandomGraphDataset(Dataset):
    """Fixed evaluation dataset: 256 n-node graphs per family, labeled by family/setting.

    Each family contributes 256 graphs split evenly across its parameter
    settings; every (family, setting) pair gets its own integer class label.
    The whole dataset is generated once in __init__ and shuffled.
    """

    def __init__(self, n, pyg=False):
        self.n = n
        self.pyg = pyg  # if True, items are torch_geometric objects with .y labels
        # Per-family generator config. 'kwargs' holds parallel tuples — one
        # value per parameter setting; 'kwargs_fix' holds constants shared by
        # all settings of that family.
        self.graph_params = {
            'binominal': {
                'func': binomial_graph,
                'kwargs': {'p': (0.25, 0.35, 0.5)},
            },
            'newman_watts_strogatz': {
                'func': newman_watts_strogatz_graph,
                'kwargs': {'k': (2, 2, 5, 5), 'p': (0.25, 0.75, 0.25, 0.75)},
            },
            'watts_strogatz': {
                'func': watts_strogatz_graph,
                'kwargs': {'k': (2, 2, 5, 5), 'p': (0.25, 0.75, 0.25, 0.75)},
            },
            'random_regular': {
                'func': random_regular_graph,
                'kwargs': {'d': (3, 4, 5, 6)},
            },
            'barabasi_albert': {
                'func': barabasi_albert_graph,
                'kwargs': {'m': (1, 2, 3, 4)},
            },
            'dual_barabasi_albert': {
                'func': dual_barabasi_albert_graph,
                'kwargs': {'m1': (2, 2), 'm2': (4, 1), 'p': (0.5, 0.5)},
            },
            'extended_barabasi_albert': {
                'func': extended_barabasi_albert_graph,
                'kwargs': {'m': (1, 2, 4), 'p': (0.5, 0.5, 0.5), 'q': (0.25, 0.25, 0.25)},
            },
            'powerlaw_cluster': {
                'func': powerlaw_cluster_graph,
                'kwargs': {'m': (2, 3, 4)},
                'kwargs_fix': {'p': 0.5},
            },
            # BUG FIX: 'random_powerlaw_tree' was listed in graph_types but had
            # no graph_params entry, so generate_dataset raised KeyError.
            # Parameters mirror the ones used by GraphGenerator.
            'random_powerlaw_tree': {
                'func': random_powerlaw_tree,
                'kwargs_fix': {'gamma': 3, 'tries': 1000},
            },
            'random_geometric': {
                'func': random_geometric_graph,
                'kwargs': {'p': (0.35, 0.55)},
                'kwargs_fix': {'radius': 1},
            },
        }
        # order defines the label numbering — do not reorder
        self.graph_types = ['binominal', 'barabasi_albert', 'random_geometric', 'random_regular', 'random_powerlaw_tree', 'watts_strogatz', 'extended_barabasi_albert', 'newman_watts_strogatz', 'dual_barabasi_albert']
        (graphs, labels) = self.generate_dataset()
        c = list(zip(graphs, labels))
        random.shuffle(c)
        (self.graphs, self.labels) = zip(*c)

    def generate_dataset(self):
        """Generate all graphs; returns parallel (graphs, labels) lists."""
        label = 0
        graphs = []
        labels = []
        for graph_type in self.graph_types:
            params = self.graph_params[graph_type]
            func = params['func']
            kwargs = params.get('kwargs')
            kwargs_fix = params.get('kwargs_fix')
            if kwargs is not None:
                num_settings = len(list(kwargs.values())[0])
            else:
                num_settings = 1
            for i in range(num_settings):
                final_kwargs = {}
                if kwargs is not None:
                    for key, args in kwargs.items():
                        final_kwargs[key] = args[i] if num_settings > 1 else args
                num_graphs = int(256 / num_settings)
                # merge the per-setting kwargs with the family's fixed kwargs
                # (the original could produce final_kwargs2 = None when both
                # were absent; this form is always a dict)
                final_kwargs2 = {**final_kwargs, **(kwargs_fix or {})}
                gs = [func(n=self.n, **final_kwargs2) for _ in range(num_graphs)]
                graphs.extend(gs)
                labels.extend(len(gs) * [label])
                label += 1
        return (graphs, labels)

    def __len__(self):
        return len(self.graphs)

    def __getitem__(self, idx):
        graph = self.graphs[idx]
        label = self.labels[idx]
        if self.pyg:
            g = from_networkx(graph)
            if g.pos is not None:
                del g.pos
            if g.edge_index.dtype != torch.long:
                # debug aid: flag unexpected edge_index dtypes
                print(g)
            g.y = torch.Tensor([label]).long()
            return g
        else:
            return (graph, label)
|
class EvalRandomBinomialGraphDataset(Dataset):
    """Fixed evaluation set of G(n, p) graphs where the label is the edge probability p.

    num_samples graphs are generated once in __init__, with n drawn from
    [n_min, n_max) and p uniformly from [p_min, p_max]. When pyg is True the
    graphs are converted to torch_geometric objects carrying p as .y, and
    __getitem__ returns the graph alone; otherwise it returns (graph, p).
    """

    def __init__(self, n_min, n_max, p_min, p_max, num_samples, pyg=False):
        self.n_min = n_min
        self.n_max = n_max
        self.p_min = p_min
        self.p_max = p_max
        self.num_samples = num_samples
        self.pyg = pyg
        self.graphs, self.labels = self.generate_dataset()

    def generate_dataset(self):
        """Sample all graphs up front; returns parallel (graphs, labels) lists."""
        graphs = []
        labels = []
        for _ in range(self.num_samples):
            num_nodes = np.random.randint(low=self.n_min, high=self.n_max)
            edge_prob = np.random.uniform(low=self.p_min, high=self.p_max)
            graph = binomial_graph(num_nodes, edge_prob)
            if self.pyg:
                graph = from_networkx(graph)
                graph.y = edge_prob
            graphs.append(graph)
            labels.append(edge_prob)
        return graphs, labels

    def __len__(self):
        return len(self.graphs)

    def __getitem__(self, idx):
        if self.pyg:
            # pyg graphs already carry the label as .y
            return self.graphs[idx]
        return (self.graphs[idx], self.labels[idx])
|
def add_arguments(parser):
    """Register all command-line options on *parser*.

    Args:
        parser: argparse-style parser object.
    Returns:
        The same parser with all arguments registered.
    """
    # --- run control / infrastructure ---
    parser.add_argument('--test', dest='test', action='store_true')
    parser.add_argument('-i', '--id', type=int, default=0)
    parser.add_argument('-g', '--gpus', default=1, type=int)
    parser.add_argument('-e', '--num_epochs', default=5000, type=int)
    parser.add_argument('--num_eval_samples', default=8192, type=int)
    parser.add_argument('--eval_freq', default=1000, type=int)
    parser.add_argument('-s', '--save_dir', default=DEFAULT_SAVE_DIR, type=str)
    parser.add_argument('--precision', default=32, type=int)
    parser.add_argument('--progress_bar', dest='progress_bar', action='store_true')
    parser.set_defaults(test=False, progress_bar=False)
    parser.add_argument('--resume_ckpt', default='', type=str)
    # --- optimisation / loss weights ---
    parser.add_argument('-b', '--batch_size', default=32, type=int)
    parser.add_argument('--lr', default=5e-05, type=float)
    parser.add_argument('--kld_loss_scale', default=0.001, type=float)
    parser.add_argument('--perm_loss_scale', default=0.5, type=float)
    parser.add_argument('--property_loss_scale', default=0.1, type=float)
    parser.add_argument('--vae', dest='vae', action='store_true')
    parser.set_defaults(vae=False)
    # --- model architecture ---
    parser.add_argument('--num_node_features', default=1, type=int)
    parser.add_argument('--num_edge_features', default=6, type=int)
    parser.add_argument('--emb_dim', default=64, type=int)
    parser.add_argument('--graph_encoder_hidden_dim', default=256, type=int)
    parser.add_argument('--graph_encoder_k_dim', default=64, type=int)
    parser.add_argument('--graph_encoder_v_dim', default=64, type=int)
    parser.add_argument('--graph_encoder_num_heads', default=16, type=int)
    parser.add_argument('--graph_encoder_ppf_hidden_dim', default=1024, type=int)
    parser.add_argument('--graph_encoder_num_layers', default=16, type=int)
    parser.add_argument('--graph_decoder_hidden_dim', default=256, type=int)
    parser.add_argument('--graph_decoder_k_dim', default=64, type=int)
    parser.add_argument('--graph_decoder_v_dim', default=64, type=int)
    parser.add_argument('--graph_decoder_num_heads', default=16, type=int)
    parser.add_argument('--graph_decoder_ppf_hidden_dim', default=1024, type=int)
    parser.add_argument('--graph_decoder_num_layers', default=16, type=int)
    parser.add_argument('--graph_decoder_pos_emb_dim', default=64, type=int)
    parser.add_argument('--property_predictor_hidden_dim', default=256, type=int)
    parser.add_argument('--num_properties', default=1, type=int)
    # --- data generation ---
    parser.add_argument('--num_workers', default=32, type=int)
    parser.add_argument('--shuffle', default=1, type=int)
    parser.add_argument('--graph_family', default='barabasi_albert', type=str)
    parser.add_argument('--n_min', default=12, type=int)
    parser.add_argument('--n_max', default=20, type=int)
    parser.add_argument('--p_min', default=0.4, type=float)
    parser.add_argument('--p_max', default=0.6, type=float)
    parser.add_argument('--m_min', default=1, type=int)
    parser.add_argument('--m_max', default=5, type=int)
    return parser
|
def main(hparams):
    """Train the graph autoencoder with the given hyper-parameters namespace."""
    run_dir = hparams.save_dir + '/run{}/'.format(hparams.id)
    if not os.path.isdir(run_dir):
        print('Creating directory')
        os.mkdir(run_dir)
    print('Starting Run {}'.format(hparams.id))
    # keep both the best (by val_loss) and the most recent checkpoint
    checkpoint_callback = ModelCheckpoint(dirpath=run_dir, save_last=True, save_top_k=1, monitor='val_loss')
    lr_logger = LearningRateMonitor()
    tb_logger = TensorBoardLogger(run_dir)
    model = PLGraphAE(hparams.__dict__, Critic)
    graph_kwargs = {
        'n_min': hparams.n_min,
        'n_max': hparams.n_max,
        'm_min': hparams.m_min,
        'm_max': hparams.m_max,
        'p_min': hparams.p_min,
        'p_max': hparams.p_max,
    }
    datamodule = GraphDataModule(graph_family=hparams.graph_family, graph_kwargs=graph_kwargs, batch_size=hparams.batch_size, num_workers=hparams.num_workers, samples_per_epoch=100000000)
    my_ddp_plugin = MyDDP()
    trainer = pl.Trainer(gpus=hparams.gpus, progress_bar_refresh_rate=(5 if hparams.progress_bar else 0), logger=tb_logger, checkpoint_callback=True, val_check_interval=(hparams.eval_freq if (not hparams.test) else 100), accelerator='ddp', plugins=[my_ddp_plugin], gradient_clip_val=0.1, callbacks=[lr_logger, checkpoint_callback], terminate_on_nan=True, replace_sampler_ddp=False, precision=hparams.precision, max_epochs=hparams.num_epochs, reload_dataloaders_every_epoch=True, resume_from_checkpoint=(hparams.resume_ckpt if (hparams.resume_ckpt != '') else None))
    trainer.fit(model=model, datamodule=datamodule)
|
class PLGraphAE(pl.LightningModule):
    """Lightning wrapper around GraphAE with a critic providing loss and metrics."""

    def __init__(self, hparams, critic):
        super().__init__()
        self.save_hyperparameters(hparams)
        self.graph_ae = GraphAE(hparams)
        self.critic = critic(hparams)

    def forward(self, graph, training):
        graph_pred, perm, mu, logvar = self.graph_ae(graph, training, tau=1.0)
        return graph_pred, perm, mu, logvar

    def training_step(self, graph, batch_idx):
        graph_pred, perm, mu, logvar = self(graph=graph, training=True)
        loss = self.critic(graph_true=graph, graph_pred=graph_pred, perm=perm, mu=mu, logvar=logvar)
        self.log_dict(loss)
        return loss

    def validation_step(self, graph, batch_idx):
        # evaluate with soft (training-mode) decoding ...
        graph_pred, perm, mu, logvar = self(graph=graph, training=True)
        metrics_soft = self.critic.evaluate(graph_true=graph, graph_pred=graph_pred, perm=perm, mu=mu, logvar=logvar, prefix='val')
        # ... and with hard (inference-mode) decoding
        graph_pred, perm, mu, logvar = self(graph=graph, training=False)
        metrics_hard = self.critic.evaluate(graph_true=graph, graph_pred=graph_pred, perm=perm, mu=mu, logvar=logvar, prefix='val_hard')
        metrics = {**metrics_soft, **metrics_hard}
        # BUG FIX: log the combined metrics exactly once; the original
        # additionally called self.log_dict(metrics_soft), re-logging every
        # 'val'-prefixed metric a second time per step.
        self.log_dict(metrics)

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.graph_ae.parameters(), lr=self.hparams['lr'], betas=(0.9, 0.98))
        lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.999)
        if 'eval_freq' in self.hparams:
            # step-based decay, synchronized with the validation cadence
            scheduler = {'scheduler': lr_scheduler, 'interval': 'step', 'frequency': 2 * (self.hparams['eval_freq'] + 1)}
        else:
            scheduler = {'scheduler': lr_scheduler, 'interval': 'epoch'}
        return [optimizer], [scheduler]

    def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure=None, second_order_closure=None, on_tpu=False, using_native_amp=False, using_lbfgs=False):
        # linear learning-rate warmup over the first 10k global steps
        if self.trainer.global_step < 10000:
            lr_scale = min(1.0, float(self.trainer.global_step + 1) / 10000.0)
            for pg in optimizer.param_groups:
                pg['lr'] = lr_scale * self.hparams.lr
        optimizer.step(closure=optimizer_closure)
        optimizer.zero_grad()
|
class NetworkConfig(object):
    # Base network/training hyper-parameters shared by all experiment configs.
    scale = 100  # global multiplier for the step-based schedules below
    max_step = (1000 * scale)  # total training steps (upper bound of the central_agent loop)
    initial_learning_rate = 0.0001
    learning_rate_decay_rate = 0.96
    learning_rate_decay_step = (5 * scale)  # presumably steps between lr decays -- confirm against Network.lr_schedule
    moving_average_decay = 0.9999
    entropy_weight = 0.1  # entropy-regularization weight passed to the train ops
    save_step = (10 * scale)  # checkpoint every save_step steps (see central_agent)
    max_to_keep = 1000  # presumably max checkpoints retained -- confirm in Network
    Conv2D_out = 128  # conv layer output channels -- confirm in Network
    Dense_out = 128  # dense layer output units -- confirm in Network
    optimizer = 'RMSprop'
    logit_clipping = 10  # NOTE(review): presumably clips policy logits; 0 would mean no clipping -- confirm in Network
|
class Config(NetworkConfig):
    # Experiment-level settings layered on top of NetworkConfig; individual
    # attributes can be overridden from command-line flags via get_config().
    version = 'TE_v2'
    project_name = 'CFR-RL'
    method = 'actor_critic'  # training method: 'actor_critic' or 'pure_policy' (see central_agent/sim)
    model_type = 'Conv'
    topology_file = 'Abilene'  # base name of the topology file under ./data/
    traffic_file = 'TM'  # training traffic-matrix file suffix
    test_traffic_file = 'TM2'  # evaluation traffic-matrix file suffix
    tm_history = 1
    max_moves = 10  # number of flows the agent may reroute per step (top-k of the policy)
    baseline = 'avg'
|
def get_config(FLAGS):
    """Overlay parsed command-line FLAGS onto the Config class and return it.

    Only flags whose names match an existing Config attribute are applied;
    everything else is left untouched.
    """
    config = Config
    for name, flag in FLAGS.__flags.items():
        if hasattr(config, name):
            setattr(config, name, flag.value)
    return config
|
class Topology(object):
    """Parses a network topology file and (pre)computes all-pairs shortest paths.

    Expected file layout (inferred from the parsing below): a header line with
    two ':'-separated counts (nodes then links), one skipped line, then one
    tab-separated line per link: index, src, dst, weight, capacity.
    Shortest paths are cached to '<topology_file>_shortest_paths'.
    """
    def __init__(self, config, data_dir='./data/'):
        self.topology_file = (data_dir + config.topology_file)
        self.shortest_paths_file = (self.topology_file + '_shortest_paths')
        # directed, weighted graph of the topology
        self.DG = nx.DiGraph()
        self.load_topology()
        self.calculate_paths()
    def load_topology(self):
        """Read node/link counts and per-link attributes from the topology file."""
        print('[*] Loading topology...', self.topology_file)
        f = open(self.topology_file, 'r')
        header = f.readline()
        # header layout assumed: "...: <num_nodes>\t...: <num_links>"
        self.num_nodes = int(header[(header.find(':') + 2):header.find('\t')])
        self.num_links = int(header[(header.find(':', 10) + 2):])
        f.readline()  # skip the column-header line
        self.link_idx_to_sd = {}  # link index -> (src, dst)
        self.link_sd_to_idx = {}  # (src, dst) -> link index
        self.link_capacities = np.empty(self.num_links)
        self.link_weights = np.empty(self.num_links)
        for line in f:
            link = line.split('\t')
            (i, s, d, w, c) = link
            self.link_idx_to_sd[int(i)] = (int(s), int(d))
            self.link_sd_to_idx[(int(s), int(d))] = int(i)
            self.link_capacities[int(i)] = float(c)
            self.link_weights[int(i)] = int(w)
            self.DG.add_weighted_edges_from([(int(s), int(d), int(w))])
        # sanity check: graph must match the counts declared in the header
        assert ((len(self.DG.nodes()) == self.num_nodes) and (len(self.DG.edges()) == self.num_links))
        f.close()
    def calculate_paths(self):
        """Load cached shortest paths if present, otherwise compute and cache them."""
        self.pair_idx_to_sd = []  # pair index -> (src, dst)
        self.pair_sd_to_idx = {}  # (src, dst) -> pair index
        self.shortest_paths = []  # per pair: list of node paths
        if os.path.exists(self.shortest_paths_file):
            print('[*] Loading shortest paths...', self.shortest_paths_file)
            f = open(self.shortest_paths_file, 'r')
            self.num_pairs = 0
            for line in f:
                # each cached line has the form "s->d: [p, a, t, h][p, a, t, h]..."
                sd = line[:line.find(':')]
                s = int(sd[:sd.find('-')])
                d = int(sd[(sd.find('>') + 1):])
                self.pair_idx_to_sd.append((s, d))
                self.pair_sd_to_idx[(s, d)] = self.num_pairs
                self.num_pairs += 1
                self.shortest_paths.append([])
                paths = line[(line.find(':') + 1):].strip()[1:(- 1)]
                while (paths != ''):
                    idx = paths.find(']')
                    path = paths[1:idx]
                    node_path = np.array(path.split(',')).astype(np.int16)
                    # a valid simple path visits each node at most once
                    assert (node_path.size == np.unique(node_path).size)
                    self.shortest_paths[(- 1)].append(node_path)
                    paths = paths[(idx + 3):]
        else:
            print('[!] Calculating shortest paths...')
            f = open(self.shortest_paths_file, 'w+')
            self.num_pairs = 0
            for s in range(self.num_nodes):
                for d in range(self.num_nodes):
                    if (s != d):
                        self.pair_idx_to_sd.append((s, d))
                        self.pair_sd_to_idx[(s, d)] = self.num_pairs
                        self.num_pairs += 1
                        # keep ALL equal-cost shortest paths for each pair
                        self.shortest_paths.append(list(nx.all_shortest_paths(self.DG, s, d, weight='weight')))
                        line = ((((str(s) + '->') + str(d)) + ': ') + str(self.shortest_paths[(- 1)]))
                        f.writelines((line + '\n'))
        # every ordered (s, d) pair with s != d must be present
        assert (self.num_pairs == (self.num_nodes * (self.num_nodes - 1)))
        f.close()
        print(('pairs: %d, nodes: %d, links: %d\n' % (self.num_pairs, self.num_nodes, self.num_links)))
|
class Traffic(object):
    """Loads a traffic-matrix (TM) time series for a topology from disk.

    Each line of the traffic file holds num_nodes * num_nodes space-separated
    volumes in row-major order; diagonal (self-traffic) entries are zeroed.
    """

    def __init__(self, config, num_nodes, data_dir='./data/', is_training=False):
        # training and evaluation use different TM files for the same topology
        if is_training:
            self.traffic_file = data_dir + config.topology_file + config.traffic_file
        else:
            self.traffic_file = data_dir + config.topology_file + config.test_traffic_file
        self.num_nodes = num_nodes
        self.load_traffic(config)

    def load_traffic(self, config):
        """Parse self.traffic_file into self.traffic_matrices (tm_cnt, n, n).

        `config` is unused here but kept for interface compatibility.
        """
        assert os.path.exists(self.traffic_file)
        print('[*] Loading traffic matrices...', self.traffic_file)
        traffic_matrices = []
        # FIX: use a context manager so the file is closed even if parsing fails
        # (the original leaked the handle on any exception before f.close()).
        with open(self.traffic_file, 'r') as f:
            for line in f:
                volumes = line.strip().split(' ')
                total_volume_cnt = len(volumes)
                assert total_volume_cnt == self.num_nodes * self.num_nodes
                matrix = np.zeros((self.num_nodes, self.num_nodes))
                for v in range(total_volume_cnt):
                    i, j = divmod(v, self.num_nodes)
                    if i != j:  # ignore self-traffic on the diagonal
                        matrix[i][j] = float(volumes[v])
                traffic_matrices.append(matrix)
        self.traffic_matrices = np.array(traffic_matrices)
        tms_shape = self.traffic_matrices.shape
        self.tm_cnt = tms_shape[0]
        print('Traffic matrices dims: [%d, %d, %d]\n' % (tms_shape[0], tms_shape[1], tms_shape[2]))
|
class Environment(object):
    """Bundles topology and traffic data into one object for the RL game.

    Mostly re-exports Topology/Traffic attributes under flat names and converts
    node-level shortest paths into link-index paths.
    """
    def __init__(self, config, is_training=False):
        self.data_dir = './data/'
        self.topology = Topology(config, self.data_dir)
        self.traffic = Traffic(config, self.topology.num_nodes, self.data_dir, is_training=is_training)
        # unit conversion of raw volumes -- NOTE(review): presumably
        # bytes -> Mbps over a 300s interval (x100 scale, x8 bits, /300 s,
        # /1000 kilo) -- confirm against the traffic file's units
        self.traffic_matrices = ((((self.traffic.traffic_matrices * 100) * 8) / 300) / 1000)
        self.tm_cnt = self.traffic.tm_cnt
        self.traffic_file = self.traffic.traffic_file
        self.num_pairs = self.topology.num_pairs
        self.pair_idx_to_sd = self.topology.pair_idx_to_sd
        self.pair_sd_to_idx = self.topology.pair_sd_to_idx
        self.num_nodes = self.topology.num_nodes
        self.num_links = self.topology.num_links
        self.link_idx_to_sd = self.topology.link_idx_to_sd
        self.link_sd_to_idx = self.topology.link_sd_to_idx
        self.link_capacities = self.topology.link_capacities
        self.link_weights = self.topology.link_weights
        self.shortest_paths_node = self.topology.shortest_paths
        self.shortest_paths_link = self.convert_to_edge_path(self.shortest_paths_node)
    def convert_to_edge_path(self, node_paths):
        """Convert per-pair node paths into lists of link indices.

        node_paths[i][j] is the j-th shortest node path for pair i; the result
        has the same nesting with each consecutive node pair replaced by its
        link index from link_sd_to_idx.
        """
        edge_paths = []
        num_pairs = len(node_paths)
        for i in range(num_pairs):
            edge_paths.append([])
            num_paths = len(node_paths[i])
            for j in range(num_paths):
                edge_paths[i].append([])
                path_len = len(node_paths[i][j])
                for n in range((path_len - 1)):
                    e = self.link_sd_to_idx[(node_paths[i][j][n], node_paths[i][j][(n + 1)])]
                    # every hop must map to a valid link index
                    assert ((e >= 0) and (e < self.num_links))
                    edge_paths[i][j].append(e)
        return edge_paths
|
def sim(config, network, game):
    """Roll out the trained policy over every traffic matrix and record evaluations."""
    for tm_idx in game.tm_indexes:
        state = game.get_state(tm_idx)
        observation = np.expand_dims(state, 0)
        if config.method == 'actor_critic':
            policy = network.actor_predict(observation).numpy()[0]
        elif config.method == 'pure_policy':
            policy = network.policy_predict(observation).numpy()[0]
        # the action set is the top max_moves entries of the policy distribution
        actions = policy.argsort()[-game.max_moves:]
        game.evaluate(tm_idx, actions, eval_delay=FLAGS.eval_delay)
|
def main(_):
    """Evaluation entry point: restore a checkpoint and simulate the policy."""
    # force CPU-only execution and quieter TF logging
    tf.config.experimental.set_visible_devices([], 'GPU')
    tf.get_logger().setLevel('INFO')
    config = get_config(FLAGS) or FLAGS
    env = Environment(config, is_training=False)
    game = CFRRL_Game(config, env)
    network = Network(config, game.state_dims, game.action_dim, game.max_moves)
    step = network.restore_ckpt(FLAGS.ckpt)
    # report the learning rate the restored optimizer had reached
    if config.method == 'actor_critic':
        learning_rate = network.lr_schedule(network.actor_optimizer.iterations.numpy()).numpy()
    elif config.method == 'pure_policy':
        learning_rate = network.lr_schedule(network.optimizer.iterations.numpy()).numpy()
    print('\nstep %d, learning rate: %f\n' % (step, learning_rate))
    sim(config, network, game)
|
def central_agent(config, game, model_weights_queues, experience_queues):
    """Master training loop for the multi-process setup.

    Each step: broadcast the current model weights to all FLAGS.num_agents
    workers via model_weights_queues, then block on experience_queues until
    every worker returns its batch, and run one training update. Supports the
    'actor_critic' and 'pure_policy' methods. The put/get ordering pairs with
    the worker processes' queue protocol and must not be changed.
    """
    network = Network(config, game.state_dims, game.action_dim, game.max_moves, master=True)
    network.save_hyperparams(config)
    # resume from the latest checkpoint, if any
    start_step = network.restore_ckpt()
    for step in tqdm(range(start_step, config.max_step), ncols=70, initial=start_step):
        network.ckpt.step.assign_add(1)
        # broadcast current weights so every agent rolls out the same policy
        model_weights = network.model.get_weights()
        for i in range(FLAGS.num_agents):
            model_weights_queues[i].put(model_weights)
        if (config.method == 'actor_critic'):
            s_batch = []
            a_batch = []
            r_batch = []
            # gather exactly FLAGS.num_iter transitions from each agent
            for i in range(FLAGS.num_agents):
                (s_batch_agent, a_batch_agent, r_batch_agent) = experience_queues[i].get()
                assert (len(s_batch_agent) == FLAGS.num_iter), (len(s_batch_agent), len(a_batch_agent), len(r_batch_agent))
                s_batch += s_batch_agent
                a_batch += a_batch_agent
                r_batch += r_batch_agent
            # each state contributes max_moves actions
            assert ((len(s_batch) * game.max_moves) == len(a_batch))
            # one-hot encode the action indices
            actions = np.eye(game.action_dim, dtype=np.float32)[np.array(a_batch)]
            (value_loss, entropy, actor_gradients, critic_gradients) = network.actor_critic_train(np.array(s_batch), actions, np.array(r_batch).astype(np.float32), config.entropy_weight)
            if GRADIENTS_CHECK:
                # debug guard: abort on NaN gradients with full context
                for g in range(len(actor_gradients)):
                    assert (np.any(np.isnan(actor_gradients[g])) == False), ('actor_gradients', s_batch, a_batch, r_batch, entropy)
                for g in range(len(critic_gradients)):
                    assert (np.any(np.isnan(critic_gradients[g])) == False), ('critic_gradients', s_batch, a_batch, r_batch)
            # periodic checkpoint + summary logging
            if ((step % config.save_step) == (config.save_step - 1)):
                network.save_ckpt(_print=True)
                actor_learning_rate = network.lr_schedule(network.actor_optimizer.iterations.numpy()).numpy()
                avg_value_loss = np.mean(value_loss)
                avg_reward = np.mean(r_batch)
                avg_entropy = np.mean(entropy)
                network.inject_summaries({'learning rate': actor_learning_rate, 'value loss': avg_value_loss, 'avg reward': avg_reward, 'avg entropy': avg_entropy}, step)
                print(('lr:%f, value loss:%f, avg reward:%f, avg entropy:%f' % (actor_learning_rate, avg_value_loss, avg_reward, avg_entropy)))
        elif (config.method == 'pure_policy'):
            s_batch = []
            a_batch = []
            r_batch = []
            ad_batch = []
            # gather transitions (plus advantages) from each agent
            for i in range(FLAGS.num_agents):
                (s_batch_agent, a_batch_agent, r_batch_agent, ad_batch_agent) = experience_queues[i].get()
                assert (len(s_batch_agent) == FLAGS.num_iter), (len(s_batch_agent), len(a_batch_agent), len(r_batch_agent), len(ad_batch_agent))
                s_batch += s_batch_agent
                a_batch += a_batch_agent
                r_batch += r_batch_agent
                ad_batch += ad_batch_agent
            # each state contributes max_moves actions
            assert ((len(s_batch) * game.max_moves) == len(a_batch))
            actions = np.eye(game.action_dim, dtype=np.float32)[np.array(a_batch)]
            (entropy, gradients) = network.policy_train(np.array(s_batch), actions, np.vstack(ad_batch).astype(np.float32), config.entropy_weight)
            if GRADIENTS_CHECK:
                # debug guard: abort on NaN gradients with full context
                for g in range(len(gradients)):
                    assert (np.any(np.isnan(gradients[g])) == False), (s_batch, a_batch, r_batch)
            # periodic checkpoint + summary logging
            if ((step % config.save_step) == (config.save_step - 1)):
                network.save_ckpt(_print=True)
                learning_rate = network.lr_schedule(network.optimizer.iterations.numpy()).numpy()
                avg_reward = np.mean(r_batch)
                avg_advantage = np.mean(ad_batch)
                avg_entropy = np.mean(entropy)
                network.inject_summaries({'learning rate': learning_rate, 'avg reward': avg_reward, 'avg advantage': avg_advantage, 'avg entropy': avg_entropy}, step)
                print(('lr:%f, avg reward:%f, avg advantage:%f, avg entropy:%f' % (learning_rate, avg_reward, avg_advantage, avg_entropy)))
|
# NOTE(review): the following lines were dataset-viewer page boilerplate
# accidentally appended to this file; they are not valid Python, so they are
# commented out here rather than deleted.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.