import copy
import os

import gym
import numpy as np
import torch
import torch.distributed as dist
from torch import nn

from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd, RunningMeanStdObs
from rl_games.common import common_losses
from rl_games.common import datasets
from rl_games.common import schedulers
class CentralValueTrain(nn.Module):
    """Trainer for a centralized value function (critic) used in multi-agent PPO.

    The critic consumes the full environment state (rather than per-agent
    observations) and is trained separately from the actor, with its own
    optimizer, LR schedule, minibatching and, optionally, RNN state handling
    and multi-GPU gradient averaging.
    """
    def __init__(self, state_shape, value_size, ppo_device, num_agents, horizon_length, num_actors, num_actions,
                 seq_len, normalize_value, network, config, writter, max_epochs, multi_gpu, zero_rnn_on_done):
        nn.Module.__init__(self)
        self.ppo_device = ppo_device
        self.num_agents, self.horizon_length, self.num_actors, self.seq_len = num_agents, horizon_length, num_actors, seq_len
        self.normalize_value = normalize_value
        self.num_actions = num_actions
        self.state_shape = state_shape
        self.value_size = value_size
        self.max_epochs = max_epochs
        self.multi_gpu = multi_gpu
        self.truncate_grads = config.get('truncate_grads', False)
        self.config = config
        self.normalize_input = config['normalize_input']
        self.zero_rnn_on_done = zero_rnn_on_done
        state_config = {
            'value_size' : value_size,
            'input_shape' : state_shape,
            'actions_num' : num_actions,
            'num_agents' : num_agents,
            'num_seqs' : num_actors,
            'normalize_input' : self.normalize_input,
            'normalize_value': self.normalize_value,
        }
        self.model = network.build(state_config)
        self.lr = float(config['learning_rate'])
        self.linear_lr = config.get('lr_schedule') == 'linear'
        # todo: support max frames as well
        if self.linear_lr:
            self.scheduler = schedulers.LinearScheduler(self.lr,
                max_steps = self.max_epochs,
                apply_to_entropy = False,
                start_entropy_coef = 0)
        else:
            self.scheduler = schedulers.IdentityScheduler()
        self.mini_epoch = config['mini_epochs']
        # One of the two minibatch size options must be present in the config.
        assert(('minibatch_size_per_env' in self.config) or ('minibatch_size' in self.config))
        self.minibatch_size_per_env = self.config.get('minibatch_size_per_env', 0)
        self.minibatch_size = self.config.get('minibatch_size', self.num_actors * self.minibatch_size_per_env)
        self.num_minibatches = self.horizon_length * self.num_actors // self.minibatch_size
        self.clip_value = config['clip_value']
        self.writter = writter
        self.weight_decay = config.get('weight_decay', 0.0)
        self.optimizer = torch.optim.Adam(self.model.parameters(), float(self.lr), eps=1e-08, weight_decay=self.weight_decay)
        self.frame = 0
        self.epoch_num = 0
        self.running_mean_std = None
        self.grad_norm = config.get('grad_norm', 1)
        # NOTE(review): duplicate of the identical assignment above; harmless but redundant.
        self.truncate_grads = config.get('truncate_grads', False)
        self.e_clip = config.get('e_clip', 0.2)
        # NOTE(review): 'truncate_grad' (singular) looks like a typo of
        # 'truncate_grads'; it is never read anywhere in this class.
        self.truncate_grad = self.config.get('truncate_grads', False)
        self.is_rnn = self.model.is_rnn()
        self.rnn_states = None
        self.batch_size = self.horizon_length * self.num_actors
        if self.is_rnn:
            self.rnn_states = self.model.get_default_rnn_state()
            self.rnn_states = [s.to(self.ppo_device) for s in self.rnn_states]
            total_agents = self.num_actors #* self.num_agents
            num_seqs = self.horizon_length // self.seq_len
            # Each minibatch must contain a whole number of RNN sequences.
            assert ((self.horizon_length * total_agents // self.num_minibatches) % self.seq_len == 0)
            # Rollout storage for RNN states: (num_seqs, layers, agents, hidden).
            self.mb_rnn_states = [ torch.zeros((num_seqs, s.size()[0], total_agents, s.size()[2]), dtype=torch.float32, device=self.ppo_device) for s in self.rnn_states]
        if self.multi_gpu:
            self.rank = int(os.getenv("LOCAL_RANK", "0"))
            self.rank_size = int(os.getenv("WORLD_SIZE", "1"))
            # dist.init_process_group("nccl", rank=self.rank, world_size=self.rank_size)
            self.device_name = 'cuda:' + str(self.rank)
            config['device'] = self.device_name
            if self.rank != 0:
                # Only rank 0 logs and drives the LR schedule; others follow via broadcast.
                config['print_stats'] = False
                config['lr_schedule'] = None
        self.dataset = datasets.PPODataset(self.batch_size, self.minibatch_size, True, self.is_rnn, self.ppo_device, self.seq_len)

    def update_lr(self, lr):
        """Set the optimizer learning rate; in multi-GPU mode rank 0's value is broadcast."""
        if self.multi_gpu:
            lr_tensor = torch.tensor([lr], device=self.device_name)
            dist.broadcast(lr_tensor, 0)
            lr = lr_tensor.item()
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr

    def get_stats_weights(self, model_stats=False):
        """Return normalizer state dicts (for checkpointing) when model_stats is True."""
        state = {}
        if model_stats:
            if self.normalize_input:
                state['running_mean_std'] = self.model.running_mean_std.state_dict()
            if self.normalize_value:
                state['reward_mean_std'] = self.model.value_mean_std.state_dict()
        return state

    def set_stats_weights(self, weights):
        # Intentionally a no-op: normalizer stats live inside the model itself.
        pass

    def update_dataset(self, batch_dict):
        """Prepare a rollout batch (values, returns, RNN states/masks) and load it into the dataset."""
        value_preds = batch_dict['old_values']
        returns = batch_dict['returns']
        actions = batch_dict['actions']
        dones = batch_dict['dones']
        rnn_masks = batch_dict['rnn_masks']
        if self.num_agents > 1:
            # Collapse per-agent tensors down to one shared critic target per actor.
            res = self.update_multiagent_tensors(value_preds, returns, actions, dones)
            batch_dict['old_values'] = res[0]
            batch_dict['returns'] = res[1]
            batch_dict['actions'] = res[2]
            batch_dict['dones'] = res[3]
        if self.is_rnn:
            states = []
            for mb_s in self.mb_rnn_states:
                # (num_seqs, layers, agents, hidden) -> (layers, agents*num_seqs, hidden)
                t_size = mb_s.size()[0] * mb_s.size()[2]
                h_size = mb_s.size()[3]
                states.append(mb_s.permute(1,2,0,3).reshape(-1, t_size, h_size))
            batch_dict['rnn_states'] = states
            if self.num_agents > 1:
                # NOTE(review): res[3] is the 'dones' tensor, reused here as the
                # RNN mask — presumably intentional (done steps are masked out),
                # but worth confirming against the single-agent path.
                rnn_masks = res[3]
                batch_dict['rnn_masks'] = rnn_masks
        self.dataset.update_values_dict(batch_dict)

    def _preproc_obs(self, obs_batch):
        """Convert uint8 image observations to floats in [0, 1]; pass others through.

        Dict observations are shallow-copied (module-level ``copy`` import) so
        the caller's dict is not mutated.
        """
        if type(obs_batch) is dict:
            obs_batch = copy.copy(obs_batch)
            for k,v in obs_batch.items():
                if v.dtype == torch.uint8:
                    obs_batch[k] = v.float() / 255.0
                else:
                    obs_batch[k] = v
        else:
            if obs_batch.dtype == torch.uint8:
                obs_batch = obs_batch.float() / 255.0
        return obs_batch

    def pre_step_rnn(self, n):
        """At the start of each RNN sequence, snapshot current states into rollout storage."""
        if not self.is_rnn:
            return
        if n % self.seq_len == 0:
            for s, mb_s in zip(self.rnn_states, self.mb_rnn_states):
                mb_s[n // self.seq_len,:,:,:] = s

    def post_step_rnn(self, all_done_indices, zero_rnn_on_done=True):
        """Zero RNN states for envs that finished an episode.

        NOTE(review): the ``zero_rnn_on_done`` parameter is ignored; the
        constructor-provided ``self.zero_rnn_on_done`` flag is used instead.
        """
        if not self.is_rnn:
            return
        if not self.zero_rnn_on_done:
            return
        # Per-agent done indices -> per-actor indices (critic is shared per actor).
        all_done_indices = all_done_indices[::self.num_agents] // self.num_agents
        for s in self.rnn_states:
            s[:,all_done_indices,:] = s[:,all_done_indices,:] * 0.0

    def forward(self, input_dict):
        return self.model(input_dict)

    def get_value(self, input_dict):
        """Run the critic in eval mode on a rollout step; updates self.rnn_states."""
        self.eval()
        obs_batch = input_dict['states']
        actions = input_dict.get('actions', None)
        obs_batch = self._preproc_obs(obs_batch)
        res_dict = self.forward({'obs' : obs_batch, 'actions': actions,
                                 'rnn_states': self.rnn_states,
                                 'is_train' : False})
        value, self.rnn_states = res_dict['values'], res_dict['rnn_states']
        if self.num_agents > 1:
            # Replicate the shared per-actor value for every agent.
            value = value.repeat(1, self.num_agents)
            value = value.view(value.size()[0]*self.num_agents, -1)
        return value

    def train_critic(self, input_dict):
        """One gradient step on a minibatch; returns the scalar loss."""
        self.train()
        loss = self.calc_gradients(input_dict)
        return loss.item()

    def update_multiagent_tensors(self, value_preds, returns, actions, dones):
        """Reorder per-agent rollout tensors and keep one shared slice per actor."""
        batch_size = self.batch_size
        ma_batch_size = self.num_actors * self.num_agents * self.horizon_length
        value_preds = value_preds.view(self.num_actors, self.num_agents, self.horizon_length, self.value_size).transpose(0,1)
        returns = returns.view(self.num_actors, self.num_agents, self.horizon_length, self.value_size).transpose(0,1)
        # After the transpose, the first batch_size rows correspond to agent 0 of every actor.
        value_preds = value_preds.contiguous().view(ma_batch_size, self.value_size)[:batch_size]
        returns = returns.contiguous().view(ma_batch_size, self.value_size)[:batch_size]
        dones = dones.contiguous().view(ma_batch_size, self.value_size)[:batch_size]
        return value_preds, returns, actions, dones

    def train_net(self):
        """Run mini_epoch passes over the dataset; returns the average critic loss."""
        self.train()
        loss = 0
        for _ in range(self.mini_epoch):
            for idx in range(len(self.dataset)):
                loss += self.train_critic(self.dataset[idx])
            if self.normalize_input:
                self.model.running_mean_std.eval() # don't need to update statistics more than one miniepoch
        avg_loss = loss / (self.mini_epoch * self.num_minibatches)
        self.epoch_num += 1
        self.lr, _ = self.scheduler.update(self.lr, 0, self.epoch_num, 0, 0)
        self.update_lr(self.lr)
        self.frame += self.batch_size
        if self.writter != None:
            self.writter.add_scalar('losses/cval_loss', avg_loss, self.frame)
            self.writter.add_scalar('info/cval_lr', self.lr, self.frame)
        return avg_loss

    def calc_gradients(self, batch):
        """Compute the clipped critic loss on one minibatch and apply an optimizer step."""
        obs_batch = self._preproc_obs(batch['obs'])
        value_preds_batch = batch['old_values']
        returns_batch = batch['returns']
        actions_batch = batch['actions']
        dones_batch = batch['dones']
        rnn_masks_batch = batch.get('rnn_masks')
        batch_dict = {'obs' : obs_batch,
                      'actions' : actions_batch,
                      'seq_length' : self.seq_len,
                      'dones' : dones_batch}
        if self.is_rnn:
            batch_dict['rnn_states'] = batch['rnn_states']
        res_dict = self.model(batch_dict)
        values = res_dict['values']
        loss = common_losses.critic_loss(value_preds_batch, values, self.e_clip, returns_batch, self.clip_value)
        # Zero out padded RNN timesteps before averaging.
        losses, _ = torch_ext.apply_masks([loss], rnn_masks_batch)
        loss = losses[0]
        if self.multi_gpu:
            self.optimizer.zero_grad()
        else:
            # Slightly faster than zero_grad(): drop .grad tensors entirely.
            for param in self.model.parameters():
                param.grad = None
        loss.backward()
        if self.multi_gpu:
            # batch allreduce ops: see https://github.com/entity-neural-network/incubator/pull/220
            all_grads_list = []
            for param in self.model.parameters():
                if param.grad is not None:
                    all_grads_list.append(param.grad.view(-1))
            all_grads = torch.cat(all_grads_list)
            dist.all_reduce(all_grads, op=dist.ReduceOp.SUM)
            offset = 0
            for param in self.model.parameters():
                if param.grad is not None:
                    # Copy the averaged flat gradient back into each parameter.
                    param.grad.data.copy_(
                        all_grads[offset : offset + param.numel()].view_as(param.grad.data) / self.rank_size
                    )
                    offset += param.numel()
        if self.truncate_grads:
            nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
        self.optimizer.step()
        return loss
import torch
import torch.nn as nn
import numpy as np
import rl_games.algos_torch.torch_ext as torch_ext
'''
updates moving statistics with momentum
'''
class MovingMeanStd(nn.Module):
    """Normalizes inputs with exponentially-weighted moving mean/variance.

    During training, the moving statistics are updated with momentum before
    normalizing; in eval mode the stored statistics are used as-is. With
    ``unnorm=True`` the transform is inverted (de-normalization).
    """

    def __init__(self, insize, momentum = 0.25, epsilon=1e-05, per_channel=False, norm_only=False):
        super(MovingMeanStd, self).__init__()
        self.insize = insize
        self.epsilon = epsilon
        self.momentum = momentum
        self.norm_only = norm_only
        self.per_channel = per_channel
        if per_channel:
            ndim = len(self.insize)
            if ndim == 3:
                self.axis = [0, 2, 3]
            elif ndim == 2:
                self.axis = [0, 2]
            elif ndim == 1:
                self.axis = [0]
            stat_size = self.insize[0]
        else:
            self.axis = [0]
            stat_size = insize
        # float64 buffers keep the accumulated statistics numerically stable.
        self.register_buffer("moving_mean", torch.zeros(stat_size, dtype=torch.float64))
        self.register_buffer("moving_var", torch.ones(stat_size, dtype=torch.float64))

    def forward(self, input, mask=None, unnorm=False):
        if self.training:
            if mask is not None:
                mean, var = torch_ext.get_mean_std_with_masks(input, mask)
            else:
                mean = input.mean(self.axis)  # reduce over all non-channel axes
                var = input.var(self.axis)
            blend = 1 - self.momentum
            self.moving_mean = self.moving_mean * self.momentum + mean * blend
            self.moving_var = self.moving_var * self.momentum + var * blend
        # Reshape statistics so they broadcast against the input layout.
        if self.per_channel:
            ndim = len(self.insize)
            if ndim == 3:
                stat_shape = [1, self.insize[0], 1, 1]
            elif ndim == 2:
                stat_shape = [1, self.insize[0], 1]
            elif ndim == 1:
                stat_shape = [1, self.insize[0]]
            current_mean = self.moving_mean.view(stat_shape).expand_as(input)
            current_var = self.moving_var.view(stat_shape).expand_as(input)
        else:
            current_mean = self.moving_mean
            current_var = self.moving_var
        if unnorm:
            out = torch.clamp(input, min=-5.0, max=5.0)
            out = torch.sqrt(current_var.float() + self.epsilon) * out + current_mean.float()
        else:
            out = (input - current_mean.float()) / torch.sqrt(current_var.float() + self.epsilon)
            out = torch.clamp(out, min=-5.0, max=5.0)
        return out
from rl_games.algos_torch import torch_ext
import torch
import torch.nn as nn
import numpy as np
'''
updates statistic from a full data
'''
class RunningMeanStd(nn.Module):
    """Normalizes inputs using running mean/variance accumulated over all data.

    Statistics are merged with the parallel-variance (Chan et al.) update in
    training mode; eval mode uses the stored statistics unchanged. With
    ``unnorm=True`` the transform is inverted; ``norm_only`` divides by the
    standard deviation without centering.
    """

    def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
        super(RunningMeanStd, self).__init__()
        print('RunningMeanStd: ', insize)
        self.insize = insize
        self.epsilon = epsilon
        self.norm_only = norm_only
        self.per_channel = per_channel
        if per_channel:
            ndim = len(self.insize)
            if ndim == 3:
                self.axis = [0, 2, 3]
            elif ndim == 2:
                self.axis = [0, 2]
            elif ndim == 1:
                self.axis = [0]
            stat_size = self.insize[0]
        else:
            self.axis = [0]
            stat_size = insize
        # float64 buffers keep the long-running accumulation numerically stable.
        self.register_buffer("running_mean", torch.zeros(stat_size, dtype=torch.float64))
        self.register_buffer("running_var", torch.ones(stat_size, dtype=torch.float64))
        self.register_buffer("count", torch.ones((), dtype=torch.float64))

    def _update_mean_var_count_from_moments(self, mean, var, count, batch_mean, batch_var, batch_count):
        # Parallel-variance combination of two sample sets (Chan et al.).
        total = count + batch_count
        diff = batch_mean - mean
        combined_mean = mean + diff * batch_count / total
        m2 = var * count + batch_var * batch_count + diff ** 2 * count * batch_count / total
        return combined_mean, m2 / total, total

    def forward(self, input, unnorm=False, mask=None):
        if self.training:
            if mask is not None:
                mean, var = torch_ext.get_mean_std_with_masks(input, mask)
            else:
                mean = input.mean(self.axis)  # reduce over all non-channel axes
                var = input.var(self.axis)
            self.running_mean, self.running_var, self.count = self._update_mean_var_count_from_moments(
                self.running_mean, self.running_var, self.count, mean, var, input.size()[0])
        # Reshape statistics so they broadcast against the input layout.
        if self.per_channel:
            ndim = len(self.insize)
            if ndim == 3:
                stat_shape = [1, self.insize[0], 1, 1]
            elif ndim == 2:
                stat_shape = [1, self.insize[0], 1]
            elif ndim == 1:
                stat_shape = [1, self.insize[0]]
            current_mean = self.running_mean.view(stat_shape).expand_as(input)
            current_var = self.running_var.view(stat_shape).expand_as(input)
        else:
            current_mean = self.running_mean
            current_var = self.running_var
        denom = torch.sqrt(current_var.float() + self.epsilon)
        if unnorm:
            out = torch.clamp(input, min=-5.0, max=5.0)
            out = denom * out + current_mean.float()
        else:
            if self.norm_only:
                out = input / denom
            else:
                out = (input - current_mean.float()) / denom
                out = torch.clamp(out, min=-5.0, max=5.0)
        return out
class RunningMeanStdObs(nn.Module):
    """Applies an independent RunningMeanStd to each entry of a dict observation."""

    def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
        assert(isinstance(insize, dict))
        super(RunningMeanStdObs, self).__init__()
        normalizers = {key: RunningMeanStd(shape, epsilon, per_channel, norm_only)
                       for key, shape in insize.items()}
        self.running_mean_std = nn.ModuleDict(normalizers)

    def forward(self, input, unnorm=False):
        return {key: self.running_mean_std[key](value, unnorm) for key, value in input.items()}
from rl_games.common.player import BasePlayer
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common.tr_helpers import unsqueeze_obs
import gym
import torch
from torch import nn
import numpy as np
def rescale_actions(low, high, action):
    """Affinely map an action from [-1, 1] to the range [low, high]."""
    half_range = (high - low) / 2.0
    midpoint = (high + low) / 2.0
    return action * half_range + midpoint
class PpoPlayerContinuous(BasePlayer):
    """Inference-time player for a continuous-action PPO policy.

    Builds the policy network from the training config and exposes
    get_action() for evaluation rollouts; no gradients are computed here.
    """
    def __init__(self, params):
        BasePlayer.__init__(self, params)
        self.network = self.config['network']
        self.actions_num = self.action_space.shape[0]
        # Action bounds as tensors on the inference device, used by rescale_actions.
        self.actions_low = torch.from_numpy(self.action_space.low.copy()).float().to(self.device)
        self.actions_high = torch.from_numpy(self.action_space.high.copy()).float().to(self.device)
        self.mask = [False]
        self.normalize_input = self.config['normalize_input']
        self.normalize_value = self.config.get('normalize_value', False)
        obs_shape = self.obs_shape
        config = {
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            'num_seqs' : self.num_agents,
            'value_size': self.env_info.get('value_size',1),
            'normalize_value': self.normalize_value,
            'normalize_input': self.normalize_input,
        }
        self.model = self.network.build(config)
        self.model.to(self.device)
        self.model.eval()
        self.is_rnn = self.model.is_rnn()

    def get_action(self, obs, is_deterministic = False):
        """Return an action for `obs`: the policy mean if is_deterministic, else a sample."""
        if self.has_batch_dimension == False:
            obs = unsqueeze_obs(obs)
        obs = self._preproc_obs(obs)
        input_dict = {
            'is_train': False,
            'prev_actions': None,
            'obs' : obs,
            'rnn_states' : self.states
        }
        with torch.no_grad():
            res_dict = self.model(input_dict)
        mu = res_dict['mus']
        action = res_dict['actions']
        self.states = res_dict['rnn_states']
        if is_deterministic:
            current_action = mu
        else:
            current_action = action
        if self.has_batch_dimension == False:
            current_action = torch.squeeze(current_action.detach())
        if self.clip_actions:
            # The network emits actions in [-1, 1]; map them to the env's bounds.
            return rescale_actions(self.actions_low, self.actions_high, torch.clamp(current_action, -1.0, 1.0))
        else:
            return current_action

    def restore(self, fn):
        """Load model weights (and optional input normalizer / env state) from checkpoint `fn`."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.model.load_state_dict(checkpoint['model'])
        if self.normalize_input and 'running_mean_std' in checkpoint:
            self.model.running_mean_std.load_state_dict(checkpoint['running_mean_std'])
        env_state = checkpoint.get('env_state', None)
        if self.env is not None and env_state is not None:
            self.env.set_env_state(env_state)

    def reset(self):
        # Reset only clears RNN hidden state; the model itself is stateless otherwise.
        self.init_rnn()
class PpoPlayerDiscrete(BasePlayer):
    """Inference-time player for a discrete (or multi-discrete) PPO policy.

    Supports optional per-step action masks via get_masked_action().
    """
    def __init__(self, params):
        BasePlayer.__init__(self, params)
        self.network = self.config['network']
        if type(self.action_space) is gym.spaces.Discrete:
            self.actions_num = self.action_space.n
            self.is_multi_discrete = False
        if type(self.action_space) is gym.spaces.Tuple:
            # Tuple of Discrete spaces -> multi-discrete: one action head per space.
            self.actions_num = [action.n for action in self.action_space]
            self.is_multi_discrete = True
        self.mask = [False]
        self.normalize_input = self.config['normalize_input']
        self.normalize_value = self.config.get('normalize_value', False)
        obs_shape = self.obs_shape
        config = {
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            'num_seqs' : self.num_agents,
            'value_size': self.env_info.get('value_size',1),
            'normalize_value': self.normalize_value,
            'normalize_input': self.normalize_input,
        }
        self.model = self.network.build(config)
        self.model.to(self.device)
        self.model.eval()
        self.is_rnn = self.model.is_rnn()

    def get_masked_action(self, obs, action_masks, is_deterministic = True):
        """Return an action restricted to `action_masks` (bool mask of legal actions)."""
        if self.has_batch_dimension == False:
            obs = unsqueeze_obs(obs)
        obs = self._preproc_obs(obs)
        action_masks = torch.Tensor(action_masks).to(self.device).bool()
        input_dict = {
            'is_train': False,
            'prev_actions': None,
            'obs' : obs,
            'action_masks' : action_masks,
            'rnn_states' : self.states
        }
        self.model.eval()
        with torch.no_grad():
            res_dict = self.model(input_dict)
        logits = res_dict['logits']
        action = res_dict['actions']
        self.states = res_dict['rnn_states']
        if self.is_multi_discrete:
            if is_deterministic:
                # Greedy: argmax per action head, stacked into one joint action.
                action = [torch.argmax(logit.detach(), axis=-1).squeeze() for logit in logits]
                return torch.stack(action,dim=-1)
            else:
                return action.squeeze().detach()
        else:
            if is_deterministic:
                return torch.argmax(logits.detach(), axis=-1).squeeze()
            else:
                return action.squeeze().detach()

    def get_action(self, obs, is_deterministic = False):
        """Return an action for `obs`: greedy argmax if is_deterministic, else a sample."""
        if self.has_batch_dimension == False:
            obs = unsqueeze_obs(obs)
        obs = self._preproc_obs(obs)
        self.model.eval()
        input_dict = {
            'is_train': False,
            'prev_actions': None,
            'obs' : obs,
            'rnn_states' : self.states
        }
        with torch.no_grad():
            res_dict = self.model(input_dict)
        logits = res_dict['logits']
        action = res_dict['actions']
        self.states = res_dict['rnn_states']
        if self.is_multi_discrete:
            if is_deterministic:
                # NOTE(review): uses axis=1 where get_masked_action uses axis=-1;
                # equivalent for 2-D [batch, n] logits, but inconsistent.
                action = [torch.argmax(logit.detach(), axis=1).squeeze() for logit in logits]
                return torch.stack(action,dim=-1)
            else:
                return action.squeeze().detach()
        else:
            if is_deterministic:
                return torch.argmax(logits.detach(), axis=-1).squeeze()
            else:
                return action.squeeze().detach()

    def restore(self, fn):
        """Load model weights (and optional input normalizer / env state) from checkpoint `fn`."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.model.load_state_dict(checkpoint['model'])
        if self.normalize_input and 'running_mean_std' in checkpoint:
            self.model.running_mean_std.load_state_dict(checkpoint['running_mean_std'])
        env_state = checkpoint.get('env_state', None)
        if self.env is not None and env_state is not None:
            self.env.set_env_state(env_state)

    def reset(self):
        # Reset only clears RNN hidden state.
        self.init_rnn()
class SACPlayer(BasePlayer):
    """Inference-time player for a SAC-trained policy.

    Builds the actor/critic networks from the training config and exposes
    get_action() for evaluation; no gradients are computed here.
    """
    def __init__(self, params):
        BasePlayer.__init__(self, params)
        self.network = self.config['network']
        self.actions_num = self.action_space.shape[0]
        # Global scalar bounds used to clamp sampled actions.
        self.action_range = [
            float(self.env_info['action_space'].low.min()),
            float(self.env_info['action_space'].high.max())
        ]
        obs_shape = self.obs_shape
        self.normalize_input = False
        config = {
            'obs_dim': self.env_info["observation_space"].shape[0],
            'action_dim': self.env_info["action_space"].shape[0],
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            'value_size': self.env_info.get('value_size', 1),
            'normalize_value': False,
            'normalize_input': self.normalize_input,
        }
        self.model = self.network.build(config)
        self.model.to(self.device)
        self.model.eval()
        self.is_rnn = self.model.is_rnn()

    def restore(self, fn):
        """Load actor, critic and target-critic weights from checkpoint `fn`."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.model.sac_network.actor.load_state_dict(checkpoint['actor'])
        self.model.sac_network.critic.load_state_dict(checkpoint['critic'])
        self.model.sac_network.critic_target.load_state_dict(checkpoint['critic_target'])
        if self.normalize_input and 'running_mean_std' in checkpoint:
            self.model.running_mean_std.load_state_dict(checkpoint['running_mean_std'])
        env_state = checkpoint.get('env_state', None)
        if self.env is not None and env_state is not None:
            self.env.set_env_state(env_state)

    def get_action(self, obs, is_deterministic=False):
        """Return an action for `obs`: the distribution mean if is_deterministic, else a sample."""
        if self.has_batch_dimension == False:
            obs = unsqueeze_obs(obs)
        dist = self.model.actor(obs)
        # Bug fix: the branches were swapped — deterministic evaluation must use
        # the distribution mean, while stochastic mode draws a sample.
        actions = dist.mean if is_deterministic else dist.sample()
        actions = actions.clamp(*self.action_range).to(self.device)
        if self.has_batch_dimension == False:
            actions = torch.squeeze(actions.detach())
        return actions

    def reset(self):
        # SAC player keeps no recurrent state; nothing to reset.
        pass
import gym
import numpy as np
from pettingzoo.classic import connect_four_v0
import yaml
from rl_games.torch_runner import Runner
import os
from collections import deque
class ConnectFourSelfPlay(gym.Env):
    """Self-play wrapper around the PettingZoo Connect-Four environment.

    The learning agent plays against a frozen copy of itself (or optionally a
    random or human opponent). Observations are the last two raw board frames
    stacked along the channel axis and scaled to uint8 in [0, 255]. The
    learner's side (player 0 / player 1) is randomized every reset.
    """

    def __init__(self, name="connect_four_v0", **kwargs):
        gym.Env.__init__(self)
        self.name = name
        self.is_deterministic = kwargs.pop('is_deterministic', False)
        self.is_human = kwargs.pop('is_human', False)
        self.random_agent = kwargs.pop('random_agent', False)
        self.config_path = kwargs.pop('config_path')
        self.agent = None  # opponent policy; created lazily on first reset()
        self.env = connect_four_v0.env() # gym.make(name, **kwargs)
        self.action_space = self.env.action_spaces['player_0']
        observation_space = self.env.observation_spaces['player_0']
        shp = observation_space.shape
        # Two stacked frames -> double the channel dimension.
        self.observation_space = gym.spaces.Box(
            low=0, high=1, shape=(shp[:-1] + (shp[-1] * 2,)), dtype=np.uint8)
        self.obs_deque = deque([], maxlen=2)
        self.agent_id = 0  # which side the learner currently plays

    def _get_legal_moves(self, agent_id):
        """Return (bool mask over all columns, raw list of legal action ids) for `agent_id`."""
        name = 'player_0' if agent_id == 0 else 'player_1'
        action_ids = self.env.infos[name]['legal_moves']
        # Bug fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `bool` is the documented replacement dtype.
        mask = np.zeros(self.action_space.n, dtype=bool)
        mask[action_ids] = True
        return mask, action_ids

    def env_step(self, action):
        """Step the underlying env and report reward/done from the learner's perspective."""
        obs = self.env.step(action)
        info = {}
        name = 'player_0' if self.agent_id == 0 else 'player_1'
        reward = self.env.rewards[name]
        done = self.env.dones[name]
        return obs, reward, done, info

    def get_obs(self):
        """Stack the two most recent frames along channels, scaled to uint8 [0, 255]."""
        return np.concatenate(self.obs_deque, -1).astype(np.uint8) * 255

    def reset(self):
        """Start a new game; if the opponent moves first, play its move before returning."""
        if self.agent == None:
            self.create_agent(self.config_path)
        self.agent_id = np.random.randint(2)  # randomize which side the learner plays
        obs = self.env.reset()
        # Duplicate the initial frame so the 2-frame stack is well-defined.
        self.obs_deque.append(obs)
        self.obs_deque.append(obs)
        if self.agent_id == 1:
            # Learner is second; let the opponent (player 0) move first.
            op_obs = self.get_obs()
            op_obs = self.agent.obs_to_torch(op_obs)
            mask, ids = self._get_legal_moves(0)
            if self.is_human:
                self.render()
                opponent_action = int(input())
            else:
                if self.random_agent:
                    opponent_action = np.random.choice(ids, 1)[0]
                else:
                    opponent_action = self.agent.get_masked_action(
                        op_obs, mask, self.is_deterministic).item()
            obs, _, _, _ = self.env_step(opponent_action)
            self.obs_deque.append(obs)
        return self.get_obs()

    def create_agent(self, config):
        """Build the frozen self-play opponent from an rl-games YAML config file."""
        with open(config, 'r') as stream:
            config = yaml.safe_load(stream)
            runner = Runner()
            runner.load(config)
        config = runner.get_prebuilt_config()
        # 'RAYLIB has bug here, CUDA_VISIBLE_DEVICES become unset'
        if 'CUDA_VISIBLE_DEVICES' in os.environ:
            os.environ.pop('CUDA_VISIBLE_DEVICES')
        self.agent = runner.create_player()
        self.agent.model.eval()

    def step(self, action):
        """Apply the learner's move, then (if the game continues) the opponent's reply."""
        obs, reward, done, info = self.env_step(action)
        self.obs_deque.append(obs)
        if done:
            # Learner's own move ended the game.
            if reward == 1:
                info['battle_won'] = 1
            else:
                info['battle_won'] = 0
            return self.get_obs(), reward, done, info
        # Opponent's reply.
        op_obs = self.get_obs()
        op_obs = self.agent.obs_to_torch(op_obs)
        mask, ids = self._get_legal_moves(1 - self.agent_id)
        if self.is_human:
            self.render()
            opponent_action = int(input())
        else:
            if self.random_agent:
                opponent_action = np.random.choice(ids, 1)[0]
            else:
                opponent_action = self.agent.get_masked_action(
                    op_obs, mask, self.is_deterministic).item()
        obs, reward, done, _ = self.env_step(opponent_action)
        if done:
            # Reward is reported from the learner's perspective: -1 means loss.
            if reward == -1:
                info['battle_won'] = 0
            else:
                info['battle_won'] = 1
        self.obs_deque.append(obs)
        return self.get_obs(), reward, done, info

    def render(self, mode='ansi'):
        self.env.render(mode)

    def update_weights(self, weigths):
        """Refresh the opponent with the learner's latest weights (self-play sync)."""
        self.agent.set_weights(weigths)

    def get_action_mask(self):
        """Bool mask of legal columns for the learner's next move."""
        mask, _ = self._get_legal_moves(self.agent_id)
        return mask

    def has_action_mask(self):
        return True
from rl_games.common.ivecenv import IVecEnv
import gym
import torch
import numpy as np
class CuleEnv(IVecEnv):
    """Vectorized Atari env backed by NVIDIA torchcule (GPU-resident emulation).

    Optionally exposes a dict observation space that also feeds the previous
    reward and last action back to the agent.
    """
    def __init__(self, config_name, num_actors, **kwargs):
        import torchcule
        from torchcule.atari import Env as AtariEnv
        self.batch_size = num_actors
        env_name=kwargs.pop('env_name')
        self.has_lives = kwargs.pop('has_lives', False)
        self.device = kwargs.pop('device', 'cuda:0')
        self.episodic_life = kwargs.pop('episodic_life', False)
        self.use_dict_obs_space = kwargs.pop('use_dict_obs_space', False)
        self.env = AtariEnv(env_name, num_actors, color_mode='gray', repeat_prob=0.0, device=self.device, rescale=True, episodic_life=self.episodic_life, frameskip=4)
        if self.use_dict_obs_space:
            # Dict space: frame + scalar reward + last discrete action.
            self.observation_space= gym.spaces.Dict({
                'observation' : self.env.observation_space,
                'reward' : gym.spaces.Box(low=0, high=1, shape=( ), dtype=np.float32),
                'last_action': gym.spaces.Box(low=0, high=self.env.action_space.n, shape=(), dtype=int)
            })
        else:
            self.observation_space = gym.spaces.Box(0, 255, (84, 84, 1), np.uint8) #self.env.observation_space
        self.ids = np.arange(0, num_actors)
        self.action_space = self.env.action_space
        # Per-env episode score accumulators (see _set_scores).
        self.scores = np.zeros(num_actors)
        self.returned_scores = np.zeros(num_actors)

    def _set_scores(self, infos, dones):
        """Accumulate per-episode scores into infos['scores'], resetting on episode end.

        NOTE(review): currently unused — the call in step() is commented out.
        """
        # thanks to cleanrl: https://github.com/vwxyzjn/cleanrl/blob/3d20d11f45a5f1d764934e9851b816d0b03d2d10/cleanrl/ppo_atari_envpool.py#L111
        if 'reward' not in infos:
            return
        self.scores += infos["reward"]
        self.returned_scores[:] = self.scores
        infos["scores"] = self.returned_scores
        if self.has_lives:
            # Episode only ends when all lives are gone.
            all_lives_exhausted = infos["lives"] == 0
            self.scores *= 1 - all_lives_exhausted
        else:
            # removing lives otherwise default observer will use them
            if 'lives' in infos:
                del infos['lives']
            self.scores *= 1 - dones

    def step(self, action):
        """Step all envs at once; returns batched (obs, reward, done, info)."""
        next_obs, reward, is_done, info = self.env.step(action)
        #print(next_obs.size(), 'step!')
        #info['time_outs'] = info['TimeLimit.truncated']
        #self._set_scores(info, is_done)
        if self.use_dict_obs_space:
            next_obs = {
                'observation': next_obs,
                'reward': torch.clip(reward, -1, 1),  # clipped reward fed back as an input
                'last_action': action
            }
        return next_obs, reward, is_done, info

    def reset(self):
        """Reset all envs; zero reward/last-action entries in dict-obs mode."""
        obs = self.env.reset()
        #print(obs.size(), 'reset!')
        if self.use_dict_obs_space:
            obs = {
                'observation': obs,
                'reward': torch.zeros(obs.shape[0], device=self.device),
                'last_action': torch.zeros(obs.shape[0], device=self.device),
            }
        return obs

    def get_number_of_agents(self):
        return 1

    def get_env_info(self):
        """Expose action/observation spaces to the rl-games trainer."""
        info = {}
        info['action_space'] = self.action_space
        info['observation_space'] = self.observation_space
        return info
def create_cule(**kwargs):
    """Factory used by the rl-games env registry to build a CuleEnv vec-env."""
    num_actors = kwargs.pop('num_actors', 16)
    return CuleEnv("", num_actors, **kwargs)
import gym
import numpy as np
from pettingzoo.sisl import multiwalker_v6
import yaml
from rl_games.torch_runner import Runner
import os
from collections import deque
import rl_games.envs.connect4_network
class MultiWalker(gym.Env):
    """rl-games adapter for the PettingZoo multiwalker env (3 cooperating walkers).

    Per-agent observations are stacked into a single (3, obs_len) array.
    Options: append each agent's previous action to its observation, append a
    one-hot agent id, and/or expose a concatenated global state for a
    centralized critic.
    """

    def __init__(self, name="multiwalker", **kwargs):
        gym.Env.__init__(self)
        self.name = name
        self.env = multiwalker_v6.parallel_env()
        self.use_central_value = kwargs.pop('central_value', False)
        self.use_prev_actions = kwargs.pop('use_prev_actions', False)
        self.apply_agent_ids = kwargs.pop('apply_agent_ids', False)
        self.add_timeouts = kwargs.pop('add_timeouts', False)
        self.action_space = self.env.action_spaces['walker_0']
        self.steps_count = 0
        obs_len = self.env.observation_spaces['walker_0'].shape[0]
        extra_obs = 3 if self.apply_agent_ids else 0
        if self.use_prev_actions:
            obs_len += self.action_space.shape[0]
        self.observation_space = gym.spaces.Box(-1, 1, shape=(obs_len + extra_obs,))
        if self.use_central_value:
            self.state_space = gym.spaces.Box(-1, 1, shape=(obs_len * 3,))

    def _pack_obs(self, obs):
        """Stack per-walker observations; optionally append ids and a global state."""
        walkers = [obs['walker_0'], obs['walker_1'], obs['walker_2']]
        stacked = np.stack(walkers)
        if self.apply_agent_ids:
            # One-hot agent id appended to each row.
            stacked = np.concatenate([stacked, np.eye(3, dtype=np.float32)], axis=-1)
        if self.use_central_value:
            return {'obs': stacked, 'state': np.concatenate(walkers)}
        return stacked

    def step(self, action):
        self.steps_count += 1
        actions = {'walker_0': action[0], 'walker_1': action[1], 'walker_2': action[2]}
        obs, reward, done, info = self.env.step(actions)
        if self.use_prev_actions:
            obs = {k: np.concatenate([v, actions[k]]) for k, v in obs.items()}
        rewards = np.stack([reward['walker_0'], reward['walker_1'], reward['walker_2']])
        dones = np.stack([done['walker_0'], done['walker_1'], done['walker_2']])
        return self._pack_obs(obs), rewards, dones, info

    def reset(self):
        obs = self.env.reset()
        self.steps_count = 0
        if self.use_prev_actions:
            # No previous action on the first step: pad with zeros.
            zero_actions = np.zeros(self.action_space.shape[0])
            obs = {k: np.concatenate([v, zero_actions]) for k, v in obs.items()}
        return self._pack_obs(obs)

    def render(self, mode='ansi'):
        self.env.render(mode)

    def get_number_of_agents(self):
        return 3

    def has_action_mask(self):
        return False
from rl_games.common.ivecenv import IVecEnv
import gym
import numpy as np
class Envpool(IVecEnv):
    """rl-games adapter for the envpool vectorized environment library.

    Optionally exposes a dict observation space that also feeds the (clipped)
    previous reward and last action back to the agent.
    """
    def __init__(self, config_name, num_actors, **kwargs):
        import envpool
        self.batch_size = num_actors
        env_name=kwargs.pop('env_name')
        self.has_lives = kwargs.pop('has_lives', False)
        self.use_dict_obs_space = kwargs.pop('use_dict_obs_space', False)
        self.env = envpool.make( env_name,
                                 env_type=kwargs.pop('env_type', 'gym'),
                                 num_envs=num_actors,
                                 batch_size=self.batch_size,
                                 **kwargs
                               )
        if self.use_dict_obs_space:
            # Dict space: frame + scalar reward + last discrete action.
            self.observation_space= gym.spaces.Dict({
                'observation' : self.env.observation_space,
                'reward' : gym.spaces.Box(low=0, high=1, shape=( ), dtype=np.float32),
                'last_action': gym.spaces.Box(low=0, high=self.env.action_space.n, shape=(), dtype=int)
            })
        else:
            self.observation_space = self.env.observation_space
        self.ids = np.arange(0, num_actors)
        self.action_space = self.env.action_space
        # Per-env episode score accumulators (see _set_scores).
        self.scores = np.zeros(num_actors)
        self.returned_scores = np.zeros(num_actors)

    def _set_scores(self, infos, dones):
        """Accumulate per-episode scores into infos['scores'], resetting on episode end."""
        # thanks to cleanrl: https://github.com/vwxyzjn/cleanrl/blob/3d20d11f45a5f1d764934e9851b816d0b03d2d10/cleanrl/ppo_atari_envpool.py#L111
        if 'reward' not in infos:
            return
        self.scores += infos["reward"]
        self.returned_scores[:] = self.scores
        infos["scores"] = self.returned_scores
        if self.has_lives:
            # Episode only counts as finished when all lives are gone.
            all_lives_exhausted = infos["lives"] == 0
            self.scores *= 1 - all_lives_exhausted
        else:
            # removing lives otherwise default observer will use them
            if 'lives' in infos:
                del infos['lives']
            self.scores *= 1 - dones

    def step(self, action):
        """Step all envs at once; returns batched (obs, reward, done, info)."""
        next_obs, reward, is_done, info = self.env.step(action , self.ids)
        # Propagate time-limit truncation so the trainer can bootstrap correctly.
        info['time_outs'] = info['TimeLimit.truncated']
        self._set_scores(info, is_done)
        if self.use_dict_obs_space:
            next_obs = {
                'observation': next_obs,
                'reward': np.clip(reward, -1, 1),  # clipped reward fed back as an input
                'last_action': action
            }
        return next_obs, reward, is_done, info

    def reset(self):
        """Reset all envs; zero reward/last-action entries in dict-obs mode."""
        obs = self.env.reset(self.ids)
        if self.use_dict_obs_space:
            obs = {
                'observation': obs,
                'reward': np.zeros(obs.shape[0]),
                'last_action': np.zeros(obs.shape[0]),
            }
        return obs

    def get_number_of_agents(self):
        return 1

    def get_env_info(self):
        """Expose action/observation spaces to the rl-games trainer."""
        info = {}
        info['action_space'] = self.action_space
        info['observation_space'] = self.observation_space
        return info
def create_envpool(**kwargs):
    """Registry factory for Envpool; num_actors defaults to 16.

    (Strips dataset-metadata residue fused onto the return line, which made
    it a syntax error.)
    """
    return Envpool("", kwargs.pop('num_actors', 16), **kwargs)
import torch
from torch import nn
import torch.nn.functional as F
class ConvBlock(nn.Module):
    """Input stem: 4 board planes -> 128 feature maps via 3x3 conv + BN + ReLU.

    Expects a dict input and reads the board tensor from its 'obs' key.
    """

    def __init__(self):
        super(ConvBlock, self).__init__()
        self.action_size = 7
        self.conv1 = nn.Conv2d(4, 128, 3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(128)

    def forward(self, s):
        board = s['obs'].contiguous()
        return F.relu(self.bn1(self.conv1(board)))
class ResBlock(nn.Module):
    """Residual block: conv-BN-ReLU, conv-BN, skip-add, ReLU.

    The `stride` and `downsample` parameters are kept for signature
    compatibility; with the defaults the spatial size is preserved so the
    identity skip connection always matches.
    """

    def __init__(self, inplanes=128, planes=128, stride=1, downsample=None):
        super(ResBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        return F.relu(branch + x)
class OutBlock(nn.Module):
    """Dual output head for the Connect-4 net.

    Returns (policy_logits [B, 7], value [B, 1] in (-1, 1), None).
    """

    def __init__(self):
        super(OutBlock, self).__init__()
        self.conv = nn.Conv2d(128, 3, kernel_size=1)  # value head
        self.bn = nn.BatchNorm2d(3)
        self.fc1 = nn.Linear(3*6*7, 32)
        self.fc2 = nn.Linear(32, 1)
        self.conv1 = nn.Conv2d(128, 32, kernel_size=1)  # policy head
        self.bn1 = nn.BatchNorm2d(32)
        self.fc = nn.Linear(6*7*32, 7)

    def forward(self, s):
        # Value head.
        v = F.relu(self.bn(self.conv(s)))
        v = v.view(-1, 3*6*7)  # batch_size x (channels * height * width)
        v = F.relu(self.fc1(v))
        # Bugfix: the previous `F.relu(self.fc2(v))` before tanh clamped the
        # value head to [0, 1), making losing positions (value < 0)
        # unrepresentable. tanh alone yields the intended (-1, 1) range.
        v = torch.tanh(self.fc2(v))
        # Policy head: raw logits over the 7 columns.
        p = F.relu(self.bn1(self.conv1(s)))
        p = p.view(-1, 6*7*32)
        p = self.fc(p)
        return p, v, None
class ConnectNet(nn.Module):
    """AlphaZero-style Connect-4 network: conv stem, `blocks` residual blocks,
    then a joint policy/value output head (see OutBlock)."""
    def __init__(self, blocks):
        super(ConnectNet, self).__init__()
        # Residual blocks are registered as attributes res_0..res_{blocks-1}
        # so nn.Module tracks their parameters.
        self.blocks = blocks
        self.conv = ConvBlock()
        for block in range(self.blocks):
            setattr(self, "res_%i" % block,ResBlock())
        self.outblock = OutBlock()
    def is_rnn(self):
        # Feed-forward net; rl_games uses this to skip RNN state handling.
        return False
    def forward(self,s):
        # NHWC -> NCHW.
        # NOTE(review): ConvBlock.forward indexes s['obs'], but what survives
        # this permute is a tensor, not a dict -- one of the two appears
        # stale; confirm against the caller's actual input format.
        s = s.permute((0, 3, 1, 2))
        s = self.conv(s)
        for block in range(self.blocks):
            s = getattr(self, "res_%i" % block)(s)
        s = self.outblock(s)
        return s
from rl_games.algos_torch.network_builder import NetworkBuilder
class ConnectBuilder(NetworkBuilder):
    """rl_games network-builder plugin producing ConnectNet instances.

    (Strips dataset-metadata residue fused onto the last line, which made it
    a syntax error.)
    """

    def __init__(self, **kwargs):
        NetworkBuilder.__init__(self)

    def load(self, params):
        # 'blocks' = number of residual blocks in the tower.
        self.params = params
        self.blocks = params['blocks']

    def build(self, name, **kwargs):
        # name/kwargs are part of the builder protocol but unused here.
        return ConnectNet(self.blocks)

    def __call__(self, name, **kwargs):
        return self.build(name, **kwargs)
import time
import gym
import numpy as np
import torch
import copy
from rl_games.common import vecenv
from rl_games.common import env_configurations
from rl_games.algos_torch import model_builder
class BasePlayer(object):
    """Base class for evaluating ("playing") a trained agent.

    Builds or receives an environment, converts observations to torch
    tensors on the configured device, and drives the generic evaluation
    loop in run(). Subclasses implement restore(), get_action(),
    get_masked_action() and reset().

    Fixes applied: removed a duplicate `self.player_config` assignment and
    an if/else that reduced to the same `.get('deterministic', True)` call
    on both paths; corrected the `:.1` print format specs (one significant
    digit, and a ValueError for integer values) to `:.1f`; stripped
    dataset-metadata residue fused onto the final line.
    """

    def __init__(self, params):
        self.config = config = params['config']
        self.load_networks(params)
        self.env_name = self.config['env_name']
        self.player_config = self.config.get('player', {})
        self.env_config = self.config.get('env_config', {})
        # A player-level env_config overrides the top-level one.
        self.env_config = self.player_config.get('env_config', self.env_config)
        self.env_info = self.config.get('env_info')
        self.clip_actions = config.get('clip_actions', True)
        self.seed = self.env_config.pop('seed', None)
        if self.env_info is None:
            use_vecenv = self.player_config.get('use_vecenv', False)
            if use_vecenv:
                print('[BasePlayer] Creating vecenv: ', self.env_name)
                self.env = vecenv.create_vec_env(
                    self.env_name, self.config['num_actors'], **self.env_config)
                self.env_info = self.env.get_env_info()
            else:
                print('[BasePlayer] Creating regular env: ', self.env_name)
                self.env = self.create_env()
                self.env_info = env_configurations.get_env_info(self.env)
        else:
            self.env = config.get('vec_env')

        self.num_agents = self.env_info.get('agents', 1)
        self.value_size = self.env_info.get('value_size', 1)
        self.action_space = self.env_info['action_space']

        self.observation_space = self.env_info['observation_space']
        if isinstance(self.observation_space, gym.spaces.Dict):
            self.obs_shape = {}
            for k, v in self.observation_space.spaces.items():
                self.obs_shape[k] = v.shape
        else:
            self.obs_shape = self.observation_space.shape

        # Whether the env produces torch tensors directly (set by cast_obs).
        self.is_tensor_obses = False
        self.states = None
        self.use_cuda = True
        self.batch_size = 1
        self.has_batch_dimension = False
        self.has_central_value = self.config.get(
            'central_value_config') is not None
        self.device_name = self.config.get('device_name', 'cuda')
        self.render_env = self.player_config.get('render', False)
        self.games_num = self.player_config.get('games_num', 2000)
        self.is_deterministic = self.player_config.get('deterministic', True)
        self.n_game_life = self.player_config.get('n_game_life', 1)
        self.print_stats = self.player_config.get('print_stats', True)
        self.render_sleep = self.player_config.get('render_sleep', 0.002)
        self.max_steps = 108000 // 4  # per-game step cap
        self.device = torch.device(self.device_name)

    def load_networks(self, params):
        """Build the network factory from config; stored under config['network']."""
        builder = model_builder.ModelBuilder()
        self.config['network'] = builder.load(params)

    def _preproc_obs(self, obs_batch):
        """Scale uint8 (image) observations to [0, 1] floats; pass others through."""
        if type(obs_batch) is dict:
            obs_batch = copy.copy(obs_batch)
            for k, v in obs_batch.items():
                if v.dtype == torch.uint8:
                    obs_batch[k] = v.float() / 255.0
                else:
                    obs_batch[k] = v
        else:
            if obs_batch.dtype == torch.uint8:
                obs_batch = obs_batch.float() / 255.0
        return obs_batch

    def env_step(self, env, actions):
        """Step the env; return (obs, rewards, dones, infos) as torch tensors."""
        if not self.is_tensor_obses:
            actions = actions.cpu().numpy()
        obs, rewards, dones, infos = env.step(actions)
        if hasattr(obs, 'dtype') and obs.dtype == np.float64:
            obs = np.float32(obs)
        if self.value_size > 1:
            rewards = rewards[0]
        if self.is_tensor_obses:
            return self.obs_to_torch(obs), rewards.cpu(), dones.cpu(), infos
        else:
            # Scalar (single-env) results get a leading batch dimension.
            if np.isscalar(dones):
                rewards = np.expand_dims(np.asarray(rewards), 0)
                dones = np.expand_dims(np.asarray(dones), 0)
            return self.obs_to_torch(obs), torch.from_numpy(rewards), torch.from_numpy(dones), infos

    def obs_to_torch(self, obs):
        """Convert an observation (tensor/array/dict, possibly wrapped under
        an 'obs' key) into torch tensors on self.device."""
        if isinstance(obs, dict):
            if 'obs' in obs:
                obs = obs['obs']
            if isinstance(obs, dict):
                upd_obs = {}
                for key, value in obs.items():
                    upd_obs[key] = self._obs_to_tensors_internal(value, False)
            else:
                upd_obs = self.cast_obs(obs)
        else:
            upd_obs = self.cast_obs(obs)
        return upd_obs

    def _obs_to_tensors_internal(self, obs, cast_to_dict=True):
        # Recursive helper for nested dict observations.
        if isinstance(obs, dict):
            upd_obs = {}
            for key, value in obs.items():
                upd_obs[key] = self._obs_to_tensors_internal(value, False)
        else:
            upd_obs = self.cast_obs(obs)
        return upd_obs

    def cast_obs(self, obs):
        """Cast a single observation to a torch tensor on self.device;
        records whether the env already produces tensors."""
        if isinstance(obs, torch.Tensor):
            self.is_tensor_obses = True
        elif isinstance(obs, np.ndarray):
            assert (obs.dtype != np.int8)
            if obs.dtype == np.uint8:
                obs = torch.ByteTensor(obs).to(self.device)
            else:
                obs = torch.FloatTensor(obs).to(self.device)
        elif np.isscalar(obs):
            obs = torch.FloatTensor([obs]).to(self.device)
        return obs

    def preprocess_actions(self, actions):
        """Move actions to numpy when the env is not tensor-based."""
        if not self.is_tensor_obses:
            actions = actions.cpu().numpy()
        return actions

    def env_reset(self, env):
        obs = env.reset()
        return self.obs_to_torch(obs)

    def restore(self, fn):
        """Load a checkpoint; implemented by subclasses."""
        raise NotImplementedError('restore')

    def get_weights(self):
        weights = {}
        weights['model'] = self.model.state_dict()
        return weights

    def set_weights(self, weights):
        self.model.load_state_dict(weights['model'])
        if self.normalize_input and 'running_mean_std' in weights:
            self.model.running_mean_std.load_state_dict(
                weights['running_mean_std'])

    def create_env(self):
        return env_configurations.configurations[self.env_name]['env_creator'](**self.env_config)

    def get_action(self, obs, is_deterministic=False):
        raise NotImplementedError('step')

    def get_masked_action(self, obs, mask, is_deterministic=False):
        raise NotImplementedError('step')

    def reset(self):
        raise NotImplementedError('raise')

    def init_rnn(self):
        """Allocate zeroed RNN states sized for the current batch."""
        if self.is_rnn:
            rnn_states = self.model.get_default_rnn_state()
            self.states = [torch.zeros((s.size()[0], self.batch_size, s.size(
            )[2]), dtype=torch.float32).to(self.device) for s in rnn_states]

    def run(self):
        """Generic evaluation loop.

        Plays up to games_num * n_game_life games, accumulating reward and
        step counts, and prints per-game and average statistics (plus a win
        rate when the env reports 'battle_won' or 'scores').
        """
        n_games = self.games_num
        render = self.render_env
        n_game_life = self.n_game_life
        is_deterministic = self.is_deterministic
        sum_rewards = 0
        sum_steps = 0
        sum_game_res = 0
        n_games = n_games * n_game_life
        games_played = 0
        has_masks = False
        has_masks_func = getattr(self.env, "has_action_mask", None) is not None

        op_agent = getattr(self.env, "create_agent", None)
        if op_agent:
            agent_inited = True

        if has_masks_func:
            has_masks = self.env.has_action_mask()

        need_init_rnn = self.is_rnn
        for _ in range(n_games):
            if games_played >= n_games:
                break
            obses = self.env_reset(self.env)
            batch_size = 1
            batch_size = self.get_batch_size(obses, batch_size)

            if need_init_rnn:
                self.init_rnn()
                need_init_rnn = False

            # Per-env running reward and step counters.
            cr = torch.zeros(batch_size, dtype=torch.float32)
            steps = torch.zeros(batch_size, dtype=torch.float32)

            print_game_res = False

            for n in range(self.max_steps):
                if has_masks:
                    masks = self.env.get_action_mask()
                    action = self.get_masked_action(
                        obses, masks, is_deterministic)
                else:
                    action = self.get_action(obses, is_deterministic)

                obses, r, done, info = self.env_step(self.env, action)
                cr += r
                steps += 1

                if render:
                    self.env.render(mode='human')
                    time.sleep(self.render_sleep)

                all_done_indices = done.nonzero(as_tuple=False)
                # One game result per multi-agent group.
                done_indices = all_done_indices[::self.num_agents]
                done_count = len(done_indices)
                games_played += done_count

                if done_count > 0:
                    if self.is_rnn:
                        # Zero the RNN state of every finished env.
                        for s in self.states:
                            s[:, all_done_indices, :] = s[:,
                                                          all_done_indices, :] * 0.0

                    cur_rewards = cr[done_indices].sum().item()
                    cur_steps = steps[done_indices].sum().item()

                    cr = cr * (1.0 - done.float())
                    steps = steps * (1.0 - done.float())
                    sum_rewards += cur_rewards
                    sum_steps += cur_steps

                    game_res = 0.0
                    if isinstance(info, dict):
                        if 'battle_won' in info:
                            print_game_res = True
                            game_res = info.get('battle_won', 0.5)
                        if 'scores' in info:
                            print_game_res = True
                            game_res = info.get('scores', 0.5)

                    if self.print_stats:
                        cur_rewards_done = cur_rewards/done_count
                        cur_steps_done = cur_steps/done_count
                        # Fix: ':.1' printed one *significant* digit and
                        # raises ValueError on integer values; use ':.1f'.
                        if print_game_res:
                            print(f'reward: {cur_rewards_done:.1f} steps: {cur_steps_done:.1f} w: {game_res:.1f}')
                        else:
                            print(f'reward: {cur_rewards_done:.1f} steps: {cur_steps_done:.1f}')

                    sum_game_res += game_res
                    if batch_size//self.num_agents == 1 or games_played >= n_games:
                        break

        print(sum_rewards)
        if print_game_res:
            print('av reward:', sum_rewards / games_played * n_game_life, 'av steps:', sum_steps /
                  games_played * n_game_life, 'winrate:', sum_game_res / games_played * n_game_life)
        else:
            print('av reward:', sum_rewards / games_played * n_game_life,
                  'av steps:', sum_steps / games_played * n_game_life)

    def get_batch_size(self, obses, batch_size):
        """Infer the batch size (and whether obs carries a batch dimension)
        by comparing the observation's rank against the declared obs shape."""
        obs_shape = self.obs_shape
        if type(self.obs_shape) is dict:
            if 'obs' in obses:
                obses = obses['obs']
            keys_view = self.obs_shape.keys()
            keys_iterator = iter(keys_view)
            if 'observation' in obses:
                first_key = 'observation'
            else:
                first_key = next(keys_iterator)
            obs_shape = self.obs_shape[first_key]
            obses = obses[first_key]

        if len(obses.size()) > len(obs_shape):
            batch_size = obses.size()[0]
            self.has_batch_dimension = True

        self.batch_size = batch_size
        return batch_size
import torch
import rl_games.algos_torch.torch_ext as torch_ext
class DefaultDiagnostics(object):
    """Do-nothing diagnostics: every hook is a stub, so training code can
    invoke the diagnostics interface unconditionally."""
    def __init__(self):
        pass
    def send_info(self, writter):
        pass
    def epoch(self, agent, current_epoch):
        pass
    def mini_epoch(self, agent, miniepoch):
        pass
    def mini_batch(self, agent, batch, e_clip, minibatch):
        pass
class PpoDiagnostics(DefaultDiagnostics):
    """Collects PPO diagnostics (explained variance, clip fraction, running
    normalizer stats) and flushes them via send_info().

    (Strips dataset-metadata residue fused onto the last line, which made it
    a syntax error.)
    """

    def __init__(self):
        self.diag_dict = {}
        self.clip_fracs = []
        self.exp_vars = []
        self.current_epoch = 0

    def send_info(self, writter):
        """Write all accumulated scalars for the current epoch; no-op without a writer."""
        if writter is None:
            return
        for k, v in self.diag_dict.items():
            writter.add_scalar(k, v.cpu().numpy(), self.current_epoch)

    def epoch(self, agent, current_epoch):
        """End-of-epoch hook: snapshot normalizer stats and average the
        per-minibatch explained-variance samples collected so far."""
        self.current_epoch = current_epoch
        if agent.normalize_rms_advantage:
            self.diag_dict['diagnostics/rms_advantage/mean'] = agent.advantage_mean_std.moving_mean
            self.diag_dict['diagnostics/rms_advantage/var'] = agent.advantage_mean_std.moving_var
        if agent.normalize_value:
            self.diag_dict['diagnostics/rms_value/mean'] = agent.value_mean_std.running_mean
            self.diag_dict['diagnostics/rms_value/var'] = agent.value_mean_std.running_var
        exp_var = torch.stack(self.exp_vars, axis=0).mean()
        self.exp_vars = []
        self.diag_dict['diagnostics/exp_var'] = exp_var

    def mini_epoch(self, agent, miniepoch):
        # Average the clip fraction over this mini-epoch's minibatches.
        clip_frac = torch.stack(self.clip_fracs, axis=0).mean()
        self.clip_fracs = []
        self.diag_dict['diagnostics/clip_frac/{0}'.format(miniepoch)] = clip_frac

    def mini_batch(self, agent, batch, e_clip, minibatch):
        # Per-minibatch samples; everything is detached so no graph is kept.
        with torch.no_grad():
            values = batch['values'].detach()
            returns = batch['returns'].detach()
            new_neglogp = batch['new_neglogp'].detach()
            old_neglogp = batch['old_neglogp'].detach()
            masks = batch['masks']
            exp_var = torch_ext.explained_variance(values, returns, masks)
            clip_frac = torch_ext.policy_clip_fraction(new_neglogp, old_neglogp, e_clip, masks)
            self.exp_vars.append(exp_var)
            self.clip_fracs.append(clip_frac)
import time
class IntervalSummaryWriter:
    """
    Summary writer wrapper designed to reduce the size of tf.events files.
    It will prevent the learner from writing the summaries more often than a specified interval, i.e. if the
    current interval is 20 seconds and we wrote our last summary for a particular summary key at 01:00, all summaries
    until 01:20 for that key will be ignored.
    The interval is adaptive: it will approach 1/200th of the total training time, but no less than interval_sec_min
    and no greater than interval_sec_max.
    This was created to facilitate really big training runs, such as with Population-Based training, where summary
    folders reached tens of gigabytes.

    (Strips dataset-metadata residue fused onto the last line, which made it
    a syntax error.)
    """

    def __init__(self, summary_writer, cfg):
        self.experiment_start = time.time()

        # prevents noisy summaries when experiments are restarted
        self.defer_summaries_sec = cfg.get('defer_summaries_sec', 5)

        self.interval_sec_min = cfg.get('summaries_interval_sec_min', 5)
        self.interval_sec_max = cfg.get('summaries_interval_sec_max', 300)
        self.last_interval = self.interval_sec_min

        # interval between summaries will be close to this fraction of the total training time,
        # i.e. for a run that lasted 200 minutes we write one summary every minute.
        self.summaries_relative_step = 1.0 / 200

        self.writer = summary_writer
        # tag -> wall-clock time of the last accepted write for that tag
        self.last_write_for_tag = dict()

    def _calc_interval(self):
        """Write summaries more often in the beginning of the run."""
        if self.last_interval >= self.interval_sec_max:
            return self.last_interval

        seconds_since_start = time.time() - self.experiment_start
        interval = seconds_since_start * self.summaries_relative_step
        interval = min(interval, self.interval_sec_max)
        interval = max(interval, self.interval_sec_min)
        self.last_interval = interval

        return interval

    def add_scalar(self, tag, value, step, *args, **kwargs):
        if step == 0:
            # removes faulty summaries that appear after the experiment restart
            return

        seconds_since_start = time.time() - self.experiment_start
        if seconds_since_start < self.defer_summaries_sec:
            return

        last_write = self.last_write_for_tag.get(tag, 0)
        seconds_since_last_write = time.time() - last_write
        interval = self._calc_interval()
        if seconds_since_last_write >= interval:
            self.writer.add_scalar(tag, value, step, *args, **kwargs)
            self.last_write_for_tag[tag] = time.time()

    def __getattr__(self, attr):
        # Delegate everything else (add_histogram, flush, ...) to the wrapped writer.
        return getattr(self.writer, attr)
from rl_games.algos_torch import torch_ext
import torch
import numpy as np
class AlgoObserver:
    """Hook interface for observing training; every hook defaults to a no-op
    so subclasses override only what they need."""
    def __init__(self):
        pass
    def before_init(self, base_name, config, experiment_name):
        pass
    def after_init(self, algo):
        pass
    def process_infos(self, infos, done_indices):
        pass
    def after_steps(self):
        pass
    def after_print_stats(self, frame, epoch_num, total_time):
        pass
class DefaultAlgoObserver(AlgoObserver):
    """Default observer: averages per-game results reported through env infos
    ('scores' takes precedence over 'battle_won') and logs them to the
    algorithm's summary writer."""
    def __init__(self):
        pass
    def after_init(self, algo):
        # games_to_track bounds the averaging window; the meter lives on the
        # algorithm's PPO device.
        self.algo = algo
        self.game_scores = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.ppo_device)
        self.writer = self.algo.writer
    def process_infos(self, infos, done_indices):
        # infos is either a list of per-env info dicts, or a single dict of
        # batched arrays (envpool-style).
        if not infos:
            return
        done_indices = done_indices.cpu().numpy()
        if not isinstance(infos, dict) and len(infos) > 0 and isinstance(infos[0], dict):
            # List of per-env dicts: look up the info of each finished env
            # (one entry per multi-agent group, hence the // num_agents).
            for ind in done_indices:
                ind = ind.item()
                if len(infos) <= ind//self.algo.num_agents:
                    continue
                info = infos[ind//self.algo.num_agents]
                game_res = None
                if 'battle_won' in info:
                    game_res = info['battle_won']
                if 'scores' in info:
                    game_res = info['scores']
                if game_res is not None:
                    self.game_scores.update(torch.from_numpy(np.asarray([game_res])).to(self.algo.ppo_device))
        elif isinstance(infos, dict):
            if 'lives' in infos:
                # envpool
                # A game only counts as finished when all lives are exhausted.
                done_indices = np.argwhere(infos['lives'] == 0).squeeze(1)
                for ind in done_indices:
                    ind = ind.item()
                    game_res = None
                    if 'battle_won' in infos:
                        game_res = infos['battle_won']
                    if 'scores' in infos:
                        game_res = infos['scores']
                    if game_res is not None and len(game_res) > ind//self.algo.num_agents:
                        self.game_scores.update(torch.from_numpy(np.asarray([game_res[ind//self.algo.num_agents]])).to(self.algo.ppo_device))
    def after_clear_stats(self):
        self.game_scores.clear()
    def after_print_stats(self, frame, epoch_num, total_time):
        # Log under three x-axes so curves can be compared by frame,
        # iteration or wall-clock time.
        if self.game_scores.current_size > 0 and self.writer is not None:
            mean_scores = self.game_scores.get_mean()
            self.writer.add_scalar('scores/mean', mean_scores, frame)
            self.writer.add_scalar('scores/iter', mean_scores, epoch_num)
            self.writer.add_scalar('scores/time', mean_scores, total_time)
class IsaacAlgoObserver(AlgoObserver):
    """Log statistics from the environment along with the algorithm running stats.

    (Strips dataset-metadata residue fused onto the last line, which made it
    a syntax error.)
    """

    def __init__(self):
        pass

    def after_init(self, algo):
        self.algo = algo
        self.mean_scores = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.ppo_device)
        self.ep_infos = []
        self.direct_info = {}
        self.writer = self.algo.writer

    def process_infos(self, infos, done_indices):
        """Collect 'episode' sub-dicts and any scalar entries from env infos.

        Raises ValueError when infos is not a dict (Isaac envs report a dict).
        """
        if not isinstance(infos, dict):
            classname = self.__class__.__name__
            raise ValueError(f"{classname} expected 'infos' as dict. Received: {type(infos)}")
        # store episode information
        if "episode" in infos:
            self.ep_infos.append(infos["episode"])
        # log other variables directly
        if len(infos) > 0 and isinstance(infos, dict):  # allow direct logging from env
            self.direct_info = {}
            for k, v in infos.items():
                # only log scalars (floats, ints, zero-dimensional tensors)
                if isinstance(v, float) or isinstance(v, int) or (isinstance(v, torch.Tensor) and len(v.shape) == 0):
                    self.direct_info[k] = v

    def after_clear_stats(self):
        # clear stored buffers
        self.mean_scores.clear()

    def after_print_stats(self, frame, epoch_num, total_time):
        """Flush collected episode/env statistics to the summary writer."""
        # log scalars from the episode
        if self.ep_infos:
            for key in self.ep_infos[0]:
                info_tensor = torch.tensor([], device=self.algo.device)
                for ep_info in self.ep_infos:
                    # handle scalar and zero dimensional tensor infos
                    if not isinstance(ep_info[key], torch.Tensor):
                        ep_info[key] = torch.Tensor([ep_info[key]])
                    if len(ep_info[key].shape) == 0:
                        ep_info[key] = ep_info[key].unsqueeze(0)
                    info_tensor = torch.cat((info_tensor, ep_info[key].to(self.algo.device)))
                value = torch.mean(info_tensor)
                self.writer.add_scalar("Episode/" + key, value, epoch_num)
            self.ep_infos.clear()
        # log scalars from env information
        for k, v in self.direct_info.items():
            self.writer.add_scalar(f"{k}/frame", v, frame)
            self.writer.add_scalar(f"{k}/iter", v, epoch_num)
            self.writer.add_scalar(f"{k}/time", v, total_time)
        # log mean reward/score from the env
        if self.mean_scores.current_size > 0:
            mean_scores = self.mean_scores.get_mean()
            self.writer.add_scalar("scores/mean", mean_scores, frame)
            self.writer.add_scalar("scores/iter", mean_scores, epoch_num)
            self.writer.add_scalar("scores/time", mean_scores, total_time)
from torch import nn
import torch
import math
def critic_loss(value_preds_batch, values, curr_e_clip, return_batch, clip_value):
    """Squared-error value loss, optionally with PPO-style value clipping.

    With clipping, the new value prediction is kept within +/- curr_e_clip
    of the old prediction and the worse (larger) of the clipped/unclipped
    squared errors is used per element.
    """
    if not clip_value:
        return (return_batch - values) ** 2
    delta = (values - value_preds_batch).clamp(-curr_e_clip, curr_e_clip)
    clipped_values = value_preds_batch + delta
    unclipped_err = (values - return_batch) ** 2
    clipped_err = (clipped_values - return_batch) ** 2
    return torch.max(unclipped_err, clipped_err)
def smooth_clamp(x, mi, mx):
    """Differentiable (sigmoid-based) approximation of clamp(x, mi, mx)."""
    t = (x - mi) / (mx - mi) - 0.5
    return torch.sigmoid(4.0 * t) * (mx - mi) + mi
def smoothed_actor_loss(old_action_neglog_probs_batch, action_neglog_probs, advantage, is_ppo, curr_e_clip):
    """PPO surrogate actor loss using the smooth (sigmoid) ratio clamp;
    falls back to the vanilla policy-gradient loss when is_ppo is False."""
    if not is_ppo:
        return action_neglog_probs * advantage
    ratio = torch.exp(old_action_neglog_probs_batch - action_neglog_probs)
    clamped_ratio = smooth_clamp(ratio, 1.0 - curr_e_clip, 1.0 + curr_e_clip)
    return torch.max(-advantage * ratio, -advantage * clamped_ratio)
def actor_loss(old_action_neglog_probs_batch, action_neglog_probs, advantage, is_ppo, curr_e_clip):
    """Standard PPO clipped-surrogate actor loss; plain policy-gradient loss
    when is_ppo is False."""
    if not is_ppo:
        return action_neglog_probs * advantage
    ratio = torch.exp(old_action_neglog_probs_batch - action_neglog_probs)
    clamped_ratio = torch.clamp(ratio, 1.0 - curr_e_clip, 1.0 + curr_e_clip)
    return torch.max(-advantage * ratio, -advantage * clamped_ratio)
def decoupled_actor_loss(behavior_action_neglog_probs, action_neglog_probs, proxy_neglog_probs, advantage, curr_e_clip):
    """Decoupled PPO actor loss.

    The importance ratio is taken w.r.t. the behavior policy, while the
    clipping is applied to the proxy/current policy log-ratio.

    (Strips dataset-metadata residue fused onto the return line, which made
    it a syntax error.)
    """
    log_ratio = proxy_neglog_probs - action_neglog_probs
    # Unclipped surrogate: ratio of current to behavior policy.
    surrogate = -advantage * torch.exp(behavior_action_neglog_probs - action_neglog_probs)
    clipped_log_ratio = torch.clamp(log_ratio, math.log(1.0 - curr_e_clip), math.log(1.0 + curr_e_clip))
    clipped_surrogate = -advantage * torch.exp(clipped_log_ratio - proxy_neglog_probs + behavior_action_neglog_probs)
    return torch.max(surrogate, clipped_surrogate)
import numpy as np
from collections import defaultdict
class LinearValueProcessor:
    """Linearly anneals a value from start_eps to end_eps over
    end_eps_frames frames, holding end_eps afterwards."""

    def __init__(self, start_eps, end_eps, end_eps_frames):
        self.start_eps = start_eps
        self.end_eps = end_eps
        self.end_eps_frames = end_eps_frames

    def __call__(self, frame):
        if frame >= self.end_eps_frames:
            return self.end_eps
        progress = frame / self.end_eps_frames
        return self.start_eps + progress * (self.end_eps - self.start_eps)
class DefaultRewardsShaper:
    """Affine reward transform (shift, then scale) followed by clipping.

    Operates on torch tensors when is_torch is True and on numpy arrays
    otherwise; torch is imported lazily so the numpy path has no torch
    dependency.
    """

    def __init__(self, scale_value = 1, shift_value = 0, min_val=-np.inf, max_val=np.inf, is_torch=True):
        self.scale_value = scale_value
        self.shift_value = shift_value
        self.min_val = min_val
        self.max_val = max_val
        self.is_torch = is_torch

    def __call__(self, reward):
        shaped = (reward + self.shift_value) * self.scale_value
        if self.is_torch:
            import torch
            return torch.clamp(shaped, self.min_val, self.max_val)
        return np.clip(shaped, self.min_val, self.max_val)
def dicts_to_dict_with_arrays(dicts, add_batch_dim = True):
    """Transpose a list of dicts into a dict of numpy arrays.

    Per-key value lists are np.stack-ed when add_batch_dim is True (adding a
    leading batch axis) and np.concatenate-d otherwise; a list of scalars
    (1-D value list) becomes a plain array either way. A list of length <= 1
    is returned unchanged.
    """
    def merge(values):
        if len(np.shape(values)) == 1:
            return np.array(values)
        return np.stack(values) if add_batch_dim else np.concatenate(values)

    if len(dicts) <= 1:
        return dicts
    collected = defaultdict(list)
    for entry in dicts:
        for key in entry:
            collected[key].append(entry[key])
    return {key: merge(values) for key, values in collected.items()}
def unsqueeze_obs(obs):
    """Prepend a batch dimension to a tensor (recursing into dict obs,
    which is updated in place); a one-element 1-D tensor is left unchanged."""
    if type(obs) is dict:
        for key in obs:
            obs[key] = unsqueeze_obs(obs[key])
        return obs
    if len(obs.size()) > 1 or obs.size()[0] > 1:
        obs = obs.unsqueeze(0)
    return obs
def flatten_first_two_dims(arr):
    """Merge an array's first two dimensions; 2-D (or lower) input is
    flattened to 1-D."""
    if arr.ndim <= 2:
        return arr.reshape(-1)
    return arr.reshape(-1, *arr.shape[2:])
def free_mem():
    """Ask glibc to return freed heap memory to the OS via malloc_trim(0).

    Linux/glibc-only: loading 'libc.so.6' raises OSError elsewhere.
    (Strips dataset-metadata residue fused onto the last line, which made it
    a syntax error.)
    """
    import ctypes
    ctypes.CDLL('libc.so.6').malloc_trim(0)
import rl_games.envs.test
from rl_games.common import wrappers
from rl_games.common import tr_helpers
from rl_games.envs.brax import create_brax_env
from rl_games.envs.envpool import create_envpool
from rl_games.envs.cule import create_cule
import gym
from gym.wrappers import FlattenObservation, FilterObservation
import numpy as np
import math
class HCRewardEnv(gym.RewardWrapper):
    """Reward wrapper that floors the wrapped env's reward at -10."""

    def __init__(self, env):
        gym.RewardWrapper.__init__(self, env)

    def reward(self, reward):
        # np.max over the pair keeps numpy semantics for array rewards.
        return np.max([-10, reward])
class DMControlWrapper(gym.Wrapper):
    """Exposes dm2gym's 'observations' sub-space as the env's observation
    space (dm2gym reports a dict space) and forces a float32 dtype."""

    def __init__(self, env):
        # Fix: initialize the class we actually subclass; the previous code
        # called gym.RewardWrapper.__init__ on a gym.Wrapper subclass.
        gym.Wrapper.__init__(self, env)
        self.observation_space = self.env.observation_space['observations']
        self.observation_space.dtype = np.dtype('float32')

    def reset(self, **kwargs):
        self.num_stops = 0
        return self.env.reset(**kwargs)

    def step(self, action):
        observation, reward, done, info = self.env.step(action)
        return observation, reward, done, info
class DMControlObsWrapper(gym.ObservationWrapper):
    """Unwraps the dm2gym dict observation to its 'observations' entry."""

    def __init__(self, env):
        # Fix: call the proper base initializer; the previous code called
        # gym.RewardWrapper.__init__ on a gym.ObservationWrapper subclass.
        gym.ObservationWrapper.__init__(self, env)

    def observation(self, obs):
        return obs['observations']
def create_default_gym_env(**kwargs):
    """Make a gym env by 'name', with optional frame stacking ('frames',
    procgen-aware) and step limiting ('limit_steps')."""
    frames = kwargs.pop('frames', 1)
    name = kwargs.pop('name')
    is_procgen = kwargs.pop('procgen', False)
    limit_steps = kwargs.pop('limit_steps', False)
    env = gym.make(name, **kwargs)
    if frames > 1:
        # ProcgenStack(.., True) for procgen, FrameStack(.., False) otherwise.
        stacker = wrappers.ProcgenStack if is_procgen else wrappers.FrameStack
        env = stacker(env, frames, is_procgen)
    if limit_steps:
        env = wrappers.LimitStepsWrapper(env)
    return env
def create_goal_gym_env(**kwargs):
    """Make a goal-based gym env with ['observation', 'desired_goal']
    flattened into one vector; optional frame stacking and step limiting."""
    frames = kwargs.pop('frames', 1)
    name = kwargs.pop('name')
    limit_steps = kwargs.pop('limit_steps', False)
    env = FlattenObservation(
        FilterObservation(gym.make(name, **kwargs), ['observation', 'desired_goal']))
    if frames > 1:
        env = wrappers.FrameStack(env, frames, False)
    if limit_steps:
        env = wrappers.LimitStepsWrapper(env)
    return env
def create_slime_gym_env(**kwargs):
    """SlimeVolley env; returns the self-play wrapper when 'self_play' is set."""
    import slimevolleygym
    from rl_games.envs.slimevolley_selfplay import SlimeVolleySelfplay
    name = kwargs.pop('name')
    kwargs.pop('limit_steps', False)  # accepted but unused; kept for config compat
    if kwargs.pop('self_play', False):
        return SlimeVolleySelfplay(name, **kwargs)
    return gym.make(name, **kwargs)
def create_connect_four_env(**kwargs):
    """Connect-4 env; returns the self-play wrapper when 'self_play' is set."""
    from rl_games.envs.connect4_selfplay import ConnectFourSelfPlay
    name = kwargs.pop('name')
    kwargs.pop('limit_steps', False)  # accepted but unused; kept for config compat
    if kwargs.pop('self_play', False):
        return ConnectFourSelfPlay(name, **kwargs)
    return gym.make(name, **kwargs)
def create_atari_gym_env(**kwargs):
    """Atari env with the standard deepmind preprocessing pipeline."""
    name = kwargs.pop('name')
    skip = kwargs.pop('skip', 4)
    episode_life = kwargs.pop('episode_life', True)
    wrap_impala = kwargs.pop('wrap_impala', False)
    return wrappers.make_atari_deepmind(name, skip=skip, episode_life=episode_life,
                                        wrap_impala=wrap_impala, **kwargs)
def create_dm_control_env(**kwargs):
    """dm_control task via dm2gym, unwrapped to flat float32 observations,
    limited to 1000 steps, with optional frame stacking."""
    frames = kwargs.pop('frames', 1)
    env_id = 'dm2gym:' + kwargs.pop('name')
    env = gym.make(env_id, environment_kwargs=kwargs)
    env = DMControlObsWrapper(DMControlWrapper(env))
    env = wrappers.TimeLimit(env, 1000)
    if frames > 1:
        env = wrappers.FrameStack(env, frames, False)
    return env
def create_super_mario_env(name='SuperMarioBros-v1'):
    """Super Mario Bros with the SIMPLE_MOVEMENT action set, 4-frame skip
    and deepmind-style frame preprocessing."""
    import gym
    from nes_py.wrappers import JoypadSpace
    from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT
    import gym_super_mario_bros
    env = JoypadSpace(gym_super_mario_bros.make(name), SIMPLE_MOVEMENT)
    env = wrappers.MaxAndSkipEnv(env, skip=4)
    return wrappers.wrap_deepmind(env, episode_life=False, clip_rewards=False, frame_stack=True, scale=True)
def create_super_mario_env_stage1(name='SuperMarioBrosRandomStage1-v1'):
    # NOTE(review): the `name` argument is ignored and stage_names[1]
    # ('SuperMarioBros-1-2-v1') is hard-coded below -- presumably a
    # debugging leftover; confirm whether the random-stage env or the name
    # parameter was intended.
    import gym
    from nes_py.wrappers import JoypadSpace
    from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT
    import gym_super_mario_bros
    stage_names = [
        'SuperMarioBros-1-1-v1',
        'SuperMarioBros-1-2-v1',
        'SuperMarioBros-1-3-v1',
        'SuperMarioBros-1-4-v1',
    ]
    env = gym_super_mario_bros.make(stage_names[1])
    env = JoypadSpace(env, SIMPLE_MOVEMENT)
    # 4-frame skip plus deepmind-style frame preprocessing.
    env = wrappers.MaxAndSkipEnv(env, skip=4)
    env = wrappers.wrap_deepmind(env, episode_life=False, clip_rewards=False, frame_stack=True, scale=True)
    #env = wrappers.AllowBacktracking(env)
    return env
def create_quadrupped_env():
    """QuadruppedWalk-v1 with a 4-frame skip and a 2-frame stack."""
    import gym
    import roboschool
    import quadruppedEnv
    skipped = wrappers.MaxAndSkipEnv(gym.make('QuadruppedWalk-v1'), 4, False)
    return wrappers.FrameStack(skipped, 2, True)
def create_roboschool_env(name):
    """Plain roboschool env by name (importing roboschool registers the envs)."""
    import gym
    import roboschool
    return gym.make(name)
def create_smac(name, **kwargs):
    """StarCraft multi-agent env with optional batched frame stacking
    (state-aware when a central value function is configured)."""
    from rl_games.envs.smac_env import SMACEnv
    frames = kwargs.pop('frames', 1)
    kwargs.pop('transpose', False)  # popped for compat; stacking below never transposes
    flatten = kwargs.pop('flatten', True)
    has_cv = kwargs.get('central_value', False)
    env = SMACEnv(name, **kwargs)
    if frames > 1:
        stack_cls = wrappers.BatchedFrameStackWithStates if has_cv else wrappers.BatchedFrameStack
        env = stack_cls(env, frames, transpose=False, flatten=flatten)
    return env
def create_smac_cnn(name, **kwargs):
    """StarCraft multi-agent env with CNN-style stacked frames (default 4)."""
    from rl_games.envs.smac_env import SMACEnv
    has_cv = kwargs.get('central_value', False)
    frames = kwargs.pop('frames', 4)
    transpose = kwargs.pop('transpose', False)
    env = SMACEnv(name, **kwargs)
    stack_cls = wrappers.BatchedFrameStackWithStates if has_cv else wrappers.BatchedFrameStack
    return stack_cls(env, frames, transpose=transpose)
def create_test_env(name, **kwargs):
    """Env from the rl_games test registry (import registers the test envs)."""
    import rl_games.envs.test
    return gym.make(name, **kwargs)
def create_minigrid_env(name, **kwargs):
    """MiniGrid env with optional exploration bonuses and pixel observations.

    By default returns partial RGB observations rendered with
    tile_size = 84 // view_size; set 'rgb_fully_obs' for a full-grid render.
    Fix: corrected the misspelled log message ('minigird_env').
    """
    import gym_minigrid
    import gym_minigrid.wrappers
    state_bonus = kwargs.pop('state_bonus', False)
    action_bonus = kwargs.pop('action_bonus', False)
    rgb_fully_obs = kwargs.pop('rgb_fully_obs', False)
    rgb_partial_obs = kwargs.pop('rgb_partial_obs', True)
    view_size = kwargs.pop('view_size', 3)
    env = gym.make(name, **kwargs)
    if state_bonus:
        env = gym_minigrid.wrappers.StateBonus(env)
    if action_bonus:
        env = gym_minigrid.wrappers.ActionBonus(env)
    if rgb_fully_obs:
        env = gym_minigrid.wrappers.RGBImgObsWrapper(env)
    elif rgb_partial_obs:
        env = gym_minigrid.wrappers.ViewSizeWrapper(env, view_size)
        env = gym_minigrid.wrappers.RGBImgPartialObsWrapper(env, tile_size=84//view_size)  # Get pixel observations
    env = gym_minigrid.wrappers.ImgObsWrapper(env)
    print('minigrid_env observation space shape:', env.observation_space)
    return env
def create_multiwalker_env(**kwargs):
    """Multiwalker env wrapped for rl_games (the name argument is unused)."""
    from rl_games.envs.multiwalker import MultiWalker
    return MultiWalker('', **kwargs)
def create_diambra_env(**kwargs):
    """Create a Diambra environment, forwarding all kwargs to DiambraEnv."""
    from rl_games.envs.diambra.diambra import DiambraEnv
    return DiambraEnv(**kwargs)
def create_env(name, **kwargs):
    """gym.make with an optional `steps_limit` kwarg that adds a TimeLimit wrapper."""
    max_steps = kwargs.pop('steps_limit', None)
    env = gym.make(name, **kwargs)
    if max_steps is None:
        return env
    return wrappers.TimeLimit(env, max_steps)
# Registry mapping env name -> {'vecenv_type', 'env_creator'}.
# Every env_creator is invoked as creator(**env_config) (see
# get_obs_and_action_spaces_from_config), so each lambda must accept **kwargs;
# the handful that previously took no arguments would raise TypeError when an
# env_config was supplied and have been made consistent.
configurations = {
    'CartPole-v1' : {
        'vecenv_type' : 'RAY',
        'env_creator' : lambda **kwargs : gym.make('CartPole-v1'),
    },
    'CartPoleMaskedVelocity-v1' : {
        'vecenv_type' : 'RAY',
        'env_creator' : lambda **kwargs : wrappers.MaskVelocityWrapper(gym.make('CartPole-v1'), 'CartPole-v1'),
    },
    'MountainCarContinuous-v0' : {
        'vecenv_type' : 'RAY',
        'env_creator' : lambda **kwargs : gym.make('MountainCarContinuous-v0'),
    },
    'MountainCar-v0' : {
        'vecenv_type' : 'RAY',
        'env_creator' : lambda **kwargs : gym.make('MountainCar-v0'),
    },
    'Acrobot-v1' : {
        'env_creator' : lambda **kwargs : gym.make('Acrobot-v1'),
        'vecenv_type' : 'RAY'
    },
    'Pendulum-v0' : {
        'env_creator' : lambda **kwargs : gym.make('Pendulum-v0'),
        'vecenv_type' : 'RAY'
    },
    'LunarLander-v2' : {
        'env_creator' : lambda **kwargs : gym.make('LunarLander-v2'),
        'vecenv_type' : 'RAY'
    },
    'PongNoFrameskip-v4' : {
        'env_creator' : lambda **kwargs : wrappers.make_atari_deepmind('PongNoFrameskip-v4', skip=4),
        'vecenv_type' : 'RAY'
    },
    'BreakoutNoFrameskip-v4' : {
        'env_creator' : lambda **kwargs : wrappers.make_atari_deepmind('BreakoutNoFrameskip-v4', skip=4,sticky=False),
        'vecenv_type' : 'RAY'
    },
    'MsPacmanNoFrameskip-v4' : {
        'env_creator' : lambda **kwargs : wrappers.make_atari_deepmind('MsPacmanNoFrameskip-v4', skip=4),
        'vecenv_type' : 'RAY'
    },
    'CarRacing-v0' : {
        'env_creator' : lambda **kwargs : wrappers.make_car_racing('CarRacing-v0', skip=4),
        'vecenv_type' : 'RAY'
    },
    'RoboschoolAnt-v1' : {
        'env_creator' : lambda **kwargs : create_roboschool_env('RoboschoolAnt-v1'),
        'vecenv_type' : 'RAY'
    },
    'SuperMarioBros-v1' : {
        'env_creator' : lambda **kwargs : create_super_mario_env(),
        'vecenv_type' : 'RAY'
    },
    'SuperMarioBrosRandomStages-v1' : {
        'env_creator' : lambda **kwargs : create_super_mario_env('SuperMarioBrosRandomStages-v1'),
        'vecenv_type' : 'RAY'
    },
    'SuperMarioBrosRandomStage1-v1' : {
        'env_creator' : lambda **kwargs : create_super_mario_env_stage1('SuperMarioBrosRandomStage1-v1'),
        'vecenv_type' : 'RAY'
    },
    'RoboschoolHalfCheetah-v1' : {
        'env_creator' : lambda **kwargs : create_roboschool_env('RoboschoolHalfCheetah-v1'),
        'vecenv_type' : 'RAY'
    },
    'RoboschoolHumanoid-v1' : {
        'env_creator' : lambda **kwargs : wrappers.FrameStack(create_roboschool_env('RoboschoolHumanoid-v1'), 1, True),
        'vecenv_type' : 'RAY'
    },
    'LunarLanderContinuous-v2' : {
        'env_creator' : lambda **kwargs : gym.make('LunarLanderContinuous-v2'),
        'vecenv_type' : 'RAY'
    },
    'RoboschoolHumanoidFlagrun-v1' : {
        'env_creator' : lambda **kwargs : wrappers.FrameStack(create_roboschool_env('RoboschoolHumanoidFlagrun-v1'), 1, True),
        'vecenv_type' : 'RAY'
    },
    'BipedalWalker-v3' : {
        'env_creator' : lambda **kwargs : create_env('BipedalWalker-v3', **kwargs),
        'vecenv_type' : 'RAY'
    },
    'BipedalWalkerCnn-v3' : {
        'env_creator' : lambda **kwargs : wrappers.FrameStack(HCRewardEnv(gym.make('BipedalWalker-v3')), 4, False),
        'vecenv_type' : 'RAY'
    },
    'BipedalWalkerHardcore-v3' : {
        'env_creator' : lambda **kwargs : gym.make('BipedalWalkerHardcore-v3'),
        'vecenv_type' : 'RAY'
    },
    'ReacherPyBulletEnv-v0' : {
        'env_creator' : lambda **kwargs : create_roboschool_env('ReacherPyBulletEnv-v0'),
        'vecenv_type' : 'RAY'
    },
    'BipedalWalkerHardcoreCnn-v3' : {
        'env_creator' : lambda **kwargs : wrappers.FrameStack(gym.make('BipedalWalkerHardcore-v3'), 4, False),
        'vecenv_type' : 'RAY'
    },
    'QuadruppedWalk-v1' : {
        'env_creator' : lambda **kwargs : create_quadrupped_env(),
        'vecenv_type' : 'RAY'
    },
    'FlexAnt' : {
        'env_creator' : lambda **kwargs : create_flex(FLEX_PATH + '/demo/gym/cfg/ant.yaml'),
        'vecenv_type' : 'ISAAC'
    },
    'FlexHumanoid' : {
        'env_creator' : lambda **kwargs : create_flex(FLEX_PATH + '/demo/gym/cfg/humanoid.yaml'),
        'vecenv_type' : 'ISAAC'
    },
    'FlexHumanoidHard' : {
        'env_creator' : lambda **kwargs : create_flex(FLEX_PATH + '/demo/gym/cfg/humanoid_hard.yaml'),
        'vecenv_type' : 'ISAAC'
    },
    'smac' : {
        'env_creator' : lambda **kwargs : create_smac(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'smac_cnn' : {
        'env_creator' : lambda **kwargs : create_smac_cnn(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'dm_control' : {
        'env_creator' : lambda **kwargs : create_dm_control_env(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'openai_gym' : {
        'env_creator' : lambda **kwargs : create_default_gym_env(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'openai_robot_gym' : {
        'env_creator' : lambda **kwargs : create_goal_gym_env(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'atari_gym' : {
        'env_creator' : lambda **kwargs : create_atari_gym_env(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'slime_gym' : {
        'env_creator' : lambda **kwargs : create_slime_gym_env(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'test_env' : {
        'env_creator' : lambda **kwargs : create_test_env(kwargs.pop('name'), **kwargs),
        'vecenv_type' : 'RAY'
    },
    'minigrid_env' : {
        'env_creator' : lambda **kwargs : create_minigrid_env(kwargs.pop('name'), **kwargs),
        'vecenv_type' : 'RAY'
    },
    'connect4_env' : {
        'env_creator' : lambda **kwargs : create_connect_four_env(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'multiwalker_env' : {
        'env_creator' : lambda **kwargs : create_multiwalker_env(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'diambra': {
        'env_creator': lambda **kwargs: create_diambra_env(**kwargs),
        'vecenv_type': 'RAY'
    },
    'brax' : {
        'env_creator': lambda **kwargs: create_brax_env(**kwargs),
        'vecenv_type': 'BRAX'
    },
    'envpool': {
        'env_creator': lambda **kwargs: create_envpool(**kwargs),
        'vecenv_type': 'ENVPOOL'
    },
    'cule': {
        'env_creator': lambda **kwargs: create_cule(**kwargs),
        'vecenv_type': 'CULE'
    },
}
def get_env_info(env):
    """Collect metadata from an env instance.

    Returns a dict with keys:
    - 'observation_space', 'action_space': taken directly from the env
    - 'agents': env.get_number_of_agents() if defined, else 1
    - 'value_size': env.value_size if defined, else 1
    """
    result_shapes = {}
    result_shapes['observation_space'] = env.observation_space
    result_shapes['action_space'] = env.action_space
    result_shapes['agents'] = 1
    result_shapes['value_size'] = 1
    if hasattr(env, "get_number_of_agents"):
        result_shapes['agents'] = env.get_number_of_agents()
    # (removed a stale commented-out block that referenced an undefined
    # `observation_space` local and could never have run as written)
    if hasattr(env, "value_size"):
        result_shapes['value_size'] = env.value_size
    print(result_shapes)
    return result_shapes
def get_obs_and_action_spaces_from_config(config):
    """Build the configured env once, read its spaces/metadata, and close it.

    Parameters
    ----------
    config: dict
        Must contain 'env_name' (a key of `configurations`); the optional
        'env_config' dict is forwarded to the env creator.
    """
    env_config = config.get('env_config', {})
    env = configurations[config['env_name']]['env_creator'](**env_config)
    try:
        return get_env_info(env)
    finally:
        # previously the env leaked if get_env_info raised
        env.close()
def register(name, config):
    """Add (or overwrite) an environment configuration under `name`.

    `config` follows the same shape as the entries of `configurations`:
    a dict with 'vecenv_type' and 'env_creator' keys.
    """
    configurations[name] = config
import numpy as np
import random
import gym
import torch
from rl_games.common.segment_tree import SumSegmentTree, MinSegmentTree
import torch
from rl_games.algos_torch.torch_ext import numpy_to_torch_dtype_dict
class ReplayBuffer(object):
    """Uniform-sampling FIFO replay buffer backed by preallocated numpy arrays."""

    def __init__(self, size, ob_space):
        """Create Replay buffer.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        ob_space:
            Observation space; only its `shape` and `dtype` are used here.
        """
        self._obses = np.zeros((size,) + ob_space.shape, dtype=ob_space.dtype)
        self._next_obses = np.zeros((size,) + ob_space.shape, dtype=ob_space.dtype)
        self._rewards = np.zeros(size)
        self._actions = np.zeros(size, dtype=np.int32)
        # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin `bool` maps to np.bool_ and is the supported spelling.
        self._dones = np.zeros(size, dtype=bool)
        self._maxsize = size
        self._next_idx = 0
        self._curr_size = 0

    def __len__(self):
        return self._curr_size

    def add(self, obs_t, action, reward, obs_tp1, done):
        """Store one transition, overwriting the oldest slot when full."""
        self._curr_size = min(self._curr_size + 1, self._maxsize)
        self._obses[self._next_idx] = obs_t
        self._next_obses[self._next_idx] = obs_tp1
        self._rewards[self._next_idx] = reward
        self._actions[self._next_idx] = action
        self._dones[self._next_idx] = done
        self._next_idx = (self._next_idx + 1) % self._maxsize

    def _get(self, idx):
        # One transition as an (obs, action, reward, next_obs, done) tuple.
        return self._obses[idx], self._actions[idx], self._rewards[idx], self._next_obses[idx], self._dones[idx]

    def _encode_sample(self, idxes):
        """Gather the transitions at `idxes` into five stacked arrays."""
        obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
        for i in idxes:
            obs_t, action, reward, obs_tp1, done = self._get(i)
            # np.asarray replaces np.array(..., copy=False), whose semantics
            # changed in NumPy 2.0 (it now raises when a copy is needed)
            obses_t.append(np.asarray(obs_t))
            actions.append(np.asarray(action))
            rewards.append(reward)
            obses_tp1.append(np.asarray(obs_tp1))
            dones.append(done)
        return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones)

    def sample(self, batch_size):
        """Sample a batch of experiences uniformly at random (with replacement).

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.

        Returns
        -------
        obs_batch: np.array
            batch of observations
        act_batch: np.array
            batch of actions executed given obs_batch
        rew_batch: np.array
            rewards received as results of executing act_batch
        next_obs_batch: np.array
            next set of observations seen after executing act_batch
        done_mask: np.array
            done_mask[i] = 1 if executing act_batch[i] resulted in
            the end of an episode and 0 otherwise.
        """
        idxes = [random.randint(0, self._curr_size - 1) for _ in range(batch_size)]
        return self._encode_sample(idxes)
class PrioritizedReplayBuffer(ReplayBuffer):
    # Proportional prioritized experience replay: sampling probability of a
    # transition is priority**alpha, tracked in two segment trees (sum for
    # sampling, min for the max importance weight).
    def __init__(self, size, alpha, ob_space):
        """Create Prioritized Replay buffer.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        alpha: float
            how much prioritization is used
            (0 - no prioritization, 1 - full prioritization)

        See Also
        --------
        ReplayBuffer.__init__
        """
        super(PrioritizedReplayBuffer, self).__init__(size, ob_space)
        assert alpha >= 0
        self._alpha = alpha

        # Segment trees need a power-of-two capacity >= size.
        it_capacity = 1
        while it_capacity < size:
            it_capacity *= 2

        self._it_sum = SumSegmentTree(it_capacity)
        self._it_min = MinSegmentTree(it_capacity)
        # New transitions enter with the highest priority seen so far, so
        # they are guaranteed to be sampled at least once.
        self._max_priority = 1.0

    def add(self, *args, **kwargs):
        """See ReplayBuffer.store_effect"""
        idx = self._next_idx
        super().add(*args, **kwargs)
        self._it_sum[idx] = self._max_priority ** self._alpha
        self._it_min[idx] = self._max_priority ** self._alpha

    def _sample_proportional(self, batch_size):
        # Stratified sampling: one prefix-sum lookup per equal-probability
        # stratum of the total priority mass.
        res = []
        p_total = self._it_sum.sum(0, self._curr_size - 1)
        every_range_len = p_total / batch_size
        for i in range(batch_size):
            mass = random.random() * every_range_len + i * every_range_len
            idx = self._it_sum.find_prefixsum_idx(mass)
            res.append(idx)
        return res

    def sample(self, batch_size, beta):
        """Sample a batch of experiences.

        compared to ReplayBuffer.sample
        it also returns importance weights and idxes
        of sampled experiences.

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.
        beta: float
            To what degree to use importance weights
            (0 - no corrections, 1 - full correction)

        Returns
        -------
        obs_batch: np.array
            batch of observations
        act_batch: np.array
            batch of actions executed given obs_batch
        rew_batch: np.array
            rewards received as results of executing act_batch
        next_obs_batch: np.array
            next set of observations seen after executing act_batch
        done_mask: np.array
            done_mask[i] = 1 if executing act_batch[i] resulted in
            the end of an episode and 0 otherwise.
        weights: np.array
            Array of shape (batch_size,) and dtype np.float32
            denoting importance weight of each sampled transition
        idxes: np.array
            Array of shape (batch_size,) and dtype np.int32
            idexes in buffer of sampled experiences
        """
        assert beta > 0

        idxes = self._sample_proportional(batch_size)

        weights = []
        # Normalize by the largest possible weight (from the min-priority
        # element) so all weights are <= 1.
        p_min = self._it_min.min() / self._it_sum.sum()
        max_weight = (p_min * self._curr_size) ** (-beta)

        for idx in idxes:
            p_sample = self._it_sum[idx] / self._it_sum.sum()
            weight = (p_sample * self._curr_size) ** (-beta)
            weights.append(weight / max_weight)
        weights = np.array(weights)
        encoded_sample = self._encode_sample(idxes)
        return tuple(list(encoded_sample) + [weights, idxes])

    def update_priorities(self, idxes, priorities):
        """Update priorities of sampled transitions.

        sets priority of transition at index idxes[i] in buffer
        to priorities[i].

        Parameters
        ----------
        idxes: [int]
            List of idxes of sampled transitions
        priorities: [float]
            List of updated priorities corresponding to
            transitions at the sampled idxes denoted by
            variable `idxes`.
        """
        assert len(idxes) == len(priorities)
        for idx, priority in zip(idxes, priorities):
            assert priority > 0
            assert 0 <= idx < self._curr_size
            self._it_sum[idx] = priority ** self._alpha
            self._it_min[idx] = priority ** self._alpha

            self._max_priority = max(self._max_priority, priority)
class VectorizedReplayBuffer:
    """Fixed-capacity ring buffer of batched transitions stored as torch tensors."""

    def __init__(self, obs_shape, action_shape, capacity, device):
        """Create Vectorized Replay buffer.

        Parameters
        ----------
        obs_shape: tuple
            per-transition observation shape
        action_shape: tuple
            per-transition action shape
        capacity: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        device:
            torch device on which all storage tensors live
        """
        self.device = device

        def _buf(shape, dtype=torch.float32):
            return torch.empty((capacity, *shape), dtype=dtype, device=device)

        self.obses = _buf(obs_shape)
        self.next_obses = _buf(obs_shape)
        self.actions = _buf(action_shape)
        self.rewards = _buf((1,))
        self.dones = _buf((1,), dtype=torch.bool)

        self.capacity = capacity
        self.idx = 0
        self.full = False

    def add(self, obs, action, reward, next_obs, done):
        """Append a batch of transitions, wrapping to the front past capacity."""
        n = obs.shape[0]
        tail = min(self.capacity - self.idx, n)  # room before the end of the ring
        spill = n - tail                         # how many entries wrap around

        if spill > 0:
            # The wrapped entries land at the start of the buffer.
            self.obses[:spill] = obs[-spill:]
            self.actions[:spill] = action[-spill:]
            self.rewards[:spill] = reward[-spill:]
            self.next_obses[:spill] = next_obs[-spill:]
            self.dones[:spill] = done[-spill:]
            self.full = True

        lo, hi = self.idx, self.idx + tail
        self.obses[lo:hi] = obs[:tail]
        self.actions[lo:hi] = action[:tail]
        self.rewards[lo:hi] = reward[:tail]
        self.next_obses[lo:hi] = next_obs[:tail]
        self.dones[lo:hi] = done[:tail]

        self.idx = (self.idx + n) % self.capacity
        self.full = self.full or self.idx == 0

    def sample(self, batch_size):
        """Uniformly sample `batch_size` stored transitions (with replacement).

        Returns
        -------
        (obses, actions, rewards, next_obses, dones): torch tensors
            batched transition fields, indexed identically.
        """
        upper = self.capacity if self.full else self.idx
        idxs = torch.randint(0, upper, (batch_size,), device=self.device)
        return (self.obses[idxs], self.actions[idxs], self.rewards[idxs],
                self.next_obses[idxs], self.dones[idxs])
class ExperienceBuffer:
    '''
    More generalized than replay buffers.
    Implemented for on-policy algos

    Allocates one zeroed tensor per rollout quantity, each with a leading
    (horizon_length, num_actors * num_agents) shape on `device`.
    '''
    def __init__(self, env_info, algo_info, device, aux_tensor_dict=None):
        """
        Parameters
        ----------
        env_info: dict
            Must contain 'action_space' and 'observation_space'; may contain
            'agents', 'value_size' and (when central value is on) 'state_space'.
        algo_info: dict
            Must contain 'num_actors', 'horizon_length', 'has_central_value';
            may contain 'use_action_masks'.
        device:
            torch device for all allocated tensors.
        aux_tensor_dict: dict or None
            Optional mapping name -> shape for extra per-step float tensors.
        """
        self.env_info = env_info
        self.algo_info = algo_info
        self.device = device

        self.num_agents = env_info.get('agents', 1)
        self.action_space = env_info['action_space']

        self.num_actors = algo_info['num_actors']
        self.horizon_length = algo_info['horizon_length']
        self.has_central_value = algo_info['has_central_value']
        self.use_action_masks = algo_info.get('use_action_masks', False)

        self.is_discrete = False
        self.is_multi_discrete = False
        self.is_continuous = False
        self.obs_base_shape = (self.horizon_length, self.num_agents * self.num_actors)
        self.state_base_shape = (self.horizon_length, self.num_actors)

        if type(self.action_space) is gym.spaces.Discrete:
            self.actions_shape = ()
            self.actions_num = self.action_space.n
            self.is_discrete = True
        if type(self.action_space) is gym.spaces.Tuple:
            # assumed to be a tuple of Discrete spaces (multi-discrete actions)
            self.actions_shape = (len(self.action_space),)
            self.actions_num = [action.n for action in self.action_space]
            self.is_multi_discrete = True
        if type(self.action_space) is gym.spaces.Box:
            self.actions_shape = (self.action_space.shape[0],)
            self.actions_num = self.action_space.shape[0]
            self.is_continuous = True

        self.tensor_dict = {}
        self._init_from_env_info(self.env_info)

        self.aux_tensor_dict = aux_tensor_dict
        if self.aux_tensor_dict is not None:
            self._init_from_aux_dict(self.aux_tensor_dict)

    def _init_from_env_info(self, env_info):
        """Allocate the standard rollout tensors (obs, rewards, values, actions, ...)."""
        obs_base_shape = self.obs_base_shape
        state_base_shape = self.state_base_shape

        self.tensor_dict['obses'] = self._create_tensor_from_space(env_info['observation_space'], obs_base_shape)
        if self.has_central_value:
            self.tensor_dict['states'] = self._create_tensor_from_space(env_info['state_space'], state_base_shape)

        # The Box bounds are irrelevant here; only shape/dtype drive allocation.
        val_space = gym.spaces.Box(low=0, high=1, shape=(env_info.get('value_size', 1),))
        self.tensor_dict['rewards'] = self._create_tensor_from_space(val_space, obs_base_shape)
        self.tensor_dict['values'] = self._create_tensor_from_space(val_space, obs_base_shape)
        self.tensor_dict['neglogpacs'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1, shape=(), dtype=np.float32), obs_base_shape)
        self.tensor_dict['dones'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1, shape=(), dtype=np.uint8), obs_base_shape)

        if self.is_discrete or self.is_multi_discrete:
            self.tensor_dict['actions'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1, shape=self.actions_shape, dtype=int), obs_base_shape)
            if self.use_action_masks:
                # was dtype=np.bool, which was removed in NumPy 1.24;
                # the builtin bool maps to the same np.bool_ dtype
                self.tensor_dict['action_masks'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1, shape=self.actions_shape + (np.sum(self.actions_num),), dtype=bool), obs_base_shape)
        if self.is_continuous:
            self.tensor_dict['actions'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1, shape=self.actions_shape, dtype=np.float32), obs_base_shape)
            self.tensor_dict['mus'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1, shape=self.actions_shape, dtype=np.float32), obs_base_shape)
            self.tensor_dict['sigmas'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1, shape=self.actions_shape, dtype=np.float32), obs_base_shape)

    def _init_from_aux_dict(self, tensor_dict):
        """Allocate user-requested auxiliary float tensors; dict values are shapes."""
        obs_base_shape = self.obs_base_shape
        for k, v in tensor_dict.items():
            # NOTE(review): shape=(v) is not a tuple -- `v` is assumed to
            # already be a valid shape value; confirm against callers.
            self.tensor_dict[k] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1, shape=(v), dtype=np.float32), obs_base_shape)

    def _create_tensor_from_space(self, space, base_shape):
        """Allocate a zero tensor (or dict of tensors for Dict spaces) shaped
        base_shape + space.shape. Returns None for unsupported space types."""
        if type(space) is gym.spaces.Box:
            dtype = numpy_to_torch_dtype_dict[space.dtype]
            return torch.zeros(base_shape + space.shape, dtype=dtype, device=self.device)
        if type(space) is gym.spaces.Discrete:
            dtype = numpy_to_torch_dtype_dict[space.dtype]
            return torch.zeros(base_shape, dtype=dtype, device=self.device)
        if type(space) is gym.spaces.Tuple:
            '''
            assuming that tuple is only Discrete tuple
            '''
            dtype = numpy_to_torch_dtype_dict[space.dtype]
            return torch.zeros(base_shape + (len(space),), dtype=dtype, device=self.device)
        if type(space) is gym.spaces.Dict:
            return {k: self._create_tensor_from_space(v, base_shape) for k, v in space.spaces.items()}

    def update_data(self, name, index, val):
        """Write one time step (`index`) of tensor `name`; dict-valued data supported."""
        if type(val) is dict:
            for k, v in val.items():
                self.tensor_dict[name][k][index, :] = v
        else:
            self.tensor_dict[name][index, :] = val

    def update_data_rnn(self, name, indices, play_mask, val):
        """Scatter-write data at (indices, play_mask) positions of tensor `name`."""
        if type(val) is dict:
            # bug fix: iterating a dict directly yields keys only, so the old
            # `for k, v in val:` unpacking could never work; .items() is required
            for k, v in val.items():
                self.tensor_dict[name][k][indices, play_mask] = v
        else:
            self.tensor_dict[name][indices, play_mask] = val

    def get_transformed(self, transform_op):
        """Apply `transform_op` to every stored tensor (one level of dict nesting)."""
        res_dict = {}
        for k, v in self.tensor_dict.items():
            if type(v) is dict:
                res_dict[k] = {kd: transform_op(vd) for kd, vd in v.items()}
            else:
                res_dict[k] = transform_op(v)
        return res_dict

    def get_transformed_list(self, transform_op, tensor_list):
        """Like get_transformed, but only for names in `tensor_list`; missing names are skipped."""
        res_dict = {}
        for k in tensor_list:
            v = self.tensor_dict.get(k)
            if v is None:
                continue
            if type(v) is dict:
                res_dict[k] = {kd: transform_op(vd) for kd, vd in v.items()}
            else:
                res_dict[k] = transform_op(v)
        return res_dict
import torch
import copy
from torch.utils.data import Dataset
class PPODataset(Dataset):
    """Minibatch view over one rollout's worth of flattened PPO training tensors.

    `values_dict` is set per-epoch via update_values_dict; indexing yields
    contiguous minibatch slices (sequence-aligned slices in the RNN case).
    """

    def __init__(self, batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len):
        self.is_rnn = is_rnn
        self.seq_len = seq_len
        self.batch_size = batch_size
        self.minibatch_size = minibatch_size
        self.device = device
        self.length = self.batch_size // self.minibatch_size
        self.is_discrete = is_discrete
        self.is_continuous = not is_discrete
        total_games = self.batch_size // self.seq_len
        self.num_games_batch = self.minibatch_size // self.seq_len
        self.game_indexes = torch.arange(total_games, dtype=torch.long, device=self.device)
        self.flat_indexes = torch.arange(total_games * self.seq_len, dtype=torch.long,
                                         device=self.device).reshape(total_games, self.seq_len)
        # keys handled specially (not sliced like ordinary tensors)
        self.special_names = ['rnn_states']

    def update_values_dict(self, values_dict):
        self.values_dict = values_dict

    def update_mu_sigma(self, mu, sigma):
        # Writes back into the slice handed out by the most recent __getitem__.
        lo, hi = self.last_range
        self.values_dict['mu'][lo:hi] = mu
        self.values_dict['sigma'][lo:hi] = sigma

    def __len__(self):
        return self.length

    def _get_item_rnn(self, idx):
        """Minibatch of whole sequences plus the matching RNN state slices."""
        game_lo = idx * self.num_games_batch
        game_hi = game_lo + self.num_games_batch
        lo, hi = game_lo * self.seq_len, game_hi * self.seq_len
        self.last_range = (lo, hi)

        batch = {}
        for key, value in self.values_dict.items():
            if key in self.special_names:
                continue
            if isinstance(value, dict):
                batch[key] = {sub: tensor[lo:hi] for sub, tensor in value.items()}
            elif value is None:
                batch[key] = None
            else:
                batch[key] = value[lo:hi]

        states = self.values_dict['rnn_states']
        # RNN states are laid out (layers, games, hidden), hence dim-1 slicing.
        batch['rnn_states'] = [s[:, game_lo:game_hi, :].contiguous() for s in states]
        return batch

    def _get_item(self, idx):
        """Plain contiguous minibatch slice; None-valued keys are dropped."""
        lo = idx * self.minibatch_size
        hi = lo + self.minibatch_size
        self.last_range = (lo, hi)

        batch = {}
        for key, value in self.values_dict.items():
            if key in self.special_names or value is None:
                continue
            if isinstance(value, dict):
                batch[key] = {sub: tensor[lo:hi] for sub, tensor in value.items()}
            else:
                batch[key] = value[lo:hi]
        return batch

    def __getitem__(self, idx):
        getter = self._get_item_rnn if self.is_rnn else self._get_item
        return getter(idx)
class DatasetList(Dataset):
    """Concatenation of several dataset snapshots, interleaved round-robin by index."""

    def __init__(self):
        self.dataset_list = []

    def __len__(self):
        # All stored datasets are assumed to share the first one's length.
        return len(self.dataset_list) * self.dataset_list[0].length

    def add_dataset(self, dataset):
        # Deep-copy so later mutation of the source dataset cannot
        # change the stored snapshot.
        self.dataset_list.append(copy.deepcopy(dataset))

    def clear(self):
        self.dataset_list = []

    def __getitem__(self, idx):
        n = len(self.dataset_list)
        return self.dataset_list[idx % n][idx // n]
import torch
from torch import nn
def repackage_hidden(h):
    """Wraps hidden states in new Tensors, to detach them from their history."""
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden(part) for part in h)
def multiply_hidden(h, mask):
    """Elementwise-multiply every tensor in a (possibly nested) hidden state by `mask`."""
    if isinstance(h, torch.Tensor):
        return h * mask
    return tuple(multiply_hidden(part, mask) for part in h)
class RnnWithDones(nn.Module):
    """Wraps an RNN layer so hidden states are zeroed wherever episodes end.

    Instead of stepping one timestep at a time, the forward pass splits the
    time axis at every step where any sequence in the batch has a done flag,
    and feeds each done-free segment to the wrapped RNN in a single call.
    """
    def __init__(self, rnn_layer):
        nn.Module.__init__(self)
        self.rnn = rnn_layer

    # got idea from ikostrikov :)
    def forward(self, input, states, done_masks=None, bptt_len = 0):
        """Run the wrapped RNN over `input`, resetting `states` at done steps.

        input: time-major tensor (steps, batch, features) -- inferred from the
        size() calls below. done_masks: per-step done flags aligned with input.
        bptt_len is currently ignored.
        """
        # ignoring bptt_ln for now
        if done_masks == None:
            return self.rnn(input, states)
        max_steps = input.size()[0]
        batch_size = input.size()[1]
        out_batch = []
        not_dones = 1.0-done_masks
        # Timesteps (past t=0) where at least one sequence just finished;
        # these are the points where the hidden state must be masked.
        # NOTE(review): squeeze() collapses *all* size-1 dims, so this
        # indexing assumes batch size > 1 -- confirm for single-env runs.
        has_zeros = ((not_dones.squeeze()[1:] == 0.0)
                    .any(dim=-1)
                    .nonzero()
                    .squeeze()
                    .cpu())

        # +1 to correct the masks[1:]
        if has_zeros.dim() == 0:
            # Deal with scalar
            has_zeros = [has_zeros.item() + 1]
        else:
            has_zeros = (has_zeros + 1).numpy().tolist()

        # add t=0 and t=T to the list so segments tile the whole horizon
        has_zeros = [0] + has_zeros + [max_steps]

        out_batch = []
        for i in range(len(has_zeros) - 1):
            start_idx = has_zeros[i]
            end_idx = has_zeros[i + 1]
            # Zero the hidden state of any sequence that was done at the
            # segment boundary, then process the whole segment at once.
            not_done = not_dones[start_idx].float().unsqueeze(0)
            states = multiply_hidden(states, not_done)
            out, states = self.rnn(input[start_idx:end_idx], states)
            out_batch.append(out)
        return torch.cat(out_batch, dim=0), states

    """
    def forward(self, input, states, done_masks=None, bptt_len = 0):
        max_steps = input.size()[0]
        batch_size = input.size()[1]
        out_batch = []
        for i in range(max_steps):
            if done_masks is not None:
                dones = done_masks[i].float().unsqueeze(0)
                states = multiply_hidden(states, 1.0-dones)
            if (bptt_len > 0) and (i % bptt_len == 0):
                states = repackage_hidden(states)
            out, states = self.rnn(input[i].unsqueeze(0), states)
            out_batch.append(out)
        return torch.cat(out_batch, dim=0), states
    """
class LSTMWithDones(RnnWithDones):
    """Done-mask aware wrapper around torch.nn.LSTM."""

    def __init__(self, *args, **kwargs):
        super().__init__(torch.nn.LSTM(*args, **kwargs))
class GRUWithDones(RnnWithDones):
    """Done-mask aware wrapper around torch.nn.GRU."""

    def __init__(self, *args, **kwargs):
        super().__init__(torch.nn.GRU(*args, **kwargs))
from pathlib import Path
from rl_inventory_api.item import Item
from rl_inventory_api.constants import Types, Rarities, Tradeable, Certifies, Colors
import csv
from dataclasses import astuple
class Inventory:
def __init__(self, items: list[Item]):
self.items = items
@staticmethod
def read(path=Path(str(Path.home()) + "\\AppData\\Roaming\\bakkesmod\\bakkesmod\\data\\inventory.csv")):
with open(path, "r") as file:
inv = list(csv.reader(file, delimiter=","))
return Inventory([Item(*item) for item in inv[1:]])
def save(self, path=str(Path.home()) + "\\inventory.csv"):
with open(path, "w", newline='') as file:
writer = csv.writer(file)
writer.writerow(["product id", "name", "slot", "paint", "certification", "certification value",
"certification label", "quality", "crate", "tradeable", "amount", "instanceid"])
writer.writerows([astuple(item) for item in self.items])
def intuitive_filter(self, name=None, type_=None, color=None, certify=None, rarity=None, crate=None, tradeable=None,
amount=None):
args = [("name", name), ("slot", type_), ("paint", color), ("certification_label", certify), ("quality", rarity),
("crate", crate), ("tradeable", tradeable), ("amount", amount)]
def compare_with_context(item, attr, value):
item_value = getattr(item, attr)
if isinstance(value, str):
return item_value == value
elif isinstance(value, list):
return item_value in value
return self.filter(lambda item: all([compare_with_context(item, attr, value) for attr, value in args if value]))
def quantity_items(self):
return sum([item.quantity() for item in self.items])
def get_values(self, attribute):
return {getattr(item, attribute) for item in self.items}
def get_names(self):
return self.get_values("name")
def get_types(self):
return self.get_values("slot")
def get_rarities(self):
return self.get_values("quality")
def get_colors(self):
return self.get_values("paint")
def get_series(self):
return self.get_values("crate")
def get_certifies(self):
return self.get_values("certification_label")
def filter(self, lamb):
return Inventory(list(filter(lamb, self.items)))
def filter_by_tradeable(self, tradeable):
return self.filter(lambda item: item.tradeable == tradeable)
def filter_tradeable(self):
return self.filter_by_tradeable(Tradeable.TRUE)
def filter_not_tradeable(self):
return self.filter_by_tradeable(Tradeable.FALSE)
def filter_by_name(self, name):
return self.filter(lambda item: item.name == name)
def filter_by_color(self, color):
return self.filter(lambda item: item.paint == color)
def filter_by_type(self, type_):
return self.filter(lambda item: item.slot == type_)
def filter_by_amount(self, amount):
return self.filter(lambda item: int(item.amount) == int(amount))
def filter_by_certify(self, certify):
return self.filter(lambda item: item.certification_label == certify)
def filter_by_series(self, series):
return self.filter(lambda item: item.crate == series)
def filter_by_rarity(self, rarity):
return self.filter(lambda item: item.quality == rarity)
def filter_uncommon(self):
return self.filter_by_rarity(Rarities.UNCOMMON)
def filter_rare(self):
return self.filter_by_rarity(Rarities.RARE)
def filter_very_rare(self):
return self.filter_by_rarity(Rarities.VERY_RARE)
def filter_import(self):
return self.filter_by_rarity(Rarities.IMPORT)
def filter_exotic(self):
return self.filter_by_rarity(Rarities.EXOTIC)
def filter_black_market(self):
return self.filter_by_rarity(Rarities.BLACK_MARKET)
def filter_limited(self):
return self.filter_by_rarity(Rarities.LIMITED)
def filter_painted(self):
return self.filter(lambda item: item.is_painted())
def filter_not_painted(self):
return self.filter(lambda item: not item.is_painted())
def filter_certified(self):
return self.filter(lambda item: item.is_certificate())
def filter_not_certified(self):
return self.filter(lambda item: not item.is_certificate())
def filter_non_crate(self):
return self.filter(lambda item: item.is_non_crate())
def filter_from_crate(self):
return self.filter(lambda item: not item.is_non_crate())
def filter_non_crate_by_rarity(self, rarity):
return self.filter(lambda item: item.is_non_crate() and item.quality == rarity)
def filter_ncr(self):
return self.filter_non_crate_by_rarity(Rarities.RARE)
def filter_ncvr(self):
return self.filter_non_crate_by_rarity(Rarities.VERY_RARE)
def filter_nci(self):
return self.filter_non_crate_by_rarity(Rarities.IMPORT)
def filter_nce(self):
return self.filter_non_crate_by_rarity(Rarities.EXOTIC)
def filter_blueprint(self):
return self.filter(lambda item: item.is_blueprint())
def filter_not_blueprint(self):
return self.filter(lambda item: not item.is_blueprint())
def filter_animated_decal(self):
return self.filter_by_type(Types.ANIMATED_DECAL)
def filter_antennas(self):
return self.filter_by_type(Types.ANTENNA)
def filter_avatar_border(self):
return self.filter_by_type(Types.AVATAR_BORDER)
def filter_cars(self):
return self.filter_by_type(Types.BODY)
def filter_decals(self):
return self.filter_by_type(Types.DECAL)
def filter_engine_audio(self):
return self.filter_by_type(Types.ENGINE_AUDIO)
def filter_goal_explosion(self):
return self.filter_by_type(Types.GOAL_EXPLOSION)
def filter_paint_finish(self):
return self.filter_by_type(Types.PAINT_FINISH)
def filter_player_anthem(self):
return self.filter_by_type(Types.PLAYER_ANTHEM)
def filter_banners(self):
return self.filter_by_type(Types.PLAYER_BANNER)
def filter_titles(self):
return self.filter_by_type(Types.PLAYER_TITLE)
def filter_boost(self):
return self.filter_by_type(Types.ROCKET_BOOST)
def filter_toppers(self):
return self.filter_by_type(Types.TOPPER)
def filter_trails(self):
return self.filter_by_type(Types.TRAIL)
def filter_wheels(self):
return self.filter_by_type(Types.WHEELS)
    # --- Filters by paint color: each delegates to filter_by_color with the
    # matching Colors.* constant. ---
    def filter_crimson(self):
        return self.filter_by_color(Colors.CRIMSON)
    def filter_sky_blue(self):
        return self.filter_by_color(Colors.SKY_BLUE)
    def filter_pink(self):
        return self.filter_by_color(Colors.PINK)
    def filter_orange(self):
        return self.filter_by_color(Colors.ORANGE)
    def filter_cobalt(self):
        return self.filter_by_color(Colors.COBALT)
    def filter_burnt_sienna(self):
        return self.filter_by_color(Colors.BURNT_SIENNA)
    def filter_titanium_white(self):
        return self.filter_by_color(Colors.TITANIUM_WHITE)
    def filter_grey(self):
        return self.filter_by_color(Colors.GREY)
    def filter_saffron(self):
        return self.filter_by_color(Colors.SAFFRON)
    def filter_lime(self):
        return self.filter_by_color(Colors.LIME)
    def filter_forest_green(self):
        return self.filter_by_color(Colors.FOREST_GREEN)
    def filter_black(self):
        return self.filter_by_color(Colors.BLACK)
    def filter_purple(self):
        return self.filter_by_color(Colors.PURPLE)
    # --- Filters by certification: each delegates to filter_by_certify with
    # the matching Certifies.* constant. ---
    def filter_aviator(self):
        return self.filter_by_certify(Certifies.AVIATOR)
    def filter_acrobat(self):
        return self.filter_by_certify(Certifies.ACROBAT)
    def filter_victor(self):
        return self.filter_by_certify(Certifies.VICTOR)
    def filter_striker(self):
        return self.filter_by_certify(Certifies.STRIKER)
    def filter_sniper(self):
        return self.filter_by_certify(Certifies.SNIPER)
    def filter_scorer(self):
        return self.filter_by_certify(Certifies.SCORER)
    def filter_playmaker(self):
        return self.filter_by_certify(Certifies.PLAYMAKER)
    def filter_guardian(self):
        return self.filter_by_certify(Certifies.GUARDIAN)
    def filter_paragon(self):
        return self.filter_by_certify(Certifies.PARAGON)
    def filter_sweeper(self):
        return self.filter_by_certify(Certifies.SWEEPER)
    def filter_turtle(self):
        return self.filter_by_certify(Certifies.TURTLE)
    def filter_tactician(self):
        return self.filter_by_certify(Certifies.TACTICIAN)
    def filter_showoff(self):
        return self.filter_by_certify(Certifies.SHOW_OFF)
    def filter_juggler(self):
        return self.filter_by_certify(Certifies.JUGGLER)
def filter_goalkeeper(self):
return self.filter_by_certify(Certifies.GOALKEEPER) | /rl-inventory-api-0.0.2.tar.gz/rl-inventory-api-0.0.2/src/rl_inventory_api/inventory.py | 0.808559 | 0.231397 | inventory.py | pypi |
from rl_inventory_api.constants import Colors, Certifies, Series, Tradeable, Rarities, Types
from dataclasses import dataclass, field
@dataclass
class Item:
    """A single Rocket League inventory item as parsed from an inventory dump.

    Field values mirror the raw dump, so most of them are strings that are
    compared against the constants in ``rl_inventory_api.constants``.
    """
    product_id: int = field(repr=False)           # internal product identifier
    name: str                                     # display name, e.g. "Octane: Dune Racer"
    slot: str                                     # item type slot (Types.*)
    paint: str                                    # paint color name (Colors.*)
    certification: str = field(repr=False)        # raw certification id
    certification_value: int = field(repr=False)  # progress toward certification rank
    certification_label: str                      # human-readable certification (Certifies.*)
    quality: str                                  # rarity (Rarities.*)
    crate: str                                    # crate/series name (Series.*)
    tradeable: str                                # "true"/"false" style flag (Tradeable.*)
    amount: int                                   # owned quantity (may arrive as a string)
    instance_id: int = field(repr=False)          # unique inventory instance id

    def quantity(self):
        """Return the owned amount as an integer (dumps may store it as str)."""
        return int(self.amount)

    def is_painted(self):
        """Return True if the item has any paint color."""
        return self.paint != Colors.NONE

    def is_certificate(self):
        """Return True if the item carries a certification."""
        return self.certification_label != Certifies.NONE

    def is_tradeable(self):
        """Return True if the item can be traded."""
        return self.tradeable == Tradeable.TRUE

    def is_non_crate(self):
        """Return True if the item does not come from a crate series."""
        return self.crate == Series.NON_CRATE

    def is_uncommon(self):
        return self.quality == Rarities.UNCOMMON

    def is_rare(self):
        return self.quality == Rarities.RARE

    def is_very_rare(self):
        return self.quality == Rarities.VERY_RARE

    def is_import(self):
        return self.quality == Rarities.IMPORT

    def is_exotic(self):
        return self.quality == Rarities.EXOTIC

    def is_black_market(self):
        return self.quality == Rarities.BLACK_MARKET

    def is_limited(self):
        return self.quality == Rarities.LIMITED

    def is_ncr(self):
        """Non-crate + Rare ("NCR")."""
        return self.is_non_crate() and self.is_rare()

    def is_ncvr(self):
        """Non-crate + Very Rare ("NCVR")."""
        return self.is_non_crate() and self.is_very_rare()

    def is_nci(self):
        """Non-crate + Import ("NCI")."""
        return self.is_non_crate() and self.is_import()

    def is_nce(self):
        """Non-crate + Exotic ("NCE")."""
        return self.is_non_crate() and self.is_exotic()

    def is_blueprint(self):
        """Return True if the item's slot marks it as a blueprint."""
        return self.slot == Types.BLUEPRINT

    @staticmethod
    def get_decal_and_car_name_by_name(name):
        """Split a decal name like ``"Octane: Dune Racer"`` into (car, decal).

        ``maxsplit=1`` keeps decal names that themselves contain a colon
        intact (the original ``split(":")`` raised ValueError on them).
        """
        car, decal = name.split(":", 1)
        decal = decal.strip()
        return car, decal

    def get_decal_and_car_name(self):
        """Return (car, decal) parsed from this item's own name."""
        return self.get_decal_and_car_name_by_name(self.name)
import numpy as np
from rl_learn.bandits import K, N # Default number of arms and states
class Bandit:
    """
    A class used to represent a Bandit environment where there is no concept of
    state. Bandits interact with agents of class BanditAgent. This class
    serves as an interface that all subclasses of Bandit must implement. The
    action space A is represented as {0, ..., k-1}, where k is the number of
    bandit arms.
    Methods
    -------
    Bandit(k=K)
        Returns an instance of a Bandit with k arms.
    interact(a)
        Performs action a and returns the numerical reward for doing so.
    get_alike_bandit()
        Returns a Bandit that is initialized with the same parameters as the
        instance that used this method.
    Parameters
    ----------
    k : Int >= 2
        The number of arms of the k-armed bandit
    """
    def __init__(self, k=K):
        """
        Returns an instance of a Bandit with k arms.
        Parameters
        ----------
        k : Int >= 2
            The number of arms of the k-armed bandit
        """
        self.k = k
    def interact(self, a):
        """
        Performs action a in the environment and returns the numerical reward
        for doing so.
        Parameters
        ----------
        a : Int in {0, ..., k-1}
            The choice of arm to pull
        Returns
        -------
        Real scalar
            The real-valued reward for taking action a
        """
        # Interface placeholder: concrete bandit subclasses override this.
        return None
    def get_alike_bandit(self):
        """
        Returns a Bandit that is initialized with the same parameters as the
        instance that used this method.
        Returns
        -------
        Bandit
            A reinitialized Bandit instance with the same parameters
        """
        # Interface placeholder: concrete bandit subclasses override this.
        return None
class KArmedTestbed(Bandit):
    """A stationary k-armed testbed bandit.

    Each arm ``a`` has a fixed true value Q*(a) drawn once from a normal
    distribution G(mu, sigma1). Pulling arm ``a`` returns a reward drawn from
    G(Q*(a), sigma2).

    Parameters
    ----------
    k : Int >= 2
        Number of bandit arms.
    mu : Real scalar
        Mean of the distribution the Q*(a) values are drawn from.
    sigma1 : Real scalar > 0
        Standard deviation of the distribution the Q*(a) values are drawn from.
    sigma2 : Real scalar > 0
        Standard deviation of each arm's reward distribution.
    """
    def __init__(self, k=K, mu=0, sigma1=1, sigma2=1):
        """Create a testbed with k arms and randomly drawn arm values Q*(a)."""
        self.k = k
        self._mu = mu
        self._sigma1 = sigma1
        self._sigma2 = sigma2
        self._rng = np.random.default_rng()
        # One true value per arm, drawn independently from G(mu, sigma1).
        self._means = [self._rng.normal(mu, sigma1) for _ in range(k)]
    def interact(self, a):
        """Pull arm ``a`` and return a reward drawn from G(Q*(a), sigma2).

        Parameters
        ----------
        a : Int in {0, ..., k-1}
            The arm to pull.

        Returns
        -------
        Real scalar
            The sampled reward.
        """
        return self._rng.normal(self._means[a], self._sigma2)
    def get_alike_bandit(self):
        """Return a freshly initialized testbed with the same parameters.

        Returns
        -------
        KArmedTestbed
            A new instance with re-drawn arm values.
        """
        return type(self)(self.k, self._mu, self._sigma1, self._sigma2)
class KArmedTestbedNonStationary(KArmedTestbed):
    """A non-stationary variant of the k-armed testbed.

    All arm values Q*(a) are re-drawn from G(mu, sigma1) every ``m``
    interactions, so an agent must keep tracking the changing optimum.

    Parameters
    ----------
    k : Int >= 2
        Number of bandit arms.
    mu : Real scalar
        Mean of the distribution the Q*(a) values are drawn from.
    sigma1 : Real scalar > 0
        Standard deviation of the distribution the Q*(a) values are drawn from.
    sigma2 : Real scalar > 0
        Standard deviation of each arm's reward distribution.
    m : Int > 0
        Number of timesteps between re-randomizations of the Q*(a)s.
    """
    def __init__(self, k=K, mu=0, sigma1=1, sigma2=1, m=300):
        """Create a non-stationary testbed with k arms re-drawn every m steps."""
        # Bug fix: the original called `super().__init__(k=K, mu=0, sigma1=1,
        # sigma2=1)`, silently discarding the arguments the caller passed.
        super().__init__(k, mu, sigma1, sigma2)
        self._m = m
        self._count = 0  # number of interactions so far
    def interact(self, a):
        """Pull arm ``a`` and return a reward drawn from G(Q*(a), sigma2).

        Every ``m`` calls, all arm values Q*(a) are re-drawn first.

        Parameters
        ----------
        a : Int in {0, ..., k-1}
            The arm to pull.

        Returns
        -------
        Real scalar
            The sampled reward.
        """
        self._count += 1
        if (self._count % self._m) == 0:
            # Non-stationarity: replace every arm's true value.
            self._means = [
                self._rng.normal(self._mu, self._sigma1) for _ in range(self.k)
            ]
        return self._rng.normal(self._means[a], self._sigma2)
    def get_alike_bandit(self):
        """Return a freshly initialized testbed with the same parameters.

        Returns
        -------
        KArmedTestbedNonStationary
            A new instance with re-drawn arm values.
        """
        return self.__class__(self.k, self._mu, self._sigma1, self._sigma2, self._m)
class ContextualBandit:
    """A contextual bandit where each state is a distinct KArmedTestbed.

    Actions affect only the immediate reward, never state transitions: after
    every interaction the next state is drawn uniformly from {0, ..., n-1}.
    The action space is {0, ..., k-1}.

    Parameters
    ----------
    k : Int >= 2
        Number of arms of each per-state k-armed bandit.
    n : Int >= 2
        Number of states (one KArmedTestbed per state).
    mu : Real scalar
        Mean of the distribution the Q*(state, action) values are drawn from.
    sigma1 : Real scalar > 0
        Standard deviation of the distribution the Q*(state, action) values
        are drawn from.
    sigma2 : Real scalar > 0
        Standard deviation of each reward distribution
        G(Q*(state, action), sigma2).
    """
    def __init__(self, k=K, n=N, mu=0, sigma1=1, sigma2=1):
        """Create a ContextualBandit with k arms and n states."""
        self.k = k
        self.n = n
        self._mu = mu
        self._sigma1 = sigma1
        self._sigma2 = sigma2
        # Bug fix: the original built `range(k)` bandits, but there must be
        # one bandit per STATE (n of them) — `interact` indexes `_bandits`
        # with a state in {0, ..., n-1}, which raised IndexError when k < n.
        self._bandits = [
            KArmedTestbed(k, mu, sigma1, sigma2) for _ in range(n)
        ]
        self._current_state = 0  # episodes always begin in state 0
    def interact(self, a):
        """Pull arm ``a`` in the current state.

        Parameters
        ----------
        a : Int in {0, ..., k-1}
            The arm to pull.

        Returns
        -------
        2-tuple:
            [0] : Real scalar
                The reward for taking action a in the current state.
            [1] : Int in {0, ..., n-1}
                The next state, drawn uniformly at random.
        """
        reward = self._bandits[self._current_state].interact(a)
        self._current_state = np.random.choice(self.n)
        return reward, self._current_state
    def get_alike_bandit(self):
        """Return a freshly initialized ContextualBandit with the same params.

        Returns
        -------
        ContextualBandit
            A new instance with re-drawn per-state arm values.
        """
        cls = self.__class__
        return cls(self.k, self.n, self._mu, self._sigma1, self._sigma2)
    def get_starting_state(self):
        """Return the starting state of the environment (state 0).

        Returns
        -------
        Int in {0, ..., n-1}
            The starting environment state.
        """
        return self._current_state
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.
    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """
    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)
    def calculate_mean(self):
        """Function to calculate the mean of the data set.
        Args:
            None
        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean
    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.
        Args:
            sample (bool): whether the data represents a sample or population
        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction (n - 1) for sample standard deviation.
        # NOTE(review): a single-element sample makes n == 0 and raises
        # ZeroDivisionError; callers should provide at least two points.
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev
    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.
        Args:
            None
        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')
    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.
        Args:
            x (float): point for calculating the probability density function
        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
    def plot_histogram_pdf(self, n_spaces = 50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range
        Args:
            n_spaces (int): number of data points
        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        mu = self.mean
        sigma = self.stdev
        min_range = min(self.data)
        max_range = max(self.data)
        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces
        x = []
        y = []
        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))
        # make the plots
        fig, axes = plt.subplots(2,sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Bug fix: the original set axes[0]'s ylabel a second time here,
        # leaving the pdf subplot (axes[1]) unlabeled.
        axes[1].set_ylabel('Density')
        plt.show()
        return x, y
    def __add__(self, other):
        """Function to add together two Gaussian distributions
        Args:
            other (Gaussian): Gaussian instance
        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances of independent Gaussians add; stdevs combine in quadrature.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result
    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance
        Args:
            None
        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
import functools
import random
from copy import deepcopy
from typing import Any, Dict, List, Optional, NamedTuple
from rlmusician.environment import CounterpointEnv
from rlmusician.utils import generate_copies, imap_in_parallel
class EnvWithActions(NamedTuple):
    """A tuple of `CounterpointEnv` and actions previously applied to it."""
    env: CounterpointEnv  # environment in the state reached after `actions`
    actions: List[int]  # roll-in actions, in the order they were applied
class Record(NamedTuple):
    """A record with finalized sequence of actions and resulting reward."""
    actions: List[int]  # complete action sequence of a finished episode
    reward: float  # final reward received for that episode
def roll_in(env: CounterpointEnv, actions: List[int]) -> EnvWithActions:
    """
    Reset the environment and replay a fixed prefix of actions on it.

    :param env:
        environment
    :param actions:
        sequence of roll-in actions
    :return:
        environment after roll-in actions
    """
    env.reset()
    for chosen_action in actions:
        env.step(chosen_action)
    return EnvWithActions(env, actions)
def roll_out_randomly(env_with_actions: EnvWithActions) -> Record:
    """
    Finish an in-progress episode by taking uniformly random valid actions.

    :param env_with_actions:
        environment and sequence of actions that have been taken before
    :return:
        finalized sequence of actions and reward for the episode
    """
    # Reseed so that worker processes do not share one RNG stream.
    random.seed()
    env = env_with_actions.env
    taken_actions = env_with_actions.actions
    options = env.valid_actions
    finished = False
    while not finished:
        next_action = random.choice(options)
        observation, reward, finished, info = env.step(next_action)
        taken_actions.append(next_action)
        options = info['next_actions']
    return Record(taken_actions, reward)
def estimate_number_of_trials(
        env: CounterpointEnv,
        n_trials_estimation_depth: int,
        n_trials_estimation_width: int,
        n_trials_factor: float
) -> int:
    """
    Estimate how many random trials should continue a stub.

    This procedure is an alternative to DFS in Monte Carlo Beam Search:
    it is easy to parallelize and works with stochastic transitions, at the
    cost of a less even distribution of trials.

    :param env:
        environment
    :param n_trials_estimation_depth:
        number of steps ahead to explore in order to collect statistics
        for inferring number of random trials to continue each stub
    :param n_trials_estimation_width:
        number of exploratory random trials that collect statistics
        for inferring number of random trials to continue each stub
    :param n_trials_factor:
        factor such that estimated number of trials is multiplied by it
    :return:
        number of trials to continue a stub at random
    """
    estimations = []
    for _ in range(n_trials_estimation_width):
        trial_env = deepcopy(env)
        options = trial_env.valid_actions
        finished = False
        branching_factors = []
        # Walk at most `n_trials_estimation_depth` random steps, recording
        # how many actions were available at each of them.
        while not finished and len(branching_factors) < n_trials_estimation_depth:
            branching_factors.append(len(options))
            observation, reward, finished, info = trial_env.step(
                random.choice(options)
            )
            options = info['next_actions']
        # Product of branching factors approximates the size of the subtree.
        subtree_size = 1
        for factor in branching_factors:
            subtree_size *= factor
        estimations.append(subtree_size)
    n_trials = n_trials_factor * sum(estimations) / n_trials_estimation_width
    return int(round(n_trials))
def add_records(
        env: CounterpointEnv,
        stubs: List[List[int]],
        records: List[Record],
        n_trials_estimation_depth: int,
        n_trials_estimation_width: int,
        n_trials_factor: float,
        paralleling_params: Dict[str, Any]
) -> List[Record]:
    """
    Play new episodes for every roll-in sequence and append their results.

    :param env:
        environment
    :param stubs:
        roll-in sequences
    :param records:
        previously collected statistics of finished episodes as sequences
        of actions and corresponding to them rewards
    :param n_trials_estimation_depth:
        number of steps ahead to explore in order to collect statistics
        for inferring number of random trials to continue each stub
    :param n_trials_estimation_width:
        number of exploratory random trials that collect statistics
        for inferring number of random trials to continue each stub
    :param n_trials_factor:
        factor such that estimated number of trials is multiplied by it
    :param paralleling_params:
        settings of parallel playing of episodes
    :return:
        extended statistics of finished episodes as sequences of actions and
        corresponding to them rewards
    """
    for stub in stubs:
        rolled_in = roll_in(env, stub)
        # Size the number of random continuations to the subtree below the stub.
        n_trials = estimate_number_of_trials(
            rolled_in.env,
            n_trials_estimation_depth,
            n_trials_estimation_width,
            n_trials_factor
        )
        new_records = imap_in_parallel(
            roll_out_randomly,
            generate_copies(rolled_in, n_trials),
            paralleling_params
        )
        records.extend(new_records)
    return records
def create_stubs(
        records: List[Record],
        n_stubs: int,
        stub_length: int,
        include_finalized_sequences: bool = True
) -> List[List[int]]:
    """
    Create roll-in sequences (stubs) based on collected statistics.

    :param records:
        sorted statistics of played episodes as sequences of actions
        and corresponding to them rewards; elements must be sorted by reward
        in descending order
    :param n_stubs:
        number of stubs to be created
    :param stub_length:
        number of actions in each stub
    :param include_finalized_sequences:
        if it is set to `True`, resulting number of stubs can be less than
        `n_stubs`, because finalized sequences are also counted
    :return:
        new stubs that can be extended further (i.e., without those of them
        that are finalized)
    """
    prefixes = []
    quota = n_stubs
    for record in records:
        if len(prefixes) == quota:
            break
        prefix = record.actions[:stub_length]
        if prefix in prefixes:
            continue
        if len(record.actions) <= stub_length:
            # This episode already ended within the prefix, so it cannot be
            # extended; optionally let it consume one slot of the quota.
            if include_finalized_sequences:  # pragma: no branch
                quota -= 1
            continue
        prefixes.append(prefix)
    return prefixes
def select_distinct_best_records(
        records: List[Record],
        n_records: int
) -> List[Record]:
    """
    Select records related to highest rewards (without duplicates).

    :param records:
        sorted statistics of played episodes as sequences of actions
        and corresponding to them rewards; elements must be sorted by reward
        in descending order
    :param n_records:
        number of unique records to select
    :return:
        best records
    """
    selected = []
    for candidate in records:
        if candidate in selected:
            continue
        selected.append(candidate)
        if len(selected) == n_records:
            break
    return selected
def optimize_with_monte_carlo_beam_search(
        env: CounterpointEnv,
        beam_width: int,
        n_records_to_keep: int,
        n_trials_estimation_depth: int,
        n_trials_estimation_width: int,
        n_trials_factor: float,
        paralleling_params: Optional[Dict[str, Any]] = None
) -> List[List[int]]:
    """
    Find optimum sequences of actions with Monte Carlo Beam Search.

    :param env:
        environment
    :param beam_width:
        number of best subsequences to be kept after each iteration
    :param n_records_to_keep:
        number of best played episodes to be kept after each iteration
    :param n_trials_estimation_depth:
        number of steps ahead to explore in order to collect statistics
        for inferring number of random trials to continue each stub
    :param n_trials_estimation_width:
        number of exploratory random trials that collect statistics
        for inferring number of random trials to continue each stub
    :param n_trials_factor:
        factor such that estimated number of trials is multiplied by it
    :param paralleling_params:
        settings of parallel playing of episodes;
        by default, number of processes is set to number of cores
        and each worker is not replaced with a newer one after some number of
        tasks are finished
    :return:
        best final sequences of actions
    """
    # The original final line carried dataset-extraction residue
    # ("| /path | pypi |") fused onto the return statement; removed.
    stubs = [[]]  # start with a single empty prefix
    records = []
    paralleling_params = paralleling_params or {}
    stub_length = 0
    # Each iteration extends the beam prefixes by one action until no prefix
    # can be extended further.
    while len(stubs) > 0:
        records = add_records(
            env,
            stubs,
            records,
            n_trials_estimation_depth,
            n_trials_estimation_width,
            n_trials_factor,
            paralleling_params
        )
        records = sorted(records, key=lambda x: x.reward, reverse=True)
        print(
            f"Current best reward: {records[0].reward:.5f}, "
            f"achieved with: {records[0].actions}."
        )
        stub_length += 1
        stubs = create_stubs(records, beam_width, stub_length)
        # Trim the history so memory does not grow with the number of trials.
        records = select_distinct_best_records(records, n_records_to_keep)
    results = [past_actions for past_actions, reward in records[:beam_width]]
    return results
from typing import List, NamedTuple
from sinethesizer.utils.music_theory import get_note_to_position_mapping
# Mapping from note names (e.g. 'C4') to their absolute positions in semitones.
NOTE_TO_POSITION = get_note_to_position_mapping()
# Scale degrees that form the tonic triad (I, III, V).
TONIC_TRIAD_DEGREES = (1, 3, 5)
class ScaleElement(NamedTuple):
    """A pitch from a diatonic scale."""
    note: str  # note name, e.g. 'C4'
    position_in_semitones: int  # absolute position per NOTE_TO_POSITION
    position_in_degrees: int  # index among this scale's pitches, from the bottom
    degree: int  # scale degree in {1, ..., 7}
    is_from_tonic_triad: bool  # True if degree is in TONIC_TRIAD_DEGREES
class Scale:
    """A diatonic scale."""
    def __init__(self, tonic: str, scale_type: str):
        """
        Initialize an instance.

        :param tonic:
            tonic pitch class represented by letter (like C or A#)
        :param scale_type:
            type of scale (currently, 'major', 'natural_minor', and
            'harmonic_minor' are supported)
        """
        self.tonic = tonic
        self.scale_type = scale_type
        self.elements = self.__create_elements()
        # Lookup tables over the same elements, keyed three different ways.
        self.note_to_element = {el.note: el for el in self.elements}
        self.position_in_semitones_to_element = {
            el.position_in_semitones: el for el in self.elements
        }
        self.position_in_degrees_to_element = {
            el.position_in_degrees: el for el in self.elements
        }
    def __create_elements(self) -> List[ScaleElement]:
        """Create sorted list of scale elements."""
        # For each scale type, degree of every chromatic step relative to the
        # tonic; `None` marks pitches that do not belong to the scale.
        patterns = {
            'major': [
                1, None, 2, None, 3, 4, None, 5, None, 6, None, 7
            ],
            'natural_minor': [
                1, None, 2, 3, None, 4, None, 5, 6, None, 7, None
            ],
            'harmonic_minor': [
                1, None, 2, 3, None, 4, None, 5, 6, None, None, 7
            ],
        }
        pattern = patterns[self.scale_type]
        tonic_position = NOTE_TO_POSITION[self.tonic + '1']
        elements = []
        for note, semitone_position in NOTE_TO_POSITION.items():
            degree = pattern[(semitone_position - tonic_position) % len(pattern)]
            if degree is None:
                continue
            elements.append(ScaleElement(
                note=note,
                position_in_semitones=semitone_position,
                # `len(elements)` counts previously accepted pitches, so it
                # equals this element's index within the scale.
                position_in_degrees=len(elements),
                degree=degree,
                is_from_tonic_triad=degree in TONIC_TRIAD_DEGREES
            ))
        return elements
    def get_element_by_note(self, note: str) -> ScaleElement:
        """Get scale element by its note (like 'C4' or 'A#5')."""
        try:
            return self.note_to_element[note]
        except KeyError:
            raise ValueError(
                f"Note {note} is not from {self.tonic}-{self.scale_type}."
            )
    def get_element_by_position_in_semitones(self, pos: int) -> ScaleElement:
        """Get scale element by its position in semitones."""
        try:
            return self.position_in_semitones_to_element[pos]
        except KeyError:
            raise ValueError(
                f"Position {pos} is not from {self.tonic}-{self.scale_type}."
            )
    def get_element_by_position_in_degrees(self, pos: int) -> ScaleElement:
        """Get scale element by its position in scale degrees."""
        try:
            return self.position_in_degrees_to_element[pos]
        except KeyError:
            raise ValueError(
                f"Position {pos} is out of available positions range."
            )
def check_consonance(
        first: ScaleElement, second: ScaleElement,
        is_perfect_fourth_consonant: bool = False
) -> bool:
    """
    Check whether an interval between two pitches is consonant.

    :param first:
        first pitch
    :param second:
        second pitch
    :param is_perfect_fourth_consonant:
        indicator whether to consider perfect fourth a consonant interval
        (for two voices, it usually considered dissonant)
    :return:
        indicator whether the interval is consonant
    """
    # The original final line carried dataset-extraction residue
    # ("| /path | pypi |") fused onto the return statement; removed.
    # Consonant intervals in semitones within one octave: unison,
    # minor/major third, perfect fifth, minor/major sixth.
    consonant_intervals = {0, 3, 4, 7, 8, 9}
    if is_perfect_fourth_consonant:
        consonant_intervals.add(5)
    # Reduce the interval to one octave before the lookup.
    interval = abs(first.position_in_semitones - second.position_in_semitones) % 12
    return interval in consonant_intervals
import copy
import multiprocessing as mp
from typing import Any, Callable, Dict, Iterator, List, Optional
def convert_to_base(
    number: int, base: int, min_length: Optional[int] = None
) -> List[int]:
    """
    Convert number to its representation in a given system.

    :param number:
        positive integer number to be converted
    :param base:
        positive integer number to be used as base
    :param min_length:
        if result length is less than it, zero padding is added to the left
    :return:
        list where each element represents a digit in a given system
    """
    # Digits are collected least-significant first and reversed at the end.
    reversed_digits = [0] if number == 0 else []
    while number > 0:
        number, remainder = divmod(number, base)
        reversed_digits.append(remainder)
    if min_length is not None:
        n_zeros = max(min_length - len(reversed_digits), 0)
        reversed_digits.extend(0 for _ in range(n_zeros))
    return reversed_digits[::-1]
def imap_in_parallel(
    fn: Callable,
    args: Iterator[Any],
    pool_kwargs: Optional[Dict[str, Any]] = None
) -> Iterator[Any]:
    """
    Apply function to each argument from given iterable in parallel.
    This function contains boilerplate code that is needed for correct work
    of `pytest-cov`. Usage of `mp.Pool` as context manager is not alternative
    to this function, because:
    1) not all covered lines of code may be marked as covered;
    2) some files with names like '.coverage.hostname.*' may be not deleted.
    See more: https://github.com/pytest-dev/pytest-cov/issues/250.
    :param fn:
        function
    :param args:
        generator of arguments
    :param pool_kwargs:
        parameters of pool such as number of processes and maximum number of
        tasks for a worker before it is replaced with a new one
    :return:
        results of applying the function to the arguments
    """
    pool_kwargs = pool_kwargs or {}
    # Translate this project's kwarg names into those `mp.Pool` expects;
    # missing keys map to None, which `mp.Pool` treats as its default.
    pool_kwargs['processes'] = pool_kwargs.get('n_processes')
    pool_kwargs['maxtasksperchild'] = pool_kwargs.get('max_tasks_per_child')
    old_keys = ['n_processes', 'max_tasks_per_child']
    pool_kwargs = {k: v for k, v in pool_kwargs.items() if k not in old_keys}
    pool = mp.Pool(**pool_kwargs)
    try:
        results = pool.imap(fn, args)
    finally:
        # `close` + `join` run before the returned iterator is consumed;
        # `imap` buffers the workers' results, so they are still yielded
        # afterwards. NOTE(review): this relies on `args` being finite —
        # confirm callers never pass an infinite generator.
        pool.close()
        pool.join()
    return results
def generate_copies(something: Any, n_copies: int) -> Iterator[Any]:
    """
    Generate shallow copies of an object.

    :param something:
        object to be copied
    :param n_copies:
        number of copies to be generated
    :return:
        shallow copies
    """
    produced = 0
    while produced < n_copies:
        yield copy.copy(something)
        produced += 1
def rolling_aggregate(
    values: List[float],
    aggregation_fn: Callable[[List[float]], float],
    window_size: int
) -> List[float]:
    """
    Compute rolling aggregate.

    The i-th result aggregates the last `window_size` values up to and
    including `values[i]`; the first windows are shorter while they fill up.

    :param values:
        list of values to be aggregated
    :param aggregation_fn:
        aggregation function
    :param window_size:
        size of rolling window
    :return:
        list of rolling aggregates
    """
    # The original final line carried dataset-extraction residue
    # ("| /path | pypi |") fused onto the return statement; removed.
    window = []
    results = []
    for value in values:
        if len(window) == window_size:
            window.pop(0)
        window.append(value)
        results.append(aggregation_fn(window))
    return results
import os
import subprocess
import traceback
from pkg_resources import resource_filename
from typing import List
import pretty_midi
from sinethesizer.io import (
convert_events_to_timeline,
convert_tsv_to_events,
create_instruments_registry,
write_timeline_to_wav
)
from sinethesizer.utils.music_theory import get_list_of_notes
# Number of eighth-note slots in one measure; time positions in pieces are
# expressed in eighths and converted to seconds through this constant.
N_EIGHTHS_PER_MEASURE = 8
def create_midi_from_piece(
        piece: 'rlmusician.environment.Piece',
        midi_path: str,
        measure_in_seconds: float,
        cantus_firmus_instrument: int,
        counterpoint_instrument: int,
        velocity: int,
        trailing_silence_in_measures: int = 2
) -> None:
    """
    Create MIDI file from a piece created by this package.
    :param piece:
        `Piece` instance
    :param midi_path:
        path where resulting MIDI file is going to be saved
    :param measure_in_seconds:
        duration of one measure in seconds
    :param cantus_firmus_instrument:
        for an instrument that plays cantus firmus, its ID (number)
        according to General MIDI specification
    :param counterpoint_instrument:
        for an instrument that plays counterpoint line, its ID (number)
        according to General MIDI specification
    :param velocity:
        one common velocity for all notes
    :param trailing_silence_in_measures:
        number of measures with silence to add at the end of the composition
    :return:
        None
    """
    # Piece positions are relative to A0, whereas `pretty_midi` numbers
    # pitches absolutely; shift by A0's MIDI number to convert.
    numeration_shift = pretty_midi.note_name_to_number('A0')
    lines = [
        piece.cantus_firmus,
        piece.counterpoint
    ]
    pretty_midi_instruments = [
        pretty_midi.Instrument(program=cantus_firmus_instrument),
        pretty_midi.Instrument(program=counterpoint_instrument)
    ]
    for line, pretty_midi_instrument in zip(lines, pretty_midi_instruments):
        for element in line:
            pitch = (
                element.scale_element.position_in_semitones
                + numeration_shift
            )
            # Convert time positions from eighths to seconds.
            start_time = (
                element.start_time_in_eighths
                / N_EIGHTHS_PER_MEASURE
                * measure_in_seconds
            )
            end_time = (
                element.end_time_in_eighths
                / N_EIGHTHS_PER_MEASURE
                * measure_in_seconds
            )
            note = pretty_midi.Note(
                velocity=velocity,
                pitch=pitch,
                start=start_time,
                end=end_time
            )
            pretty_midi_instrument.notes.append(note)
        pretty_midi_instrument.notes.sort(key=lambda x: x.start)
    # Append one inaudible note so that the file ends with trailing silence.
    start_time = piece.n_measures * measure_in_seconds
    end_time = start_time + trailing_silence_in_measures * measure_in_seconds
    note = pretty_midi.Note(
        velocity=0,
        pitch=1,  # Arbitrary value that affects nothing.
        start=start_time,
        end=end_time
    )
    pretty_midi_instruments[0].notes.append(note)
    composition = pretty_midi.PrettyMIDI()
    for pretty_midi_instrument in pretty_midi_instruments:
        composition.instruments.append(pretty_midi_instrument)
    composition.write(midi_path)
def create_events_from_piece(
        piece: 'rlmusician.environment.Piece',
        events_path: str,
        measure_in_seconds: float,
        cantus_firmus_instrument: str,
        counterpoint_instrument: str,
        velocity: float,
        effects: str = ''
) -> None:
    """
    Save a piece as a TSV file with `sinethesizer` events.
    :param piece:
        `Piece` instance
    :param events_path:
        path to a file where result is going to be saved
    :param measure_in_seconds:
        duration of one measure in seconds
    :param cantus_firmus_instrument:
        instrument to be used to play cantus firmus
    :param counterpoint_instrument:
        instrument to be used to play counterpoint line
    :param velocity:
        one common velocity for all notes
    :param effects:
        sound effects to be applied to the resulting event
    :return:
        None
    """
    notes = get_list_of_notes()
    eighth_in_seconds = measure_in_seconds / N_EIGHTHS_PER_MEASURE
    lines_with_meta = [
        (piece.cantus_firmus, 'cantus_firmus', cantus_firmus_instrument),
        (piece.counterpoint, 'counterpoint', counterpoint_instrument),
    ]
    events = []
    for melodic_line, line_id, instrument in lines_with_meta:
        for element in melodic_line:
            onset = element.start_time_in_eighths * eighth_in_seconds
            length = (
                (element.end_time_in_eighths - element.start_time_in_eighths)
                * eighth_in_seconds
            )
            pitch_id = element.scale_element.position_in_semitones
            events.append(
                (instrument, onset, length, notes[pitch_id], pitch_id, line_id)
            )
    # Order events by start time, then pitch, then duration.
    events.sort(key=lambda event: (event[1], event[4], event[2]))
    header = '\t'.join([
        'instrument', 'start_time', 'duration', 'frequency',
        'velocity', 'effects', 'line_id'
    ])
    rows = [header] + [
        f"{e[0]}\t{e[1]}\t{e[2]}\t{e[3]}\t{velocity}\t{effects}\t{e[5]}"
        for e in events
    ]
    with open(events_path, 'w') as out_file:
        out_file.write('\n'.join(rows) + '\n')
def create_wav_from_events(events_path: str, output_path: str) -> None:
    """
    Render `sinethesizer` TSV file to a WAV file.
    :param events_path:
        path to TSV file with track represented as `sinethesizer` events
    :param output_path:
        path where resulting WAV file is going to be saved
    :return:
        None
    """
    presets_path = resource_filename(
        'rlmusician',
        'configs/sinethesizer_presets.yml'
    )
    frame_rate = 48000
    settings = {
        'frame_rate': frame_rate,
        'trailing_silence': 2,
        'peak_amplitude': 1,
        'instruments_registry': create_instruments_registry(presets_path),
    }
    events = convert_tsv_to_events(events_path, settings)
    timeline = convert_events_to_timeline(events, settings)
    write_timeline_to_wav(output_path, timeline, frame_rate)
def make_lilypond_template(tonic: str, scale_type: str) -> str:
    """
    Make template of Lilypond text file.
    The returned string is itself a `str.format` template: it contains two
    `{}` placeholders to be filled later with the two rendered voices.
    :param tonic:
        tonic pitch class represented by letter (like C or A#)
    :param scale_type:
        type of scale (e.g., 'major', 'natural_minor', or 'harmonic_minor')
    :return:
        template
    """
    # Braces are escaped twice, because the text is formatted twice:
    # here (key signature) and later (voices).
    template_lines = [
        "\\version \"2.18.2\"",
        "\\layout {{{{",
        " indent = #0",
        "}}}}",
        "\\new StaffGroup <<",
        " \\new Staff <<",
        " \\clef treble",
        " \\time 4/4",
        " \\key {} \\{}",
        " {{{{{{}}}}}}",
        " \\\\",
        " {{{{{{}}}}}}",
        " >>",
        ">>",
    ]
    raw_template = "\n".join(template_lines)
    # Convert accidentals to Lilypond spelling ('is' for sharp, 'es' for flat).
    lilypond_tonic = tonic.replace('#', 'is').replace('b', 'es').lower()
    lilypond_mode = scale_type.split('_')[-1]
    return raw_template.format(lilypond_tonic, lilypond_mode)
def convert_to_lilypond_note(
        line_element: 'rlmusician.environment.piece.LineElement'
) -> str:
    """
    Convert `LineElement` instance to note in Lilypond absolute notation.
    :param line_element:
        element of a melodic line
    :return:
        note in Lilypond absolute notation
    """
    note_name = line_element.scale_element.note
    # Lilypond spells sharps as 'is' and flats as 'es'.
    letter = note_name[:-1].replace('#', 'is').replace('b', 'es').lower()
    lilypond_default_octave_id = 3
    octave_offset = int(note_name[-1]) - lilypond_default_octave_id
    octave_mark = "'" if octave_offset >= 0 else ','
    marks = octave_mark * abs(octave_offset)
    start = line_element.start_time_in_eighths
    end = line_element.end_time_in_eighths
    offset_within_measure = start % N_EIGHTHS_PER_MEASURE
    n_measures = (end - start) / N_EIGHTHS_PER_MEASURE
    if n_measures == 1.0 and offset_within_measure > 0:
        # A whole-measure note starting mid-measure is tied over the barline.
        filled_share = offset_within_measure / N_EIGHTHS_PER_MEASURE
        first_duration = int(round(1 / (1 - filled_share)))
        second_duration = int(round(1 / filled_share))
        return (
            f"{letter}{marks}{first_duration}~ "
            f"{letter}{marks}{second_duration}"
        )
    # Lilypond durations are reciprocals of the share of a measure.
    duration = int(round((1 / n_measures)))
    return f"{letter}{marks}{duration}"
def combine_lilypond_voices(
        counterpoint_voice: str,
        cantus_firmus_voice: str,
        is_counterpoint_above: bool,
        counterpoint_start_pause_in_eighths: int
) -> List[str]:
    """
    Order Lilypond voices and prepend an opening rest to counterpoint if any.
    :param counterpoint_voice:
        Lilypond representation of counterpoint line (without pauses)
    :param cantus_firmus_voice:
        Lilypond representation of cantus firmus line
    :param is_counterpoint_above:
        indicator whether counterpoint is written above cantus firmus
    :param counterpoint_start_pause_in_eighths:
        duration of pause that opens counterpoint line (in eighths of measure)
    :return:
        combined Lilypond representations
    """
    if counterpoint_start_pause_in_eighths > 0:
        rest_duration = int(round(
            N_EIGHTHS_PER_MEASURE / counterpoint_start_pause_in_eighths
        ))
        counterpoint_voice = f'r{rest_duration} {counterpoint_voice}'
    voices = [counterpoint_voice, cantus_firmus_voice]
    if not is_counterpoint_above:
        voices.reverse()
    return voices
def create_lilypond_file_from_piece(
        piece: 'rlmusician.environment.Piece',
        output_path: str
) -> None:
    """
    Create text file in format of Lilypond sheet music editor.
    :param piece:
        musical piece
    :param output_path:
        path where resulting file is going to be saved
    :return:
        None
    """
    template = make_lilypond_template(piece.tonic, piece.scale_type)
    # Render each melodic line as a space-separated sequence of notes.
    rendered_lines = {
        line_id: " ".join(
            convert_to_lilypond_note(line_element)
            for line_element in melodic_line
        )
        for line_id, melodic_line in [
            ('counterpoint', piece.counterpoint),
            ('cantus_firmus', piece.cantus_firmus),
        ]
    }
    voices = combine_lilypond_voices(
        rendered_lines['counterpoint'],
        rendered_lines['cantus_firmus'],
        piece.is_counterpoint_above,
        piece.counterpoint_specifications['start_pause_in_eighths']
    )
    with open(output_path, 'w') as out_file:
        out_file.write(template.format(*voices))
def create_pdf_sheet_music_with_lilypond(
        lilypond_path: str
) -> None:  # pragma: no cover
    """
    Create PDF file with sheet music.
    :param lilypond_path:
        path to a text file in Lilypond format
    :return:
        None
    """
    dir_path, filename = os.path.split(lilypond_path)
    # Fix: the command must reference the actual file; previously the
    # command string did not include the file name at all.
    bash_command = f"lilypond {filename}"
    try:
        # Run from the file's own directory so that the PDF is created
        # next to the source file; fall back to the current directory
        # if a bare file name is passed (`cwd=''` would raise).
        process = subprocess.Popen(
            bash_command.split(),
            cwd=dir_path or None,
            stdout=subprocess.PIPE
        )
        process.communicate()
    except Exception:
        # Best-effort rendering: a missing Lilypond binary must not
        # crash the whole pipeline.
        print("Rendering sheet music to PDF failed. Do you have Lilypond?")
        print(traceback.format_exc())
import datetime
import os
from typing import Any, Dict, List, NamedTuple
import numpy as np
from sinethesizer.utils.music_theory import get_note_to_position_mapping
from rlmusician.environment.rules import get_rules_registry
from rlmusician.utils import (
Scale,
ScaleElement,
check_consonance,
create_events_from_piece,
create_lilypond_file_from_piece,
create_midi_from_piece,
create_wav_from_events,
create_pdf_sheet_music_with_lilypond
)
# Mapping from note names to their positions in semitones (from `sinethesizer`).
NOTE_TO_POSITION = get_note_to_position_mapping()
# Time resolution: every measure is split into eight equal steps (eighths).
N_EIGHTHS_PER_MEASURE = 8
class LineElement(NamedTuple):
    """An element of a melodic line."""
    # Pitch of the element as an element of the piece's scale.
    scale_element: ScaleElement
    # Start time measured in eighths from the beginning of the piece.
    start_time_in_eighths: int
    # End time (exclusive) measured in eighths from the beginning of the piece.
    end_time_in_eighths: int
class Piece:
    """Piece where florid counterpoint line is created given cantus firmus."""
    def __init__(
            self,
            tonic: str,
            scale_type: str,
            cantus_firmus: List[str],
            counterpoint_specifications: Dict[str, Any],
            rules: Dict[str, Any],
            rendering_params: Dict[str, Any]
    ):
        """
        Initialize instance.
        :param tonic:
            tonic pitch class represented by letter (like C or A#)
        :param scale_type:
            type of scale (currently, 'major', 'natural_minor', and
            'harmonic_minor' are supported)
        :param cantus_firmus:
            cantus firmus as a sequence of notes
        :param counterpoint_specifications:
            parameters of a counterpoint line
        :param rules:
            names of applicable rules and parameters of these rules
        :param rendering_params:
            settings of saving the piece to TSV, MIDI, and WAV files
        """
        # Initial inputs.
        self.tonic = tonic
        self.scale_type = scale_type
        self.counterpoint_specifications = counterpoint_specifications
        self.names_of_rules = rules['names']
        self.rules_params = rules['params']
        self.rendering_params = rendering_params
        # Calculated attributes.
        self.scale = Scale(tonic, scale_type)
        self.max_skip = counterpoint_specifications['max_skip_in_degrees']
        # All melodic intervals (in scale degrees) available at a single step.
        self.all_movements = list(range(-self.max_skip, self.max_skip + 1))
        self.n_measures = len(cantus_firmus)
        self.total_duration_in_eighths = (
            N_EIGHTHS_PER_MEASURE * self.n_measures
        )
        # Melodic lines.
        self.cantus_firmus = self.__create_cantus_firmus(cantus_firmus)
        self.counterpoint = self.__create_beginning_of_counterpoint()
        # Mutual arrangement of the lines is fixed by their first elements.
        self.is_counterpoint_above = (
            self.counterpoint[0].scale_element.position_in_semitones
            > self.cantus_firmus[0].scale_element.position_in_semitones
        )
        # Boundaries.
        end_note = counterpoint_specifications['end_note']
        self.end_scale_element = self.scale.get_element_by_note(end_note)
        self.lowest_element = self.scale.get_element_by_note(
            counterpoint_specifications['lowest_note']
        )
        self.highest_element = self.scale.get_element_by_note(
            counterpoint_specifications['highest_note']
        )
        self.__validate_boundary_notes()
        # Piano roll.
        self._piano_roll = None
        self.__initialize_piano_roll()
        self.lowest_row_to_show = None
        self.highest_row_to_show = None
        self.__set_range_to_show()
        # Runtime variables.
        self.current_time_in_eighths = None
        self.current_measure_durations = None
        self.past_movements = None
        self.current_motion_start_element = None
        self.is_last_element_consonant = None
        self.__set_defaults_to_runtime_variables()
    def __create_cantus_firmus(
            self, cantus_firmus_as_notes: List[str]
    ) -> List[LineElement]:
        """Create cantus firmus from a sequence of its notes."""
        # Every cantus firmus note occupies exactly one whole measure.
        cantus_firmus = [
            LineElement(
                scale_element=self.scale.get_element_by_note(note),
                start_time_in_eighths=N_EIGHTHS_PER_MEASURE * i,
                end_time_in_eighths=N_EIGHTHS_PER_MEASURE * (i+1)
            )
            for i, note in enumerate(cantus_firmus_as_notes)
        ]
        return cantus_firmus
    def __create_beginning_of_counterpoint(self) -> List[LineElement]:
        """Create beginning (first measure) of the counterpoint line."""
        start_note = self.counterpoint_specifications['start_note']
        # The opening note may be shortened by an initial pause.
        start_element = LineElement(
            self.scale.get_element_by_note(start_note),
            self.counterpoint_specifications['start_pause_in_eighths'],
            N_EIGHTHS_PER_MEASURE
        )
        counterpoint = [start_element]
        return counterpoint
    def __validate_boundary_notes(self) -> None:
        """Check that boundary notes for both lines are valid."""
        if not self.cantus_firmus[0].scale_element.is_from_tonic_triad:
            raise ValueError(
                f"{self.cantus_firmus[0].scale_element.note} is not "
                f"a tonic triad member for {self.tonic}-{self.scale_type}; "
                f"therefore, cantus firmus can not start with it."
            )
        if not self.cantus_firmus[-1].scale_element.is_from_tonic_triad:
            raise ValueError(
                f"{self.cantus_firmus[-1].scale_element.note} is not "
                f"a tonic triad member for {self.tonic}-{self.scale_type}; "
                f"therefore, cantus firmus can not end with it."
            )
        if not self.counterpoint[0].scale_element.is_from_tonic_triad:
            raise ValueError(
                f"{self.counterpoint[0].scale_element.note} is not "
                f"a tonic triad member for {self.tonic}-{self.scale_type}; "
                f"therefore, counterpoint line can not start with it."
            )
        if not self.end_scale_element.is_from_tonic_triad:
            raise ValueError(
                f"{self.end_scale_element.note} is not "
                f"a tonic triad member for {self.tonic}-{self.scale_type}; "
                f"therefore, counterpoint line can not end with it."
            )
        lowest_position = self.lowest_element.position_in_semitones
        highest_position = self.highest_element.position_in_semitones
        if lowest_position >= highest_position:
            raise ValueError(
                "Lowest note and highest note are in wrong order: "
                f"{self.counterpoint_specifications['lowest_note']} "
                "is higher than "
                f"{self.counterpoint_specifications['highest_note']}."
            )
    def __initialize_piano_roll(self) -> None:
        """Create piano roll and place all pre-defined notes to it."""
        # Rows are pitch positions in semitones, columns are eighths of time.
        shape = (len(NOTE_TO_POSITION), self.total_duration_in_eighths)
        self._piano_roll = np.zeros(shape, dtype=np.int32)
        for line_element in self.cantus_firmus:
            self.__add_to_piano_roll(line_element)
        self.__add_to_piano_roll(self.counterpoint[0])
    def __add_to_piano_roll(self, line_element: LineElement) -> None:
        """Add a line element to the piano roll."""
        self._piano_roll[
            line_element.scale_element.position_in_semitones,
            line_element.start_time_in_eighths:line_element.end_time_in_eighths
        ] = 1
    def __set_range_to_show(self) -> None:
        """Set range of pitch positions that can occur in a piece."""
        cantus_firmus_positions = [
            line_element.scale_element.position_in_semitones
            for line_element in self.cantus_firmus
        ]
        cantus_firmus_lower_bound = min(cantus_firmus_positions)
        cantus_firmus_upper_bound = max(cantus_firmus_positions)
        counterpoint_lower_bound = self.lowest_element.position_in_semitones
        counterpoint_upper_bound = self.highest_element.position_in_semitones
        self.lowest_row_to_show = min(
            cantus_firmus_lower_bound,
            counterpoint_lower_bound
        )
        self.highest_row_to_show = max(
            cantus_firmus_upper_bound,
            counterpoint_upper_bound
        )
    def __set_defaults_to_runtime_variables(self) -> None:
        """Set default values to variables that change at runtime."""
        # The first measure of counterpoint is pre-filled at initialization,
        # so the clock starts at the second measure.
        self.current_time_in_eighths = N_EIGHTHS_PER_MEASURE
        self.current_measure_durations = []
        self.past_movements = []
        self.current_motion_start_element = self.counterpoint[0]
        self.is_last_element_consonant = True
    def __find_next_position_in_degrees(self, movement: int) -> int:
        """Find position (in scale degrees) that is reached by movement."""
        next_position = (
            self.counterpoint[-1].scale_element.position_in_degrees
            + movement
        )
        return next_position
    def __find_next_element(self, movement: int, duration: int) -> LineElement:
        """Find continuation of counterpoint line by movement and duration."""
        next_position = self.__find_next_position_in_degrees(movement)
        next_line_element = LineElement(
            self.scale.get_element_by_position_in_degrees(next_position),
            self.current_time_in_eighths,
            self.current_time_in_eighths + duration
        )
        return next_line_element
    def __find_cf_elements(self, duration: int) -> List[LineElement]:
        """Find what in cantus firmus sounds simultaneously with a new note."""
        start_index = self.current_time_in_eighths // N_EIGHTHS_PER_MEASURE
        end_time = self.current_time_in_eighths + duration
        # Exclusive index of the measure where the new note ends
        # (a tied note may span two measures).
        end_index = (end_time - 1) // N_EIGHTHS_PER_MEASURE + 1
        results = self.cantus_firmus[start_index:end_index]
        return results
    def __find_previous_cf_element(self) -> LineElement:
        """Find what in cantus firmus sounds before a new note."""
        index = (self.current_time_in_eighths - 1) // N_EIGHTHS_PER_MEASURE
        result = self.cantus_firmus[index]
        return result
    def __check_range(self, movement: int) -> bool:
        """Check that movement does not lead beyond a range of a line."""
        next_position = self.__find_next_position_in_degrees(movement)
        if next_position < self.lowest_element.position_in_degrees:
            return False
        if next_position > self.highest_element.position_in_degrees:
            return False
        return True
    def __check_total_duration(self, duration: int) -> bool:
        """Check that nothing is suspended to the last measure."""
        # The last measure is reserved for the predefined final note.
        available_duration = N_EIGHTHS_PER_MEASURE * (self.n_measures - 1)
        return self.current_time_in_eighths + duration <= available_duration
    def __check_rules(self, movement: int, duration: int) -> bool:
        """Check compliance with the rules."""
        registry = get_rules_registry()
        continuation = self.__find_next_element(movement, duration)
        durations = [x for x in self.current_measure_durations] + [duration]
        cantus_firmus_elements = self.__find_cf_elements(duration)
        previous_cantus_firmus_element = self.__find_previous_cf_element()
        # Each rule function picks the keys it needs from this state
        # and ignores the rest through `**kwargs`.
        state = {
            'line': self.counterpoint,
            'counterpoint_continuation': continuation,
            'movement': movement,
            'past_movements': self.past_movements,
            'piece_duration': self.total_duration_in_eighths,
            'current_measure_durations': self.current_measure_durations,
            'durations': durations,
            'cantus_firmus_elements': cantus_firmus_elements,
            'previous_cantus_firmus_element': previous_cantus_firmus_element,
            'current_motion_start_element': self.current_motion_start_element,
            'is_last_element_consonant': self.is_last_element_consonant,
            'is_counterpoint_above': self.is_counterpoint_above,
            'counterpoint_end': self.end_scale_element,
        }
        for rule_name in self.names_of_rules:
            rule_fn = registry[rule_name]
            rule_fn_params = self.rules_params.get(rule_name, {})
            is_compliant = rule_fn(**state, **rule_fn_params)
            if not is_compliant:
                return False
        return True
    def check_validity(self, movement: int, duration: int) -> bool:
        """
        Check whether suggested continuation is valid.
        :param movement:
            shift (in scale degrees) from previous element to a new one
        :param duration:
            duration (in eighths) of a new element
        :return:
            `True` if the continuation is valid, `False` else
        """
        if movement not in self.all_movements:
            return False
        if not self.__check_range(movement):
            return False
        if not self.__check_total_duration(duration):
            return False
        if not self.__check_rules(movement, duration):
            return False
        return True
    def __update_current_measure_durations(self, duration: int) -> None:
        """Update division of current measure by played notes."""
        total_duration = sum(self.current_measure_durations) + duration
        if total_duration < N_EIGHTHS_PER_MEASURE:
            # The measure is not finished yet.
            self.current_measure_durations.append(duration)
        elif total_duration == N_EIGHTHS_PER_MEASURE:
            # The measure is finished exactly; start collecting a new one.
            self.current_measure_durations = []
        else:
            # The note is tied over the barline; the excess opens
            # the next measure.
            syncopated_duration = total_duration - N_EIGHTHS_PER_MEASURE
            self.current_measure_durations = [syncopated_duration]
    def __update_current_motion_start(self) -> None:
        """Update element opening continuous motion in one direction."""
        if len(self.past_movements) < 2:
            return
        # Opposite signs of the two last movements mean a change of direction.
        if self.past_movements[-1] * self.past_movements[-2] < 0:
            self.current_motion_start_element = self.counterpoint[-2]
    def __update_indicator_of_consonance(self, duration: int) -> None:
        """Update indicator of current vertical consonance between lines."""
        cantus_firmus_elements = self.__find_cf_elements(duration)
        cantus_firmus_element = cantus_firmus_elements[-1].scale_element
        counterpoint_element = self.counterpoint[-1].scale_element
        self.is_last_element_consonant = check_consonance(
            cantus_firmus_element, counterpoint_element
        )
    def __update_runtime_variables(self, movement: int, duration: int) -> None:
        """Update runtime variables representing current state."""
        # Note: consonance must be updated before the clock advances,
        # because `__find_cf_elements` depends on `current_time_in_eighths`.
        self.__update_indicator_of_consonance(duration)
        self.current_time_in_eighths += duration
        self.past_movements.append(movement)
        self.__update_current_measure_durations(duration)
        self.__update_current_motion_start()
    def __finalize_if_needed(self) -> None:
        """Add final measure of counterpoint line if the piece is finished."""
        penultimate_measure_end = N_EIGHTHS_PER_MEASURE * (self.n_measures - 1)
        if self.current_time_in_eighths < penultimate_measure_end:
            return
        # The last measure is always the predefined final note.
        end_line_element = LineElement(
            self.end_scale_element,
            penultimate_measure_end,
            self.total_duration_in_eighths
        )
        self.counterpoint.append(end_line_element)
        self.__add_to_piano_roll(end_line_element)
        last_movement = (
            self.end_scale_element.position_in_degrees
            - self.counterpoint[-2].scale_element.position_in_degrees
        )
        self.past_movements.append(last_movement)
        self.current_time_in_eighths = self.total_duration_in_eighths
    def add_line_element(self, movement: int, duration: int) -> None:
        """
        Add a continuation of counterpoint line.
        :param movement:
            shift (in scale degrees) from previous element to a new one
        :param duration:
            duration (in eighths) of a new element
        :return:
            None
        """
        if self.current_time_in_eighths == self.total_duration_in_eighths:
            raise RuntimeError("Attempt to add notes to a finished piece.")
        if not self.check_validity(movement, duration):
            raise ValueError(
                "The suggested continuation is not valid. "
                "It either breaks some rules or goes beyond ranges."
            )
        next_line_element = self.__find_next_element(movement, duration)
        self.counterpoint.append(next_line_element)
        self.__add_to_piano_roll(next_line_element)
        self.__update_runtime_variables(movement, duration)
        self.__finalize_if_needed()
    def reset(self) -> None:
        """
        Discard all changes made after initialization.
        :return:
            None
        """
        self.counterpoint = self.counterpoint[0:1]
        self.__initialize_piano_roll()
        self.__set_defaults_to_runtime_variables()
    @property
    def piano_roll(self) -> np.ndarray:
        """Get piece representation as piano roll (without irrelevant rows)."""
        reverted_roll = self._piano_roll[
            self.lowest_row_to_show:self.highest_row_to_show + 1, :
        ]
        # Flip vertically so that higher pitches are in upper rows.
        roll = np.flip(reverted_roll, axis=0)
        return roll
    def render(self) -> None:  # pragma: no cover
        """
        Save piece as TSV, MIDI, and WAV files.
        :return:
            None
        """
        top_level_dir = self.rendering_params['dir']
        # NOTE(review): ':' in the directory name is not allowed on Windows —
        # confirm that only POSIX systems are targeted.
        now = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S,%f")
        nested_dir = os.path.join(top_level_dir, f"result_{now}")
        os.mkdir(nested_dir)
        midi_path = os.path.join(nested_dir, 'music.mid')
        midi_params = self.rendering_params['midi']
        measure = self.rendering_params['measure_in_seconds']
        create_midi_from_piece(self, midi_path, measure, **midi_params)
        events_path = os.path.join(nested_dir, 'sinethesizer_events.tsv')
        events_params = self.rendering_params['sinethesizer']
        create_events_from_piece(self, events_path, measure, **events_params)
        wav_path = os.path.join(nested_dir, 'music.wav')
        create_wav_from_events(events_path, wav_path)
        lilypond_path = os.path.join(nested_dir, 'sheet_music.ly')
        create_lilypond_file_from_piece(self, lilypond_path)
        create_pdf_sheet_music_with_lilypond(lilypond_path)
from math import ceil
from typing import Callable, Dict, List
from rlmusician.utils.music_theory import ScaleElement, check_consonance
# Time resolution: every measure is split into eight equal steps (eighths).
N_EIGHTHS_PER_MEASURE = 8
# Rhythm rules.
def check_validity_of_rhythmic_pattern(durations: List[int], **kwargs) -> bool:
    """
    Check that current measure is properly divided by notes.
    :param durations:
        durations (in eighths) of all notes from a current measure
        (including a new note); if a new note prolongs to the next measure,
        its full duration is included; however, if the first note starts
        in the previous measure, only its duration within the current measure
        is included
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    # A measure in progress is valid if its durations form a prefix of
    # at least one allowed division (8 marks a note tied over the barline).
    valid_patterns = [
        [4, 4],
        [4, 2, 2],
        [4, 2, 1, 1],
        [2, 2, 2, 2],
        [2, 2, 2, 1, 1],
        [2, 1, 1, 2, 2],
        [4, 8],
        [2, 2, 8],
        [2, 1, 1, 8],
    ]
    n_durations = len(durations)
    return any(
        pattern[:n_durations] == durations
        for pattern in valid_patterns
    )
# Voice leading rules.
def check_stability_of_rearticulated_pitch(
        counterpoint_continuation: 'LineElement',
        movement: int,
        **kwargs
) -> bool:
    """
    Check that a pitch to be rearticulated (repeated) is stable.
    :param counterpoint_continuation:
        current continuation of counterpoint line
    :param movement:
        melodic interval (in scale degrees) for line continuation
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    # The rule constrains only repetitions (zero movement).
    if movement == 0:
        return counterpoint_continuation.scale_element.is_from_tonic_triad
    return True
def check_absence_of_stalled_pitches(
        movement: int,
        past_movements: List[int],
        max_n_repetitions: int = 2,
        **kwargs
) -> bool:
    """
    Check that a pitch is not excessively repeated.
    :param movement:
        melodic interval (in scale degrees) for line continuation
    :param past_movements:
        list of past movements
    :param max_n_repetitions:
        maximum allowed number of repetitions in a row
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    if movement != 0:
        return True
    if len(past_movements) < max_n_repetitions - 1:
        return True
    # Within the recent window, at least one movement must be non-zero.
    recent_window = past_movements[-max_n_repetitions+1:]
    return any(x != 0 for x in recent_window)
def check_absence_of_monotonous_long_motion(
        counterpoint_continuation: 'LineElement',
        current_motion_start_element: 'LineElement',
        max_distance_in_semitones: int = 9,
        **kwargs
) -> bool:
    """
    Check that line does not move too far without any changes in direction.
    :param counterpoint_continuation:
        current continuation of counterpoint line
    :param current_motion_start_element:
        element of counterpoint line such that there are no
        changes in direction after it
    :param max_distance_in_semitones:
        maximum allowed distance (in semitones)
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    new_pitch = counterpoint_continuation.scale_element.position_in_semitones
    motion_origin = (
        current_motion_start_element.scale_element.position_in_semitones
    )
    return abs(new_pitch - motion_origin) <= max_distance_in_semitones
def check_absence_of_skip_series(
        movement: int,
        past_movements: List[int],
        max_n_skips: int = 2,
        **kwargs
) -> bool:
    """
    Check that there are no long series of skips.
    :param movement:
        melodic interval (in scale degrees) for line continuation
    :param past_movements:
        list of past movements
    :param max_n_skips:
        maximum allowed number of skips in a row
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    # A step (or repetition) always breaks a series of skips.
    if abs(movement) <= 1:
        return True
    if len(past_movements) < max_n_skips:
        return True
    recent_movements = past_movements[-max_n_skips:]
    return not all(abs(x) > 1 for x in recent_movements)
def check_that_skip_is_followed_by_opposite_step_motion(
        movement: int,
        past_movements: List[int],
        min_n_scale_degrees: int = 3,
        **kwargs
) -> bool:
    """
    Check that after a large skip there is a step motion in opposite direction.
    :param movement:
        melodic interval (in scale degrees) for line continuation
    :param past_movements:
        list of past movements
    :param min_n_scale_degrees:
        minimum size of a large enough skip (in scale degrees)
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    if not past_movements:
        return True
    last_movement = past_movements[-1]
    if abs(last_movement) < min_n_scale_degrees:
        return True
    # The continuation must be a one-degree step against the skip direction.
    return movement == -last_movement / abs(last_movement)
def check_resolution_of_submediant_and_leading_tone(
        line: List['LineElement'],
        movement: int,
        **kwargs
) -> bool:
    """
    Check that a sequence of submediant and leading tone properly resolves.
    If a line has submediant followed by leading tone, tonic must be used
    after leading tone, because there is strong attraction to it;
    similarly, if a line has leading tone followed by submediant,
    dominant must be used after submediant.
    :param line:
        counterpoint line in progress
    :param movement:
        melodic interval (in scale degrees) for line continuation
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    if len(line) < 2:
        return True
    last_two_degrees = (
        line[-2].scale_element.degree,
        line[-1].scale_element.degree
    )
    if last_two_degrees == (7, 6):
        # Submediant after leading tone resolves down to the dominant.
        return movement == -1
    if last_two_degrees == (6, 7):
        # Leading tone after submediant resolves up to the tonic.
        return movement == 1
    return True
def check_step_motion_to_final_pitch(
        counterpoint_continuation: 'LineElement',
        counterpoint_end: ScaleElement,
        piece_duration: int,
        prohibit_rearticulation: bool = True,
        **kwargs
) -> bool:
    """
    Check that there is a way to reach final pitch with step motion.
    :param counterpoint_continuation:
        current continuation of counterpoint line
    :param counterpoint_end:
        element that ends counterpoint line
    :param piece_duration:
        total duration of piece (in eighths)
    :param prohibit_rearticulation:
        if it is set to `True`, the last but one pitch can not be the same as
        the final pitch
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    degrees_to_go = abs(
        counterpoint_continuation.scale_element.position_in_degrees
        - counterpoint_end.position_in_degrees
    )
    # The final measure is excluded, because it holds the predefined end note.
    eighths_left = (
        piece_duration - N_EIGHTHS_PER_MEASURE
        - counterpoint_continuation.end_time_in_eighths
    )
    # At worst, step motion proceeds in quarter notes.
    quarters_left = ceil(eighths_left / 2)
    if degrees_to_go == 0 and quarters_left == 0:
        return not prohibit_rearticulation
    return degrees_to_go <= quarters_left + 1
# Harmony rules.
def check_consonance_on_strong_beat(
        counterpoint_continuation: 'LineElement',
        cantus_firmus_elements: List['LineElement'],
        **kwargs
) -> bool:
    """
    Check that there is consonance if current beat is strong.
    :param counterpoint_continuation:
        current continuation of counterpoint line
    :param cantus_firmus_elements:
        list of elements from cantus firmus that sound simultaneously with
        the counterpoint element
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    # Strong beats start at multiples of a half measure (4 eighths).
    is_strong_beat = counterpoint_continuation.start_time_in_eighths % 4 == 0
    if not is_strong_beat:
        return True
    return check_consonance(
        counterpoint_continuation.scale_element,
        cantus_firmus_elements[0].scale_element
    )
def check_step_motion_to_dissonance(
        counterpoint_continuation: 'LineElement',
        cantus_firmus_elements: List['LineElement'],
        movement: int,
        **kwargs
) -> bool:
    """
    Check that there is step motion to a dissonating element.
    Note that this rule prohibits double neighboring tones.
    :param counterpoint_continuation:
        current continuation of counterpoint line
    :param cantus_firmus_elements:
        list of elements from cantus firmus that sound simultaneously with
        the counterpoint element
    :param movement:
        melodic interval (in scale degrees) for line continuation
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    is_consonant = check_consonance(
        counterpoint_continuation.scale_element,
        cantus_firmus_elements[0].scale_element
    )
    if is_consonant:
        return True
    return abs(movement) == 1
def check_step_motion_from_dissonance(
        movement: int,
        is_last_element_consonant: bool,
        **kwargs
) -> bool:
    """
    Check that there is step motion from a dissonating element.
    Note that this rule prohibits double neighboring tones.
    :param movement:
        melodic interval (in scale degrees) for line continuation
    :param is_last_element_consonant:
        indicator whether last element of counterpoint line (not including
        a new continuation in question) forms consonance with cantus firmus
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    if is_last_element_consonant:
        return True
    # A dissonance may be left only by a one-degree step.
    return abs(movement) == 1
def check_resolution_of_suspended_dissonance(
        line: List['LineElement'],
        movement: int,
        counterpoint_continuation: 'LineElement',
        cantus_firmus_elements: List['LineElement'],
        is_last_element_consonant: bool,
        **kwargs
) -> bool:
    """
    Check that suspended dissonance is resolved by downward step motion.
    :param line:
        counterpoint line in progress
    :param movement:
        melodic interval (in scale degrees) for line continuation
    :param counterpoint_continuation:
        current continuation of counterpoint line
    :param cantus_firmus_elements:
        list of elements from cantus firmus that sound simultaneously with
        the counterpoint element
    :param is_last_element_consonant:
        indicator whether last element of counterpoint line (not including
        a new continuation in question) forms consonance with cantus firmus
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    last_element = line[-1]
    last_duration = (
        last_element.end_time_in_eighths - last_element.start_time_in_eighths
    )
    # Only a note lasting a whole measure can be a suspension.
    if last_duration != N_EIGHTHS_PER_MEASURE:
        return True
    if is_last_element_consonant:
        return True
    if movement != -1:
        return False
    # The resolution itself must be consonant with the current measure.
    return check_consonance(
        counterpoint_continuation.scale_element,
        cantus_firmus_elements[-1].scale_element
    )
def check_absence_of_large_intervals(
        counterpoint_continuation: 'LineElement',
        cantus_firmus_elements: List['LineElement'],
        max_n_semitones: int = 16,
        **kwargs
) -> bool:
    """
    Check that there are no large intervals between adjacent pitches.
    :param counterpoint_continuation:
        current continuation of counterpoint line
    :param cantus_firmus_elements:
        list of elements from cantus firmus that sound simultaneously with
        the counterpoint element
    :param max_n_semitones:
        maximum allowed interval in semitones between two
        simultaneously sounding pitches
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    cpt_pitch = counterpoint_continuation.scale_element.position_in_semitones
    # Every simultaneously sounding cantus firmus pitch must be close enough.
    return all(
        abs(cpt_pitch - element.scale_element.position_in_semitones)
        <= max_n_semitones
        for element in cantus_firmus_elements
    )
def check_absence_of_lines_crossing(
        counterpoint_continuation: 'LineElement',
        cantus_firmus_elements: List['LineElement'],
        is_counterpoint_above: bool,
        prohibit_unisons: bool = True,
        **kwargs
) -> bool:
    """
    Check that there are no lines crossings.
    :param counterpoint_continuation:
        current continuation of counterpoint line
    :param cantus_firmus_elements:
        list of elements from cantus firmus that sound simultaneously with
        the counterpoint element
    :param is_counterpoint_above:
        indicator whether counterpoint must be above cantus firmus
    :param prohibit_unisons:
        if it is set to `True`, unison are considered a special case of
        lines crossing
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    # +1 if counterpoint must stay above, -1 if it must stay below.
    sign = 1 if is_counterpoint_above else -1
    cpt_pitch = counterpoint_continuation.scale_element.position_in_semitones
    for element in cantus_firmus_elements:
        cf_pitch = element.scale_element.position_in_semitones
        gap = sign * (cpt_pitch - cf_pitch)
        # Negative gap means the lines crossed; zero gap is a unison.
        if gap < 0 or (prohibit_unisons and gap == 0):
            return False
    return True
def check_absence_of_overlapping_motion(
        counterpoint_continuation: 'LineElement',
        previous_cantus_firmus_element: 'LineElement',
        is_counterpoint_above: bool,
        **kwargs
) -> bool:
    """
    Check that there is no overlapping motion.
    :param counterpoint_continuation:
        current continuation of counterpoint line
    :param previous_cantus_firmus_element:
        the latest element of cantus firmus that sounds simultaneously
        with the last counterpoint element (excluding its continuation)
    :param is_counterpoint_above:
        indicator whether counterpoint must be above cantus firmus
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    sign = 1 if is_counterpoint_above else -1
    cpt_pitch = counterpoint_continuation.scale_element.position_in_semitones
    cf_pitch = (
        previous_cantus_firmus_element.scale_element.position_in_semitones
    )
    # The new counterpoint pitch must stay strictly on its side of the
    # previous cantus firmus pitch.
    return sign * (cpt_pitch - cf_pitch) > 0
# Registry.
def get_rules_registry() -> Dict[str, Callable]:
    """
    Get mapping from names to corresponding functions that check rules.
    :return:
        registry of functions checking rules of rhythm, voice leading,
        and harmony
    """
    rhythm_rules = {
        'rhythmic_pattern_validity': check_validity_of_rhythmic_pattern,
    }
    voice_leading_rules = {
        'rearticulation_stability': check_stability_of_rearticulated_pitch,
        'absence_of_stalled_pitches': check_absence_of_stalled_pitches,
        'absence_of_long_motion': check_absence_of_monotonous_long_motion,
        'absence_of_skip_series': check_absence_of_skip_series,
        'turn_after_skip': check_that_skip_is_followed_by_opposite_step_motion,
        'VI_VII_resolution': check_resolution_of_submediant_and_leading_tone,
        'step_motion_to_end': check_step_motion_to_final_pitch,
    }
    harmony_rules = {
        'consonance_on_strong_beat': check_consonance_on_strong_beat,
        'step_motion_to_dissonance': check_step_motion_to_dissonance,
        'step_motion_from_dissonance': check_step_motion_from_dissonance,
        'resolution_of_suspended_dissonance': check_resolution_of_suspended_dissonance,
        'absence_of_large_intervals': check_absence_of_large_intervals,
        'absence_of_lines_crossing': check_absence_of_lines_crossing,
        'absence_of_overlapping_motion': check_absence_of_overlapping_motion,
    }
    return {**rhythm_rules, **voice_leading_rules, **harmony_rules}
from collections import Counter
from typing import Any, Callable, Dict, Optional
import numpy as np
from scipy.stats import entropy
from rlmusician.environment.piece import Piece
from rlmusician.utils import rolling_aggregate
def evaluate_absence_of_looped_fragments(
        piece: Piece, min_size: int = 4, max_size: Optional[int] = None
) -> float:
    """
    Evaluate non-triviality of a piece based on absence of looped fragments.
    :param piece:
        `Piece` instance
    :param min_size:
        minimum duration of a fragment (in eighths)
    :param max_size:
        maximum duration of a fragment (in eighths)
    :return:
        multiplied by -1 number of looped fragments
    """
    # A falsy `max_size` defaults to half of the piece duration.
    max_size = max_size or piece.total_duration_in_eighths // 2
    n_loops = 0
    for size in range(min_size, max_size + 1):
        # A fragment starting at `position` is compared with the fragment of
        # the same size immediately following it, so both must fit.
        last_valid_start = piece.total_duration_in_eighths - 2 * size
        # Also stay out of the final measure (8 eighths).
        penultimate_measure_end = piece.total_duration_in_eighths - 8
        last_valid_start = min(last_valid_start, penultimate_measure_end - 1)
        for position in range(last_valid_start + 1):
            fragment = piece.piano_roll[:, position:position + size]
            next_fragment = piece.piano_roll[:, position + size:position + 2 * size]
            if np.array_equal(fragment, next_fragment):
                n_loops += 1
    return -n_loops
def evaluate_entropy(piece: Piece) -> float:
    """
    Evaluate non-triviality of counterpoint line based on entropy.
    :param piece:
        `Piece` instance
    :return:
        normalized average over all lines entropy of pitches distribution
    """
    n_elements = len(piece.counterpoint)
    counter = Counter(
        element.scale_element.position_in_degrees
        for element in piece.counterpoint
    )
    # Restrict the support to the declared pitch range of the line.
    lower_position = piece.lowest_element.position_in_degrees
    upper_position = piece.highest_element.position_in_degrees
    available_elements = piece.scale.elements[lower_position:upper_position + 1]
    distribution = [
        counter[element.position_in_degrees] / n_elements
        for element in available_elements
    ]
    # Normalize by the entropy of the uniform distribution over the range,
    # so the score lies in [0, 1].
    uniform = [1 / len(available_elements)] * len(available_elements)
    return entropy(distribution) / entropy(uniform)
def evaluate_absence_of_narrow_ranges(
        piece: Piece, min_size: int = 9,
        penalties: Optional[Dict[int, float]] = None
) -> float:
    """
    Evaluate melodic fluency based on absence of narrow ranges.
    :param piece:
        `Piece` instance
    :param min_size:
        minimum size of narrow range (in line elements)
    :param penalties:
        mapping from width of a range (in scale degrees) to penalty
        applicable to ranges of not greater width
    :return:
        multiplied by -1 count of narrow ranges weighted based on their width
    """
    penalties = penalties or {2: 1, 3: 0.5}
    pitches = [
        element.scale_element.position_in_degrees
        for element in piece.counterpoint
    ]
    # Rolling min/max over windows of `min_size` elements; the first
    # min_size - 1 positions correspond to incomplete windows and are dropped.
    lower_bounds = rolling_aggregate(pitches, min, min_size)[min_size-1:]
    upper_bounds = rolling_aggregate(pitches, max, min_size)[min_size-1:]
    total_penalty = 0
    for lower_bound, upper_bound in zip(lower_bounds, upper_bounds):
        range_width = upper_bound - lower_bound
        # The harshest penalty whose width threshold covers this range.
        applicable = [
            penalty for width, penalty in penalties.items()
            if width >= range_width
        ]
        total_penalty += max(applicable, default=0)
    return -total_penalty
def evaluate_climax_explicity(
        piece: Piece,
        shortage_penalty: float = 0.3, duplication_penalty: float = 0.5
) -> float:
    """
    Evaluate goal-orientedness of counterpoint line based on climax explicity.
    :param piece:
        `Piece` instance
    :param shortage_penalty:
        penalty for each scale degree between declared highest pitch of a line
        and actual highest pitch of this line
    :param duplication_penalty:
        penalty for each non-first occurrence of line's highest pitch within
        this line
    :return:
        one minus all applicable penalties
    """
    # Track the running maximum and how many times it reoccurs after its
    # first appearance.
    actual_max = piece.counterpoint[0].scale_element.position_in_degrees
    n_repetitions = 0
    for element in piece.counterpoint[1:]:
        position = element.scale_element.position_in_degrees
        if position > actual_max:
            actual_max = position
            n_repetitions = 0
        elif position == actual_max:
            n_repetitions += 1
    shortage = piece.highest_element.position_in_degrees - actual_max
    return 1 - shortage_penalty * shortage - duplication_penalty * n_repetitions
def evaluate_number_of_skips(
        piece: Piece, rewards: Optional[Dict[int, float]] = None
) -> float:
    """
    Evaluate interestingness/coherency of counterpoint based on skips number.
    :param piece:
        `Piece` instance
    :param rewards:
        mapping from number of skips to reward
    :return:
        reward assigned to balancing between interestingess and coherency
        of counterpoint line
    """
    rewards = rewards or {1: 0.8, 2: 0.9, 3: 1, 4: 0.9, 5: 0.5, 6: 0.25}
    # A skip is any melodic interval larger than one scale degree.
    n_skips = sum(abs(movement) > 1 for movement in piece.past_movements)
    return rewards.get(n_skips, 0)
def get_scoring_functions_registry() -> Dict[str, Callable]:
    """
    Get mapping from names of scoring functions to scoring functions.
    :return:
        registry of scoring functions
    """
    return {
        'looped_fragments': evaluate_absence_of_looped_fragments,
        'entropy': evaluate_entropy,
        'narrow_ranges': evaluate_absence_of_narrow_ranges,
        'climax_explicity': evaluate_climax_explicity,
        'number_of_skips': evaluate_number_of_skips,
    }
def evaluate(
        piece: Piece,
        scoring_coefs: Dict[str, float],
        scoring_fn_params: Dict[str, Dict[str, Any]],
        verbose: bool = False
) -> float:
    """
    Evaluate piece.
    :param piece:
        `Piece` instance
    :param scoring_coefs:
        mapping from scoring function names to their weights in final score
    :param scoring_fn_params:
        mapping from scoring function names to their parameters
    :param verbose:
        if it is set to `True`, scores are printed with detailing by functions
    :return:
        weighted sum of scores returned by various scoring functions
    """
    registry = get_scoring_functions_registry()
    total_score = 0
    for fn_name, weight in scoring_coefs.items():
        scoring_fn = registry[fn_name]
        fn_params = scoring_fn_params.get(fn_name, {})
        partial_score = weight * scoring_fn(piece, **fn_params)
        if verbose:
            print(f'{fn_name:>30}: {partial_score}')  # pragma: no cover
        total_score += partial_score
    return total_score
from typing import Any, Dict, List, Tuple
import gym
import numpy as np
from rlmusician.environment.piece import Piece
from rlmusician.environment.evaluation import evaluate
from rlmusician.utils import convert_to_base
class CounterpointEnv(gym.Env):
    """
    An environment where counterpoint line is composed given cantus firmus.
    """
    reward_range = (-np.inf, np.inf)
    # Allowed note durations (in eighths); an action selects one of them.
    valid_durations = [1, 2, 4, 8]
    def __init__(
            self,
            piece: Piece,
            scoring_coefs: Dict[str, float],
            scoring_fn_params: Dict[str, Dict[str, Any]],
            reward_for_dead_end: float,
            verbose: bool = False
    ):
        """
        Initialize instance.
        :param piece:
            data structure representing musical piece with florid counterpoint
        :param scoring_coefs:
            mapping from scoring function names to their weights in final score
        :param scoring_fn_params:
            mapping from scoring function names to their parameters
        :param reward_for_dead_end:
            reward for situations where there aren't any allowed actions, but
            piece is not finished
        :param verbose:
            if it is set to `True`, breakdown of reward is printed at episode
            end
        """
        self.piece = piece
        self.scoring_coefs = scoring_coefs
        self.scoring_fn_params = scoring_fn_params
        self.reward_for_dead_end = reward_for_dead_end
        self.verbose = verbose
        # One discrete action per (movement, duration) pair.
        n_actions = len(piece.all_movements) * len(self.valid_durations)
        self.action_space = gym.spaces.Discrete(n_actions)
        self.action_to_line_continuation = None
        self.__set_action_to_line_continuation()
        # An observation is the piano roll of the piece (a 0/1 matrix).
        self.observation_space = gym.spaces.Box(
            low=0,
            high=1,
            shape=self.piece.piano_roll.shape,
            dtype=np.int32
        )
    def __set_action_to_line_continuation(self) -> None:
        """Create mapping from action to a pair of movement and duration."""
        # An action id is decoded as a two-digit number in base
        # `len(all_movements)`: one digit selects the duration (as a power
        # of two), the other the movement.
        base = len(self.piece.all_movements)
        required_len = 2
        # NOTE(review): the generator wrapper is consumed exactly once in the
        # unpacking below; a plain tuple would be equivalent and clearer.
        raw_mapping = {
            action: (x for x in convert_to_base(action, base, required_len))
            for action in range(self.action_space.n)
        }
        # Shift movement ids by `max_skip` so that movements are centered at
        # zero (presumably ids run over [0, 2 * max_skip] — TODO confirm
        # against `convert_to_base` and `Piece.all_movements`).
        offset = self.piece.max_skip
        action_to_continuation = {
            action: (movement_id - offset, 2 ** duration_id)
            for action, (duration_id, movement_id) in raw_mapping.items()
        }
        self.action_to_line_continuation = action_to_continuation
    @property
    def valid_actions(self) -> List[int]:
        """Get actions that are valid at the current step."""
        # Rule checking is delegated to the piece for every candidate action.
        valid_actions = [
            i for i in range(self.action_space.n)
            if self.piece.check_validity(*self.action_to_line_continuation[i])
        ]
        return valid_actions
    def step(self, action: int) -> Tuple[np.ndarray, float, bool, Dict]:
        """
        Run one step of the environment's dynamics.
        :param action:
            an action provided by an agent to the environment
        :return:
            a tuple of:
            - observation: agent's observation of the current environment,
            - reward: amount of reward returned after previous action,
            - done: whether the episode has ended, in which case further
              `step()` calls will return undefined results,
            - info: list of next allowed actions
        """
        movement, duration = self.action_to_line_continuation[action]
        self.piece.add_line_element(movement, duration)
        observation = self.piece.piano_roll
        info = {'next_actions': self.valid_actions}
        past_duration = self.piece.current_time_in_eighths
        piece_duration = self.piece.total_duration_in_eighths
        finished = past_duration == piece_duration
        # Dead end: the piece is unfinished, but no continuation is allowed.
        no_more_actions = len(info['next_actions']) == 0
        done = finished or no_more_actions
        if finished:
            # Reward is granted only at a successful episode end.
            reward = evaluate(
                self.piece,
                self.scoring_coefs,
                self.scoring_fn_params,
                self.verbose
            )
        elif no_more_actions:
            reward = self.reward_for_dead_end
        else:
            reward = 0
        return observation, reward, done, info
    def reset(self) -> np.ndarray:
        """
        Reset the state of the environment and return an initial observation.
        :return:
            initial observation
        """
        self.piece.reset()
        initial_observation = self.piece.piano_roll
        return initial_observation
def render(self, mode='human'): # pragma: no cover.
"""
Save piece in various formats.
:return:
None
"""
self.piece.render() | /rl-musician-0.4.6.tar.gz/rl-musician-0.4.6/rlmusician/environment/environment.py | 0.94156 | 0.552841 | environment.py | pypi |
# %% auto 0
__all__ = ['constant_velocity_generator', 'mfpt_rw', 'mfpt_informed_rw', 'rw_generator', 'exp_time_generator', 'Biexp',
'biexp_time_generator', 'constant_velocity_generator_2D', 'mfpt_rw_2D', 'mfpt_informed_rw_2D']
# %% ../nbs/lib_nbs/05_mfpt.ipynb 3
import numpy as np
from tqdm.notebook import tqdm, trange
from tqdm import tqdm as tqdm_term
from typing import Callable
from .analytics import pdf_powerlaw, pdf_discrete_sample
# %% ../nbs/lib_nbs/05_mfpt.ipynb 4
# This package is not included in the requirements and should be installed by hand
from joblib import Parallel, delayed
# %% ../nbs/lib_nbs/05_mfpt.ipynb 7
def constant_velocity_generator(N, # Number of trajectories
                                T, # Length of trajectories
                                time_sampler, # Sampler of time of constant velocity
                                velocity = 1, # Velocity (displacement per unit time)
                                **sample_args # Optional arguments for the sampler
                               ):
    ''' Given a sampler for length of time steps, generates a trajectory considering a constant
    velocity in the sampled times. After each time step, we sample a new direction.

    Returns an array of shape (N, T); each trajectory starts at 0.

    FIX: `velocity` was previously declared but never applied; it now scales
    the displacement of every segment (default value 1 reproduces the old
    behavior exactly).
    '''
    trajs = np.zeros((N, T))
    for idxN in range(N):
        # One extra sample guarantees enough segments to fill T steps.
        times = time_sampler(num_samples = T+1, **sample_args).astype(int)
        # Cut the times vector where the cumulative duration first exceeds T+1.
        max_t = np.argwhere(np.cumsum(times) > T+1).flatten()
        if len(max_t) > 0:
            max_t = max_t[0]
        else:
            max_t = T+1
        traj = np.array([0])
        cumsum = 0
        for time in times[:max_t+1]:
            # Constant-velocity segment in a random direction (+1 or -1),
            # continued from the last position.
            direction = 2*np.random.randint(0, 2) - 1
            segment = direction*velocity*np.arange(1, min(time+1, T+1)) + traj[-1]
            traj = np.append(traj, segment)
            cumsum += len(segment)
            if cumsum > T:
                break
        trajs[idxN] = traj[:T]
    return trajs
# %% ../nbs/lib_nbs/05_mfpt.ipynb 12
from typing import Callable
def mfpt_rw(N:int, # Number of trajectories
            T:int, # Lengths of trajectories
            x0:float, # Starting point of walk
            Ls:list, # Boundary position
            traj_generator:Callable, # Function generating the walk (must start in x0 = zero)
            max_loop = 5, # Max number of while loop if some walks do not reach the boundary
            save = None, # If not None, prefix of the .npy files where results are stored
            **args_generator # Arguments of the trajectory generator
           ): # MFPT vs. L
    ''' Calculates MFPT to boundaries at 0 and L starting in x0.

    Trajectories are simulated in chunks of length T: walkers that have not
    hit a boundary after a chunk are continued from their last position, up
    to `max_loop` extra chunks. Walkers that never pass are excluded from
    the mean. Returns a list with one MFPT per value in Ls.
    '''
    # All walks start at x0 (the generator must start its walks at zero).
    trajs = traj_generator(N,T,**args_generator) + x0
    if save is not None:
        statistics_fpt = np.zeros((len(Ls), N))
    mfpt = []
    for idxL, L in tqdm_term(enumerate(Ls)):
        # First index where a walk is outside (0, L); np.argmax returns 0
        # when the walk never leaves, so 0 acts as a "not passed" sentinel
        # (a walk cannot pass at t = 0 because it starts inside).
        first_passage = np.argmax((trajs < 0) | (trajs > L), axis = 1)
        # Enlarging the time of those who did not hit the boundaries
        n_loop = 0
        non_passed = np.argwhere(first_passage == 0).flatten()
        last_non_passed = trajs[non_passed, -1]
        while len(non_passed) > 0 and n_loop < max_loop:
            # Continue the unfinished walks from their last positions.
            trajs_upd = traj_generator(len(non_passed),T,**args_generator) + last_non_passed[..., np.newaxis]
            # Finding the FPT of the new trajectories
            first_upd = np.argmax((trajs_upd < 0) | (trajs_upd > L), axis = 1)
            # Adding these to the main FPT array, considering that loop number;
            # multiplying by the boolean mask keeps the 0 sentinel for walks
            # that still did not pass.
            first_passage[non_passed] = (first_upd + (n_loop+1)*T)*first_upd.astype(bool)
            # Checking the ones that still did not pass
            non_passed = np.argwhere(first_passage == 0).flatten()
            # updating the last element of the non passed trajectories
            upd_non_passed = np.argwhere(first_upd == 0).flatten()
            if len(non_passed) != len(upd_non_passed): print('ERROR')
            last_non_passed = trajs_upd[upd_non_passed, -1]
            # updating loop num
            n_loop += 1
        if save is not None:
            statistics_fpt[idxL] = first_passage.copy()
        # Calculating mean considering that some may not have passed
        first_passage = first_passage[first_passage != 0].mean()
        # Adding this MFPT for current L
        mfpt.append(first_passage)
    if save is not None:
        np.save(save+'.npy', mfpt)
        np.save(save+'_stats.npy', statistics_fpt)
    return mfpt
# %% ../nbs/lib_nbs/05_mfpt.ipynb 14
def mfpt_informed_rw(N:int, # Number of trajectories
                     T:int, # Lengths of trajectories
                     x0:float, # Starting point of walk
                     Ls:list, # Boundary position
                     traj_generator:Callable, # Function generating the walk (must start in x0 = zero)
                     max_loop = 5, # Max number of while loop if some walks do not reach the boundary
                     time_sampler = None, # Sampler of segment durations, forwarded to the generator
                     save = None, # If not None, prefix of the .npy files where results are stored
                     **args_generator # Optional
                    ): # MFPT vs. L
    ''' Calculates MFPT to boundaries at 0 and L starting in x0 with a traj generator informed on the scales of the system (x0 and L)'''
    if save is not None:
        statistics_fpt = np.zeros((len(Ls), N))
    mfpt = []
    for idxL, L in tqdm_term(enumerate(Ls)):
        # The generator is re-invoked per L so it can adapt its scales to (x0, L).
        trajs = traj_generator(N, T, time_sampler = time_sampler, x0 = x0, L = L, informed = True)
        # First index outside (0, L); np.argmax returns 0 when the walk never
        # leaves, so 0 acts as a "not passed" sentinel.
        first_passage = np.argmax((trajs < 0) | (trajs > L), axis = 1)
        # Enlarging the time of those who did not hit the boundaries
        n_loop = 0
        non_passed = np.argwhere(first_passage == 0).flatten()
        while len(non_passed) > 0 and n_loop < max_loop:
            last_non_passed = trajs[non_passed, -1]
            trajs_upd = traj_generator(len(non_passed),T,
                                       time_sampler = time_sampler, x0 = x0, L = L,
                                       informed = True) + last_non_passed[..., np.newaxis]
            # updating trajectories with new positions. This makes that last_non_passed is always correct
            trajs[non_passed, -1] = trajs_upd[:, -1]
            # Finding the FPT of the new trajectories
            first_upd = np.argmax((trajs_upd < 0) | (trajs_upd > L), axis = 1)
            # Adding these to the main FPT array, considering that loop number;
            # the boolean mask keeps the 0 sentinel for walks that did not pass.
            first_passage[non_passed] = (first_upd + (n_loop+1)*T)*first_upd.astype(bool)
            # Checking the ones that still did not pass
            non_passed = np.argwhere(first_passage == 0).flatten()
            # updating loop num
            n_loop += 1
        if save is not None:
            statistics_fpt[idxL] = first_passage.copy()
        # Calculating mean considering that some may not have passed
        first_passage = first_passage[first_passage != 0].mean()
        # Adding this MFPT for current L
        mfpt.append(first_passage)
    if save is not None:
        np.save(save+'.npy', mfpt)
        np.save(save+'_stats.npy', statistics_fpt)
    return mfpt
# %% ../nbs/lib_nbs/05_mfpt.ipynb 17
def rw_generator(N, T):
    """Generate N simple random walks of length T.

    Each step is +1 or -1 with equal probability; the first step is zeroed
    so that every walk starts at position 0.
    """
    steps = 2 * np.random.randint(0, 2, size=(N, T)) - 1
    steps[:, 0] = 0
    return np.cumsum(steps, axis=1)
# %% ../nbs/lib_nbs/05_mfpt.ipynb 21
def exp_time_generator(num_samples):
    """Sample exponential waiting times shifted by one, so every time >= 1."""
    return 1 + np.random.exponential(size=num_samples)
# %% ../nbs/lib_nbs/05_mfpt.ipynb 25
from scipy.stats.sampling import NumericalInverseHermite
class Biexp():
    """Biexponential distribution used to sample waiting times.

    pdf(x) = (w1/d1) exp(-x/d1) + ((1 - w1)/d2) exp(-x/d2) for x >= 0.

    If `informed` is True, the mixture parameters are derived from the
    environment scales `x0` (starting point) and `L` (boundary position);
    otherwise `w1`, `d1` and `d2` are taken directly from the keyword args.
    """
    def __init__(self, informed = False, **args):
        if not informed:
            self.w1 = args['w1']
            self.d1 = args['d1']
            self.d2 = args['d2']
        else:
            # Heuristic choice of mixture weight and decay scales based on
            # the geometry of the search problem.
            self.w1 = 2*args['x0']/args['L']
            self.d1 = 3*args['L']
            self.d2 = args['x0']
    def pdf(self, x):
        """Probability density function."""
        return (self.w1/self.d1)*np.exp(-x/self.d1) + ((1 - self.w1)/ self.d2)*np.exp(-x/self.d2)
    def cdf(self, x):
        """Cumulative distribution function (cdf(0) = 0, cdf(inf) = 1)."""
        return 1 + (self.w1-1)*np.exp(-x/self.d2) - self.w1*np.exp(-x/self.d1)
    def dpdf(self, x):
        """Derivative of the pdf.

        FIX: the pdf is a sum of decaying exponentials, so its derivative is
        negative; the previous implementation was missing the minus signs,
        feeding a wrong derivative to `NumericalInverseHermite`.
        """
        return -(self.w1/self.d1**2)*np.exp(-x/self.d1) - ((1 - self.w1)/ self.d2**2)*np.exp(-x/self.d2)
    def sample(self, num_rn):
        """Draw `num_rn` random variates via numerical inversion of the cdf."""
        return NumericalInverseHermite(self, domain=(0, np.inf)).rvs(int(num_rn))
# %% ../nbs/lib_nbs/05_mfpt.ipynb 26
def biexp_time_generator(num_samples, **sample_kwargs):
    """Sample `num_samples` biexponential waiting times shifted by one."""
    return Biexp(**sample_kwargs).sample(num_samples) + 1
# %% ../nbs/lib_nbs/05_mfpt.ipynb 62
def constant_velocity_generator_2D(N, # Number of trajectories
                                   T, # Length of trajectories
                                   time_sampler, # Sampler of time of constant velocity
                                   velocity = 1, # Velocity (displacement per unit time)
                                   **sample_args # Optional arguments for the sampler
                                  ):
    ''' Given a sampler for length of time steps, generates a 2D trajectory considering a constant
    velocity in the sampled times. After each time step, we sample a new direction.

    Returns two arrays of shape (N, T) with the x and y coordinates; each
    trajectory starts at the origin.

    FIX: `velocity` was previously declared but never applied; it now scales
    the displacement of every segment (default value 1 reproduces the old
    behavior exactly).
    '''
    trajs_x = np.zeros((N, T))
    trajs_y = np.zeros((N, T))
    for idxN in range(N):
        # One extra sample guarantees enough segments to fill T steps.
        times = time_sampler(num_samples = T+1, **sample_args).astype(int)
        # If we have too many times (e.g. because we sampled big times),
        # we cut the times vector with max_t.
        max_t = np.argwhere(np.cumsum(times) > T+1).flatten()
        if len(max_t) > 0:
            max_t = max_t[0]
        else:
            max_t = T+1
        traj_x = np.array([0])
        traj_y = np.array([0])
        cumsum = 0
        for time in times[:max_t+1]:
            # Constant-velocity segment in a uniformly random direction,
            # continued from the last position.
            angle = np.random.rand()*2*np.pi
            segment_x = velocity*np.cos(angle)*np.arange(1, min(time+1, T+1)) + traj_x[-1]
            segment_y = velocity*np.sin(angle)*np.arange(1, min(time+1, T+1)) + traj_y[-1]
            traj_x = np.append(traj_x, segment_x)
            traj_y = np.append(traj_y, segment_y)
            cumsum += len(segment_x)
            if cumsum > T:
                break
        trajs_x[idxN] = traj_x[:T]
        trajs_y[idxN] = traj_y[:T]
    return trajs_x, trajs_y
# %% ../nbs/lib_nbs/05_mfpt.ipynb 66
from typing import Callable
def mfpt_rw_2D(N:int, # Number of trajectories
               T:int, # Lengths of trajectories
               x0:list, # Starting point of walk (in 2d)
               Ls:list, # Boundary position
               traj_generator:Callable, # Function generating the walk (must start in x0 = zero)
               max_loop = 5, # Max number of while loop if some walks do not reach the boundary
               **args_generator # Arguments of the trajectory generator
              ): # MFPT vs. L
    ''' Calculates MFPT to boundaries at 0 and L starting in x0 in 2D.

    A walker passes when either coordinate leaves (-L**2, L) (the lower
    bound -L**2 effectively makes the upper boundary the relevant one).
    Unfinished walkers are continued for up to `max_loop` extra chunks;
    walkers that never pass are excluded from the mean.
    '''
    trajs_x, trajs_y = traj_generator(N, T, **args_generator)
    trajs_x += x0[0]
    trajs_y += x0[1]
    mfpt = []
    for L in tqdm_term(Ls):
        # First index where each coordinate is outside its boundaries;
        # np.argmax returns 0 when a coordinate never leaves, so 0 acts as
        # a "not passed" sentinel.
        first_passage_x = np.argmax((trajs_x < -L**2) | (trajs_x > L), axis = 1).astype(float)
        first_passage_y = np.argmax((trajs_y < -L**2) | (trajs_y > L), axis = 1).astype(float)
        # Needed transformation to correctly find the minimum considering the presence of zeros
        first_passage_x[first_passage_x == 0] = np.nan
        first_passage_y[first_passage_y == 0] = np.nan
        first_passage = np.nanmin(np.vstack((first_passage_x, first_passage_y)), axis = 0)
        first_passage[np.isnan(first_passage)] = 0
        # Enlarging the time of those who did not hit the boundaries
        n_loop = 0
        non_passed = np.argwhere(first_passage == 0).flatten()
        # FIX: the y component previously reused trajs_x, so continuations
        # restarted from a wrong y position.
        last_non_passed = (trajs_x[non_passed, -1], trajs_y[non_passed, -1])
        while len(non_passed) > 0 and n_loop < max_loop:
            # NOTE(review): continuations use rw_generator rather than
            # traj_generator (unlike the 1D version) — confirm this is intended.
            trajs_upd_x = rw_generator(len(non_passed),T) + last_non_passed[0][..., np.newaxis]
            trajs_upd_y = rw_generator(len(non_passed),T) + last_non_passed[1][..., np.newaxis]
            # Finding the FPT of the new trajectories
            first_upd_x = np.argmax((trajs_upd_x < -L**2) | (trajs_upd_x > L), axis = 1).astype(float)
            first_upd_y = np.argmax((trajs_upd_y < -L**2) | (trajs_upd_y > L), axis = 1).astype(float)
            # Needed transformation to correctly find the minimum considering the presence of zeros
            first_upd_x[first_upd_x == 0] = np.nan
            first_upd_y[first_upd_y == 0] = np.nan
            first_upd = np.nanmin(np.vstack((first_upd_x, first_upd_y)), axis = 0)
            first_upd[np.isnan(first_upd)] = 0
            # Adding these to the main FPT array, considering that loop number
            first_passage[non_passed] = (first_upd + (n_loop+1)*T)*first_upd.astype(bool)
            # Checking the ones that still did not pass
            non_passed = np.argwhere(first_passage == 0).flatten()
            # updating the last element of the non passed trajectories
            upd_non_passed = np.argwhere(first_upd == 0).flatten()
            if len(non_passed) != len(upd_non_passed): print('ERROR')
            last_non_passed = (trajs_upd_x[upd_non_passed, -1], trajs_upd_y[upd_non_passed, -1])
            n_loop += 1
        # Calculating mean considering that some may not have passed
        first_passage = first_passage[first_passage != 0].mean()
        # FIX: report the fraction of walkers that never passed; the original
        # expression divided len(non_passed) by itself and always printed 1.0.
        if len(non_passed) > 0: print(round(len(non_passed)/N, 2))
        # Adding this MFPT for current L
        mfpt.append(first_passage)
    return mfpt
# %% ../nbs/lib_nbs/05_mfpt.ipynb 80
from typing import Callable
def mfpt_informed_rw_2D(N:int, # Number of trajectories
                        T:int, # Lengths of trajectories
                        x0:list, # Starting point of walk (in 2d)
                        Ls:list, # Boundary position
                        traj_generator:Callable, # Function generating the walk (must start in x0 = zero)
                        max_loop = 5, # Max number of while loop if some walks do not reach the boundary
                        time_sampler = None, # Sampler of segment durations, forwarded to the generator
                        **args_generator # Arguments of the trajectory generator
                       ): # MFPT vs. L
    ''' Calculates MFPT to boundaries at 0 and L starting in x0 in 2D with a
    trajectory generator informed on the scales of the system (x0 and L).

    FIX: `time_sampler` is now an explicit keyword parameter; it was
    previously referenced without ever being defined, so every call raised
    a NameError.
    '''
    mfpt = []
    for L in tqdm_term(Ls):
        # The generator is re-invoked per L so it can adapt its scales to (x0, L).
        trajs_x, trajs_y = traj_generator(N, T, time_sampler = time_sampler, x0 = x0, L = L, informed = True)
        trajs_x += x0[0]
        trajs_y += x0[1]
        # First index where each coordinate is outside (0, L); np.argmax
        # returns 0 when a coordinate never leaves ("not passed" sentinel).
        first_passage_x = np.argmax((trajs_x < 0) | (trajs_x > L), axis = 1).astype(float)
        first_passage_y = np.argmax((trajs_y < 0) | (trajs_y > L), axis = 1).astype(float)
        # Needed transformation to correctly find the minimum considering the presence of zeros
        first_passage_x[first_passage_x == 0] = np.nan
        first_passage_y[first_passage_y == 0] = np.nan
        first_passage = np.nanmin(np.vstack((first_passage_x, first_passage_y)), axis = 0)
        first_passage[np.isnan(first_passage)] = 0
        # Enlarging the time of those who did not hit the boundaries
        n_loop = 0
        non_passed = np.argwhere(first_passage == 0).flatten()
        # FIX: the y component previously reused trajs_x, so continuations
        # restarted from a wrong y position.
        last_non_passed = (trajs_x[non_passed, -1], trajs_y[non_passed, -1])
        while len(non_passed) > 0 and n_loop < max_loop:
            # NOTE(review): continuations use plain rw_generator, not the
            # informed generator — confirm this is intended.
            trajs_upd_x = rw_generator(len(non_passed),T) + last_non_passed[0][..., np.newaxis]
            trajs_upd_y = rw_generator(len(non_passed),T) + last_non_passed[1][..., np.newaxis]
            # Finding the FPT of the new trajectories
            first_upd_x = np.argmax((trajs_upd_x < 0) | (trajs_upd_x > L), axis = 1).astype(float)
            first_upd_y = np.argmax((trajs_upd_y < 0) | (trajs_upd_y > L), axis = 1).astype(float)
            # Needed transformation to correctly find the minimum considering the presence of zeros
            first_upd_x[first_upd_x == 0] = np.nan
            first_upd_y[first_upd_y == 0] = np.nan
            first_upd = np.nanmin(np.vstack((first_upd_x, first_upd_y)), axis = 0)
            first_upd[np.isnan(first_upd)] = 0
            # Adding these to the main FPT array, considering that loop number
            first_passage[non_passed] = (first_upd + (n_loop+1)*T)*first_upd.astype(bool)
            # Checking the ones that still did not pass
            non_passed = np.argwhere(first_passage == 0).flatten()
            # updating the last element of the non passed trajectories
            upd_non_passed = np.argwhere(first_upd == 0).flatten()
            if len(non_passed) != len(upd_non_passed): print('ERROR')
            last_non_passed = (trajs_upd_x[upd_non_passed, -1], trajs_upd_y[upd_non_passed, -1])
            n_loop += 1
        # Calculating mean considering that some may not have passed
        first_passage = first_passage[first_passage != 0].mean()
        # Adding this MFPT for current L
        mfpt.append(first_passage)
    return mfpt
# %% auto 0
__all__ = ['learning', 'walk_from_policy', 'agent_efficiency', 'average_search_efficiency']
# %% ../nbs/lib_nbs/02_learning_and_benchmark.ipynb 3
import numpy as np
import pathlib
from .rl_framework import TargetEnv, Forager
from .utils import get_encounters
# %% ../nbs/lib_nbs/02_learning_and_benchmark.ipynb 5
def learning(config, results_path, run):
    """
    Training of the RL agent
    Parameters
    ----------
    config : dict
        Dictionary with all the parameters
    results_path : str
        Path to save the results
    run : int
        Agent identifier
    """
    # Simulation parameters
    TIME_EP = config['MAX_STEP_L'] # time steps per episode
    EPISODES = config['NUM_EPISODES'] # number of episodes
    # Initialize the environment (targets in a periodic world).
    env = TargetEnv(Nt=config['NUM_TARGETS'], L=config['WORLD_SIZE'], r=config['r'], lc=config['lc'])
    # Initialize the agent. The percept space is the discretized step counter
    # (the two extra singleton axes keep the state-space format expected by
    # Forager).
    STATE_SPACE = [np.linspace(0, config['MAX_STEP_L']-1, config['NUM_BINS']), np.arange(1), np.arange(1)]
    NUM_STATES = np.prod([len(i) for i in STATE_SPACE])
    # Default initialization policy (uniform over the two actions).
    # NOTE(review): if PI_INIT is neither 0.5 nor 0.99, INITIAL_DISTR is left
    # undefined and the Forager(...) call below raises a NameError — confirm
    # that configs only ever use these two values.
    if config['PI_INIT'] == 0.5:
        INITIAL_DISTR = None
    # Biased initialization policy: continue with probability 0.99.
    elif config['PI_INIT'] == 0.99:
        INITIAL_DISTR = []
        for percept in range(NUM_STATES):
            INITIAL_DISTR.append([0.99, 0.01])
    agent = Forager(num_actions=config['NUM_ACTIONS'],
                    state_space=STATE_SPACE,
                    gamma_damping=config['GAMMA'],
                    eta_glow_damping=config['ETA_GLOW'],
                    initial_prob_distr=INITIAL_DISTR)
    for e in range(EPISODES):
        # Initialize environment and the agent's counter and glow matrix.
        env.init_env()
        agent.agent_state = 0
        agent.reset_g()
        for t in range(TIME_EP):
            # Step to set counter to its min value n=1: the very first step of
            # an episode and the step right after a kick are taken in a random
            # direction without learning.
            if t == 0 or env.kicked[0]:
                # do one step with random direction (no learning in this step)
                env.update_pos(1)
                # check boundary conditions
                env.check_bc()
                # reset counter
                agent.agent_state = 0
                # set kicked value to false again
                env.kicked[0] = 0
            else:
                # Standard perceive-decide-act-learn cycle.
                state = agent.get_state()
                # decide
                action = agent.deliberate(state)
                # act (update counter)
                agent.act(action)
                # update positions
                env.update_pos(action)
                # check if target was found + kick if it is
                reward = env.check_encounter()
                # check boundary conditions
                env.check_bc()
                # learn
                agent.learn(reward)
        if (e+1)%500 == 0:
            # Save the h matrix of the agent at this stage of the learning
            # process (checkpoint every 500 episodes).
            np.save(results_path+'memory_agent_'+str(run)+'_episode_'+str(e+1)+'.npy', agent.h_matrix)
# %% ../nbs/lib_nbs/02_learning_and_benchmark.ipynb 7
def walk_from_policy(policy, time_ep, n, L, Nt, r, lc, destructive=False, with_bound=False, bound=100):
    """
    Walk of foragers given a policy. Performance is evaluated as the number of targets found in a fixed time time_ep.
    Parameters
    ----------
    policy : list
        Starting from counter=1, prob of continuing for each counter value.
    time_ep : int
        Number of steps (decisions).
    n : int
        Number of agents that walk in parallel (all with the same policy, they do not interact). This is "number of walks" in the paper.
    L : int
        World size.
    Nt : int
        Number of targets.
    r : float
        Target radius.
    lc : float
        Cutoff length. Agent is displaced a distance lc from the target when it finds it.
    destructive : bool, optional
        True if targets are destructive. The default is False.
    with_bound : bool, optional
        True if policy is cut. The default is False.
    bound : int, optional
        Bound of the policy (maximum value for the counter). The default is 100.
    Returns
    -------
    reward : list, len(reward)=n
        Number of targets found by each agent in time_ep steps of d=1.
    """
    # Initialize agents' positions, directions and counters, and place targets.
    # (A dead (time_ep, n, 2) position buffer that was allocated and seeded
    # with random values but never read has been removed; note this shifts
    # the RNG stream relative to the original implementation.)
    current_pos = np.random.rand(n, 2)*L
    direction = np.random.rand(n)*2*np.pi
    internal_counter = [0]*n
    target_positions = np.random.rand(Nt, 2) * L
    reward = [0]*n
    # Cut policy beyond `bound`. FIX: work on a copy so that the caller's
    # list is not mutated in place.
    if with_bound:
        policy = list(policy)
        policy[bound:] = [0] * (len(policy)-bound)
    for t in range(1, time_ep):
        # Move every agent one unit along its current direction.
        previous_pos = np.copy(current_pos)
        current_pos[:, 0] = previous_pos[:, 0] + np.cos(direction)
        current_pos[:, 1] = previous_pos[:, 1] + np.sin(direction)
        # Which agents encountered which targets during this step.
        encounters = get_encounters(previous_pos, current_pos, target_positions, L, r)
        for ag, num_encounters in enumerate(np.sum(encounters, axis=0)):
            kick = False
            if num_encounters > 0:
                # FIX: take the first (lowest-index) encountered target; the
                # original kept the whole index array, which crashed the kick
                # below whenever an agent hit several targets in one step.
                first_encounter = np.arange(len(target_positions))[encounters[:, ag]][0]
                if destructive:
                    # Target is destroyed; sample position for a new target.
                    target_positions[first_encounter] = np.random.rand(2) * L
                else:
                    # ----KICK----
                    # Displace the agent a distance lc from the found target
                    # in a uniformly random direction.
                    kick_direction = np.random.rand()*2*np.pi
                    current_pos[ag, 0] = target_positions[first_encounter, 0] + lc*np.cos(kick_direction)
                    current_pos[ag, 1] = target_positions[first_encounter, 1] + lc*np.sin(kick_direction)
                    # ------------
                internal_counter[ag] = 0
                reward[ag] += 1
                kick = True
            # Periodic boundary conditions.
            current_pos[ag] %= L
            # Turn with probability 1 - policy[counter]; a kick always resets
            # the counter and draws a fresh direction.
            if np.random.rand() > policy[internal_counter[ag]] or kick:
                internal_counter[ag] = 0
                direction[ag] = np.random.rand()*2*np.pi
            else:
                internal_counter[ag] += 1
    return reward
# %% ../nbs/lib_nbs/02_learning_and_benchmark.ipynb 9
from .utils import get_config, get_policy
def agent_efficiency(results_path, config, run, num_walks, episode_interval):
    """
    Computes the agent's average search efficiency over a number of walks where the agent follows a fixed policy.
    This is repeated with the policies at different stages of the training to analyze the evolution of its performance.
    Parameters
    ----------
    results_path : str
        Path to the results folder, from which to extract the agent's policies.
    config : dict
        Dictionary with all the parameters. It needs to be the same configuration file as the one used to train the agent.
    run : int
        Id of the agent.
    num_walks : int
        Number of (independent) walks.
    episode_interval : int
        Every 'episode_interval' training episodes, the policy of the agent is taken and its performance is analyzed.
    """
    print('Statistics postlearning of agent', run, '\nData obtained from folder: ', results_path)
    for training_episode in range(0, config['NUM_EPISODES'] + 1, episode_interval):
        if training_episode == 0:
            #before training, the policy is flat at the initialization value PI_INIT
            #(generalizes the original special-casing of PI_INIT in {0.99, 0.5})
            frozen_policy = [config['PI_INIT'] for _ in range(config['MAX_STEP_L'])]
        else:
            #get policy from the stored h matrix at the given training_episode
            frozen_policy = get_policy(results_path, run, training_episode)
        #run the walks (in parallel) with the same frozen policy
        rewards = walk_from_policy(policy=frozen_policy,
                                   time_ep=config['MAX_STEP_L'],
                                   n=num_walks,
                                   L=config['WORLD_SIZE'],
                                   Nt=config['NUM_TARGETS'],
                                   r=config['r'],
                                   lc=config['lc'])
        #save results
        np.save(results_path+'performance_post_training_agent_'+str(run)+'_episode_'+str(training_episode)+'.npy', rewards)
# %% ../nbs/lib_nbs/02_learning_and_benchmark.ipynb 12
import pathlib
from ray import tune
from ray.tune.search.bayesopt import BayesOptSearch
from ray.tune.search import ConcurrencyLimiter
from .analytics import get_policy_from_dist, pdf_powerlaw, pdf_multimode
# %% ../nbs/lib_nbs/02_learning_and_benchmark.ipynb 13
def average_search_efficiency(config):
    """
    Get the average search efficiency, considering the benchmark model defined in config.
    Parameters
    ----------
    config : dict
        Dictionary with the configuration of the benchmark model. Must contain the key
        'model' ('powerlaw' or 'double_exp'), the corresponding distribution parameters,
        the walk parameters and 'results_path'.
    Raises
    ------
    ValueError
        If config['model'] is not one of the supported benchmark models.
    """
    #get parameters of the distributions depending on the chosen model
    if config['model'] == 'powerlaw':
        parameters = [config['beta']]
        #get policy from benchmark model
        policy = get_policy_from_dist(n_max = config['time_ep'],
                                      func = pdf_powerlaw,
                                      beta = config['beta']
                                      )
    elif config['model'] == 'double_exp':
        parameters = [config['d_int'], config['d_ext'], config['p']]
        #get policy from benchmark model
        policy = get_policy_from_dist(n_max=config['time_ep'],
                                      func = pdf_multimode,
                                      lambdas = np.array(parameters[:2]),
                                      probs = np.array([parameters[2], 1-parameters[2]])
                                      )
    else:
        #fail fast instead of hitting a NameError on 'policy' below
        raise ValueError(f"Unknown benchmark model: {config['model']!r}")
    #run the walks in parallel
    efficiencies = walk_from_policy(policy=policy,
                                    time_ep=config['time_ep'],
                                    n=config['n'],
                                    L=config['L'],
                                    Nt=config['Nt'],
                                    r=config['r'],
                                    lc=config['lc'])
    #get the mean search efficiency over the walks and report it to Ray Tune
    mean_eff = np.mean(efficiencies)
    tune.report(mean_eff = mean_eff)
    #save results
    if config['results_path']:
        np.save(config['results_path']+'efficiencies_'+ str([np.round(p, 10) for p in parameters])+'.npy', efficiencies)
# %% auto 0
__all__ = ['pdf_multimode', 'pdf_powerlaw', 'pdf_discrete_sample', 'get_policy_from_dist']
# %% ../nbs/lib_nbs/03_analytics.ipynb 2
import numpy as np
# %% ../nbs/lib_nbs/03_analytics.ipynb 5
def pdf_multimode(L: int,              # Either int or array for which pdf is calculated
                  lambdas: np.ndarray, # Scales of each mode
                  probs: np.ndarray    # Probability weight of each mode
                  ) -> np.ndarray:     # Probability of each L
    r''' Computes the discrete PDF of multi-mode exponential of the form
    $$
    \Pr(L) = \sum_{i=1,2} \omega_i (1-e^{-1/\lambda_i}) e^{-(L-1)/\lambda_i} \, ,
    $$
    where $\omega$ is the probability of each mode and $\lambda$ it's scale.
    Note: (e^{1/\lambda}-1) e^{-L/\lambda} below equals (1-e^{-1/\lambda}) e^{-(L-1)/\lambda}.
    '''
    if isinstance(L, int):
        return np.sum((probs)*(np.exp(1/lambdas)-1)*np.exp(-L/lambdas))
    # Return an ndarray (matching the annotation) rather than a bare list.
    return np.asarray([np.sum((probs)*(np.exp(1/lambdas)-1)*np.exp(-l/lambdas)) for l in L])
# %% ../nbs/lib_nbs/03_analytics.ipynb 7
from scipy.special import zeta
def pdf_powerlaw(L: float,        # int, float or array for which pdf is calculated
                 beta: float = 1, # Exponent of the power law
                 ) -> np.ndarray: # Probability of each L
    r''' Computes the discrete PDF of a powerlaw of the form
    $$
    \Pr(L)\sim L^{-1-\mathrm{beta}}.
    $$
    Normalized over the integers via the Hurwitz zeta function zeta(beta+1, q=1).
    '''
    if not isinstance(L, int):
        # np.asarray accepts plain floats and lists as well as ndarrays
        # (the previous .astype call crashed on non-array input).
        L = np.asarray(L, dtype=float)
    return (1/zeta(beta+1, q = 1))*L**(-1-beta)
# %% ../nbs/lib_nbs/03_analytics.ipynb 9
def pdf_discrete_sample(pdf_func: object,  # Function generating the pdf
                        num_samples: int,  # Number of samples to create
                        **args_func        # Arguments of the generating funcion
                        )-> np.array:      # Samples
    ''' Samples discrete values from a given PDF'''
    weights = pdf_func(**args_func)
    # Normalize so the weights form a proper probability vector.
    probabilities = weights / np.sum(weights)
    # Support starts at 1 (step lengths), one value per weight.
    support = np.arange(1, len(probabilities) + 1)
    return np.random.choice(support, p=probabilities, size=num_samples)
# %% ../nbs/lib_nbs/03_analytics.ipynb 12
def get_policy_from_dist(n_max,       # Maximum counter n_max for which the policy is calculated
                         func,        # Function generating the pdf
                         renorm = True, # If true, we check whether the distribution has a boundary N, for which \sum_n=N^\infty Pr(L=nd) = 0
                         **args_func  # Arguments of the generating funcion (should accept L as input parameter)
                         ) -> list:   # Policy (probability of continuing) at each counter value
    ''' Given a PDF of step lengths, calculates the corresponding policy.
    The policy at counter n is 1 - Pr(L = n) / Pr(L >= n), i.e. the complement
    of the hazard rate of the step-length distribution.
    Returns a list (callers rely on list slice assignment).
    '''
    def _continue_prob(length):
        # Remaining tail mass Pr(L >= length) = 1 - Pr(L < length).
        tail_mass = 1 - np.sum(func(L = np.arange(1, length, dtype=float), **args_func))
        return 1 - func(L = length, **args_func) / tail_mass
    if not renorm:
        return [_continue_prob(length) for length in range(1, n_max + 1)]
    # Find the first length at which the cumulative mass reaches 1; beyond that
    # bound the policy must be "always turn" (probability 0 of continuing).
    bound = n_max + 1
    for l in range(2, n_max):
        if np.sum(func(L = np.arange(1, l, dtype=float), **args_func)) >= 1:
            bound = l - 1
            break
    return [0 if length >= bound else _continue_prob(length) for length in range(1, n_max + 1)]
# %% auto 0
__all__ = ['PS_imitation']
# %% ../nbs/lib_nbs/04_imitation_learning.ipynb 2
import numpy as np
# %% ../nbs/lib_nbs/04_imitation_learning.ipynb 4
class PS_imitation():
    '''Projective Simulation (PS) agent with two actions -- continue (row 0) and
    rotate (row 1) -- trained by imitation: it is directly told which step length
    was rewarded instead of replaying a full action-state trajectory.'''
    def __init__(self,
                 num_states: int, # Number of states
                 eta: float, # Glow parameter of PS
                 gamma: float # Damping parameter of PS
                 ):
        '''Constructs a PS agent with two actions (continue and rotate) that performs imitation learning
        in the search scenario. Instead of following a full trajectory of action-state tuples, the agent
        is directly given the reward state (the step length in this case). The agent updates all previous
        continue actions and the current rotate action.
        '''
        self.num_states = num_states
        self.eta = eta
        self.gamma_damping = gamma
        # h-matrix: row 0 = "continue", row 1 = "rotate"; one column per counter state.
        self.h_matrix = np.ones((2, self.num_states)).astype(float)
        # initiate glow matrix
        self.reset()
    def reset(self) -> None:
        '''Resets the glow matrix'''
        self.g_matrix = np.zeros((2, self.num_states)).astype(float)
    def update(self,
               length: int, # Step length rewarded
               reward: int = 1 # Value of the reward
               ) -> None:
        '''
        Updates the policy based on the imitation scheme (see paper for details)
        NOTE: state is length-1 because counter starts in 0
        (but in 0, agent has already performed a step of length 1 -- from the previous action "rotate").
        '''
        factor = 1 - self.eta
        # ---- Updating the CONTINUE part of g-matrix ----
        # Damping before adding up the traversed edges.
        self.g_matrix[0, :length-1] *= (factor**np.arange(1,length))
        # Set to one all previous states (adding up the traversed edges)
        self.g_matrix[0, :length-1] += 1
        # Multiply each previous state by factor**x, x being its distance to the
        # rewarded state (note the reversed power array).
        self.g_matrix[0, :length-1] *= (factor**np.arange(1,length))[::-1]
        # Damp the rest of the matrix by the number of steps done
        self.g_matrix[0, length-1:] *= factor**length
        # ---- Updating the TURN part of g-matrix ----
        self.g_matrix[1, :] *= factor**length
        self.g_matrix[1, length-1] += 1
        # Apply damping: pull every h value towards 1, once per step taken
        if self.gamma_damping > 0:
            for _ in range(length):
                self.h_matrix -= self.gamma_damping*(self.h_matrix - 1.)
        # Apply reward, weighted by the glow matrix
        self.h_matrix += self.g_matrix*reward
# rl-plotter
  
[README](README.md) | [中文文档](README_zh.md)
This is a simple tool which can plot learning curves easily for reinforcement learning (RL)
## Installation
from PIP
```
pip install rl_plotter
```
from source
```
python setup.py install
```
## Usage
**1. add basic logger**
Add our logger in your code of evaluation (Recommend)
```python
from rl_plotter.logger import Logger
logger = Logger("your_exp_name", env_name, seed, locals())
····
logger.update(score=evaluation_score_list, total_steps=current_training_steps)
```
or just use [OpenAI-spinningup](https://github.com/openai/spinningup) to log (Support)
or you can use [OpenAI-baseline](https://github.com/openai/baselines) bench.Monitor (Not Recommend)
```python
env = logger.monitor_env(env)
```
**2. track other variables (Optional)**
if you want to track other variables, you can use our custom_logger:
```python
custom_logger=logger.new_custom_logger(filename, fieldnames=["variable 1", "variable 2", ..., "variable n"])
custom_logger.update(fieldvalues=variable_value_list, total_steps=current_training_steps)
```
**3. plot the results**
After the training or when you are training your agent, you can plot the learning curves in this way:
- switch to the log directory, or to the parent directory containing multiple logs (default: ./)
- run command to plot:
```
rl_plotter --save --show
```
You can also use seaborn kernel to get the same plot style as [OpenAI-spinningup](https://github.com/openai/spinningup):
```
rl_plotter_spinup --save --show
```
## Example
**1. commonly used commands**
```
rl_plotter --save --show --filter HalfCheetah
rl_plotter --save --show --filter Ant --avg_group --shaded_std
rl_plotter --save --show --filter Swimmer --avg_group --shaded_std --shaded_err
rl_plotter --save --show --filter Walker2d --filename progress.txt --xkey TotalEnvInteracts --ykey AverageEpRet
```
**2. practical examples**
```
rl_plotter --show --save --avg_group --shaded_err --shaded_std
```
<div align="center"><img width="400" height="300" src="https://github.com/gxywy/rl-plotter/blob/master/imgs/figure_1.png?raw=true"/></div>
```
rl_plotter --show --save --avg_group --shaded_err --shaded_std --filename q --filters Walker HalfCheetah --ykey bias real_q --yduel --style default --smooth 0
```
<div align="center"><img width="400" height="300" src="https://github.com/gxywy/rl-plotter/blob/master/imgs/figure_2.png?raw=true"/></div>
**3. more specific usage**
you can find all parameters which can custom the style of your curves using `help`
```
rl_plotter --help
```
```
optional arguments:
-h, --help show this help message and exit
--fig_length matplotlib figure length (default: 8)
--fig_width matplotlib figure width (default: 6)
--style matplotlib figure style (default: seaborn)
--title matplotlib figure title (default: None)
--xlabel matplotlib figure xlabel
--xkey x-axis key in csv file (default: l)
--ykey y-axis key in csv file (support multi) (default: r)
--yduel duel y axis (use if has two ykeys)
--ylabel matplotlib figure ylabel
--smooth smooth radius of y axis (default: 10)
--resample if not zero, size of the uniform grid in x direction
to resample onto. Resampling is performed via
symmetric EMA smoothing (see the docstring for
symmetric_ema). Default is zero (no resampling). Note
that if average_group is True, resampling is
necessary; in that case, default value is 512.
(default: 512)
--smooth_step when resampling (i.e. when resample > 0 or
average_group is True), use this EMA decay parameter
(in units of the new grid step). See docstrings for
decay_steps in symmetric_ema or one_sided_ema functions.
(default: 1.0)
--avg_group average the curves in the same group and plot the mean
--shaded_std shaded region corresponding to standard deviation of the group
--shaded_err shaded region corresponding to error in mean estimate of the group
--legend_loc location of legend
--legend_outside place the legend outside of the figure
--borderpad borderpad of legend (default: 0.5)
--labelspacing labelspacing of legend (default: 0.5)
--no_legend_group_num don't show num of group in legend
--time enable this will activate parameters about time
--time_unit parameters about time, x axis time unit (default: h)
--time_interval parameters about time, x axis time interval (default: 1)
--xformat x-axis format
--xlim x-axis limitation (default: None)
--log_dir log dir (default: ./)
--filters filter of dirname
--filename csv filename
--show show figure
--save save figure
--dpi figure dpi (default: 400)
```
## Features
- [x] custom logger, style, key, label, x-axis formatter, and so on ...
- [x] filter of directory name
- [x] multi-experiment plotter
- [x] multiple plotting kernels (native matplotlib plotting or seaborn plotting)
- [x] compatible with [OpenAI-baseline](https://github.com/openai/baselines) monitor and [OpenAI-spinningup](https://github.com/openai/spinningup)
- [x] corresponding color for specific experiment
- [x] multi y key & duel y legend
## Citing the Project
If using this repository for your research or publication, please cite:
```
@misc{rl-plotter,
author = {Xiaoyu Gong},
title = {RL-plotter: A plotter for reinforcement learning},
year = {2020},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/gxywy/rl-plotter}},
}
```
## Acknowledgment
The core of this tool is inspired by [baselines/plot_util.py](https://github.com/openai/baselines/blob/master/baselines/common/plot_util.py)
| /rl_plotter-2.4.0.tar.gz/rl_plotter-2.4.0/README.md | 0.637031 | 0.899431 | README.md | pypi |
from operator import itemgetter
from typing import Dict, List
import numpy as np
from rl_replicas.experience import Experience
class ReplayBuffer:
    """
    Replay buffer for off-policy algorithms

    Stores flattened transitions and evicts the oldest ones (FIFO) once
    `buffer_size` is exceeded.

    :param buffer_size: (int) The size of the replay buffer.
    """

    def __init__(self, buffer_size: int = int(1e6)) -> None:
        self.buffer_size = buffer_size
        self.current_size: int = 0

        self.observations: List[np.ndarray] = []
        self.actions: List[np.ndarray] = []
        self.rewards: List[float] = []
        self.next_observations: List[np.ndarray] = []
        self.dones: List[bool] = []

    def add_experience(self, experience: Experience) -> None:
        """
        Add experience

        Appends every transition contained in `experience`, then drops the
        oldest transitions if the buffer overflows.

        :param experience: (Experience) Experience.
        """
        self.observations.extend(experience.flattened_observations)
        self.actions.extend(experience.flattened_actions)
        self.rewards.extend(experience.flattened_rewards)
        self.next_observations.extend(experience.flattened_next_observations)
        self.dones.extend(experience.flattened_dones)

        self.current_size += len(experience.flattened_observations)

        if self.current_size > self.buffer_size:
            # FIFO eviction: discard the oldest transitions.
            num_exceeded_experiences: int = self.current_size - self.buffer_size

            del self.observations[:num_exceeded_experiences]
            del self.actions[:num_exceeded_experiences]
            del self.rewards[:num_exceeded_experiences]
            del self.next_observations[:num_exceeded_experiences]
            del self.dones[:num_exceeded_experiences]

            self.current_size -= num_exceeded_experiences

    def sample_minibatch(self, minibatch_size: int = 32) -> Dict[str, np.ndarray]:
        """
        Sample minibatch

        Samples `minibatch_size` transitions uniformly at random, with
        replacement.

        :param minibatch_size: (int) The number of transitions to be sampled.
        :return: (Dict[str, np.ndarray]) Sampled transitions.
        """
        indices = np.random.randint(0, self.current_size, minibatch_size)

        sampled_observations: np.ndarray = np.vstack(
            itemgetter(*indices)(self.observations)
        )
        sampled_actions: np.ndarray = np.vstack(itemgetter(*indices)(self.actions))
        sampled_rewards: np.ndarray = np.asarray(itemgetter(*indices)(self.rewards))
        sampled_next_observations: np.ndarray = np.vstack(
            itemgetter(*indices)(self.next_observations)
        )
        sampled_dones: np.ndarray = np.asarray(itemgetter(*indices)(self.dones))

        minibatch: Dict[str, np.ndarray] = {
            "observations": sampled_observations,
            "actions": sampled_actions,
            "rewards": sampled_rewards,
            "next_observations": sampled_next_observations,
            "dones": sampled_dones,
        }

        return minibatch
import random
from typing import Iterable, List
import numpy as np
import scipy.signal
import torch
from gym import Space
from torch import Tensor, nn
from rl_replicas.policies.policy import Policy
from rl_replicas.value_function import ValueFunction
def discounted_cumulative_sums(vector: np.ndarray, discount: float) -> np.ndarray:
    """
    Compute discounted cumulative sums of vector

    :param vector: (np.ndarray) A target vector.
      e.g. [x0,
            x1,
            x2]
    :param discount: (float) The discount factor for the cumulative return.
    :return: (np.ndarray) The discounted cumulative sums of a vector.
      e.g. [x0 + discount * x1 + discount^2 * x2,
            x1 + discount * x2,
            x2]
    """
    # lfilter on the reversed vector computes the discounted suffix sums;
    # reversing back restores the original order. lfilter already returns an
    # ndarray, so the former np.asarray(list(...)) round-trip (an extra copy)
    # is dropped.
    return scipy.signal.lfilter([1], [1, -discount], vector[::-1], axis=0)[::-1]
def gae(
    rewards: np.ndarray, gamma: float, values: np.ndarray, gae_lambda: float
) -> np.ndarray:
    """
    Compute Generalized Advantage Estimation (GAE)

    :param rewards: (np.ndarray) Rewards for all states.
    :param gamma: (float) The discount factor for the cumulative return.
    :param values: (np.ndarray) Values for all states.
    :param gae_lambda: (float) A smoothing parameter for reducing the variance.
    :return: (np.ndarray) The GAEs for all states.
    """
    # One-step TD residuals: r_t + gamma * V(s_{t+1}) - V(s_t).
    td_residuals: np.ndarray = rewards[:-1] + gamma * values[1:] - values[:-1]
    # GAE is the (gamma * lambda)-discounted suffix sum of the TD residuals.
    return discounted_cumulative_sums(td_residuals, gamma * gae_lambda)
def polyak_average(
    params: Iterable[nn.Parameter], target_params: Iterable[nn.Parameter], rho: float
) -> None:
    """
    Perform Polyak averaging on target_params using params

    Each target parameter is overwritten in place with
    rho * target + (1 - rho) * param.

    :param params: (Iterable[torch.nn.Parameter]) The parameters to use to update the target params.
    :param target_params: (Iterable[torch.nn.Parameter]) The parameters to update.
    :param rho: (float) The coefficient for polyak averaging (between 0 and 1).
    """
    with torch.no_grad():
        for source, target in zip(params, target_params):
            blended = (
                torch.tensor(rho) * target.data
                + torch.tensor(1.0 - rho) * source.data
            )
            target.data.copy_(blended)
def compute_values(
    observations_with_last_observation: List[List[np.ndarray]],
    value_function: ValueFunction,
) -> List[np.ndarray]:
    """
    Evaluate the value function on every episode's observations (including the
    final observation), without tracking gradients.

    :param observations_with_last_observation: (List[List[np.ndarray]]) Per-episode
        observation lists, each ending with the final observation.
    :param value_function: (ValueFunction) The value function to evaluate.
    :return: (List[np.ndarray]) One flat array of values per episode.
    """
    with torch.no_grad():
        return [
            value_function(torch.from_numpy(np.stack(episode)).float())
            .flatten()
            .numpy()
            for episode in observations_with_last_observation
        ]
def bootstrap_rewards_with_last_values(
    rewards: List[List[float]], episode_dones: List[bool], last_values: List[float]
) -> List[np.ndarray]:
    """
    Append a bootstrap value to each episode's rewards: 0 for finished episodes,
    the value estimate of the last observation for truncated ones.
    """
    bootstrapped: List[np.ndarray] = []
    for episode_rewards, done, last_value in zip(rewards, episode_dones, last_values):
        tail = 0 if done else last_value
        bootstrapped.append(np.asarray(episode_rewards + [tail]))
    return bootstrapped
def normalize_tensor(vector: Tensor) -> Tensor:
    """Standardize a tensor: subtract its mean and divide by its (sample) std."""
    centered = vector - torch.mean(vector)
    return centered / torch.std(vector)
def add_noise_to_get_action(
    policy: Policy, action_space: Space, action_noise_scale: float
) -> Policy:
    """Wrap a policy so its actions receive clipped Gaussian exploration noise."""
    return _NoisedPolicy(policy, action_space, action_noise_scale)
class _NoisedPolicy(Policy):
    # Internal wrapper that adds clipped Gaussian noise to a base policy's actions.
    def __init__(
        self, base_policy: Policy, action_space: Space, action_noise_scale: float
    ):
        super().__init__()

        self.base_policy = base_policy
        self.action_space = action_space
        self.action_noise_scale = action_noise_scale

        # Clips to [-high[0], high[0]] in every dimension; assumes a symmetric
        # Box-like action space with uniform bounds -- confirm for new envs.
        self.action_limit = action_space.high[0]
        self.action_size = action_space.shape[0]

    def get_action_tensor(self, observation: Tensor) -> Tensor:
        # Base action plus Gaussian noise, clipped to the action limits.
        action = self.base_policy.get_action_tensor(observation)
        action += self.action_noise_scale * torch.randn(self.action_size)
        action = torch.clip(action, -self.action_limit, self.action_limit)

        return action

    def get_action_numpy(self, observation: np.ndarray) -> np.ndarray:
        # NumPy counterpart of get_action_tensor.
        action = self.base_policy.get_action_numpy(observation)
        action += self.action_noise_scale * np.random.randn(self.action_size)
        action = np.clip(action, -self.action_limit, self.action_limit)

        return action
def set_seed_for_libraries(seed: int) -> None:
    """
    Set seed for random, numpy and torch.
    """
    # Seed every RNG the code base draws from.
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(seed)
    # Make cuDNN deterministic (at the cost of potential speed).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
from typing import List, Optional
import numpy as np
class Experience:
    """
    Experience

    N: The number of episodes.
    L: The length of each episode (it may vary).
    A^*: The shape of single action step.
    O^*: The shape of single observation step.

    :param observations: (Optional[List[List[np.ndarray]]]) A nested list of shape (N, L, O^*).
    :param actions: (Optional[List[List[np.ndarray]]]) A nested list of shape (N, L, A^*).
    :param rewards: (Optional[List[List[float]]]) A nested list of shape (N, L).
    :param last_observations: (Optional[List[np.ndarray]]) A list of np.ndarray (N, O^*).
    :param dones: (Optional[List[bool]]) A nested list shape (N, L).
    :param episode_returns: (Optional[List[float]]) A list with length (N).
    :param episode_lengths: (Optional[List[int]]) A list with length (N).
    """

    def __init__(
        self,
        observations: Optional[List[List[np.ndarray]]] = None,
        actions: Optional[List[List[np.ndarray]]] = None,
        rewards: Optional[List[List[float]]] = None,
        last_observations: Optional[List[np.ndarray]] = None,
        dones: Optional[List[List[bool]]] = None,
        episode_returns: Optional[List[float]] = None,
        episode_lengths: Optional[List[int]] = None,
    ):
        # Fall back to a fresh empty list for any missing (or empty) argument.
        self.observations = observations or []
        self.actions = actions or []
        self.rewards = rewards or []
        self.last_observations = last_observations or []
        self.dones = dones or []
        self.episode_returns = episode_returns or []
        self.episode_lengths = episode_lengths or []

    @property
    def observations_with_last_observation(self) -> List[List[np.ndarray]]:
        # Per episode: the observation sequence extended by the final observation.
        combined: List[List[np.ndarray]] = []
        for episode_observations, last in zip(self.observations, self.last_observations):
            combined.append(episode_observations + [last])
        return combined

    @property
    def next_observations(self) -> List[List[np.ndarray]]:
        # Per episode: observations shifted by one step, closed with the final observation.
        shifted: List[List[np.ndarray]] = []
        for episode_observations, last in zip(self.observations, self.last_observations):
            shifted.append(episode_observations[1:] + [last])
        return shifted

    @property
    def episode_dones(self) -> List[bool]:
        # Whether each episode terminated, read from its final done flag.
        return [episode[-1] for episode in self.dones]

    @property
    def flattened_observations(self) -> List[np.ndarray]:
        flat: List[np.ndarray] = []
        for episode in self.observations:
            flat.extend(episode)
        return flat

    @property
    def flattened_actions(self) -> List[np.ndarray]:
        flat: List[np.ndarray] = []
        for episode in self.actions:
            flat.extend(episode)
        return flat

    @property
    def flattened_rewards(self) -> List[float]:
        flat: List[float] = []
        for episode in self.rewards:
            flat.extend(episode)
        return flat

    @property
    def flattened_next_observations(self) -> List[np.ndarray]:
        flat: List[np.ndarray] = []
        for episode in self.next_observations:
            flat.extend(episode)
        return flat

    @property
    def flattened_dones(self) -> List[bool]:
        flat: List[bool] = []
        for episode in self.dones:
            flat.extend(episode)
        return flat
import logging
from typing import Callable, Iterable, List, Tuple
import numpy as np
import torch
from torch import Tensor
from torch.optim import Optimizer
from typing_extensions import TypedDict
logger = logging.getLogger(__name__)
# Serializable snapshot of the optimizer's hyper-parameters, used by the
# `state` property/setter of ConjugateGradientOptimizer below.
State = TypedDict(
    "State",
    {
        "max_constraint": float,
        "n_conjugate_gradients": int,
        "max_backtracks": int,
        "backtrack_ratio": float,
        "hvp_damping_coefficient": float,
    },
)
class ConjugateGradientOptimizer(Optimizer):
"""
Performs constrained optimization via backtracking line search
The search direction is computed using a conjugate gradient algorithm,
which gives x = H^{-1}g, where H is a second order approximation of the
constraint and g is the gradient of the loss function.
:param params: (Iterable) A iterable of parameters to optimize.
:param max_constraint: (float) The maximum constraint value.
:param n_conjugate_gradients: (int) The number of conjugate gradient iterations used to calculate H^-1 g.
:param max_backtracks: (int) The max number of iterations for backtrack linesearch.
:param backtrack_ratio: (float) The backtrack ratio for backtracking line search.
:param hvp_damping_coefficient: (float) The coefficient for numerical stability, should be smallish.
Adjusts Hessian-vector product calculation: H -> H + hvp_damping_coefficient*I.
"""
def __init__(
self,
params: Iterable[Tensor],
max_constraint: float = 0.01,
n_conjugate_gradients: int = 10,
max_backtracks: int = 15,
backtrack_ratio: float = 0.8,
hvp_damping_coefficient: float = 1e-5,
):
# Initializing defaults is required
defaults: dict = {}
super().__init__(params, defaults)
self.max_constraint = max_constraint
self.n_conjugate_gradients = n_conjugate_gradients
self.max_backtracks = max_backtracks
self.backtrack_ratio = backtrack_ratio
self.hvp_damping_coefficient = hvp_damping_coefficient
def step(self, loss_function: Callable, kl_divergence_function: Callable) -> None:
"""
Performs a single optimization step
:param loss_function: (Callable) A function to compute the loss.
:param kl_divergence_function: (Callable) A function to compute the kl divergence.
"""
# Collect trainable parameters and gradients
params: List[Tensor] = []
grads: List[Tensor] = []
for group in self.param_groups:
for p in group["params"]:
if p.grad is not None:
params.append(p)
grads.append(p.grad.reshape(-1))
flat_loss_grads: Tensor = torch.cat(grads)
# Build Hessian-vector-product function
hessian_vector_product_function = self._build_hessian_vector_product(
kl_divergence_function, params
)
# Compute step direction
step_direction = self._conjugate_gradient(
hessian_vector_product_function, flat_loss_grads
)
# Replace nan with 0.0
step_direction[step_direction.ne(step_direction)] = 0.0
# Compute step size
step_size = np.sqrt(
2.0
* self.max_constraint
* (
1.0
/ (
torch.dot(
step_direction, hessian_vector_product_function(step_direction)
)
+ 1e-8
)
)
)
if np.isnan(step_size):
step_size = 1.0
descent_step = step_size * step_direction
# Update parameters using backtracking line search
self._backtracking_line_search(
params, descent_step, loss_function, kl_divergence_function
)
@property
def state(self) -> State:
"""
:return: (State) The hyper-parameters of the optimizer.
"""
return {
"max_constraint": self.max_constraint,
"n_conjugate_gradients": self.n_conjugate_gradients,
"max_backtracks": self.max_backtracks,
"backtrack_ratio": self.backtrack_ratio,
"hvp_damping_coefficient": self.hvp_damping_coefficient,
}
@state.setter
def state(self, state: State) -> None:
self.max_constraint = state.get("max_constraint", 0.01)
self.n_conjugate_gradients = state.get("n_conjugate_gradients", 10)
self.max_backtracks = state.get("max_backtracks", 15)
self.backtrack_ratio = state.get("backtrack_ratio", 0.8)
self.hvp_damping_coefficient = state.get("hvp_damping_coefficient", 1e-5)
def __setstate__(self, state: dict) -> None:
"""
Restore the optimizer state
:param state: (dict) State dictionary.
"""
if "hvp_damping_coefficient" not in state["state"]:
logger.warning("Resuming ConjugateGradientOptimizer with lost state.")
# Set the fields manually so that the setter gets called.
self.state = state["state"]
self.param_groups = state["param_groups"]
def _build_hessian_vector_product(
self, hessian_target_vector_function: Callable, params: List[Tensor]
) -> Callable:
param_shapes: List[torch.Size] = [p.shape or torch.Size([1]) for p in params]
hessian_target_vector = hessian_target_vector_function()
hessian_target_vector_grads: Tuple[Tensor, ...] = torch.autograd.grad(
hessian_target_vector, params, create_graph=True
)
def _eval(vector: Tensor) -> Tensor:
"""
The evaluation function
:param vector (Tensor): The vector to be multiplied with Hessian.
:return: (Tensor) The product of Hessian of function f and v.
"""
unflattened_vector_list: List[Tensor] = self.unflatten_tensor(
vector, param_shapes
)
assert len(hessian_target_vector_grads) == len(unflattened_vector_list)
grad_vector_product_list: List[Tensor] = []
for g, x in zip(hessian_target_vector_grads, unflattened_vector_list):
single_grad_vector_product = torch.sum(g * x)
grad_vector_product_list.append(single_grad_vector_product)
grad_vector_product = torch.sum(torch.stack(grad_vector_product_list))
hvp: List[Tensor] = list(
torch.autograd.grad(grad_vector_product, params, retain_graph=True)
)
for i, (hx, p) in enumerate(zip(hvp, params)):
if hx is None:
hvp[i] = torch.zeros_like(p)
flat_output: Tensor = torch.cat([h.reshape(-1) for h in hvp])
return flat_output + self.hvp_damping_coefficient * vector
return _eval
def _conjugate_gradient(
self,
hessian_vector_product_function: Callable,
b: Tensor,
residual_tol: float = 1e-10,
) -> Tensor:
"""
Use Conjugate Gradient iteration to solve Ax = b. Demmel p 312
:param hessian_vector_product_function: (Callable) A function to compute Hessian vector product.
:param b: (Tensor) The right hand side of the equation to solve.
:param residual_tol: (float) Tolerence for convergence.
:return: (Tensor) Solution x* for equation Ax = b.
"""
x = torch.zeros_like(b)
r = b.clone()
p = b.clone()
rdotr = torch.dot(r, r)
for _ in range(self.n_conjugate_gradients):
z = hessian_vector_product_function(p)
v = rdotr / torch.dot(p, z)
x += v * p
r -= v * z
newrdotr = torch.dot(r, r)
mu = newrdotr / rdotr
p = r + mu * p
rdotr = newrdotr
if rdotr < residual_tol:
break
return x
def _backtracking_line_search(
self,
params: List[Tensor],
descent_step: float,
loss_function: Callable,
constraint_function: Callable,
) -> None:
previous_params: List[Tensor] = [p.clone() for p in params]
ratio_list: np.ndarray = self.backtrack_ratio ** np.arange(self.max_backtracks)
loss_before: Tensor = loss_function()
param_shapes: List[torch.Size] = [p.shape or torch.Size([1]) for p in params]
unflattened_descent_step_list: List[Tensor] = self.unflatten_tensor(
torch.as_tensor(descent_step), param_shapes
)
assert len(unflattened_descent_step_list) == len(params)
for ratio in ratio_list:
for step, previous_param, param in zip(
unflattened_descent_step_list, previous_params, params
):
step = ratio * step
new_param = previous_param.data - step
param.data = new_param.data
new_loss = loss_function()
constraint = constraint_function()
if new_loss < loss_before and constraint <= self.max_constraint:
break
if (
torch.isnan(new_loss)
or torch.isnan(constraint)
or new_loss >= loss_before
or constraint >= self.max_constraint
):
logger.warning("Line search condition violated. Rejecting the step.")
if torch.isnan(new_loss):
logger.debug("Violated because loss is NaN")
if torch.isnan(constraint):
logger.debug("Violated because constraint is NaN")
if new_loss >= loss_before:
logger.debug("Violated because loss not improving")
if constraint >= self.max_constraint:
logger.debug("Violated because constraint is violated")
for previous_param, param in zip(previous_params, params):
param.data = previous_param.data
def unflatten_tensor(
    self, flattened: Tensor, shapes: List[torch.Size]
) -> List[Tensor]:
    """
    Unflatten a flattened tensor into a list of unflattened tensors

    :param flattened: (Tensor) Flattened tensor.
    :param shapes: (List[torch.Size]) A list of shapes.
    :return: (List[Tensor]) A list of unflattened tensors.
    """
    # Number of elements each target shape consumes from the flat tensor.
    element_counts: List[int] = [int(np.prod(shape)) for shape in shapes]
    # Split points are the running totals, excluding the final one.
    split_points: Tensor = torch.from_numpy(np.cumsum(element_counts)[:-1])
    chunks = torch.tensor_split(flattened, split_points)

    unflattened: List[Tensor] = []
    for chunk, shape in zip(chunks, shapes):
        unflattened.append(chunk.reshape(shape))
    return unflattened
import logging
from typing import List, Optional
import gym
import numpy as np
from rl_replicas.experience import Experience
from rl_replicas.policies import Policy
from rl_replicas.samplers import Sampler
logger = logging.getLogger(__name__)
class BatchSampler(Sampler):
    """
    Batch sampler

    :param env: (gym.Env) Environment.
    :param seed: (int) Seed.
    :param is_continuous: (bool) If true, the last observation is retained and
        it samples experiences continuously across sample function calls
        (instead of resetting the environment on every call).
    """

    def __init__(
        self, env: gym.Env, seed: Optional[int] = None, is_continuous: bool = False
    ):
        self.env = env
        self.seed = seed
        self.is_continuous = is_continuous

        # Last observation carried across sample() calls; None until the
        # environment has been reset for the first time.
        self.observation: Optional[np.ndarray] = None

    def sample(self, num_samples: int, policy: Policy) -> Experience:
        """
        Sample experience

        :param num_samples: (int) The number of steps (samples) to collect.
        :param policy: (Policy) Policy used to select actions.
        :return: (Experience) Sampled experience.
        """
        experience: Experience = Experience()

        # Variables on each episode; flushed into `experience` when an episode
        # ends or the batch is cut off.
        episode_observations: List[np.ndarray] = []
        episode_actions: List[np.ndarray] = []
        episode_rewards: List[float] = []
        episode_dones: List[bool] = []
        episode_return: float = 0.0
        episode_length: int = 0

        if self.observation is None:
            # Reset env for the first function call; the seed is applied only here.
            self.observation = self.env.reset(seed=self.seed)
        elif not self.is_continuous:
            # Non-continuous mode starts every batch from a fresh episode.
            self.observation = self.env.reset()

        for current_step in range(num_samples):
            assert self.observation is not None
            episode_observations.append(self.observation)

            action: np.ndarray = policy.get_action_numpy(self.observation)
            episode_actions.append(action)

            reward: float
            episode_done: bool
            self.observation, reward, episode_done, _ = self.env.step(action)

            episode_return += reward
            episode_rewards.append(reward)
            episode_dones.append(episode_done)

            episode_length += 1

            # True on the final step of the batch, which may cut an episode short.
            epoch_ended: bool = current_step == num_samples - 1

            if episode_done or epoch_ended:
                if epoch_ended and not episode_done:
                    logger.debug(
                        "The trajectory cut off at {} steps on the current episode".format(
                            episode_length
                        )
                    )

                # The observation after the last step; used downstream to
                # bootstrap the value of a truncated episode.
                assert self.observation is not None
                episode_last_observation: np.ndarray = self.observation

                experience.observations.append(episode_observations)
                experience.actions.append(episode_actions)
                experience.rewards.append(episode_rewards)
                experience.last_observations.append(episode_last_observation)
                experience.dones.append(episode_dones)
                experience.episode_returns.append(episode_return)
                experience.episode_lengths.append(episode_length)

                if episode_done:
                    self.observation = self.env.reset()

                # Reset the per-episode buffers for the next episode.
                episode_return, episode_length = 0.0, 0
                (
                    episode_observations,
                    episode_actions,
                    episode_rewards,
                    episode_dones,
                ) = ([], [], [], [])

        return experience
import logging
import os
import time
from typing import List
import gym
import numpy as np
import torch
from torch import Tensor
from torch.distributions import Distribution
from torch.nn import functional as F
from rl_replicas.experience import Experience
from rl_replicas.metrics_manager import MetricsManager
from rl_replicas.policies import Policy
from rl_replicas.samplers import Sampler
from rl_replicas.utils import (
bootstrap_rewards_with_last_values,
compute_values,
discounted_cumulative_sums,
gae,
normalize_tensor,
)
from rl_replicas.value_function import ValueFunction
logger = logging.getLogger(__name__)
class VPG:
    """
    Vanilla Policy Gradient (REINFORCE) with Generalized Advantage Estimator (GAE)

    :param policy: (Policy) Policy.
    :param value_function: (ValueFunction) Value function.
    :param env: (gym.Env) Environment.
    :param sampler: (Sampler) Sampler.
    :param gamma: (float) The discount factor for the cumulative return.
    :param gae_lambda: (float) The factor for trade-off of bias vs variance for GAE.
    :param num_value_gradients: (int) The number of gradient descent steps to take on value function per epoch.
    """

    def __init__(
        self,
        policy: Policy,
        value_function: ValueFunction,
        env: gym.Env,
        sampler: Sampler,
        gamma: float = 0.99,
        gae_lambda: float = 0.97,
        num_value_gradients: int = 80,
    ) -> None:
        self.policy = policy
        self.value_function = value_function
        self.env = env
        self.sampler = sampler
        self.gamma = gamma
        self.gae_lambda = gae_lambda
        self.num_value_gradients = num_value_gradients

    def learn(
        self,
        num_epochs: int = 50,
        batch_size: int = 4000,
        model_saving_interval: int = 4000,
        output_dir: str = ".",
    ) -> None:
        """
        Learn the model

        :param num_epochs: (int) The number of epochs to run and train.
        :param batch_size: (int) The number of steps to run per epoch.
        :param model_saving_interval: (int) The interval in total steps between model savings.
            NOTE(review): the trigger is ``total_steps % model_saving_interval == 0``,
            so it only fires when the interval divides the accumulated step count —
            in practice it should be a multiple of ``batch_size``.
        :param output_dir: (str) The output directory.
        """
        start_time: float = time.time()
        self.current_total_steps: int = 0
        self.current_total_episodes: int = 0

        os.makedirs(output_dir, exist_ok=True)
        self.metrics_manager: MetricsManager = MetricsManager(output_dir)

        for current_epoch in range(1, num_epochs + 1):
            experience: Experience = self.sampler.sample(batch_size, self.policy)

            episode_returns: List[float] = experience.episode_returns
            episode_lengths: List[int] = experience.episode_lengths

            self.current_total_steps += sum(experience.episode_lengths)
            # NOTE(review): assumes `episode_dones` holds boolean per-step done
            # flags, so the sum counts completed episodes — confirm against the
            # Experience class.
            self.current_total_episodes += sum(experience.episode_dones)

            self.metrics_manager.record_scalar("epoch", current_epoch)
            self.metrics_manager.record_scalar("total_steps", self.current_total_steps)
            self.metrics_manager.record_scalar(
                "total_episodes", self.current_total_episodes
            )
            self.metrics_manager.record_scalar(
                "sampling/average_episode_return",
                float(np.mean(episode_returns)),
                self.current_total_steps,
                tensorboard=True,
            )
            self.metrics_manager.record_scalar(
                "sampling/episode_return_std", float(np.std(episode_returns))
            )
            self.metrics_manager.record_scalar(
                "sampling/max_episode_return", float(np.max(episode_returns))
            )
            self.metrics_manager.record_scalar(
                "sampling/min_episode_return", float(np.min(episode_returns))
            )
            self.metrics_manager.record_scalar(
                "sampling/average_episode_length",
                float(np.mean(episode_lengths)),
                self.current_total_steps,
                tensorboard=True,
            )

            self.train(experience)

            if self.current_total_steps % model_saving_interval == 0:
                model_path: str = os.path.join(output_dir, "model.pt")
                logger.debug("Save model")
                self.save_model(current_epoch, model_path)

            self.metrics_manager.record_scalar("time", time.time() - start_time)

            # Dump all metrics stored in this epoch
            self.metrics_manager.dump()

        self.metrics_manager.close()

    def train(self, experience: Experience) -> None:
        """
        Run one training update: a single policy-gradient step and
        ``num_value_gradients`` value-function steps on the sampled experience.

        :param experience: (Experience) Sampled experience.
        """
        # Predicted values per episode, including the final (bootstrap) observation.
        values: List[np.ndarray] = compute_values(
            experience.observations_with_last_observation, self.value_function
        )

        # Value of each episode's last observation; credits truncated episodes
        # for their remaining (unobserved) return.
        last_values: List[float] = [
            float(episode_values[-1]) for episode_values in values
        ]
        bootstrapped_rewards: List[np.ndarray] = bootstrap_rewards_with_last_values(
            experience.rewards, experience.episode_dones, last_values
        )

        # Rewards-to-go; drop the last entry, which corresponds to the appended
        # bootstrap value rather than an actual environment step.
        discounted_returns: List[np.ndarray] = [
            discounted_cumulative_sums(episode_rewards, self.gamma)[:-1]
            for episode_rewards in bootstrapped_rewards
        ]
        flattened_discounted_returns: Tensor = torch.from_numpy(
            np.concatenate(discounted_returns)
        ).float()

        flattened_observations: Tensor = torch.from_numpy(
            np.concatenate(experience.observations)
        ).float()
        flattened_actions: Tensor = torch.from_numpy(
            np.concatenate(experience.actions)
        ).float()

        # Generalized Advantage Estimation per episode.
        gaes: List[np.ndarray] = [
            gae(episode_rewards, self.gamma, episode_values, self.gae_lambda)
            for episode_rewards, episode_values in zip(bootstrapped_rewards, values)
        ]
        flattened_advantages: Tensor = torch.from_numpy(np.concatenate(gaes)).float()
        # Zero-mean / unit-std advantages for gradient stability.
        flattened_advantages = normalize_tensor(flattened_advantages)

        # For logging
        with torch.no_grad():
            policy_dist_before: Distribution = self.policy(flattened_observations)
            log_probs_before: Tensor = policy_dist_before.log_prob(flattened_actions)
            policy_loss_before: Tensor = -torch.mean(
                log_probs_before * flattened_advantages
            )
            entropies_before: Tensor = policy_dist_before.entropy()

        self.train_policy(
            flattened_observations, flattened_actions, flattened_advantages
        )

        # Train value function
        value_function_losses: List[float] = []
        for _ in range(self.num_value_gradients):
            value_function_loss: Tensor = self.train_value_function(
                flattened_observations, flattened_discounted_returns
            )
            value_function_losses.append(value_function_loss.item())

        self.metrics_manager.record_scalar(
            "policy/loss",
            policy_loss_before.item(),
            self.current_total_steps,
            tensorboard=True,
        )
        self.metrics_manager.record_scalar(
            "policy/avarage_entropy",
            torch.mean(entropies_before).item(),
            self.current_total_steps,
            tensorboard=True,
        )
        self.metrics_manager.record_scalar(
            "policy/log_prob_std",
            torch.std(log_probs_before).item(),
            self.current_total_steps,
            tensorboard=True,
        )
        self.metrics_manager.record_scalar(
            "value_function/average_loss",
            float(np.mean(value_function_losses)),
            self.current_total_steps,
            tensorboard=True,
        )

    def train_policy(
        self,
        flattened_observations: Tensor,
        flattened_actions: Tensor,
        flattened_advantages: Tensor,
    ) -> None:
        """
        Take a single policy-gradient step maximizing E[log pi(a|s) * advantage].
        """
        policy_dist: Distribution = self.policy(flattened_observations)
        log_probs: Tensor = policy_dist.log_prob(flattened_actions)
        # Negated because the optimizer minimizes.
        policy_loss: Tensor = -torch.mean(log_probs * flattened_advantages)

        self.policy.optimizer.zero_grad()
        policy_loss.backward()
        self.policy.optimizer.step()

    def train_value_function(
        self, flattened_observations: Tensor, flattened_discounted_returns: Tensor
    ) -> Tensor:
        """
        Take a single gradient step on the value function; return the detached loss.
        """
        value_function_loss: Tensor = self.compute_value_function_loss(
            flattened_observations, flattened_discounted_returns
        )

        self.value_function.optimizer.zero_grad()
        value_function_loss.backward()
        self.value_function.optimizer.step()

        return value_function_loss.detach()

    def compute_value_function_loss(
        self, observations: Tensor, discounted_returns: Tensor
    ) -> Tensor:
        """
        Mean squared error between predicted values and rewards-to-go targets.
        """
        values: Tensor = self.value_function(observations)
        # Drop the trailing singleton dimension so shapes match the targets.
        squeezed_values: Tensor = torch.squeeze(values, -1)
        value_loss: Tensor = F.mse_loss(squeezed_values, discounted_returns)

        return value_loss

    def save_model(self, epoch: int, model_path: str) -> None:
        """
        Save model

        :param epoch: (int) The current epoch.
        :param model_path: (str) The path to save the model.
        """
        torch.save(
            {
                "epoch": epoch,
                "total_steps": self.current_total_steps,
                "policy_state_dict": self.policy.network.state_dict(),
                "policy_optimizer_state_dict": self.policy.optimizer.state_dict(),
                "value_function_state_dict": self.value_function.network.state_dict(),
                "value_function_optimizer_state_dict": self.value_function.optimizer.state_dict(),
            },
            model_path,
        )
import copy
import logging
import os
import time
from typing import List
import gym
import numpy as np
import torch
from torch import Tensor
from torch.distributions import Distribution
from torch.nn import functional as F
from rl_replicas.experience import Experience
from rl_replicas.metrics_manager import MetricsManager
from rl_replicas.policies import Policy
from rl_replicas.samplers import Sampler
from rl_replicas.utils import (
bootstrap_rewards_with_last_values,
compute_values,
discounted_cumulative_sums,
gae,
normalize_tensor,
)
from rl_replicas.value_function import ValueFunction
logger = logging.getLogger(__name__)
class PPO:
    """
    Proximal Policy Optimization (by clipping) with early stopping based on approximate KL divergence

    :param policy: (Policy) Policy.
    :param value_function: (ValueFunction) Value function.
    :param env: (gym.Env) Environment.
    :param sampler: (Sampler) Sampler.
    :param gamma: (float) The discount factor for the cumulative return.
    :param gae_lambda: (float) The factor for trade-off of bias vs variance for GAE.
    :param clip_range: (float) The limit on the likelihood ratio between policies for clipping in the policy objective.
    :param max_kl_divergence: (float) The limit on the KL divergence between policies for early stopping.
    :param num_policy_gradients: (int) The number of gradient descent steps to take on policy per epoch.
    :param num_value_gradients: (int) The number of gradient descent steps to take on value function per epoch.
    """

    def __init__(
        self,
        policy: Policy,
        value_function: ValueFunction,
        env: gym.Env,
        sampler: Sampler,
        gamma: float = 0.99,
        gae_lambda: float = 0.97,
        clip_range: float = 0.2,
        max_kl_divergence: float = 0.01,
        num_policy_gradients: int = 80,
        num_value_gradients: int = 80,
    ) -> None:
        self.policy = policy
        self.value_function = value_function
        self.env = env
        self.sampler = sampler
        self.gamma = gamma
        self.gae_lambda = gae_lambda
        self.clip_range = clip_range
        self.max_kl_divergence = max_kl_divergence
        self.num_policy_gradients = num_policy_gradients
        self.num_value_gradients = num_value_gradients

        # Snapshot of the policy used as the denominator of the likelihood
        # ratio; refreshed after each train() call.
        self.old_policy: Policy = copy.deepcopy(self.policy)

    def learn(
        self,
        num_epochs: int = 50,
        batch_size: int = 4000,
        model_saving_interval: int = 4000,
        output_dir: str = ".",
    ) -> None:
        """
        Learn the model

        :param num_epochs: (int) The number of epochs to run and train.
        :param batch_size: (int) The number of steps to run per epoch.
        :param model_saving_interval: (int) The interval in total steps between model savings.
            NOTE(review): the trigger is ``total_steps % model_saving_interval == 0``,
            so in practice it should be a multiple of ``batch_size``.
        :param output_dir: (str) The output directory.
        """
        start_time: float = time.time()
        self.current_total_steps: int = 0
        self.current_total_episodes: int = 0

        os.makedirs(output_dir, exist_ok=True)
        self.metrics_manager: MetricsManager = MetricsManager(output_dir)

        for current_epoch in range(1, num_epochs + 1):
            experience: Experience = self.sampler.sample(batch_size, self.policy)

            episode_returns: List[float] = experience.episode_returns
            episode_lengths: List[int] = experience.episode_lengths

            self.current_total_steps += sum(experience.episode_lengths)
            # NOTE(review): assumes `episode_dones` holds boolean per-step done
            # flags, so the sum counts completed episodes — confirm in Experience.
            self.current_total_episodes += sum(experience.episode_dones)

            self.metrics_manager.record_scalar("epoch", current_epoch)
            self.metrics_manager.record_scalar("total_steps", self.current_total_steps)
            self.metrics_manager.record_scalar(
                "total_episodes", self.current_total_episodes
            )
            self.metrics_manager.record_scalar(
                "sampling/average_episode_return",
                float(np.mean(episode_returns)),
                self.current_total_steps,
                tensorboard=True,
            )
            self.metrics_manager.record_scalar(
                "sampling/episode_return_std", float(np.std(episode_returns))
            )
            self.metrics_manager.record_scalar(
                "sampling/max_episode_return", float(np.max(episode_returns))
            )
            self.metrics_manager.record_scalar(
                "sampling/min_episode_return", float(np.min(episode_returns))
            )
            self.metrics_manager.record_scalar(
                "sampling/average_episode_length",
                float(np.mean(episode_lengths)),
                self.current_total_steps,
                tensorboard=True,
            )

            self.train(experience)

            if self.current_total_steps % model_saving_interval == 0:
                model_path: str = os.path.join(output_dir, "model.pt")
                logger.debug("Save model")
                self.save_model(current_epoch, model_path)

            self.metrics_manager.record_scalar("time", time.time() - start_time)

            # Dump all metrics stored in this epoch
            self.metrics_manager.dump()

        self.metrics_manager.close()

    def train(self, experience: Experience) -> None:
        """
        Run one PPO update: up to ``num_policy_gradients`` clipped-objective
        policy steps (with KL-based early stopping) and ``num_value_gradients``
        value-function steps.

        NOTE(review): the KL metric recorded below requires
        ``num_policy_gradients >= 1``; with 0 the name is undefined.

        :param experience: (Experience) Sampled experience.
        """
        # Predicted values per episode, including the final (bootstrap) observation.
        values: List[np.ndarray] = compute_values(
            experience.observations_with_last_observation, self.value_function
        )

        # Value of each episode's last observation; credits truncated episodes
        # for their remaining (unobserved) return.
        last_values: List[float] = [
            float(episode_values[-1]) for episode_values in values
        ]
        bootstrapped_rewards: List[np.ndarray] = bootstrap_rewards_with_last_values(
            experience.rewards, experience.episode_dones, last_values
        )

        # Rewards-to-go; drop the last entry, which corresponds to the appended
        # bootstrap value rather than an actual environment step.
        discounted_returns: List[np.ndarray] = [
            discounted_cumulative_sums(episode_rewards, self.gamma)[:-1]
            for episode_rewards in bootstrapped_rewards
        ]
        flattened_discounted_returns: Tensor = torch.from_numpy(
            np.concatenate(discounted_returns)
        ).float()

        flattened_observations: Tensor = torch.from_numpy(
            np.concatenate(experience.observations)
        ).float()
        flattened_actions: Tensor = torch.from_numpy(
            np.concatenate(experience.actions)
        ).float()

        # Generalized Advantage Estimation per episode.
        gaes: List[np.ndarray] = [
            gae(episode_rewards, self.gamma, episode_values, self.gae_lambda)
            for episode_rewards, episode_values in zip(bootstrapped_rewards, values)
        ]
        flattened_advantages: Tensor = torch.from_numpy(np.concatenate(gaes)).float()
        # Zero-mean / unit-std advantages for gradient stability.
        flattened_advantages = normalize_tensor(flattened_advantages)

        # For logging
        with torch.no_grad():
            policy_dist: Distribution = self.policy(flattened_observations)
            policy_loss_before: Tensor = self.compute_policy_loss(
                flattened_observations, flattened_actions, flattened_advantages
            )
            log_probs_before: Tensor = policy_dist.log_prob(flattened_actions)
            entropies_before: Tensor = policy_dist.entropy()

        # Train policy
        for i in range(self.num_policy_gradients):
            self.train_policy(
                flattened_observations, flattened_actions, flattened_advantages
            )
            approximate_kl_divergence: Tensor = self.compute_approximate_kl_divergence(
                flattened_observations, flattened_actions
            ).detach()
            # Early stopping with a 1.5x tolerance on the KL trust region.
            if approximate_kl_divergence > 1.5 * self.max_kl_divergence:
                logger.info(
                    "Early stopping at update {} due to reaching max KL divergence.".format(
                        i
                    )
                )
                break

        # Refresh the "old" policy snapshot for the next update's likelihood ratios.
        self.old_policy.load_state_dict(self.policy.state_dict())

        # Train value function
        value_function_losses: List[float] = []
        for _ in range(self.num_value_gradients):
            value_function_loss: Tensor = self.train_value_function(
                flattened_observations, flattened_discounted_returns
            )
            value_function_losses.append(value_function_loss.item())

        self.metrics_manager.record_scalar(
            "policy/loss",
            policy_loss_before.item(),
            self.current_total_steps,
            tensorboard=True,
        )
        self.metrics_manager.record_scalar(
            "policy/avarage_entropy",
            torch.mean(entropies_before).item(),
            self.current_total_steps,
            tensorboard=True,
        )
        self.metrics_manager.record_scalar(
            "policy/log_prob_std",
            torch.std(log_probs_before).item(),
            self.current_total_steps,
            tensorboard=True,
        )
        self.metrics_manager.record_scalar(
            "policy/kl_divergence",
            approximate_kl_divergence.item(),
            self.current_total_steps,
            tensorboard=True,
        )
        self.metrics_manager.record_scalar(
            "value_function/average_loss",
            float(np.mean(value_function_losses)),
            self.current_total_steps,
            tensorboard=True,
        )

    def train_policy(
        self,
        flattened_observations: Tensor,
        flattened_actions: Tensor,
        flattened_advantages: Tensor,
    ) -> None:
        """
        Take a single gradient step on the clipped surrogate objective.
        """
        policy_loss: Tensor = self.compute_policy_loss(
            flattened_observations, flattened_actions, flattened_advantages
        )

        self.policy.optimizer.zero_grad()
        policy_loss.backward()
        self.policy.optimizer.step()

    def compute_policy_loss(
        self, observations: Tensor, actions: Tensor, advantages: Tensor
    ) -> Tensor:
        """
        PPO clipped surrogate objective, negated so the optimizer minimizes it.
        """
        policy_dist: Distribution = self.policy(observations)
        log_probs: Tensor = policy_dist.log_prob(actions)

        # The old policy is a fixed snapshot; no gradients flow through it.
        with torch.no_grad():
            old_policy_dist: Distribution = self.old_policy(observations)
            old_log_probs: Tensor = old_policy_dist.log_prob(actions)

        # Calculate surrogate
        likelihood_ratio: Tensor = torch.exp(log_probs - old_log_probs)
        surrogate: Tensor = likelihood_ratio * advantages

        # Clip the likelihood ratio to [1 - clip_range, 1 + clip_range]
        likelihood_ratio_clip: Tensor = torch.clamp(
            likelihood_ratio, min=1 - self.clip_range, max=1 + self.clip_range
        )

        # Calculate surrogate clip
        surrogate_clip: Tensor = likelihood_ratio_clip * advantages

        # Pessimistic (elementwise minimum) bound of the two surrogates.
        policy_loss: Tensor = -torch.min(surrogate, surrogate_clip).mean()

        return policy_loss

    def compute_approximate_kl_divergence(
        self, observations: Tensor, actions: Tensor
    ) -> Tensor:
        """
        First-order approximation E[log pi_old(a|s) - log pi(a|s)] of the KL
        divergence between the old and current policies.
        """
        with torch.no_grad():
            policy_dist: Distribution = self.policy(observations)
            log_probs: Tensor = policy_dist.log_prob(actions)

            old_policy_dist: Distribution = self.old_policy(observations)
            old_log_probs: Tensor = old_policy_dist.log_prob(actions)

        approximate_kl_divergence: Tensor = old_log_probs - log_probs

        return torch.mean(approximate_kl_divergence)

    def train_value_function(
        self, flattened_observations: Tensor, flattened_discounted_returns: Tensor
    ) -> Tensor:
        """
        Take a single gradient step on the value function; return the detached loss.
        """
        value_function_loss: Tensor = self.compute_value_function_loss(
            flattened_observations, flattened_discounted_returns
        )

        self.value_function.optimizer.zero_grad()
        value_function_loss.backward()
        self.value_function.optimizer.step()

        return value_function_loss.detach()

    def compute_value_function_loss(
        self, observations: Tensor, discounted_returns: Tensor
    ) -> Tensor:
        """
        Mean squared error between predicted values and rewards-to-go targets.
        """
        values: Tensor = self.value_function(observations)
        # Drop the trailing singleton dimension so shapes match the targets.
        squeezed_values: Tensor = torch.squeeze(values, -1)
        value_loss: Tensor = F.mse_loss(squeezed_values, discounted_returns)

        return value_loss

    def save_model(self, epoch: int, model_path: str) -> None:
        """
        Save model

        :param epoch: (int) The current epoch.
        :param model_path: (str) The path to save the model.
        """
        torch.save(
            {
                "epoch": epoch,
                "total_steps": self.current_total_steps,
                "policy_state_dict": self.policy.network.state_dict(),
                "policy_optimizer_state_dict": self.policy.optimizer.state_dict(),
                "value_function_state_dict": self.value_function.network.state_dict(),
                "value_function_optimizer_state_dict": self.value_function.optimizer.state_dict(),
            },
            model_path,
        )
import copy
import logging
import os
import time
from typing import Dict, List
import gym
import numpy as np
import torch
from torch import Tensor
from torch.nn import functional as F
from rl_replicas.evaluator import Evaluator
from rl_replicas.experience import Experience
from rl_replicas.metrics_manager import MetricsManager
from rl_replicas.policies import Policy
from rl_replicas.q_function import QFunction
from rl_replicas.replay_buffer import ReplayBuffer
from rl_replicas.samplers import Sampler
from rl_replicas.utils import add_noise_to_get_action, polyak_average
logger = logging.getLogger(__name__)
class DDPG:
"""
Deep Deterministic Policy Gradient (DDPG)
:param policy: (Policy) Policy.
:param exploration_policy: (Policy) Exploration policy.
:param q_function: (QFunction) Q function.
:param env: (gym.Env) Environment.
:param sampler: (Sampler) Sampler.
:param replay_buffer: (ReplayBuffer) Replay buffer.
:param evaluator: (Evaluator) Evaluator.
:param gamma: (float) The discount factor for the cumulative return.
:param polyak_rho: (float) The interpolation factor in polyak averaging for target networks.
:param action_noise_scale: (float) The scale of the noise (std).
"""
def __init__(
self,
policy: Policy,
exploration_policy: Policy,
q_function: QFunction,
env: gym.Env,
sampler: Sampler,
replay_buffer: ReplayBuffer,
evaluator: Evaluator,
gamma: float = 0.99,
polyak_rho: float = 0.995,
action_noise_scale: float = 0.1,
) -> None:
self.policy = policy
self.exploration_policy = exploration_policy
self.q_function = q_function
self.env = env
self.sampler = sampler
self.replay_buffer = replay_buffer
self.evaluator = evaluator
self.gamma = gamma
self.polyak_rho = polyak_rho
self.action_noise_scale = action_noise_scale
self.noised_policy = add_noise_to_get_action(
self.policy, self.env.action_space, self.action_noise_scale
)
self.evaluation_env = gym.make(env.spec.id)
self.target_policy = copy.deepcopy(self.policy)
self.target_q_function = copy.deepcopy(self.q_function)
for param in self.target_policy.network.parameters():
param.requires_grad = False
for param in self.target_q_function.network.parameters():
param.requires_grad = False
def learn(
self,
num_epochs: int = 2000,
batch_size: int = 50,
minibatch_size: int = 100,
num_start_steps: int = 10000,
num_steps_before_update: int = 1000,
num_train_steps: int = 50,
num_evaluation_episodes: int = 5,
evaluation_interval: int = 4000,
model_saving_interval: int = 4000,
output_dir: str = ".",
) -> None:
"""
Learn the model
:param num_epochs: (int) The number of epochs to run and train.
:param batch_size: (int) The number of steps to run per epoch.
;param minibatch_size: (int) The minibatch size for SGD.
:param num_start_steps: (int) The number of steps for exploration action selection at the beginning.
:param num_steps_before_update: (int) The number of steps to perform before policy is updated.
:param num_train_steps: (int) The number of training steps on each epoch.
:param num_evaluation_episodes: (int) The number of evaluation episodes.
:param evaluation_interval: (int) The interval steps between evaluation.
:param model_saving_interval: (int) The interval steps between model saving.
:param output_dir: (str) The output directory.
"""
start_time: float = time.time()
self.current_total_steps: int = 0
self.current_total_episodes: int = 0
os.makedirs(output_dir, exist_ok=True)
self.metrics_manager: MetricsManager = MetricsManager(output_dir)
for current_epoch in range(1, num_epochs + 1):
experience: Experience
if self.current_total_steps < num_start_steps:
experience = self.sampler.sample(batch_size, self.exploration_policy)
else:
experience = self.sampler.sample(batch_size, self.noised_policy)
self.replay_buffer.add_experience(experience)
episode_returns: List[float] = experience.episode_returns
episode_lengths: List[int] = experience.episode_lengths
self.current_total_steps += sum(experience.episode_lengths)
self.current_total_episodes += sum(experience.flattened_dones)
self.metrics_manager.record_scalar("epoch", current_epoch)
self.metrics_manager.record_scalar("total_steps", self.current_total_steps)
self.metrics_manager.record_scalar(
"total_episodes", self.current_total_episodes
)
if len(episode_lengths) > 0:
self.metrics_manager.record_scalar(
"sampling/average_episode_return",
float(np.mean(episode_returns)),
self.current_total_steps,
tensorboard=True,
)
self.metrics_manager.record_scalar(
"sampling/episode_return_std", float(np.std(episode_returns))
)
self.metrics_manager.record_scalar(
"sampling/max_episode_return", float(np.max(episode_returns))
)
self.metrics_manager.record_scalar(
"sampling/min_episode_return", float(np.min(episode_returns))
)
self.metrics_manager.record_scalar(
"sampling/average_episode_length",
float(np.mean(episode_lengths)),
self.current_total_steps,
tensorboard=True,
)
if self.current_total_steps >= num_steps_before_update:
self.train(self.replay_buffer, num_train_steps, minibatch_size)
if (
num_evaluation_episodes > 0
and self.current_total_steps % evaluation_interval == 0
):
evaluation_episode_returns: List[float]
evaluation_episode_lengths: List[int]
(
evaluation_episode_returns,
evaluation_episode_lengths,
) = self.evaluator.evaluate(
self.policy, self.evaluation_env, num_evaluation_episodes
)
self.metrics_manager.record_scalar(
"evaluation/average_episode_return",
float(np.mean(evaluation_episode_returns)),
self.current_total_steps,
tensorboard=True,
)
self.metrics_manager.record_scalar(
"evaluation/episode_return_std",
float(np.std(evaluation_episode_returns)),
)
self.metrics_manager.record_scalar(
"evaluation/max_episode_return",
float(np.max(evaluation_episode_returns)),
)
self.metrics_manager.record_scalar(
"evaluation/min_episode_return",
float(np.min(evaluation_episode_returns)),
)
self.metrics_manager.record_scalar(
"evaluation/average_episode_length",
float(np.mean(evaluation_episode_lengths)),
self.current_total_steps,
tensorboard=True,
)
if self.current_total_steps % model_saving_interval == 0:
model_path: str = os.path.join(output_dir, "model.pt")
logger.debug("Save model")
self.save_model(current_epoch, model_path)
self.metrics_manager.record_scalar("time", time.time() - start_time)
# Dump all metrics stored in this epoch
self.metrics_manager.dump()
self.metrics_manager.close()
def train(
self, replay_buffer: ReplayBuffer, num_train_steps: int, minibatch_size: int
) -> None:
policy_losses: List[float] = []
q_function_losses: List[float] = []
all_q_values: List[float] = []
for _ in range(num_train_steps):
minibatch: Dict[str, np.ndarray] = replay_buffer.sample_minibatch(
minibatch_size
)
observations: Tensor = torch.from_numpy(minibatch["observations"]).float()
actions: Tensor = torch.from_numpy(minibatch["actions"]).float()
rewards: Tensor = torch.from_numpy(minibatch["rewards"]).float()
next_observations: Tensor = torch.from_numpy(
minibatch["next_observations"]
).float()
dones: Tensor = torch.from_numpy(minibatch["dones"]).int()
# For logging
with torch.no_grad():
q_values: Tensor = self.q_function(observations, actions)
all_q_values.extend(q_values.tolist())
targets: Tensor = self.compute_targets(next_observations, rewards, dones)
q_function_loss: Tensor = self.train_q_function(
observations, actions, targets
)
q_function_losses.append(q_function_loss.item())
policy_loss: Tensor = self.train_policy(observations)
policy_losses.append(policy_loss.item())
# Update targets
polyak_average(
self.policy.network.parameters(),
self.target_policy.network.parameters(),
self.polyak_rho,
)
polyak_average(
self.q_function.network.parameters(),
self.target_q_function.network.parameters(),
self.polyak_rho,
)
self.metrics_manager.record_scalar(
"policy/average_loss",
float(np.mean(policy_losses)),
self.current_total_steps,
tensorboard=True,
)
self.metrics_manager.record_scalar(
"q-function/average_loss",
float(np.mean(q_function_losses)),
self.current_total_steps,
tensorboard=True,
)
self.metrics_manager.record_scalar(
"q-function/avarage_q-value",
float(np.mean(all_q_values)),
self.current_total_steps,
tensorboard=True,
)
self.metrics_manager.record_scalar(
"q-function/max_q-value", float(np.max(all_q_values))
)
self.metrics_manager.record_scalar(
"q-function/min_q-value", float(np.min(all_q_values))
)
def train_policy(self, observations: Tensor) -> Tensor:
# Freeze Q-network so you don't waste computational effort
for param in self.q_function.network.parameters():
param.requires_grad = False
policy_actions: Tensor = self.policy(observations)
policy_q_values: Tensor = self.q_function(observations, policy_actions)
policy_loss: Tensor = -torch.mean(policy_q_values)
self.policy.optimizer.zero_grad()
policy_loss.backward()
self.policy.optimizer.step()
# Unfreeze Q-network
for param in self.q_function.network.parameters():
param.requires_grad = True
return policy_loss.detach()
def compute_targets(
self, next_observations: Tensor, rewards: Tensor, dones: Tensor
) -> Tensor:
with torch.no_grad():
next_actions: Tensor = self.target_policy(next_observations)
target_q_values: Tensor = self.target_q_function(
next_observations, next_actions
)
targets: Tensor = rewards + self.gamma * (1 - dones) * target_q_values
return targets
def train_q_function(
self, observations: Tensor, actions: Tensor, targets: Tensor
) -> Tensor:
q_values: Tensor = self.q_function(observations, actions)
q_function_loss: Tensor = F.mse_loss(q_values, targets)
self.q_function.optimizer.zero_grad()
q_function_loss.backward()
self.q_function.optimizer.step()
return q_function_loss.detach()
    def save_model(self, current_epoch: int, model_path: str) -> None:
        """
        Save model
        :param current_epoch: (int) The current epoch.
        :param model_path: (str) The path to save the model.
        """
        # Checkpoint everything needed to resume training: online networks,
        # their optimizers, and the (gradient-free) target networks.
        torch.save(
            {
                "epoch": current_epoch,
                "total_steps": self.current_total_steps,
                "policy_state_dict": self.policy.network.state_dict(),
                "policy_optimizer_state_dict": self.policy.optimizer.state_dict(),
                "target_policy_state_dict": self.target_policy.network.state_dict(),
                "q_function_state_dict": self.q_function.network.state_dict(),
                "q_function_optimizer_state_dict": self.q_function.optimizer.state_dict(),
                "target_q_function_state_dict": self.target_q_function.network.state_dict(),
            },
            model_path,
        )
import copy
import logging
import os
import time
from typing import Callable, List
import gym
import numpy as np
import torch
from torch import Tensor
from torch.distributions import Distribution, kl
from torch.nn import functional as F
from rl_replicas.experience import Experience
from rl_replicas.metrics_manager import MetricsManager
from rl_replicas.policies import Policy
from rl_replicas.samplers import Sampler
from rl_replicas.utils import (
bootstrap_rewards_with_last_values,
compute_values,
discounted_cumulative_sums,
gae,
normalize_tensor,
)
from rl_replicas.value_function import ValueFunction
# Module-level logger for this algorithm.
logger = logging.getLogger(__name__)
class TRPO:
    """
    Trust Region Policy Optimization with GAE for advantage estimation
    :param policy: (Policy) Policy.
    :param value_function: (ValueFunction) Value function.
    :param env: (gym.Env) Environment.
    :param sampler: (Sampler) Sampler.
    :param gamma: (float) The discount factor for the cumulative return.
    :param gae_lambda: (float) The factor for trade-off of bias vs variance for GAE.
    :param num_value_gradients: (int) The number of gradient descent steps to take on value function per epoch.
    """
    def __init__(
        self,
        policy: Policy,
        value_function: ValueFunction,
        env: gym.Env,
        sampler: Sampler,
        gamma: float = 0.99,
        gae_lambda: float = 0.97,
        num_value_gradients: int = 80,
    ) -> None:
        self.policy = policy
        self.value_function = value_function
        self.env = env
        self.sampler = sampler
        self.gamma = gamma
        self.gae_lambda = gae_lambda
        self.num_value_gradients = num_value_gradients
        # Snapshot of the policy from the previous update; used for the
        # surrogate likelihood ratio and the KL trust-region constraint.
        self.old_policy: Policy = copy.deepcopy(self.policy)
    def learn(
        self,
        num_epochs: int = 50,
        batch_size: int = 4000,
        model_saving_interval: int = 4000,
        output_dir: str = ".",
    ) -> None:
        """
        Learn the model
        :param num_epochs: (int) The number of epochs to run and train.
        :param batch_size: (int) The number of steps to run per epoch.
        :param model_saving_interval: (int) The interval steps between model saving.
        :param output_dir: (str) The output directory.
        """
        start_time: float = time.time()
        self.current_total_steps: int = 0
        self.current_total_episodes: int = 0
        os.makedirs(output_dir, exist_ok=True)
        self.metrics_manager: MetricsManager = MetricsManager(output_dir)
        for current_epoch in range(1, num_epochs + 1):
            # On-policy: sample a fresh batch with the current policy each epoch.
            experience: Experience = self.sampler.sample(batch_size, self.policy)
            episode_returns: List[float] = experience.episode_returns
            episode_lengths: List[int] = experience.episode_lengths
            self.current_total_steps += sum(experience.episode_lengths)
            self.current_total_episodes += sum(experience.episode_dones)
            self.metrics_manager.record_scalar("epoch", current_epoch)
            self.metrics_manager.record_scalar("total_steps", self.current_total_steps)
            self.metrics_manager.record_scalar(
                "total_episodes", self.current_total_episodes
            )
            self.metrics_manager.record_scalar(
                "sampling/average_episode_return",
                float(np.mean(episode_returns)),
                self.current_total_steps,
                tensorboard=True,
            )
            self.metrics_manager.record_scalar(
                "sampling/episode_return_std", float(np.std(episode_returns))
            )
            self.metrics_manager.record_scalar(
                "sampling/max_episode_return", float(np.max(episode_returns))
            )
            self.metrics_manager.record_scalar(
                "sampling/min_episode_return", float(np.min(episode_returns))
            )
            self.metrics_manager.record_scalar(
                "sampling/average_episode_length",
                float(np.mean(episode_lengths)),
                self.current_total_steps,
                tensorboard=True,
            )
            self.train(experience)
            if self.current_total_steps % model_saving_interval == 0:
                model_path: str = os.path.join(output_dir, "model.pt")
                logger.debug("Save model")
                self.save_model(current_epoch, model_path)
            self.metrics_manager.record_scalar("time", time.time() - start_time)
            # Dump all metrics stored in this epoch
            self.metrics_manager.dump()
        self.metrics_manager.close()
    def train(self, experience: Experience) -> None:
        """
        Run one TRPO update (policy step + value-function regression) on the
        sampled experience.
        :param experience: (Experience) The on-policy batch from the sampler.
        """
        # Per-episode value estimates, including the value of the final
        # (bootstrap) observation of each episode.
        values: List[np.ndarray] = compute_values(
            experience.observations_with_last_observation, self.value_function
        )
        last_values: List[float] = [
            float(episode_values[-1]) for episode_values in values
        ]
        # Append V(s_T) to each reward sequence so truncated episodes are
        # bootstrapped rather than treated as terminal.
        bootstrapped_rewards: List[np.ndarray] = bootstrap_rewards_with_last_values(
            experience.rewards, experience.episode_dones, last_values
        )
        # Discounted returns; [:-1] drops the appended bootstrap value itself.
        discounted_returns: List[np.ndarray] = [
            discounted_cumulative_sums(episode_rewards, self.gamma)[:-1]
            for episode_rewards in bootstrapped_rewards
        ]
        flattened_discounted_returns: Tensor = torch.from_numpy(
            np.concatenate(discounted_returns)
        ).float()
        flattened_observations: Tensor = torch.from_numpy(
            np.concatenate(experience.observations)
        ).float()
        flattened_actions: Tensor = torch.from_numpy(
            np.concatenate(experience.actions)
        ).float()
        # Generalized Advantage Estimation per episode.
        gaes: List[np.ndarray] = [
            gae(episode_rewards, self.gamma, episode_values, self.gae_lambda)
            for episode_rewards, episode_values in zip(bootstrapped_rewards, values)
        ]
        flattened_advantages: Tensor = torch.from_numpy(np.concatenate(gaes)).float()
        flattened_advantages = normalize_tensor(flattened_advantages)
        def compute_surrogate_loss() -> Tensor:
            # Negated surrogate objective: -E[ratio * advantage], where the
            # ratio is taken against the frozen old policy.
            policy_dist: Distribution = self.policy(flattened_observations)
            log_probs: Tensor = policy_dist.log_prob(flattened_actions)
            with torch.no_grad():
                old_policy_dist: Distribution = self.old_policy(flattened_observations)
                old_log_probs: Tensor = old_policy_dist.log_prob(flattened_actions)
            likelihood_ratio: Tensor = torch.exp(log_probs - old_log_probs)
            surrogate_loss: Tensor = -torch.mean(
                likelihood_ratio * flattened_advantages
            )
            return surrogate_loss
        def compute_kl_constraint() -> Tensor:
            # Mean KL(old || new) over the batch; the trust-region constraint.
            policy_dist: Distribution = self.policy(flattened_observations)
            with torch.no_grad():
                old_policy_dist: Distribution = self.old_policy(flattened_observations)
            kl_constraint: Tensor = kl.kl_divergence(old_policy_dist, policy_dist)
            return torch.mean(kl_constraint)
        # For logging
        policy_loss_before: Tensor = compute_surrogate_loss()
        with torch.no_grad():
            policy_dist_before: Distribution = self.policy(flattened_observations)
            log_probs_before: Tensor = policy_dist_before.log_prob(flattened_actions)
            entropies_before: Tensor = policy_dist_before.entropy()
        self.train_policy(
            compute_surrogate_loss,
            compute_kl_constraint,
            flattened_observations,
            flattened_actions,
            flattened_advantages,
        )
        # Sync the frozen snapshot AFTER the policy step so the next update's
        # likelihood ratio is measured against this iteration's policy.
        self.old_policy.load_state_dict(self.policy.state_dict())
        # Train value function
        value_function_losses: List[float] = []
        for _ in range(self.num_value_gradients):
            value_function_loss: Tensor = self.train_value_function(
                flattened_observations, flattened_discounted_returns
            )
            value_function_losses.append(value_function_loss.item())
        self.metrics_manager.record_scalar(
            "policy/loss",
            policy_loss_before.item(),
            self.current_total_steps,
            tensorboard=True,
        )
        # NOTE(review): "avarage" is a typo in the metric name; kept as-is
        # because downstream dashboards may already key on this string.
        self.metrics_manager.record_scalar(
            "policy/avarage_entropy",
            torch.mean(entropies_before).item(),
            self.current_total_steps,
            tensorboard=True,
        )
        self.metrics_manager.record_scalar(
            "policy/log_prob_std",
            torch.std(log_probs_before).item(),
            self.current_total_steps,
            tensorboard=True,
        )
        self.metrics_manager.record_scalar(
            "value_function/average_loss",
            float(np.mean(value_function_losses)),
            self.current_total_steps,
            tensorboard=True,
        )
    def train_policy(
        self,
        compute_surrogate_loss: Callable,
        compute_kl_constraint: Callable,
        flattened_observations: Tensor,
        flattened_actions: Tensor,
        flattened_advantages: Tensor,
    ) -> None:
        """
        Take one trust-region policy step.
        :param compute_surrogate_loss: (Callable) Closure returning the surrogate loss.
        :param compute_kl_constraint: (Callable) Closure returning the mean KL divergence.
        """
        policy_loss: Tensor = compute_surrogate_loss()
        self.policy.optimizer.zero_grad()
        policy_loss.backward()
        # NOTE: unlike standard torch optimizers, this optimizer's step() takes
        # the loss and KL closures (e.g. conjugate-gradient + line search).
        self.policy.optimizer.step(compute_surrogate_loss, compute_kl_constraint)
    def train_value_function(
        self, flattened_observations: Tensor, flattened_discounted_returns: Tensor
    ) -> Tensor:
        """
        Run one gradient step fitting the value function to the discounted returns.
        :return: (Tensor) The detached value-function loss.
        """
        value_function_loss: Tensor = self.compute_value_function_loss(
            flattened_observations, flattened_discounted_returns
        )
        self.value_function.optimizer.zero_grad()
        value_function_loss.backward()
        self.value_function.optimizer.step()
        return value_function_loss.detach()
    def compute_value_function_loss(
        self, observations: Tensor, discounted_returns: Tensor
    ) -> Tensor:
        """
        MSE between predicted values (squeezed to match target shape) and the
        discounted returns.
        """
        values: Tensor = self.value_function(observations)
        squeezed_values: Tensor = torch.squeeze(values, -1)
        value_loss: Tensor = F.mse_loss(squeezed_values, discounted_returns)
        return value_loss
    def save_model(self, epoch: int, model_path: str) -> None:
        """
        Save model
        :param epoch: (int) The current epoch.
        :param model_path: (str) The path to save the model.
        """
        torch.save(
            {
                "epoch": epoch,
                "total_steps": self.current_total_steps,
                "policy_state_dict": self.policy.network.state_dict(),
                "policy_optimizer_state_dict": self.policy.optimizer.state_dict(),
                "value_function_state_dict": self.value_function.network.state_dict(),
                "value_function_optimizer_state_dict": self.value_function.optimizer.state_dict(),
            },
            model_path,
        )
import copy
import logging
import os
import time
from typing import Dict, List
import gym
import numpy as np
import torch
from torch import Tensor
from torch.nn import functional as F
from rl_replicas.evaluator import Evaluator
from rl_replicas.experience import Experience
from rl_replicas.metrics_manager import MetricsManager
from rl_replicas.policies import Policy
from rl_replicas.q_function import QFunction
from rl_replicas.replay_buffer import ReplayBuffer
from rl_replicas.samplers import Sampler
from rl_replicas.utils import add_noise_to_get_action, polyak_average
# Module-level logger for this algorithm.
logger = logging.getLogger(__name__)
class TD3:
    """
    Twin Delayed Deep Deterministic Policy Gradient (TD3)
    :param policy: (Policy) Policy.
    :param exploration_policy: (Policy) Exploration policy.
    :param q_function_1: (QFunction) Q function.
    :param q_function_2: (QFunction) Q function.
    :param env: (gym.Env) Environment.
    :param sampler: (Sampler) Sampler.
    :param replay_buffer: (ReplayBuffer) Replay buffer.
    :param evaluator: (Evaluator) Evaluator.
    :param gamma: (float) The discount factor for the cumulative return.
    :param polyak_rho: (float) The interpolation factor in polyak averaging for target networks.
    :param action_noise_scale: (float) The scale of the noise (std) for the policy to explore better.
    :param target_noise_scale: (float) The scale of the smoothing noise (std) for the target policy to exploit harder.
    :param target_noise_clip: (float) The limit for absolute value of the target policy smoothing noise.
    :param policy_delay: (int) The policy will only be updated once every policy_delay times for each update of
        the Q-networks.
    """
    def __init__(
        self,
        policy: Policy,
        exploration_policy: Policy,
        q_function_1: QFunction,
        q_function_2: QFunction,
        env: gym.Env,
        sampler: Sampler,
        replay_buffer: ReplayBuffer,
        evaluator: Evaluator,
        gamma: float = 0.99,
        polyak_rho: float = 0.995,
        action_noise_scale: float = 0.1,
        target_noise_scale: float = 0.2,
        target_noise_clip: float = 0.5,
        policy_delay: int = 2,
    ) -> None:
        self.policy = policy
        self.exploration_policy = exploration_policy
        self.q_function_1 = q_function_1
        self.q_function_2 = q_function_2
        self.env = env
        self.sampler = sampler
        self.replay_buffer = replay_buffer
        self.evaluator = evaluator
        self.gamma = gamma
        self.polyak_rho = polyak_rho
        self.action_noise_scale = action_noise_scale
        # Policy wrapped with Gaussian action noise; used while sampling for exploration.
        self.noised_policy = add_noise_to_get_action(
            self.policy, self.env.action_space, self.action_noise_scale
        )
        # Separate environment instance so evaluation rollouts do not disturb
        # the training environment's episode state.
        self.evaluation_env = gym.make(env.spec.id)
        self.target_policy = copy.deepcopy(self.policy)
        # Target networks are updated only via polyak averaging, never by gradients.
        for param in self.target_policy.network.parameters():
            param.requires_grad = False
        self.target_noise_scale = target_noise_scale
        self.target_noise_clip = target_noise_clip
        self.policy_delay = policy_delay
        self.target_q_function_1 = copy.deepcopy(self.q_function_1)
        self.target_q_function_2 = copy.deepcopy(self.q_function_2)
        for param in self.target_q_function_1.network.parameters():
            param.requires_grad = False
        for param in self.target_q_function_2.network.parameters():
            param.requires_grad = False
    def learn(
        self,
        num_epochs: int = 2000,
        batch_size: int = 50,
        minibatch_size: int = 100,
        num_start_steps: int = 10000,
        num_steps_before_update: int = 1000,
        num_train_steps: int = 50,
        num_evaluation_episodes: int = 5,
        evaluation_interval: int = 4000,
        model_saving_interval: int = 4000,
        output_dir: str = ".",
    ) -> None:
        """
        Learn the model
        :param num_epochs: (int) The number of epochs to run and train.
        :param batch_size: (int) The number of steps to run per epoch.
        :param minibatch_size: (int) The minibatch size for SGD.
        :param num_start_steps: (int) The number of steps for exploration action selection at the beginning.
        :param num_steps_before_update: (int) The number of steps to perform before policy is updated.
        :param num_train_steps: (int) The number of training steps on each epoch.
        :param num_evaluation_episodes: (int) The number of evaluation episodes.
        :param evaluation_interval: (int) The interval steps between evaluation.
        :param model_saving_interval: (int) The interval steps between model saving.
        :param output_dir: (str) The output directory.
        """
        start_time: float = time.time()
        self.current_total_steps: int = 0
        self.current_total_episodes: int = 0
        os.makedirs(output_dir, exist_ok=True)
        self.metrics_manager: MetricsManager = MetricsManager(output_dir)
        for current_epoch in range(1, num_epochs + 1):
            experience: Experience
            # Pure exploration at the start fills the replay buffer with
            # diverse data before the learned policy takes over.
            if self.current_total_steps < num_start_steps:
                experience = self.sampler.sample(batch_size, self.exploration_policy)
            else:
                experience = self.sampler.sample(batch_size, self.noised_policy)
            self.replay_buffer.add_experience(experience)
            episode_returns: List[float] = experience.episode_returns
            episode_lengths: List[int] = experience.episode_lengths
            self.current_total_steps += sum(experience.episode_lengths)
            # NOTE(review): other algorithms in this package count episodes via
            # experience.episode_dones; confirm flattened_dones is equivalent here.
            self.current_total_episodes += sum(experience.flattened_dones)
            self.metrics_manager.record_scalar("epoch", current_epoch)
            self.metrics_manager.record_scalar("total_steps", self.current_total_steps)
            self.metrics_manager.record_scalar(
                "total_episodes", self.current_total_episodes
            )
            # A short epoch may contain no completed episode; skip the
            # episode statistics in that case to avoid NaNs from np.mean([]).
            if len(episode_lengths) > 0:
                self.metrics_manager.record_scalar(
                    "sampling/average_episode_return",
                    float(np.mean(episode_returns)),
                    self.current_total_steps,
                    tensorboard=True,
                )
                self.metrics_manager.record_scalar(
                    "sampling/episode_return_std", float(np.std(episode_returns))
                )
                self.metrics_manager.record_scalar(
                    "sampling/max_episode_return", float(np.max(episode_returns))
                )
                self.metrics_manager.record_scalar(
                    "sampling/min_episode_return", float(np.min(episode_returns))
                )
                self.metrics_manager.record_scalar(
                    "sampling/average_episode_length",
                    float(np.mean(episode_lengths)),
                    self.current_total_steps,
                    tensorboard=True,
                )
            # Updates start only after a warm-up period of environment steps.
            if self.current_total_steps >= num_steps_before_update:
                self.train(self.replay_buffer, num_train_steps, minibatch_size)
            if (
                num_evaluation_episodes > 0
                and self.current_total_steps % evaluation_interval == 0
            ):
                evaluation_episode_returns: List[float]
                evaluation_episode_lengths: List[int]
                (
                    evaluation_episode_returns,
                    evaluation_episode_lengths,
                ) = self.evaluator.evaluate(
                    self.policy, self.evaluation_env, num_evaluation_episodes
                )
                self.metrics_manager.record_scalar(
                    "evaluation/average_episode_return",
                    float(np.mean(evaluation_episode_returns)),
                    self.current_total_steps,
                    tensorboard=True,
                )
                self.metrics_manager.record_scalar(
                    "evaluation/episode_return_std",
                    float(np.std(evaluation_episode_returns)),
                )
                self.metrics_manager.record_scalar(
                    "evaluation/max_episode_return",
                    float(np.max(evaluation_episode_returns)),
                )
                self.metrics_manager.record_scalar(
                    "evaluation/min_episode_return",
                    float(np.min(evaluation_episode_returns)),
                )
                self.metrics_manager.record_scalar(
                    "evaluation/average_episode_length",
                    float(np.mean(evaluation_episode_lengths)),
                    self.current_total_steps,
                    tensorboard=True,
                )
            if self.current_total_steps % model_saving_interval == 0:
                model_path: str = os.path.join(output_dir, "model.pt")
                logger.debug("Save model")
                self.save_model(current_epoch, model_path)
            self.metrics_manager.record_scalar("time", time.time() - start_time)
            # Dump all metrics stored in this epoch
            self.metrics_manager.dump()
        self.metrics_manager.close()
    def train(
        self, replay_buffer: ReplayBuffer, num_train_steps: int, minibatch_size: int
    ) -> None:
        """
        Run `num_train_steps` TD3 updates on minibatches from the replay buffer.
        :param replay_buffer: (ReplayBuffer) Source of off-policy transitions.
        :param num_train_steps: (int) Number of critic updates to run.
        :param minibatch_size: (int) Minibatch size for each update.
        """
        policy_losses: List[float] = []
        q_function_1_losses: List[float] = []
        q_function_2_losses: List[float] = []
        all_q_values_1: List[float] = []
        all_q_values_2: List[float] = []
        for train_step in range(num_train_steps):
            minibatch: Dict[str, np.ndarray] = replay_buffer.sample_minibatch(
                minibatch_size
            )
            observations: Tensor = torch.from_numpy(minibatch["observations"]).float()
            actions: Tensor = torch.from_numpy(minibatch["actions"]).float()
            rewards: Tensor = torch.from_numpy(minibatch["rewards"]).float()
            next_observations: Tensor = torch.from_numpy(
                minibatch["next_observations"]
            ).float()
            dones: Tensor = torch.from_numpy(minibatch["dones"]).int()
            # For logging
            with torch.no_grad():
                q_values_1: Tensor = self.q_function_1(observations, actions)
                q_values_2: Tensor = self.q_function_2(observations, actions)
                all_q_values_1.extend(q_values_1.tolist())
                all_q_values_2.extend(q_values_2.tolist())
            # Both critics regress towards the same clipped double-Q target.
            targets: Tensor = self.compute_targets(next_observations, rewards, dones)
            q_function_1_loss: Tensor = self.train_q_function(
                self.q_function_1, observations, actions, targets
            )
            q_function_2_loss: Tensor = self.train_q_function(
                self.q_function_2, observations, actions, targets
            )
            q_function_1_losses.append(q_function_1_loss.item())
            q_function_2_losses.append(q_function_2_loss.item())
            # Delayed updates: the policy and the target networks are refreshed
            # only every `policy_delay` critic updates.
            if train_step % self.policy_delay == 0:
                policy_loss: Tensor = self.train_policy(observations)
                policy_losses.append(policy_loss.item())
                # Update targets
                polyak_average(
                    self.policy.network.parameters(),
                    self.target_policy.network.parameters(),
                    self.polyak_rho,
                )
                polyak_average(
                    self.q_function_1.network.parameters(),
                    self.target_q_function_1.network.parameters(),
                    self.polyak_rho,
                )
                polyak_average(
                    self.q_function_2.network.parameters(),
                    self.target_q_function_2.network.parameters(),
                    self.polyak_rho,
                )
        self.metrics_manager.record_scalar(
            "policy/average_loss",
            float(np.mean(policy_losses)),
            self.current_total_steps,
            tensorboard=True,
        )
        self.metrics_manager.record_scalar(
            "q-function_1/average_loss",
            float(np.mean(q_function_1_losses)),
            self.current_total_steps,
            tensorboard=True,
        )
        self.metrics_manager.record_scalar(
            "q-function_2/average_loss",
            float(np.mean(q_function_2_losses)),
            self.current_total_steps,
            tensorboard=True,
        )
        # NOTE(review): "avarage" is a typo in the metric names below; kept
        # as-is because downstream dashboards may already key on these strings.
        self.metrics_manager.record_scalar(
            "q-function_1/avarage_q-value",
            float(np.mean(all_q_values_1)),
            self.current_total_steps,
            tensorboard=True,
        )
        self.metrics_manager.record_scalar(
            "q-function_1/max_q-value", float(np.max(all_q_values_1))
        )
        self.metrics_manager.record_scalar(
            "q-function_1/min_q-value", float(np.min(all_q_values_1))
        )
        self.metrics_manager.record_scalar(
            "q-function_2/avarage_q-value",
            float(np.mean(all_q_values_2)),
            self.current_total_steps,
            tensorboard=True,
        )
        self.metrics_manager.record_scalar(
            "q-function_2/max_q-value", float(np.max(all_q_values_2))
        )
        self.metrics_manager.record_scalar(
            "q-function_2/min_q-value", float(np.min(all_q_values_2))
        )
    def train_policy(self, observations: Tensor) -> Tensor:
        """
        One gradient step on the policy, maximizing Q1 of the policy's actions.
        :param observations: (Tensor) Batch of observations.
        :return: (Tensor) The detached policy loss.
        """
        # Freeze Q-networks
        for param in self.q_function_1.network.parameters():
            param.requires_grad = False
        for param in self.q_function_2.network.parameters():
            param.requires_grad = False
        policy_actions: Tensor = self.policy(observations)
        # TD3 uses only the first critic for the policy objective.
        policy_q_values: Tensor = self.q_function_1(observations, policy_actions)
        policy_loss: Tensor = -torch.mean(policy_q_values)
        self.policy.optimizer.zero_grad()
        policy_loss.backward()
        self.policy.optimizer.step()
        # Unfreeze Q-networks
        for param in self.q_function_1.network.parameters():
            param.requires_grad = True
        for param in self.q_function_2.network.parameters():
            param.requires_grad = True
        return policy_loss.detach()
    def compute_targets(
        self, next_observations: Tensor, rewards: Tensor, dones: Tensor
    ) -> Tensor:
        """
        Compute TD targets with target-policy smoothing and clipped double-Q.
        :param next_observations: (Tensor) Batch of next observations.
        :param rewards: (Tensor) Batch of rewards.
        :param dones: (Tensor) Batch of done flags (0 or 1).
        :return: (Tensor) TD targets shared by both critics.
        """
        with torch.no_grad():
            next_actions: Tensor = self.target_policy(next_observations)
        # Target policy smoothing: add clipped Gaussian noise to the target
        # action so the critic target is smoothed over nearby actions.
        epsilon: Tensor = self.target_noise_scale * torch.randn_like(next_actions)
        epsilon = torch.clamp(epsilon, -self.target_noise_clip, self.target_noise_clip)
        next_actions = next_actions + epsilon
        # assumes a symmetric action space where high == -low — TODO confirm
        action_limit: float = self.env.action_space.high[0]
        next_actions = torch.clamp(next_actions, -action_limit, action_limit)
        with torch.no_grad():
            target_q_values_1: Tensor = self.target_q_function_1(
                next_observations, next_actions
            )
            target_q_values_2: Tensor = self.target_q_function_2(
                next_observations, next_actions
            )
        # Clipped double-Q: take the smaller of the two target estimates to
        # counteract overestimation bias.
        target_q_values: Tensor = torch.min(target_q_values_1, target_q_values_2)
        targets: Tensor = rewards + self.gamma * (1 - dones) * target_q_values
        return targets
    def train_q_function(
        self,
        q_function: QFunction,
        observations: Tensor,
        actions: Tensor,
        targets: Tensor,
    ) -> Tensor:
        """
        One SGD step minimizing the MSE between Q(s, a) and the TD targets.
        :param q_function: (QFunction) The critic to update (Q1 or Q2).
        :return: (Tensor) The detached MSE loss.
        """
        q_values: Tensor = q_function(observations, actions)
        q_function_loss: Tensor = F.mse_loss(q_values, targets)
        q_function.optimizer.zero_grad()
        q_function_loss.backward()
        q_function.optimizer.step()
        return q_function_loss.detach()
    def save_model(self, current_epoch: int, model_path: str) -> None:
        """
        Save model
        :param current_epoch: (int) The current epoch.
        :param model_path: (str) The path to save the model.
        """
        # Checkpoint everything needed to resume training: online networks,
        # their optimizers, and the (gradient-free) target networks.
        torch.save(
            {
                "epoch": current_epoch,
                "total_steps": self.current_total_steps,
                "policy_state_dict": self.policy.network.state_dict(),
                "policy_optimizer_state_dict": self.policy.optimizer.state_dict(),
                "target_policy_state_dict": self.target_policy.network.state_dict(),
                "q_function_1_state_dict": self.q_function_1.network.state_dict(),
                "q_function_1_optimizer_state_dict": self.q_function_1.optimizer.state_dict(),
                "target_q_function_1_state_dict": self.target_q_function_1.network.state_dict(),
                "q_function_2_state_dict": self.q_function_2.network.state_dict(),
                "q_function_2_optimizer_state_dict": self.q_function_2.optimizer.state_dict(),
                "target_q_function_2_state_dict": self.target_q_function_2.network.state_dict(),
            },
            model_path,
        )
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras import Model
from tensorflow.keras.initializers import Constant, VarianceScaling
from tensorflow.keras.layers import Dense, Lambda
from rl_toolkit.networks.layers import MultivariateGaussianNoise
# Fan-in scaled uniform initializer shared by all Dense layers in this module.
uniform_initializer = VarianceScaling(distribution="uniform", mode="fan_in", scale=1.0)
class Actor(Model):
    """
    Actor
    ===============
    Attributes:
        units (list): list of the numbers of units in each layer
        n_outputs (int): number of outputs
        clip_mean_min (float): the minimum value of mean
        clip_mean_max (float): the maximum value of mean
        init_noise (float): initialization of the Actor's noise
    References:
        - [Soft Actor-Critic Algorithms and Applications](https://arxiv.org/abs/1812.05905)
    """
    def __init__(
        self,
        units: list,
        n_outputs: int,
        clip_mean_min: float,
        clip_mean_max: float,
        init_noise: float,
        **kwargs
    ):
        super(Actor, self).__init__(**kwargs)
        # 1. layer
        self.fc_0 = Dense(
            units=units[0],
            activation="relu",
            kernel_initializer=uniform_initializer,
        )
        # 2. layer TODO(markub3327): Transformer
        self.fc_1 = Dense(
            units=units[1],
            activation="relu",
            kernel_initializer=uniform_initializer,
        )
        # Deterministic actions: linear head producing the distribution mean
        self.mean = Dense(
            n_outputs,
            activation="linear",
            kernel_initializer=uniform_initializer,
            name="mean",
        )
        self.clip_mean = Lambda(
            lambda x: tf.clip_by_value(x, clip_mean_min, clip_mean_max),
            name="clip_mean",
        )
        # Stochastic actions: state-dependent exploration noise head
        self.noise = MultivariateGaussianNoise(
            n_outputs,
            kernel_initializer=Constant(value=init_noise),
            name="noise",
        )
        # Output transfer function: tanh squashes actions into (-1, 1)
        self.bijector = tfp.bijectors.Tanh()
    def reset_noise(self):
        """Resample the exploration-noise matrix of the noise layer."""
        self.noise.sample_weights()
    def call(self, inputs, with_log_prob=True, deterministic=None):
        """Forward pass returning [action, log_prob]; log_prob is None when
        acting deterministically or when with_log_prob is falsy."""
        # 1. layer
        x = self.fc_0(inputs)
        # 2. layer
        latent_sde = self.fc_1(x)
        # Output layer
        mean = self.mean(latent_sde)
        mean = self.clip_mean(mean)
        if deterministic:
            # Greedy action: squashed mean, no noise, no log-probability.
            action = self.bijector.forward(mean)
            log_prob = None
        else:
            noise = self.noise(latent_sde)
            action = self.bijector.forward(mean + noise)
            if with_log_prob:
                # Variance of the state-dependent noise: (latent^2) @ (scale^2).
                variance = tf.matmul(tf.square(latent_sde), tf.square(self.noise.scale))
                # Gaussian in pre-tanh space, transformed by the tanh bijector so
                # log_prob accounts for the squashing change of variables.
                pi_distribution = tfp.distributions.TransformedDistribution(
                    distribution=tfp.distributions.MultivariateNormalDiag(
                        loc=mean, scale_diag=tf.sqrt(variance + 1e-6)
                    ),
                    bijector=self.bijector,
                )
                log_prob = pi_distribution.log_prob(action)[..., tf.newaxis]
            else:
                log_prob = None
        return [action, log_prob]
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.initializers import VarianceScaling
from tensorflow.keras.layers import Activation, Add, Dense
# Fan-in scaled uniform initializer shared by all Dense layers in this module.
uniform_initializer = VarianceScaling(distribution="uniform", mode="fan_in", scale=1.0)
class Critic(Model):
    """
    Critic
    ===============
    Attributes:
        units (list): list of the numbers of units in each layer
        n_quantiles (int): number of predicted quantiles
    References:
        - [Controlling Overestimation Bias with Truncated Mixture of Continuous Distributional Quantile Critics](https://arxiv.org/abs/2005.04269)
    """
    def __init__(self, units: list, n_quantiles: int, **kwargs):
        super(Critic, self).__init__(**kwargs)
        # 1. layer
        self.fc_0 = Dense(
            units=units[0],
            activation="relu",
            kernel_initializer=uniform_initializer,
        )
        # 2. layer TODO(markub3327): Transformer
        # State and action are embedded separately (fc_1 / fc_2) and merged
        # by addition before the shared ReLU.
        self.fc_1 = Dense(
            units=units[1],
            kernel_initializer=uniform_initializer,
        )
        self.fc_2 = Dense(
            units=units[1],
            kernel_initializer=uniform_initializer,
        )
        self.add_0 = Add()
        self.activ_0 = Activation("relu")
        # Output layer
        self.quantiles = Dense(
            n_quantiles,
            activation="linear",
            kernel_initializer=uniform_initializer,
            name="quantiles",
        )
    def call(self, inputs):
        """Forward pass; `inputs` is a [state, action] pair. Returns the
        predicted quantiles of the return distribution."""
        # 1. layer
        state = self.fc_0(inputs[0])
        # 2. layer
        state = self.fc_1(state)
        action = self.fc_2(inputs[1])
        x = self.add_0([state, action])
        x = self.activ_0(x)
        # Output layer
        quantiles = self.quantiles(x)
        return quantiles
class MultiCritic(Model):
    """
    MultiCritic
    ===============
    Attributes:
        units (list): list of the numbers of units in each layer
        n_quantiles (int): number of predicted quantiles
        top_quantiles_to_drop (int): number of quantiles to drop
        n_critics (int): number of critic networks
    """
    def __init__(
        self,
        units: list,
        n_quantiles: int,
        top_quantiles_to_drop: int,
        n_critics: int,
        **kwargs
    ):
        super(MultiCritic, self).__init__(**kwargs)
        self.n_quantiles = n_quantiles
        self.quantiles_total = n_quantiles * n_critics
        # NOTE(review): stored but not used inside this class — presumably
        # consumed by the training loop when truncating quantiles; confirm.
        self.top_quantiles_to_drop = top_quantiles_to_drop
        # init critics
        self.models = [Critic(units, n_quantiles) for _ in range(n_critics)]
    def call(self, inputs):
        """Run every critic on the same [state, action] inputs; returns
        quantiles stacked to shape (batch, n_critics, n_quantiles)."""
        quantiles = tf.stack([model(inputs) for model in self.models], axis=1)
        return quantiles
    def summary(self):
        """Print a Keras summary for each underlying critic network."""
        for model in self.models:
            model.summary()
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras import constraints, initializers, regularizers
from tensorflow.keras.layers import Layer
class MultivariateGaussianNoise(Layer):
    """
    Multivariate Gaussian Noise for exploration
    ===========
    Attributes:
        units (int): number of noisy units
        kernel_initializer: initializer function applied to the `kernel` weights matrix
        kernel_regularizer: regularizer function applied to the `kernel` weights matrix
        kernel_constraint: constraint function applied to the `kernel` weights matrix
    References:
        - [Generalized State-Dependent Exploration for Deep Reinforcement Learning in Robotics](https://arxiv.org/abs/2005.05719)
    """
    def __init__(
        self,
        units: int,
        kernel_initializer,
        kernel_regularizer=None,
        kernel_constraint=None,
        **kwargs
    ):
        super(MultivariateGaussianNoise, self).__init__(**kwargs)
        self.units = units
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
    def build(self, input_shape):
        """Create the trainable noise parameters and the sampled noise matrix."""
        super(MultivariateGaussianNoise, self).build(input_shape)
        # `kernel` parameterizes the per-weight noise std (via softplus, see
        # the `scale` property); it is trained together with the model.
        self.kernel = self.add_weight(
            name="kernel",
            shape=(input_shape[-1], self.units),
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
        )
        # `epsilon` holds the currently sampled noise matrix; it is never
        # trained, only re-drawn via `sample_weights()`.
        self.epsilon = self.add_weight(
            name="epsilon",
            shape=(input_shape[-1], self.units),
            initializer=initializers.Zeros(),
            trainable=False,
        )
        # Re-new noise matrix
        self.sample_weights()
    def call(self, inputs):
        """Return state-dependent noise: `inputs @ epsilon`."""
        return tf.matmul(inputs, self.epsilon)
    def get_config(self):
        """Serialize constructor arguments so the layer can be re-created."""
        config = super(MultivariateGaussianNoise, self).get_config()
        config.update(
            {
                "units": self.units,
                "kernel_initializer": initializers.serialize(self.kernel_initializer),
                "kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
                "kernel_constraint": constraints.serialize(self.kernel_constraint),
            }
        )
        return config
    @property
    def scale(self):
        """Positive noise std, obtained from the raw kernel via softplus."""
        return tf.math.softplus(self.kernel)
    def sample_weights(self):
        """Draw a fresh `epsilon` matrix from N(0, diag(scale + 1e-6))."""
        w_dist = tfp.distributions.MultivariateNormalDiag(
            loc=tf.zeros_like(self.kernel), scale_diag=(self.scale + 1e-6)
        )
        self.epsilon.assign(w_dist.sample())
import os
import numpy as np
import reverb
import wandb
from tensorflow.keras.optimizers import Adam
from wandb.keras import WandbCallback
from rl_toolkit.networks.callbacks import AgentCallback
from rl_toolkit.networks.models import ActorCritic
from rl_toolkit.utils import make_reverb_dataset
from .process import Process
class Learner(Process):
    """
    Learner
    =================

    Trains the actor-critic model on experience sampled from a Reverb
    replay buffer and logs progress to Weights & Biases.

    Attributes:
        env_name (str): the name of environment
        db_server (str): database server name (IP or domain name)
        train_steps (int): number of training steps
        batch_size (int): size of mini-batch used for training
        actor_units (list): list of the numbers of units in each Actor's layer
        critic_units (list): list of the numbers of units in each Critic's layer
        actor_learning_rate (float): the learning rate for the Actor's optimizer
        critic_learning_rate (float): the learning rate for the Critic's optimizer
        alpha_learning_rate (float): the learning rate for the Alpha's optimizer
        n_quantiles (int): number of predicted quantiles
        top_quantiles_to_drop (int): number of quantiles to drop
        n_critics (int): number of critic networks
        clip_mean_min (float): the minimum value of mean
        clip_mean_max (float): the maximum value of mean
        gamma (float): the discount factor
        tau (float): the soft update coefficient for target networks
        init_alpha (float): initialization of alpha param
        init_noise (float): initialization of the Actor's noise
        model_path (str): path to the model
        save_path (str): path to the models for saving
        log_interval (int): the logging interval to the console
    """

    def __init__(
        self,
        # ---
        env_name: str,
        db_server: str,
        # ---
        train_steps: int,
        batch_size: int,
        # ---
        actor_units: list,
        critic_units: list,
        actor_learning_rate: float,
        critic_learning_rate: float,
        alpha_learning_rate: float,
        # ---
        n_quantiles: int,
        top_quantiles_to_drop: int,
        n_critics: int,
        # ---
        clip_mean_min: float,
        clip_mean_max: float,
        # ---
        gamma: float,
        tau: float,
        init_alpha: float,
        init_noise: float,
        # ---
        model_path: str,
        save_path: str,
        # ---
        log_interval: int,
    ):
        # The learner never renders, hence render=False.
        super(Learner, self).__init__(env_name, False)

        self._train_steps = train_steps
        self._save_path = save_path
        self._log_interval = log_interval
        self._db_server = db_server

        # Init actor-critic's network
        self.model = ActorCritic(
            actor_units=actor_units,
            critic_units=critic_units,
            n_quantiles=n_quantiles,
            top_quantiles_to_drop=top_quantiles_to_drop,
            n_critics=n_critics,
            n_outputs=np.prod(self._env.action_space.shape),
            clip_mean_min=clip_mean_min,
            clip_mean_max=clip_mean_max,
            gamma=gamma,
            tau=tau,
            init_alpha=init_alpha,
            init_noise=init_noise,
        )
        self.model.build((None,) + self._env.observation_space.shape)
        # Actor and critic gradients are clipped by global norm (40.0).
        self.model.compile(
            actor_optimizer=Adam(
                learning_rate=actor_learning_rate, global_clipnorm=40.0
            ),
            critic_optimizer=Adam(
                learning_rate=critic_learning_rate, global_clipnorm=40.0
            ),
            alpha_optimizer=Adam(learning_rate=alpha_learning_rate),
        )

        # Optionally resume from previously saved weights.
        if model_path is not None:
            self.model.load_weights(model_path)

        # Show models details
        self.model.summary()

        # Initializes the reverb's dataset (reads from the "experience" table)
        self.dataset = make_reverb_dataset(
            server_address=self._db_server,
            table="experience",
            batch_size=batch_size,
        )

        # init Weights & Biases
        wandb.init(project="rl-toolkit", group=f"{env_name}")

        # Record all run hyper-parameters in the Weights & Biases config.
        wandb.config.train_steps = train_steps
        wandb.config.batch_size = batch_size
        wandb.config.actor_units = actor_units
        wandb.config.critic_units = critic_units
        wandb.config.actor_learning_rate = actor_learning_rate
        wandb.config.critic_learning_rate = critic_learning_rate
        wandb.config.alpha_learning_rate = alpha_learning_rate
        wandb.config.n_quantiles = n_quantiles
        wandb.config.top_quantiles_to_drop = top_quantiles_to_drop
        wandb.config.n_critics = n_critics
        wandb.config.clip_mean_min = clip_mean_min
        wandb.config.clip_mean_max = clip_mean_max
        wandb.config.gamma = gamma
        wandb.config.tau = tau
        wandb.config.init_alpha = init_alpha
        wandb.config.init_noise = init_noise

    def run(self):
        """Train on the Reverb dataset for `train_steps` updates.

        One Keras "epoch" corresponds to a single training step here
        (steps_per_epoch=1), so `epochs=self._train_steps` performs
        exactly `train_steps` gradient updates.
        """
        self.model.fit(
            self.dataset,
            epochs=self._train_steps,
            steps_per_epoch=1,
            verbose=0,
            callbacks=[AgentCallback(self._db_server), WandbCallback(save_model=False)],
        )

    def save(self):
        """Save the full actor-critic weights and the actor-only weights to disk."""
        if self._save_path:
            # create path if not exists
            if not os.path.exists(self._save_path):
                os.makedirs(self._save_path)

            # Save model
            self.model.save_weights(os.path.join(self._save_path, "actor_critic.h5"))
            self.model.actor.save_weights(os.path.join(self._save_path, "actor.h5"))

    def close(self):
        """Shut down the process and checkpoint the Reverb database."""
        super(Learner, self).close()

        # create the checkpoint of the database
        client = reverb.Client(self._db_server)
        client.checkpoint() | /rl_toolkit-4.1.1-py3-none-any.whl/rl_toolkit/core/learner.py | 0.779028 | 0.421314 | learner.py | pypi
import numpy as np
import reverb
import tensorflow as tf
import wandb
from rl_toolkit.networks.models import Actor
from rl_toolkit.utils import VariableContainer
from .process import Process
class Agent(Process):
    """
    Agent
    =================

    Runs the environment, streams transitions to the Reverb replay buffer,
    and periodically pulls fresh policy weights from the variable table.

    Attributes:
        env_name (str): the name of environment
        render (bool): enable the rendering into the video file
        db_server (str): database server name (IP or domain name)
        actor_units (list): list of the numbers of units in each Actor's layer
        clip_mean_min (float): the minimum value of mean
        clip_mean_max (float): the maximum value of mean
        init_noise (float): initialization of the Actor's noise
        warmup_steps (int): number of interactions before using policy network
        env_steps (int): number of steps per rollout
    """

    def __init__(
        self,
        # ---
        env_name: str,
        render: bool,
        db_server: str,
        # ---
        actor_units: list,
        clip_mean_min: float,
        clip_mean_max: float,
        init_noise: float,
        # ---
        warmup_steps: int,
        env_steps: int,
    ):
        super(Agent, self).__init__(env_name, render)

        self._env_steps = env_steps
        self._warmup_steps = warmup_steps

        # Init actor's network
        self.actor = Actor(
            units=actor_units,
            n_outputs=np.prod(self._env.action_space.shape),
            clip_mean_min=clip_mean_min,
            clip_mean_max=clip_mean_max,
            init_noise=init_noise,
        )
        self.actor.build((None,) + self._env.observation_space.shape)

        # Show models details
        self.actor.summary()

        # Variables shared with the learner through the Reverb "variable" table.
        self._train_step = tf.Variable(
            0,
            trainable=False,
            dtype=tf.uint64,
            aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            shape=(),
        )
        self._stop_agents = tf.Variable(
            False,
            trainable=False,
            dtype=tf.bool,
            aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            shape=(),
        )

        # Table for storing variables
        self._variable_container = VariableContainer(
            db_server=db_server,
            table="variable",
            variables={
                "train_step": self._train_step,
                "stop_agents": self._stop_agents,
                "policy_variables": self.actor.variables,
            },
        )

        # load content of variables & re-new noise matrix
        self._variable_container.update_variables()
        self.actor.reset_noise()

        # Initializes the reverb client
        self.client = reverb.Client(db_server)

        # init Weights & Biases
        wandb.init(
            project="rl-toolkit",
            group=f"{env_name}",
            monitor_gym=render,
        )
        wandb.config.warmup_steps = warmup_steps
        wandb.config.env_steps = env_steps

    def random_policy(self, input):
        """Return a uniformly random action (used during warm-up).

        Note: the observation argument `input` is intentionally ignored.
        """
        action = self._env.action_space.sample()
        return action

    @tf.function(jit_compile=True)
    def collect_policy(self, input):
        """Return a stochastic action from the actor for a single observation."""
        action, _ = self.actor(
            tf.expand_dims(input, axis=0),
            with_log_prob=False,
            deterministic=False,
        )
        # Drop the batch dimension that was added above.
        return tf.squeeze(action, axis=0)

    def collect(self, writer, max_steps, policy):
        """Run `policy` in the environment for `max_steps` steps.

        Each transition is streamed to the Reverb "experience" table as an
        (observation, action, reward, next_observation, terminal) item built
        from the two most recent writer entries. Episode statistics are
        printed and logged to Weights & Biases at every episode end.
        """
        # collect the rollout
        for _ in range(max_steps):
            # Get the action
            action = policy(self._last_obs)
            action = np.array(action, copy=False)

            # perform action
            new_obs, reward, terminal, _ = self._env.step(action)

            # Update variables
            self._episode_reward += reward
            self._episode_steps += 1
            self._total_steps += 1

            # Update the replay buffer
            writer.append(
                {
                    "observation": self._last_obs.astype("float32", copy=False),
                    "action": action,
                    "reward": np.array([reward], copy=False, dtype="float32"),
                    "terminal": np.array([terminal], copy=False),
                }
            )

            # Once the circular buffer holds enough samples (two steps),
            # emit a transition spanning the last two writer entries.
            if self._episode_steps > 1:
                writer.create_item(
                    table="experience",
                    priority=1.0,
                    trajectory={
                        "observation": writer.history["observation"][-2],
                        "action": writer.history["action"][-2],
                        "reward": writer.history["reward"][-2],
                        "next_observation": writer.history["observation"][-1],
                        "terminal": writer.history["terminal"][-2],
                    },
                )

            # Check the end of episode
            if terminal:
                # Write the final interaction !!!
                writer.append({"observation": new_obs.astype("float32", copy=False)})
                writer.create_item(
                    table="experience",
                    priority=1.0,
                    trajectory={
                        "observation": writer.history["observation"][-2],
                        "action": writer.history["action"][-2],
                        "reward": writer.history["reward"][-2],
                        "next_observation": writer.history["observation"][-1],
                        "terminal": writer.history["terminal"][-2],
                    },
                )

                # Block until all the items have been sent to the server
                writer.end_episode()

                # Logging
                print("=============================================")
                print(f"Epoch: {self._total_episodes}")
                print(f"Score: {self._episode_reward}")
                print(f"Steps: {self._episode_steps}")
                print(f"TotalInteractions: {self._total_steps}")
                print(f"Train step: {self._train_step.numpy()}")
                print("=============================================")
                wandb.log(
                    {
                        "Epoch": self._total_episodes,
                        "Score": self._episode_reward,
                        "Steps": self._episode_steps,
                    },
                    step=self._train_step.numpy(),
                )

                # Init variables
                self._episode_reward = 0.0
                self._episode_steps = 0
                self._total_episodes += 1

                # Init environment
                self._last_obs = self._env.reset()
            else:
                # Super critical !!! carry the latest observation into the next step
                self._last_obs = new_obs

        writer.flush()

    def run(self):
        """Main rollout loop: warm up with random actions, then use the policy."""
        # init environment
        self._episode_reward = 0.0
        self._episode_steps = 0
        self._total_episodes = 0
        self._total_steps = 0
        self._last_obs = self._env.reset()

        # Connection to the DB
        with self.client.trajectory_writer(num_keep_alive_refs=2) as writer:
            # Warm-up rounds with the uniformly random policy
            self.collect(writer, self._warmup_steps, self.random_policy)

            # Main game loop, until the learner signals the agents to stop
            while not self._stop_agents:
                self.collect(writer, self._env_steps, self.collect_policy)

                # load content of variables & re-new noise matrix
                self._variable_container.update_variables()
                self.actor.reset_noise() | /rl_toolkit-4.1.1-py3-none-any.whl/rl_toolkit/core/agent.py | 0.736211 | 0.33846 | agent.py | pypi
import numpy as np
import reverb
import tensorflow as tf
from rl_toolkit.networks.models import Actor
from rl_toolkit.utils import VariableContainer
from .process import Process
class Server(Process):
"""
Learner
=================
Attributes:
env_name (str): the name of environment
port (int): the port number of database server
actor_units (list): list of the numbers of units in each Actor's layer
clip_mean_min (float): the minimum value of mean
clip_mean_max (float): the maximum value of mean
init_noise (float): initialization of the Actor's noise
min_replay_size (int): minimum number of samples in memory before learning starts
max_replay_size (int): the capacity of experiences replay buffer
samples_per_insert (int): samples per insert ratio (SPI) `= num_sampled_items / num_inserted_items`
db_path (str): path to the database checkpoint
"""
def __init__(
self,
# ---
env_name: str,
port: int,
# ---
actor_units: list,
clip_mean_min: float,
clip_mean_max: float,
init_noise: float,
# ---
min_replay_size: int,
max_replay_size: int,
samples_per_insert: int,
# ---
db_path: str,
):
super(Server, self).__init__(env_name, False)
self._port = port
# Init actor's network
self.actor = Actor(
units=actor_units,
n_outputs=np.prod(self._env.action_space.shape),
clip_mean_min=clip_mean_min,
clip_mean_max=clip_mean_max,
init_noise=init_noise,
)
self.actor.build((None,) + self._env.observation_space.shape)
# Show models details
self.actor.summary()
# Variables
self._train_step = tf.Variable(
0,
trainable=False,
dtype=tf.uint64,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
shape=(),
)
self._stop_agents = tf.Variable(
False,
trainable=False,
dtype=tf.bool,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
shape=(),
)
# Table for storing variables
self._variable_container = VariableContainer(
db_server=f"localhost:{self._port}",
table="variable",
variables={
"train_step": self._train_step,
"stop_agents": self._stop_agents,
"policy_variables": self.actor.variables,
},
)
# Load DB from checkpoint or make a new one
if db_path is None:
checkpointer = None
else:
checkpointer = reverb.checkpointers.DefaultCheckpointer(path=db_path)
if samples_per_insert:
# 10% tolerance in rate
samples_per_insert_tolerance = 0.1 * samples_per_insert
error_buffer = min_replay_size * samples_per_insert_tolerance
limiter = reverb.rate_limiters.SampleToInsertRatio(
min_size_to_sample=min_replay_size,
samples_per_insert=samples_per_insert,
error_buffer=error_buffer,
)
else:
limiter = reverb.rate_limiters.MinSize(min_replay_size)
# Initialize the reverb server
self.server = reverb.Server(
tables=[
reverb.Table( # Replay buffer
name="experience",
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
rate_limiter=limiter,
max_size=max_replay_size,
max_times_sampled=0,
signature={
"observation": tf.TensorSpec(
[*self._env.observation_space.shape],
self._env.observation_space.dtype,
),
"action": tf.TensorSpec(
[*self._env.action_space.shape],
self._env.action_space.dtype,
),
"reward": tf.TensorSpec([1], tf.float32),
"next_observation": tf.TensorSpec(
[*self._env.observation_space.shape],
self._env.observation_space.dtype,
),
"terminal": tf.TensorSpec([1], tf.bool),
},
),
reverb.Table( # Variables container
name="variable",
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
rate_limiter=reverb.rate_limiters.MinSize(1),
max_size=1,
max_times_sampled=0,
signature=self._variable_container.signature,
),
],
port=self._port,
checkpointer=checkpointer,
)
# Init variable container in DB
self._variable_container.push_variables()
def run(self):
self.server.wait()
def close(self):
super(Server, self).close()
print("The database server is successfully closed! 🔥🔥🔥 Bay Bay.") | /rl_toolkit-4.1.1-py3-none-any.whl/rl_toolkit/core/server.py | 0.823186 | 0.283856 | server.py | pypi |
# WarpDrive: Extremely Fast End-to-End Deep Multi-Agent Reinforcement Learning on a GPU
WarpDrive is a flexible, lightweight, and easy-to-use open-source reinforcement learning (RL)
framework that implements end-to-end multi-agent RL on a single or multiple GPUs (Graphics Processing Unit).
Using the extreme parallelization capability of GPUs, WarpDrive enables orders-of-magnitude
faster RL compared to CPU simulation + GPU model implementations. It is extremely efficient as it avoids back-and-forth data copying between the CPU and the GPU,
and runs simulations across multiple agents and multiple environment replicas in parallel.
We have some main updates since its initial open source,
- version 1.3: provides the auto scaling tools to achieve the optimal throughput per device.
- version 1.4: supports the distributed asynchronous training among multiple GPU devices.
- version 1.6: supports the aggregation of multiple GPU blocks for one environment replica.
- version 2.0: supports the dual backends of both CUDA C and JIT compiled Numba. [(Our Blog article)](https://blog.salesforceairesearch.com/warpdrive-v2-numba-nvidia-gpu-simulations/)
Together, these allow the user to run thousands of concurrent multi-agent simulations and train
on extremely large batches of experience, achieving over 100x throughput over CPU-based counterparts.
We include several default multi-agent environments
based on the game of "Tag" for benchmarking and testing. In the "Tag" games, taggers are trying to run after
and tag the runners. They are fairly complicated games where thread synchronization, shared memory, high-dimensional indexing for thousands of interacting agents are involved. Several much more complex environments such as Covid-19 environment and climate change environment have been developed based on WarpDrive, you may see examples in [Real-World Problems and Collaborations](#real-world-problems-and-collaborations).
Below, we show multi-agent RL policies
trained for different tagger:runner speed ratios using WarpDrive.
These environments can **run** at **millions of steps per second**,
and **train** in just a few **hours**, all on a single GPU!
<img src="https://blog.einstein.ai/content/images/2021/08/tagger2x-1.gif" width="250" height="250"/> <img src="https://blog.einstein.ai/content/images/2021/08/same_speed_50fps-1.gif" width="250" height="250"/> <img src="https://blog.einstein.ai/content/images/2021/08/runner2x-2.gif" width="250" height="250"/>
WarpDrive also provides tools to build and train
multi-agent RL systems quickly with just a few lines of code.
Here is a short example to train tagger and runner agents:
```python
# Create a wrapped environment object via the EnvWrapper
# Ensure that env_backend is set to 'pycuda' or 'numba' (in order to run on the GPU)
env_wrapper = EnvWrapper(
TagContinuous(**run_config["env"]),
num_envs=run_config["trainer"]["num_envs"],
env_backend="pycuda"
)
# Agents can share policy models: this dictionary maps policy model names to agent ids.
policy_tag_to_agent_id_map = {
"tagger": list(env_wrapper.env.taggers),
"runner": list(env_wrapper.env.runners),
}
# Create the trainer object
trainer = Trainer(
env_wrapper=env_wrapper,
config=run_config,
policy_tag_to_agent_id_map=policy_tag_to_agent_id_map,
)
# Perform training!
trainer.train()
```
Below, we compare the training speed on an N1 16-CPU
node versus a single A100 GPU (using WarpDrive), for the Tag environment with 100 runners and 5 taggers. With the same environment configuration and training parameters, WarpDrive on a GPU is about 10× faster. Both scenarios are with 60 environment replicas running in parallel. Using more environments on the CPU node is infeasible as data copying gets too expensive. With WarpDrive, it is possible to scale up the number of environment replicas at least 10-fold, for even faster training.
<img src="https://user-images.githubusercontent.com/7627238/144560725-83167c73-274e-4c5a-a6cf-4e06355895f0.png" width="400" height="400"/>
## Code Structure
WarpDrive provides a CUDA (or Numba) + Python framework and quality-of-life tools, so you can quickly build fast, flexible and massively distributed multi-agent RL systems. The following figure illustrates a bottom-up overview of the design and components of WarpDrive. The user only needs to write a CUDA or Numba step function at the CUDA environment layer, while the rest is a pure Python interface. We have step-by-step tutorials for you to master the workflow.
<img src="https://user-images.githubusercontent.com/31748898/151683116-299943b9-4e70-4a7b-8feb-16a3a351ca91.png" width="780" height="580"/>
## Papers and Citing WarpDrive
Our paper published at *Journal of Machine Learning Research* (JMLR) [https://jmlr.org/papers/v23/22-0185.html](https://jmlr.org/papers/v23/22-0185.html). You can also find more details in our white paper: [https://arxiv.org/abs/2108.13976](https://arxiv.org/abs/2108.13976).
If you're using WarpDrive in your research or applications, please cite using this BibTeX:
```
@article{JMLR:v23:22-0185,
author = {Tian Lan and Sunil Srinivasa and Huan Wang and Stephan Zheng},
title = {WarpDrive: Fast End-to-End Deep Multi-Agent Reinforcement Learning on a GPU},
journal = {Journal of Machine Learning Research},
year = {2022},
volume = {23},
number = {316},
pages = {1--6},
url = {http://jmlr.org/papers/v23/22-0185.html}
}
@misc{lan2021warpdrive,
title={WarpDrive: Extremely Fast End-to-End Deep Multi-Agent Reinforcement Learning on a GPU},
author={Tian Lan and Sunil Srinivasa and Huan Wang and Caiming Xiong and Silvio Savarese and Stephan Zheng},
year={2021},
eprint={2108.13976},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
## Tutorials and Quick Start
#### Tutorials
Familiarize yourself with WarpDrive by running these tutorials on Colab or [NGC container](https://catalog.ngc.nvidia.com/orgs/nvidia/resources/warp_drive)!
- [WarpDrive basics(Introduction and PyCUDA)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.a-warp_drive_basics.ipynb)
- [WarpDrive basics(Numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.b-warp_drive_basics.ipynb)
- [WarpDrive sampler(PyCUDA)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.a-warp_drive_sampler.ipynb)
- [WarpDrive sampler(Numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.b-warp_drive_sampler.ipynb)
- [WarpDrive resetter and logger](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb)
- [Create custom environments (PyCUDA)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.a-create_custom_environments_pycuda.md)
- [Create custom environments (Numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.b-create_custom_environments_numba.md)
- [Training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-5-training_with_warp_drive.ipynb)
- [Scaling Up training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-6-scaling_up_training_with_warp_drive.md)
- [Training with WarpDrive + Pytorch Lightning](https://github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-7-training_with_warp_drive_and_pytorch_lightning.ipynb)
You may also run these [tutorials](https://www.github.com/salesforce/warp-drive/blob/master/tutorials) *locally*, but you will need a GPU machine with nvcc compiler installed
and a compatible Nvidia GPU driver. You will also need [Jupyter](https://jupyter.org).
See [https://jupyter.readthedocs.io/en/latest/install.html](https://jupyter.readthedocs.io/en/latest/install.html) for installation instructions
#### Example Training Script
We provide some example scripts for you to quickly start the end-to-end training.
For example, if you want to train tag_continuous environment (10 taggers and 100 runners) with 2 GPUs and CUDA C backend
```
python example_training_script_pycuda.py -e tag_continuous -n 2
```
or switch to JIT compiled Numba backend with 1 GPU
```
python example_training_script_numba.py -e tag_continuous
```
You can find full reference documentation [here](http://opensource.salesforce.com/warp-drive/).
## Real World Problems and Collaborations
- [AI Economist Covid Environment with WarpDrive](https://github.com/salesforce/ai-economist/blob/master/tutorials/multi_agent_gpu_training_with_warp_drive.ipynb): We train two-level multi-agent economic simulations using [AI-Economist Foundation](https://github.com/salesforce/ai-economist) and train it using WarpDrive. We specifically consider the COVID-19 and economy simulation in this example.
- [Climate Change Cooperation Competition](https://mila-iqia.github.io/climate-cooperation-competition/) collaborated with [Mila](https://mila.quebec/en/). We provide the base version of the RICE (regional integrated climate environment) [simulation environment](https://github.com/mila-iqia/climate-cooperation-competition).
- [Pytorch Lightning Trainer with WarpDrive](https://github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-7-training_with_warp_drive_and_pytorch_lightning.ipynb): We provide a [tutorial example](https://pytorch-lightning.readthedocs.io/en/latest/notebooks/lightning_examples/warp-drive.html) and a [blog article](https://devblog.pytorchlightning.ai/turbocharge-multi-agent-reinforcement-learning-with-warpdrive-and-pytorch-lightning-6be9b00a3a43) of a multi-agent reinforcement learning training loop with WarpDrive and [Pytorch Lightning](https://www.pytorchlightning.ai/).
- [NVIDIA NGC Catalog and Quick Deployment to VertexAI](https://catalog.ngc.nvidia.com/): WarpDrive image is hosted by [NGC Catalog](https://catalog.ngc.nvidia.com/orgs/partners/teams/salesforce/containers/warpdrive). The NGC catalog "hosts containers for the top AI and data science software, tuned, tested and optimized by NVIDIA". Our tutorials also enable the quick deployment to VertexAI supported by the NGC.
## Installation Instructions
To get started, you'll need to have **Python 3.7+** and the **nvcc** compiler installed
with a compatible Nvidia GPU CUDA driver.
CUDA (which includes nvcc) can be installed by following Nvidia's instructions here: [https://developer.nvidia.com/cuda-downloads](https://developer.nvidia.com/cuda-downloads).
### Docker Image
V100 GPU: You can refer to the [example Dockerfile](https://github.com/salesforce/warp-drive/blob/master/Dockerfile) to configure your system.
A100 GPU: Our latest image is published and maintained by NVIDIA NGC. We recommend you download the latest image from [NGC catalog](https://catalog.ngc.nvidia.com/orgs/partners/teams/salesforce/containers/warpdrive).
If you want to build your customized environment, we suggest you visit [Nvidia Docker Hub](https://hub.docker.com/r/nvidia/cuda) to download the CUDA and cuDNN images compatible with your system.
You should be able to use the command line utility to monitor the NVIDIA GPU devices in your system:
```pyfunctiontypecomment
nvidia-smi
```
and see something like this
```pyfunctiontypecomment
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 450.51.06 Driver Version: 450.51.06 CUDA Version: 11.0 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 Tesla V100-SXM2... Off | 00000000:00:04.0 Off | 0 |
| N/A 37C P0 32W / 300W | 0MiB / 16160MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
```
In this snapshot, you can see we are using a Tesla V100 GPU and CUDA version 11.0.
### Installing using Pip
You can install WarpDrive using the Python package manager:
```pyfunctiontypecomment
pip install rl_warp_drive
```
### Installing from Source
1. Clone this repository to your machine:
```
git clone https://www.github.com/salesforce/warp-drive
```
2. *Optional, but recommended for first tries:* Create a new conda environment (named "warp_drive" below) and activate it:
```
conda create --name warp_drive python=3.7 --yes
conda activate warp_drive
```
3. Install as an editable Python package:
```pyfunctiontypecomment
cd warp_drive
pip install -e .
```
### Testing your Installation
You can call directly from Python command to test all modules and the end-to-end training workflow.
```
python warp_drive/utils/unittests/run_unittests_pycuda.py
python warp_drive/utils/unittests/run_unittests_numba.py
python warp_drive/utils/unittests/run_trainer_tests.py
```
## Learn More
For more information, please check out our [blog](https://blog.einstein.ai/warpdrive-fast-rl-on-a-gpu/), [white paper](https://arxiv.org/abs/2108.13976), and code [documentation](http://opensource.salesforce.com/warp-drive/).
If you're interested in extending this framework, or have questions, join the
AI Economist Slack channel using this
[invite link](https://join.slack.com/t/aieconomist/shared_invite/zt-g71ajic7-XaMygwNIup~CCzaR1T0wgA).
| /rl-warp-drive-2.5.0.tar.gz/rl-warp-drive-2.5.0/README.md | 0.796055 | 0.944177 | README.md | pypi |
# Salesforce Open Source Community Code of Conduct
## About the Code of Conduct
Equality is a core value at Salesforce. We believe a diverse and inclusive
community fosters innovation and creativity, and are committed to building a
culture where everyone feels included.
Salesforce open-source projects are committed to providing a friendly, safe, and
welcoming environment for all, regardless of gender identity and expression,
sexual orientation, disability, physical appearance, body size, ethnicity, nationality,
race, age, religion, level of experience, education, socioeconomic status, or
other similar personal characteristics.
The goal of this code of conduct is to specify a baseline standard of behavior so
that people with different social values and communication styles can work
together effectively, productively, and respectfully in our open source community.
It also establishes a mechanism for reporting issues and resolving conflicts.
All questions and reports of abusive, harassing, or otherwise unacceptable behavior
in a Salesforce open-source project may be reported by contacting the Salesforce
Open Source Conduct Committee at ossconduct@salesforce.com.
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of gender
identity and expression, sexual orientation, disability, physical appearance,
body size, ethnicity, nationality, race, age, religion, level of experience, education,
socioeconomic status, or other similar personal characteristics.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy toward other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Personal attacks, insulting/derogatory comments, or trolling
* Public or private harassment
* Publishing, or threatening to publish, others' private information—such as
a physical or electronic address—without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
* Advocating for or encouraging any of the above behaviors
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned with this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project email
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the Salesforce Open Source Conduct Committee
at ossconduct@salesforce.com. All complaints will be reviewed and investigated
and will result in a response that is deemed necessary and appropriate to the
circumstances. The committee is obligated to maintain confidentiality with
regard to the reporter of an incident. Further details of specific enforcement
policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership and the Salesforce Open Source Conduct
Committee.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][contributor-covenant-home],
version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html.
It includes adaptions and additions from [Go Community Code of Conduct][golang-coc],
[CNCF Code of Conduct][cncf-coc], and [Microsoft Open Source Code of Conduct][microsoft-coc].
This Code of Conduct is licensed under the [Creative Commons Attribution 3.0 License][cc-by-3-us].
[contributor-covenant-home]: https://www.contributor-covenant.org (https://www.contributor-covenant.org/)
[golang-coc]: https://golang.org/conduct
[cncf-coc]: https://github.com/cncf/foundation/blob/master/code-of-conduct.md
[microsoft-coc]: https://opensource.microsoft.com/codeofconduct/
[cc-by-3-us]: https://creativecommons.org/licenses/by/3.0/us/ | /rl-warp-drive-2.5.0.tar.gz/rl-warp-drive-2.5.0/CODE_OF_CONDUCT.md | 0.642208 | 0.832951 | CODE_OF_CONDUCT.md | pypi |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
from matplotlib.patches import Polygon
from mpl_toolkits.mplot3d import art3d
def generate_tag_env_rollout_animation(
    trainer,
    fps=50,
    tagger_color="#C843C3",
    runner_color="#245EB6",
    runner_not_in_game_color="#666666",
    fig_width=6,
    fig_height=6,
):
    """Generate a matplotlib animation of one continuous-tag episode rollout.

    Args:
        trainer: a warp-drive trainer; must expose ``fetch_episode_states``
            and ``cuda_envs.env`` (with num_agents, taggers, num_taggers,
            grid_length and episode_length attributes).
        fps: frames per second of the returned animation.
        tagger_color: marker color for taggers.
        runner_color: marker color for runners still in the game.
        runner_not_in_game_color: color applied to tagged (exited) runners.
        fig_width: figure width in inches.
        fig_height: figure height in inches.

    Returns:
        matplotlib.animation.FuncAnimation covering the whole episode
        (episode_length + 1 frames).
    """
    assert trainer is not None
    episode_states = trainer.fetch_episode_states(
        ["loc_x", "loc_y", "still_in_the_game"]
    )
    assert isinstance(episode_states, dict)
    env = trainer.cuda_envs.env

    fig, ax = plt.subplots(
        1, 1, figsize=(fig_width, fig_height)
    )  # , constrained_layout=True
    # Swap the default 2D axes for a 3D one; the arena is drawn at z = 0.
    ax.remove()
    ax = fig.add_subplot(1, 1, 1, projection="3d")

    # Bounds (positions are normalized to the unit square below)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_zlim(-0.01, 0.01)

    # Surface
    corner_points = [(0, 0), (0, 1), (1, 1), (1, 0)]
    poly = Polygon(corner_points, color=(0.1, 0.2, 0.5, 0.15))
    ax.add_patch(poly)
    art3d.pathpatch_2d_to_3d(poly, z=0, zdir="z")

    # "Hide" side panes
    ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))

    # Hide grid lines, ticks and axes
    ax.grid(False)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_zticks([])
    ax.set_axis_off()

    # Set camera
    ax.elev = 40
    ax.azim = -55
    ax.dist = 10  # NOTE(review): Axes3D.dist is deprecated in recent matplotlib — confirm target version

    # Try to reduce whitespace
    fig.subplots_adjust(left=0, right=1, bottom=-0.2, top=1)

    # Plot initial positions; taggers get a larger marker than runners.
    lines = [None] * env.num_agents
    for idx in range(env.num_agents):
        is_tagger = idx in env.taggers
        lines[idx] = ax.plot3D(
            episode_states["loc_x"][:1, idx] / env.grid_length,
            episode_states["loc_y"][:1, idx] / env.grid_length,
            [0],
            color=tagger_color if is_tagger else runner_color,
            marker="o",
            markersize=10 if is_tagger else 5,
        )[0]

    init_num_runners = env.num_agents - env.num_taggers

    def _get_label(timestep, n_runners_alive, init_n_runners):
        # Three-line overlay: title, current timestep, runners left (count + %).
        line1 = "Continuous Tag\n"
        line2 = "Time Step:".ljust(14) + f"{timestep:4.0f}\n"
        frac_runners_alive = n_runners_alive / init_n_runners
        pct_runners_alive = f"{n_runners_alive:4} ({frac_runners_alive * 100:.0f}%)"
        line3 = "Runners Left:".ljust(14) + pct_runners_alive
        return line1 + line2 + line3

    label = ax.text(
        0,
        0,
        0.02,
        _get_label(0, init_num_runners, init_num_runners).lower(),
    )
    label.set_fontsize(14)
    label.set_fontweight("normal")
    label.set_color("#666666")

    def animate(i):
        for idx, line in enumerate(lines):
            line.set_data_3d(
                episode_states["loc_x"][i : i + 1, idx] / env.grid_length,
                episode_states["loc_y"][i : i + 1, idx] / env.grid_length,
                np.zeros(1),
            )
            # Gray out and hide the marker of agents that have left the game.
            # (Was `if still_in_game: pass / else: ...` — inverted for clarity.)
            if not episode_states["still_in_the_game"][i, idx]:
                line.set_color(runner_not_in_game_color)
                line.set_marker("")
        n_runners_alive = (
            episode_states["still_in_the_game"][i].sum() - env.num_taggers
        )
        label.set_text(_get_label(i, n_runners_alive, init_num_runners).lower())

    ani = animation.FuncAnimation(
        fig, animate, np.arange(0, env.episode_length + 1), interval=1000.0 / fps
    )
    plt.close()
    return ani
import copy
import heapq
import numpy as np
from gym import spaces
from warp_drive.utils.constants import Constants
from warp_drive.utils.data_feed import DataFeed
from warp_drive.utils.gpu_environment_context import CUDAEnvironmentContext
# Canonical buffer names shared with the device-side step kernels
# (these same strings appear in the `args` list fed to cuda_step below).
_OBSERVATIONS = Constants.OBSERVATIONS
_ACTIONS = Constants.ACTIONS
_REWARDS = Constants.REWARDS
# Per-agent state features; used as keys into the global-state buffers.
_LOC_X = "loc_x"
_LOC_Y = "loc_y"
_SP = "speed"
_DIR = "direction"
_ACC = "acceleration"
_SIG = "still_in_the_game"
class TagContinuous(CUDAEnvironmentContext):
"""
The game of tag on a continuous circular 2D space.
There are some taggers trying to tag several runners.
The taggers want to get as close as possible to the runner, while the runner
wants to get as far away from them as possible.
Once a runner is tagged, he exits the game if runner_exits_game_after_tagged is True
otherwise he continues to run around (and the tagger can catch him again)
"""
    def __init__(
        self,
        num_taggers=1,
        num_runners=10,
        grid_length=10.0,
        episode_length=100,
        starting_location_x=None,
        starting_location_y=None,
        starting_directions=None,
        seed=None,
        max_speed=1.0,
        skill_level_runner=1.0,
        skill_level_tagger=1.0,
        max_acceleration=1.0,
        min_acceleration=-1.0,
        max_turn=np.pi / 2,
        min_turn=-np.pi / 2,
        num_acceleration_levels=10,
        num_turn_levels=10,
        edge_hit_penalty=-0.0,
        use_full_observation=True,
        num_other_agents_observed=2,
        tagging_distance=0.01,
        tag_reward_for_tagger=1.0,
        step_penalty_for_tagger=-0.0,
        tag_penalty_for_runner=-1.0,
        step_reward_for_runner=0.0,
        end_of_game_reward_for_runner=1.0,
        runner_exits_game_after_tagged=True,
        env_backend="cpu",
    ):
        """
        Args:
            num_taggers (int, optional): [number of taggers in the environment].
                Defaults to 1.
            num_runners (int, optional): [number of runners in the environment].
                Defaults to 10.
            grid_length (float, optional): [length of the square grid]. Defaults to 10.0
            episode_length (int, optional): [episode length]. Defaults to 100.
            starting_location_x ([ndarray], optional): [starting x locations of the
                agents]. Defaults to None.
            starting_location_y ([ndarray], optional): [starting y locations of the
                agents]. Defaults to None.
            starting_directions ([ndarray], optional): starting orientations
                in [0, 2*pi]. Defaults to None.
            seed ([type], optional): [seeding parameter]. Defaults to None.
            max_speed (float, optional): [max speed of the agents]. Defaults to 1.0
            skill_level_runner (float, optional): [runner skill level;
                this essentially is a multiplier to the max_speed].
                Defaults to 1.0
            skill_level_tagger (float, optional): [tagger skill level]. Defaults to 1.0
            max_acceleration (float, optional): [the max acceleration]. Defaults to 1.0.
            min_acceleration (float, optional): [description]. Defaults to -1.0
            max_turn ([type], optional): [description]. Defaults to np.pi/2.
            min_turn ([type], optional): [description]. Defaults to -np.pi/2.
            num_acceleration_levels (int, optional): [number of acceleration actions
                uniformly spaced between max and min acceleration]. Defaults to 10.
            num_turn_levels (int, optional): [number of turn actions uniformly spaced
                between max and min turns]. Defaults to 10.
            edge_hit_penalty (float, optional): [penalty for hitting the edge (wall)].
                Defaults to -0.0.
            use_full_observation (bool, optional): [boolean indicating whether to
                include all the agents' data in the observation or just the nearest
                neighbors]. Defaults to True.
            num_other_agents_observed (int, optional): [number of nearest neighbors
                in the obs (only takes effect when use_full_observation is False)].
                Defaults to 2.
            tagging_distance (float, optional): [margin between a
                tagger and runner to consider the runner as 'tagged'. This multiplies
                on top of the grid length]. Defaults to 0.01.
            tag_reward_for_tagger (float, optional): [positive reward for the tagger
                upon tagging a runner]. Defaults to 1.0
            step_penalty_for_tagger (float, optional): [penalty for every step
                the game goes on]. Defaults to -0.0.
            tag_penalty_for_runner (float, optional): [negative reward for getting
                tagged]. Defaults to -1.0
            step_reward_for_runner (float, optional): [reward for every step the
                runner isn't tagged]. Defaults to 0.0.
            end_of_game_reward_for_runner (float, optional): [reward at the end of
                the game for a runner that isn't tagged]. Defaults to 1.0.
            runner_exits_game_after_tagged (bool, optional): [boolean indicating
                whether runners exit the game after getting tagged or can remain in and
                continue to get tagged]. Defaults to True.
            env_backend (string, optional): [indicate whether to use the CPU
                or the GPU (either pycuda or numba) for stepping through the environment].
                Defaults to "cpu".
        """
        super().__init__()

        self.float_dtype = np.float32
        self.int_dtype = np.int32
        # small number to prevent indeterminate cases
        self.eps = self.float_dtype(1e-10)

        assert num_taggers > 0
        self.num_taggers = num_taggers
        assert num_runners > 0
        self.num_runners = num_runners
        self.num_agents = self.num_taggers + self.num_runners

        assert episode_length > 0
        self.episode_length = episode_length

        # Square 2D grid
        assert grid_length > 0
        self.grid_length = self.float_dtype(grid_length)
        self.grid_diagonal = self.grid_length * np.sqrt(2)

        # Penalty for hitting the edges
        assert edge_hit_penalty <= 0
        self.edge_hit_penalty = self.float_dtype(edge_hit_penalty)

        # Seeding
        # NOTE(review): np_random is bound to the global np.random module, so
        # seeding here seeds NumPy's global RNG — confirm this is intended.
        self.np_random = np.random
        if seed is not None:
            self.seed(seed)

        # Starting taggers: tagger ids are sampled at random from all agent ids.
        taggers = self.np_random.choice(
            np.arange(self.num_agents), self.num_taggers, replace=False
        )

        self.agent_type = {}
        self.taggers = {}
        self.runners = {}
        for agent_id in range(self.num_agents):
            if agent_id in set(taggers):
                self.agent_type[agent_id] = 1  # Tagger
                self.taggers[agent_id] = True
            else:
                self.agent_type[agent_id] = 0  # Runner
                self.runners[agent_id] = True

        # Starting locations: random within the grid unless explicitly given.
        if starting_location_x is None:
            assert starting_location_y is None
            starting_location_x = self.grid_length * self.np_random.rand(
                self.num_agents
            )
            starting_location_y = self.grid_length * self.np_random.rand(
                self.num_agents
            )
        else:
            assert len(starting_location_x) == self.num_agents
            assert len(starting_location_y) == self.num_agents
        self.starting_location_x = starting_location_x
        self.starting_location_y = starting_location_y

        # Starting directions: one of the four axis-aligned headings by default.
        if starting_directions is None:
            starting_directions = self.np_random.choice(
                [0, np.pi / 2, np.pi, np.pi * 3 / 2], self.num_agents, replace=True
            )
        else:
            assert len(starting_directions) == self.num_agents
        self.starting_directions = starting_directions

        # Set the max speed level
        self.max_speed = self.float_dtype(max_speed)

        # All agents start with 0 speed and acceleration
        self.starting_speeds = np.zeros(self.num_agents, dtype=self.float_dtype)
        self.starting_accelerations = np.zeros(self.num_agents, dtype=self.float_dtype)

        assert num_acceleration_levels >= 0
        assert num_turn_levels >= 0

        # The num_acceleration and num_turn levels refer to the number of
        # uniformly-spaced levels between (min_acceleration and max_acceleration)
        # and (min_turn and max_turn), respectively.
        self.num_acceleration_levels = num_acceleration_levels
        self.num_turn_levels = num_turn_levels
        self.max_acceleration = self.float_dtype(max_acceleration)
        self.min_acceleration = self.float_dtype(min_acceleration)

        self.max_turn = self.float_dtype(max_turn)
        self.min_turn = self.float_dtype(min_turn)

        # Acceleration actions
        self.acceleration_actions = np.linspace(
            self.min_acceleration, self.max_acceleration, self.num_acceleration_levels
        )
        # Add action 0 - this will be the no-op, or 0 acceleration
        self.acceleration_actions = np.insert(self.acceleration_actions, 0, 0).astype(
            self.float_dtype
        )

        # Turn actions
        self.turn_actions = np.linspace(
            self.min_turn, self.max_turn, self.num_turn_levels
        )
        # Add action 0 - this will be the no-op, or 0 turn
        self.turn_actions = np.insert(self.turn_actions, 0, 0).astype(self.float_dtype)

        # Tagger and runner agent skill levels.
        # Skill levels multiply on top of the acceleration levels
        self.skill_levels = [
            self.agent_type[agent_id] * self.float_dtype(skill_level_tagger)
            + (1 - self.agent_type[agent_id]) * self.float_dtype(skill_level_runner)
            for agent_id in range(self.num_agents)
        ]

        # Does the runner exit the game or continue to play after getting tagged?
        self.runner_exits_game_after_tagged = runner_exits_game_after_tagged

        # These will be set during reset (see below)
        self.timestep = None
        self.global_state = None

        # Defining observation and action spaces.
        # Each agent's action is (acceleration index, turn index), each
        # including the extra leading no-op action inserted above.
        self.observation_space = None  # Note: this will be set via the env_wrapper
        self.action_space = {
            agent_id: spaces.MultiDiscrete(
                (len(self.acceleration_actions), len(self.turn_actions))
            )
            for agent_id in range(self.num_agents)
        }

        # Used in generate_observation()
        # When use_full_observation is True, then all the agents will have info of
        # all the other agents, otherwise, each agent will only have info of
        # its k-nearest agents (k = num_other_agents_observed)
        self.use_full_observation = use_full_observation
        self.init_obs = None  # Will be set later in generate_observation()
        assert num_other_agents_observed <= self.num_agents
        self.num_other_agents_observed = num_other_agents_observed

        # Distance margin between agents for non-zero rewards
        # If a tagger is closer than this to a runner, the tagger
        # gets a positive reward, and the runner a negative reward
        assert 0 <= tagging_distance <= 1
        self.distance_margin_for_reward = (tagging_distance * self.grid_length).astype(
            self.float_dtype
        )

        # Rewards and penalties
        assert tag_reward_for_tagger >= 0
        self.tag_reward_for_tagger = self.float_dtype(tag_reward_for_tagger)
        assert step_penalty_for_tagger <= 0
        self.step_penalty_for_tagger = self.float_dtype(step_penalty_for_tagger)

        assert tag_penalty_for_runner <= 0
        self.tag_penalty_for_runner = self.float_dtype(tag_penalty_for_runner)
        assert step_reward_for_runner >= 0
        self.step_reward_for_runner = self.float_dtype(step_reward_for_runner)

        # Per-agent per-step reward: penalty for taggers, reward for runners.
        self.step_rewards = [
            self.agent_type[agent_id] * self.step_penalty_for_tagger
            + (1 - self.agent_type[agent_id]) * self.step_reward_for_runner
            for agent_id in range(self.num_agents)
        ]

        assert end_of_game_reward_for_runner >= 0
        self.end_of_game_reward_for_runner = self.float_dtype(
            end_of_game_reward_for_runner
        )

        # Note: These will be set later
        self.edge_hit_reward_penalty = None
        self.still_in_the_game = None

        # These will also be set via the env_wrapper
        self.env_backend = env_backend

        # Copy runners dict for applying at reset
        self.runners_at_reset = copy.deepcopy(self.runners)
name = "TagContinuous"
def seed(self, seed=None):
"""
Seeding the environment with a desired seed
Note: this uses the code in
https://github.com/openai/gym/blob/master/gym/utils/seeding.py
"""
self.np_random.seed(seed)
return [seed]
def set_global_state(self, key=None, value=None, t=None, dtype=None):
"""
Set the global state for a specified key, value and timestep.
Note: for a new key, initialize global state to all zeros.
"""
assert key is not None
if dtype is None:
dtype = self.float_dtype
# If no values are passed, set everything to zeros.
if key not in self.global_state:
self.global_state[key] = np.zeros(
(self.episode_length + 1, self.num_agents), dtype=dtype
)
if t is not None and value is not None:
assert isinstance(value, np.ndarray)
assert value.shape[0] == self.global_state[key].shape[1]
self.global_state[key][t] = value
    def update_state(self, delta_accelerations, delta_turns):
        """
        Advance all agents' kinematic state by one timestep (CPU path).

        Note: 'update_state' is only used when running on CPU step() only.
        When using the CUDA step function, this Python method (update_state)
        is part of the step() function!
        The logic below mirrors (part of) the step function in CUDA.
        """
        # Previous-timestep state, one value per agent.
        loc_x_prev_t = self.global_state[_LOC_X][self.timestep - 1]
        loc_y_prev_t = self.global_state[_LOC_Y][self.timestep - 1]
        speed_prev_t = self.global_state[_SP][self.timestep - 1]
        dir_prev_t = self.global_state[_DIR][self.timestep - 1]
        acc_prev_t = self.global_state[_ACC][self.timestep - 1]

        # Update direction and acceleration
        # Do not update location if agent is out of the game !
        # (multiplying by still_in_the_game zeroes the state of exited agents)
        dir_curr_t = (
            (dir_prev_t + delta_turns) % (2 * np.pi) * self.still_in_the_game
        ).astype(self.float_dtype)
        acc_curr_t = acc_prev_t + delta_accelerations

        # 0 <= speed <= max_speed (multiplied by the skill levels).
        # Reset acceleration to 0 when speed is outside this range
        max_speed = self.max_speed * np.array(self.skill_levels)
        speed_curr_t = self.float_dtype(
            np.clip(speed_prev_t + acc_curr_t, 0.0, max_speed) * self.still_in_the_game
        )
        acc_curr_t = acc_curr_t * (speed_curr_t > 0) * (speed_curr_t < max_speed)

        loc_x_curr_t = self.float_dtype(
            loc_x_prev_t + speed_curr_t * np.cos(dir_curr_t)
        )
        loc_y_curr_t = self.float_dtype(
            loc_y_prev_t + speed_curr_t * np.sin(dir_curr_t)
        )

        # Crossing the edge
        has_crossed_edge = ~(
            (loc_x_curr_t >= 0)
            & (loc_x_curr_t <= self.grid_length)
            & (loc_y_curr_t >= 0)
            & (loc_y_curr_t <= self.grid_length)
        )

        # Clip x and y if agent has crossed edge
        clipped_loc_x_curr_t = self.float_dtype(
            np.clip(loc_x_curr_t, 0.0, self.grid_length)
        )
        clipped_loc_y_curr_t = self.float_dtype(
            np.clip(loc_y_curr_t, 0.0, self.grid_length)
        )

        # Penalize reward if agents hit the walls
        # (edge_hit_penalty is <= 0; read later by compute_reward())
        self.edge_hit_reward_penalty = self.edge_hit_penalty * has_crossed_edge

        # Set global states
        self.set_global_state(key=_LOC_X, value=clipped_loc_x_curr_t, t=self.timestep)
        self.set_global_state(key=_LOC_Y, value=clipped_loc_y_curr_t, t=self.timestep)
        self.set_global_state(key=_SP, value=speed_curr_t, t=self.timestep)
        self.set_global_state(key=_DIR, value=dir_curr_t, t=self.timestep)
        self.set_global_state(key=_ACC, value=acc_curr_t, t=self.timestep)
def compute_distance(self, agent1, agent2):
"""
Note: 'compute_distance' is only used when running on CPU step() only.
When using the CUDA step function, this Python method (compute_distance)
is also part of the step() function!
"""
return np.sqrt(
(
self.global_state[_LOC_X][self.timestep, agent1]
- self.global_state[_LOC_X][self.timestep, agent2]
)
** 2
+ (
self.global_state[_LOC_Y][self.timestep, agent1]
- self.global_state[_LOC_Y][self.timestep, agent2]
)
** 2
).astype(self.float_dtype)
def k_nearest_neighbors(self, agent_id, k):
"""
Note: 'k_nearest_neighbors' is only used when running on CPU step() only.
When using the CUDA step function, this Python method (k_nearest_neighbors)
is also part of the step() function!
"""
agent_ids_and_distances = []
for ag_id in range(self.num_agents):
if (ag_id != agent_id) and (self.still_in_the_game[ag_id]):
agent_ids_and_distances += [
(ag_id, self.compute_distance(agent_id, ag_id))
]
k_nearest_neighbor_ids_and_distances = heapq.nsmallest(
k, agent_ids_and_distances, key=lambda x: x[1]
)
return [
item[0]
for item in k_nearest_neighbor_ids_and_distances[
: self.num_other_agents_observed
]
]
    def generate_observation(self):
        """
        Generate and return the observations for every agent.

        Each observation concatenates, for the observed agents (all others
        when use_full_observation, else the k nearest neighbors): the
        normalized relative features (loc_x, loc_y, speed, acceleration,
        direction), the agent types and the still-in-the-game flags, followed
        by one normalized-time scalar. Agents out of the game get an
        all-zeros observation.
        """
        obs = {}
        normalized_global_obs = None
        # Stack the five per-agent features, each divided by its scale,
        # into a (5, num_agents) array.
        for feature in [
            (_LOC_X, self.grid_diagonal),
            (_LOC_Y, self.grid_diagonal),
            (_SP, self.max_speed + self.eps),
            (_ACC, self.max_speed + self.eps),
            (_DIR, 2 * np.pi),
        ]:
            if normalized_global_obs is None:
                normalized_global_obs = (
                    self.global_state[feature[0]][self.timestep] / feature[1]
                )
            else:
                normalized_global_obs = np.vstack(
                    (
                        normalized_global_obs,
                        self.global_state[feature[0]][self.timestep] / feature[1],
                    )
                )
        agent_types = np.array(
            [self.agent_type[agent_id] for agent_id in range(self.num_agents)]
        )
        time = np.array([float(self.timestep) / self.episode_length])

        if self.use_full_observation:
            for agent_id in range(self.num_agents):
                # Initialize obs (all-zero features; used for exited agents)
                obs[agent_id] = np.concatenate(
                    [
                        np.vstack(
                            (
                                np.zeros_like(normalized_global_obs),
                                agent_types,
                                self.still_in_the_game,
                            )
                        )[
                            :,
                            [idx for idx in range(self.num_agents) if idx != agent_id],
                        ].reshape(
                            -1
                        ),  # filter out the obs for the current agent
                        np.array([0.0]),
                    ]
                )

                # Set obs for agents still in the game
                # (features are relative to the observing agent)
                if self.still_in_the_game[agent_id]:
                    obs[agent_id] = np.concatenate(
                        [
                            np.vstack(
                                (
                                    normalized_global_obs
                                    - normalized_global_obs[:, agent_id].reshape(-1, 1),
                                    agent_types,
                                    self.still_in_the_game,
                                )
                            )[
                                :,
                                [
                                    idx
                                    for idx in range(self.num_agents)
                                    if idx != agent_id
                                ],
                            ].reshape(
                                -1
                            ),  # filter out the obs for the current agent
                            time,
                        ]
                    )

        else:  # use partial observation
            for agent_id in range(self.num_agents):
                if self.timestep == 0:
                    # Set obs to all zeros
                    obs_global_states = np.zeros(
                        (
                            normalized_global_obs.shape[0],
                            self.num_other_agents_observed,
                        )
                    )
                    obs_agent_types = np.zeros(self.num_other_agents_observed)
                    obs_still_in_the_game = np.zeros(self.num_other_agents_observed)

                    # Form the observation
                    self.init_obs = np.concatenate(
                        [
                            np.vstack(
                                (
                                    obs_global_states,
                                    obs_agent_types,
                                    obs_still_in_the_game,
                                )
                            ).reshape(-1),
                            np.array([0.0]),  # time
                        ]
                    )

                # Initialize obs to all zeros
                obs[agent_id] = self.init_obs

                # Set obs for agents still in the game
                if self.still_in_the_game[agent_id]:
                    nearest_neighbor_ids = self.k_nearest_neighbors(
                        agent_id, k=self.num_other_agents_observed
                    )

                    # For the case when the number of remaining agent ids is fewer
                    # than self.num_other_agents_observed (because agents have exited
                    # the game), we also need to pad obs with zeros
                    obs_global_states = np.hstack(
                        (
                            normalized_global_obs[:, nearest_neighbor_ids]
                            - normalized_global_obs[:, agent_id].reshape(-1, 1),
                            np.zeros(
                                (
                                    normalized_global_obs.shape[0],
                                    self.num_other_agents_observed
                                    - len(nearest_neighbor_ids),
                                )
                            ),
                        )
                    )
                    obs_agent_types = np.hstack(
                        (
                            agent_types[nearest_neighbor_ids],
                            np.zeros(
                                (
                                    self.num_other_agents_observed
                                    - len(nearest_neighbor_ids)
                                )
                            ),
                        )
                    )
                    # NOTE(review): the trailing comma below makes this a
                    # 1-tuple wrapping the hstacked array; np.vstack below
                    # still stacks it as one row, so behavior is unaffected —
                    # consider removing the comma for clarity.
                    obs_still_in_the_game = (
                        np.hstack(
                            (
                                self.still_in_the_game[nearest_neighbor_ids],
                                np.zeros(
                                    (
                                        self.num_other_agents_observed
                                        - len(nearest_neighbor_ids)
                                    )
                                ),
                            )
                        ),
                    )

                    # Form the observation
                    obs[agent_id] = np.concatenate(
                        [
                            np.vstack(
                                (
                                    obs_global_states,
                                    obs_agent_types,
                                    obs_still_in_the_game,
                                )
                            ).reshape(-1),
                            time,
                        ]
                    )

        return obs
    def compute_reward(self):
        """
        Compute and return the rewards for each agent.

        Also mutates game state: when a runner is tagged and
        runner_exits_game_after_tagged is True, the runner is removed from
        self.runners, still_in_the_game is zeroed, and num_runners decremented.
        """
        # Initialize rewards
        rew = {agent_id: 0.0 for agent_id in range(self.num_agents)}

        taggers_list = sorted(self.taggers)

        # At least one runner present
        if self.num_runners > 0:
            runners_list = sorted(self.runners)
            runner_locations_x = self.global_state[_LOC_X][self.timestep][runners_list]
            tagger_locations_x = self.global_state[_LOC_X][self.timestep][taggers_list]

            runner_locations_y = self.global_state[_LOC_Y][self.timestep][runners_list]
            tagger_locations_y = self.global_state[_LOC_Y][self.timestep][taggers_list]

            # Pairwise runner-tagger distances, shape (num_runners, num_taggers)
            runners_to_taggers_distances = np.sqrt(
                (
                    np.repeat(runner_locations_x, self.num_taggers)
                    - np.tile(tagger_locations_x, self.num_runners)
                )
                ** 2
                + (
                    np.repeat(runner_locations_y, self.num_taggers)
                    - np.tile(tagger_locations_y, self.num_runners)
                )
                ** 2
            ).reshape(self.num_runners, self.num_taggers)

            min_runners_to_taggers_distances = np.min(
                runners_to_taggers_distances, axis=1
            )
            argmin_runners_to_taggers_distances = np.argmin(
                runners_to_taggers_distances, axis=1
            )
            # The closest tagger gets the credit (and reward) for each tag.
            nearest_tagger_ids = [
                taggers_list[idx] for idx in argmin_runners_to_taggers_distances
            ]

            # Rewards
            # Add edge hit reward penalty and the step rewards / penalties
            for agent_id in range(self.num_agents):
                if self.still_in_the_game[agent_id]:
                    rew[agent_id] += self.edge_hit_reward_penalty[agent_id]
                    rew[agent_id] += self.step_rewards[agent_id]

            for idx, runner_id in enumerate(runners_list):
                if min_runners_to_taggers_distances[idx] < self.distance_margin_for_reward:
                    # the runner is tagged!
                    rew[runner_id] += self.tag_penalty_for_runner
                    rew[nearest_tagger_ids[idx]] += self.tag_reward_for_tagger

                    if self.runner_exits_game_after_tagged:
                        # Remove runner from game
                        self.still_in_the_game[runner_id] = 0
                        del self.runners[runner_id]
                        self.num_runners -= 1
                        # Mark the runner as out for all remaining timesteps
                        self.global_state[_SIG][self.timestep :, runner_id] = 0

        if self.timestep == self.episode_length:
            # Runners surviving the whole episode get the end-of-game bonus.
            for runner_id in self.runners:
                rew[runner_id] += self.end_of_game_reward_for_runner

        return rew
    def get_data_dictionary(self):
        """
        Create a dictionary of data to push to the device.

        Arrays flagged with save_copy_and_apply_at_reset=True are snapshotted
        by the device data manager and restored on every environment reset.
        """
        data_dict = DataFeed()
        # Kinematic state at t=0 (reset state).
        for feature in [_LOC_X, _LOC_Y, _SP, _DIR, _ACC]:
            data_dict.add_data(
                name=feature,
                data=self.global_state[feature][0],
                save_copy_and_apply_at_reset=True,
            )
        data_dict.add_data(
            name="agent_types",
            data=[self.agent_type[agent_id] for agent_id in range(self.num_agents)],
        )
        data_dict.add_data(
            name="num_runners", data=self.num_runners, save_copy_and_apply_at_reset=True
        )
        data_dict.add_data(
            name="num_other_agents_observed", data=self.num_other_agents_observed
        )
        data_dict.add_data(name="grid_length", data=self.grid_length)
        data_dict.add_data(
            name="edge_hit_reward_penalty",
            data=self.edge_hit_reward_penalty,
            save_copy_and_apply_at_reset=True,
        )
        data_dict.add_data(
            name="step_rewards",
            data=self.step_rewards,
        )
        data_dict.add_data(name="edge_hit_penalty", data=self.edge_hit_penalty)
        data_dict.add_data(name="max_speed", data=self.max_speed)
        data_dict.add_data(name="acceleration_actions", data=self.acceleration_actions)
        data_dict.add_data(name="turn_actions", data=self.turn_actions)
        data_dict.add_data(name="skill_levels", data=self.skill_levels)
        data_dict.add_data(name="use_full_observation", data=self.use_full_observation)
        data_dict.add_data(
            name="distance_margin_for_reward", data=self.distance_margin_for_reward
        )
        data_dict.add_data(
            name="tag_reward_for_tagger", data=self.tag_reward_for_tagger
        )
        data_dict.add_data(
            name="tag_penalty_for_runner", data=self.tag_penalty_for_runner
        )
        data_dict.add_data(
            name="end_of_game_reward_for_runner",
            data=self.end_of_game_reward_for_runner,
        )
        # Scratch buffers used by the device-side k-nearest-neighbor search.
        data_dict.add_data(
            name="neighbor_distances",
            data=np.zeros((self.num_agents, self.num_agents - 1), dtype=np.float32),
            save_copy_and_apply_at_reset=True,
        )
        data_dict.add_data(
            name="neighbor_ids_sorted_by_distance",
            data=np.zeros((self.num_agents, self.num_agents - 1), dtype=np.int32),
            save_copy_and_apply_at_reset=True,
        )
        data_dict.add_data(
            name="nearest_neighbor_ids",
            data=np.zeros(
                (self.num_agents, self.num_other_agents_observed), dtype=np.int32
            ),
            save_copy_and_apply_at_reset=True,
        )
        data_dict.add_data(
            name="runner_exits_game_after_tagged",
            data=self.runner_exits_game_after_tagged,
        )
        data_dict.add_data(
            name="still_in_the_game",
            data=self.still_in_the_game,
            save_copy_and_apply_at_reset=True,
        )
        return data_dict
    def reset(self):
        """
        Env reset().

        Restores timestep, global state, the still-in-the-game flags, the
        edge-hit penalties and the runners dict to their initial values, and
        returns the initial observations.
        """
        # Reset time to the beginning
        self.timestep = 0

        # Re-initialize the global state
        self.global_state = {}
        self.set_global_state(
            key=_LOC_X, value=self.starting_location_x, t=self.timestep
        )
        self.set_global_state(
            key=_LOC_Y, value=self.starting_location_y, t=self.timestep
        )
        self.set_global_state(key=_SP, value=self.starting_speeds, t=self.timestep)
        self.set_global_state(key=_DIR, value=self.starting_directions, t=self.timestep)
        self.set_global_state(
            key=_ACC, value=self.starting_accelerations, t=self.timestep
        )

        # Array to keep track of the agents that are still in play
        self.still_in_the_game = np.ones(self.num_agents, dtype=self.int_dtype)

        # Initialize global state for "still_in_the_game" to all ones
        self.global_state[_SIG] = np.ones(
            (self.episode_length + 1, self.num_agents), dtype=self.int_dtype
        )

        # Penalty for hitting the edges
        self.edge_hit_reward_penalty = np.zeros(self.num_agents, dtype=self.float_dtype)

        # Reinitialize some variables that may have changed during previous episode
        self.runners = copy.deepcopy(self.runners_at_reset)
        self.num_runners = len(self.runners)

        return self.generate_observation()
def step(self, actions=None):
"""
Env step() - The GPU version calls the corresponding CUDA kernels
"""
self.timestep += 1
if not self.env_backend == "cpu":
# CUDA version of step()
# This subsumes update_state(), generate_observation(),
# and compute_reward()
args = [
_LOC_X,
_LOC_Y,
_SP,
_DIR,
_ACC,
"agent_types",
"edge_hit_reward_penalty",
"edge_hit_penalty",
"grid_length",
"acceleration_actions",
"turn_actions",
"max_speed",
"num_other_agents_observed",
"skill_levels",
"runner_exits_game_after_tagged",
"still_in_the_game",
"use_full_observation",
_OBSERVATIONS,
_ACTIONS,
"neighbor_distances",
"neighbor_ids_sorted_by_distance",
"nearest_neighbor_ids",
_REWARDS,
"step_rewards",
"num_runners",
"distance_margin_for_reward",
"tag_reward_for_tagger",
"tag_penalty_for_runner",
"end_of_game_reward_for_runner",
"_done_",
"_timestep_",
("n_agents", "meta"),
("episode_length", "meta"),
]
if self.env_backend == "pycuda":
self.cuda_step(
*self.cuda_step_function_feed(args),
block=self.cuda_function_manager.block,
grid=self.cuda_function_manager.grid,
)
elif self.env_backend == "numba":
self.cuda_step[
self.cuda_function_manager.grid, self.cuda_function_manager.block
](*self.cuda_step_function_feed(args))
result = None # do not return anything
else:
assert isinstance(actions, dict)
assert len(actions) == self.num_agents
acceleration_action_ids = [
actions[agent_id][0] for agent_id in range(self.num_agents)
]
turn_action_ids = [
actions[agent_id][1] for agent_id in range(self.num_agents)
]
assert all(
0 <= acc <= self.num_acceleration_levels
for acc in acceleration_action_ids
)
assert all(0 <= turn <= self.num_turn_levels for turn in turn_action_ids)
delta_accelerations = self.acceleration_actions[acceleration_action_ids]
delta_turns = self.turn_actions[turn_action_ids]
# Update state and generate observation
self.update_state(delta_accelerations, delta_turns)
if self.env_backend == "cpu":
obs = self.generate_observation()
# Compute rewards and done
rew = self.compute_reward()
done = {
"__all__": (self.timestep >= self.episode_length)
or (self.num_runners == 0)
}
info = {}
result = obs, rew, done, info
return result | /rl-warp-drive-2.5.0.tar.gz/rl-warp-drive-2.5.0/example_envs/tag_continuous/tag_continuous.py | 0.799168 | 0.39423 | tag_continuous.py | pypi |
import math
import numba.cuda as numba_driver
from numba import float32, int32, boolean
# 2 * pi — presumably used to wrap directions into [0, 2*pi) in the kernels
# (kernel bodies continue below); confirm against the full kernel source.
kTwoPi = 6.283185308
# Small constant guarding normalizations against division by zero.
kEpsilon = 1.0e-10
# Device helper function to compute distances between two agents
# Device helper function to compute the Euclidean distance between two agents
# of the same environment (rows are envs, columns are agents).
@numba_driver.jit((float32[:, ::1],
                   float32[:, ::1],
                   int32,
                   int32,
                   int32,
                   int32),
                  device=True,
                  inline=True)
def ComputeDistance(
    loc_x_arr, loc_y_arr, kThisAgentId1, kThisAgentId2, kEnvId, kNumAgents
):
    dx = loc_x_arr[kEnvId, kThisAgentId1] - loc_x_arr[kEnvId, kThisAgentId2]
    dy = loc_y_arr[kEnvId, kThisAgentId1] - loc_y_arr[kEnvId, kThisAgentId2]
    return math.sqrt(dx ** 2 + dy ** 2)
# Device helper function to generate observation
@numba_driver.jit((float32[:, ::1],
                   float32[:, ::1],
                   float32[:, ::1],
                   float32[:, ::1],
                   float32[:, ::1],
                   int32[::1],
                   float32,
                   float32,
                   int32,
                   int32[:, ::1],
                   boolean,
                   float32[:, :, ::1],
                   float32[:, :, ::1],
                   int32[:, :, ::1],
                   int32[:, :, ::1],
                   int32[::1],
                   int32,
                   int32,
                   int32,
                   int32),
                  device=True)
def CudaTagContinuousGenerateObservation(
    loc_x_arr,
    loc_y_arr,
    speed_arr,
    direction_arr,
    acceleration_arr,
    agent_types_arr,
    kGridLength,
    kMaxSpeed,
    kNumOtherAgentsObserved,
    still_in_the_game_arr,
    kUseFullObservation,
    obs_arr,
    neighbor_distances_arr,
    neighbor_ids_sorted_by_distance_arr,
    nearest_neighbor_ids,
    env_timestep_arr,
    kNumAgents,
    kEpisodeLength,
    kEnvId,
    kThisAgentId,
):
    """Write the observation for one (environment, agent) pair into obs_arr.

    Each call handles a single agent (kThisAgentId) in a single environment
    replica (kEnvId). Two observation layouts are supported:

    - kUseFullObservation True: for every other agent, 7 features are
      written feature-major in contiguous blocks of size (kNumAgents - 1):
      relative x, relative y, relative speed, relative acceleration,
      relative direction, agent type, and still-in-the-game flag. The final
      slot (index num_features * (kNumAgents - 1)) holds the normalized
      episode timestep.
    - kUseFullObservation False: the same 7 features, but only for the
      (up to) kNumOtherAgentsObserved nearest other agents still in the
      game, in blocks of size kNumOtherAgentsObserved, again followed by
      the normalized timestep.

    Agents no longer in the game keep the zero-initialized observation
    (apart from the type / still-in-the-game columns written during
    initialization in full-observation mode).

    neighbor_distances_arr, neighbor_ids_sorted_by_distance_arr and
    nearest_neighbor_ids are per-(env, agent) scratch buffers mutated
    in place while selecting the nearest neighbors.
    """
    # Per-neighbor features: rel. x, rel. y, rel. speed, rel. acceleration,
    # rel. direction, agent type, still-in-the-game flag.
    num_features = 7
    if kThisAgentId < kNumAgents:
        if kUseFullObservation:
            # Initialize obs
            index = 0
            for other_agent_id in range(kNumAgents):
                if not other_agent_id == kThisAgentId:
                    obs_arr[kEnvId, kThisAgentId, 0 * (kNumAgents - 1) + index] = 0.0
                    obs_arr[kEnvId, kThisAgentId, 1 * (kNumAgents - 1) + index] = 0.0
                    obs_arr[kEnvId, kThisAgentId, 2 * (kNumAgents - 1) + index] = 0.0
                    obs_arr[kEnvId, kThisAgentId, 3 * (kNumAgents - 1) + index] = 0.0
                    obs_arr[kEnvId, kThisAgentId, 4 * (kNumAgents - 1) + index] = 0.0
                    obs_arr[
                        kEnvId, kThisAgentId, 5 * (kNumAgents - 1) + index
                    ] = agent_types_arr[other_agent_id]
                    obs_arr[
                        kEnvId, kThisAgentId, 6 * (kNumAgents - 1) + index
                    ] = still_in_the_game_arr[kEnvId, other_agent_id]
                    index += 1
            obs_arr[kEnvId, kThisAgentId, num_features * (kNumAgents - 1)] = 0.0
            # Update obs for agents still in the game
            if still_in_the_game_arr[kEnvId, kThisAgentId]:
                index = 0
                for other_agent_id in range(kNumAgents):
                    if not other_agent_id == kThisAgentId:
                        # Relative positions are normalized by the maximum
                        # possible separation, the grid diagonal
                        # sqrt(2) * kGridLength.
                        obs_arr[
                            kEnvId, kThisAgentId, 0 * (kNumAgents - 1) + index
                        ] = float(
                            loc_x_arr[kEnvId, other_agent_id]
                            - loc_x_arr[kEnvId, kThisAgentId]
                        ) / (
                            math.sqrt(2.0) * kGridLength
                        )
                        obs_arr[
                            kEnvId, kThisAgentId, 1 * (kNumAgents - 1) + index
                        ] = float(
                            loc_y_arr[kEnvId, other_agent_id]
                            - loc_y_arr[kEnvId, kThisAgentId]
                        ) / (
                            math.sqrt(2.0) * kGridLength
                        )
                        # Speed and acceleration deltas are normalized by
                        # kMaxSpeed (kEpsilon guards against kMaxSpeed == 0).
                        obs_arr[
                            kEnvId, kThisAgentId, 2 * (kNumAgents - 1) + index
                        ] = float(
                            speed_arr[kEnvId, other_agent_id]
                            - speed_arr[kEnvId, kThisAgentId]
                        ) / (
                            kMaxSpeed + kEpsilon
                        )
                        obs_arr[
                            kEnvId, kThisAgentId, 3 * (kNumAgents - 1) + index
                        ] = float(
                            acceleration_arr[kEnvId, other_agent_id]
                            - acceleration_arr[kEnvId, kThisAgentId]
                        ) / (
                            kMaxSpeed + kEpsilon
                        )
                        # Heading delta is normalized by 2 * pi.
                        obs_arr[kEnvId, kThisAgentId, 4 * (kNumAgents - 1) + index] = (
                            float(
                                direction_arr[kEnvId, other_agent_id]
                                - direction_arr[kEnvId, kThisAgentId]
                            )
                            / kTwoPi
                        )
                        index += 1
                # Normalized episode progress in the last observation slot.
                obs_arr[kEnvId, kThisAgentId, num_features * (kNumAgents - 1)] = (
                    float(env_timestep_arr[kEnvId]) / kEpisodeLength
                )
        else:
            # Initialize obs to all zeros
            for idx in range(kNumOtherAgentsObserved):
                obs_arr[kEnvId, kThisAgentId, 0 * kNumOtherAgentsObserved + idx] = 0.0
                obs_arr[kEnvId, kThisAgentId, 1 * kNumOtherAgentsObserved + idx] = 0.0
                obs_arr[kEnvId, kThisAgentId, 2 * kNumOtherAgentsObserved + idx] = 0.0
                obs_arr[kEnvId, kThisAgentId, 3 * kNumOtherAgentsObserved + idx] = 0.0
                obs_arr[kEnvId, kThisAgentId, 4 * kNumOtherAgentsObserved + idx] = 0.0
                obs_arr[kEnvId, kThisAgentId, 5 * kNumOtherAgentsObserved + idx] = 0.0
                obs_arr[kEnvId, kThisAgentId, 6 * kNumOtherAgentsObserved + idx] = 0.0
            obs_arr[kEnvId, kThisAgentId, num_features * kNumOtherAgentsObserved] = 0.0
            # Update obs for agents still in the game
            if still_in_the_game_arr[kEnvId, kThisAgentId]:
                # Find the nearest agents
                # Initialize neighbor_ids_sorted_by_distance_arr
                # other agents that are still in the same
                num_valid_other_agents = 0
                for other_agent_id in range(kNumAgents):
                    if (
                        not other_agent_id == kThisAgentId
                        and still_in_the_game_arr[kEnvId, other_agent_id]
                    ):
                        neighbor_ids_sorted_by_distance_arr[
                            kEnvId, kThisAgentId, num_valid_other_agents
                        ] = other_agent_id
                        num_valid_other_agents += 1
                # First, find distance to all the valid agents
                for idx in range(num_valid_other_agents):
                    neighbor_distances_arr[kEnvId, kThisAgentId, idx] = ComputeDistance(
                        loc_x_arr,
                        loc_y_arr,
                        kThisAgentId,
                        neighbor_ids_sorted_by_distance_arr[kEnvId, kThisAgentId, idx],
                        kEnvId,
                        kNumAgents,
                    )
                # Find the nearest neighbor agent indices
                # Partial selection sort: after outer iteration i, slot i
                # holds the i-th smallest distance (and its agent id);
                # only the first kNumOtherAgentsObserved slots are needed.
                for i in range(min(num_valid_other_agents, kNumOtherAgentsObserved)):
                    for j in range(i + 1, num_valid_other_agents):
                        if (
                            neighbor_distances_arr[kEnvId, kThisAgentId, j]
                            < neighbor_distances_arr[kEnvId, kThisAgentId, i]
                        ):
                            tmp1 = neighbor_distances_arr[kEnvId, kThisAgentId, i]
                            neighbor_distances_arr[
                                kEnvId, kThisAgentId, i
                            ] = neighbor_distances_arr[kEnvId, kThisAgentId, j]
                            neighbor_distances_arr[kEnvId, kThisAgentId, j] = tmp1
                            tmp2 = neighbor_ids_sorted_by_distance_arr[
                                kEnvId, kThisAgentId, i
                            ]
                            neighbor_ids_sorted_by_distance_arr[
                                kEnvId, kThisAgentId, i
                            ] = neighbor_ids_sorted_by_distance_arr[
                                kEnvId, kThisAgentId, j
                            ]
                            neighbor_ids_sorted_by_distance_arr[
                                kEnvId, kThisAgentId, j
                            ] = tmp2
                # Save nearest neighbor ids
                for idx in range(min(num_valid_other_agents, kNumOtherAgentsObserved)):
                    nearest_neighbor_ids[
                        kEnvId, kThisAgentId, idx
                    ] = neighbor_ids_sorted_by_distance_arr[kEnvId, kThisAgentId, idx]
                # Update observation
                # Same 7 relative features as in the full-observation mode,
                # but indexed in blocks of kNumOtherAgentsObserved.
                for idx in range(min(num_valid_other_agents, kNumOtherAgentsObserved)):
                    kOtherAgentId = nearest_neighbor_ids[kEnvId, kThisAgentId, idx]
                    obs_arr[
                        kEnvId, kThisAgentId, 0 * kNumOtherAgentsObserved + idx
                    ] = float(
                        loc_x_arr[kEnvId, kOtherAgentId]
                        - loc_x_arr[kEnvId, kThisAgentId]
                    ) / (
                        math.sqrt(2.0) * kGridLength
                    )
                    obs_arr[
                        kEnvId, kThisAgentId, 1 * kNumOtherAgentsObserved + idx
                    ] = float(
                        loc_y_arr[kEnvId, kOtherAgentId]
                        - loc_y_arr[kEnvId, kThisAgentId]
                    ) / (
                        math.sqrt(2.0) * kGridLength
                    )
                    obs_arr[
                        kEnvId, kThisAgentId, 2 * kNumOtherAgentsObserved + idx
                    ] = float(
                        speed_arr[kEnvId, kOtherAgentId]
                        - speed_arr[kEnvId, kThisAgentId]
                    ) / (
                        kMaxSpeed + kEpsilon
                    )
                    obs_arr[
                        kEnvId, kThisAgentId, 3 * kNumOtherAgentsObserved + idx
                    ] = float(
                        acceleration_arr[kEnvId, kOtherAgentId]
                        - acceleration_arr[kEnvId, kThisAgentId]
                    ) / (
                        kMaxSpeed + kEpsilon
                    )
                    obs_arr[kEnvId, kThisAgentId, 4 * kNumOtherAgentsObserved + idx] = (
                        float(
                            direction_arr[kEnvId, kOtherAgentId]
                            - direction_arr[kEnvId, kThisAgentId]
                        )
                        / kTwoPi
                    )
                    obs_arr[
                        kEnvId, kThisAgentId, 5 * kNumOtherAgentsObserved + idx
                    ] = agent_types_arr[kOtherAgentId]
                    obs_arr[
                        kEnvId, kThisAgentId, 6 * kNumOtherAgentsObserved + idx
                    ] = still_in_the_game_arr[kEnvId, kOtherAgentId]
                # Normalized episode progress in the last observation slot.
                obs_arr[
                    kEnvId, kThisAgentId, num_features * kNumOtherAgentsObserved
                ] = (float(env_timestep_arr[kEnvId]) / kEpisodeLength)
# Device helper function to compute rewards
@numba_driver.jit((float32[:, ::1],
                   float32[:, ::1],
                   float32[:, ::1],
                   float32,
                   float32[:, ::1],
                   float32[::1],
                   int32[::1],
                   int32[::1],
                   float32,
                   float32,
                   float32,
                   float32,
                   boolean,
                   int32[:, ::1],
                   int32[::1],
                   int32[::1],
                   int32,
                   int32,
                   int32,
                   int32),
                  device=True)
def CudaTagContinuousComputeReward(
    rewards_arr,
    loc_x_arr,
    loc_y_arr,
    kGridLength,
    edge_hit_reward_penalty,
    step_rewards_arr,
    num_runners_arr,
    agent_types_arr,
    kDistanceMarginForReward,
    kTagRewardForTagger,
    kTagPenaltyForRunner,
    kEndOfGameRewardForRunner,
    kRunnerExitsGameAfterTagged,
    still_in_the_game_arr,
    done_arr,
    env_timestep_arr,
    kNumAgents,
    kEpisodeLength,
    kEnvId,
    kThisAgentId,
):
    """Compute per-agent rewards and the episode done flag for one step.

    Each call handles one agent in one environment. Reward composition:
    edge-hit penalty + per-step reward/penalty for every agent still in
    the game; then each still-alive runner checks its distance to every
    tagger, and if the nearest tagger is within kDistanceMarginForReward
    the runner is tagged (runner penalty, tagger reward, and — when
    kRunnerExitsGameAfterTagged — removal from the game and a decrement
    of num_runners_arr). Runners still alive at the final timestep get
    kEndOfGameRewardForRunner. Thread 0 finally sets done_arr when the
    episode ends or no runners remain.
    """
    if kThisAgentId < kNumAgents:
        # Initialize rewards
        rewards_arr[kEnvId, kThisAgentId] = 0.0
        if still_in_the_game_arr[kEnvId, kThisAgentId]:
            # Add the edge hit penalty and the step rewards / penalties
            rewards_arr[kEnvId, kThisAgentId] += edge_hit_reward_penalty[
                kEnvId, kThisAgentId
            ]
            rewards_arr[kEnvId, kThisAgentId] += step_rewards_arr[kThisAgentId]
        # Ensure that all the agents rewards are initialized before we proceed
        # The rewards are only set by the runners, so this pause is necessary
        # NOTE(review): this barrier sits inside the kThisAgentId < kNumAgents
        # branch; it assumes the block is launched with exactly kNumAgents
        # threads (otherwise excess threads would skip it) -- confirm.
        numba_driver.syncthreads()
        # Start from the largest possible in-grid distance (the diagonal).
        min_dist = kGridLength * math.sqrt(2.0)
        # agent_types_arr: 1 marks a tagger, so "not type" marks a runner.
        is_runner = not agent_types_arr[kThisAgentId]
        if is_runner and still_in_the_game_arr[kEnvId, kThisAgentId]:
            for other_agent_id in range(kNumAgents):
                is_tagger = agent_types_arr[other_agent_id] == 1
                if is_tagger:
                    dist = ComputeDistance(
                        loc_x_arr,
                        loc_y_arr,
                        kThisAgentId,
                        other_agent_id,
                        kEnvId,
                        kNumAgents,
                    )
                    if dist < min_dist:
                        min_dist = dist
                        nearest_tagger_id = other_agent_id
            if min_dist < kDistanceMarginForReward:
                # The runner is tagged
                rewards_arr[kEnvId, kThisAgentId] += kTagPenaltyForRunner
                # NOTE(review): this += runs on the runner's thread but
                # writes the tagger's reward; if two runners in the same
                # step share a nearest tagger, the non-atomic updates may
                # race -- confirm whether cuda.atomic.add is intended.
                rewards_arr[kEnvId, nearest_tagger_id] += kTagRewardForTagger
                if kRunnerExitsGameAfterTagged:
                    still_in_the_game_arr[kEnvId, kThisAgentId] = 0
                    num_runners_arr[kEnvId] -= 1
            # Add end of game reward for runners at the end of the episode
            if env_timestep_arr[kEnvId] == kEpisodeLength:
                rewards_arr[kEnvId, kThisAgentId] += kEndOfGameRewardForRunner
    # Wait until all tagging updates are visible before checking the
    # episode-termination condition.
    numba_driver.syncthreads()
    if kThisAgentId == 0:
        if env_timestep_arr[kEnvId] == kEpisodeLength or num_runners_arr[kEnvId] == 0:
            done_arr[kEnvId] = 1
@numba_driver.jit((float32[:, ::1],
                   float32[:, ::1],
                   float32[:, ::1],
                   float32[:, ::1],
                   float32[:, ::1],
                   int32[::1],
                   float32[:, ::1],
                   float32,
                   float32,
                   float32[::1],
                   float32[::1],
                   float32,
                   int32,
                   float32[::1],
                   boolean,
                   int32[:, ::1],
                   boolean,
                   float32[:, :, ::1],
                   int32[:, :, ::1],
                   float32[:, :, ::1],
                   int32[:, :, ::1],
                   int32[:, :, ::1],
                   float32[:, ::1],
                   float32[::1],
                   int32[::1],
                   float32,
                   float32,
                   float32,
                   float32,
                   int32[::1],
                   int32[::1],
                   int32,
                   int32))
def NumbaTagContinuousStep(
    loc_x_arr,
    loc_y_arr,
    speed_arr,
    direction_arr,
    acceleration_arr,
    agent_types_arr,
    edge_hit_reward_penalty,
    kEdgeHitPenalty,
    kGridLength,
    acceleration_actions_arr,
    turn_actions_arr,
    kMaxSpeed,
    kNumOtherAgentsObserved,
    skill_levels_arr,
    kRunnerExitsGameAfterTagged,
    still_in_the_game_arr,
    kUseFullObservation,
    obs_arr,
    action_indices_arr,
    neighbor_distances_arr,
    neighbor_ids_sorted_by_distance_arr,
    nearest_neighbor_ids,
    rewards_arr,
    step_rewards_arr,
    num_runners_arr,
    kDistanceMarginForReward,
    kTagRewardForTagger,
    kTagPenaltyForRunner,
    kEndOfGameRewardForRunner,
    done_arr,
    env_timestep_arr,
    kNumAgents,
    kEpisodeLength,
):
    """Advance the continuous Tag environment by one timestep (CUDA kernel).

    Launch layout: one block per environment replica, one thread per agent.
    Per agent, this kernel (1) maps the sampled action indices to
    acceleration/turn deltas and integrates speed, heading and position,
    clipping at the grid edges (with an edge-hit penalty), then after a
    block-wide barrier (2) fills the agent's observation and (3) computes
    its reward and the episode done flag via the device helpers above.
    All outputs are written in place into the passed arrays.
    """
    kEnvId = numba_driver.blockIdx.x  # one environment replica per block
    kThisAgentId = numba_driver.threadIdx.x  # one agent per thread
    kNumActions = 2  # NOTE(review): unused in this kernel body
    # Only one thread advances the environment clock.
    if kThisAgentId == 0:
        env_timestep_arr[kEnvId] += 1
    numba_driver.syncthreads()
    assert env_timestep_arr[kEnvId] > 0 and env_timestep_arr[kEnvId] <= kEpisodeLength
    if kThisAgentId < kNumAgents:
        # Look up this agent's sampled (acceleration, turn) deltas.
        delta_acceleration = acceleration_actions_arr[
            action_indices_arr[kEnvId, kThisAgentId, 0]
        ]
        delta_turn = turn_actions_arr[action_indices_arr[kEnvId, kThisAgentId, 1]]
        acceleration_arr[kEnvId, kThisAgentId] += delta_acceleration
        # Wrap heading into [0, kTwoPi); agents out of the game are pinned
        # to 0 by multiplying with still_in_the_game (0 or 1).
        direction_arr[kEnvId, kThisAgentId] = (
            (direction_arr[kEnvId, kThisAgentId] + delta_turn) % kTwoPi
        ) * still_in_the_game_arr[kEnvId, kThisAgentId]
        if direction_arr[kEnvId, kThisAgentId] < 0:
            direction_arr[kEnvId, kThisAgentId] = (
                kTwoPi + direction_arr[kEnvId, kThisAgentId]
            )
        # Speed clipping
        # Clamp speed into [0, kMaxSpeed * skill level]; agents out of the
        # game are forced to speed 0 via the still_in_the_game factor.
        speed_arr[kEnvId, kThisAgentId] = (
            min(
                kMaxSpeed * skill_levels_arr[kThisAgentId],
                max(
                    0.0,
                    speed_arr[kEnvId, kThisAgentId]
                    + acceleration_arr[kEnvId, kThisAgentId],
                ),
            )
            * still_in_the_game_arr[kEnvId, kThisAgentId]
        )
        # Reset acceleration to 0 when speed becomes 0 or
        # kMaxSpeed (multiplied by skill levels)
        if speed_arr[kEnvId, kThisAgentId] <= 0.0 or speed_arr[
            kEnvId, kThisAgentId
        ] >= (kMaxSpeed * skill_levels_arr[kThisAgentId]):
            acceleration_arr[kEnvId, kThisAgentId] = 0.0
        # Integrate position along the current heading.
        loc_x_arr[kEnvId, kThisAgentId] += speed_arr[kEnvId, kThisAgentId] * math.cos(
            direction_arr[kEnvId, kThisAgentId]
        )
        loc_y_arr[kEnvId, kThisAgentId] += speed_arr[kEnvId, kThisAgentId] * math.sin(
            direction_arr[kEnvId, kThisAgentId]
        )
        # Crossing the edge
        has_crossed_edge = (
            loc_x_arr[kEnvId, kThisAgentId] < 0
            or loc_x_arr[kEnvId, kThisAgentId] > kGridLength
            or loc_y_arr[kEnvId, kThisAgentId] < 0
            or loc_y_arr[kEnvId, kThisAgentId] > kGridLength
        )
        # Clip x and y if agent has crossed edge
        if has_crossed_edge:
            if loc_x_arr[kEnvId, kThisAgentId] < 0:
                loc_x_arr[kEnvId, kThisAgentId] = 0.0
            elif loc_x_arr[kEnvId, kThisAgentId] > kGridLength:
                loc_x_arr[kEnvId, kThisAgentId] = kGridLength
            if loc_y_arr[kEnvId, kThisAgentId] < 0:
                loc_y_arr[kEnvId, kThisAgentId] = 0.0
            elif loc_y_arr[kEnvId, kThisAgentId] > kGridLength:
                loc_y_arr[kEnvId, kThisAgentId] = kGridLength
            edge_hit_reward_penalty[kEnvId, kThisAgentId] = kEdgeHitPenalty
        else:
            edge_hit_reward_penalty[kEnvId, kThisAgentId] = 0.0
    # All agents must finish moving before observations/rewards read
    # other agents' positions.
    numba_driver.syncthreads()
    # --------------------------------
    # Generate observation            -
    # --------------------------------
    CudaTagContinuousGenerateObservation(
        loc_x_arr,
        loc_y_arr,
        speed_arr,
        direction_arr,
        acceleration_arr,
        agent_types_arr,
        kGridLength,
        kMaxSpeed,
        kNumOtherAgentsObserved,
        still_in_the_game_arr,
        kUseFullObservation,
        obs_arr,
        neighbor_distances_arr,
        neighbor_ids_sorted_by_distance_arr,
        nearest_neighbor_ids,
        env_timestep_arr,
        kNumAgents,
        kEpisodeLength,
        kEnvId,
        kThisAgentId,
    )
    # --------------------------------
    # Compute reward                  -
    # --------------------------------
    CudaTagContinuousComputeReward(
        rewards_arr,
        loc_x_arr,
        loc_y_arr,
        kGridLength,
        edge_hit_reward_penalty,
        step_rewards_arr,
        num_runners_arr,
        agent_types_arr,
        kDistanceMarginForReward,
        kTagRewardForTagger,
        kTagPenaltyForRunner,
        kEndOfGameRewardForRunner,
        kRunnerExitsGameAfterTagged,
        still_in_the_game_arr,
        done_arr,
        env_timestep_arr,
        kNumAgents,
        kEpisodeLength,
        kEnvId,
        kThisAgentId,
    )
import math
import numpy as np
import numba.cuda as numba_driver
from numba import float32, int32, boolean
try:
from warp_drive.numba_includes.env_config import *
except ImportError:
raise Exception("warp_drive.numba_includes.env_config is not available")
# Maps a discrete action index to a (dx, dy) grid move:
# 0: stay, 1: +x, 2: -x, 3: +y, 4: -y.
kIndexToActionArr = np.array([[0, 0], [1, 0], [-1, 0], [0, 1], [0, -1]])
@numba_driver.jit((int32[:, ::1],
                   int32[:, ::1],
                   float32[:, :, ::1],
                   int32,
                   int32[::1],
                   int32,
                   int32,
                   int32,
                   boolean),
                  device=True)
def NumbaTagGridWorldGenerateObservation(
    states_x_arr,
    states_y_arr,
    obs_arr,
    world_boundary,
    env_timestep_arr,
    episode_length,
    agent_id,
    env_id,
    use_full_observation
):
    """Write agent_id's contribution to the observations for env_id.

    Agents 0..wkNumberAgents-2 are taggers; agent wkNumberAgents-1 is the
    runner (wkNumberAgents comes from warp_drive.numba_includes.env_config).

    - use_full_observation True: obs shape is
      (num_envs, num_agents, 4 * num_agents + 1). Each thread writes its
      own agent's normalized x, y, is-runner flag and is-self flag into
      every agent's observation row; the runner's thread also writes the
      normalized timestep into the final slot (index 4 * num_agents).
    - use_full_observation False: obs shape is (num_envs, num_agents, 6):
      own normalized (x, y); for a tagger, the runner's (x, y); for the
      runner, the closest tagger's (x, y) found via a shared-memory
      distance buffer; then is-runner flag and normalized timestep.
    """
    is_tagger = (agent_id < wkNumberAgents - 1)
    if use_full_observation:
        # obs shape is (num_envs, num_agents, 4 * num_agents + 1)
        # state shape is (num_envs, num_agents,)
        for ag_id in range(wkNumberAgents):
            obs_arr[env_id, ag_id, agent_id] = states_x_arr[env_id, agent_id] / float(world_boundary)
            obs_arr[env_id, ag_id, agent_id + wkNumberAgents] = states_y_arr[env_id, agent_id] / float(world_boundary)
            obs_arr[env_id, ag_id, agent_id + 2 * wkNumberAgents] = 1.0 * int(agent_id == wkNumberAgents - 1)
            obs_arr[env_id, ag_id, agent_id + 3 * wkNumberAgents] = 1.0 * int(ag_id == agent_id)
            if agent_id == wkNumberAgents - 1:
                # Index (N-1) + 3N + 1 == 4N, i.e. the last observation slot.
                obs_arr[env_id, ag_id, agent_id + 3 * wkNumberAgents + 1] = \
                    env_timestep_arr[env_id] / float(episode_length)
    else:
        # obs shape is (num_envs, num_agents, 6)
        # state shape is (num_envs, num_agents,)
        # Shared (block-wide) squared-distance buffer: written per tagger,
        # read by the runner after the barrier below.
        distance = numba_driver.shared.array(shape=(wkNumberAgents,), dtype=np.int32)
        obs_arr[env_id, agent_id, 0] = states_x_arr[env_id, agent_id] / float(world_boundary)
        obs_arr[env_id, agent_id, 1] = states_y_arr[env_id, agent_id] / float(world_boundary)
        if is_tagger:
            # Taggers observe the runner's position directly.
            obs_arr[env_id, agent_id, 2] = states_x_arr[env_id, wkNumberAgents - 1] / float(world_boundary)
            obs_arr[env_id, agent_id, 3] = states_y_arr[env_id, wkNumberAgents - 1] / float(world_boundary)
            # Squared distance to the runner; stored into an int32 buffer
            # (the float result of math.pow is truncated on assignment).
            distance[agent_id] = math.pow(states_x_arr[env_id, agent_id] -
                                          states_x_arr[env_id, wkNumberAgents - 1], 2) + \
                                 math.pow(states_y_arr[env_id, agent_id] -
                                          states_y_arr[env_id, wkNumberAgents - 1], 2)
        # All taggers must have written their distances before the runner reads.
        numba_driver.syncthreads()
        if not is_tagger:
            # The runner observes the closest tagger's position.
            closest_agent_id = 0
            # Upper bound: larger than any squared distance inside the grid.
            min_distance = 2 * world_boundary * world_boundary
            for ag_id in range(wkNumberAgents - 1):
                if distance[ag_id] < min_distance:
                    min_distance = distance[ag_id]
                    closest_agent_id = ag_id
            obs_arr[env_id, agent_id, 2] = states_x_arr[env_id, closest_agent_id] / float(world_boundary)
            obs_arr[env_id, agent_id, 3] = states_y_arr[env_id, closest_agent_id] / float(world_boundary)
        obs_arr[env_id, agent_id, 4] = 1.0 * int(agent_id == wkNumberAgents - 1)
        obs_arr[env_id, agent_id, 5] = env_timestep_arr[env_id] / float(episode_length)
@numba_driver.jit((int32[:, ::1],
                   int32[:, ::1],
                   int32[:, :, ::1],
                   int32[::1],
                   float32[:, ::1],
                   float32[:, :, ::1],
                   float32,
                   float32,
                   float32,
                   float32,
                   boolean,
                   int32,
                   int32[::1],
                   int32))
def NumbaTagGridWorldStep(
    states_x_arr,
    states_y_arr,
    actions_arr,
    done_arr,
    rewards_arr,
    obs_arr,
    wall_hit_penalty,
    tag_reward_for_tagger,
    tag_penalty_for_runner,
    step_cost_for_tagger,
    use_full_observation,
    world_boundary,
    env_timestep_arr,
    episode_length
):
    # This implements Tagger on a discrete grid.
    # There are N taggers and 1 runner.
    # The taggers try to tag the runner.
    #
    # Launch layout: one block per environment, one thread per agent.
    # NOTE(review): unlike the continuous kernel, the movement section is
    # not guarded by kThisAgentId < num_agents; this assumes the block is
    # launched with exactly wkNumberAgents threads -- confirm.
    kEnvId = numba_driver.blockIdx.x
    kThisAgentId = numba_driver.threadIdx.x
    # Taggers are agents 0..N-2; the runner is agent N-1.
    is_tagger = (kThisAgentId < wkNumberAgents - 1)
    # Block-shared counter of how many taggers currently occupy the
    # runner's cell this step.
    num_total_tagged = numba_driver.shared.array(shape=(1,), dtype=np.int32)
    # Constant-memory copy of the action-index -> (dx, dy) table.
    kAction = numba_driver.const.array_like(kIndexToActionArr)
    # Increment time ONCE -- only 1 thread can do this.
    # Initialize the shared variable that counts how many runners are tagged.
    if kThisAgentId == 0:
        env_timestep_arr[kEnvId] += 1
        num_total_tagged[0] = 0
    numba_driver.syncthreads()
    assert 0 < env_timestep_arr[kEnvId] <= episode_length
    rewards_arr[kEnvId, kThisAgentId] = 0.0
    # Per-thread reward accumulator; written back once at the end.
    __rew = 0.0
    # -----------------------------------
    # Movement
    # -----------------------------------
    # Take action and check boundary cost.
    # Map action index to the real action space.
    ac_index = actions_arr[kEnvId, kThisAgentId, 0]
    states_x_arr[kEnvId, kThisAgentId] = states_x_arr[kEnvId, kThisAgentId] + kAction[ac_index, 0]
    states_y_arr[kEnvId, kThisAgentId] = states_y_arr[kEnvId, kThisAgentId] + kAction[ac_index, 1]
    # Clamp to the grid and apply the wall-hit penalty on each crossed axis.
    if states_x_arr[kEnvId, kThisAgentId] < 0:
        states_x_arr[kEnvId, kThisAgentId] = 0
        __rew -= wall_hit_penalty
    elif states_x_arr[kEnvId, kThisAgentId] > world_boundary:
        states_x_arr[kEnvId, kThisAgentId] = world_boundary
        __rew -= wall_hit_penalty
    if states_y_arr[kEnvId, kThisAgentId] < 0:
        states_y_arr[kEnvId, kThisAgentId] = 0
        __rew -= wall_hit_penalty
    elif states_y_arr[kEnvId, kThisAgentId] > world_boundary:
        states_y_arr[kEnvId, kThisAgentId] = world_boundary
        __rew -= wall_hit_penalty
    # make sure all agents have finished their movements
    numba_driver.syncthreads()
    # -----------------------------------
    # Check tags
    # -----------------------------------
    # A tag happens when a tagger lands on the runner's cell; the shared
    # counter is bumped atomically since several taggers may tag at once.
    if is_tagger:
        if states_x_arr[kEnvId, kThisAgentId] == states_x_arr[kEnvId, wkNumberAgents - 1] and \
                states_y_arr[kEnvId, kThisAgentId] == states_y_arr[kEnvId, wkNumberAgents - 1]:
            numba_driver.atomic.add(num_total_tagged, 0, 1)
    # make sure all agents have finished tag count
    numba_driver.syncthreads()
    # -----------------------------------
    # Rewards
    # -----------------------------------
    # Zero-sum-style shaping: taggers gain on a tag and pay a step cost
    # otherwise; the runner mirrors this (penalty on tag, step_cost gain).
    if is_tagger:
        if num_total_tagged[0] > 0:
            __rew += tag_reward_for_tagger
        else:
            __rew -= step_cost_for_tagger
    else:
        if num_total_tagged[0] > 0:
            __rew -= tag_penalty_for_runner
        else:
            __rew += step_cost_for_tagger
    rewards_arr[kEnvId, kThisAgentId] = __rew
    # -----------------------------------
    # Generate observation.
    # -----------------------------------
    # (x, y, tagger or runner, current_agent_or_not)
    NumbaTagGridWorldGenerateObservation(states_x_arr,
                                         states_y_arr,
                                         obs_arr,
                                         world_boundary,
                                         env_timestep_arr,
                                         episode_length,
                                         kThisAgentId,
                                         kEnvId,
                                         use_full_observation)
    # -----------------------------------
    # End condition
    # -----------------------------------
    # Determine if we're done (the runner is tagged or not).
    if env_timestep_arr[kEnvId] == episode_length or num_total_tagged[0] > 0:
        if kThisAgentId == 0:
            done_arr[kEnvId] = 1
Copyright (c) 2021, salesforce.com, inc. \
All rights reserved. \
SPDX-License-Identifier: BSD-3-Clause. \
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause.
# Introduction
In this tutorial, we will describe how to implement your own environment in CUDA C, and integrate it with WarpDrive for simulating the environment dynamics on the GPU.
In case you haven't familiarized yourself with WarpDrive and its PyCUDA backend, please see the other tutorials:
- [WarpDrive basics for PyCUDA](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.a-warp_drive_basics.ipynb)
- [WarpDrive sampler for PyCUDA](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.a-warp_drive_sampler.ipynb)
- [WarpDrive reset and log](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb)
We follow the OpenAI [gym](https://gym.openai.com/) style. Each simulation should have `__init__`, `reset` and `step` methods.
To use WarpDrive, you only need to implement the `step()` method in CUDA C. WarpDrive can automatically reinitialize the environment after it's done, i.e., at every `reset`, using the environment `Wrapper` class. This class takes your CUDA C `step()` function and manages the simulation flow on the GPU.
You can then do RL! See the [next tutorial](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-5-training_with_warp_drive.ipynb) to learn how to perform end-to-end multi-agent RL on a single GPU with WarpDrive.
# Building Simulations in CUDA C
CUDA C is an extension of C. See [this Nvidia blog](https://developer.nvidia.com/blog/even-easier-introduction-cuda/) and the [CUDA documentation](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html) for more info and CUDA tutorials.
For our initial release of WarpDrive, we focus on relatively simple simulations. A key reason is that CUDA C can give you significantly faster simulations, but requires careful memory management, among other things.
To make sure that everything works properly, one approach is to first implement your simulation logic in Python. You can then implement the same logic in CUDA C and check the simulation behaviors are the same.
To help with this process, we provide an *environment consistency checker* method to do consistency tests between Python and CUDA C simulations.
This workflow helps to familiarize yourself with CUDA C and works well for relatively simple simulations.
# Case Study: Building a CUDA Version of Tag
Within the WarpDrive package, you can find the source code for the discrete and continuous versions of Tag.
- [Tag (GridWorld)](https://www.github.com/salesforce/warp-drive/blob/master/example_envs/tag_gridworld/tag_gridworld.py)
- [Tag (Continuous)](https://www.github.com/salesforce/warp-drive/blob/master/example_envs/tag_continuous/tag_continuous.py)
Tag is a simple multi-agent game involving 'taggers' and 'runners'. The taggers chase and try to tag the runners. Tagged runners leave the game. Runners try to get away from the taggers.
Next, we'll use the *continuous* version of Tag to explain some important elements of building CUDA C simulations.
# Managing CUDA Simulations from Python using WarpDrive
We begin with the Python implementation of the continuous variant of [Tag](https://www.github.com/salesforce/warp-drive/blob/master/example_envs/tag_continuous/tag_continuous.py). The simulation follows the [gym](https://gym.openai.com/) format, implementing `reset` and `step` methods. We now detail all the steps necessary to transform the `step` function into [CUDA code](https://www.github.com/salesforce/warp-drive/blob/master/example_envs/tag_continuous/tag_continuous_step_pycuda.cu) that can be run on a GPU. Importantly, WarpDrive lets you call these CUDA methods from Python, so you can design your own RL workflow entirely in Python.
## 1. Add data to be pushed to GPU via the *DataFeed* class
First, we need to push all the data relevant to performing the reset() and step() functions on the GPU. In particular, there are two methods that need to be added to the environment
```python
def get_data_dictionary(self):
data_dict = DataFeed()
...
return data_dict
```
and
```python
def get_tensor_dictionary(self):
data_dict = DataFeed()
...
return data_dict
```
WarpDrive automatically handles pushing the data arrays provided within these methods to the GPU global memory. The data dictionary will be used to push data that will not require to be modified during training - once pushed into the GPU, this data will persist on the GPU, and not be modified. The tensor dictionary comprises data that is directly accessible by PyTorch, and is handy for data that needs to be modified during training. In each of the aforementioned data_dictionary methods, the return type needs to be a `DataFeed` class, which is essentially a dictionary, with additional attributes.
With the help of the DataFeed class, we can push arrays that are created when the environment is initialized, and needs to be re-initialized at every reset.
```python
data_dict = DataFeed()
for feature in ["loc_x", "loc_y", "speed", "direction", "acceleration"]:
data_dict.add_data(
name=feature,
data=self.global_state[feature][0],
save_copy_and_apply_at_reset=True,
)
```
Importantly, notice the `save_copy_and_apply_at_reset` flag set to True. This instructs WarpDrive to make a copy of this data and automatically re-initialize the data array to that exact value at each reset.
We can also push environment configuration parameters, for example,
```python
data_dict.add_data(
name="tag_reward_for_tagger", data=self.tag_reward_for_tagger
)
data_dict.add_data(
name="distance_margin_for_reward", data=self.distance_margin_for_reward
)
```
and any auxiliary variables that will be useful for modeling the step function dynamics:
```python
data_dict.add_data(
name="neighbor_distances",
data=np.zeros((self.num_agents, self.num_agents - 1), dtype=np.int32),
save_copy_and_apply_at_reset=True,
)
```
**Note**: for convenience, the data feed object also supports pushing multiple arrays at once via the `add_data_list()` API, see the [Tag (GridWorld)](https://www.github.com/salesforce/warp-drive/blob/master/example_envs/tag_gridworld/tag_gridworld.py#L337) code for an example.
An important point to note is that CUDA C always uses **32-bit precision**, so it's good to cast all the numpy arrays used in the Python simulation to 32-bit precision as well, before you push them.
## 2. Invoke the CUDA version of *step* in Python
After all the relevant data is added to the data dictionary, we need to invoke the CUDA C kernel code for stepping through the environment (when `self.env_backend` is set to `"pycuda"`). The syntax to do this is as follows
```python
if self.env_backend == "pycuda":
self.cuda_step(
self.cuda_data_manager.device_data("loc_x"),
self.cuda_data_manager.device_data("loc_y"),
self.cuda_data_manager.device_data("speed"),
...
)
```
where you need to add all the keys of the data dictionary (in no particular order) as arguments to the step function. Also, remember to add the imperative `observations`, `sampled_actions` and `rewards` data, respectively as
```python
...
self.cuda_data_manager.device_data("observations"),
self.cuda_data_manager.device_data("sampled_actions"),
self.cuda_data_manager.device_data("rewards"),
...
```
It will also be very useful to add the following reserved keywords: `_done_`, `_timestep_` along with `n_agents`, `episode_length`, `block` and `grid`.
```python
...
self.cuda_data_manager.device_data("_done_"),
self.cuda_data_manager.device_data("_timestep_"),
self.cuda_data_manager.meta_info("n_agents"),
self.cuda_data_manager.meta_info("episode_length"),
block=self.cuda_function_manager.block,
grid=self.cuda_function_manager.grid,
```
Note that `n_agents` and `episode_length` are part of the meta information for the data manager, so they can be directly referenced from therein. In particular, the `block` and `grid` arguments are essential to have the CUDA implementation determine how many threads and blocks to activate and use for the environment simulation.
WarpDrive also supports feeding multiple arguments at once via the `CUDAFunctionFeed` class, see the [Tag (GridWorld)](https://www.github.com/salesforce/warp-drive/blob/master/example_envs/tag_gridworld/tag_gridworld.py#L355) code for an example.
## 3. Write the *step()* method in CUDA C
The most laborious part of this exercise is actually writing out the step function in CUDA C. Importantly, this function will need to be named `Cuda<env.name>Step`, so that WarpDrive knows it represents the CUDA version of the step function for the particular environment. The order of the arguments should naturally follow the order written out where the CUDA C kernel is invoked.
```C
__global__ void CudaTagContinuousStep(
float* loc_x_arr,
float* loc_y_arr,
float* speed_arr,
...
```
Note the keyword `__global__` used on the step function. Global functions are also called "kernels" - they are functions you may call from the host. In our implementation of the CUDA C [code](https://www.github.com/salesforce/warp-drive/blob/master/example_envs/tag_continuous/tag_continuous_step.cu) for the tag environment, you will also notice the keyword `__device__` (for example, `__device__ void CudaTagContinuousGenerateObservation()` and `__device__ void CudaTagContinuousComputeReward()`) for functions that cannot be called from the host, but may only be called from other device or global functions.
Also, note the `void` return type - CUDA step functions don't need to return anything, but all the data arrays are modified in place.
While writing out the step code in CUDA C, the environment logic follows the same logic as in the Python step code. Remember that each thread only acts on a single agent, and for a single environment. The code excerpt below is a side-by-side comparison of Python and CUDA C code for updating the agents' x and y location co-ordinates.
On the CUDA C side, we can simplify and make the code more readable by using constants such as `kThisAgentId` and `kEnvId` (we have used this [naming style guide](https://google.github.io/styleguide/cppguide.html#General_Naming_Rules)) to indicate the thread and block indices, respectively. As you may have noticed by now, since each thread only writes to a specific index of the data array, understanding array indexing is critical.
<table align="left">
<tr>
<th> Python </th>
<th> CUDA C </th>
</tr>
<td>
```python
loc_x_curr_t = loc_x_prev_t + speed_curr_t * np.cos(dir_curr_t)
loc_y_curr_t = loc_y_prev_t + speed_curr_t * np.sin(dir_curr_t)
```
</td>
<td>
```c
const int kThisAgentId = threadIdx.x;
const int kEnvId = blockIdx.x;
if (kThisAgentId < kNumAgents) {
const int kThisAgentArrayIdx = kEnvId * kNumAgents + kThisAgentId;
loc_x_arr[kThisAgentArrayIdx] += speed_arr[kThisAgentArrayIdx] * cos(direction_arr[kThisAgentArrayIdx]);
loc_y_arr[kThisAgentArrayIdx] += speed_arr[kThisAgentArrayIdx] * sin(direction_arr[kThisAgentArrayIdx]);
}
```
</td>
</table>
## 4. Put together as an Environment class
To use an existing Python environment with WarpDrive, one needs to add two augmentations. First, a `get_data_dictionary()` method that returns a dictionary-like `DataFeed` object with the data arrays and parameters that should be pushed to the GPU. Second, the step function should call the CUDA step with the data arrays that the CUDA C step function should have access to.
In general, we can use just a single (*dual-mode*) environment class that can run both the Python and the CUDA C modes of the environment code on a GPU. The `env_backend = "pycuda"` enables switching between those modes. Note that the environment class will need to subclass `CUDAEnvironmentContext`, which essentially adds attributes to the environment (such as the `cuda_data_manager` and `cuda_function_manager`) that are required for running on a GPU. This also means that the environment itself can be stepped through only on a GPU. Please refer to the [Tag (Continuous)](https://www.github.com/salesforce/warp-drive/blob/master/example_envs/tag_continuous/tag_continuous.py) for a detailed working example.
```python
"""
Dual-mode environment class.
"""
class MyDualModeEnvironment(CUDAEnvironmentContext):
...
def get_data_dictionary(self):
data_dict = DataFeed()
...
return data_dict
def get_tensor_dictionary(self):
tensor_dict = DataFeed()
...
return tensor_dict
def reset(self):
# reset for CPU environment
...
def step(self, actions=None):
args = [YOUR_CUDA_STEP_ARGUMENTS]
if self.use_cuda:
self.cuda_step(
*self.cuda_step_function_feed(args),
block=self.cuda_function_manager.block,
grid=self.cuda_function_manager.grid,
)
return None
else:
...
return obs, rew, done, info
```
Alternatively, if you wish to run the Python environment on CPU-only hardware (where WarpDrive cannot be installed), we suggest you treat the environment class as a base class (e.g. [Tag (GridWorld)](https://github.com/salesforce/warp-drive/blob/master/example_envs/tag_gridworld/tag_gridworld.py#L23)) and move the code augmentations needed for WarpDrive to a derived class (e.g. [Tag (GridWorld)](https://github.com/salesforce/warp-drive/blob/master/example_envs/tag_gridworld/tag_gridworld.py#L318)). Accordingly, you can run the Python environment without having to install WarpDrive, while the CUDA C mode of the environment will need to run only on a GPU (with WarpDrive installed).
```python
"""
The CUDA environment class is derived from the CPU environment class.
"""
class MyCPUEnvironment:
...
def reset(self):
# reset for CPU environment
...
def step(self):
# step for CPU environment
...
return obs, rew, done, info
class MyCUDAEnvironment(MyCPUEnvironment, CUDAEnvironmentContext):
...
def get_data_dictionary(self):
data_dict = DataFeed()
...
return data_dict
def get_tensor_dictionary(self):
tensor_dict = DataFeed()
...
return tensor_dict
def step(self):
# overwrite the CPU execution in the CPU base class
self.cuda_step(
*self.cuda_step_function_feed(args),
block=self.cuda_function_manager.block,
grid=self.cuda_function_manager.grid,
)
return None
```
## 5. The EnvWrapper Class
Once the CUDA C environment implementation is complete, WarpDrive provides an environment wrapper class to help launch the simulation on the CPU or the GPU. This wrapper determines whether the simulation needs to be launched on the CPU or the GPU (via the `use_cuda` flag), and proceeds accordingly. If the environment runs on the CPU, the `reset` and `step` calls also occur on the CPU. If the environment runs on the GPU, only the first `reset` happens on the CPU, all the relevant data is copied over to the GPU afterwards, and the subsequent steps (and resets) all happen on the GPU. In the latter case, the environment wrapper also uses the `num_envs` argument to instantiate multiple replicas of the environment on the GPU.
Additionally, the environment wrapper handles all the tasks required to run the environment on the GPU:
- Determines the environment's observation and action spaces
- Initializes the CUDA data and function managers for the environment
- Registers the CUDA version of the step() function
- Pushes the data listed in the data dictionary and tensor dictionary attributes of the environment, and repeats them across the environment dimension, if necessary.
- Automatically resets each environment when it is done.
### Register the CUDA environment
Here we have some more details about how to use EnvWrapper to identify and build your environment automatically once the CUDA C step environment is ready.
#### 1. Default Environment
You should register your default environment in `warp_drive/utils/pycuda_utils/misc` and the function `get_default_env_directory()`. There, you can simply provide the path to your CUDA environment source code. Please remember that the register uses the environment name defined in your environment class as the key so that the EnvWrapper class can link it to the right environment.
The **FULL_PATH_TO_YOUR_ENV_CUDA_SRC** can be any path inside or outside WarpDrive. For example, you can develop your own CUDA step function and environment in your codebase and register it right here.
```python
envs = {
"TagGridWorld": f"{get_project_root()}/example_envs/tag_gridworld/tag_gridworld_step.cu",
"TagContinuous": f"{get_project_root()}/example_envs/tag_continuous/tag_continuous_step_pycuda.cu",
"YOUR_ENVIRONMENT": "FULL_PATH_TO_YOUR_ENV_CUDA_SRC",
}
```
Usually we do not suggest you use this "hard" way because it integrates your environment directly into WarpDrive. So far, we have our Tag games as benchmarks registered right there as we regard them as part of WarpDrive codebase.
#### 2. Customized Environment
You can register a customized environment by using **EnvironmentRegistrar**. Please note that a customized environment has higher priority than the default environments, i.e., if two environments (one registered as customized, the other being the default) take the same name, the customized environment will be loaded. However, it is recommended not to have any environment name conflict under any circumstance.
```python
from warp_drive.utils.env_registrar import EnvironmentRegistrar
import Your_Env_Class
env_registrar = EnvironmentRegistrar()
env_registrar.add_cuda_env_src_path(Your_Env_Class.name, "FULL_PATH_TO_YOUR_ENV_CUDA_SRC", env_backend="pycuda")
env_wrapper = EnvWrapper(
Your_Env_Class(**run_config["env"]),
num_envs=num_envs,
    env_backend="pycuda",
env_registrar=env_registrar)
```
Now, inside the EnvWrapper, function managers will be able to feed the `self.num_env` and `self.num_agents` to the CUDA compiler at compile time to build and load a unique CUDA environment context for all the tasks.
## 6. Environment Consistency Checker
Given the environment is implemented in both Python and CUDA C (for running on the CPU and GPU, respectively), please use **EnvironmentCPUvsGPU** class to test the consistency of your implementation. The module will instantiate two separate environment objects (with the `use_cuda` flag set to True and False), step through `num_episodes` episodes (with the same actions) and determine if there are inconsistencies in terms of the generated states, rewards or done flags.
Here is an example for the dual mode environment class. Please refer to the [Tag (Continuous) test](https://www.github.com/salesforce/warp-drive/blob/master/tests/example_envs/test_tag_continuous.py) for a detailed working example.
```python
from warp_drive.env_cpu_gpu_consistency_checker import EnvironmentCPUvsGPU
from warp_drive.utils.env_registrar import EnvironmentRegistrar
import Your_Dual_Mode_Env_Class
env_registrar = EnvironmentRegistrar()
env_registrar.add_cuda_env_src_path(Your_Dual_Mode_Env_Class.name, "FULL_PATH_TO_YOUR_ENV_CUDA_SRC")
env_configs = {
"test1": {
"num_agents": 4,
}
}
testing_class = EnvironmentCPUvsGPU(
dual_mode_env_class=Your_Dual_Mode_Env_Class,
env_configs=env_configs,
num_envs=2,
num_episodes=2,
env_registrar=env_registrar,
)
testing_class.test_env_reset_and_step()
```
And the following is an example for the parent-child environment classes. It is actually the same as the dual mode, the only difference is that `EnvironmentCPUvsGPU` will take two corresponding classes. Please refer to the [Tag (GridWorld) test](https://www.github.com/salesforce/warp-drive/blob/master/tests/example_envs/test_tag_gridworld.py) for a detailed working example.
```python
from warp_drive.env_cpu_gpu_consistency_checker import EnvironmentCPUvsGPU
from warp_drive.utils.env_registrar import EnvironmentRegistrar
import Your_CPU_Env_Class, Your_GPU_Env_Class
env_registrar = EnvironmentRegistrar()
env_registrar.add_cuda_env_src_path(Your_GPU_Env_Class.name, "FULL_PATH_TO_YOUR_ENV_CUDA_SRC")
env_configs = {
"test1": {
"num_agents": 4,
}
}
testing_class = EnvironmentCPUvsGPU(
cpu_env_class=Your_CPU_Env_Class,
cuda_env_class=Your_GPU_Env_Class,
env_configs=env_configs,
num_envs=2,
num_episodes=2,
env_registrar=env_registrar,
)
testing_class.test_env_reset_and_step()
```
The `EnvironmentCPUvsGPU` class also takes in a few optional arguments that will need to be correctly set, if required.
- `use_gpu_testing_mode`: a flag to determine whether to simply load the cuda binaries (.cubin) or compile the cuda source code (.cu) each time to create a binary. Defaults to False.
- `env_registrar`: the EnvironmentRegistrar object. It provides the customized env info (like src path) for the build.
- `env_wrapper`: allows for the user to provide their own EnvWrapper.
- `policy_tag_to_agent_id_map`: a dictionary mapping policy tag to agent ids.
- `create_separate_placeholders_for_each_policy`: a flag indicating whether there exist separate observations, actions and rewards placeholders, for each policy, as designed in the step function. The placeholders will be used in the step() function and during training. When there's only a single policy, this flag will be False. It can also be True when there are multiple policies, yet all the agents have the same obs and action space shapes, so we can share the same placeholder. Defaults to False.
- `obs_dim_corresponding_to_num_agents`: this is indicative of which dimension in the observation corresponds to the number of agents, as designed in the step function. It may be "first" or "last". In other words, observations may be shaped (num_agents, *feature_dim) or (*feature_dim, num_agents). This is required in order for WarpDrive to process the observations correctly. This is only relevant when a single obs key corresponds to multiple agents. Defaults to "first".
## 7. Unittest WarpDrive
The build and test can be done automatically by going directly to the CUDA source code folder and running make
`cd warp_drive/cuda_includes; make compile-test`
Or, you can run `python warp_drive/utils/run_unittests_pycuda.py`
# Important CUDA C Concepts
Writing CUDA programs requires basic knowledge of C and how CUDA C extends C. Here's a [quick reference](https://learnxinyminutes.com/docs/c/) to see the syntax of C.
For many simulations, basic C concepts should get you very far. However, you could make very complex simulations -- the sky is the limit!
Below, we'll discuss two important CUDA C concepts -- we're constantly planning to add more materials and tools in the future to facilitate developing CUDA simulations.
## Array Indexing
As described in the first [tutorial](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1-warp_drive_basics.ipynb#Array-indexing), CUDA stores arrays in a C-contiguous or a row-major fashion;
In general, it helps to set up some indexing constants as you develop code, so you can reuse them across your code. For example, the index for a specific agent id `kThisAgentId` ($0 \leq \text{kThisAgentId} < \text{NumAgents}$) in the location arrays (shaped (`NumEnvs, NumAgents`)) would be
```C
const int kThisAgentArrayIdx = kEnvId * kNumAgents + kThisAgentId;
```
and this index can be reused across different contexts.
Note: to facilitate simulation development, we also created a `get_flattened_array_index` helper function to provide the flattened array index; please see [Tag (GridWorld)](https://github.com/salesforce/warp-drive/blob/master/example_envs/tag_gridworld/tag_gridworld_step.cu#L151) for a working example.
## __syncthreads
Another keyword that is useful to understand in the context of multi-agent simulations is `__syncthreads()`. While all the agents can operate fully in parallel, there are often operations that may need to be performed sequentially by the agents or only by one of the agents. For such cases, we may use the **__syncthreads()** command, a thread block-level synchronization barrier. All the threads will wait for all the threads in the block to reach that point, before proceeding further.
```C
// Increment time ONCE -- only 1 thread can do this.
if (kThisAgentId == 0) {
env_timestep_arr[kEnvId] += 1;
}
// Wait here until timestep has been updated
__syncthreads();
```
# Debugging and Checking Consistency
Once you are done building your environment, you may use the `env_cpu_gpu_consistency_checker` function in WarpDrive to ensure the Python and CUDA C versions of the environment are logically consistent with one another. The consistency tests run across two full episode lengths (to ensure consistent behavior even beyond the point when the environments are reset), and ensure that the observations, rewards, and done flags match one another. For catching syntax errors, the C compiler is pretty good at pointing out the exact error and the line number. Often, to figure out deeper issues with the code, `printf` is your best friend.
# Learn More and Explore our Tutorials!
And that's it for this tutorial. Good luck building your environments.
Once you are done building, see our next [tutorial](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-5-training_with_warp_drive.ipynb) on training your environment with WarpDrive and the subsequent [tutorial](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-6-scaling_up_training_with_warp_drive.md) on scaling up training.
For your reference, all our tutorials are here:
1. [WarpDrive basics(intro and pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.a-warp_drive_basics.ipynb)
2. [WarpDrive basics(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.b-warp_drive_basics.ipynb)
3. [WarpDrive sampler(pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.a-warp_drive_sampler.ipynb)
4. [WarpDrive sampler(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.b-warp_drive_sampler.ipynb)
5. [WarpDrive resetter and logger](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb)
6. [Create custom environments (pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.a-create_custom_environments_pycuda.md)
7. [Create custom environments (numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.b-create_custom_environments_numba.md)
8. [Training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-5-training_with_warp_drive.ipynb)
9. [Scaling Up training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-6-scaling_up_training_with_warp_drive.md)
10. [Training with WarpDrive + Pytorch Lightning](https://github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-7-training_with_warp_drive_and_pytorch_lightning.ipynb)
```python
```
| /rl-warp-drive-2.5.0.tar.gz/rl-warp-drive-2.5.0/tutorials/tutorial-4.a-create_custom_environments_pycuda.md | 0.524395 | 0.939637 | tutorial-4.a-create_custom_environments_pycuda.md | pypi |
Copyright (c) 2021, salesforce.com, inc.\
All rights reserved.\
SPDX-License-Identifier: BSD-3-Clause\
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Get started quickly with end-to-end multi-agent RL using WarpDrive! This shows a basic example to create a simple multi-agent Tag environment and get training. For more configuration options and in-depth explanations, check out the other tutorials and source code.
**Try this notebook on [Colab](http://colab.research.google.com/github/salesforce/warp-drive/blob/master/tutorials/simple-end-to-end-example.ipynb)!**
# ⚠️ PLEASE NOTE:
This notebook runs on a GPU runtime.\
If running on Colab, choose Runtime > Change runtime type from the menu, then select `GPU` in the 'Hardware accelerator' dropdown menu.
# Dependencies
You can install the warp_drive package using
- the pip package manager, OR
- by cloning the warp_drive package and installing the requirements.
We will install the latest version of WarpDrive using the pip package manager.
```
! pip install --quiet "rl-warp-drive>=2.4"
import torch
assert torch.cuda.device_count() > 0, "This notebook needs a GPU to run!"
from example_envs.tag_continuous.tag_continuous import TagContinuous
from warp_drive.env_wrapper import EnvWrapper
from warp_drive.training.trainer import Trainer
# Set logger level e.g., DEBUG, INFO, WARNING, ERROR
import logging
logging.getLogger().setLevel(logging.ERROR)
```
# Environment, Training, and Model Hyperparameters
```
# Specify a set of run configurations for your experiments.
# Note: these override some of the default configurations in 'warp_drive/training/run_configs/default_configs.yaml'.
run_config = dict(
name="tag_continuous",
# Environment settings
env=dict(
num_taggers=5,
num_runners=20,
episode_length=100,
seed=1234,
use_full_observation=False,
num_other_agents_observed=10,
tagging_distance=0.02,
),
# Trainer settings
trainer=dict(
num_envs=100, # number of environment replicas (number of GPU blocks used)
train_batch_size=10000, # total batch size used for training per iteration (across all the environments)
num_episodes=5000, # total number of episodes to run the training for (can be arbitrarily high!)
),
# Policy network settings
policy=dict(
runner=dict(
to_train=True, # flag indicating whether the model needs to be trained
algorithm="A2C", # algorithm used to train the policy
gamma=0.98, # discount rate
lr=0.005, # learning rate
model=dict(
type="fully_connected", fc_dims=[256, 256], model_ckpt_filepath=""
), # policy model settings
),
tagger=dict(
to_train=True,
algorithm="A2C",
gamma=0.98,
lr=0.002,
model=dict(
type="fully_connected", fc_dims=[256, 256], model_ckpt_filepath=""
),
),
),
# Checkpoint saving setting
saving=dict(
metrics_log_freq=10, # how often (in iterations) to print the metrics
model_params_save_freq=5000, # how often (in iterations) to save the model parameters
basedir="/tmp", # base folder used for saving
name="continuous_tag", # experiment name
tag="example", # experiment tag
),
)
```
# End-to-End Training Loop
```
# Create a wrapped environment object via the EnvWrapper
# Ensure that env_backend is set to be "pycuda" or "numba" (in order to run on the GPU)
env_wrapper = EnvWrapper(
TagContinuous(**run_config["env"]),
num_envs=run_config["trainer"]["num_envs"],
env_backend='pycuda',
)
# Agents can share policy models: this dictionary maps policy model names to agent ids.
policy_tag_to_agent_id_map = {
"tagger": list(env_wrapper.env.taggers),
"runner": list(env_wrapper.env.runners),
}
# Create the trainer object
trainer = Trainer(
env_wrapper=env_wrapper,
config=run_config,
policy_tag_to_agent_id_map=policy_tag_to_agent_id_map,
)
# Perform training!
trainer.train()
# Shut off gracefully
trainer.graceful_close()
```
# Learn more and explore our tutorials
For your reference, all our tutorials are here:
1. [WarpDrive basics(intro and pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.a-warp_drive_basics.ipynb)
2. [WarpDrive basics(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.b-warp_drive_basics.ipynb)
3. [WarpDrive sampler(pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.a-warp_drive_sampler.ipynb)
4. [WarpDrive sampler(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.b-warp_drive_sampler.ipynb)
5. [WarpDrive resetter and logger](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb)
6. [Create custom environments (pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.a-create_custom_environments_pycuda.md)
7. [Create custom environments (numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.b-create_custom_environments_numba.md)
8. [Training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-5-training_with_warp_drive.ipynb)
9. [Scaling Up training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-6-scaling_up_training_with_warp_drive.md)
10. [Training with WarpDrive + Pytorch Lightning](https://github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-7-training_with_warp_drive_and_pytorch_lightning.ipynb)
```
```
| /rl-warp-drive-2.5.0.tar.gz/rl-warp-drive-2.5.0/tutorials/simple-end-to-end-example.ipynb | 0.808446 | 0.776411 | simple-end-to-end-example.ipynb | pypi |
Copyright (c) 2021, salesforce.com, inc.\
All rights reserved.\
SPDX-License-Identifier: BSD-3-Clause\
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Try this notebook on [Colab](http://colab.research.google.com/github/salesforce/warp-drive/blob/master/tutorials/tutorial-5-training_with_warp_drive.ipynb)!
# ⚠️ PLEASE NOTE:
This notebook runs on a GPU runtime.\
If running on Colab, choose Runtime > Change runtime type from the menu, then select `GPU` in the 'Hardware accelerator' dropdown menu.
# Introduction
In this tutorial, we describe how to
- Use the WarpDrive framework to perform end-to-end training of multi-agent reinforcement learning (RL) agents.
- Visualize the behavior using the trained policies.
In case you haven't familiarized yourself with WarpDrive, please see the other tutorials we have prepared for you
1. [WarpDrive basics(intro and pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.a-warp_drive_basics.ipynb)
2. [WarpDrive basics(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.b-warp_drive_basics.ipynb)
3. [WarpDrive sampler(pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.a-warp_drive_sampler.ipynb)
4. [WarpDrive sampler(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.b-warp_drive_sampler.ipynb)
5. [WarpDrive resetter and logger](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb)
Please also see our tutorials
6. [Create custom environments (pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.a-create_custom_environments_pycuda.md)
7. [Create custom environments (numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.b-create_custom_environments_numba.md)
on creating your own RL environment in CUDA C or Numba. Once you have your own environment, this tutorial explains how to integrate it with the WarpDrive framework to perform training.
# Dependencies
You can install the warp_drive package using
- the pip package manager, OR
- by cloning the warp_drive package and installing the requirements.
We will install the latest version of WarpDrive using the pip package manager.
```
! pip install --quiet "rl-warp-drive>=2.4" "ffmpeg-python"
import torch
assert torch.cuda.device_count() > 0, "This notebook needs a GPU to run!"
from warp_drive.env_wrapper import EnvWrapper
from warp_drive.training.trainer import Trainer
from warp_drive.utils.common import get_project_root
from example_envs.tag_continuous.tag_continuous import TagContinuous
from example_envs.tag_continuous.generate_rollout_animation import (
generate_tag_env_rollout_animation,
)
from gym.spaces import Discrete, MultiDiscrete
from IPython.display import HTML
import yaml
import numpy as np
# Set logger level e.g., DEBUG, INFO, WARNING, ERROR
import logging
logging.getLogger().setLevel(logging.ERROR)
```
# Training the continuous version of Tag with WarpDrive
We will now explain how to train your environments using WarpDrive in just a few steps. For the sake of exposition, we consider the continuous version of Tag.
For your reference, there is also an example end-to-end RL training script [here](https://github.com/salesforce/warp-drive/blob/master/warp_drive/training/example_training_script_pycuda.py) that contains all the steps below. It can be used to set up your own custom training pipeline. Invoke training by using
```shell
python warp_drive/training/example_training_script_pycuda.py --env <ENV-NAME>
```
where `<ENV-NAME>` can be `tag_gridworld` or `tag_continuous` (or any new env that you build).
If you want to use Numba as the backend, simply invoke training by using
```shell
python warp_drive/training/example_training_script_numba.py --env <ENV-NAME>
```
## Step 1: Specify a set of run configurations for your experiments.
In order to run the training for these environments, we first need to specify a *run config*, which comprises the set of environment, training, and model parameters.
Note: there are also some default configurations in 'warp_drive/training/run_configs/default_configs.yaml', and the run configurations you provide will override them.
For this tutorial, we will use the configuration [here](assets/tag_continuous_training/run_config.yaml). Specifically, we'll use $5$ taggers and $100$ runners in a $20 \times 20$ square grid. The taggers and runners have the same skill level, i.e., the runners can move just as fast as the taggers.
The sequence of snapshots below shows a sample realization of the game with randomly chosen agent actions. The $5$ taggers are marked in pink, while the $100$ blue agents are the runners. The snapshots are taken at 1) the beginning of the episode ($t=0$), 2) timestep $250$, and 3) end of the episode ($t=500$). Only $20\%$ of the runners remain at the end of the episode.
<img src="assets/tag_continuous_training/t=0.png" width="250" height="250"/> <img src="assets/tag_continuous_training/t=250.png" width="250" height="250"/> <img src="assets/tag_continuous_training/t=500.png" width="250" height="250"/>
We train the agents using $200$ environments or simulations running in parallel. With WarpDrive, each simulation runs on separate GPU blocks.
There are two separate policy networks used for the tagger and runner agents. Each network is a fully-connected model with two layers each of $256$ dimensions. We use the Advantage Actor Critic (A2C) algorithm for training. WarpDrive also currently provides the option to use the Proximal Policy Optimization (PPO) algorithm instead.
```
# Load the run config.
# Here we show an example configures
CFG = """
# Sample YAML configuration for the tag continuous environment
name: "tag_continuous"
# Environment settings
env:
num_taggers: 5
num_runners: 100
grid_length: 20
episode_length: 500
max_acceleration: 0.1
min_acceleration: -0.1
max_turn: 2.35 # 3*pi/4 radians
min_turn: -2.35 # -3*pi/4 radians
num_acceleration_levels: 10
num_turn_levels: 10
skill_level_runner: 1
skill_level_tagger: 1
seed: 274880
use_full_observation: False
runner_exits_game_after_tagged: True
num_other_agents_observed: 10
tag_reward_for_tagger: 10.0
tag_penalty_for_runner: -10.0
step_penalty_for_tagger: -0.00
step_reward_for_runner: 0.00
edge_hit_penalty: -0.0
end_of_game_reward_for_runner: 1.0
tagging_distance: 0.02
# Trainer settings
trainer:
num_envs: 400 # number of environment replicas
train_batch_size: 10000 # total batch size used for training per iteration (across all the environments)
num_episodes: 500 # number of episodes to run the training for (can be arbitrarily high)
# Policy network settings
policy: # list all the policies below
runner:
to_train: True # flag indicating whether the model needs to be trained
algorithm: "A2C" # algorithm used to train the policy
    gamma: 0.98 # discount rate gamma
lr: 0.005 # learning rate
vf_loss_coeff: 1 # loss coefficient for the value function loss
entropy_coeff:
- [0, 0.5]
- [2000000, 0.05]
model: # policy model settings
type: "fully_connected" # model type
fc_dims: [256, 256] # dimension(s) of the fully connected layers as a list
model_ckpt_filepath: "" # filepath (used to restore a previously saved model)
tagger:
to_train: True
algorithm: "A2C"
gamma: 0.98
lr: 0.002
vf_loss_coeff: 1
model:
type: "fully_connected"
fc_dims: [256, 256]
model_ckpt_filepath: ""
# Checkpoint saving setting
saving:
metrics_log_freq: 100 # how often (in iterations) to print the metrics
model_params_save_freq: 5000 # how often (in iterations) to save the model parameters
basedir: "/tmp" # base folder used for saving
name: "tag_continuous"
tag: "100runners_5taggers"
"""
run_config = yaml.safe_load(CFG)
```
## Step 2: Create the environment object using WarpDrive's envWrapper.
### Important! Since v2.0, WarpDrive is fully supporting Numba as the backend.
Before v1.8: Ensure that 'use_cuda' is set to True (in order to run the simulation on the GPU).
Since v1.8: Ensure that 'env_backend' is set to 'pycuda' or 'numba' below (in order to run the simulation on the GPU) depending on your backend.
```
env_wrapper = EnvWrapper(
env_obj=TagContinuous(**run_config["env"]),
num_envs=run_config["trainer"]["num_envs"],
env_backend="pycuda",
)
```
Creating the env wrapper initializes the CUDA data manager and pushes some reserved data arrays to the GPU. It also initializes the CUDA function manager, and loads some WarpDrive library CUDA kernels.
## Step 3: Specify a mapping from the policy to agent indices.
Next, we will need to map each trainable policy to the agent indices that are using it. As such, we have the tagger and runner policies, and we will map those to the corresponding agents.
```
policy_tag_to_agent_id_map = {
"tagger": list(env_wrapper.env.taggers),
"runner": list(env_wrapper.env.runners),
}
```
Note that if you wish to use just a single policy across all the agents (or if you wish to use many other policies), you will need to update the run configuration as well as the policy_to_agent_id_mapping.
For example, for using a shared policy across all agents (say `shared_policy`), for example, you can just use the run configuration as
```python
"policy": {
"shared_policy": {
"to_train": True,
...
},
},
```
and also set all the agent ids to use this shared policy
```python
policy_tag_to_agent_id_map = {
"shared_policy": np.arange(envObj.env.num_agents),
}
```
**Importantly, make sure the `policy` keys and the `policy_tag_to_agent_id_map` keys are identical.**
## Step 4: Create the Trainer object.
```
trainer = Trainer(env_wrapper, run_config, policy_tag_to_agent_id_map)
```
The `Trainer` class also takes in a few optional arguments that will need to be correctly set, if required.
- `create_separate_placeholders_for_each_policy`: a flag indicating whether there exist separate observations, actions and rewards placeholders, for each policy, used in the step() function and during training. When there's only a single policy, this flag will be False. It can also be True when there are multiple policies, yet all the agents have the same obs and action space shapes, so we can share the same placeholder. Defaults to "False".
- `obs_dim_corresponding_to_num_agents`: indicative of which dimension in the observation corresponds to the number of agents, as designed in the step function. It may be "first" or "last". In other words, observations may be shaped (num_agents, feature_dims) or (feature_dims, num_agents). This is required in order for WarpDrive to process the observations correctly. This is only relevant when a single obs key corresponds to multiple agents. Defaults to "first".
- `num_devices`: number of GPU devices used for (distributed) training. Defaults to 1.
- `device_id`: the device ID. This is set in the context of multi-GPU training.
- `results_dir`: (optional) name of the directory to save results into. If this is not set, the current timestamp will be used instead.
- `verbose`: if False, training metrics are not printed to the screen. Defaults to True.
When the trainer object is created, the environment(s) are reset and all the relevant data arrays (e.g., "loc_x", "loc_y, "speed") are automatically pushed from the CPU to the GPU (just once). Additionally, the observation, reward, action and done flag data array sizes are determined and the array placeholders are also automatically pushed to the GPU. After training begins, these arrays are only updated in-place, and there's no data transferred back to the CPU.
# Visualizing the trainer policies
## Visualizing an episode roll-out before training
We have created a helper function (`generate_tag_env_rollout_animation`) in order to visualize an episode rollout. Internally, this function uses the WarpDrive module's `fetch_episode_states` to fetch the data arrays on the GPU for the duration of an entire episode. Specifically, we fetch the state arrays pertaining to agents' x and y locations on the plane and indicators on which agents are still active in the game, and will use these to visualize an episode roll-out. Note that this function may be invoked at any time during training, and it will use the state of the policy models at that time to sample actions and generate the visualization.
```
# Visualize the entire episode roll-out
anim = generate_tag_env_rollout_animation(trainer)
HTML(anim.to_html5_video())
```
In the visualization above, the large purple dots represent the taggers, while the smaller blue dots represent the runners. Before training, the runners and taggers move around randomly, and that results in a lot of the runners getting tagged, just by chance.
## Step 5: Perform training
Training is performed by calling trainer.train(). We run training for just $500$ episodes, as specified in the run configuration.
```
trainer.train()
```
As training happens, we log the speed performance numbers and the metrics for all the trained policies every `metrics_log_freq` iterations. The training results and the model checkpoints are also saved on a timely (as specified in the run configuration parameters `model_params_save_freq`) basis.
## Visualize an episode-rollout after training (for about 2M episodes)
We can also initialize the trainer model parameters using saved model checkpoints via the `load_model_checkpoint` API. With this, we will be able to fetch the episode states for a trained model, for example. We will now visualize an episode roll-out using trained tagger and runner policy model weights (trained for $2$M episodes or $1$B steps), that are located in [this](assets/tag_continuous_training/) folder.
```
trainer.load_model_checkpoint(
{
"tagger": f"{get_project_root()}/tutorials/assets/tag_continuous_training/tagger_1000010000.state_dict",
"runner": f"{get_project_root()}/tutorials/assets/tag_continuous_training/runner_1000010000.state_dict",
}
)
# Visualize the entire episode roll-out
anim = generate_tag_env_rollout_animation(trainer)
HTML(anim.to_html5_video())
```
After training, the runners learn to run away from the taggers, and the taggers learn to chase them; there are some instances where we see that taggers also team up to chase and tag the runners.
You have now seen how to train an entire multi-agent RL pipeline end-to-end. Please see the next [tutorial](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-6-scaling_up_training_with_warp_drive.md) on scaling up training.
We also have a [trainer](https://github.com/salesforce/warp-drive/blob/master/warp_drive/training/lightning_trainer.py) compatible with [Pytorch Lightning](https://www.pytorchlightning.ai/) and have prepared a tutorial on training with WarpDrive and Pytorch Lightning [here](https://github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-7-training_with_warp_drive_and_pytorch_lightning.ipynb).
## Step 6: Gracefully shut down the trainer object
```
# Close the trainer to clear up the CUDA memory heap
trainer.graceful_close()
```
# Learn More and Explore our Tutorials!
For your reference, all our tutorials are here:
1. [WarpDrive basics(intro and pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.a-warp_drive_basics.ipynb)
2. [WarpDrive basics(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.b-warp_drive_basics.ipynb)
3. [WarpDrive sampler(pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.a-warp_drive_sampler.ipynb)
4. [WarpDrive sampler(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.b-warp_drive_sampler.ipynb)
5. [WarpDrive resetter and logger](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb)
6. [Create custom environments (pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.a-create_custom_environments_pycuda.md)
7. [Create custom environments (numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.b-create_custom_environments_numba.md)
8. [Training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-5-training_with_warp_drive.ipynb)
9. [Scaling Up training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-6-scaling_up_training_with_warp_drive.md)
10. [Training with WarpDrive + Pytorch Lightning](https://github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-7-training_with_warp_drive_and_pytorch_lightning.ipynb)
```
```
| /rl-warp-drive-2.5.0.tar.gz/rl-warp-drive-2.5.0/tutorials/tutorial-5-training_with_warp_drive.ipynb | 0.891043 | 0.932913 | tutorial-5-training_with_warp_drive.ipynb | pypi |
Copyright (c) 2021, salesforce.com, inc. \
All rights reserved. \
SPDX-License-Identifier: BSD-3-Clause \
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
**Try this notebook on [Colab](http://colab.research.google.com/github/salesforce/warp-drive/blob/master/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb)!**
# ⚠️ PLEASE NOTE:
This notebook runs on a GPU runtime.\
If running on Colab, choose Runtime > Change runtime type from the menu, then select `GPU` in the 'Hardware accelerator' dropdown menu.
```
import torch
assert torch.cuda.device_count() > 0, "This notebook needs a GPU to run!"
```
# Welcome to WarpDrive!
This is our third (and an advanced) tutorial about WarpDrive, a framework for extremely parallelized multi-agent reinforcement learning (RL) on a single GPU. If you haven't yet, please also checkout our previous tutorials
- WarpDrive basics
- [Introduction and PyCUDA](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.a-warp_drive_basics.ipynb)
- [Numba](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.b-warp_drive_basics.ipynb)
- WarpDrive sampler
- [PyCUDA](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.a-warp_drive_sampler.ipynb)
- [Numba](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.b-warp_drive_sampler.ipynb)
In this tutorial, we describe **PyCUDAEnvironmentReset** and **PyCUDALogController**.
- PyCUDAEnvironmentReset works exclusively on the GPU to reset the environment in-place.
- PyCUDALogController works exclusively in the GPU device to log the episode history.
They both play important roles in the WarpDrive framework.
# Dependencies
You can install the warp_drive package using
- the pip package manager, OR
- by cloning the warp_drive package and installing the requirements.
On Colab, we will do the latter.
```
import sys
IN_COLAB = "google.colab" in sys.modules
if IN_COLAB:
! git clone https://github.com/salesforce/warp-drive.git
% cd warp-drive
! pip install -e .
else:
! pip install -U rl_warp_drive
import numpy as np
from warp_drive.managers.pycuda_managers.pycuda_data_manager import PyCUDADataManager
from warp_drive.managers.pycuda_managers.pycuda_function_manager import (
PyCUDAFunctionManager,
PyCUDALogController,
PyCUDAEnvironmentReset,
)
from warp_drive.utils.constants import Constants
from warp_drive.utils.data_feed import DataFeed
from warp_drive.utils.common import get_project_root
_MAIN_FILEPATH = f"{get_project_root()}/warp_drive/cuda_includes"
_CUBIN_FILEPATH = f"{get_project_root()}/warp_drive/cuda_bin"
_ACTIONS = Constants.ACTIONS
# Set logger level e.g., DEBUG, INFO, WARNING, ERROR
import logging
logging.getLogger().setLevel(logging.INFO)
```
# CUDAEnvironmentReset and CUDALogController
Assuming you have developed a CUDA environment `step` function, here we show how WarpDrive can help to facilitate the environment rollout by resetting and logging the environment on the GPU. If you do not have "test_build.cubin" built, you can refer to the previous tutorial [WarpDrive sampler](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2-warp_drive_sampler.ipynb) about how to automatically build it.
```
cuda_data_manager = PyCUDADataManager(num_agents=5, num_envs=2, episode_length=2)
cuda_function_manager = PyCUDAFunctionManager(
num_agents=cuda_data_manager.meta_info("n_agents"),
num_envs=cuda_data_manager.meta_info("n_envs"),
)
main_example_file = f"{_MAIN_FILEPATH}/test_build.cu"
bin_example_file = f"{_CUBIN_FILEPATH}/test_build.fatbin"
cuda_function_manager._compile(main_file=main_example_file,
cubin_file=bin_example_file)
cuda_function_manager.load_cuda_from_binary_file(bin_example_file)
cuda_env_resetter = PyCUDAEnvironmentReset(function_manager=cuda_function_manager)
cuda_env_logger = PyCUDALogController(function_manager=cuda_function_manager)
```
## Step Function
We have an example step function already checked in and compiled inside `test_build.cubin`.
The source code of this dummy step function can be found [here](https://www.github.com/salesforce/warp-drive/blob/master/example_envs/dummy_env/test_step.cu). For each step, array `x` will be divided by `multiplier` while array `y` will be multiplied by the same `multiplier`:
```
x[index] = x[index] / multiplier;
y[index] = y[index] * multiplier;
```
Now we just need to initialize it with CUDAFunctionManager and wrap it up with a Python/CUDA step callable. In `dummy_env` this function is called `cuda_dummy_step()`.
Notice that we provide the **EnvWrapper** to wrap up most of the processes below automatically. However, the unique Python/CUDA step callable you developed needs to be defined inside your environment so **EnvWrapper** can find and wrap it up.
For concrete examples on how to define more complex `step` functions, you can refer to [example1](https://www.github.com/salesforce/warp-drive/blob/master/example_envs/tag_gridworld/tag_gridworld_step.cu) and [example2](https://www.github.com/salesforce/warp-drive/blob/master/example_envs/tag_continous/tag_continuous_step.cu).
```
cuda_function_manager.initialize_functions(["testkernel"])
def cuda_dummy_step(
function_manager: PyCUDAFunctionManager,
data_manager: PyCUDADataManager,
env_resetter: PyCUDAEnvironmentReset,
target: int,
step: int,
):
env_resetter.reset_when_done(data_manager)
step = np.int32(step)
target = np.int32(target)
test_step = function_manager.get_function("testkernel")
test_step(
data_manager.device_data("X"),
data_manager.device_data("Y"),
data_manager.device_data("_done_"),
data_manager.device_data(f"{_ACTIONS}"),
data_manager.device_data("multiplier"),
target,
step,
data_manager.meta_info("episode_length"),
block=function_manager.block,
grid=function_manager.grid,
)
```
## Reset and Log Function
In the `step` function above, besides the step function managed by CUDAFunctionManager, you can see the function called `CUDAEnvironmentReset.reset_when_done()`. This function will reset the corresponding env to its initial state when the `done` flag becomes true on the GPU. This reset only resets the env that is done.
To make it work properly, you need to specify which data (usually the feature arrays and observations) can be reset.
This is where the flag **save_copy_and_apply_at_reset** comes into play. If the data has `save_copy_and_apply_at_reset` set to True, a dedicated copy will be maintained in the device for resetting.
On the other hand, **log_data_across_episode** will create a buffer on the GPU for logs. This lets you record a complete episode.
These two functions can be independently used!
```
data = DataFeed()
data.add_data(
name="X",
data=[[0.1, 0.2, 0.3, 0.4, 0.5], [0.6, 0.7, 0.8, 0.9, 1.0]],
save_copy_and_apply_at_reset=True,
log_data_across_episode=True,
)
data.add_data(
name="Y",
data=np.array([[6, 7, 8, 9, 10], [1, 2, 3, 4, 5]]),
save_copy_and_apply_at_reset=True,
log_data_across_episode=True,
)
data.add_data(name="multiplier", data=2.0)
tensor = DataFeed()
tensor.add_data(
name=f"{_ACTIONS}",
data=[
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
],
)
cuda_data_manager.push_data_to_device(data)
cuda_data_manager.push_data_to_device(tensor, torch_accessible=True)
assert cuda_data_manager.is_data_on_device("X")
assert cuda_data_manager.is_data_on_device("Y")
assert cuda_data_manager.is_data_on_device_via_torch(f"{_ACTIONS}")
```
Now, we run a complete set of parallel episodes and inspect the log for the first environment.
## Test Run
```
# t = 0 is reserved for the initial state.
cuda_env_logger.reset_log(data_manager=cuda_data_manager, env_id=0)
for t in range(1, cuda_data_manager.meta_info("episode_length") + 1):
cuda_dummy_step(
function_manager=cuda_function_manager,
data_manager=cuda_data_manager,
env_resetter=cuda_env_resetter,
target=100,
step=t,
)
cuda_env_logger.update_log(data_manager=cuda_data_manager, step=t)
dense_log = cuda_env_logger.fetch_log(data_manager=cuda_data_manager, names=["X", "Y"])
# Test after two steps that the log buffers for X and Y log are updating.
X_update = dense_log["X_for_log"]
Y_update = dense_log["Y_for_log"]
assert abs(X_update[1].mean() - 0.15) < 1e-5
assert abs(X_update[2].mean() - 0.075) < 1e-5
assert Y_update[1].mean() == 16
assert Y_update[2].mean() == 32
# Right now, the reset functions have not been activated.
# The done flags should be all True now.
done = cuda_data_manager.pull_data_from_device("_done_")
print(f"The done array = {done}")
```
For this demo, we can explicitly reset the environment to see how it works. The `dummy_step` function will do this in the next step by itself as well. After resetting, you can see that all the done flags go back to False and the `X` and `Y` arrays get reset successfully as well.
```
cuda_env_resetter.reset_when_done(data_manager=cuda_data_manager)
done = cuda_data_manager.pull_data_from_device("_done_")
assert done[0] == 0
assert done[1] == 0
X_after_reset = cuda_data_manager.pull_data_from_device("X")
Y_after_reset = cuda_data_manager.pull_data_from_device("Y")
# the 0th dim is env
assert abs(X_after_reset[0].mean() - 0.3) < 1e-5
assert abs(X_after_reset[1].mean() - 0.8) < 1e-5
assert Y_after_reset[0].mean() == 8
assert Y_after_reset[1].mean() == 3
```
# Learn More and Explore our Tutorials!
Now that you have familiarized yourself with WarpDrive, we suggest you take a look at our tutorials on [creating custom environments](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4-create_custom_environments.md) and on how to use WarpDrive to perform end-to-end multi-agent reinforcement learning [training](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-5-training_with_warp_drive.ipynb)!
For your reference, all our tutorials are here:
1. [WarpDrive basics(intro and pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.a-warp_drive_basics.ipynb)
2. [WarpDrive basics(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.b-warp_drive_basics.ipynb)
3. [WarpDrive sampler(pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.a-warp_drive_sampler.ipynb)
4. [WarpDrive sampler(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.b-warp_drive_sampler.ipynb)
5. [WarpDrive resetter and logger](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb)
6. [Create custom environments (pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.a-create_custom_environments_pycuda.md)
7. [Create custom environments (numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.b-create_custom_environments_numba.md)
8. [Training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-5-training_with_warp_drive.ipynb)
9. [Scaling Up training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-6-scaling_up_training_with_warp_drive.md)
10. [Training with WarpDrive + Pytorch Lightning](https://github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-7-training_with_warp_drive_and_pytorch_lightning.ipynb)
```
```
| /rl-warp-drive-2.5.0.tar.gz/rl-warp-drive-2.5.0/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb | 0.488527 | 0.838746 | tutorial-3-warp_drive_reset_and_log.ipynb | pypi |
Copyright (c) 2021, salesforce.com, inc.\
All rights reserved.\
SPDX-License-Identifier: BSD-3-Clause\
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
**Try this notebook on [Colab](http://colab.research.google.com/github/salesforce/warp-drive/blob/master/tutorials/tutorial-1.b-warp_drive_basics.ipynb)!**
# ⚠️ PLEASE NOTE:
This notebook runs on a GPU runtime.\
If running on Colab, choose Runtime > Change runtime type from the menu, then select `GPU` in the 'Hardware accelerator' dropdown menu.
```
import torch
assert torch.cuda.device_count() > 0, "This notebook needs a GPU to run!"
```
# Dependencies
You can install the warp_drive package using
- the pip package manager, OR
- by cloning the warp_drive package and installing the requirements.
We will install the latest version of WarpDrive using the pip package manager.
```
! pip install -U rl_warp_drive
import numpy as np
from timeit import Timer
from warp_drive.managers.numba_managers.numba_data_manager import NumbaDataManager
from warp_drive.managers.numba_managers.numba_function_manager import NumbaFunctionManager
from warp_drive.utils.data_feed import DataFeed
from warp_drive.utils.common import get_project_root
# Set logger level e.g., DEBUG, INFO, WARNING, ERROR
import logging
logging.getLogger().setLevel(logging.INFO)
```
# Numba Example
In this tutorial, we will focus on using the Numba backend to run the same content in tutorial 1.a.
In the following, we will demonstrate how to push and pull data between the host and the device, and how to write simple CUDA functions to manipulate the data. Let's begin by creating a CUDADataManager object.
We specify a few multi-agent RL parameters in the `DataManager` creator.
We'll create a multi-agent RL environment with 3 agents, an episode length of 5, and 2 environment replicas.
```
num_agents = 3
num_envs = 2
episode_length = 5
cuda_data_manager = NumbaDataManager(num_agents, num_envs, episode_length=episode_length)
```
Now, let's create some (random) data that we would like to push to the device. In the context of RL, this can pertain to the starting states created by `env reset()`.
The starting states are arrays that need to hold data such as observations, actions and rewards during the course of the episode. They could also contain environment configuration settings and hyperparameters.
Each environment and agent will have its own data, so we create a `(num_envs, num_agents)`-shaped array that will be pushed to the GPU.
```
random_data = np.random.rand(num_envs, num_agents)
random_data
```
# Push and pull data from host (CPU) to device (GPU)
In order to push data to the device, we have created a **DataFeed** helper object. For all data pushed from the host to device, we will need to provide a name identifier, the actual data, and two flags (both default to False):
- `save_copy_and_apply_at_reset` - if `True`, we make a copy of the starting data so that we can set the data array to that value at every environment reset, and
- `log_data_across_episode` - if `True`, we add a time dimension to the data, of size `episode_length`, set all $t>0$ index values to zeros, and store the data array at each time step separately. This is primarily used for logging the data for an episode rollout.
```
data_feed = DataFeed()
data_feed.add_data(
name="random_data",
data=random_data,
save_copy_and_apply_at_reset=False,
log_data_across_episode=False,
)
data_feed
```
The CUDA data manager provides the **push_data_to_device()** and **pull_data_from_device()** apis to handle data transfer between the host and the device.
```
cuda_data_manager.push_data_to_device(data_feed)
```
Notice that the data manager casted the data from float64 to float32. CUDA always uses 32-bit floating or integer representations of numbers.
```
data_fetched_from_device = cuda_data_manager.pull_data_from_device("random_data")
```
The data fetched from the device matches the data pushed (the small differences are due to type-casting).
```
data_fetched_from_device
```
Another integral part of RL is training. We also need to hold the observations, actions and rewards arrays. So for training, we will wrap the data into a Pytorch Tensor.
## Making Training Data Accessible To PyTorch
Note that pushing and pulling data several times between the host and the device causes a lot of communication overhead. So, it's advisable that we push the data from the host to device only once, and then manipulate all the data on the GPU in-place. This is particularly important when data needs to be accessed frequently. A common example is the batch of observations and rewards gathered for each training iteration.
Fortunately, our framework lets Pytorch access the data we pushed onto the GPU via pointers with minimal overhead. To make data accessible by Pytorch, we set the `torch_accessible` flag to True.
```
tensor_feed = DataFeed()
tensor_feed.add_data(name="random_tensor", data=random_data)
cuda_data_manager.push_data_to_device(tensor_feed, torch_accessible=True)
tensor_on_device = cuda_data_manager.data_on_device_via_torch("random_tensor")
```
## Time comparison for data pull (`torch_accessible` True versus False)
```
large_array = np.random.rand(1000, 1000)
```
### `torch_accessible=False`
```
data_feed = DataFeed()
data_feed.add_data(
name="large_array",
data=large_array,
)
cuda_data_manager.push_data_to_device(data_feed, torch_accessible=False)
Timer(lambda: cuda_data_manager.pull_data_from_device("large_array")).timeit(
number=1000
)
```
### `torch_accessible=True`
```
data_feed = DataFeed()
data_feed.add_data(
name="large_array_torch",
data=large_array,
)
cuda_data_manager.push_data_to_device(data_feed, torch_accessible=True)
Timer(lambda: cuda_data_manager.data_on_device_via_torch("random_tensor")).timeit(1000)
```
You can see the time for accessing torch tensors on the GPU is negligible compared to data arrays!
Currently, the `DataManager` supports primitive data types, such as ints, floats, lists, and arrays. If you would like to push more sophisticated data structures or types to the GPU, such as dictionaries, you may do so by pushing / pulling each key-value pair as a separate array.
# Code Execution Inside CUDA
Once we push all the relevant data to the GPU, we will need to write functions to manipulate the data. To this end, we will need to write code in Numba, but invoke it from the host node. The `FunctionManager` is built to facilitate function initialization on the host and execution on the device. As we mentioned before, all the arrays on GPU will be modified on the GPU, and in-place. Let's begin by creating a CUDAFunctionManager object.
```
cuda_function_manager = NumbaFunctionManager(
num_agents=cuda_data_manager.meta_info("n_agents"),
num_envs=cuda_data_manager.meta_info("n_envs"),
)
```
## Array manipulation inside Numba
In the previous tutorial, we discussed array indexing and our utility functions to facilitate the indexing in CUDA. One great benefit of Numba is its intrinsic syntax for multi-dimensional array indexing. Let's rewrite the same example in Numba this time. To recap,
We write a simple function to add one to each element of the pushed data. We will perform this operation in parallel on the (num_envs) number of GPU blocks and the (num_agents) number of threads within.
In general, the operation is (almost) parallel. Going into a bit more detail - CUDA employs a Single Instruction Multiple Thread (SIMT) architecture to manage and execute threads in groups of 32 called warps. So, as long as the number of agents is a multiple of 32, all the threads are utilized, otherwise a few threads remain idle. For example, if we use $1000$ agents, $24$ threads will remain idle, for a utilization rate of $97.65\%$.
```
source_code = """
import numba.cuda as numba_driver
@numba_driver.jit
def cuda_increment(data, num_agents):
env_id = numba_driver.blockIdx.x
agent_id = numba_driver.threadIdx.x
if agent_id < num_agents:
increment = env_id + agent_id
data[env_id, agent_id] += increment
"""
```
Next, we use the `FunctionManager` API method **import_numba_from_source_code()** to build and load the Numba code.
*Note: WarpDrive does not support the direct string-type source code loading. In general, it's standard practice to have several standalone source codes written out in .py file, here, the source_code is saved in example_envs/dummy_env*
```
source_code_path = f"example_envs.dummy_env.tutorial_basics"
cuda_function_manager.import_numba_from_source_code(
source_code_path, default_functions_included=False
)
cuda_function_manager.initialize_functions(["cuda_increment"])
```
We will use the `FunctionManager`'s API method **get_function()** to load the CUDA kernel function and get a handle to invoke it from the host device.
```
increment_function = cuda_function_manager.get_function("cuda_increment")
```
Now, when invoking the `increment` function, along with the `data` and `num_agents` arguments, we also need to provide the block and grid arguments. These are also attributes of the CUDA `FunctionManager`: simply use\
- `block=cuda_function_manager.block`, and
- `grid=cuda_function_manager.grid`
Also, since we need to use the `num_agents` parameter, we also need to push it to the device. Instead of using a `DataFeed`, we may also push as follows:
```
cuda_data_manager.push_data_to_device(
{
"num_agents": {
"data": num_agents,
"attributes": {
"save_copy_and_apply_at_reset": False,
"log_data_across_episode": False,
},
}
}
)
block=cuda_function_manager.block
grid=cuda_function_manager.grid
increment_function[grid, block](
cuda_data_manager.device_data("random_data"),
cuda_data_manager.device_data("num_agents"),
)
```
Below is the original (random) data that we pushed to the GPU:
```
random_data
```
and here's the incremented data:
```
cuda_data_manager.pull_data_from_device("random_data")
```
As expected, this method incremented each entry at index `(env_id, agent_id)` of the original data by `(env_id + agent_id)`! The differences are below.
```
cuda_data_manager.pull_data_from_device("random_data") - random_data
```
And we can invoke the increment function again to increment one more time (also in-place on the GPU), and the differences double.
```
block=cuda_function_manager.block
grid=cuda_function_manager.grid
increment_function[grid, block](
cuda_data_manager.device_data("random_data"),
cuda_data_manager.device_data("num_agents"),
)
cuda_data_manager.pull_data_from_device("random_data") - random_data
```
# Validating CUDA parallelism
We put all the pieces introduced so far together, and record the times for parallelized operations with different `num_envs` and `num_agents` settings.
```
def push_random_data_and_increment_timer(
num_runs=1,
num_envs=2,
num_agents=3,
source_code_path=None,
episode_length=100,
):
assert source_code_path is not None
# Initialize the CUDA data manager
cuda_data_manager = NumbaDataManager(
num_agents=num_agents, num_envs=num_envs, episode_length=episode_length
)
# Initialize the CUDA function manager
cuda_function_manager = NumbaFunctionManager(
num_agents=cuda_data_manager.meta_info("n_agents"),
num_envs=cuda_data_manager.meta_info("n_envs"),
)
# Load source code and initialize function
cuda_function_manager.import_numba_from_source_code(
source_code_path, default_functions_included=False
)
cuda_function_manager.initialize_functions(["cuda_increment"])
increment_function = cuda_function_manager.get_function("cuda_increment")
def push_random_data(num_agents, num_envs):
# Create random data
random_data = np.random.rand(num_envs, num_agents)
# Push data from host to device
data_feed = DataFeed()
data_feed.add_data(
name="random_data",
data=random_data,
)
data_feed.add_data(name="num_agents", data=num_agents)
cuda_data_manager.push_data_to_device(data_feed)
def increment_data():
block=cuda_function_manager.block
grid=cuda_function_manager.grid
increment_function[grid, block](
cuda_data_manager.device_data("random_data"),
cuda_data_manager.device_data("num_agents"),
)
# One-time data push
data_push_time = Timer(lambda: push_random_data(num_agents, num_envs)).timeit(
number=1
)
# Increment the arrays 'num_runs' times
program_run_time = Timer(lambda: increment_data()).timeit(number=num_runs)
return {"data push times": data_push_time, "code run time": program_run_time}
```
## Record the times for a single data push and 10000 increment kernel calls.
```
%%capture
num_runs = 10000
times = {}
for scenario in [
(1, 1),
(1, 10),
(1, 100),
(10, 10),
(1, 1000),
(100, 100),
(1000, 1000),
]:
num_envs, num_agents = scenario
times.update(
{
f"envs={num_envs}, agents={num_agents}": push_random_data_and_increment_timer(
num_runs, num_envs, num_agents, source_code_path
)
}
)
print(f"Times for {num_runs} function calls")
print("*" * 40)
for key, value in times.items():
print(
f"{key:30}: data push time: {value['data push times']:10.5}s,\t mean increment times: {value['code run time']:10.5}s"
)
```
As we increase the number of environments and agents, the data size becomes larger, so pushing data becomes slower, but since all the threads operate in parallel, the average time taken in the increment function remains about the same!
Also notice that Numba is much slower (~1/10X) than PyCUDA in this simple example. The main reason is that JIT repeats its runtime compilation every time it is called. Since the execution of the kernel function is quite lightweight in this example, the compilation time actually dominates the total time. This is much less of an issue in real problems, where the kernel function itself takes much more time and JIT also helps to optimize the kernel execution at runtime.
And that's it! By using building blocks such as the increment function, we can create arbitrarily complex functions in CUDA C. For some comparative examples, please see the example environments that have both Python implementations in `examples/envs` and corresponding CUDA C implementations in `src/envs`.
Below are some useful starting resources for CUDA C programming:
- [CUDA tutorial](https://cuda-tutorial.readthedocs.io/en/latest/)
- [Learn C](https://learnxinyminutes.com/docs/c/)
- [CUDA Quick Reference](http://www.icl.utk.edu/~mgates3/docs/cuda.html)
<!-- - [Thrust](https://developer.nvidia.com/thrust). Note: thrust is a flexible, high-level interface for GPU programming that greatly enhances developer productivity. -->
# Learn More and Explore our Tutorials!
This is the first tutorial on WarpDrive. Next, we suggest you check out our advanced tutorials on [WarpDrive's sampler](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2-warp_drive_sampler.ipynb) and [WarpDrive's reset and log controller](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb).
For your reference, all our tutorials are here:
1. [WarpDrive basics(intro and pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.a-warp_drive_basics.ipynb)
2. [WarpDrive basics(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.b-warp_drive_basics.ipynb)
3. [WarpDrive sampler(pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.a-warp_drive_sampler.ipynb)
4. [WarpDrive sampler(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.b-warp_drive_sampler.ipynb)
5. [WarpDrive resetter and logger](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb)
6. [Create custom environments (pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.a-create_custom_environments_pycuda.md)
7. [Create custom environments (numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.b-create_custom_environments_numba.md)
8. [Training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-5-training_with_warp_drive.ipynb)
9. [Scaling Up training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-6-scaling_up_training_with_warp_drive.md)
10. [Training with WarpDrive + Pytorch Lightning](https://github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-7-training_with_warp_drive_and_pytorch_lightning.ipynb)
```
```
| /rl-warp-drive-2.5.0.tar.gz/rl-warp-drive-2.5.0/tutorials/tutorial-1.b-warp_drive_basics.ipynb | 0.768646 | 0.919715 | tutorial-1.b-warp_drive_basics.ipynb | pypi |
Copyright (c) 2021, salesforce.com, inc. \
All rights reserved. \
SPDX-License-Identifier: BSD-3-Clause \
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
**Try this notebook on [Colab](http://colab.research.google.com/github/salesforce/warp-drive/blob/master/tutorials/tutorial-2.b-warp_drive_sampler.ipynb)!**
# ⚠️ PLEASE NOTE:
This notebook runs on a GPU runtime.\
If running on Colab, choose Runtime > Change runtime type from the menu, then select `GPU` in the 'Hardware accelerator' dropdown menu.
```
import torch
assert torch.cuda.device_count() > 0, "This notebook needs a GPU to run!"
```
# Welcome to WarpDrive!
This is the second tutorial on WarpDrive, a PyCUDA-based framework for extremely parallelized multi-agent reinforcement learning (RL) on a single graphics processing unit (GPU). At this stage, we assume you have read our first tutorial for [introduction and pycuda](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.a-warp_drive_basics.ipynb), and [numba](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.b-warp_drive_basics.ipynb) on WarpDrive basics.
In this tutorial, we describe **CUDASampler**, a lightweight and fast action sampler based on the policy distribution across several RL agents and environment replicas. `CUDASampler` utilizes the GPU to parallelize operations to efficiently sample a large number of actions in parallel.
Notably:
1. It reads the distribution on the GPU through Pytorch and samples actions exclusively at the GPU. There is no data transfer.
2. It maximizes parallelism down to the individual thread level, i.e., each agent at each environment has its own random seed and independent random sampling process.
3. It runs much faster than most GPU samplers. For example, it is significantly faster than Pytorch.
# Dependencies
You can install the warp_drive package using
- the pip package manager, OR
- by cloning the warp_drive package and installing the requirements.
We will install the latest version of WarpDrive using the pip package manager.
```
pip install -U rl_warp_drive
import numpy as np
from warp_drive.managers.numba_managers.numba_function_manager import NumbaFunctionManager, NumbaSampler
from warp_drive.managers.numba_managers.numba_data_manager import NumbaDataManager
from warp_drive.utils.constants import Constants
from warp_drive.utils.data_feed import DataFeed
from warp_drive.utils.common import get_project_root
_ACTIONS = Constants.ACTIONS
# Set logger level e.g., DEBUG, INFO, WARNING, ERROR
import logging
logging.getLogger().setLevel(logging.INFO)
```
# Initialize NumbaSampler
We first initialize the **NumbaDataManager** and **NumbaFunctionManager**. To illustrate the sampler, we first load a "test_build_sampler.py". Note that these low-level managers and modules will be hidden and called automatically by WarpDrive in any end-to-end training and simulation. In this and the next tutorials, we want to show how a few fundamental modules work and their performance, that is why some low-level APIs such as "import_numba_from_source_code()" are called.
```
cuda_data_manager = NumbaDataManager(num_agents=5, episode_length=10, num_envs=2)
cuda_function_manager = NumbaFunctionManager(
num_agents=cuda_data_manager.meta_info("n_agents"),
num_envs=cuda_data_manager.meta_info("n_envs"),
)
_NUMBA_FILEPATH = f"warp_drive.numba_includes"
```
This `_NUMBA_FILEPATH` includes several Numba core services provided by WarpDrive. Since Numba uses a JIT compiler, we do not need to compile an executable before loading it as we did in the PyCUDA mode. However, in most cases, the source code still needs an environment configuration file to populate a few global environment constants and settings, for example, the number of environment replicas. WarpDrive provides such a template configuration file and automatically updates its placeholders according to the current environment. (`import_numba_env_config()` is a low-level API; users will not need to call these internal APIs directly for any WarpDrive end-to-end simulation and training.)
```
cuda_function_manager.import_numba_env_config(template_header_file="template_env_config.txt")
```
Now we can use **NumbaFunctionManager** to load the source code. In this demo, we use `test_build.py` which collects those core services and includes the backend source code for `NumbaSampleController`.
```
cuda_function_manager.import_numba_from_source_code(f"{_NUMBA_FILEPATH}.test_build")
```
Finally, we initialize **NumbaSampler** and assign the random seed. `NumbaSampler` keeps independent randomness across all threads and blocks. Notice that `NumbaSampler` requires `NumbaFunctionManager` because `NumbaFunctionManager` manages all the CUDA function pointers including to the sampler. Also notice this test binary uses 2 environment replicas and 5 agents.
```
cuda_sampler = NumbaSampler(function_manager=cuda_function_manager)
cuda_sampler.init_random(seed=None)
```
# Sampling
## Actions Placeholder
Now, we feed the **actions_a** placeholder into the GPU. It has the shape `(n_envs=2, n_agents=5)` as expected. Also we make it accessible by Pytorch, because during RL training, actions will be fed into the Pytorch trainer directly.
```
data_feed = DataFeed()
data_feed.add_data(name=f"{_ACTIONS}_a", data=[[[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]]])
cuda_data_manager.push_data_to_device(data_feed, torch_accessible=True)
assert cuda_data_manager.is_data_on_device_via_torch(f"{_ACTIONS}_a")
```
## Action Sampled Distribution
We define an action **distribution** here. During training, this distribution would be provided by the policy model implemented in Pytorch. The distribution has the shape `(n_envs, n_agents, **n_actions**)`. The last dimension `n_actions` defines the size of the action space for a particular *discrete* action. For example, if we have up, down, left, right and no-ops, `n_actions=5`.
**n_actions** needs to be registered by the sampler so the sampler is able to pre-allocate a global memory space in GPU to speed up action sampling. This can be done by calling `sampler.register_actions()`.
In this tutorial, we check if our sampled action distribution follows the given distribution. For example, the distribution [0.333, 0.333, 0.333] below suggests the 1st agent has 3 possible actions and each of them have equal probability.
```
cuda_sampler.register_actions(
cuda_data_manager, action_name=f"{_ACTIONS}_a", num_actions=3
)
distribution = np.array(
[
[
[0.333, 0.333, 0.333],
[0.2, 0.5, 0.3],
[0.95, 0.02, 0.03],
[0.02, 0.95, 0.03],
[0.02, 0.03, 0.95],
],
[
[0.1, 0.7, 0.2],
[0.7, 0.2, 0.1],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
],
]
)
distribution = torch.from_numpy(distribution).float().cuda()
# Run 10000 times to collect statistics
actions_batch = torch.from_numpy(np.empty((10000, 2, 5), dtype=np.int32)).cuda()
for i in range(10000):
cuda_sampler.sample(cuda_data_manager, distribution, action_name=f"{_ACTIONS}_a")
actions_batch[i] = cuda_data_manager.data_on_device_via_torch(f"{_ACTIONS}_a")[:, :, 0]
actions_batch_host = actions_batch.cpu().numpy()
actions_env_0 = actions_batch_host[:, 0]
actions_env_1 = actions_batch_host[:, 1]
print(
"Sampled actions distribution versus the given distribution (in bracket) for env 0: \n"
)
for agent_id in range(5):
print(
f"Sampled action distribution for agent_id: {agent_id}:\n"
f"{(actions_env_0[:, agent_id] == 0).sum() / 10000.0}({distribution[0, agent_id, 0]}), \n"
f"{(actions_env_0[:, agent_id] == 1).sum() / 10000.0}({distribution[0, agent_id, 1]}), \n"
f"{(actions_env_0[:, agent_id] == 2).sum() / 10000.0}({distribution[0, agent_id, 2]}) \n"
)
print(
"Sampled actions distribution versus the given distribution (in bracket) for env 1: "
)
for agent_id in range(5):
print(
f"Sampled action distribution for agent_id: {agent_id}:\n"
f"{(actions_env_1[:, agent_id] == 0).sum() / 10000.0}({distribution[1, agent_id, 0]}), \n"
f"{(actions_env_1[:, agent_id] == 1).sum() / 10000.0}({distribution[1, agent_id, 1]}), \n"
f"{(actions_env_1[:, agent_id] == 2).sum() / 10000.0}({distribution[1, agent_id, 2]}) \n"
)
```
## Action Randomness Across Threads
Another important validation is whether the sampler provides independent randomness across different agents and environment replicas. Given the same policy model for all the agents and environment replicas, we can check if the sampled actions are independently distributed.
Here, we assign all agents across all envs the same distribution [0.25, 0.25, 0.25, 0.25]. It is equivalent to a uniform action distribution over all actions [0,1,2,3], across 5 agents and 2 envs. Then we check the standard deviation across the agents.
```
data_feed = DataFeed()
data_feed.add_data(name=f"{_ACTIONS}_b", data=[[[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]]])
cuda_data_manager.push_data_to_device(data_feed, torch_accessible=True)
assert cuda_data_manager.is_data_on_device_via_torch(f"{_ACTIONS}_b")
cuda_sampler.register_actions(
cuda_data_manager, action_name=f"{_ACTIONS}_b", num_actions=4
)
distribution = np.array(
[
[
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
],
[
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
],
]
)
distribution = torch.from_numpy(distribution).float().cuda()
# Run 10000 times to collect statistics.
actions_batch = torch.from_numpy(np.empty((10000, 2, 5), dtype=np.int32)).cuda()
for i in range(10000):
cuda_sampler.sample(cuda_data_manager, distribution, action_name=f"{_ACTIONS}_b")
actions_batch[i] = cuda_data_manager.data_on_device_via_torch(f"{_ACTIONS}_b")[:, :, 0]
actions_batch_host = actions_batch.cpu().numpy()
actions_batch_host
actions_batch_host.std(axis=2).mean(axis=0)
```
To check the independence of randomness among all threads, we can compare it with a Numpy implementation. Here we use `numpy.random.choice(4, 5)` to repeat the same process for a uniform action distribution over all actions [0,1,2,3], 5 agents and 2 envs. We should see that the variation of the Numpy output is very close to that of our sampler.
```
actions_batch_numpy = np.empty((10000, 2, 5), dtype=np.int32)
for i in range(10000):
actions_batch_numpy[i, 0, :] = np.random.choice(4, 5)
actions_batch_numpy[i, 1, :] = np.random.choice(4, 5)
actions_batch_numpy.std(axis=2).mean(axis=0)
```
## Running Speed
The total time for sampling includes receiving a new distribution and using this to sample. For Numba, it also includes the JIT time. In fact, JIT becomes the main time consumer in this simple example.
Comparing our sampler with [torch.Categorical sampler](https://pytorch.org/docs/stable/distributions.html),
we are almost the same.
*Note: our sampler runs in parallel across threads, so this speed-up is almost constant when scaling up the number of agents or environment replicas, i.e., increasing the number of used threads.*
```
from torch.distributions import Categorical
distribution = np.array(
[
[
[0.333, 0.333, 0.333],
[0.2, 0.5, 0.3],
[0.95, 0.02, 0.03],
[0.02, 0.95, 0.03],
[0.02, 0.03, 0.95],
],
[
[0.1, 0.7, 0.2],
[0.7, 0.2, 0.1],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
],
]
)
distribution = torch.from_numpy(distribution).float().cuda()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for _ in range(1000):
cuda_sampler.sample(cuda_data_manager, distribution, action_name=f"{_ACTIONS}_a")
end_event.record()
torch.cuda.synchronize()
print(f"time elapsed: {start_event.elapsed_time(end_event)} ms")
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for _ in range(1000):
Categorical(distribution).sample()
end_event.record()
torch.cuda.synchronize()
print(f"time elapsed: {start_event.elapsed_time(end_event)} ms")
```
# Learn More and Explore our Tutorials!
Next, we suggest you check out our advanced [tutorial](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb) on WarpDrive's reset and log controller!
For your reference, all our tutorials are here:
1. [WarpDrive basics(intro and pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.a-warp_drive_basics.ipynb)
2. [WarpDrive basics(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.b-warp_drive_basics.ipynb)
3. [WarpDrive sampler(pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.a-warp_drive_sampler.ipynb)
4. [WarpDrive sampler(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.b-warp_drive_sampler.ipynb)
5. [WarpDrive resetter and logger](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb)
6. [Create custom environments (pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.a-create_custom_environments_pycuda.md)
7. [Create custom environments (numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.b-create_custom_environments_numba.md)
8. [Training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-5-training_with_warp_drive.ipynb)
9. [Scaling Up training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-6-scaling_up_training_with_warp_drive.md)
10. [Training with WarpDrive + Pytorch Lightning](https://github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-7-training_with_warp_drive_and_pytorch_lightning.ipynb)
```
```
| /rl-warp-drive-2.5.0.tar.gz/rl-warp-drive-2.5.0/tutorials/tutorial-2.b-warp_drive_sampler.ipynb | 0.782579 | 0.935876 | tutorial-2.b-warp_drive_sampler.ipynb | pypi |
Copyright (c) 2021, salesforce.com, inc. \
All rights reserved. \
SPDX-License-Identifier: BSD-3-Clause \
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
**Try this notebook on [Colab](http://colab.research.google.com/github/salesforce/warp-drive/blob/master/tutorials/tutorial-2.a-warp_drive_sampler.ipynb)!**
# ⚠️ PLEASE NOTE:
This notebook runs on a GPU runtime.\
If running on Colab, choose Runtime > Change runtime type from the menu, then select `GPU` in the 'Hardware accelerator' dropdown menu.
```
import torch
assert torch.cuda.device_count() > 0, "This notebook needs a GPU to run!"
```
# Welcome to WarpDrive!
This is the second tutorial on WarpDrive, a PyCUDA-based framework for extremely parallelized multi-agent reinforcement learning (RL) on a single graphics processing unit (GPU). At this stage, we assume you have read our [first tutorial](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1-warp_drive_basics.ipynb) on WarpDrive basics.
In this tutorial, we describe **CUDASampler**, a lightweight and fast action sampler based on the policy distribution across several RL agents and environment replicas. `CUDASampler` utilizes the GPU to parallelize operations to efficiently sample a large number of actions in parallel.
Notably:
1. It reads the distribution on the GPU through Pytorch and samples actions exclusively at the GPU. There is no data transfer.
2. It maximizes parallelism down to the individual thread level, i.e., each agent at each environment has its own random seed and independent random sampling process.
3. It runs much faster than most GPU samplers. For example, it is significantly faster than Pytorch.
# Dependencies
You can install the warp_drive package using
- the pip package manager, OR
- by cloning the warp_drive package and installing the requirements.
We will install the latest version of WarpDrive using the pip package manager.
```
pip install -U rl_warp_drive
import numpy as np
from warp_drive.managers.pycuda_managers.pycuda_function_manager import PyCUDAFunctionManager, PyCUDASampler
from warp_drive.managers.pycuda_managers.pycuda_data_manager import PyCUDADataManager
from warp_drive.utils.constants import Constants
from warp_drive.utils.data_feed import DataFeed
from warp_drive.utils.common import get_project_root
_MAIN_FILEPATH = f"{get_project_root()}/warp_drive/cuda_includes"
_CUBIN_FILEPATH = f"{get_project_root()}/warp_drive/cuda_bin"
_ACTIONS = Constants.ACTIONS
# Set logger level e.g., DEBUG, INFO, WARNING, ERROR
import logging
logging.getLogger().setLevel(logging.INFO)
```
# Initialize PyCUDASampler
We first initialize the **PyCUDADataManager** and **PyCUDAFunctionManager**. To illustrate the sampler, we first load a pre-compiled binary file called "test_build.cubin". Note that these low-level managers and modules will be hidden and called automatically by WarpDrive in any end-to-end training and simulation. In this and the next tutorials, we want to show how a few fundamental modules work and their performance, that is why some low-level APIs are called.
```
cuda_data_manager = PyCUDADataManager(num_agents=5, episode_length=10, num_envs=2)
cuda_function_manager = PyCUDAFunctionManager(
num_agents=cuda_data_manager.meta_info("n_agents"),
num_envs=cuda_data_manager.meta_info("n_envs"),
)
main_example_file = f"{_MAIN_FILEPATH}/test_build.cu"
bin_example_file = f"{_CUBIN_FILEPATH}/test_build.fatbin"
```
This binary is compiled with inclusion of auxiliary files in `warp_drive/cuda_includes/core` which includes several CUDA core services provided by WarpDrive. These include the backend source code for `CUDASampleController`.
To make "test_build.fatbin" available, we compiled this test cubin by calling `_compile()` from `CUDAFunctionManager`.
For this notebook demonstration, in the bin folder, we have already provided a pre-compiled binary, but we suggest that you still execute the cell below to re-compile it to avoid possible binary incompatibility issues across different platforms. (`_compile()` is a low-level API; users will not need to call these internal APIs directly for any WarpDrive end-to-end simulation and training.)
```
cuda_function_manager._compile(main_file=main_example_file,
cubin_file=bin_example_file)
```
Finally, we initialize **PyCUDASampler** and assign the random seed. `PyCUDASampler` keeps independent randomness across all threads and blocks. Notice that `PyCUDASampler` requires `PyCUDAFunctionManager` because `PyCUDAFunctionManager` manages all the CUDA function pointers including to the sampler. Also notice this test binary uses 2 environment replicas and 5 agents.
```
cuda_function_manager.load_cuda_from_binary_file(
bin_example_file, default_functions_included=True
)
cuda_sampler = PyCUDASampler(function_manager=cuda_function_manager)
cuda_sampler.init_random(seed=None)
```
# Sampling
## Actions Placeholder
Now, we feed the **actions_a** placeholder into the GPU. It has the shape `(n_envs=2, n_agents=5)` as expected. Also we make it accessible by Pytorch, because during RL training, actions will be fed into the Pytorch trainer directly.
```
data_feed = DataFeed()
data_feed.add_data(name=f"{_ACTIONS}_a", data=[[[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]]])
cuda_data_manager.push_data_to_device(data_feed, torch_accessible=True)
assert cuda_data_manager.is_data_on_device_via_torch(f"{_ACTIONS}_a")
```
## Action Sampled Distribution
We define an action **distribution** here. During training, this distribution would be provided by the policy model implemented in Pytorch. The distribution has the shape `(n_envs, n_agents, **n_actions**)`. The last dimension `n_actions` defines the size of the action space for a particular *discrete* action. For example, if we have up, down, left, right and no-ops, `n_actions=5`.
**n_actions** needs to be registered by the sampler so the sampler is able to pre-allocate a global memory space in GPU to speed up action sampling. This can be done by calling `sampler.register_actions()`.
In this tutorial, we check if our sampled action distribution follows the given distribution. For example, the distribution [0.333, 0.333, 0.333] below suggests the 1st agent has 3 possible actions and each of them have equal probability.
```
cuda_sampler.register_actions(
cuda_data_manager, action_name=f"{_ACTIONS}_a", num_actions=3
)
distribution = np.array(
[
[
[0.333, 0.333, 0.333],
[0.2, 0.5, 0.3],
[0.95, 0.02, 0.03],
[0.02, 0.95, 0.03],
[0.02, 0.03, 0.95],
],
[
[0.1, 0.7, 0.2],
[0.7, 0.2, 0.1],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
],
]
)
distribution = torch.from_numpy(distribution).float().cuda()
# Run 10000 times to collect statistics
actions_batch = torch.from_numpy(np.empty((10000, 2, 5), dtype=np.int32)).cuda()
for i in range(10000):
cuda_sampler.sample(cuda_data_manager, distribution, action_name=f"{_ACTIONS}_a")
actions_batch[i] = cuda_data_manager.data_on_device_via_torch(f"{_ACTIONS}_a")[:, :, 0]
actions_batch_host = actions_batch.cpu().numpy()
actions_env_0 = actions_batch_host[:, 0]
actions_env_1 = actions_batch_host[:, 1]
print(
"Sampled actions distribution versus the given distribution (in bracket) for env 0: \n"
)
for agent_id in range(5):
print(
f"Sampled action distribution for agent_id: {agent_id}:\n"
f"{(actions_env_0[:, agent_id] == 0).sum() / 10000.0}({distribution[0, agent_id, 0]}), \n"
f"{(actions_env_0[:, agent_id] == 1).sum() / 10000.0}({distribution[0, agent_id, 1]}), \n"
f"{(actions_env_0[:, agent_id] == 2).sum() / 10000.0}({distribution[0, agent_id, 2]}) \n"
)
print(
"Sampled actions distribution versus the given distribution (in bracket) for env 1: "
)
for agent_id in range(5):
print(
f"Sampled action distribution for agent_id: {agent_id}:\n"
f"{(actions_env_1[:, agent_id] == 0).sum() / 10000.0}({distribution[1, agent_id, 0]}), \n"
f"{(actions_env_1[:, agent_id] == 1).sum() / 10000.0}({distribution[1, agent_id, 1]}), \n"
f"{(actions_env_1[:, agent_id] == 2).sum() / 10000.0}({distribution[1, agent_id, 2]}) \n"
)
```
## Action Randomness Across Threads
Another important validation is whether the sampler provides independent randomness across different agents and environment replicas. Given the same policy model for all the agents and environment replicas, we can check if the sampled actions are independently distributed.
Here, we assign all agents across all envs the same distribution [0.25, 0.25, 0.25, 0.25]. It is equivalent to a uniform action distribution over all actions [0,1,2,3], across 5 agents and 2 envs. Then we check the standard deviation across the agents.
```
data_feed = DataFeed()
data_feed.add_data(name=f"{_ACTIONS}_b", data=[[[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]]])
cuda_data_manager.push_data_to_device(data_feed, torch_accessible=True)
assert cuda_data_manager.is_data_on_device_via_torch(f"{_ACTIONS}_b")
cuda_sampler.register_actions(
cuda_data_manager, action_name=f"{_ACTIONS}_b", num_actions=4
)
distribution = np.array(
[
[
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
],
[
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
],
]
)
distribution = torch.from_numpy(distribution).float().cuda()
# Run 10000 times to collect statistics.
actions_batch = torch.from_numpy(np.empty((10000, 2, 5), dtype=np.int32)).cuda()
for i in range(10000):
cuda_sampler.sample(cuda_data_manager, distribution, action_name=f"{_ACTIONS}_b")
actions_batch[i] = cuda_data_manager.data_on_device_via_torch(f"{_ACTIONS}_b")[:, :, 0]
actions_batch_host = actions_batch.cpu().numpy()
actions_batch_host
actions_batch_host.std(axis=2).mean(axis=0)
```
To check the independence of randomness among all threads, we can compare it with a Numpy implementation. Here we use `numpy.random.choice(4, 5)` to repeat the same process for a uniform action distribution over all actions [0,1,2,3], 5 agents and 2 envs. We should see that the variation of the Numpy output is very close to that of our sampler.
```
actions_batch_numpy = np.empty((10000, 2, 5), dtype=np.int32)
for i in range(10000):
actions_batch_numpy[i, 0, :] = np.random.choice(4, 5)
actions_batch_numpy[i, 1, :] = np.random.choice(4, 5)
actions_batch_numpy.std(axis=2).mean(axis=0)
```
## Running Speed
The total time for sampling includes receiving a new distribution and using this to sample.
Comparing our sampler with [torch.Categorical sampler](https://pytorch.org/docs/stable/distributions.html),
we reach **7-8X** speed up for the distribution above.
*Note: our sampler runs in parallel across threads, so this speed-up is almost constant when scaling up the number of agents or environment replicas, i.e., increasing the number of used threads.*
```
from torch.distributions import Categorical
distribution = np.array(
[
[
[0.333, 0.333, 0.333],
[0.2, 0.5, 0.3],
[0.95, 0.02, 0.03],
[0.02, 0.95, 0.03],
[0.02, 0.03, 0.95],
],
[
[0.1, 0.7, 0.2],
[0.7, 0.2, 0.1],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
],
]
)
distribution = torch.from_numpy(distribution).float().cuda()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for _ in range(1000):
cuda_sampler.sample(cuda_data_manager, distribution, action_name=f"{_ACTIONS}_a")
end_event.record()
torch.cuda.synchronize()
print(f"time elapsed: {start_event.elapsed_time(end_event)} ms")
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for _ in range(1000):
Categorical(distribution).sample()
end_event.record()
torch.cuda.synchronize()
print(f"time elapsed: {start_event.elapsed_time(end_event)} ms")
```
# Learn More and Explore our Tutorials!
Next, we suggest you check out our advanced [tutorial](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb) on WarpDrive's reset and log controller!
For your reference, all our tutorials are here:
1. [WarpDrive basics(intro and pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.a-warp_drive_basics.ipynb)
2. [WarpDrive basics(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1.b-warp_drive_basics.ipynb)
3. [WarpDrive sampler(pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.a-warp_drive_sampler.ipynb)
4. [WarpDrive sampler(numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-2.b-warp_drive_sampler.ipynb)
5. [WarpDrive resetter and logger](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-3-warp_drive_reset_and_log.ipynb)
6. [Create custom environments (pycuda)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.a-create_custom_environments_pycuda.md)
7. [Create custom environments (numba)](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-4.b-create_custom_environments_numba.md)
8. [Training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-5-training_with_warp_drive.ipynb)
9. [Scaling Up training with WarpDrive](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-6-scaling_up_training_with_warp_drive.md)
10. [Training with WarpDrive + Pytorch Lightning](https://github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-7-training_with_warp_drive_and_pytorch_lightning.ipynb)
```
```
| /rl-warp-drive-2.5.0.tar.gz/rl-warp-drive-2.5.0/tutorials/tutorial-2.a-warp_drive_sampler.ipynb | 0.799833 | 0.871639 | tutorial-2.a-warp_drive_sampler.ipynb | pypi |
from numba import cuda as numba_driver
from numba import float32, int32, boolean, from_dtype
from numba.cuda.random import init_xoroshiro128p_states, xoroshiro128p_uniform_float32
import numpy as np
# Tolerance for float comparisons in the cumulative-distribution binary search.
kEps = 1.0e-8
# Numba record dtype mirroring the xoroshiro128p RNG state: two 64-bit words
# (s0, s1) per thread, matching numba.cuda.random's internal state layout.
xoroshiro128p_type = from_dtype(np.dtype([("s0", np.uint64), ("s1", np.uint64)], align=True))
@numba_driver.jit(int32(float32[:, :, ::1], float32, int32, int32, int32), device=True, inline=True)
def search_index(distr, p, env_id, agent_id, r):
    """Binary-search the cumulative distribution for the sampled value `p`.

    `distr[env_id, agent_id, 0..r]` is a non-decreasing cumulative
    distribution. Returns the index of the first entry >= p (treating
    values within kEps of p as equal), clamped to r.
    """
    lo = 0
    hi = r
    while lo <= hi:
        mid = lo + (hi - lo) // 2
        val = distr[env_id, agent_id, mid]
        if abs(val - p) < kEps:
            # Near-equal floats count as an exact hit.
            return mid
        if val < p:
            lo = mid + 1
        else:
            hi = mid - 1
    # If p exceeds every entry, clamp to the last index; otherwise `lo`
    # is the insertion point, i.e. the first entry >= p.
    return r if lo > r else lo
def init_random(rng_states, seed):
    """Seed the per-thread xoroshiro128p RNG states in place."""
    init_xoroshiro128p_states(rng_states, seed=seed)
@numba_driver.jit((xoroshiro128p_type[::1], float32[:, :, ::1], int32[:, :, ::1], float32[:, :, ::1], int32, int32))
def sample_actions(rng_states, distr, action_indices, cum_distr, num_actions, use_argmax):
    # Sample one action per (env, agent) pair from `distr`, writing the chosen
    # index into `action_indices[env, agent, 0]`.
    #
    # Launch layout (assumed from the indexing below — one block per env,
    # one thread per agent): blockIdx.x selects the environment and
    # threadIdx.x selects the agent.
    # `cum_distr` is per-thread scratch for the cumulative distribution.
    env_id = numba_driver.blockIdx.x
    # Block id in a 1D grid
    agent_id = numba_driver.threadIdx.x
    # Flattened global thread index; also selects this thread's RNG state,
    # so each (env, agent) draws from an independent random stream.
    posidx = numba_driver.grid(1)
    # Guard against threads beyond the number of RNG states.
    if posidx >= rng_states.shape[0]:
        return
    # `use_argmax` is an int flag; nonzero selects greedy (argmax) action
    # selection instead of random sampling.
    if use_argmax > 0.5:
        max_dist = distr[env_id, agent_id, 0]
        max_ind = 0
        for i in range(1, num_actions):
            if max_dist < distr[env_id, agent_id, i]:
                max_dist = distr[env_id, agent_id, i]
                max_ind = i
        action_indices[env_id, agent_id, 0] = max_ind
        return
    # Inverse-transform sampling: draw u ~ U(0, 1), build the cumulative
    # distribution in-place, then binary-search for the first cumulative
    # value >= u.
    p = xoroshiro128p_uniform_float32(rng_states, posidx)
    cum_distr[env_id, agent_id, 0] = distr[env_id, agent_id, 0]
    for i in range(1, num_actions):
        cum_distr[env_id, agent_id, i] = (
            distr[env_id, agent_id, i] + cum_distr[env_id, agent_id, i - 1]
        )
    ind = search_index(cum_distr, p, env_id, agent_id, num_actions - 1)
    # action_indices in the shape of [n_env, n_agent, 1]
    action_indices[env_id, agent_id, 0] = ind
import logging
from typing import Optional
import numba.cuda as numba_driver
import numpy as np
import torch
from warp_drive.managers.data_manager import CUDADataManager
class NumbaDataManager(CUDADataManager):
    """"""
    """
    Example:
        numba_data_manager = NumbaDataManager(
            num_agents=10, num_envs=5, episode_length=100
        )
        data1 = DataFeed()
        data1.add_data(name="X",
                       data=np.array([[1, 2, 3, 4, 5],
                                      [0, 0, 0, 0, 0],
                                      [0, 0, 0, 0, 0]])
                       )
        data1.add_data(name="a", data=100)
        numba_data_manager.push_data_to_device(data)
        data2 = DataFeed()
        data2.add_data(name="Y",
                       data=[[0.1,0.2,0.3,0.4,0.5],
                             [0.0,0.0,0.0,0.0,0.0],
                             [0.0,0.0,0.0,0.0,0.0]]
                       )
        numba_data_manager.push_data_to_device(data2, torch_accessible=True)
        X_copy_at_host = numba_data_manager.pull_data_from_device(name="X")
        Y_copy_at_host = numba_data_manager.pull_data_from_device(name="Y")
        if numba_data_manager.is_data_on_device_via_torch("Y"):
            Y_tensor_accessible_by_torch =
                numba_data_manager.data_on_device_via_torch("Y")
        block=(10,1,1)
        grid=(5,1)
        numba_function[grid, block](cuda_data_manager.device_data("X"),
                                    cuda_data_manager.device_data("Y"),)
    """

    def pull_data_from_device(self, name: str):
        """Return a host-side copy of the named array (or the cached scalar)."""
        assert name in self._host_data
        # Scalars are never pushed to the device; return the cached host value.
        if name in self._scalar_data_list:
            return self._host_data[name]
        if self.is_data_on_device_via_torch(name):
            # Torch-accessible data: let torch perform the device-to-host copy.
            return self._device_data_via_torch[name].cpu().numpy()
        else:
            # Plain Numba device array: explicit copy back to the host.
            assert name in self._device_data_pointer
            v = self._device_data_pointer[name].copy_to_host()
            return v

    def reset_device(self, name: Optional[str] = None):
        """Re-upload host data to the device, overwriting device contents.

        If `name` is given, only that array is reset; otherwise every
        registered host array is re-pushed.

        NOTE(review): `to_device` allocates a fresh device buffer, so for
        torch-accessible entries this appears to decouple the Numba pointer
        from the existing torch tensor — confirm this is intended.
        """
        if name is not None:
            assert name in self._device_data_pointer
            assert name in self._host_data
            self._device_data_pointer[name] = numba_driver.to_device(
                self._host_data[name]
            )
        else:
            for name, host_array in self._host_data.items():
                self._device_data_pointer[name] = numba_driver.to_device(host_array)

    def _to_device(
        self,
        name: str,
        name_on_device: Optional[str] = None,
        torch_accessible: bool = False,
    ):
        """Push the named host array to the GPU.

        When `torch_accessible` is True, the allocation is owned by a torch
        CUDA tensor and exposed to Numba via `as_cuda_array`, so torch and
        Numba kernels operate on the same device memory.
        """
        assert name in self._host_data
        host_array = self._host_data[name]
        if name_on_device is None:
            name_on_device = name
        # Refuse to silently overwrite an existing device allocation.
        assert name_on_device not in self._device_data_pointer
        if not torch_accessible:
            self._device_data_pointer[name_on_device] = numba_driver.to_device(
                host_array
            )
        else:
            torch_tensor_device = torch.from_numpy(host_array).cuda()
            self._device_data_via_torch[name_on_device] = torch_tensor_device
            self._device_data_pointer[name_on_device] = numba_driver.as_cuda_array(
                torch_tensor_device
            )
from typing import Optional
import numpy as np
from warp_drive.utils import autoinit_pycuda
import pycuda.driver as pycuda_driver
import torch
from warp_drive.managers.data_manager import CUDADataManager
class CudaTensorHolder(pycuda_driver.PointerHolderBase):
    """
    A class that facilitates casting tensors to pointers.
    """

    def __init__(self, t):
        """Wrap torch tensor `t` so PyCUDA can use it as a device pointer."""
        super().__init__()
        # PointerHolderBase reads `gpudata` as the raw device address;
        # `data_ptr()` yields the tensor's CUDA memory address.
        self.gpudata = t.data_ptr()
class PyCUDADataManager(CUDADataManager):
    """
    PyCUDA-backed implementation of CUDADataManager.

    Fix: the class previously carried an empty docstring (`""""""`) followed
    by the real documentation as a dead string literal; the two are merged
    here so `PyCUDADataManager.__doc__` is actually populated.

    Example:
        cuda_data_manager = PyCUDADataManager(
            num_agents=10, num_envs=5, episode_length=100
        )

        data1 = DataFeed()
        data1.add_data(name="X", data=np.array([[1, 2, 3, 4, 5],
                                                [0, 0, 0, 0, 0],
                                                [0, 0, 0, 0, 0]])
                       )
        data1.add_data(name="a", data=100)
        cuda_data_manager.push_data_to_device(data)

        data2 = DataFeed()
        data2.add_data(name="Y", data=[[0.1,0.2,0.3,0.4,0.5],
                                       [0.0,0.0,0.0,0.0,0.0],
                                       [0.0,0.0,0.0,0.0,0.0]]
                       )
        cuda_data_manager.push_data_to_device(data2, torch_accessible=True)

        X_copy_at_host = cuda_data_manager.pull_data_from_device(name="X")
        Y_copy_at_host = cuda_data_manager.pull_data_from_device(name="Y")

        if cuda_data_manager.is_data_on_device_via_torch("Y"):
            Y_tensor_accessible_by_torch = (
                cuda_data_manager.data_on_device_via_torch("Y")
            )

        # cuda_function here assumes a compiled CUDA C function
        cuda_function(cuda_data_manager.device_data("X"),
                      cuda_data_manager.device_data("Y"),
                      block=(10,1,1), grid=(5,1))
    """

    def __init__(
        self,
        num_agents: int = None,
        num_envs: int = None,
        blocks_per_env: int = 1,
        episode_length: int = None,
    ):
        super().__init__(
            num_agents=num_agents,
            num_envs=num_envs,
            blocks_per_env=blocks_per_env,
            episode_length=episode_length,
        )

    def pull_data_from_device(self, name: str):
        """
        Copy the device-side values of `name` back to the host and return
        them (scalars come straight from the host copy; torch-backed data
        via .cpu().numpy(); raw device arrays via memcpy_dtoh).
        """
        assert name in self._host_data
        if name in self._scalar_data_list:
            return self._host_data[name]
        if self.is_data_on_device_via_torch(name):
            return self._device_data_via_torch[name].cpu().numpy()
        assert name in self._device_data_pointer
        # Allocate a host buffer of the same shape/dtype as the pristine copy.
        v = np.empty_like(self._host_data[name])
        pycuda_driver.memcpy_dtoh(v, self._device_data_pointer[name])
        return v

    def reset_device(self, name: Optional[str] = None):
        """
        Overwrite device data with the pristine host copy.

        :param name: the array to reset; if None, reset every array.
        """
        if name is not None:
            assert name in self._device_data_pointer
            assert name in self._host_data
            device_array_ptr = self._device_data_pointer[name]
            pycuda_driver.memcpy_htod(device_array_ptr, self._host_data[name])
        else:
            for key, host_array in self._host_data.items():
                device_array_ptr = self._device_data_pointer[key]
                pycuda_driver.memcpy_htod(device_array_ptr, host_array)

    def _to_device(
        self,
        name: str,
        name_on_device: Optional[str] = None,
        torch_accessible: bool = False,
    ):
        """
        Copy the host array `name` to the device under `name_on_device`.

        :param torch_accessible: if True, allocate via a torch CUDA tensor
            and register its pointer through CudaTensorHolder so PyCUDA
            kernels and torch share the same memory.
        """
        assert name in self._host_data
        host_array = self._host_data[name]
        if name_on_device is None:
            name_on_device = name
        assert name_on_device not in self._device_data_pointer
        if not torch_accessible:
            device_array_ptr = pycuda_driver.mem_alloc(host_array.nbytes)
            pycuda_driver.memcpy_htod(device_array_ptr, host_array)
            self._device_data_pointer[name_on_device] = device_array_ptr
        else:
            torch_tensor_device = torch.from_numpy(host_array).cuda()
            self._device_data_via_torch[name_on_device] = torch_tensor_device
            self._device_data_pointer[name_on_device] = CudaTensorHolder(
                torch_tensor_device
            )
import logging
from warp_drive.utils import autoinit_pycuda
from pycuda.driver import Context
class DeviceArchitectures:
    """
    Static tables of per-SM hardware limits, keyed by compute architecture.

    Reference:
        "https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities"
    """

    # Maximum number of resident thread blocks per streaming multiprocessor.
    MaxBlocksPerSM = {
        "sm_35": 16, "sm_37": 16,
        "sm_50": 32, "sm_52": 32, "sm_53": 32,
        "sm_60": 32, "sm_61": 32, "sm_62": 32,
        "sm_70": 32, "sm_72": 32, "sm_75": 16,
        "sm_80": 32, "sm_86": 16, "sm_87": 16,
    }

    # Maximum number of resident threads per streaming multiprocessor.
    MaxThreadsPerSM = {
        "sm_35": 2048, "sm_37": 2048,
        "sm_50": 2048, "sm_52": 2048, "sm_53": 2048,
        "sm_60": 2048, "sm_61": 2048, "sm_62": 2048,
        "sm_70": 2048, "sm_72": 2048, "sm_75": 1024,
        "sm_80": 2048, "sm_86": 1536, "sm_87": 1536,
    }
def calculate_blocks_per_env(num_agents):
    """
    Return how many thread blocks one environment needs so that every
    agent gets a thread, given the current device's block-size limit.
    """
    threads_per_block = Context.get_device().max_threads_per_block
    # Integer ceil division: ceil(num_agents / threads_per_block).
    return (num_agents + threads_per_block - 1) // threads_per_block
def validate_device_setup(arch, num_blocks, threads_per_block, blocks_per_env):
    """
    Check whether the requested launch configuration can run concurrently
    on the current GPU.

    Queries per-SM limits from the driver when available, falling back to
    the static DeviceArchitectures tables keyed by `arch` otherwise.

    :param arch: compute architecture string, e.g. "sm_80" (used only for
        the table fallback).
    :param num_blocks: total number of thread blocks requested.
    :param threads_per_block: threads per block in the launch.
    :param blocks_per_env: blocks assigned to each environment.
    :return: True if all requested blocks fit simultaneously; False (with
        a warning) otherwise.
    :raises Exception: if the limits cannot be determined for `arch`.
    """
    try:
        # max_blocks_per_multiprocessor is only supported after CUDA 11.0 build
        max_blocks_per_sm = Context.get_device().max_blocks_per_multiprocessor
    except AttributeError:
        max_blocks_per_sm = DeviceArchitectures.MaxBlocksPerSM.get(arch, None)
    try:
        max_threads_per_sm = Context.get_device().max_threads_per_multiprocessor
    except AttributeError:
        max_threads_per_sm = DeviceArchitectures.MaxThreadsPerSM.get(arch, None)
    try:
        num_sm = Context.get_device().multiprocessor_count
    except Exception as err:
        # No table fallback exists for the SM count; log and fail below.
        logging.error(err)
        num_sm = None
    if max_blocks_per_sm is None or max_threads_per_sm is None or num_sm is None:
        raise Exception("Unknown GPU architecture.")
    # NOTE(review): this is ceil(max_threads_per_sm / threads_per_block);
    # floor (how many full blocks fit in the thread budget) may be the
    # intent — confirm. As written it can overestimate by one block.
    max_blocks_by_threads = int((max_threads_per_sm - 1) // threads_per_block + 1)
    available_blocks_per_sm = min(max_blocks_per_sm, max_blocks_by_threads)
    max_blocks = available_blocks_per_sm * num_sm
    if max_blocks < num_blocks:
        if blocks_per_env == 1:
            logging.warning(
                f"Warning: max number of blocks available for simultaneous "
                f"run is {max_blocks}, "
                f"however, the number of blocks requested is {num_blocks}. "
                f"Therefore, the simulation will likely under-perform"
            )
        else:
            # With multiple blocks per env, blocks of one env must be
            # co-resident; oversubscription can dead-lock, not just slow down.
            logging.warning(
                f"Warning: max number of blocks available for simultaneous "
                f"run is {max_blocks}, "
                f"however, the number of blocks requested is {num_blocks}. "
                f"Since blocks_per_env > 1, block synchronization scheduling "
                f"can cause a dead-lock "
            )
        return False
    return True
import logging
class EnvironmentRegistrar:
    """
    Environment Registrar Class

    Keeps registries mapping lower-cased environment names to environment
    classes, per backend ("cpu", "pycuda", "numba"), plus the source-code
    paths of customized GPU environments.
    """

    # Registries are class attributes, so all instances (including the
    # module-level `env_registrar` singleton) share the same state.
    _cpu_envs = {}
    _cuda_envs = {}
    _numba_envs = {}
    _customized_cuda_env_src_paths = {
        "pycuda": {},
        "numba": {},
    }

    def add(self, env_backend="cpu", cuda_env_src_path=None):
        """
        Return a class decorator that registers the decorated environment
        class (under its `cls.name`, lower-cased) for one or more backends.

        :param env_backend: a backend name or a list of backend names
            ("cpu", "pycuda" or "numba"); case-insensitive.
        :param cuda_env_src_path: optional path to the customized GPU
            environment source code (ignored for the "cpu" backend).
        """
        if not isinstance(env_backend, list):
            env_backends = [env_backend.lower()]
        else:
            env_backends = [d.lower() for d in env_backend]

        def add_wrapper(cls):
            cls_name = cls.name.lower()
            for backend in env_backends:
                # Select the backend registry; the label only feeds messages.
                if backend == "cpu":
                    registry, label = self._cpu_envs, "CPU"
                elif backend == "pycuda":
                    registry, label = self._cuda_envs, "PyCUDA"
                elif backend == "numba":
                    registry, label = self._numba_envs, "Numba"
                else:
                    raise Exception("Invalid device: only support CPU and CUDA/GPU")
                if cls_name in registry:
                    raise Exception(
                        f"{label} environment {cls_name} already registered, "
                        f"you may need to go to your env class to "
                        f"define a different class name "
                    )
                registry[cls_name] = cls
                if backend != "cpu" and cuda_env_src_path is not None:
                    # Bug fix: pass the normalized single backend string.
                    # The raw `env_backend` argument may be a list or mixed
                    # case, which broke add_cuda_env_src_path().
                    self.add_cuda_env_src_path(cls_name, cuda_env_src_path, backend)
            return cls

        return add_wrapper

    def get(self, name, env_backend="cpu"):
        """Look up a registered environment class by name and backend."""
        name = name.lower()
        if env_backend == "cpu":
            if name not in self._cpu_envs:
                raise Exception(f"CPU environment {name} not found ")
            logging.info(f"returning CPU environment {name} ")
            return self._cpu_envs[name]
        elif env_backend == "pycuda":
            if name not in self._cuda_envs:
                raise Exception(f"PyCUDA environment {name} not found ")
            logging.info(f"returning CUDA environment {name} ")
            return self._cuda_envs[name]
        elif env_backend == "numba":
            if name not in self._numba_envs:
                raise Exception(f"Numba environment {name} not found ")
            logging.info(f"returning Numba environment {name} ")
            return self._numba_envs[name]
        else:
            raise Exception("Invalid backend: only support CPU, PyCUDA/CUDA and Numba")

    def add_cuda_env_src_path(self, name, cuda_env_src_path, env_backend="pycuda"):
        """
        Register the customized environment for developers.
        The FunctionManager will then be able to include the
        environment source code in the compilation.

        :param name: name of your customized environment
        :param cuda_env_src_path: ABSOLUTE path to the customized
            environment source code in CUDA
        :param env_backend: "pycuda" (expects a *.cu file path) or
            "numba" (expects an importable Python module path)
        """
        name = name.lower()
        if name in self._customized_cuda_env_src_paths[env_backend]:
            logging.warning(
                f"EnvironmentRegistrar has already registered an "
                f"environment path called {name} but we will re-register it "
                f"by overwriting the previous source code path"
            )
        if env_backend == "pycuda":
            assert (
                cuda_env_src_path.rsplit(".", 1)[1] == "cu"
            ), "the customized environment is expected to be a CUDA source code (*.cu)"
        elif env_backend == "numba":
            assert (
                "/" not in cuda_env_src_path
            ), "the customized environment is expected to be a valid PYTHONPATH"
        else:
            raise Exception(f"unknown env_backend: {env_backend}")
        self._customized_cuda_env_src_paths[env_backend][name] = cuda_env_src_path

    def get_cuda_env_src_path(self, name, env_backend="pycuda"):
        """Return the registered source path for `name`, or None."""
        name = name.lower()
        assert env_backend in ("pycuda", "numba"), f"unknown env_backend: {env_backend}"
        return self._customized_cuda_env_src_paths[env_backend].get(name, None)

    def has_env(self, name, env_backend="cpu"):
        """Return True if `name` is registered for the given backend."""
        name = name.lower()
        if env_backend == "cpu":
            return name in self._cpu_envs
        if env_backend in ("pycuda", "cuda", "gpu"):
            return name in self._cuda_envs
        if env_backend == "numba":
            return name in self._numba_envs
        raise Exception("Invalid device: only support CPU and CUDA/GPU")
# Module-level singleton: the shared registry used across the package.
env_registrar = EnvironmentRegistrar()
class DataFeed(dict):
    """
    Helper class to build up the data dict for
    CUDADataManager.push_data_to_device(data).

    Example:
        data = DataFeed()
        data.add_data(
            name="X",
            data=[1, 2, 3],
            save_copy_and_apply_at_reset=True,
            log_data_across_episode=True,
        )
    """
    # (Docstring fix: the example previously called `data.add(...)`,
    # which does not exist; the method is `add_data`.)

    def add_data(
        self,
        name: str,
        data,
        save_copy_and_apply_at_reset: bool = False,
        log_data_across_episode: bool = False,
        **kwargs
    ):
        """
        :param name: name of the data
        :param data: data in the form of list, array or scalar
        :param save_copy_and_apply_at_reset: the copy of the data will be saved
            inside GPU for the reset
        :param log_data_across_episode: a data buffer of episode length is
            reserved for logging data

        Any extra keyword arguments are stored as additional attributes.
        """
        d = {
            "data": data,
            "attributes": {
                "save_copy_and_apply_at_reset": save_copy_and_apply_at_reset,
                "log_data_across_episode": log_data_across_episode,
            },
        }
        for key, value in kwargs.items():
            d["attributes"][key] = value
        self[name] = d

    def add_data_list(self, data_list):
        """
        :param data_list: list of data configures either in dict or in tuple
            for example
            add_data_list([("x1", x1, True),
                           ("x2", x2, False, True),
                           {"name": "x3",
                            "data": x3,
                            "save_copy_and_apply_at_reset": False},
                           ])
        """
        assert isinstance(data_list, list)
        for d in data_list:
            # For tuples, len() counts the positional fields; for dicts it
            # counts the keys — both require at least name and data.
            assert len(d) >= 2, "name and data are strictly required"
            if isinstance(d, tuple):
                name = d[0]
                assert isinstance(name, str)
                data = d[1]
                # Optional positional booleans; anything else falls back
                # to the defaults.
                save_copy_and_apply_at_reset = (
                    d[2] if (len(d) > 2 and isinstance(d[2], bool)) else False
                )
                log_data_across_episode = (
                    d[3] if (len(d) > 3 and isinstance(d[3], bool)) else False
                )
                self.add_data(
                    name, data, save_copy_and_apply_at_reset, log_data_across_episode
                )
            elif isinstance(d, dict):
                self.add_data(
                    name=d["name"],
                    data=d["data"],
                    save_copy_and_apply_at_reset=d.get(
                        "save_copy_and_apply_at_reset", False
                    ),
                    log_data_across_episode=d.get("log_data_across_episode", False),
                )
            else:
                raise Exception(
                    "Unknown type of data configure, only support tuple and dictionary"
                )

    def add_pool_for_reset(self, name, data, reset_target):
        """
        a special data that serves for the reset function to pick up values at random

        :param name: name of the data
        :param data: data in the form of list, array or scalar
        :param reset_target: specify the name of the data for the reset pool to apply for
            for example, the following will add a reset pool called
            'position_reset_pool' to reset 'position'
            add_pool_for_reset("position_reset_pool", [1,2,3], "position")
        """
        self.add_data(name,
                      data,
                      save_copy_and_apply_at_reset=False,
                      log_data_across_episode=False,
                      is_reset_pool=True,
                      reset_target=reset_target)
import logging
import os
import re
from warp_drive.utils.common import get_project_root
from warp_drive.utils.env_registrar import EnvironmentRegistrar
def get_default_env_directory(env_name):
    """
    Look up the Python module path of a built-in Numba environment
    step kernel.

    :param env_name: name of the environment.
    :return: the module path string, or None if the name is unknown.
    """
    default_env_modules = {
        "DummyEnv": "example_envs.dummy_env.test_step_numba",
        "TagGridWorld": "example_envs.tag_gridworld.tag_gridworld_step_numba",
        "TagContinuous": "example_envs.tag_continuous.tag_continuous_step_numba",
        "YOUR_ENVIRONMENT": "PYTHON_PATH_TO_YOUR_ENV_SRC",
    }
    return default_env_modules.get(env_name)
def update_env_header(
    template_header_file, path=None, num_envs=1, num_agents=1, blocks_per_env=1
):
    """
    Generate warp_drive/numba_includes/env_config.py from a template header.

    Placeholders of the form <<KEY>> in the template are replaced with the
    supplied values, and the result is written to the fixed destination
    directory (any stale copy is removed first).

    :param template_header_file: file name of the template header.
    :param path: directory holding the template; defaults to the
        destination directory itself.
    :param num_envs: number of environments (substitutes <<N_ENVS>>).
    :param num_agents: agents per environment (substitutes <<N_AGENTS>>).
    :param blocks_per_env: blocks per env (substitutes <<N_BLOCKS_PER_ENV>>).
    """
    def from_dict(dct):
        # Build a re.sub replacement callback mapping the captured
        # placeholder name to its value in `dct`.
        def lookup(match):
            key = match.group(1)
            return dct.get(key, f"<{key} not found>")
        return lookup
    destination_header_path = f"{get_project_root()}/warp_drive/numba_includes"
    if path is None:
        path = destination_header_path
    destination_header_file = "env_config.py"
    if os.path.exists(f"{destination_header_path}/{destination_header_file}"):
        logging.warning(
            f"the destination header file {destination_header_path}/"
            f"{destination_header_file} already exists; remove and rebuild."
        )
        os.remove(f"{destination_header_path}/{destination_header_file}")
    header_subs = {
        "N_ENVS": str(num_envs),
        "N_AGENTS": str(num_agents),
        "N_BLOCKS_PER_ENV": str(blocks_per_env),
    }
    header_content = ""
    with open(f"{path}/{template_header_file}", "r", encoding="utf8") as reader:
        for line in reader.readlines():
            # Non-greedy match so multiple placeholders per line all resolve.
            updated_line = re.sub("<<(.*?)>>", from_dict(header_subs), line)
            header_content += updated_line
    with open(
        f"{destination_header_path}/{destination_header_file}", "w", encoding="utf8"
    ) as writer:
        writer.write(header_content)
def check_env_header(
    header_file="env_config.py", path=None, num_envs=1, num_agents=1, blocks_per_env=1
):
    """
    Verify that a generated header file carries the expected
    num_envs / num_agents / blocks_per_env values.

    :param header_file: file name of the header to check.
    :param path: directory of the header; defaults to
        warp_drive/numba_includes under the project root.
    :raises AssertionError: if any macro line disagrees with the
        expected value.
    """
    if path is None:
        path = f"{get_project_root()}/warp_drive/numba_includes"
    # Macro name -> (expected value, label used in the failure message).
    expected = {
        "wkNumberEnvs": (num_envs, "num_envs"),
        "wkNumberAgents": (num_agents, "num_agents"),
        "wkBlocksPerEnv": (blocks_per_env, "blocks_per_env"),
    }
    with open(f"{path}/{header_file}", "r", encoding="utf8") as reader:
        for line in reader.readlines():
            for macro, (value, label) in expected.items():
                if macro in line:
                    numbers = re.findall(r"\b\d+\b", line)
                    assert (
                        len(numbers) == 1 and int(numbers[0]) == value
                    ), f"{header_file} has different {label}"
                    break
def update_env_runner(
    template_runner_file,
    path=None,
    env_name=None,
    customized_env_registrar: EnvironmentRegistrar = None,
):
    """
    Generate warp_drive/numba_includes/env_runner.py from a template,
    substituting <<ENV_NUMBA>> with the module path of the environment's
    Numba step code.

    The environment source is looked up first in the customized registrar
    (if given), then among the built-in defaults.

    :param template_runner_file: file name of the runner template.
    :param path: directory holding the template; defaults to the
        destination directory itself.
    :param env_name: name of the environment to build.
    :param customized_env_registrar: optional registrar holding customized
        environment source paths.
    :raises AssertionError: if no source path can be resolved.
    """
    def from_dict(dct):
        # re.sub replacement callback mapping placeholder name -> value.
        def lookup(match):
            key = match.group(1)
            return dct.get(key, f"<{key} not found>")
        return lookup
    destination_runner_path = f"{get_project_root()}/warp_drive/numba_includes"
    if path is None:
        path = destination_runner_path
    destination_runner_file = "env_runner.py"
    if os.path.exists(f"{destination_runner_path}/{destination_runner_file}"):
        logging.warning(
            f"the destination runner file {destination_runner_path}/"
            f"{destination_runner_file} already exists; remove and rebuild."
        )
        os.remove(f"{destination_runner_path}/{destination_runner_file}")
    env_numba = None
    # Customized environments take precedence over the built-in defaults.
    if (
        customized_env_registrar is not None
        and customized_env_registrar.get_cuda_env_src_path(
            env_name, env_backend="numba"
        )
        is not None
    ):
        env_numba = customized_env_registrar.get_cuda_env_src_path(
            env_name, env_backend="numba"
        )
        logging.info(
            f"Finding the targeting environment source code "
            f"from the customized environment directory: {env_numba}"
        )
    elif get_default_env_directory(env_name) is not None:
        env_numba = get_default_env_directory(env_name)
        logging.info(
            f"Finding the targeting environment source code "
            f"from the default environment directory: {env_numba}"
        )
    assert env_numba is not None and isinstance(
        env_numba, str
    ), "Failed to find or validate the targeting environment"
    runner_subs = {"ENV_NUMBA": env_numba}
    runner_content = ""
    logging.info(
        f"Building the targeting environment "
        f"with source code at: {runner_subs['ENV_NUMBA']}"
    )
    with open(f"{path}/{template_runner_file}", "r", encoding="utf8") as reader:
        for line in reader.readlines():
            updated_line = re.sub("<<(.*?)>>", from_dict(runner_subs), line)
            runner_content += updated_line
    with open(
        f"{destination_runner_path}/{destination_runner_file}", "w", encoding="utf8"
    ) as writer:
        writer.write(runner_content)
import logging
import os
import re
from warp_drive.utils.common import get_project_root
from warp_drive.utils.env_registrar import EnvironmentRegistrar
def get_default_env_directory(env_name):
    """
    Look up the absolute path of a built-in CUDA environment step source
    file (*.cu).

    :param env_name: name of the environment.
    :return: the source path string, or None if the name is unknown.
    """
    root = get_project_root()
    default_env_sources = {
        "TagGridWorld": f"{root}/example_envs/tag_gridworld/tag_gridworld_step_pycuda.cu",
        "TagContinuous": f"{root}/example_envs/tag_continuous/tag_continuous_step_pycuda.cu",
        "YOUR_ENVIRONMENT": "FULL_PATH_TO_YOUR_ENV_SRC",
    }
    return default_env_sources.get(env_name)
def update_env_header(
    template_header_file, path=None, num_envs=1, num_agents=1, blocks_per_env=1
):
    """
    Generate warp_drive/cuda_includes/env_config.h from a template header.

    Placeholders of the form <<KEY>> in the template are replaced with the
    supplied values, and the result is written to the fixed destination
    directory (any stale copy is removed first).

    :param template_header_file: file name of the template header.
    :param path: directory holding the template; defaults to the
        destination directory itself.
    :param num_envs: number of environments (substitutes <<N_ENVS>>).
    :param num_agents: agents per environment (substitutes <<N_AGENTS>>).
    :param blocks_per_env: blocks per env (substitutes <<N_BLOCKS_PER_ENV>>).
    """
    def from_dict(dct):
        # Build a re.sub replacement callback mapping the captured
        # placeholder name to its value in `dct`.
        def lookup(match):
            key = match.group(1)
            return dct.get(key, f"<{key} not found>")
        return lookup
    destination_header_path = f"{get_project_root()}/warp_drive/cuda_includes"
    if path is None:
        path = destination_header_path
    destination_header_file = "env_config.h"
    if os.path.exists(f"{destination_header_path}/{destination_header_file}"):
        logging.warning(
            f"the destination header file {destination_header_path}/"
            f"{destination_header_file} already exists; remove and rebuild."
        )
        os.remove(f"{destination_header_path}/{destination_header_file}")
    header_subs = {
        "N_ENVS": str(num_envs),
        "N_AGENTS": str(num_agents),
        "N_BLOCKS_PER_ENV": str(blocks_per_env),
    }
    header_content = ""
    with open(f"{path}/{template_header_file}", "r", encoding="utf8") as reader:
        for line in reader.readlines():
            # Non-greedy match so multiple placeholders per line all resolve.
            updated_line = re.sub("<<(.*?)>>", from_dict(header_subs), line)
            header_content += updated_line
    with open(
        f"{destination_header_path}/{destination_header_file}", "w", encoding="utf8"
    ) as writer:
        writer.write(header_content)
def check_env_header(
    header_file="env_config.h", path=None, num_envs=1, num_agents=1, blocks_per_env=1
):
    """
    Verify that a generated CUDA header carries the expected
    num_envs / num_agents / blocks_per_env values.

    :param header_file: file name of the header to check.
    :param path: directory of the header; defaults to
        warp_drive/cuda_includes under the project root.
    :raises AssertionError: if any macro line disagrees with the
        expected value.
    """
    if path is None:
        path = f"{get_project_root()}/warp_drive/cuda_includes"
    # Macro name -> (expected value, label used in the failure message).
    expected = {
        "wkNumberEnvs": (num_envs, "num_envs"),
        "wkNumberAgents": (num_agents, "num_agents"),
        "wkBlocksPerEnv": (blocks_per_env, "blocks_per_env"),
    }
    with open(f"{path}/{header_file}", "r", encoding="utf8") as reader:
        for line in reader.readlines():
            for macro, (value, label) in expected.items():
                if macro in line:
                    numbers = re.findall(r"\b\d+\b", line)
                    assert (
                        len(numbers) == 1 and int(numbers[0]) == value
                    ), f"{header_file} has different {label}"
                    break
def update_env_runner(
    template_runner_file,
    path=None,
    env_name=None,
    customized_env_registrar: EnvironmentRegistrar = None,
):
    """
    Generate warp_drive/cuda_includes/env_runner.cu from a template,
    substituting <<ENV_CUDA>> with the path of the environment's CUDA
    step source file.

    The environment source is looked up first in the customized registrar
    (if given), then among the built-in defaults.

    :param template_runner_file: file name of the runner template.
    :param path: directory holding the template; defaults to the
        destination directory itself.
    :param env_name: name of the environment to build.
    :param customized_env_registrar: optional registrar holding customized
        environment source paths.
    :raises AssertionError: if no source path can be resolved.
    """
    def from_dict(dct):
        # re.sub replacement callback mapping placeholder name -> value.
        def lookup(match):
            key = match.group(1)
            return dct.get(key, f"<{key} not found>")
        return lookup
    destination_runner_path = f"{get_project_root()}/warp_drive/cuda_includes"
    if path is None:
        path = destination_runner_path
    destination_runner_file = "env_runner.cu"
    if os.path.exists(f"{destination_runner_path}/{destination_runner_file}"):
        logging.warning(
            f"the destination runner file {destination_runner_path}/"
            f"{destination_runner_file} already exists; remove and rebuild."
        )
        os.remove(f"{destination_runner_path}/{destination_runner_file}")
    env_cuda = None
    # Customized environments take precedence over the built-in defaults.
    if (
        customized_env_registrar is not None
        and customized_env_registrar.get_cuda_env_src_path(env_name) is not None
    ):
        env_cuda = customized_env_registrar.get_cuda_env_src_path(env_name)
        logging.info(
            f"Finding the targeting environment source code "
            f"from the customized environment directory: {env_cuda}"
        )
    elif get_default_env_directory(env_name) is not None:
        env_cuda = get_default_env_directory(env_name)
        logging.info(
            f"Finding the targeting environment source code "
            f"from the default environment directory: {env_cuda}"
        )
    assert env_cuda is not None and isinstance(
        env_cuda, str
    ), "Failed to find or validate the targeting environment"
    runner_subs = {"ENV_CUDA": env_cuda}
    runner_content = ""
    logging.info(
        f"Building the targeting environment "
        f"with source code at: {runner_subs['ENV_CUDA']}"
    )
    with open(f"{path}/{template_runner_file}", "r", encoding="utf8") as reader:
        for line in reader.readlines():
            updated_line = re.sub("<<(.*?)>>", from_dict(runner_subs), line)
            runner_content += updated_line
    with open(
        f"{destination_runner_path}/{destination_runner_file}", "w", encoding="utf8"
    ) as writer:
        writer.write(runner_content)
import argparse
import logging
import os
import sys
import time
import torch
import yaml
from example_envs.tag_continuous.tag_continuous import TagContinuous
from example_envs.tag_gridworld.tag_gridworld import CUDATagGridWorld, CUDATagGridWorldWithResetPool
from warp_drive.env_wrapper import EnvWrapper
from warp_drive.training.trainer import Trainer
from warp_drive.training.utils.distributed_train.distributed_trainer_numba import (
perform_distributed_training,
)
from warp_drive.training.utils.vertical_scaler import perform_auto_vertical_scaling
from warp_drive.utils.common import get_project_root
_ROOT_DIR = get_project_root()
_TAG_CONTINUOUS = "tag_continuous"
_TAG_GRIDWORLD = "tag_gridworld"
_TAG_GRIDWORLD_WITH_RESET_POOL = "tag_gridworld_with_reset_pool"
# Example usages (from the root folder):
# >> python warp_drive/training/example_training_script.py -e tag_gridworld
# >> python warp_drive/training/example_training_script.py --env tag_continuous
def setup_trainer_and_train(
    run_configuration,
    device_id=0,
    num_devices=1,
    event_messenger=None,
    results_directory=None,
    verbose=True,
):
    """
    Create the environment wrapper, define the policy mapping to agent ids,
    and create the trainer object. Also, perform training.

    :param run_configuration: run config dict (typically loaded from the
        yaml file in training/run_configs/).
    :param device_id: GPU device index this trainer runs on.
    :param num_devices: total number of GPU devices used for training.
    :param event_messenger: inter-process messenger used to synchronize
        the environment build across devices (multi-GPU runs).
    :param results_directory: name of the directory to save results into.
    :param verbose: whether the trainer prints progress.
    """
    logging.getLogger().setLevel(logging.ERROR)
    num_envs = run_configuration["trainer"]["num_envs"]
    # Create a wrapped environment object via the EnvWrapper
    # (env_backend="numba" runs the simulation on the GPU)
    # ----------------------------------------------------------------
    if run_configuration["name"] == _TAG_CONTINUOUS:
        env_wrapper = EnvWrapper(
            TagContinuous(**run_configuration["env"]),
            num_envs=num_envs,
            env_backend="numba",
            event_messenger=event_messenger,
            process_id=device_id,
        )
    elif run_configuration["name"] == _TAG_GRIDWORLD:
        env_wrapper = EnvWrapper(
            CUDATagGridWorld(**run_configuration["env"]),
            num_envs=num_envs,
            env_backend="numba",
            event_messenger=event_messenger,
            process_id=device_id,
        )
    elif run_configuration["name"] == _TAG_GRIDWORLD_WITH_RESET_POOL:
        env_wrapper = EnvWrapper(
            CUDATagGridWorldWithResetPool(**run_configuration["env"]),
            num_envs=num_envs,
            env_backend="numba",
            event_messenger=event_messenger,
            process_id=device_id,
        )
    else:
        # Bug fix: the message previously lacked a comma after
        # _TAG_CONTINUOUS, running the last two env names together.
        raise NotImplementedError(
            f"Currently, the environments supported are ["
            f"{_TAG_GRIDWORLD}, "
            f"{_TAG_CONTINUOUS}, "
            f"{_TAG_GRIDWORLD_WITH_RESET_POOL}"
            f"]",
        )
    # Policy mapping to agent ids: agents can share models
    # The policy_tag_to_agent_id_map dictionary maps
    # policy model names to agent ids.
    # ----------------------------------------------------
    if len(run_configuration["policy"].keys()) == 1:
        # Using a single (or shared policy) across all agents
        policy_name = list(run_configuration["policy"])[0]
        policy_tag_to_agent_id_map = {
            policy_name: list(env_wrapper.env.taggers) + list(env_wrapper.env.runners)
        }
    else:
        # Using different policies for different (sets of) agents
        policy_tag_to_agent_id_map = {
            "tagger": list(env_wrapper.env.taggers),
            "runner": list(env_wrapper.env.runners),
        }
    # Assert that all the valid policies are mapped to at least one agent
    assert set(run_configuration["policy"].keys()) == set(
        policy_tag_to_agent_id_map.keys()
    )
    # Trainer object
    # --------------
    trainer = Trainer(
        env_wrapper=env_wrapper,
        config=run_configuration,
        policy_tag_to_agent_id_map=policy_tag_to_agent_id_map,
        device_id=device_id,
        num_devices=num_devices,
        results_dir=results_directory,
        verbose=verbose,
    )
    # Perform training
    # ----------------
    trainer.train()
    trainer.graceful_close()
    perf_stats = trainer.perf_stats
    print(f"Training steps/s: {perf_stats.steps / perf_stats.total_time:.2f}. \n")
if __name__ == "__main__":
num_gpus_available = torch.cuda.device_count()
assert num_gpus_available > 0, "The training script needs a GPU machine to run!"
# Set logger level e.g., DEBUG, INFO, WARNING, ERROR\n",
logging.getLogger().setLevel(logging.WARNING)
parser = argparse.ArgumentParser()
parser.add_argument(
"--env",
"-e",
type=str,
help="the environment to train. This also refers to the"
"yaml file name in run_configs/.",
)
parser.add_argument(
"--auto_scale",
"-a",
action="store_true",
help="perform auto scaling.",
)
parser.add_argument(
"--num_gpus",
"-n",
type=int,
default=-1,
help="the number of GPU devices for (horizontal) scaling, "
"default=-1 (using configure setting)",
)
parser.add_argument(
"--results_dir", type=str, help="name of the directory to save results into."
)
args = parser.parse_args()
assert args.env is not None, (
"No env specified. Please use the '-e'- or '--env' option "
"to specify an environment. The environment name should "
"match the name of the yaml file in training/run_configs/."
)
# Read the run configurations specific to each environment.
# Note: The run config yaml(s) can be edited at warp_drive/training/run_configs
# -----------------------------------------------------------------------------
config_path = os.path.join(
_ROOT_DIR, "warp_drive", "training", "run_configs", f"{args.env}.yaml"
)
if not os.path.exists(config_path):
raise ValueError(
"Invalid environment specified! The environment name should "
"match the name of the yaml file in training/run_configs/."
)
with open(config_path, "r", encoding="utf8") as fp:
run_config = yaml.safe_load(fp)
if args.auto_scale:
# Automatic scaling
print("Performing Auto Scaling!\n")
# First, perform vertical scaling.
run_config = perform_auto_vertical_scaling(setup_trainer_and_train, run_config)
# Next, perform horizontal scaling.
# Set `num_gpus` to the maximum number of GPUs available
run_config["trainer"]["num_gpus"] = num_gpus_available
print(f"We will be using {num_gpus_available} GPU(s) for training.")
elif args.num_gpus >= 1:
# Set the appropriate num_gpus configuration parameter
if args.num_gpus <= num_gpus_available:
print(f"We have successfully found {args.num_gpus} GPUs!")
run_config["trainer"]["num_gpus"] = args.num_gpus
else:
print(
f"You requested for {args.num_gpus} GPUs, but we were only able to "
f"find {num_gpus_available} GPU(s)! \nDo you wish to continue? [Y/n]"
)
if input() != "Y":
print("Terminating program.")
sys.exit()
else:
run_config["trainer"]["num_gpus"] = num_gpus_available
elif "num_gpus" not in run_config["trainer"]:
run_config["trainer"]["num_gpus"] = 1
if args.results_dir is not None:
results_dir = args.results_dir
else:
results_dir = f"{time.time():10.0f}"
print(f"Training with {run_config['trainer']['num_gpus']} GPU(s).")
if run_config["trainer"]["num_gpus"] > 1:
perform_distributed_training(setup_trainer_and_train, run_config, results_dir)
else:
setup_trainer_and_train(run_config, results_directory=results_dir) | /rl-warp-drive-2.5.0.tar.gz/rl-warp-drive-2.5.0/warp_drive/training/example_training_script_numba.py | 0.591015 | 0.215021 | example_training_script_numba.py | pypi |
import argparse
import logging
import os
import sys
import time
import torch
import yaml
from example_envs.tag_continuous.tag_continuous import TagContinuous
from example_envs.tag_gridworld.tag_gridworld import CUDATagGridWorld
from warp_drive.env_wrapper import EnvWrapper
from warp_drive.training.trainer import Trainer
from warp_drive.training.utils.distributed_train.distributed_trainer_pycuda import (
perform_distributed_training,
)
from warp_drive.training.utils.vertical_scaler import perform_auto_vertical_scaling
from warp_drive.utils.common import get_project_root
_ROOT_DIR = get_project_root()
_TAG_CONTINUOUS = "tag_continuous"
_TAG_GRIDWORLD = "tag_gridworld"
# Example usages (from the root folder):
# >> python warp_drive/training/example_training_script.py -e tag_gridworld
# >> python warp_drive/training/example_training_script.py --env tag_continuous
def setup_trainer_and_train(
    run_configuration,
    device_id=0,
    num_devices=1,
    event_messenger=None,
    results_directory=None,
    verbose=True,
):
    """
    Create the environment wrapper, define the policy mapping to agent ids,
    and create the trainer object. Also, perform training.

    :param run_configuration: run config dict (typically loaded from the
        yaml file in training/run_configs/).
    :param device_id: GPU device index this trainer runs on.
    :param num_devices: total number of GPU devices used for training.
    :param event_messenger: inter-process messenger used to synchronize
        the environment build across devices (multi-GPU runs).
    :param results_directory: name of the directory to save results into.
    :param verbose: whether the trainer prints progress.
    """
    logging.getLogger().setLevel(logging.ERROR)
    num_envs = run_configuration["trainer"]["num_envs"]
    # Create a wrapped environment object via the EnvWrapper
    # (env_backend="pycuda" runs the simulation on the GPU)
    # ----------------------------------------------------------------
    if run_configuration["name"] == _TAG_GRIDWORLD:
        env_wrapper = EnvWrapper(
            CUDATagGridWorld(**run_configuration["env"]),
            num_envs=num_envs,
            env_backend="pycuda",
            event_messenger=event_messenger,
            process_id=device_id,
        )
    elif run_configuration["name"] == _TAG_CONTINUOUS:
        env_wrapper = EnvWrapper(
            TagContinuous(**run_configuration["env"]),
            num_envs=num_envs,
            env_backend="pycuda",
            event_messenger=event_messenger,
            process_id=device_id,
        )
    else:
        raise NotImplementedError(
            f"Currently, the environments supported are ["
            f"{_TAG_GRIDWORLD}, "
            f"{_TAG_CONTINUOUS}"
            f"]",
        )
    # Initialize shared constants for action index to sampled_actions_placeholder
    # ---------------------------------------------------------------------------
    if run_configuration["name"] == _TAG_GRIDWORLD:
        kIndexToActionArr = env_wrapper.env.step_actions
        env_wrapper.env.cuda_data_manager.add_shared_constants(
            {"kIndexToActionArr": kIndexToActionArr}
        )
        env_wrapper.env.cuda_function_manager.initialize_shared_constants(
            env_wrapper.env.cuda_data_manager, constant_names=["kIndexToActionArr"]
        )
    # Policy mapping to agent ids: agents can share models
    # The policy_tag_to_agent_id_map dictionary maps
    # policy model names to agent ids.
    # ----------------------------------------------------
    if len(run_configuration["policy"].keys()) == 1:
        # Using a single (or shared policy) across all agents
        policy_name = list(run_configuration["policy"])[0]
        policy_tag_to_agent_id_map = {
            policy_name: list(env_wrapper.env.taggers) + list(env_wrapper.env.runners)
        }
    else:
        # Using different policies for different (sets of) agents
        policy_tag_to_agent_id_map = {
            "tagger": list(env_wrapper.env.taggers),
            "runner": list(env_wrapper.env.runners),
        }
    # Assert that all the valid policies are mapped to at least one agent
    assert set(run_configuration["policy"].keys()) == set(
        policy_tag_to_agent_id_map.keys()
    )
    # Trainer object
    # --------------
    trainer = Trainer(
        env_wrapper=env_wrapper,
        config=run_configuration,
        policy_tag_to_agent_id_map=policy_tag_to_agent_id_map,
        device_id=device_id,
        num_devices=num_devices,
        results_dir=results_directory,
        verbose=verbose,
    )
    # Perform training
    # ----------------
    trainer.train()
    trainer.graceful_close()
    perf_stats = trainer.perf_stats
    print(f"Training steps/s: {perf_stats.steps / perf_stats.total_time:.2f}. \n")
if __name__ == "__main__":
num_gpus_available = torch.cuda.device_count()
assert num_gpus_available > 0, "The training script needs a GPU machine to run!"
# Set logger level e.g., DEBUG, INFO, WARNING, ERROR\n",
logging.getLogger().setLevel(logging.WARNING)
parser = argparse.ArgumentParser()
parser.add_argument(
"--env",
"-e",
type=str,
help="the environment to train. This also refers to the"
"yaml file name in run_configs/.",
)
parser.add_argument(
"--auto_scale",
"-a",
action="store_true",
help="perform auto scaling.",
)
parser.add_argument(
"--num_gpus",
"-n",
type=int,
default=-1,
help="the number of GPU devices for (horizontal) scaling, "
"default=-1 (using configure setting)",
)
parser.add_argument(
"--results_dir", type=str, help="name of the directory to save results into."
)
args = parser.parse_args()
assert args.env is not None, (
"No env specified. Please use the '-e'- or '--env' option "
"to specify an environment. The environment name should "
"match the name of the yaml file in training/run_configs/."
)
# Read the run configurations specific to each environment.
# Note: The run config yaml(s) can be edited at warp_drive/training/run_configs
# -----------------------------------------------------------------------------
config_path = os.path.join(
_ROOT_DIR, "warp_drive", "training", "run_configs", f"{args.env}.yaml"
)
if not os.path.exists(config_path):
raise ValueError(
"Invalid environment specified! The environment name should "
"match the name of the yaml file in training/run_configs/."
)
with open(config_path, "r", encoding="utf8") as fp:
run_config = yaml.safe_load(fp)
if args.auto_scale:
# Automatic scaling
print("Performing Auto Scaling!\n")
# First, perform vertical scaling.
run_config = perform_auto_vertical_scaling(setup_trainer_and_train, run_config)
# Next, perform horizontal scaling.
# Set `num_gpus` to the maximum number of GPUs available
run_config["trainer"]["num_gpus"] = num_gpus_available
print(f"We will be using {num_gpus_available} GPU(s) for training.")
elif args.num_gpus >= 1:
# Set the appropriate num_gpus configuration parameter
if args.num_gpus <= num_gpus_available:
print(f"We have successfully found {args.num_gpus} GPUs!")
run_config["trainer"]["num_gpus"] = args.num_gpus
else:
print(
f"You requested for {args.num_gpus} GPUs, but we were only able to "
f"find {num_gpus_available} GPU(s)! \nDo you wish to continue? [Y/n]"
)
if input() != "Y":
print("Terminating program.")
sys.exit()
else:
run_config["trainer"]["num_gpus"] = num_gpus_available
elif "num_gpus" not in run_config["trainer"]:
run_config["trainer"]["num_gpus"] = 1
if args.results_dir is not None:
results_dir = args.results_dir
else:
results_dir = f"{time.time():10.0f}"
print(f"Training with {run_config['trainer']['num_gpus']} GPU(s).")
if run_config["trainer"]["num_gpus"] > 1:
perform_distributed_training(setup_trainer_and_train, run_config, results_dir)
else:
setup_trainer_and_train(run_config, results_directory=results_dir) | /rl-warp-drive-2.5.0.tar.gz/rl-warp-drive-2.5.0/warp_drive/training/example_training_script_pycuda.py | 0.599602 | 0.233008 | example_training_script_pycuda.py | pypi |
import numpy as np
import torch
import torch.nn.functional as func
from gym.spaces import Box, Dict, Discrete, MultiDiscrete
from torch import nn
from warp_drive.utils.constants import Constants
from warp_drive.utils.data_feed import DataFeed
from warp_drive.training.utils.data_loader import get_flattened_obs_size
_OBSERVATIONS = Constants.OBSERVATIONS
_PROCESSED_OBSERVATIONS = Constants.PROCESSED_OBSERVATIONS
_ACTION_MASK = Constants.ACTION_MASK
_LARGE_NEG_NUM = -1e20
def apply_logit_mask(logits, mask=None):
    """
    Suppress invalid actions by pushing their logits toward -inf.

    A mask value of 1 marks a valid action and a mask value of 0 marks an
    invalid one: invalid entries receive an additive penalty of
    ``_LARGE_NEG_NUM`` so their post-softmax probability is ~0.

    :param logits: raw policy logits tensor.
    :param mask: optional {0, 1} tensor broadcastable to ``logits``;
        when None, ``logits`` is returned unchanged.
    """
    if mask is None:
        return logits
    # (1 - mask) is 1 exactly on the invalid entries, so only those
    # positions are shifted by the large negative constant.
    penalty = (1 - mask) * (torch.ones_like(logits) * _LARGE_NEG_NUM)
    return logits + penalty
# Policy networks
# ---------------
class FullyConnected(nn.Module):
    """
    Fully connected (MLP) policy/value network implementation in Pytorch.

    Reads observations directly from WarpDrive's GPU data manager, flattens
    them, feeds them through a stack of Linear+ReLU layers, and produces
    per-action-dimension softmax probabilities plus a scalar value estimate.
    """

    name = "torch_fully_connected"

    def __init__(
        self,
        env,
        model_config,
        policy,
        policy_tag_to_agent_id_map,
        create_separate_placeholders_for_each_policy=False,
        obs_dim_corresponding_to_num_agents="first",
    ):
        """
        :param env: WarpDrive environment wrapper (exposes ``env.env`` for
            spaces and ``env.cuda_data_manager`` for device placeholders).
        :param model_config: dict with key "fc_dims" — list of hidden sizes.
        :param policy: the policy tag this network serves.
        :param policy_tag_to_agent_id_map: maps policy tag -> agent ids.
        :param create_separate_placeholders_for_each_policy: whether obs
            placeholders are stored per-policy on the device.
        :param obs_dim_corresponding_to_num_agents: "first" or "last" —
            which axis of the raw obs indexes agents.
        """
        super().__init__()

        self.env = env
        fc_dims = model_config["fc_dims"]
        assert isinstance(fc_dims, list)
        num_fc_layers = len(fc_dims)
        self.policy = policy
        self.policy_tag_to_agent_id_map = policy_tag_to_agent_id_map
        self.create_separate_placeholders_for_each_policy = (
            create_separate_placeholders_for_each_policy
        )
        assert obs_dim_corresponding_to_num_agents in ["first", "last"]
        self.obs_dim_corresponding_to_num_agents = obs_dim_corresponding_to_num_agents

        # All agents mapped to this policy share spaces; sample the first one.
        sample_agent_id = self.policy_tag_to_agent_id_map[self.policy][0]
        # Flatten obs space
        self.observation_space = self.env.env.observation_space[sample_agent_id]
        self.flattened_obs_size = self.get_flattened_obs_size(self.observation_space)

        # Discrete -> single head; MultiDiscrete -> one head per sub-action.
        if isinstance(self.env.env.action_space[sample_agent_id], Discrete):
            action_space = [self.env.env.action_space[sample_agent_id].n]
        elif isinstance(self.env.env.action_space[sample_agent_id], MultiDiscrete):
            action_space = self.env.env.action_space[sample_agent_id].nvec
        else:
            raise NotImplementedError

        # Layer i maps input_dims[i] -> output_dims[i]; the first layer
        # consumes the flattened observation.
        input_dims = [self.flattened_obs_size] + fc_dims[:-1]
        output_dims = fc_dims
        self.fc = nn.ModuleDict()
        for fc_layer in range(num_fc_layers):
            self.fc[str(fc_layer)] = nn.Sequential(
                nn.Linear(input_dims[fc_layer], output_dims[fc_layer]),
                nn.ReLU(),
            )

        # policy network (list of heads)
        policy_heads = [None for _ in range(len(action_space))]
        self.output_dims = []  # Network output dimension(s)
        for idx, act_space in enumerate(action_space):
            self.output_dims += [act_space]
            policy_heads[idx] = nn.Linear(fc_dims[-1], act_space)
        self.policy_head = nn.ModuleList(policy_heads)
        # value-function network head
        self.vf_head = nn.Linear(fc_dims[-1], 1)

        # used for action masking (set lazily in get_flattened_obs)
        self.action_mask = None

        # max batch size allowed — first dimension of the processed-obs
        # placeholder registered on the device.
        name = f"{_PROCESSED_OBSERVATIONS}_batch_{self.policy}"
        self.batch_size = self.env.cuda_data_manager.get_shape(name=name)[0]

    def get_flattened_obs_size(self, observation_space):
        """Get the total size of the observations after flattening"""
        return get_flattened_obs_size(observation_space)

    def reshape_and_flatten_obs(self, obs):
        """
        Reshape obs to (num_envs, num_agents, -1).

        # Note: WarpDrive assumes that all the observation are shaped
        # (num_agents, *feature_dim), i.e., the observation dimension
        # corresponding to 'num_agents' is the first one. If the observation
        # dimension corresponding to num_agents is last, we will need to
        # permute the axes to align with WarpDrive's assumption.
        """
        num_envs = obs.shape[0]
        if self.create_separate_placeholders_for_each_policy:
            num_agents = len(self.policy_tag_to_agent_id_map[self.policy])
        else:
            num_agents = self.env.n_agents

        if self.obs_dim_corresponding_to_num_agents == "first":
            pass
        elif self.obs_dim_corresponding_to_num_agents == "last":
            shape_len = len(obs.shape)
            if shape_len == 1:
                obs = obs.reshape(-1, num_agents)  # valid only when num_agents = 1
            # Move the trailing agents axis right after the env axis.
            obs = obs.permute(0, -1, *range(1, shape_len - 1))
        else:
            raise ValueError(
                "num_agents can only be the first "
                "or the last dimension in the observations."
            )
        return obs.reshape(num_envs, num_agents, -1)

    def get_flattened_obs(self):
        """
        If the obs is of Box type, it will already be flattened.
        If the obs is of Dict type, then concatenate all the
        obs values and flatten them out.
        Returns the concatenated and flattened obs.
        """
        if isinstance(self.observation_space, Box):
            if self.create_separate_placeholders_for_each_policy:
                obs = self.env.cuda_data_manager.data_on_device_via_torch(
                    f"{_OBSERVATIONS}_{self.policy}"
                )
            else:
                obs = self.env.cuda_data_manager.data_on_device_via_torch(_OBSERVATIONS)
            flattened_obs = self.reshape_and_flatten_obs(obs)
        elif isinstance(self.observation_space, Dict):
            obs_dict = {}
            for key in self.observation_space:
                if self.create_separate_placeholders_for_each_policy:
                    obs = self.env.cuda_data_manager.data_on_device_via_torch(
                        f"{_OBSERVATIONS}_{self.policy}_{key}"
                    )
                else:
                    obs = self.env.cuda_data_manager.data_on_device_via_torch(
                        f"{_OBSERVATIONS}_{key}"
                    )
                if key == _ACTION_MASK:
                    # The action mask is stored on the instance (not in the
                    # concatenated obs) and applied to logits in forward().
                    self.action_mask = self.reshape_and_flatten_obs(obs)
                    assert self.action_mask.shape[-1] == sum(self.output_dims)
                else:
                    obs_dict[key] = obs
            flattened_obs_dict = {}
            for key, value in obs_dict.items():
                flattened_obs_dict[key] = self.reshape_and_flatten_obs(value)
            flattened_obs = torch.cat(list(flattened_obs_dict.values()), dim=-1)
        else:
            raise NotImplementedError("Observation space must be of Box or Dict type")
        assert flattened_obs.shape[-1] == self.flattened_obs_size, \
            f"The flattened observation size of {flattened_obs.shape[-1]} is different " \
            f"from the designated size of {self.flattened_obs_size} "
        return flattened_obs

    def forward(self, obs=None, batch_index=None):
        """
        Forward pass through the model.
        Returns action probabilities and value functions.

        When ``obs`` is None, observations are pulled from the device
        placeholders; when ``batch_index >= 0`` the processed obs are also
        written back to the training batch at that index.
        """
        if obs is None:
            # NOTE(review): batch_index must be an int here — passing
            # obs=None with batch_index=None raises a TypeError on this
            # comparison. Confirm callers always supply batch_index.
            assert batch_index < self.batch_size, f"batch_index: {batch_index}, self.batch_size: {self.batch_size}"
            # Read in observation from the placeholders and flatten them
            # before passing through the fully connected layers.
            # This is particularly relevant if the observations space is a Dict.
            obs = self.get_flattened_obs()
            if self.create_separate_placeholders_for_each_policy:
                ip = obs
            else:
                # Shared placeholder: select only this policy's agents.
                agent_ids_for_policy = self.policy_tag_to_agent_id_map[self.policy]
                ip = obs[:, agent_ids_for_policy]

            # Push the processed (in this case, flattened) obs to the GPU (device).
            # The writing happens to a specific batch index in the processed obs batch.
            # The processed obs batch is required for training.
            if batch_index >= 0:
                self.push_processed_obs_to_batch(batch_index, ip)
        else:
            ip = obs

        # Feed through the FC layers
        for layer in range(len(self.fc)):
            op = self.fc[str(layer)](ip)
            ip = op

        logits = op

        # Compute the action probabilities and the value function estimate
        # Apply action mask to the logits as well.
        action_masks = [None for _ in range(len(self.output_dims))]
        if self.action_mask is not None:
            # Split the concatenated mask back into per-head segments.
            start = 0
            for idx, dim in enumerate(self.output_dims):
                action_masks[idx] = self.action_mask[..., start : start + dim]
                start = start + dim
        action_probs = [
            func.softmax(apply_logit_mask(ph(logits), action_masks[idx]), dim=-1)
            for idx, ph in enumerate(self.policy_head)
        ]
        vals = self.vf_head(logits)[..., 0]

        return action_probs, vals

    def push_processed_obs_to_batch(self, batch_index, processed_obs):
        # Write processed obs into the device-side training batch placeholder
        # at the given index.
        name = f"{_PROCESSED_OBSERVATIONS}_batch_{self.policy}"
        self.env.cuda_data_manager.data_on_device_via_torch(name=name)[
            batch_index
        ] = processed_obs
import logging
from torch.optim.lr_scheduler import LambdaLR
def _linear_interpolation(l_v, r_v, slope):
"""linear interpolation between l_v and r_v with a slope"""
return l_v + slope * (r_v - l_v)
class ParamScheduler:
    """
    A generic scheduler for adapting parameters such as the
    learning rate and the entropy coefficient.
    Available scheduler types are ["constant", "piecewise_linear"].
    """

    def __init__(self, schedule):
        """
        :param schedule: schedule for how to vary the parameter.
            Types of parameter schedules:
            - constant: a single number — a constant parameter throughout
              training.
            - piecewise_linear: a list of [timestep, parameter value]
              pairs with the times in increasing order. The parameter
              values are linearly interpolated between the boundaries,
              and clamped outside the first/last timestep.
              e.g. schedule = [[1000, 0.1], [2000, 0.05]] implements
              [0.1 if t <= 1000,
               0.05 if t >= 2000,
               linearly interpolated between 1000 and 2000 steps];
              for instance, the value at 1500 steps will equal 0.075.
        :raises NotImplementedError: if the schedule is neither a number
            nor a list.
        """
        if isinstance(schedule, (int, float)):
            # The schedule corresponds to the param value itself.
            self.type = "constant"
        elif isinstance(schedule, list):
            self.type = "piecewise_linear"
            # Each item in the schedule must be a list of [time, param_value],
            # with the times specified in increasing (sorted) order.
            for item in schedule:
                assert isinstance(item, list), (
                    "Each entry in the schedule must"
                    " be a list with signature "
                    "[time, param_value]."
                )
            times = [item[0] for item in schedule]
            assert times == sorted(times), (
                "All the times must be sorted in" " an increasing order!"
            )
        else:
            raise NotImplementedError
        self.schedule = schedule

    def get_param_value(self, timestep):
        """Obtain the parameter value at a desired timestep."""
        assert timestep >= 0
        if self.type == "constant":
            param_value = self.schedule
        elif self.type == "piecewise_linear":
            if timestep <= self.schedule[0][0]:
                # Before the first knot: clamp to the first value.
                param_value = self.schedule[0][1]
            elif timestep >= self.schedule[-1][0]:
                # After the last knot: clamp to the last value.
                param_value = self.schedule[-1][1]
            else:
                # Scan consecutive knot pairs for the enclosing interval.
                for (l_t, l_v), (r_t, r_v) in zip(
                    self.schedule[:-1], self.schedule[1:]
                ):
                    if l_t <= timestep < r_t:
                        slope = float(timestep - l_t) / (r_t - l_t)
                        param_value = _linear_interpolation(l_v, r_v, slope)
                        # The enclosing interval is unique; stop scanning.
                        break
        else:
            raise NotImplementedError
        logging.info(f"Setting the param value at t={timestep} to {param_value}.")
        return param_value
class LRScheduler(ParamScheduler, LambdaLR):
    """
    A learning rate scheduler with Pytorch-style APIs,
    compatible with Pytorch Lightning.
    """

    def __init__(
        self, schedule, optimizer=None, init_timestep=0, timesteps_per_iteration=1
    ):
        """
        :param schedule: see ``ParamScheduler`` — a constant or a
            piecewise-linear [[time, value], ...] schedule.
        :param optimizer: optimizer associated with the LR scheduler.
        :param init_timestep: the value of timestep at initialization.
        :param timesteps_per_iteration: number of timesteps in each iteration.
        """
        assert optimizer is not None
        self.optimizer = optimizer
        assert init_timestep >= 0
        self.init_timestep = init_timestep
        assert timesteps_per_iteration > 0
        self.timesteps_per_iteration = timesteps_per_iteration
        ParamScheduler.__init__(self, schedule)

        def lr_lambda(iteration):
            # LambdaLR multiplies the optimizer's base lr by this factor,
            # so the schedule value is normalized by its value at
            # init_timestep to yield a relative multiplier.
            return self.get_param_value(
                self.init_timestep + iteration * self.timesteps_per_iteration
            ) / self.get_param_value(self.init_timestep)

        LambdaLR.__init__(self, optimizer, lr_lambda)
import logging
from warp_drive.training.utils.device_child_process.child_process_base import ProcessWrapper
def best_param_search(low=1, margin=1, func=None):
    """
    Perform a binary search to determine the best parameter value.
    In this specific context, the best
    parameter is (the highest) value of the parameter (e.g. batch size)
    that can be used to run a func(tion)
    (e.g., training) successfully. Beyond a certain value,
    the function fails to run for reasons such as out-of-memory.

    :param low: a starting low value to start searching from (defaults to 1).
    :param margin: denotes the margin allowed when choosing the
        configuration parameter (and the optimal parameter).
    :param func: the function that is required to be run with the
        configuration parameter.
    :return: the highest parameter value (within ``margin``) at which
        ``func`` ran successfully, or None if it failed even at the
        lowest positive value tried.
    """
    assert low > 0
    assert margin > 0
    assert func is not None

    # Determine if the function succeeds to run at the starting (low) value.
    # If not, keep halving the value of low until the run succeeds.
    try:
        print(f"Trying with a parameter value of {low}.")
        func(low)
        success = True
    except Exception as err:
        # Deliberately broad: any failure (e.g., out-of-memory) simply
        # means this parameter value is too high.
        logging.error(err)
        success = False
        print("Run failed! The starting value of the parameter is itself too high!\n")
    # Stop halving at 1: the parameter is required to be positive, so the
    # value 0 must never be attempted (the original loop condition
    # `low > 0` allowed a func(0) call after the final halving).
    while not success and low > 1:
        try:
            low = low // 2
            print(f"Trying with a parameter value of {low}.")
            func(low)
            success = True
        except Exception as err:
            logging.error(err)
            print("Run failed! Lowering the parameter value.\n")
    if not success:
        print("The function failed to run even at the lowest parameter value !")
        return None

    # Set coarse limits on low (function succeeds to run) and
    # high (function does not succeed running).
    while success:
        high = 2 * low
        try:
            print(f"Trying with a parameter value of {high}.")
            func(high)
            low = high
        except Exception as err:
            logging.error(err)
            success = False
            print("Run failed!\n")
        print(
            f"Low and high parameter values set to {low} and {high} respectively."
        )

    # Binary search to find the optimal value of low (within the margin).
    current_margin = high - low
    while current_margin > margin:
        mid = (low + high) // 2
        try:
            print(f"Trying with a parameter value of {mid}.")
            func(mid)
            low = mid
        except Exception as err:
            logging.error(err)
            high = mid
            print("Run failed!\n")
        print(f"Low and high parameter values set to {low} and {high} respectively.")
        current_margin = high - low
    print(f"Setting the parameter value to {low}\n")
    return low
def perform_auto_vertical_scaling(setup_trainer_and_train, config, num_iters=2):
    """
    Auto-scale the number of envs and batch size to maximize GPU utilization.

    :param setup_trainer_and_train: callable that builds the trainer and runs
        training for a given run configuration.
    :param config: the run configuration dict; it is mutated in place by the
        nested trial functions and also returned.
    :param num_iters: number of iterations to use when performing automatic
        vertical scaling.
    :return: the (mutated) config with tuned ``num_envs`` and
        ``train_batch_size``.
    """

    def launch_process(func, kwargs):
        """
        Run a Python function on a separate process.
        """
        # A fresh process is used so that a CUDA OOM in a trial does not
        # poison the parent process; exceptions are re-raised here.
        p = ProcessWrapper(target=func, kwargs=kwargs)
        p.start()
        p.join()
        if p.exception:
            raise p.exception

    def set_num_envs_and_train(num_envs, run_config=config):
        # Note: run_config defaults to the enclosing `config` object, so
        # these assignments mutate the shared configuration.
        run_config["trainer"]["num_envs"] = num_envs
        # Note that we also set the train batch size equal to
        # the number of environments, so that each block only
        # captures one timestep of the simulation.
        run_config["trainer"]["train_batch_size"] = num_envs
        # Set the appropriate number of episodes in order only
        # run for just `num_iters` iterations (i.e., train_batch_size = num_envs).
        run_config["trainer"]["num_episodes"] = (
            num_iters
            * run_config["trainer"]["train_batch_size"]
            / run_config["env"]["episode_length"]
        )
        # Performing training on a separate process
        launch_process(
            setup_trainer_and_train,
            kwargs={"run_configuration": config, "verbose": False},
        )

    def set_batch_size_per_env_and_train(train_batch_size_per_env, run_config=config):
        run_config["trainer"]["train_batch_size"] = (
            train_batch_size_per_env * config["trainer"]["num_envs"]
        )
        # Set the appropriate number of episodes in order only
        # run for just `num_iters` iterations (i.e., train_batch_size = num_envs).
        run_config["trainer"]["num_episodes"] = (
            num_iters
            * run_config["trainer"]["train_batch_size"]
            / run_config["env"]["episode_length"]
        )
        # Performing training on a separate process
        launch_process(
            setup_trainer_and_train,
            kwargs={"run_configuration": config, "verbose": False},
        )

    # Save some initial configs (restored before returning).
    num_episodes = config["trainer"]["num_episodes"]
    use_wandb = config["saving"].get("use_wandb", False)
    # disable wandb during the scaling trials
    config["saving"]["use_wandb"] = False

    # First, determine the maximum number of environments (i.e., GPU blocks)
    # that can be run in parallel before running out of thread memory.
    print("=" * 80)
    print("Determining the maximum number of environment replicas to run in parallel.")
    print("=" * 80)
    num_envs = config["trainer"]["num_envs"]
    max_envs = best_param_search(low=num_envs, func=set_num_envs_and_train)
    # Set the `num_envs` parameter to the max value found from above.
    config["trainer"]["num_envs"] = max_envs

    # Next, determine the maximum batch size that can be used
    # without running out of memory.
    print("=" * 80)
    print("Determining the maximum training batch size.")
    print("=" * 80)
    max_batch_size_per_env = best_param_search(func=set_batch_size_per_env_and_train)
    config["trainer"]["train_batch_size"] = (
        max_batch_size_per_env * config["trainer"]["num_envs"]
    )

    # Put back the original number of episodes and use_wandb settings.
    config["trainer"]["num_episodes"] = num_episodes
    config["saving"]["use_wandb"] = use_wandb
    return config
import argparse
import difflib
import importlib
import os
import time
import uuid
import gym as gym26
import gymnasium as gym
import numpy as np
import stable_baselines3 as sb3
import torch as th
from stable_baselines3.common.utils import set_random_seed
# Register custom envs
import rl_zoo3.import_envs # noqa: F401 pytype: disable=import-error
from rl_zoo3.exp_manager import ExperimentManager
from rl_zoo3.utils import ALGOS, StoreDict
def train() -> None:
    """
    Command-line entry point: parse CLI arguments, optionally set up
    Weights & Biases tracking, then either train an agent or launch a
    hyperparameter-optimization study through the ``ExperimentManager``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--algo", help="RL Algorithm", default="ppo", type=str, required=False, choices=list(ALGOS.keys()))
    parser.add_argument("--env", type=str, default="CartPole-v1", help="environment ID")
    parser.add_argument("-tb", "--tensorboard-log", help="Tensorboard log dir", default="", type=str)
    parser.add_argument("-i", "--trained-agent", help="Path to a pretrained agent to continue training", default="", type=str)
    # NOTE(review): argparse `type=bool` treats any non-empty string as True
    # (e.g. "--truncate-last-trajectory False" is truthy) — confirm intended.
    parser.add_argument(
        "--truncate-last-trajectory",
        help="When using HER with online sampling the last trajectory "
        "in the replay buffer will be truncated after reloading the replay buffer.",
        default=True,
        type=bool,
    )
    parser.add_argument("-n", "--n-timesteps", help="Overwrite the number of timesteps", default=-1, type=int)
    parser.add_argument("--num-threads", help="Number of threads for PyTorch (-1 to use default)", default=-1, type=int)
    parser.add_argument("--log-interval", help="Override log interval (default: -1, no change)", default=-1, type=int)
    parser.add_argument(
        "--eval-freq",
        help="Evaluate the agent every n steps (if negative, no evaluation). "
        "During hyperparameter optimization n-evaluations is used instead",
        default=25000,
        type=int,
    )
    parser.add_argument(
        "--optimization-log-path",
        help="Path to save the evaluation log and optimal policy for each hyperparameter tried during optimization. "
        "Disabled if no argument is passed.",
        type=str,
    )
    parser.add_argument("--eval-episodes", help="Number of episodes to use for evaluation", default=5, type=int)
    parser.add_argument("--n-eval-envs", help="Number of environments for evaluation", default=1, type=int)
    parser.add_argument("--save-freq", help="Save the model every n steps (if negative, no checkpoint)", default=-1, type=int)
    parser.add_argument(
        "--save-replay-buffer", help="Save the replay buffer too (when applicable)", action="store_true", default=False
    )
    parser.add_argument("-f", "--log-folder", help="Log folder", type=str, default="logs")
    parser.add_argument("--seed", help="Random generator seed", type=int, default=-1)
    parser.add_argument("--vec-env", help="VecEnv type", type=str, default="dummy", choices=["dummy", "subproc"])
    parser.add_argument("--device", help="PyTorch device to be use (ex: cpu, cuda...)", default="auto", type=str)
    parser.add_argument(
        "--n-trials",
        help="Number of trials for optimizing hyperparameters. "
        "This applies to each optimization runner, not the entire optimization process.",
        type=int,
        default=500,
    )
    parser.add_argument(
        "--max-total-trials",
        help="Number of (potentially pruned) trials for optimizing hyperparameters. "
        "This applies to the entire optimization process and takes precedence over --n-trials if set.",
        type=int,
        default=None,
    )
    parser.add_argument(
        "-optimize", "--optimize-hyperparameters", action="store_true", default=False, help="Run hyperparameters search"
    )
    parser.add_argument(
        "--no-optim-plots", action="store_true", default=False, help="Disable hyperparameter optimization plots"
    )
    parser.add_argument("--n-jobs", help="Number of parallel jobs when optimizing hyperparameters", type=int, default=1)
    parser.add_argument(
        "--sampler",
        help="Sampler to use when optimizing hyperparameters",
        type=str,
        default="tpe",
        choices=["random", "tpe", "skopt"],
    )
    parser.add_argument(
        "--pruner",
        help="Pruner to use when optimizing hyperparameters",
        type=str,
        default="median",
        choices=["halving", "median", "none"],
    )
    parser.add_argument("--n-startup-trials", help="Number of trials before using optuna sampler", type=int, default=10)
    parser.add_argument(
        "--n-evaluations",
        help="Training policies are evaluated every n-timesteps // n-evaluations steps when doing hyperparameter optimization."
        "Default is 1 evaluation per 100k timesteps.",
        type=int,
        default=None,
    )
    parser.add_argument(
        "--storage", help="Database storage path if distributed optimization should be used", type=str, default=None
    )
    parser.add_argument("--study-name", help="Study name for distributed optimization", type=str, default=None)
    parser.add_argument("--verbose", help="Verbose mode (0: no output, 1: INFO)", default=1, type=int)
    parser.add_argument(
        "--gym-packages",
        type=str,
        nargs="+",
        default=[],
        help="Additional external Gym environment package modules to import",
    )
    parser.add_argument(
        "--env-kwargs", type=str, nargs="+", action=StoreDict, help="Optional keyword argument to pass to the env constructor"
    )
    parser.add_argument(
        "-params",
        "--hyperparams",
        type=str,
        nargs="+",
        action=StoreDict,
        help="Overwrite hyperparameter (e.g. learning_rate:0.01 train_freq:10)",
    )
    parser.add_argument(
        "-conf",
        "--conf-file",
        type=str,
        default=None,
        help="Custom yaml file or python package from which the hyperparameters will be loaded."
        "We expect that python packages contain a dictionary called 'hyperparams' which contains a key for each environment.",
    )
    parser.add_argument("-uuid", "--uuid", action="store_true", default=False, help="Ensure that the run has a unique ID")
    parser.add_argument(
        "--track",
        action="store_true",
        default=False,
        help="if toggled, this experiment will be tracked with Weights and Biases",
    )
    parser.add_argument("--wandb-project-name", type=str, default="sb3", help="the wandb's project name")
    parser.add_argument("--wandb-entity", type=str, default=None, help="the entity (team) of wandb's project")
    parser.add_argument(
        "-P",
        "--progress",
        action="store_true",
        default=False,
        help="if toggled, display a progress bar using tqdm and rich",
    )
    parser.add_argument(
        "-tags", "--wandb-tags", type=str, default=[], nargs="+", help="Tags for wandb run, e.g.: -tags optimized pr-123"
    )
    args = parser.parse_args()

    # Going through custom gym packages to let them register in the global registory
    for env_module in args.gym_packages:
        importlib.import_module(env_module)

    env_id = args.env
    registered_envs = set(gym.envs.registry.keys())  # pytype: disable=module-attr
    # Add gym 0.26 envs
    registered_envs.update(gym26.envs.registry.keys())  # pytype: disable=module-attr

    # If the environment is not found, suggest the closest match
    if env_id not in registered_envs:
        try:
            closest_match = difflib.get_close_matches(env_id, registered_envs, n=1)[0]
        except IndexError:
            closest_match = "'no close match found...'"
        raise ValueError(f"{env_id} not found in gym registry, you maybe meant {closest_match}?")

    # Unique id to ensure there is no race condition for the folder creation
    uuid_str = f"_{uuid.uuid4()}" if args.uuid else ""
    if args.seed < 0:
        # Seed but with a random one
        args.seed = np.random.randint(2**32 - 1, dtype="int64").item()  # type: ignore[attr-defined]

    set_random_seed(args.seed)

    # Setting num threads to 1 makes things run faster on cpu
    if args.num_threads > 0:
        if args.verbose > 1:
            print(f"Setting torch.num_threads to {args.num_threads}")
        th.set_num_threads(args.num_threads)

    if args.trained_agent != "":
        assert args.trained_agent.endswith(".zip") and os.path.isfile(
            args.trained_agent
        ), "The trained_agent must be a valid path to a .zip file"

    print("=" * 10, env_id, "=" * 10)
    print(f"Seed: {args.seed}")

    # Optional experiment tracking with Weights & Biases.
    if args.track:
        try:
            import wandb
        except ImportError as e:
            raise ImportError(
                "if you want to use Weights & Biases to track experiment, please install W&B via `pip install wandb`"
            ) from e

        run_name = f"{args.env}__{args.algo}__{args.seed}__{int(time.time())}"
        tags = [*args.wandb_tags, f"v{sb3.__version__}"]
        run = wandb.init(
            name=run_name,
            project=args.wandb_project_name,
            entity=args.wandb_entity,
            tags=tags,
            config=vars(args),
            sync_tensorboard=True,  # auto-upload sb3's tensorboard metrics
            monitor_gym=True,  # auto-upload the videos of agents playing the game
            save_code=True,  # optional
        )
        args.tensorboard_log = f"runs/{run_name}"

    exp_manager = ExperimentManager(
        args,
        args.algo,
        env_id,
        args.log_folder,
        args.tensorboard_log,
        args.n_timesteps,
        args.eval_freq,
        args.eval_episodes,
        args.save_freq,
        args.hyperparams,
        args.env_kwargs,
        args.trained_agent,
        args.optimize_hyperparameters,
        args.storage,
        args.study_name,
        args.n_trials,
        args.max_total_trials,
        args.n_jobs,
        args.sampler,
        args.pruner,
        args.optimization_log_path,
        n_startup_trials=args.n_startup_trials,
        n_evaluations=args.n_evaluations,
        truncate_last_trajectory=args.truncate_last_trajectory,
        uuid_str=uuid_str,
        seed=args.seed,
        log_interval=args.log_interval,
        save_replay_buffer=args.save_replay_buffer,
        verbose=args.verbose,
        vec_env_type=args.vec_env,
        n_eval_envs=args.n_eval_envs,
        no_optim_plots=args.no_optim_plots,
        device=args.device,
        config=args.conf_file,
        show_progress=args.progress,
    )

    # Prepare experiment and launch hyperparameter optimization if needed
    results = exp_manager.setup_experiment()
    if results is not None:
        model, saved_hyperparams = results
        if args.track:
            # we need to save the loaded hyperparameters
            args.saved_hyperparams = saved_hyperparams
            assert run is not None  # make mypy happy
            run.config.setdefaults(vars(args))

        # Normal training
        if model is not None:
            exp_manager.learn(model)
            exp_manager.save_trained_model(model)
    else:
        exp_manager.hyperparameters_optimization()
# CLI entry point.
if __name__ == "__main__":
    train()
from typing import Any, Dict
import numpy as np
# Deprecation warning with gym 0.26 and numpy 1.24
np.bool8 = np.bool_ # type: ignore[attr-defined]
import gym # noqa: E402
import gymnasium # noqa: E402
class PatchedRegistry(dict):
    """
    Compatibility shim: ``gym.envs.registration.registry`` is now a plain
    dictionary and no longer an ``EnvRegistry()`` object, so older code
    that accesses ``registry.env_specs`` would break.

    This subclass restores that attribute by exposing the registry itself
    through a read-only property.
    """

    @property
    def env_specs(self) -> Dict[str, Any]:
        # The dict itself now plays the role that the ``env_specs``
        # mapping did in older gym versions.
        return self
class PatchedTimeLimit(gymnasium.wrappers.TimeLimit):
    """
    See https://github.com/openai/gym/issues/3102
    and https://github.com/Farama-Foundation/Gymnasium/pull/101:
    keep the behavior as before and provide additionnal info
    that the episode reached a timeout, but only
    when the episode is over because of that.
    """

    def step(self, action):
        """Step the wrapped env, flagging timeouts via ``TimeLimit.truncated``."""
        observation, reward, terminated, truncated, info = self.env.step(action)
        self._elapsed_steps += 1

        if self._elapsed_steps >= self._max_episode_steps:
            done = truncated or terminated
            # TimeLimit.truncated key may have been already set by the environment
            # do not overwrite it
            # only set it when the episode is not over for other reasons
            episode_truncated = not done or info.get("TimeLimit.truncated", False)
            info["TimeLimit.truncated"] = episode_truncated
            # truncated may have been set by the env too
            truncated = truncated or episode_truncated
        return observation, reward, terminated, truncated, info
# Use gym as base class otherwise the patch_env won't work
# Use gym as base class otherwise the patch_env won't work
class PatchedGymTimeLimit(gym.wrappers.TimeLimit):
    """
    See https://github.com/openai/gym/issues/3102
    and https://github.com/Farama-Foundation/Gymnasium/pull/101:
    keep the behavior as before and provide additionnal info
    that the episode reached a timeout, but only
    when the episode is over because of that.

    Same logic as ``PatchedTimeLimit`` but for the legacy ``gym`` package.
    """

    def step(self, action):
        """Step the wrapped env, flagging timeouts via ``TimeLimit.truncated``."""
        observation, reward, terminated, truncated, info = self.env.step(action)
        self._elapsed_steps += 1

        if self._elapsed_steps >= self._max_episode_steps:
            done = truncated or terminated
            # TimeLimit.truncated key may have been already set by the environment
            # do not overwrite it
            # only set it when the episode is not over for other reasons
            episode_truncated = not done or info.get("TimeLimit.truncated", False)
            info["TimeLimit.truncated"] = episode_truncated
            # truncated may have been set by the env too
            truncated = truncated or episode_truncated
        return observation, reward, terminated, truncated, info
# Patch Gym registry (for Pybullet)
# Install the shim registry in both places gym exposes it, preserving all
# existing registrations.
patched_registry = PatchedRegistry()
patched_registry.update(gym.envs.registration.registry)
gym.envs.registry = patched_registry
gym.envs.registration.registry = patched_registry

# Patch gym TimeLimit
gym.wrappers.TimeLimit = PatchedGymTimeLimit  # type: ignore[misc]
gym.wrappers.time_limit.TimeLimit = PatchedGymTimeLimit  # type: ignore[misc]
gym.envs.registration.TimeLimit = PatchedGymTimeLimit  # type: ignore[misc]

# Patch Gymnasium TimeLimit
gymnasium.wrappers.TimeLimit = PatchedTimeLimit  # type: ignore[misc]
gymnasium.wrappers.time_limit.TimeLimit = PatchedTimeLimit  # type: ignore[misc]
gymnasium.envs.registration.TimeLimit = PatchedTimeLimit  # type: ignore[misc,attr-defined]
import os
import tempfile
import time
from copy import deepcopy
from functools import wraps
from threading import Thread
from typing import Optional, Type, Union
import optuna
from sb3_contrib import TQC
from stable_baselines3 import SAC
from stable_baselines3.common.callbacks import BaseCallback, EvalCallback
from stable_baselines3.common.logger import TensorBoardOutputFormat
from stable_baselines3.common.vec_env import VecEnv
class TrialEvalCallback(EvalCallback):
    """
    Callback used for evaluating and reporting a trial.

    Reports ``last_mean_reward`` to the Optuna ``trial`` after each
    evaluation and stops training (returns False) when the trial is pruned.
    """

    def __init__(
        self,
        eval_env: VecEnv,
        trial: optuna.Trial,
        n_eval_episodes: int = 5,
        eval_freq: int = 10000,
        deterministic: bool = True,
        verbose: int = 0,
        best_model_save_path: Optional[str] = None,
        log_path: Optional[str] = None,
    ) -> None:
        super().__init__(
            eval_env=eval_env,
            n_eval_episodes=n_eval_episodes,
            eval_freq=eval_freq,
            deterministic=deterministic,
            verbose=verbose,
            best_model_save_path=best_model_save_path,
            log_path=log_path,
        )
        self.trial = trial
        # Monotonic evaluation counter, used as the Optuna report step.
        self.eval_idx = 0
        self.is_pruned = False

    def _on_step(self) -> bool:
        if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:
            # Run the parent evaluation, which updates last_mean_reward.
            super()._on_step()
            self.eval_idx += 1
            # report best or report current ?
            # report num_timesteps or elasped time ?
            self.trial.report(self.last_mean_reward, self.eval_idx)
            # Prune trial if need
            if self.trial.should_prune():
                self.is_pruned = True
                return False
        return True
class SaveVecNormalizeCallback(BaseCallback):
    """
    Callback for saving a VecNormalize wrapper every ``save_freq`` steps

    :param save_freq: (int)
    :param save_path: (str) Path to the folder where ``VecNormalize`` will be saved, as ``vecnormalize.pkl``
    :param name_prefix: (str) Common prefix to the saved ``VecNormalize``, if None (default)
        only one file will be kept.
    """

    def __init__(self, save_freq: int, save_path: str, name_prefix: Optional[str] = None, verbose: int = 0):
        super().__init__(verbose)
        self.save_freq = save_freq
        self.save_path = save_path
        self.name_prefix = name_prefix

    def _init_callback(self) -> None:
        # Create folder if needed
        if self.save_path is not None:
            os.makedirs(self.save_path, exist_ok=True)

    def _on_step(self) -> bool:
        # make mypy happy
        assert self.model is not None
        if self.n_calls % self.save_freq == 0:
            if self.name_prefix is not None:
                # Timestamped file name: keeps every checkpoint.
                path = os.path.join(self.save_path, f"{self.name_prefix}_{self.num_timesteps}_steps.pkl")
            else:
                # Fixed file name: overwritten each time, only one file kept.
                path = os.path.join(self.save_path, "vecnormalize.pkl")
            if self.model.get_vec_normalize_env() is not None:
                self.model.get_vec_normalize_env().save(path)  # type: ignore[union-attr]
                if self.verbose > 1:
                    print(f"Saving VecNormalize to {path}")
        return True
class ParallelTrainCallback(BaseCallback):
    """
    Callback to explore (collect experience) and train (do gradient steps)
    at the same time using two separate threads.
    Normally used with off-policy algorithms and `train_freq=(1, "episode")`.

    How it works: the model is cloned via a save/load round-trip, the main
    model's ``train`` method is patched to a no-op (it only collects data),
    and gradient steps are performed on the clone in a background thread.
    After each rollout, the clone's parameters are copied back into the
    main model, and the main model's replay buffer is copied to the clone.

    TODO:
    - blocking mode: wait for the model to finish updating the policy before collecting new experience
    at the end of a rollout
    - force sync mode: stop training to update to the latest policy for collecting
    new experience

    :param gradient_steps: Number of gradient steps to do before
        sending the new policy
    :param verbose: Verbosity level
    :param sleep_time: Limit the fps in the thread collecting experience.
    """

    def __init__(self, gradient_steps: int = 100, verbose: int = 0, sleep_time: float = 0.0):
        super().__init__(verbose)
        self.batch_size = 0
        # True once the background thread has finished its gradient steps
        self._model_ready = True
        # The clone that is actually trained (annotation only, set in _init_callback)
        self._model: Union[SAC, TQC]
        self.gradient_steps = gradient_steps
        self.process: Thread
        self.model_class: Union[Type[SAC], Type[TQC]]
        self.sleep_time = sleep_time

    def _init_callback(self) -> None:
        temp_file = tempfile.TemporaryFile()

        # Windows TemporaryFile is not a io Buffer
        # we save the model in the logs/ folder
        if os.name == "nt":
            temp_file = os.path.join("logs", "model_tmp.zip")  # type: ignore[arg-type,assignment]

        # make mypy happy
        assert isinstance(self.model, (SAC, TQC)), f"{self.model} is not supported for parallel training"

        self.model.save(temp_file)  # type: ignore[arg-type]

        # TODO: add support for other algorithms
        for model_class in [SAC, TQC]:
            if isinstance(self.model, model_class):
                self.model_class = model_class  # type: ignore[assignment]
                break

        assert self.model_class is not None, f"{self.model} is not supported for parallel training"
        # Clone the model; the clone is the one trained in the background thread
        self._model = self.model_class.load(temp_file)  # type: ignore[arg-type]
        self.batch_size = self._model.batch_size

        # Disable train method: the main model only collects experience,
        # gradient steps happen on the clone
        def patch_train(function):
            @wraps(function)
            def wrapper(*args, **kwargs):
                return

            return wrapper

        # Add logger for parallel training
        self._model.set_logger(self.model.logger)
        self.model.train = patch_train(self.model.train)  # type: ignore[assignment]

        # Hack: Re-add correct values at save time
        # (delegate saving to the clone, which holds the trained weights)
        def patch_save(function):
            @wraps(function)
            def wrapper(*args, **kwargs):
                return self._model.save(*args, **kwargs)

            return wrapper

        self.model.save = patch_save(self.model.save)  # type: ignore[assignment]

    def train(self) -> None:
        # Launch the gradient updates asynchronously in a daemon thread
        self._model_ready = False

        self.process = Thread(target=self._train_thread, daemon=True)
        self.process.start()

    def _train_thread(self) -> None:
        self._model.train(gradient_steps=self.gradient_steps, batch_size=self.batch_size)
        self._model_ready = True

    def _on_step(self) -> bool:
        # Optionally slow down the collection thread to limit the fps
        if self.sleep_time > 0:
            time.sleep(self.sleep_time)
        return True

    def _on_rollout_end(self) -> None:
        # Make mypy happy
        assert isinstance(self.model, (SAC, TQC))

        if self._model_ready:
            # Sync data and parameters: clone gets the fresh replay buffer,
            # main model gets the freshly trained weights
            self._model.replay_buffer = deepcopy(self.model.replay_buffer)
            self.model.set_parameters(deepcopy(self._model.get_parameters()))  # type: ignore[arg-type]
            self.model.actor = self.model.policy.actor  # type: ignore[union-attr, attr-defined, assignment]
            if self.num_timesteps >= self._model.learning_starts:
                self.train()
            # Do not wait for the training loop to finish
            # self.process.join()

    def _on_training_end(self) -> None:
        # Wait for the thread to terminate
        if self.process is not None:
            if self.verbose > 0:
                print("Waiting for training thread to terminate")
            self.process.join()
class RawStatisticsCallback(BaseCallback):
    """
    Callback used for logging raw episode data (return and episode length).
    """

    def __init__(self, verbose=0):
        super().__init__(verbose)
        # Custom counter to reports stats
        # (and avoid reporting multiple values for the same step)
        self._timesteps_counter = 0
        self._tensorboard_writer = None

    def _init_callback(self) -> None:
        assert self.logger is not None
        # Retrieve tensorboard writer to not flood the logger output
        for out_format in self.logger.output_formats:
            if isinstance(out_format, TensorBoardOutputFormat):
                self._tensorboard_writer = out_format
        assert self._tensorboard_writer is not None, "You must activate tensorboard logging when using RawStatisticsCallback"

    def _on_step(self) -> bool:
        for info in self.locals["infos"]:
            if "episode" not in info:
                continue
            episode_data = info["episode"]
            logger_dict = {
                "raw/rollouts/episodic_return": episode_data["r"],
                "raw/rollouts/episodic_length": episode_data["l"],
            }
            # Exclude these keys from all other outputs (tensorboard only)
            exclude_dict = dict.fromkeys(logger_dict.keys())
            self._timesteps_counter += episode_data["l"]
            self._tensorboard_writer.write(logger_dict, exclude_dict, self._timesteps_counter)
        return True
import argparse
import glob
import importlib
import os
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import gym as gym26
import gymnasium as gym
import stable_baselines3 as sb3 # noqa: F401
import torch as th # noqa: F401
import yaml
from gymnasium import spaces
from huggingface_hub import HfApi
from huggingface_sb3 import EnvironmentName, ModelName
from sb3_contrib import ARS, QRDQN, TQC, TRPO, RecurrentPPO
from stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3
from stable_baselines3.common.base_class import BaseAlgorithm
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.sb2_compat.rmsprop_tf_like import RMSpropTFLike # noqa: F401
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecEnv, VecFrameStack, VecNormalize
# For custom activation fn
from torch import nn as nn
# Mapping from the command-line algorithm name to its SB3 / SB3-Contrib class.
ALGOS: Dict[str, Type[BaseAlgorithm]] = {
    "a2c": A2C,
    "ddpg": DDPG,
    "dqn": DQN,
    "ppo": PPO,
    "sac": SAC,
    "td3": TD3,
    # SB3 Contrib,
    "ars": ARS,
    "qrdqn": QRDQN,
    "tqc": TQC,
    "trpo": TRPO,
    "ppo_lstm": RecurrentPPO,
}
def flatten_dict_observations(env: gym.Env) -> gym.Env:
    """Wrap ``env`` so its ``spaces.Dict`` observations are flattened."""
    assert isinstance(env.observation_space, spaces.Dict)
    flattened_env = gym.wrappers.FlattenObservation(env)
    return flattened_env
def get_wrapper_class(hyperparams: Dict[str, Any], key: str = "env_wrapper") -> Optional[Callable[[gym.Env], gym.Env]]:
    """
    Get one or more Gym environment wrapper class specified as a hyper parameter
    "env_wrapper".
    Works also for VecEnvWrapper with the key "vec_env_wrapper".

    e.g.
    env_wrapper: gym_minigrid.wrappers.FlatObsWrapper

    for multiple, specify a list:

    env_wrapper:
        - rl_zoo3.wrappers.PlotActionWrapper
        - rl_zoo3.wrappers.TimeFeatureWrapper

    :param hyperparams:
    :return: maybe a callable to wrap the environment
        with one or multiple gym.Wrapper
    """
    if key not in hyperparams.keys():
        return None

    wrapper_name = hyperparams.get(key)
    if wrapper_name is None:
        return None

    # Normalize to a list of wrapper specs
    wrapper_names = wrapper_name if isinstance(wrapper_name, list) else [wrapper_name]

    wrapper_classes = []
    wrapper_kwargs = []
    for wrapper_name in wrapper_names:
        # Handle keyword arguments: a spec may be {"full.path.Wrapper": {kwargs}}
        if isinstance(wrapper_name, dict):
            assert len(wrapper_name) == 1, (
                "You have an error in the formatting "
                f"of your YAML file near {wrapper_name}. "
                "You should check the indentation."
            )
            wrapper_name, kwargs = next(iter(wrapper_name.items()))
        else:
            kwargs = {}
        # Split "module.sub.Class" into module path and class name
        module_path, _, class_name = wrapper_name.rpartition(".")
        wrapper_module = importlib.import_module(module_path)
        wrapper_classes.append(getattr(wrapper_module, class_name))
        wrapper_kwargs.append(kwargs)

    def wrap_env(env: gym.Env) -> gym.Env:
        """
        Apply all the wrappers, in order, to ``env``.

        :param env:
        :return:
        """
        for wrapper_class, kwargs in zip(wrapper_classes, wrapper_kwargs):
            env = wrapper_class(env, **kwargs)
        return env

    return wrap_env
def get_class_by_name(name: str) -> Type:
    """
    Imports and returns a class given its fully qualified name, e.g. passing
    'stable_baselines3.common.callbacks.CheckpointCallback' returns the
    CheckpointCallback class.

    :param name: fully qualified name ("module.submodule.ClassName")
    :return: the class object
    """
    # Split "module.sub.Class" into module path and class name
    module_name, _, class_name = name.rpartition(".")
    module = importlib.import_module(module_name)
    return getattr(module, class_name)
def get_callback_list(hyperparams: Dict[str, Any]) -> List[BaseCallback]:
    """
    Get one or more Callback class specified as a hyper-parameter
    "callback".
    e.g.
    callback: stable_baselines3.common.callbacks.CheckpointCallback

    for multiple, specify a list:

    callback:
        - rl_zoo3.callbacks.PlotActionWrapper
        - stable_baselines3.common.callbacks.CheckpointCallback

    :param hyperparams:
    :return:
    """
    callbacks: List[BaseCallback] = []

    if "callback" not in hyperparams.keys():
        return callbacks

    callback_name = hyperparams.get("callback")
    if callback_name is None:
        return callbacks

    # Normalize to a list of callback specs
    callback_names = callback_name if isinstance(callback_name, list) else [callback_name]

    for callback_name in callback_names:
        # Handle keyword arguments: a spec may be {"full.path.Callback": {kwargs}}
        if isinstance(callback_name, dict):
            assert len(callback_name) == 1, (
                "You have an error in the formatting "
                f"of your YAML file near {callback_name}. "
                "You should check the indentation."
            )
            callback_name, kwargs = next(iter(callback_name.items()))
        else:
            kwargs = {}
        callback_class = get_class_by_name(callback_name)
        callbacks.append(callback_class(**kwargs))

    return callbacks
def create_test_env(
    env_id: str,
    n_envs: int = 1,
    stats_path: Optional[str] = None,
    seed: int = 0,
    log_dir: Optional[str] = None,
    should_render: bool = True,
    hyperparams: Optional[Dict[str, Any]] = None,
    env_kwargs: Optional[Dict[str, Any]] = None,
) -> VecEnv:
    """
    Create environment for testing a trained agent

    :param env_id:
    :param n_envs: number of processes
    :param stats_path: path to folder containing saved running averaged
    :param seed: Seed for random number generator
    :param log_dir: Where to log rewards
    :param should_render: For Pybullet env, display the GUI
    :param hyperparams: Additional hyperparams (ex: n_stack)
    :param env_kwargs: Optional keyword argument to pass to the env constructor
    :return: the (possibly wrapped and normalized) vectorized environment
    """
    # Avoid circular import
    from rl_zoo3.exp_manager import ExperimentManager

    # Create the environment and wrap it if necessary
    assert hyperparams is not None
    env_wrapper = get_wrapper_class(hyperparams)

    # NOTE(review): dead branch — the assert above already guarantees
    # hyperparams is not None, so the fallback to {} can never trigger.
    hyperparams = {} if hyperparams is None else hyperparams

    # The wrapper spec is consumed here, remove it so it is not
    # mistaken for a model hyperparameter later
    if "env_wrapper" in hyperparams.keys():
        del hyperparams["env_wrapper"]

    vec_env_kwargs: Dict[str, Any] = {}
    vec_env_cls = DummyVecEnv
    if n_envs > 1 or (ExperimentManager.is_bullet(env_id) and should_render):
        # HACK: force SubprocVecEnv for Bullet env
        # as Pybullet envs does not follow gym.render() interface
        vec_env_cls = SubprocVecEnv  # type: ignore[assignment]
        # start_method = 'spawn' for thread safe

    # Fix for gym 0.26, to keep old behavior
    env_kwargs = env_kwargs or {}
    env_kwargs = deepcopy(env_kwargs)
    if "render_mode" not in env_kwargs and should_render:
        env_kwargs.update(render_mode="human")

    # Make Pybullet compatible with gym 0.26
    if ExperimentManager.is_bullet(env_id):
        spec = gym26.spec(env_id)
        env_kwargs.update(dict(apply_api_compatibility=True))
    else:
        # Define make_env here so it works with subprocesses
        # when the registry was modified with `--gym-packages`
        # See https://github.com/HumanCompatibleAI/imitation/pull/160
        try:
            spec = gym.spec(env_id)  # type: ignore[assignment]
        except gym.error.NameNotFound:
            # Registered with gym 0.26
            spec = gym26.spec(env_id)

    def make_env(**kwargs) -> gym.Env:
        # Instantiate from the spec captured above
        env = spec.make(**kwargs)
        return env  # type: ignore[return-value]

    env = make_vec_env(
        make_env,
        n_envs=n_envs,
        monitor_dir=log_dir,
        seed=seed,
        wrapper_class=env_wrapper,
        env_kwargs=env_kwargs,
        vec_env_cls=vec_env_cls,
        vec_env_kwargs=vec_env_kwargs,
    )

    if "vec_env_wrapper" in hyperparams.keys():
        vec_env_wrapper = get_wrapper_class(hyperparams, "vec_env_wrapper")
        assert vec_env_wrapper is not None
        env = vec_env_wrapper(env)  # type: ignore[assignment, arg-type]
        del hyperparams["vec_env_wrapper"]

    # Load saved stats for normalizing input and rewards
    # And optionally stack frames
    if stats_path is not None:
        if hyperparams["normalize"]:
            print("Loading running average")
            print(f"with params: {hyperparams['normalize_kwargs']}")
            path_ = os.path.join(stats_path, "vecnormalize.pkl")
            if os.path.exists(path_):
                env = VecNormalize.load(path_, env)
                # Deactivate training and reward normalization
                env.training = False
                env.norm_reward = False
            else:
                raise ValueError(f"VecNormalize stats {path_} not found")

        n_stack = hyperparams.get("frame_stack", 0)
        if n_stack > 0:
            print(f"Stacking {n_stack} frames")
            env = VecFrameStack(env, n_stack)
    return env
def linear_schedule(initial_value: Union[float, str]) -> Callable[[float], float]:
    """
    Linear learning rate schedule.

    :param initial_value: (float or str)
    :return: (function)
    """
    # Force conversion to float (the value may come from a YAML string)
    initial_lr = float(initial_value)

    def func(progress_remaining: float) -> float:
        """
        Progress will decrease from 1 (beginning) to 0.

        :param progress_remaining: (float)
        :return: (float)
        """
        return initial_lr * progress_remaining

    return func
def get_trained_models(log_folder: str) -> Dict[str, Tuple[str, str]]:
    """
    :param log_folder: Root log folder
    :return: Dict representing the trained agents
    """
    trained_models = {}
    for algo in os.listdir(log_folder):
        algo_path = os.path.join(log_folder, algo)
        if not os.path.isdir(algo_path):
            continue
        for model_folder in os.listdir(algo_path):
            args_files = glob.glob(os.path.join(algo_path, model_folder, "*/args.yml"))
            # we expect only one sub-folder with an args.yml file
            if len(args_files) != 1:
                continue
            with open(args_files[0]) as fh:
                env_id = yaml.load(fh, Loader=yaml.UnsafeLoader)["env"]
            model_name = ModelName(algo, EnvironmentName(env_id))
            trained_models[model_name] = (algo, env_id)
    return trained_models
def get_hf_trained_models(organization: str = "sb3", check_filename: bool = False) -> Dict[str, Tuple[str, str]]:
    """
    Get pretrained models,
    available on the Hugginface hub for a given organization.

    :param organization: Huggingface organization
        Stable-Baselines (SB3) one is the default.
    :param check_filename: Perform additional check per model
        to be sure they match the RL Zoo convention.
        (this will slow down things as it requires one API call per model)
    :return: Dict representing the trained agents
    """
    api = HfApi()
    models = api.list_models(author=organization, cardData=True)

    trained_models = {}
    for model in models:
        # Try to extract algorithm and environment id from model card
        try:
            env_id = model.cardData["model-index"][0]["results"][0]["dataset"]["name"]
            algo = model.cardData["model-index"][0]["name"].lower()
            # RecurrentPPO alias is "ppo_lstm" in the rl zoo
            if algo == "recurrentppo":
                algo = "ppo_lstm"
        except (KeyError, IndexError):
            print(f"Skipping {model.modelId}")
            continue  # skip model if name env id or algo name could not be found

        env_name = EnvironmentName(env_id)
        model_name = ModelName(algo, env_name)

        # check if there is a model file in the repo
        # (one extra API call per model, hence the opt-in flag)
        if check_filename and not any(f.rfilename == model_name.filename for f in api.model_info(model.modelId).siblings):
            continue  # skip model if the repo contains no properly named model file

        trained_models[model_name] = (algo, env_id)
    return trained_models
def get_latest_run_id(log_path: str, env_name: EnvironmentName) -> int:
    """
    Returns the latest run number for the given log name and log path,
    by finding the greatest number in the directories.

    :param log_path: path to log folder
    :param env_name:
    :return: latest run number (0 when no matching run exists)
    """
    max_run_id = 0
    for path in glob.glob(os.path.join(log_path, env_name + "_[0-9]*")):
        # The run id is the suffix after the last "_"
        run_id = path.split("_")[-1]
        path_without_run_id = path[: -len(run_id) - 1]
        # Guard against names that merely share a prefix (e.g. "Env-v1_extra_2")
        if path_without_run_id.endswith(env_name) and run_id.isdigit():
            max_run_id = max(max_run_id, int(run_id))
    return max_run_id
def get_saved_hyperparams(
    stats_path: str,
    norm_reward: bool = False,
    test_mode: bool = False,
) -> Tuple[Dict[str, Any], Optional[str]]:
    """
    Retrieve saved hyperparameters given a path.
    Return empty dict and None if the path is not valid.

    :param stats_path:
    :param norm_reward:
    :param test_mode:
    :return:
    """
    hyperparams: Dict[str, Any] = {}
    if not os.path.isdir(stats_path):
        return hyperparams, None

    config_file = os.path.join(stats_path, "config.yml")
    if os.path.isfile(config_file):
        # Load saved hyperparameters
        with open(config_file) as f:
            hyperparams = yaml.load(f, Loader=yaml.UnsafeLoader)  # pytype: disable=module-attr
        hyperparams["normalize"] = hyperparams.get("normalize", False)
    else:
        # Legacy layout: presence of obs_rms.pkl implies normalization
        obs_rms_path = os.path.join(stats_path, "obs_rms.pkl")
        hyperparams["normalize"] = os.path.isfile(obs_rms_path)

    # Load normalization params
    if hyperparams["normalize"]:
        if isinstance(hyperparams["normalize"], str):
            # "normalize" may hold a python dict expression, e.g. "dict(norm_obs=True)"
            normalize_kwargs = eval(hyperparams["normalize"])
            if test_mode:
                normalize_kwargs["norm_reward"] = norm_reward
        else:
            normalize_kwargs = {"norm_obs": hyperparams["normalize"], "norm_reward": norm_reward}
        hyperparams["normalize_kwargs"] = normalize_kwargs
    return hyperparams, stats_path
class StoreDict(argparse.Action):
    """
    Custom argparse action for storing dict.

    In: args1:0.0 args2:"dict(a=1)"
    Out: {'args1': 0.0, arg2: dict(a=1)}
    """

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        self._nargs = nargs
        super().__init__(option_strings, dest, nargs=nargs, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        arg_dict = {}
        for argument in values:
            # Only the first ":" separates key from value,
            # the value itself may contain colons
            key, _, raw_value = argument.partition(":")
            # Evaluate the string as python code
            arg_dict[key] = eval(raw_value)
        setattr(namespace, self.dest, arg_dict)
def get_model_path(
    exp_id: int,
    folder: str,
    algo: str,
    env_name: EnvironmentName,
    load_best: bool = False,
    load_checkpoint: Optional[str] = None,
    load_last_checkpoint: bool = False,
) -> Tuple[str, str, str]:
    """
    Resolve the path of a saved model inside the RL Zoo folder layout.

    :param exp_id: Experiment id; 0 means "use the latest run"
    :param folder: Root log folder
    :param algo: Algorithm name
    :param env_name: Environment name
    :param load_best: Load "best_model.zip" instead of the final model
    :param load_checkpoint: Load the checkpoint saved at that number of steps
    :param load_last_checkpoint: Load the checkpoint with the most steps
    :return: (name prefix for videos/exports, path to the model file, log folder)
    :raises ValueError: when the requested model file does not exist
    """
    if exp_id == 0:
        exp_id = get_latest_run_id(os.path.join(folder, algo), env_name)
        print(f"Loading latest experiment, id={exp_id}")
    # Sanity checks
    if exp_id > 0:
        log_path = os.path.join(folder, algo, f"{env_name}_{exp_id}")
    else:
        log_path = os.path.join(folder, algo)

    assert os.path.isdir(log_path), f"The {log_path} folder was not found"

    model_name = ModelName(algo, env_name)

    def step_count(checkpoint_path: str) -> int:
        # path follow the pattern "rl_model_*_steps.zip", we count from the back to ignore any other _ in the path
        return int(checkpoint_path.split("_")[-2])

    if load_best:
        model_path = os.path.join(log_path, "best_model.zip")
        name_prefix = f"best-model-{model_name}"
    elif load_checkpoint is not None:
        model_path = os.path.join(log_path, f"rl_model_{load_checkpoint}_steps.zip")
        name_prefix = f"checkpoint-{load_checkpoint}-{model_name}"
    elif load_last_checkpoint:
        checkpoints = sorted(glob.glob(os.path.join(log_path, "rl_model_*_steps.zip")), key=step_count)
        if not checkpoints:
            raise ValueError(f"No checkpoint found for {algo} on {env_name}, path: {log_path}")
        model_path = checkpoints[-1]
        name_prefix = f"checkpoint-{step_count(model_path)}-{model_name}"
    else:
        # Default: load latest model
        model_path = os.path.join(log_path, f"{env_name}.zip")
        name_prefix = f"final-model-{model_name}"

    if not os.path.isfile(model_path):
        raise ValueError(f"No model found for {algo} on {env_name}, path: {model_path}")
    return name_prefix, model_path, log_path
import argparse
import os
import shutil
import zipfile
from pathlib import Path
from typing import Optional
from huggingface_sb3 import EnvironmentName, ModelName, ModelRepoId, load_from_hub
from requests.exceptions import HTTPError
from rl_zoo3 import ALGOS, get_latest_run_id
def download_from_hub(
    algo: str,
    env_name: EnvironmentName,
    exp_id: int,
    folder: str,
    organization: str,
    repo_name: Optional[str] = None,
    force: bool = False,
) -> None:
    """
    Try to load a model from the Huggingface hub
    and save it following the RL Zoo structure.
    Default repo name is {organization}/{algo}-{env_id}
    where repo_name = {algo}-{env_id}

    :param algo: Algorithm
    :param env_name: Environment name
    :param exp_id: Experiment id
    :param folder: Log folder
    :param organization: Huggingface organization
    :param repo_name: Overwrite default repository name
    :param force: Allow overwritting the folder
        if it already exists.
    """
    model_name = ModelName(algo, env_name)

    if repo_name is None:
        repo_name = model_name  # Note: model name is {algo}-{env_name}

    # Note: repo id is {organization}/{repo_name}
    repo_id = ModelRepoId(organization, repo_name)
    print(f"Downloading from https://huggingface.co/{repo_id}")

    checkpoint = load_from_hub(repo_id, model_name.filename)
    config_path = load_from_hub(repo_id, "config.yml")

    # If VecNormalize, download
    try:
        vec_normalize_stats = load_from_hub(repo_id, "vec_normalize.pkl")
    except HTTPError:
        print("No normalization file")
        vec_normalize_stats = None

    saved_args = load_from_hub(repo_id, "args.yml")
    env_kwargs = load_from_hub(repo_id, "env_kwargs.yml")
    train_eval_metrics = load_from_hub(repo_id, "train_eval_metrics.zip")

    if exp_id == 0:
        # Default: create a new experiment folder (latest run id + 1)
        exp_id = get_latest_run_id(os.path.join(folder, algo), env_name) + 1
    # Sanity checks
    if exp_id > 0:
        log_path = os.path.join(folder, algo, f"{env_name}_{exp_id}")
    else:
        log_path = os.path.join(folder, algo)

    # Check that the folder does not exist
    log_folder = Path(log_path)
    if log_folder.is_dir():
        if force:
            print(f"The folder {log_path} already exists, overwritting")
            # Delete the current one to avoid errors
            shutil.rmtree(log_path)
        else:
            raise ValueError(
                f"The folder {log_path} already exists, use --force to overwrite it, "
                "or choose '--exp-id 0' to create a new folder"
            )

    print(f"Saving to {log_path}")
    # Create folder structure
    os.makedirs(log_path, exist_ok=True)
    config_folder = os.path.join(log_path, env_name)
    os.makedirs(config_folder, exist_ok=True)

    # Copy config files and saved stats
    shutil.copy(checkpoint, os.path.join(log_path, f"{env_name}.zip"))
    shutil.copy(saved_args, os.path.join(config_folder, "args.yml"))
    shutil.copy(config_path, os.path.join(config_folder, "config.yml"))
    shutil.copy(env_kwargs, os.path.join(config_folder, "env_kwargs.yml"))
    if vec_normalize_stats is not None:
        shutil.copy(vec_normalize_stats, os.path.join(config_folder, "vecnormalize.pkl"))

    # Extract monitor file and evaluation file
    with zipfile.ZipFile(train_eval_metrics, "r") as zip_ref:
        zip_ref.extractall(log_path)
# Command-line entry point: download a trained agent from the Huggingface hub
# and store it following the RL Zoo folder layout.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--env", help="environment ID", type=EnvironmentName, required=True)
    parser.add_argument("-f", "--folder", help="Log folder", type=str, required=True)
    parser.add_argument("-orga", "--organization", help="Huggingface hub organization", default="sb3")
    parser.add_argument("-name", "--repo-name", help="Huggingface hub repository name, by default 'algo-env_id'", type=str)
    parser.add_argument("--algo", help="RL Algorithm", type=str, required=True, choices=list(ALGOS.keys()))
    parser.add_argument("--exp-id", help="Experiment ID (default: 0: latest, -1: no exp folder)", default=0, type=int)
    parser.add_argument("--verbose", help="Verbose mode (0: no output, 1: INFO)", default=1, type=int)
    parser.add_argument(
        "--force", action="store_true", default=False, help="Allow overwritting exp folder if it already exist"
    )
    args = parser.parse_args()

    download_from_hub(
        algo=args.algo,
        env_name=args.env,
        exp_id=args.exp_id,
        folder=args.folder,
        organization=args.organization,
        repo_name=args.repo_name,
        force=args.force,
    )
import argparse
import os
import re
import shutil
import subprocess
from copy import deepcopy
from huggingface_sb3 import EnvironmentName
from rl_zoo3.utils import ALGOS, get_latest_run_id
# Command-line entry point: record a video of every saved model (checkpoints,
# best model, final model) of an experiment, stitch them into one annotated
# video (and optionally a gif) using ffmpeg.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--env", help="environment ID", type=EnvironmentName, default="CartPole-v1")
    parser.add_argument("-f", "--folder", help="Log folder", type=str, default="rl-trained-agents")
    parser.add_argument("-o", "--output-folder", help="Output folder", type=str)
    parser.add_argument("--algo", help="RL Algorithm", default="ppo", type=str, required=False, choices=list(ALGOS.keys()))
    parser.add_argument("-n", "--n-timesteps", help="number of timesteps", default=1000, type=int)
    parser.add_argument("--n-envs", help="number of environments", default=1, type=int)
    parser.add_argument("--deterministic", action="store_true", default=False, help="Use deterministic actions")
    parser.add_argument("-g", "--gif", action="store_true", default=False, help="Convert final video to gif")
    parser.add_argument("--seed", help="Random generator seed", type=int, default=0)
    parser.add_argument("--exp-id", help="Experiment ID (default: 0: latest, -1: no exp folder)", default=0, type=int)
    args = parser.parse_args()

    env_name: EnvironmentName = args.env
    algo = args.algo
    folder = args.folder
    n_timesteps = args.n_timesteps
    n_envs = args.n_envs
    video_folder = args.output_folder
    seed = args.seed
    deterministic = args.deterministic
    convert_to_gif = args.gif

    if args.exp_id == 0:
        args.exp_id = get_latest_run_id(os.path.join(folder, algo), env_name)
        print(f"Loading latest experiment, id={args.exp_id}")

    # Sanity checks
    if args.exp_id > 0:
        log_path = os.path.join(folder, algo, f"{env_name}_{args.exp_id}")
    else:
        log_path = os.path.join(folder, algo)

    assert os.path.isdir(log_path), f"The {log_path} folder was not found"

    if video_folder is None:
        video_folder = os.path.abspath(os.path.join(log_path, "videos"))
    # Start from a clean output folder
    shutil.rmtree(video_folder, ignore_errors=True)
    os.makedirs(video_folder, exist_ok=True)

    # record a video of every model: collect checkpoint step counts
    models_dir_entries = [dir_ent.name for dir_ent in os.scandir(log_path) if dir_ent.is_file()]
    checkpoints_names = list(filter(lambda x: x.startswith("rl_model_"), models_dir_entries))
    checkpoints = list(map(lambda x: int(re.findall(r"\d+", x)[0]), checkpoints_names))
    checkpoints.sort()

    args_final_model = [
        "--env",
        env_name.gym_id,
        "--algo",
        algo,
        "--exp-id",
        str(args.exp_id),
        "-f",
        folder,
        "-o",
        video_folder,
        "--n-timesteps",
        str(n_timesteps),
        "--n-envs",
        str(n_envs),
        "--seed",
        str(seed),
        # Disable rendering to generate videos faster
        "--no-render",
    ]

    # BUG FIX: `deterministic` is a bool coming from `store_true` (default
    # False), so the previous check `if deterministic is not None:` was
    # always true and the flag was forwarded even when not requested.
    if deterministic:
        args_final_model.append("--deterministic")

    if os.path.exists(os.path.join(log_path, f"{env_name}.zip")):
        return_code = subprocess.call(["python", "-m", "rl_zoo3.record_video", *args_final_model])
        assert return_code == 0, "Failed to record the final model"

    if os.path.exists(os.path.join(log_path, "best_model.zip")):
        args_best_model = [*args_final_model, "--load-best"]
        return_code = subprocess.call(["python", "-m", "rl_zoo3.record_video", *args_best_model])
        assert return_code == 0, "Failed to record the best model"

    # Record one video per saved checkpoint, reusing the same argument list
    args_checkpoint = [*args_final_model, "--load-checkpoint"]
    args_checkpoint.append("0")
    for checkpoint in checkpoints:
        args_checkpoint[-1] = str(checkpoint)
        return_code = subprocess.call(["python", "-m", "rl_zoo3.record_video", *args_checkpoint])
        assert return_code == 0, f"Failed to record the {checkpoint} checkpoint model"

    # add text to each video
    episode_videos_names = [dir_ent.name for dir_ent in os.scandir(video_folder) if dir_ent.name.endswith(".mp4")]
    checkpoints_videos_names = list(filter(lambda x: x.startswith("checkpoint"), episode_videos_names))

    # sort checkpoints by the number of steps
    def get_number_from_checkpoint_filename(filename: str) -> int:
        # File names look like "checkpoint-<steps>-..."
        match = re.search("checkpoint-(.*?)-", filename)
        number = "0"
        if match is not None:
            number = match.group(1)
        return int(number)

    checkpoints_videos_names.sort(key=get_number_from_checkpoint_filename)

    # Final order: checkpoints (by steps), then final model, then best model
    final_model_video_name = list(filter(lambda x: x.startswith("final-model"), episode_videos_names))
    best_model_video_name = list(filter(lambda x: x.startswith("best-model"), episode_videos_names))
    episode_videos_names = checkpoints_videos_names + final_model_video_name + best_model_video_name
    episode_videos_path = [os.path.join(video_folder, video) for video in episode_videos_names]

    # the text displayed will be the first two words of the file
    def get_text_from_video_filename(filename: str) -> str:
        match = re.search(r"^(\w+)-(\w+)", filename)
        text = ""
        if match is not None:
            text = f"{match.group(1)} {match.group(2)}"
        return text

    episode_videos_names = list(map(get_text_from_video_filename, episode_videos_names))

    # In some cases, ffmpeg needs a tmp file
    # https://stackoverflow.com/questions/28877049/issue-with-overwriting-file-while-using-ffmpeg-for-converting
    tmp_videos_path = deepcopy(episode_videos_path)
    tmp_videos_path = [path_[:-4] + "_with_text" + ".mp4" for path_ in tmp_videos_path]
    for i in range(len(episode_videos_path)):
        # Overlay the model label at the top of each video
        ffmpeg_command_to_add_text = (
            f'ffmpeg -i {episode_videos_path[i]} -vf drawtext="'
            f"text='{episode_videos_names[i]}': fontcolor=white: fontsize=24: box=1: boxcolor=black@0.5:"
            f'boxborderw=5: x=(w-text_w)/2: y=12" -codec:a copy {tmp_videos_path[i]} -y -hide_banner -loglevel error'
        )
        os.system(ffmpeg_command_to_add_text)

    # join videos together and convert to gif if needed
    ffmpeg_text_file = os.path.join(video_folder, "tmp.txt")
    with open(ffmpeg_text_file, "a") as file:
        for video_path in tmp_videos_path:
            file.write(f"file {video_path}\n")

    final_video_path = os.path.abspath(os.path.join(video_folder, "training.mp4"))
    os.system(f"ffmpeg -f concat -safe 0 -i {ffmpeg_text_file} -c copy {final_video_path} -hide_banner -loglevel error")
    os.remove(ffmpeg_text_file)
    print(f"Saving video to {final_video_path}")

    if convert_to_gif:
        final_gif_path = os.path.abspath(os.path.join(video_folder, "training.gif"))
        os.system(f"ffmpeg -i {final_video_path} -vf fps=10 {final_gif_path} -hide_banner -loglevel error")
        print(f"Saving gif to {final_gif_path}")

    # Remove tmp video files
    for video_path in tmp_videos_path:
        os.remove(video_path)
import argparse
import os
import numpy as np
import seaborn
from matplotlib import pyplot as plt
from stable_baselines3.common.monitor import LoadMonitorResultsError, load_results
from stable_baselines3.common.results_plotter import X_EPISODES, X_TIMESTEPS, X_WALLTIME, ts2xy, window_func
# Activate seaborn
seaborn.set()
def plot_train():
    """Gather monitor logs and plot a training curve (reward/success/length).

    Command-line driven: selects run directories under ``<exp_folder>/<algo>``
    whose name contains one of the requested env ids, loads their monitor
    files, and plots a rolling-window mean of the chosen statistic.
    """
    parser = argparse.ArgumentParser("Gather results, plot training reward/success")
    parser.add_argument("-a", "--algo", help="Algorithm to include", type=str, required=True)
    parser.add_argument("-e", "--env", help="Environment(s) to include", nargs="+", type=str, required=True)
    parser.add_argument("-f", "--exp-folder", help="Folders to include", type=str, required=True)
    parser.add_argument("--figsize", help="Figure size, width, height in inches.", nargs=2, type=int, default=[6.4, 4.8])
    parser.add_argument("--fontsize", help="Font size", type=int, default=14)
    parser.add_argument("-max", "--max-timesteps", help="Max number of timesteps to display", type=int)
    parser.add_argument("-x", "--x-axis", help="X-axis", choices=["steps", "episodes", "time"], type=str, default="steps")
    parser.add_argument("-y", "--y-axis", help="Y-axis", choices=["success", "reward", "length"], type=str, default="reward")
    parser.add_argument("-w", "--episode-window", help="Rolling window size", type=int, default=100)
    args = parser.parse_args()

    algo = args.algo
    envs = args.env
    log_path = os.path.join(args.exp_folder, algo)

    # Map CLI choices onto results_plotter constants / monitor column names.
    x_axis = {
        "steps": X_TIMESTEPS,
        "episodes": X_EPISODES,
        "time": X_WALLTIME,
    }[args.x_axis]
    x_label = {
        "steps": "Timesteps",
        "episodes": "Episodes",
        "time": "Walltime (in hours)",
    }[args.x_axis]

    y_axis = {
        "success": "is_success",
        "reward": "r",
        "length": "l",
    }[args.y_axis]
    y_label = {
        "success": "Training Success Rate",
        "reward": "Training Episodic Reward",
        "length": "Training Episode Length",
    }[args.y_axis]

    dirs = []
    for env in envs:
        # Sort by last modification; close the scandir iterator promptly.
        with os.scandir(log_path) as scan_it:
            entries = sorted(scan_it, key=lambda entry: entry.stat().st_mtime)
        dirs.extend(entry.path for entry in entries if env in entry.name and entry.is_dir())

    plt.figure(y_label, figsize=args.figsize)
    plt.title(y_label, fontsize=args.fontsize)
    plt.xlabel(f"{x_label}", fontsize=args.fontsize)
    plt.ylabel(y_label, fontsize=args.fontsize)
    for folder in dirs:
        try:
            data_frame = load_results(folder)
        except LoadMonitorResultsError:
            continue
        if args.max_timesteps is not None:
            # Truncate to the first max_timesteps env steps (cumulative episode lengths).
            data_frame = data_frame[data_frame.l.cumsum() <= args.max_timesteps]
        try:
            y = np.array(data_frame[y_axis])
        except KeyError:
            print(f"No data available for {folder}")
            continue
        x, _ = ts2xy(data_frame, x_axis)

        # Do not plot the smoothed curve at all if the timeseries is shorter than window size.
        if x.shape[0] >= args.episode_window:
            # Compute and plot rolling mean with window of size args.episode_window
            # os.path.basename is portable; the previous folder.split("/")[-1]
            # broke on Windows path separators.
            x, y_mean = window_func(x, y, args.episode_window, np.mean)
            plt.plot(x, y_mean, linewidth=2, label=os.path.basename(folder))

    plt.legend()
    plt.tight_layout()
    plt.show()
# Allow running this module directly as a script.
if __name__ == "__main__":
    plot_train()
<div align="center">
<!-- <img src="https://github.com/kaist-silab/rl4co/assets/34462374/249462ea-b15d-4358-8a11-6508903dae58" style="width:40%"> -->
<img src="https://github.com/kaist-silab/rl4co/assets/48984123/01a547b2-9722-4540-b0e1-9c12af094b15" style="width:40%">
</br></br>
<a href="https://pytorch.org/get-started/locally/"><img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-ee4c2c?logo=pytorch&logoColor=white"></a>
<a href="https://pytorchlightning.ai/"><img alt="Lightning" src="https://img.shields.io/badge/-Lightning-792ee5?logo=pytorchlightning&logoColor=white"></a>
<a href="https://github.com/pytorch/rl"><img alt="base: TorchRL" src="https://img.shields.io/badge/base-TorchRL-red">
<a href="https://hydra.cc/"><img alt="config: Hydra" src="https://img.shields.io/badge/config-Hydra-89b8cd"></a> [](https://github.com/psf/black) [](https://join.slack.com/t/rl4co/shared_invite/zt-1ytz2c1v4-0IkQ8NQH4TRXIX8PrRmDhQ)
 <a href="https://colab.research.google.com/github/kaist-silab/rl4co/blob/main/notebooks/1-quickstart.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> [](https://pypi.org/project/rl4co)
[](https://github.com/kaist-silab/rl4co/actions/workflows/tests.yml)
<!--  -->
[**Documentation**](https://rl4co.readthedocs.io/) | [**Getting Started**](#getting-started) | [**Usage**](#usage) | [**Contributing**](#contributing) | [**Paper**](https://arxiv.org/abs/2306.17100) | [**Citation**](#cite-us)
</div>
---
An extensive benchmark of Reinforcement Learning (RL) algorithms for Combinatorial Optimization (CO). Our goal is to provide a unified framework for RL-based CO algorithms, and to facilitate reproducible research in this field, decoupling the science from the engineering.
RL4CO is built upon:
- [TorchRL](https://github.com/pytorch/rl): official PyTorch framework for RL algorithms and vectorized environments on GPUs
- [TensorDict](https://github.com/pytorch-labs/tensordict): a library to easily handle heterogeneous data such as states, actions and rewards
- [PyTorch Lightning](https://github.com/Lightning-AI/lightning): a lightweight PyTorch wrapper for high-performance AI research
- [Hydra](https://github.com/facebookresearch/hydra): a framework for elegantly configuring complex applications

## Getting started
<a href="https://colab.research.google.com/github/kaist-silab/rl4co/blob/main/notebooks/1-quickstart.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
RL4CO is now available for installation on `pip`!
```bash
pip install rl4co
```
### Local install and development
If you want to develop RL4CO or access the latest builds, we recommend you to install it locally with `pip` in editable mode:
```bash
git clone https://github.com/kaist-silab/rl4co && cd rl4co
pip install -e .
```
<details>
<summary>[Optional] Automatically install PyTorch with correct CUDA version</summary>
These commands will [automatically install](https://github.com/pmeier/light-the-torch) PyTorch with the right GPU version for your system:
```bash
pip install light-the-torch
python3 -m light_the_torch install -r --upgrade torch
```
> Note: `conda` is also a good candidate for hassle-free installation of PyTorch: check out the [PyTorch website](https://pytorch.org/get-started/locally/) for more details.
</details>
To get started, we recommend checking out our [quickstart notebook](notebooks/1-quickstart.ipynb) or the [minimalistic example](#minimalistic-example) below.
## Usage
Train model with default configuration (AM on TSP environment):
```bash
python run.py
```
<details>
<summary>Change experiment</summary>
Train model with chosen experiment configuration from [configs/experiment/](configs/experiment/) (e.g. routing/am, and environment with 42 cities)
```bash
python run.py experiment=routing/am env.num_loc=42
```
</details>
<details>
<summary>Disable logging</summary>
```bash
python run.py experiment=routing/am logger=none '~callbacks.learning_rate_monitor'
```
Note that `~` is used to disable a callback that would need a logger.
</details>
<details>
<summary>Create a sweep over hyperparameters (-m for multirun)</summary>
```bash
python run.py -m experiment=routing/am train.optimizer.lr=1e-3,1e-4,1e-5
```
</details>
### Minimalistic Example
Here is a minimalistic example training the Attention Model with greedy rollout baseline on TSP in less than 30 lines of code:
```python
from rl4co.envs import TSPEnv
from rl4co.models import AttentionModel
from rl4co.utils import RL4COTrainer
# Environment, Model, and Lightning Module
env = TSPEnv(num_loc=20)
model = AttentionModel(env,
baseline="rollout",
train_data_size=100_000,
test_data_size=10_000,
optimizer_kwargs={'lr': 1e-4}
)
# Trainer
trainer = RL4COTrainer(max_epochs=3)
# Fit the model
trainer.fit(model)
# Test the model
trainer.test(model)
```
### Testing
Run tests with `pytest` from the root directory:
```bash
pytest tests
```
## Contributing
[](https://join.slack.com/t/rl4co/shared_invite/zt-1ytz2c1v4-0IkQ8NQH4TRXIX8PrRmDhQ)
Have a suggestion, request, or found a bug? Feel free to [open an issue](https://github.com/kaist-silab/rl4co/issues) or [submit a pull request](https://github.com/kaist-silab/rl4co/pulls).
If you would like to contribute, please check out our contribution guidelines [here](.github/CONTRIBUTING.md). We welcome and look forward to all contributions to RL4CO!
We are also on [Slack](https://join.slack.com/t/rl4co/shared_invite/zt-1ytz2c1v4-0IkQ8NQH4TRXIX8PrRmDhQ) if you have any questions or would like to discuss RL4CO with us. We are open to collaborations and would love to hear from you 🚀
### Contributors
<a href="https://github.com/kaist-silab/rl4co/graphs/contributors">
<img src="https://contrib.rocks/image?repo=kaist-silab/rl4co" />
</a>
## Cite us
If you find RL4CO valuable for your research or applied projects:
```bibtex
@article{berto2023rl4co,
title = {{RL4CO}: an Extensive Reinforcement Learning for Combinatorial Optimization Benchmark},
author={Federico Berto and Chuanbo Hua and Junyoung Park and Minsu Kim and Hyeonah Kim and Jiwoo Son and Haeyeon Kim and Joungho Kim and Jinkyoo Park},
journal={arXiv preprint arXiv:2306.17100},
year={2023},
url = {https://github.com/kaist-silab/rl4co}
}
```
| /rl4co-0.2.0.tar.gz/rl4co-0.2.0/README.md | 0.639511 | 0.955569 | README.md | pypi |
import pandas as pd
from ..utils import getList, getFactor
__all__=[
"list",
"query",
"basic_derivation",
"valuation_estimation",
"reversal",
"sentiment",
"power_volume",
"price_volume",
"momentum",
"volatility_value",
"earning_expectation",
"solvency",
"operation_capacity",
"capital_structure",
"per_share_indicators",
"revenue_quality",
"cash_flow",
"historical_growth",
"earning",
]
GROUP="factor"
def list():
    """ Return the list of available factor groups.

    NOTE: deliberately named ``list`` (public API, exported in ``__all__``);
    it shadows the builtin within this module.

    Args:
        None

    Returns:
        (status, ret)
    """
    return getList(GROUP)
def query(key, *, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
    """ Generic factor query interface.

    Args:
        key: factor group key
        fields: factor fields to return; None returns all fields
        isymbol: index symbol
        stocks: list of stock symbols
        startdate: start date
        enddate: end date
        period: period
    Returns:
        (status, ret)
    """
    return getFactor( GROUP, key, factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def basic_derivation(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
    """ Basic derived data.

    Fetch the "basic_derivation" factor group.

    Args:
        fields: derived-data fields to return; None returns all fields
        isymbol: index symbol
        stocks: list of stock symbols
        startdate: start date
        enddate: end date
        period: period
    Returns:
        (status, ret)
    """
    return getFactor( GROUP, "basic_derivation", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def valuation_estimation(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
    """ Valuation factors.

    Fetch the "valuation_estimation" factor group.

    Args:
        fields: factor fields to return; None returns all fields
        isymbol: index symbol
        stocks: list of stock symbols
        startdate: start date
        enddate: end date
        period: period
    Returns:
        (status, ret)
    """
    return getFactor( GROUP, "valuation_estimation", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def reversal(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 反转指标
反转指标
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "reversal", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def sentiment(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 情绪指标
情绪指标
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "sentiment", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def power_volume(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 量能指标
量能指标
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "power_volume", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def price_volume(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 量价指标
量价指标
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "price_volume", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def momentum(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 动量指标
动量指标
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "momentum", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def volatility_value(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 收益风险
收益风险
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "volatility_value", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def earning_expectation(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 盈利预测
盈利预测
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "earning_expectation", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def solvency(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 偿债能力
偿债能力
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "solvency", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def operation_capacity(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 营运能力
营运能力
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "operation_capacity", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def capital_structure(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 资本结构
资本结构
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "capital_structure", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def per_share_indicators(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 每股指标
每股指标
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "per_share_indicators", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def revenue_quality(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 收益质量
收益质量
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "revenue_quality", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def cash_flow(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 现金流量
现金流量
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "cash_flow", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def historical_growth(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 历史成长
历史成长
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "historical_growth", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def earning(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 盈利能力
盈利能力
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "earning", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period) | /factor/factor.py | 0.655557 | 0.380126 | factor.py | pypi |
import pandas as pd
from ..utils import getList, getFactor
__all__=[
"list",
"query",
"basic_derivation",
"valuation_estimation",
"reversal",
"sentiment",
"power_volume",
"price_volume",
"momentum",
"volatility_value",
"earning_expectation",
"solvency",
"operation_capacity",
"capital_structure",
"per_share_indicators",
"revenue_quality",
"cash_flow",
"historical_growth",
"earning",
]
GROUP="factor/standard"
def list():
    """ Return the list of available standardized factor groups.

    NOTE: deliberately named ``list`` (public API, exported in ``__all__``);
    it shadows the builtin within this module.

    Args:
        None

    Returns:
        (status, ret)
    """
    return getList(GROUP)
def query(key, *, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
    """ Generic standardized-factor query interface.

    Args:
        key: factor group key
        fields: factor fields to return; None returns all fields
        isymbol: index symbol
        stocks: list of stock symbols
        startdate: start date
        enddate: end date
        period: period
    Returns:
        (status, ret)
    """
    return getFactor( GROUP, key, factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def basic_derivation(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 基础衍生
获取基础衍生
Args:
fields: 衍生数据字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "basic_derivation", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def valuation_estimation(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 估值因子
估值因子
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "valuation_estimation", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def reversal(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 反转指标
反转指标
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "reversal", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def sentiment(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 情绪指标
情绪指标
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "sentiment", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def power_volume(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 量能指标
量能指标
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "power_volume", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def price_volume(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 量价指标
量价指标
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "price_volume", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def momentum(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 动量指标
动量指标
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "momentum", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def volatility_value(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 收益风险
收益风险
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "volatility_value", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def earning_expectation(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 盈利预测
盈利预测
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "earning_expectation", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def solvency(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 偿债能力
偿债能力
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "solvency", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def operation_capacity(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 营运能力
营运能力
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "operation_capacity", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def capital_structure(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 资本结构
资本结构
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "capital_structure", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def per_share_indicators(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 每股指标
每股指标
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "per_share_indicators", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def revenue_quality(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 收益质量
收益质量
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "revenue_quality", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def cash_flow(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 现金流量
现金流量
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "cash_flow", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def historical_growth(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 历史成长
历史成长
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "historical_growth", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def earning(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 盈利能力
盈利能力
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "earning", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period) | /factor/std.py | 0.671794 | 0.379005 | std.py | pypi |
import pandas as pd
from ...utils import getList, getFactor
__all__=[
"list",
"query",
"rl_characteristic",
"dx_securities",
"tf_securities",
"inhouse",
]
GROUP="factor/vip"
def list():
    """ Return the list of available VIP factor groups.

    NOTE: deliberately named ``list`` (public API, exported in ``__all__``);
    it shadows the builtin within this module.

    Args:
        None

    Returns:
        (status, ret)
    """
    return getList(GROUP)
def query(key, *, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
    """ Generic VIP-factor query interface.

    Args:
        key: factor group key
        fields: factor fields to return; None returns all fields
        isymbol: index symbol
        stocks: list of stock symbols
        startdate: start date
        enddate: end date
        period: period
    Returns:
        (status, ret)
    """
    return getFactor( GROUP, key, factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def rl_characteristic(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
    """ RongLiang proprietary characteristic factors.

    Fetch the "rl_characteristic" factor group.

    Args:
        fields: factor fields to return; None returns all fields
        isymbol: index symbol
        stocks: list of stock symbols
        startdate: start date
        enddate: end date
        period: period
    Returns:
        (status, ret)
    """
    return getFactor( GROUP, "rl_characteristic", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def dx_securities(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 东兴因子
东兴因子
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "dx_securities", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def tf_securities(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 天风因子
天风因子
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "tf_securities", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def inhouse(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 收费因子
收费因子
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "inhouse", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period) | /factor/vip/vip.py | 0.63307 | 0.250357 | vip.py | pypi |
import pandas as pd
from ...utils import getList,getFactor
__all__=[
"list",
"query",
"rl_characteristic",
"dx_securities",
"tf_securities",
"inhouse",
]
GROUP="factor/vip/standard"
def list():
    """ Return the list of available standardized VIP factor groups.

    NOTE: deliberately named ``list`` (public API, exported in ``__all__``);
    it shadows the builtin within this module.

    Args:
        None

    Returns:
        (status, ret)
    """
    return getList(GROUP)
def query(key, *, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
    """ Generic standardized-VIP-factor query interface.

    Args:
        key: factor group key
        fields: factor fields to return; None returns all fields
        isymbol: index symbol
        stocks: list of stock symbols
        startdate: start date
        enddate: end date
        period: period
    Returns:
        (status, ret)
    """
    return getFactor( GROUP, key, factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def rl_characteristic(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 融量特色因子
融量特色因子
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "rl_characteristic", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def dx_securities(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 东兴因子
东兴因子
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "dx_securities", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def tf_securities(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 天风因子
天风因子
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "tf_securities", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period)
def inhouse(*, fields=None,isymbol=None,stocks=None,startdate=None,enddate=None,period=None):
""" 收费因子
收费因子
Args:
fields: 因子数据返回字段,None表示返回所有字段
isymbol: 指数
stocks: 个股列表
startdate: 起始日期
enddate: 终止日期
period: 周期
Returns:
(status,ret)
"""
return getFactor( GROUP, "inhouse", factors=fields, isymbol=isymbol,stocks=stocks,startdate=startdate,enddate=enddate,period=period) | /factor/vip/vip_std.py | 0.634543 | 0.247948 | vip_std.py | pypi |
# 1. Make OpenAI Gym like environment
- This example uses DDPG(Deep Deterministic Policy Gradient) with pybullet_env
- Prerequisites for pybullet_envs: OpenAI Gym and pybullet.
pip install gym
pip install pybullet
```
import gym
import pybullet_envs
import time
env = gym.make("InvertedPendulumBulletEnv-v0")
env.render(mode="human")
print('action space:',env.action_space)
print('action space high,low :',env.action_space.high,env.action_space.low)
print('state space:',env.observation_space)
print('state space high,low :',env.observation_space.high,env.observation_space.low)
```
# 2. Import RL Algorithm
The base agent needs a core algorithm and an environment to interact with.
```
from rlagent.agents import ExperienceReplayAgent
from rlagent.algorithms import DDPG
state_shape = env.observation_space.shape
action_shape = env.action_space.shape
ddpg = DDPG(state_shape, action_shape, tau=0.01, actor_lr=0.0001, critic_lr=0.001,
action_noise=True, add_memory=True)
tf_agent = ExperienceReplayAgent(agent=ddpg, env=env, save_steps=10000, model_dir='model')
tf_agent.agent.summary()
```
# 3. Train
```
tf_agent.train(max_training_steps=20000)
```
# 4. Check Trained Model
```
import gym
import pybullet_envs
env = gym.make("InvertedPendulumBulletEnv-v0")
env.render(mode="human")
from rlagent.agents import ExperienceReplayAgent
from rlagent.algorithms import DDPG
state_shape = env.observation_space.shape
action_shape = env.action_space.shape
ddpg = DDPG(state_shape, action_shape, tau=0.01, actor_lr=0.0001, critic_lr=0.001,
action_noise=False, add_memory=False)
tf_agent = ExperienceReplayAgent(agent=ddpg, env=env, save_steps=10000, model_dir='model')
tf_agent.load_model(model_path='model/model-19999')
tf_agent.act()
```
| /rlagent-0.1.4.tar.gz/rlagent-0.1.4/tutorial/rlagent_tutorial_1_getting_started.ipynb | 0.406862 | 0.851953 | rlagent_tutorial_1_getting_started.ipynb | pypi |
import gym
import pybullet_envs
import tensorflow as tf
import numpy as np
import argparse
from rlagent.models import ActorCriticFF
from rlagent.agents import NStepMPIAgentFF
from rlagent.memories import NStepMemory
from rlagent.algorithms import A2C
def _str2bool(value):
    """Parse a CLI boolean string.

    argparse's ``type=bool`` is a well-known trap: ``bool("False")`` is True
    because any non-empty string is truthy, so ``--render False`` would
    enable rendering. This parser keeps the ``--render True/False`` CLI
    shape while interpreting the string correctly.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ("true", "1", "yes", "y", "t")


parser = argparse.ArgumentParser()
parser.add_argument('-e', '--env', required=True, help='Environment name for gym.make')
parser.add_argument('-t', '--training_steps', type=int, default=20000, help='Training steps per process')
parser.add_argument('-s', '--save_steps', type=int, default=10000, help='Per steps to save model')
parser.add_argument('-v', '--verbosity', default='INFO', help='Verbosity: choose from {"DEBUG", "ERROR", "FATAL", "INFO", "WARN"}')
parser.add_argument('--lr', type=float, default=0.0001, help='Learning rate')
parser.add_argument('--beta', type=float, default=0.0001, help='Beta for action entropy')
parser.add_argument('--t_max', type=int, default=5, help='Env steps per train')
parser.add_argument('--render', type=_str2bool, default=False, help='if render env: True, otherwise: False')
def main():
    """Parse CLI args, build the env and A2C agent, then train with MPI workers."""
    args = parser.parse_args()

    # Map the CLI verbosity string onto tf.logging level constants.
    verbosity = {"DEBUG": tf.logging.DEBUG,
                 "ERROR": tf.logging.ERROR,
                 "FATAL": tf.logging.FATAL,
                 "INFO": tf.logging.INFO,
                 "WARN": tf.logging.WARN}
    tf.logging.set_verbosity(verbosity[args.verbosity])

    env = gym.make(args.env)
    state_shape = env.observation_space.shape
    # Derive the action shape from the space type: Discrete spaces expose
    # `n` (one-hot width), Box spaces expose `shape` directly.
    # isinstance (rather than type(...) ==) also accepts subclasses.
    if isinstance(env.action_space, gym.spaces.Discrete):
        action_shape = (env.action_space.n,)
        discrete_action = True
    elif isinstance(env.action_space, gym.spaces.Box):
        action_shape = env.action_space.shape
        discrete_action = False
    else:
        raise NotImplementedError('Needs a custom function for action shape...')

    agent = A2C(state_shape, action_shape,
                discrete_action=discrete_action,
                lr=args.lr, beta=args.beta, t_max=args.t_max,
                actor_critic_model=ActorCriticFF,
                memory_model=NStepMemory)
    tf_agent = NStepMPIAgentFF(agent=agent, env=env, save_steps=args.save_steps)
    tf_agent.train(training_steps=args.training_steps, env_render=args.render)
# Allow running this module directly as a script (e.g. under mpirun).
if __name__ == "__main__":
    main()
import curses
from libcurses.border import Border
from libcurses.bw import BorderedWindow
class WindowStack:
    """Vertical stack of windows."""

    def __init__(self, neighbor_left, padding_y):
        """Create a vertical stack of windows with 'border-collapse: collapse'.

        A visual stack, not a push-pop thing... think smokestack or stovepipe.

        Args:
            neighbor_left: window-like object to the left (must expose
                begin_x/begin_y/ncols/nlines); the stack shares (overlaps)
                its right border column.
            padding_y: vertical padding used when sizing the final window.
        """
        self.neighbor_left = neighbor_left
        self.padding_y = padding_y
        # Overlap our left border column with the neighbor's right border.
        self.begin_x = self.neighbor_left.begin_x + self.neighbor_left.ncols - 1
        # BorderedWindow instances, top to bottom.
        self.windows = []

    def redraw(self):
        """Redraw stack."""
        for w in self.windows:
            w.redraw()

    def refresh(self):
        """Refresh stack."""
        for w in self.windows:
            w.refresh()

    def get_border(self, loc):
        """Return the appropriate Border for a window based on its location in the stack.

        Tee characters are used where this window's box meets the left
        neighbor's border or an adjacent stacked window's border.
        """
        first = loc == 0
        final = loc == len(self.windows) - 1
        # pylint: disable=no-else-return
        if first:
            if not final:
                # first with more to come
                return Border(tl=curses.ACS_TTEE, bl=curses.ACS_LTEE, br=curses.ACS_RTEE)
            else:
                # first and final
                return Border(tl=curses.ACS_TTEE, bl=curses.ACS_BTEE)
        else:
            if not final:
                # additional with more to come
                return Border(
                    tl=curses.ACS_LTEE,
                    tr=curses.ACS_RTEE,
                    bl=curses.ACS_LTEE,
                    br=curses.ACS_RTEE,
                )
            else:
                # additional and final
                return Border(tl=curses.ACS_LTEE, tr=curses.ACS_RTEE, bl=curses.ACS_BTEE)

    def append(self, nlines, ncols):
        """Create window at bottom of stack.

        A falsy `nlines` marks the final window: it is sized to consume
        the remaining vertical space (minus `padding_y`).
        """
        first = len(self.windows) == 0
        final = not nlines
        if first:
            # join first window to neighbor on left, aligning tops, overlapping
            # my left and his right sides.
            begin_y = self.neighbor_left.begin_y
        else:
            # join additional windows to neighbor above, overlapping top and bottom sides.
            begin_y = self.windows[-1].begin_y + self.windows[-1].nlines - 1
        if final:
            if first:
                # first and final, full height
                nlines = self.neighbor_left.nlines
            else:
                # additional and final; variable height
                nlines = self.neighbor_left.nlines - (
                    (self.windows[-1].begin_y + self.windows[-1].nlines - 1) - self.padding_y
                )
        bw = BorderedWindow(nlines, ncols, begin_y, self.begin_x)
        self.windows.append(bw)
        bw.border(self.get_border(len(self.windows) - 1))
        return bw

    def insert(self, nlines, ncols, loc=0):
        """Insert new BorderedWindow at loc.

        The bottom window shrinks by `nlines` to make room; windows at and
        below `loc` slide down.  Raises ValueError when `loc` is out of
        range or the bottom window would become too small.
        """
        if len(self.windows) == 0:
            self.append(nlines, ncols)
            return
        # loc 0 or -3 window one
        # loc 1 or -2 window two
        # loc 2 or -1 window three
        min_ = -len(self.windows)
        max_ = len(self.windows) - 1
        if loc < min_ or loc > max_:
            raise ValueError(f"loc {loc} is not {min_}..{max_}")
        if loc < 0:
            loc = len(self.windows) + loc
        # Create new BorderedWindow at this location
        loc_y = self.windows[loc].begin_y
        loc_x = self.windows[loc].begin_x
        # shrink last window by height of new window
        bw = self.windows[-1]
        new_nlines = bw.nlines - nlines
        if new_nlines < 3:
            # pylint: disable=consider-using-f-string
            raise ValueError(
                "Can't shrink: current nlines {} minus {} ({}) is < 3".format(
                    bw.nlines, nlines, new_nlines
                )
            )
        bw.resize(new_nlines + 1, bw.ncols)
        # slide windows down
        for bw in reversed(self.windows[loc:]):
            bw.mvwin(bw.begin_y + nlines - 1, bw.begin_x)
        # insert new window
        bw = BorderedWindow(nlines, ncols, loc_y, loc_x)
        self.windows.insert(loc, bw)
        # adjust borders of all windows
        for idx, _bw in enumerate(self.windows):
            _bw.border(self.get_border(idx))
import curses
from collections import defaultdict, namedtuple
from loguru import logger
from libcurses.mouseevent import MouseEvent
class Mouse:
    """Mouse handling."""

    @staticmethod
    def enable():
        """Enable `curses.getkey` to return mouse events.

        Call after `curses.initscr`. If trouble, try `TERM=xterm-1002`.
        """
        newmask = curses.ALL_MOUSE_EVENTS | curses.REPORT_MOUSE_POSITION
        (availmask, oldmask) = curses.mousemask(newmask)
        logger.trace(
            f"(availmask={availmask:#x}, oldmask={oldmask:#x}) = mousemask({newmask:#x})"
        )

    # -------------------------------------------------------------------------------
    # Internal mouse handlers

    # Each handler is called as func(mouse_event, args) and returns True
    # when it consumed the event.
    handler = namedtuple("handler", "func args")
    handlers = []

    @classmethod
    def add_internal_mouse_handler(cls, func, args=None):
        """Register `func` to be called with `args` when mouse event happens."""
        handler = cls.handler(func, args)
        cls.handlers.append(handler)

    # -------------------------------------------------------------------------------
    # Application may register a handler to respond to mouse activity at coordinates

    # A handler active on a single row, columns begin_x..last_x inclusive.
    yxhandler = namedtuple("yxhandler", "func begin_x last_x args")
    # BUGFIX: this was a plain dict, so the first `add_mouse_handler` call
    # raised KeyError unless `clear_mouse_handlers` had been called first.
    yxhandlers_by_row = defaultdict(list)

    @classmethod
    def add_mouse_handler(cls, func, y, x, ncols, args=None):
        """Call `func` with `args` when mouse event happens at (y, x)."""
        # pylint: disable=too-many-arguments
        cls.yxhandlers_by_row[y].append(cls.yxhandler(func, x, x + ncols - 1, args))

    @classmethod
    def clear_mouse_handlers(cls):
        """Remove all mouse handlers."""
        cls.yxhandlers_by_row = defaultdict(list)

    @classmethod
    def handle_mouse_event(cls):
        """Respond to `curses.getch` returning `curses.KEY_MOUSE`.

        Return True if any handler handled the event, else False.
        """
        mouse = MouseEvent()
        logger.trace(f"{mouse!r}")

        # Mouse handlers return True when they handle the event, and False when they don't.

        # Try internal mouse handlers first.
        if any(handler.func(mouse, handler.args) for handler in cls.handlers):
            return True

        # Try application mouse handler registered at mouse location.
        if any(
            handler.begin_x <= mouse.x <= handler.last_x and handler.func(mouse, handler.args)
            for handler in cls.yxhandlers_by_row.get(mouse.y, [])
        ):
            return True

        # All handlers, if any, ignored the mouse event.
        return False
# Module-level conveniences re-exporting the classmethods.
add_mouse_handler = Mouse.add_mouse_handler
clear_mouse_handlers = Mouse.clear_mouse_handlers
import curses
import re
from loguru import logger
_COLORMAP = None  # key=loguru-level-name, value=curses-color/attr


def get_colormap() -> dict[str, int]:
    """Return map of `loguru-level-name` to `curses-color/attr`.

    Call after creating all custom levels with `logger.level()`, and after
    `curses.initscr` (color pairs are registered with `curses.init_pair`).

    Map is built once and cached; repeated calls return same map.
    """

    global _COLORMAP  # noqa

    if not _COLORMAP:
        _COLORMAP = {}

        colors = {
            "black": curses.COLOR_BLACK,
            "blue": curses.COLOR_BLUE,
            "cyan": curses.COLOR_CYAN,
            "green": curses.COLOR_GREEN,
            "magenta": curses.COLOR_MAGENTA,
            "red": curses.COLOR_RED,
            "white": curses.COLOR_WHITE,
            "yellow": curses.COLOR_YELLOW,
        }

        attrs = {
            "bold": curses.A_BOLD,
            "dim": curses.A_DIM,
            "normal": curses.A_NORMAL,
            "hide": curses.A_INVIS,
            "italic": curses.A_ITALIC,
            "blink": curses.A_BLINK,
            "strike": curses.A_HORIZONTAL,
            "underline": curses.A_UNDERLINE,
            "reverse": curses.A_REVERSE,
        }

        # Parse strings like:
        #       "red bold"
        #       "green, reverse"
        #       "<blue><italic><WHITE>"
        # Apply lowercase colors to fg, and uppercase to bg.
        re_words = re.compile(r"[\w]+")

        # NOTE(review): walks loguru's private level registry; loguru has no
        # public API that enumerates the configured levels.
        for idx, lvl in enumerate(logger._core.levels.values()):  # noqa protected-access
            fg = curses.COLOR_WHITE
            bg = curses.COLOR_BLACK
            attr = 0
            for word in re_words.findall(lvl.color):
                # BUGFIX: curses.COLOR_BLACK == 0 is falsy, so the previous
                # truthiness checks silently ignored "black"/"BLACK" (and
                # A_NORMAL); compare against None instead.
                if word.islower() and (color := colors.get(word)) is not None:
                    fg = color
                elif word.isupper() and (color := colors.get(word.lower())) is not None:
                    bg = color
                elif (flag := attrs.get(word)) is not None:
                    attr |= flag
            curses.init_pair(idx + 1, fg, bg)
            _COLORMAP[lvl.name] = curses.color_pair(idx + 1) | attr
            logger.trace(
                f"name={lvl.name} color={lvl.color} idx={idx+1} fg={fg} bg={bg} "
                f"color={_COLORMAP[lvl.name]} attr={attr:o}"
            )

    return _COLORMAP
import curses
from libcurses.border import Border
class BorderedWindow:
    """Bordered Window."""

    def __init__(self, nlines, ncols, begin_y, begin_x, _border=None):
        """Create new bordered window with the given dimensions and optional border stylings.

        A bordered window is composed of two windows:
            1. an outer (border) window, which draws a box around
            2. an inner (working) window.

        border=None means use default border stylings, not no border.
        """
        self.nlines = nlines
        self.ncols = ncols
        self.begin_y = begin_y
        self.begin_x = begin_x
        # Outer window carries the box; the inner (working) window is
        # inset one cell on every side.
        self.b = curses.newwin(nlines, ncols, begin_y, begin_x)
        self.w = curses.newwin(
            self.nlines - 2, self.ncols - 2, self.begin_y + 1, self.begin_x + 1
        )
        self.w.scrollok(True)
        self.w.idlok(True)
        self.w.leaveok(False)
        self.w.keypad(True)
        self.w.refresh()
        self.border(_border or Border())

    def __repr__(self):
        # getmaxyx does not return the maximum values for y and x, as the name indicates.
        # https://docs.python.org/3/library/curses.html#curses.window.getmaxyx
        # Return a tuple (y, x) of the height and width of the window.
        return (
            self.__class__.__name__
            + "("
            + ", ".join(
                [
                    f"nlines={self.nlines}",
                    f"ncols={self.ncols}",
                    f"begin_y={self.begin_y}",
                    f"begin_x={self.begin_x}",
                    f"b=(getbegyx={self.b.getbegyx()}, getmaxyx={self.b.getmaxyx()})",
                    f"w=(getbegyx={self.w.getbegyx()}, getmaxyx={self.w.getmaxyx()})",
                ]
            )
            + ")"
        )

    def redraw(self):
        """Redraw window."""
        self.b.redrawwin()
        self.w.redrawwin()

    def refresh(self):
        """Refresh window."""
        self.b.refresh()
        self.w.refresh()

    def border(self, _border):
        """Set window border."""
        self.b.border(*_border)

    def resize(self, nlines, ncols):
        """Resize window."""
        # constrain cursor to new dimensions
        # NOTE(review): both windows are clamped against the OUTER size; the
        # inner window is only (nlines-2, ncols-2), so a clamped cursor may
        # still fall outside it — confirm intended.
        for w in (self.b, self.w):
            y, x = w.getyx()
            w.move(min(y, nlines - 1), min(x, ncols - 1))
        # before resizing windows ;)
        self.b.resize(nlines, ncols)
        self.w.resize(nlines - 2, ncols - 2)
        self.nlines = nlines
        self.ncols = ncols

    def mvwin(self, new_y, new_x):
        """Move window."""
        self.b.mvwin(new_y, new_x)
        self.w.mvwin(new_y + 1, new_x + 1)
        self.begin_y, self.begin_x = new_y, new_x
        self.refresh()

    def addstr(self, *args):
        """Add strings to window."""
        self.w.addstr(*args)
from copy import copy
import iso8601
import datetime
import itertools
import re
from urllib.parse import urljoin as _urljoin
from m3u8 import protocol
'''
http://tools.ietf.org/html/draft-pantos-http-live-streaming-08#section-3.2
http://stackoverflow.com/questions/2785755/how-to-split-but-ignore-separators-in-quoted-strings-in-python
'''
ATTRIBUTELISTPATTERN = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
URI_PREFIXES = ('https://', 'http://', 's3://', 's3a://', 's3n://')
def cast_date_time(value):
    """Parse an ISO-8601 timestamp string into a datetime (via iso8601)."""
    return iso8601.parse_date(value)
def format_date_time(value, **kwargs):
    """Serialize a datetime back to ISO-8601; kwargs pass through to isoformat()."""
    return value.isoformat(**kwargs)
class ParseError(Exception):
    """Raised (in strict mode) when a manifest line cannot be parsed."""

    def __init__(self, lineno, line):
        # 1-based line number and the offending raw line.
        self.lineno = lineno
        self.line = line

    def __str__(self):
        return 'Syntax error in manifest on line %d: %s' % (self.lineno, self.line)
def parse(content, strict=False, custom_tags_parser=None):
    '''
    Given a M3U8 playlist content returns a dictionary with all data found

    strict: raise ParseError on unrecognized non-blank lines instead of
        silently skipping them.
    custom_tags_parser: optional callable(line, lineno, data, state);
        returning True skips the standard tag dispatch for that line.
    '''
    # Accumulated result; keys mirror the attributes of the M3U8 model.
    data = {
        'media_sequence': 0,
        'is_variant': False,
        'is_endlist': False,
        'is_i_frames_only': False,
        'is_independent_segments': False,
        'playlist_type': None,
        'playlists': [],
        'segments': [],
        'iframe_playlists': [],
        'media': [],
        'keys': [],
        'rendition_reports': [],
        'skip': {},
        'part_inf': {},
        'session_data': [],
        'session_keys': [],
        'segment_map': []
    }
    # Scratch space shared across lines: tags accumulate here until the
    # following URI line "finishes" the pending segment/playlist.
    state = {
        'expect_segment': False,
        'expect_playlist': False,
        'current_keys': [],
        'current_segment_map': None,
    }

    lineno = 0
    # NOTE: the elif dispatch order below is load-bearing for tags that
    # share a prefix (e.g. MEDIA-SEQUENCE before MEDIA, CUE-OUT-CONT
    # before CUE-OUT, PART-INF before PART).
    for line in string_to_lines(content):
        lineno += 1
        line = line.strip()

        # Call custom parser if needed
        if line.startswith('#') and callable(custom_tags_parser):
            go_to_next_line = custom_tags_parser(line, lineno, data, state)
            # Do not try to parse other standard tags on this line if custom_tags_parser function returns 'True'
            if go_to_next_line:
                continue

        if line.startswith(protocol.ext_x_byterange):
            _parse_byterange(line, state)
            state['expect_segment'] = True
            continue

        if line.startswith(protocol.ext_x_bitrate):
            _parse_bitrate(line, state)

        elif line.startswith(protocol.ext_x_targetduration):
            _parse_simple_parameter(line, data, float)

        elif line.startswith(protocol.ext_x_media_sequence):
            _parse_simple_parameter(line, data, int)

        elif line.startswith(protocol.ext_x_discontinuity_sequence):
            _parse_simple_parameter(line, data, int)
            state['current_keys'].clear()

        elif line.startswith(protocol.ext_x_program_date_time):
            _, program_date_time = _parse_simple_parameter_raw_value(line, cast_date_time)
            if not data.get('program_date_time'):
                data['program_date_time'] = program_date_time
            state['current_program_date_time'] = program_date_time
            state['program_date_time'] = program_date_time

        elif line.startswith(protocol.ext_x_discontinuity):
            state['discontinuity'] = True

        elif line.startswith(protocol.ext_x_cue_out_cont):
            _parse_cueout_cont(line, state)
            state['cue_out'] = True

        elif line.startswith(protocol.ext_x_cue_out):
            _parse_cueout(line, state)
            state['cue_out_start'] = True
            state['cue_out'] = True

        elif line.startswith(f'{protocol.ext_oatcls_scte35}:'):
            _parse_oatcls_scte35(line, state)

        elif line.startswith(f'{protocol.ext_x_asset}:'):
            _parse_asset(line, state)

        elif line.startswith(protocol.ext_x_cue_in):
            state['cue_in'] = True

        elif line.startswith(protocol.ext_x_cue_span):
            state['cue_out'] = True

        elif line.startswith(protocol.ext_x_version):
            _parse_simple_parameter(line, data, int)

        elif line.startswith(protocol.ext_x_allow_cache):
            _parse_simple_parameter(line, data)

        elif line.startswith(protocol.ext_x_key):
            key = _parse_key(line)
            state['current_keys'].append(key)
            if key not in data['keys']:
                data['keys'].append(key)

        elif line.startswith(protocol.extinf):
            _parse_extinf(line, data, state, lineno, strict)
            state['expect_segment'] = True

        elif line.startswith(protocol.ext_x_stream_inf):
            state['expect_playlist'] = True
            _parse_stream_inf(line, data, state)

        elif line.startswith(protocol.ext_x_i_frame_stream_inf):
            _parse_i_frame_stream_inf(line, data)

        elif line.startswith(protocol.ext_x_media):
            _parse_media(line, data, state)

        elif line.startswith(protocol.ext_x_playlist_type):
            _parse_simple_parameter(line, data)

        elif line.startswith(protocol.ext_i_frames_only):
            data['is_i_frames_only'] = True

        elif line.startswith(protocol.ext_is_independent_segments):
            data['is_independent_segments'] = True

        elif line.startswith(protocol.ext_x_endlist):
            data['is_endlist'] = True

        elif line.startswith(protocol.ext_x_map):
            quoted_parser = remove_quotes_parser('uri', 'byterange')
            segment_map_info = _parse_attribute_list(protocol.ext_x_map, line, quoted_parser)
            state['current_segment_map'] = segment_map_info
            data['segment_map'].append(segment_map_info)

        elif line.startswith(protocol.ext_x_start):
            attribute_parser = {
                "time_offset": lambda x: float(x)
            }
            start_info = _parse_attribute_list(protocol.ext_x_start, line, attribute_parser)
            data['start'] = start_info

        elif line.startswith(protocol.ext_x_server_control):
            _parse_server_control(line, data, state)

        elif line.startswith(protocol.ext_x_part_inf):
            _parse_part_inf(line, data, state)

        elif line.startswith(protocol.ext_x_rendition_report):
            _parse_rendition_report(line, data, state)

        elif line.startswith(protocol.ext_x_part):
            _parse_part(line, data, state)

        elif line.startswith(protocol.ext_x_skip):
            _parse_skip(line, data, state)

        elif line.startswith(protocol.ext_x_session_data):
            _parse_session_data(line, data, state)

        elif line.startswith(protocol.ext_x_session_key):
            _parse_session_key(line, data, state)

        elif line.startswith(protocol.ext_x_preload_hint):
            _parse_preload_hint(line, data, state)

        elif line.startswith(protocol.ext_x_daterange):
            _parse_daterange(line, data, state)

        elif line.startswith(protocol.ext_x_gap):
            state['gap'] = True

        elif line.startswith(protocol.ext_x_content_steering):
            _parse_content_steering(line, data, state)

        elif line.startswith(protocol.ext_m3u):
            # We don't parse #EXTM3U, it just should to be present
            pass

        elif line.strip() == '':
            # blank lines are legal
            pass

        elif state['expect_segment']:
            _parse_ts_chunk(line, data, state)
            state['expect_segment'] = False
            state['current_keys'].clear()

        elif state['expect_playlist']:
            _parse_variant_playlist(line, data, state)
            state['expect_playlist'] = False

        elif strict:
            raise ParseError(lineno, line)

    # there could be remaining partial segments
    if 'segment' in state:
        data['segments'].append(state.pop('segment'))

    return data
def _parse_key(line):
    """Parse an #EXT-X-KEY line into a dict of normalized attributes."""
    payload = line.replace(protocol.ext_x_key + ':', '')
    pairs = (item.split('=', 1) for item in ATTRIBUTELISTPATTERN.split(payload)[1::2])
    return {normalize_attribute(name): remove_quotes(value) for name, value in pairs}
def _parse_extinf(line, data, state, lineno, strict):
    """Parse #EXTINF duration/title into the pending segment in `state`.

    In strict mode a missing comma (no title field) raises ParseError;
    otherwise the title defaults to the empty string.
    """
    payload = line.replace(protocol.extinf + ':', '')
    duration, sep, title = payload.partition(',')
    if not sep and strict:
        raise ParseError(lineno, line)
    segment = state.setdefault('segment', {})
    segment['duration'] = float(duration)
    segment['title'] = title
def _parse_ts_chunk(line, data, state):
    """Finalize the pending segment with its URI line and accumulated state.

    Called for the media-URI line that follows #EXTINF and friends; pops
    per-segment scratch keys out of `state` and appends the finished
    segment dict to `data['segments']`.
    """
    segment = state.pop('segment')
    if state.get('program_date_time'):
        segment['program_date_time'] = state.pop('program_date_time')
    if state.get('current_program_date_time'):
        segment['current_program_date_time'] = state['current_program_date_time']
        # Advance the running wall clock by this segment's duration.
        state['current_program_date_time'] += datetime.timedelta(seconds=segment['duration'])
    segment['uri'] = line
    segment['cue_in'] = state.pop('cue_in', False)
    segment['cue_out'] = state.pop('cue_out', False)
    segment['cue_out_start'] = state.pop('cue_out_start', False)
    # CUE-IN ends the ad break: consume (pop) the SCTE35 scratch state;
    # otherwise keep (get) it for the remaining segments of the break.
    scte_op = state.pop if segment['cue_in'] else state.get
    segment['scte35'] = scte_op('current_cue_out_scte35', None)
    segment['oatcls_scte35'] = scte_op('current_cue_out_oatcls_scte35', None)
    segment['scte35_duration'] = scte_op('current_cue_out_duration', None)
    segment['scte35_elapsedtime'] = scte_op('current_cue_out_elapsedtime', None)
    segment['asset_metadata'] = scte_op('asset_metadata', None)
    segment['discontinuity'] = state.pop('discontinuity', False)
    segment['keys'] = copy(state['current_keys'])
    if not state['current_keys']:
        # For unencrypted segments, the initial key would be None
        if None not in data['keys']:
            data['keys'].append(None)
    if state.get('current_segment_map'):
        segment['init_section'] = state['current_segment_map']
    segment['dateranges'] = state.pop('dateranges', None)
    segment['gap_tag'] = state.pop('gap', None)
    data['segments'].append(segment)
def _parse_attribute_list(prefix, line, atribute_parser, default_parser=None):
    """Parse `KEY=VALUE,...` after `prefix:` into a dict of normalized names.

    Values are converted by `atribute_parser[name]` when present, else by
    `default_parser` when given, else kept as raw strings.
    """
    payload = line.replace(prefix + ':', '')
    attributes = {}
    for item in ATTRIBUTELISTPATTERN.split(payload)[1::2]:
        raw_name, raw_value = item.split('=', 1)
        attr = normalize_attribute(raw_name)
        if attr in atribute_parser:
            attributes[attr] = atribute_parser[attr](raw_value)
        elif default_parser is not None:
            attributes[attr] = default_parser(raw_value)
        else:
            attributes[attr] = raw_value
    return attributes
def _parse_stream_inf(line, data, state):
    """Record a variant #EXT-X-STREAM-INF; its URI follows on the next line."""
    data['is_variant'] = True
    data['media_sequence'] = None
    parsers = remove_quotes_parser(
        'codecs', 'audio', 'video', 'video_range', 'subtitles',
        'pathway_id', 'stable_variant_id')
    parsers.update(
        program_id=int,
        bandwidth=lambda x: int(float(x)),
        average_bandwidth=int,
        frame_rate=float,
        video_range=str,
        hdcp_level=str,
    )
    state['stream_info'] = _parse_attribute_list(protocol.ext_x_stream_inf, line, parsers)
def _parse_i_frame_stream_inf(line, data):
    """Parse #EXT-X-I-FRAME-STREAM-INF into an i-frame playlist entry."""
    parsers = remove_quotes_parser('codecs', 'uri', 'pathway_id', 'stable_variant_id')
    parsers.update(
        program_id=int,
        bandwidth=int,
        average_bandwidth=int,
        video_range=str,
        hdcp_level=str,
    )
    info = _parse_attribute_list(protocol.ext_x_i_frame_stream_inf, line, parsers)
    data['iframe_playlists'].append({
        'uri': info.pop('uri'),
        'iframe_stream_info': info,
    })
def _parse_media(line, data, state):
    """Parse #EXT-X-MEDIA (alternative rendition) and collect it on `data`."""
    parsed = _parse_attribute_list(
        protocol.ext_x_media,
        line,
        remove_quotes_parser(
            'uri', 'group_id', 'language', 'assoc_language', 'name',
            'instream_id', 'characteristics', 'channels',
            'stable_rendition_id'),
    )
    data['media'].append(parsed)
def _parse_variant_playlist(line, data, state):
    """Attach the URI line that follows #EXT-X-STREAM-INF to its stream info."""
    data['playlists'].append({
        'uri': line,
        'stream_info': state.pop('stream_info'),
    })
def _parse_bitrate(line, state):
    """Attach #EXT-X-BITRATE (kept as a string) to the pending segment."""
    segment = state.setdefault('segment', {})
    segment['bitrate'] = line.replace(protocol.ext_x_bitrate + ':', '')
def _parse_byterange(line, state):
    """Attach #EXT-X-BYTERANGE (kept as a string) to the pending segment."""
    segment = state.setdefault('segment', {})
    segment['byterange'] = line.replace(protocol.ext_x_byterange + ':', '')
def _parse_simple_parameter_raw_value(line, cast_to=str, normalize=False):
    """Split `#EXT-X-NAME:value`; return (normalized name, cast value)."""
    tag, value = line.split(':', 1)
    tag = normalize_attribute(tag.replace('#EXT-X-', ''))
    if normalize:
        value = value.strip().lower()
    return tag, cast_to(value)
def _parse_and_set_simple_parameter_raw_value(line, data, cast_to=str, normalize=False):
    """Parse a simple `#EXT-X-*:value` tag, store it on `data`, and return it."""
    param, value = _parse_simple_parameter_raw_value(line, cast_to, normalize)
    data[param] = value
    return value
def _parse_simple_parameter(line, data, cast_to=str):
    """Parse a `#EXT-X-*:value` tag into `data`, lower-casing the value first."""
    return _parse_and_set_simple_parameter_raw_value(line, data, cast_to, True)
def _parse_cueout_cont(line, state):
    """Parse #EXT-X-CUE-OUT-CONT, refreshing the running cue-out state.

    Fix: the SCTE35 payload was previously stored under `if duration:`
    (copy/paste of the preceding check), so a CONT tag carrying SCTE35
    but no DURATION never updated `current_cue_out_scte35`.
    """
    elements = line.split(':', 1)
    if len(elements) != 2:
        return

    cue_info = _parse_attribute_list(
        protocol.ext_x_cue_out_cont,
        line,
        remove_quotes_parser('duration', 'elapsedtime', 'scte35')
    )
    duration = cue_info.get('duration')
    if duration:
        state['current_cue_out_duration'] = duration
    scte35 = cue_info.get('scte35')
    if scte35:
        state['current_cue_out_scte35'] = scte35
    elapsedtime = cue_info.get('elapsedtime')
    if elapsedtime:
        state['current_cue_out_elapsedtime'] = elapsedtime
def _cueout_no_duration(line):
    """Handle a bare `#EXT-X-CUE-OUT` with no value at all.

    Must run before the other parsers: their `line.split(':', 1)` would
    raise ValueError on a tag with no colon.
    """
    if line == protocol.ext_x_cue_out:
        return (None, None)
    return None
def _cueout_envivio(line, state):
param, value = line.split(':', 1)
res = re.match('.*DURATION=(.*),.*,CUE="(.*)"', value)
if res:
return (res.group(2), res.group(1))
else:
return None
def _cueout_duration(line):
# This was added separately rather than modifying "simple"
param, value = line.split(':', 1)
res = re.match(r'DURATION=(.*)', value)
if res:
return (None, res.group(1))
def _cueout_simple(line):
param, value = line.split(':', 1)
res = re.match(r'^(\d+(?:\.\d)?\d*)$', value)
if res:
return (None, res.group(1))
def _parse_cueout(line, state):
    """Dispatch #EXT-X-CUE-OUT across the known vendor formats."""
    # Most-specific parser first; the first non-None (scte35, duration)
    # tuple wins.
    result = (_cueout_no_duration(line)
              or _cueout_envivio(line, state)
              or _cueout_duration(line)
              or _cueout_simple(line))
    if result is None:
        return
    scte35, duration = result
    # Keep any SCTE35 already seen (e.g. from #EXT-OATCLS-SCTE35) when
    # this tag carries none.
    state['current_cue_out_scte35'] = scte35 or state.get('current_cue_out_scte35')
    state['current_cue_out_duration'] = duration
def _parse_server_control(line, data, state):
    """Parse #EXT-X-SERVER-CONTROL (low-latency HLS delivery directives)."""
    data['server_control'] = _parse_attribute_list(
        protocol.ext_x_server_control,
        line,
        {
            'can_block_reload': str,
            'hold_back': float,
            'part_hold_back': float,
            'can_skip_until': float,
            'can_skip_dateranges': str,
        },
    )
def _parse_part_inf(line, data, state):
    """Parse #EXT-X-PART-INF (declares the target partial-segment duration)."""
    data['part_inf'] = _parse_attribute_list(
        protocol.ext_x_part_inf, line, {'part_target': float}
    )
def _parse_rendition_report(line, data, state):
    """Parse #EXT-X-RENDITION-REPORT and collect it on `data`."""
    parsers = remove_quotes_parser('uri')
    parsers.update(last_msn=int, last_part=int)
    data['rendition_reports'].append(
        _parse_attribute_list(protocol.ext_x_rendition_report, line, parsers)
    )
def _parse_part(line, data, state):
    """Parse #EXT-X-PART and append it to the pending segment's part list."""
    parsers = remove_quotes_parser('uri')
    parsers.update(duration=float, independent=str, gap=str, byterange=str)
    part = _parse_attribute_list(protocol.ext_x_part, line, parsers)

    # this should always be true according to spec
    if state.get('current_program_date_time'):
        part['program_date_time'] = state['current_program_date_time']
        # Advance the running wall clock by this part's duration.
        state['current_program_date_time'] += datetime.timedelta(seconds=part['duration'])

    part['dateranges'] = state.pop('dateranges', None)
    part['gap_tag'] = state.pop('gap', None)

    segment = state.setdefault('segment', {})
    segment.setdefault('parts', []).append(part)
def _parse_skip(line, data, state):
    """Parse #EXT-X-SKIP (delta playlist: how much history was elided)."""
    parsers = remove_quotes_parser('recently_removed_dateranges')
    parsers['skipped_segments'] = int
    data['skip'] = _parse_attribute_list(protocol.ext_x_skip, line, parsers)
def _parse_session_data(line, data, state):
    """Parse #EXT-X-SESSION-DATA and collect it on `data`."""
    parsed = _parse_attribute_list(
        protocol.ext_x_session_data,
        line,
        remove_quotes_parser('data_id', 'value', 'uri', 'language'),
    )
    data['session_data'].append(parsed)
def _parse_session_key(line, data, state):
    """Parse #EXT-X-SESSION-KEY into a dict and collect it on `data`."""
    payload = line.replace(protocol.ext_x_session_key + ':', '')
    pairs = (item.split('=', 1) for item in ATTRIBUTELISTPATTERN.split(payload)[1::2])
    data['session_keys'].append(
        {normalize_attribute(name): remove_quotes(value) for name, value in pairs}
    )
def _parse_preload_hint(line, data, state):
    """Parse #EXT-X-PRELOAD-HINT (LL-HLS: upcoming part/map to prefetch)."""
    parsers = remove_quotes_parser('uri')
    parsers.update(type=str, byterange_start=int, byterange_length=int)
    data['preload_hint'] = _parse_attribute_list(
        protocol.ext_x_preload_hint, line, parsers
    )
def _parse_daterange(line, date, state):
    """Parse #EXT-X-DATERANGE and queue it for the next segment/part.

    NOTE(review): the second parameter is named `date` but receives the
    usual `data` dict; it is unused here, so the signature is kept as-is.
    """
    parsers = remove_quotes_parser('id', 'class', 'start_date', 'end_date')
    parsers.update(
        duration=float,
        planned_duration=float,
        end_on_next=str,
        scte35_cmd=str,
        scte35_out=str,
        scte35_in=str,
    )
    parsed = _parse_attribute_list(protocol.ext_x_daterange, line, parsers)
    state.setdefault('dateranges', []).append(parsed)
def _parse_content_steering(line, data, state):
    """Parse #EXT-X-CONTENT-STEERING (pathway / steering-server info)."""
    data['content_steering'] = _parse_attribute_list(
        protocol.ext_x_content_steering,
        line,
        remove_quotes_parser('server_uri', 'pathway_id'),
    )
def _parse_oatcls_scte35(line, state):
    """Record an #EXT-OATCLS-SCTE35 payload as the current cue-out SCTE35."""
    # Caller only dispatches here when the tag has a ':' separator.
    payload = line.split(':', 1)[1]
    state['current_cue_out_oatcls_scte35'] = payload
    state['current_cue_out_scte35'] = payload
def _parse_asset(line, state):
    """Parse #EXT-X-ASSET metadata attached to a cue-out.

    Attribute values may or may not be quoted and are URL-encoded; they
    are kept verbatim (default parser `str`) to avoid losing information.
    """
    state['asset_metadata'] = _parse_attribute_list(
        protocol.ext_x_asset, line, {}, default_parser=str
    )
def string_to_lines(string):
    """Split manifest content into lines, dropping surrounding whitespace."""
    return string.strip().splitlines()
def remove_quotes_parser(*attrs):
    """Build an attribute-parser dict mapping each name to `remove_quotes`."""
    return {attr: remove_quotes for attr in attrs}
def remove_quotes(string):
    '''
    Remove one pair of matching surrounding quotes from string.
    Ex.:
      "foo" -> foo
      'foo' -> foo
      'foo  -> 'foo

    Mismatched or lone quotes (e.g. `"foo'` or `"`) are returned
    unchanged; previously any leading/trailing quote combination was
    stripped, corrupting such values.
    '''
    if len(string) >= 2 and string[0] == string[-1] and string[0] in ('"', "'"):
        return string[1:-1]
    return string
def normalize_attribute(attribute):
    """Normalize an attribute name: trimmed, lower-case, `-` -> `_`."""
    return attribute.strip().lower().replace('-', '_')
def is_url(uri):
    """Return True if `uri` starts with one of the recognized URI schemes."""
    return any(uri.startswith(prefix) for prefix in URI_PREFIXES)
def urljoin(base, url):
    """Join `url` onto `base` like `urllib.parse.urljoin`, but without
    collapsing double slashes that are meaningful in these URIs.

    The scheme separator is temporarily encoded as control char '\\x01'
    (the '\\1' escape below) and each '//' is padded with a '\\x00'
    sentinel so stdlib urljoin leaves them intact; both are undone after
    joining.  Assumes neither URI contains those control bytes — true
    for real-world URLs.
    """
    base = base.replace('://', '\1')
    url = url.replace('://', '\1')
    while '//' in base:
        base = base.replace('//', '/\0/')
    while '//' in url:
        url = url.replace('//', '/\0/')
    return _urljoin(base.replace('\1', '://'), url.replace('\1', '://')).replace('\0', '')
def get_segment_custom_value(state, key, default=None):
    """Fetch a custom per-segment value stored by a custom_tags_parser.

    Returns `default` when no segment is pending or the key is absent.
    """
    segment = state.get('segment') or {}
    return segment.get('custom_parser_values', {}).get(key, default)
def save_segment_custom_value(state, key, value):
    """Store a custom per-segment value (for use with custom_tags_parser)."""
    values = state.setdefault('segment', {}).setdefault('custom_parser_values', {})
    values[key] = value
import decimal
import os
import errno
from m3u8.protocol import (
ext_oatcls_scte35,
ext_x_asset,
ext_x_key,
ext_x_map,
ext_x_session_key,
ext_x_start,
)
from m3u8.parser import parse, format_date_time
from m3u8.mixins import BasePathMixin, GroupedBasePathMixin
class MalformedPlaylistError(Exception):
    """Raised when playlist content violates structural expectations."""
    pass
class M3U8(object):
'''
Represents a single M3U8 playlist. Should be instantiated with
the content as string.
Parameters:
`content`
the m3u8 content as string
`base_path`
all urls (key and segments url) will be updated with this base_path,
ex.:
base_path = "http://videoserver.com/hls"
/foo/bar/key.bin --> http://videoserver.com/hls/key.bin
http://vid.com/segment1.ts --> http://videoserver.com/hls/segment1.ts
can be passed as parameter or setted as an attribute to ``M3U8`` object.
`base_uri`
uri the playlist comes from. it is propagated to SegmentList and Key
ex.: http://example.com/path/to
Attributes:
`keys`
Returns the list of `Key` objects used to encrypt the segments from m3u8.
It covers the whole list of possible situations when encryption either is
used or not.
1. No encryption.
`keys` list will only contain a `None` element.
2. Encryption enabled for all segments.
`keys` list will contain the key used for the segments.
3. No encryption for first element(s), encryption is applied afterwards
`keys` list will contain `None` and the key used for the rest of segments.
4. Multiple keys used during the m3u8 manifest.
`keys` list will contain the key used for each set of segments.
`session_keys`
Returns the list of `SessionKey` objects used to encrypt multiple segments from m3u8.
`segments`
a `SegmentList` object, represents the list of `Segment`s from this playlist
`is_variant`
Returns true if this M3U8 is a variant playlist, with links to
other M3U8s with different bitrates.
If true, `playlists` is a list of the playlists available,
and `iframe_playlists` is a list of the i-frame playlists available.
`is_endlist`
Returns true if EXT-X-ENDLIST tag present in M3U8.
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.8
`playlists`
If this is a variant playlist (`is_variant` is True), returns a list of
Playlist objects
`iframe_playlists`
If this is a variant playlist (`is_variant` is True), returns a list of
IFramePlaylist objects
`playlist_type`
A lower-case string representing the type of the playlist, which can be
one of VOD (video on demand) or EVENT.
`media`
If this is a variant playlist (`is_variant` is True), returns a list of
Media objects
`target_duration`
Returns the EXT-X-TARGETDURATION as an integer
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.2
`media_sequence`
Returns the EXT-X-MEDIA-SEQUENCE as an integer
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.3
`program_date_time`
Returns the EXT-X-PROGRAM-DATE-TIME as a string
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.5
`version`
Return the EXT-X-VERSION as is
`allow_cache`
Return the EXT-X-ALLOW-CACHE as is
`files`
Returns an iterable with all files from playlist, in order. This includes
segments and key uri, if present.
`base_uri`
It is a property (getter and setter) used by
SegmentList and Key to have absolute URIs.
`is_i_frames_only`
Returns true if EXT-X-I-FRAMES-ONLY tag present in M3U8.
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.12
`is_independent_segments`
Returns true if EXT-X-INDEPENDENT-SEGMENTS tag present in M3U8.
https://tools.ietf.org/html/draft-pantos-http-live-streaming-13#section-3.4.16
'''
simple_attributes = (
# obj attribute # parser attribute
('is_variant', 'is_variant'),
('is_endlist', 'is_endlist'),
('is_i_frames_only', 'is_i_frames_only'),
('target_duration', 'targetduration'),
('media_sequence', 'media_sequence'),
('program_date_time', 'program_date_time'),
('is_independent_segments', 'is_independent_segments'),
('version', 'version'),
('allow_cache', 'allow_cache'),
('playlist_type', 'playlist_type'),
('discontinuity_sequence', 'discontinuity_sequence')
)
    def __init__(self, content=None, base_path=None, base_uri=None, strict=False, custom_tags_parser=None):
        """Build a playlist object.

        `content` is raw M3U8 text; when None, an empty playlist is created.
        `base_uri` is the URI the playlist was loaded from, used to resolve
        relative child URIs; `base_path` optionally rewrites child paths.
        `strict` and `custom_tags_parser` are forwarded to parse().
        """
        if content is not None:
            self.data = parse(content, strict, custom_tags_parser)
        else:
            self.data = {}
        self._base_uri = base_uri
        if self._base_uri:
            # Normalize so urljoin-style concatenation works on children.
            if not self._base_uri.endswith('/'):
                self._base_uri += '/'

        self._initialize_attributes()
        # Assigned last: the base_path setter propagates to the children
        # created by _initialize_attributes().
        self.base_path = base_path
    def _initialize_attributes(self):
        """Materialize model objects from the parsed ``self.data`` dict."""
        # Entries may be falsy placeholders; keep None so segment indices align.
        self.keys = [Key(base_uri=self.base_uri, **params) if params else None
                     for params in self.data.get('keys', [])]
        self.segment_map = [InitializationSection(base_uri=self.base_uri, **params) if params else None
                            for params in self.data.get('segment_map', [])]
        # Each segment's key dicts are resolved back to the shared Key objects.
        self.segments = SegmentList([
            Segment(
                base_uri=self.base_uri,
                keyobjects=[
                    find_key(segment_key, self.keys)
                    for segment_key in segment['keys']],
                **segment)
            for segment in self.data.get('segments', [])
        ])

        # Copy scalar tags (version, targetduration, ...) straight through.
        for attr, param in self.simple_attributes:
            setattr(self, attr, self.data.get(param))

        # Number segments starting at EXT-X-MEDIA-SEQUENCE (or 0).
        for i, segment in enumerate(self.segments, self.media_sequence or 0):
            segment.media_sequence = i

        self.files = []
        for key in self.keys:
            # Avoid None key, it could be the first one, don't repeat them
            if key and key.uri not in self.files:
                self.files.append(key.uri)
        self.files.extend(self.segments.uri)

        self.media = MediaList([ Media(base_uri=self.base_uri, **media)
                                 for media in self.data.get('media', []) ])

        self.playlists = PlaylistList([ Playlist(base_uri=self.base_uri, media=self.media, **playlist)
                                        for playlist in self.data.get('playlists', []) ])

        self.iframe_playlists = PlaylistList()
        for ifr_pl in self.data.get('iframe_playlists', []):
            self.iframe_playlists.append(IFramePlaylist(base_uri=self.base_uri,
                                                        uri=ifr_pl['uri'],
                                                        iframe_stream_info=ifr_pl['iframe_stream_info'])
                                         )
        # `x and C(**x)` yields None when the tag is absent, else the object.
        start = self.data.get('start', None)
        self.start = start and Start(**start)

        server_control = self.data.get('server_control', None)
        self.server_control = server_control and ServerControl(**server_control)

        part_inf = self.data.get('part_inf', None)
        self.part_inf = part_inf and PartInformation(**part_inf)

        skip = self.data.get('skip', None)
        self.skip = skip and Skip(**skip)

        self.rendition_reports = RenditionReportList([ RenditionReport(base_uri=self.base_uri, **rendition_report)
                                                       for rendition_report in self.data.get('rendition_reports', []) ])

        # SessionData entries without a DATA-ID are invalid and dropped.
        self.session_data = SessionDataList([ SessionData(**session_data)
                                              for session_data in self.data.get('session_data', [])
                                              if 'data_id' in session_data ])

        self.session_keys = [ SessionKey(base_uri=self.base_uri, **params) if params else None
                              for params in self.data.get('session_keys', []) ]

        preload_hint = self.data.get('preload_hint', None)
        self.preload_hint = preload_hint and PreloadHint(base_uri=self.base_uri, **preload_hint)

        content_steering = self.data.get('content_steering', None)
        self.content_steering = content_steering and ContentSteering(base_uri=self.base_uri, **content_steering)
    def __unicode__(self):
        # Python 2 compatibility: unicode(obj) renders the playlist text.
        return self.dumps()
    @property
    def base_uri(self):
        # Base URI used to resolve relative URIs throughout the playlist.
        return self._base_uri

    @base_uri.setter
    def base_uri(self, new_base_uri):
        # Propagate the new base URI to every child object that carries URIs.
        self._base_uri = new_base_uri
        self.media.base_uri = new_base_uri
        self.playlists.base_uri = new_base_uri
        self.iframe_playlists.base_uri = new_base_uri
        self.segments.base_uri = new_base_uri
        self.rendition_reports.base_uri = new_base_uri
        # keys / session_keys lists may contain None placeholders.
        for key in self.keys:
            if key:
                key.base_uri = new_base_uri
        for key in self.session_keys:
            if key:
                key.base_uri = new_base_uri
        if self.preload_hint:
            self.preload_hint.base_uri = new_base_uri
        if self.content_steering:
            self.content_steering.base_uri = new_base_uri
    @property
    def base_path(self):
        # Path prefix rewritten onto child URIs (see _update_base_path()).
        return self._base_path

    @base_path.setter
    def base_path(self, newbase_path):
        self._base_path = newbase_path
        self._update_base_path()
    def _update_base_path(self):
        """Push the current base_path down to every child object; no-op
        when base_path is unset."""
        if self._base_path is None:
            return
        # keys / session_keys lists may contain None placeholders.
        for key in self.keys:
            if key:
                key.base_path = self._base_path
        for key in self.session_keys:
            if key:
                key.base_path = self._base_path

        self.media.base_path = self._base_path
        self.segments.base_path = self._base_path
        self.playlists.base_path = self._base_path
        self.iframe_playlists.base_path = self._base_path
        self.rendition_reports.base_path = self._base_path
        if self.preload_hint:
            self.preload_hint.base_path = self._base_path
        if self.content_steering:
            self.content_steering.base_path = self._base_path
    def add_playlist(self, playlist):
        # Adding a variant playlist makes this a master/variant manifest.
        self.is_variant = True
        self.playlists.append(playlist)
    def add_iframe_playlist(self, iframe_playlist):
        # None is ignored; otherwise this becomes a variant manifest.
        if iframe_playlist is not None:
            self.is_variant = True
            self.iframe_playlists.append(iframe_playlist)
    def add_media(self, media):
        # Append an EXT-X-MEDIA entry.
        self.media.append(media)
    def add_segment(self, segment):
        # Append a media Segment.
        self.segments.append(segment)
    def add_rendition_report(self, report):
        # Append an EXT-X-RENDITION-REPORT entry.
        self.rendition_reports.append(report)
    def dumps(self):
        '''
        Returns the current m3u8 as a string.
        You could also use unicode(<this obj>) or str(<this obj>)
        '''
        # Tags are emitted in a fixed order; each entry below is one line.
        output = ['#EXTM3U']
        if self.content_steering:
            output.append(str(self.content_steering))
        if self.is_independent_segments:
            output.append('#EXT-X-INDEPENDENT-SEGMENTS')
        if self.media_sequence:
            output.append('#EXT-X-MEDIA-SEQUENCE:' + str(self.media_sequence))
        if self.discontinuity_sequence:
            output.append('#EXT-X-DISCONTINUITY-SEQUENCE:{}'.format(
                number_to_string(self.discontinuity_sequence)))
        if self.allow_cache:
            output.append('#EXT-X-ALLOW-CACHE:' + self.allow_cache.upper())
        if self.version:
            output.append('#EXT-X-VERSION:' + str(self.version))
        if self.target_duration:
            output.append('#EXT-X-TARGETDURATION:' +
                          number_to_string(self.target_duration))
        if not (self.playlist_type is None or self.playlist_type == ''):
            output.append('#EXT-X-PLAYLIST-TYPE:%s' % str(self.playlist_type).upper())
        if self.start:
            output.append(str(self.start))
        if self.is_i_frames_only:
            output.append('#EXT-X-I-FRAMES-ONLY')
        if self.server_control:
            output.append(str(self.server_control))
        if self.is_variant:
            # Variant (master) playlists list media groups and sub-playlists.
            if self.media:
                output.append(str(self.media))
            output.append(str(self.playlists))
            if self.iframe_playlists:
                output.append(str(self.iframe_playlists))
        if self.part_inf:
            output.append(str(self.part_inf))
        if self.skip:
            output.append(str(self.skip))
        if self.session_data:
            output.append(str(self.session_data))
        for key in self.session_keys:
            output.append(str(key))

        output.append(str(self.segments))

        if self.preload_hint:
            output.append(str(self.preload_hint))

        if self.rendition_reports:
            output.append(str(self.rendition_reports))

        if self.is_endlist:
            output.append('#EXT-X-ENDLIST')

        # ensure that the last line is terminated correctly
        if output[-1] and not output[-1].endswith('\n'):
            output.append('')

        return '\n'.join(output)
    def dump(self, filename):
        '''
        Saves the current m3u8 to ``filename``
        '''
        # Create intermediate directories first so open() cannot fail on path.
        self._create_sub_directories(filename)

        with open(filename, 'w') as fileobj:
            fileobj.write(self.dumps())
    def _create_sub_directories(self, filename):
        # Ensure the directory part of `filename` exists (no-op when the
        # path has no directory component).
        basename = os.path.dirname(filename)
        if basename:
            os.makedirs(basename, exist_ok=True)
class Segment(BasePathMixin):
    '''
    A video segment from a M3U8 playlist

    `uri`
      a string with the segment uri
    `title`
      title attribute from EXTINF parameter
    `program_date_time`
      Returns the EXT-X-PROGRAM-DATE-TIME as a datetime. This field is only set
      if EXT-X-PROGRAM-DATE-TIME exists for this segment
      http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.5
    `current_program_date_time`
      Returns a datetime of this segment, either the value of `program_date_time`
      when EXT-X-PROGRAM-DATE-TIME is set or a calculated value based on previous
      segments' EXT-X-PROGRAM-DATE-TIME and EXTINF values
    `discontinuity`
      Returns a boolean indicating if a EXT-X-DISCONTINUITY tag exists
      http://tools.ietf.org/html/draft-pantos-http-live-streaming-13#section-3.4.11
    `cue_out_start`
      Returns a boolean indicating if a EXT-X-CUE-OUT tag exists
    `cue_out`
      Returns a boolean indicating if a EXT-X-CUE-OUT-CONT tag exists
      Note: for backwards compatibility, this will be True when cue_out_start
      is True, even though this tag did not exist in the input, and
      EXT-X-CUE-OUT-CONT will not exist in the output
    `cue_in`
      Returns a boolean indicating if a EXT-X-CUE-IN tag exists
    `scte35`
      Base64 encoded SCTE35 metadata if available
    `scte35_duration`
      Planned SCTE35 duration
    `duration`
      duration attribute from EXTINF parameter
    `base_uri`
      uri the key comes from in URI hierarchy. ex.: http://example.com/path/to
    `bitrate`
      bitrate attribute from EXT-X-BITRATE parameter
    `byterange`
      byterange attribute from EXT-X-BYTERANGE parameter
    `keys`
      Keys used to encrypt the segment (EXT-X-KEY); always a list (possibly
      empty, and possibly containing None for unencrypted stretches)
    `parts`
      partial segments that make up this segment
    `dateranges`
      any dateranges that should precede the segment
    `gap_tag`
      GAP tag indicates that a Media Segment is missing
    `custom_parser_values`
      Additional values which custom_tags_parser might store per segment
    '''

    def __init__(self, uri=None, base_uri=None, program_date_time=None, current_program_date_time=None,
                 duration=None, title=None, bitrate=None, byterange=None, cue_out=False,
                 cue_out_start=False, cue_in=False, discontinuity=False, keys=None, scte35=None,
                 oatcls_scte35=None, scte35_duration=None, scte35_elapsedtime=None, asset_metadata=None,
                 keyobjects=None, parts=None, init_section=None, dateranges=None, gap_tag=None,
                 media_sequence=None, custom_parser_values=None):
        self.media_sequence = media_sequence
        self.uri = uri
        self.duration = duration
        self.title = title
        self._base_uri = base_uri
        self.bitrate = bitrate
        self.byterange = byterange
        self.program_date_time = program_date_time
        self.current_program_date_time = current_program_date_time
        self.discontinuity = discontinuity
        self.cue_out_start = cue_out_start
        self.cue_out = cue_out
        self.cue_in = cue_in
        self.scte35 = scte35
        self.oatcls_scte35 = oatcls_scte35
        self.scte35_duration = scte35_duration
        self.scte35_elapsedtime = scte35_elapsedtime
        self.asset_metadata = asset_metadata
        # FIX: normalize to a list. dumps() iterates self.keys and
        # SegmentList.by_key() tests membership; both raised TypeError when a
        # Segment was constructed without keyobjects (keys stayed None).
        self.keys = keyobjects or []
        self.parts = PartialSegmentList([PartialSegment(base_uri=self._base_uri, **partial)
                                         for partial in parts] if parts else [])
        if init_section is not None:
            self.init_section = InitializationSection(self._base_uri, **init_section)
        else:
            self.init_section = None
        self.dateranges = DateRangeList([DateRange(**daterange)
                                         for daterange in dateranges] if dateranges else [])
        self.gap_tag = gap_tag
        self.custom_parser_values = custom_parser_values or {}

    def add_part(self, part):
        """Append a PartialSegment to this segment."""
        self.parts.append(part)

    def dumps(self, last_segment, timespec='milliseconds'):
        """Serialize this segment relative to *last_segment*: keys and init
        sections are only re-emitted when they differ from the previous
        segment's."""
        output = []
        if not last_segment or (self.keys and self.keys != last_segment.keys):
            for key in self.keys:
                output.append(str(key) + '\n')

        if last_segment and self.init_section != last_segment.init_section:
            # An init section, once started, must carry through the playlist.
            if not self.init_section:
                raise MalformedPlaylistError(
                    "init section can't be None if previous is not None")
            output.append(str(self.init_section))
            output.append('\n')
        else:
            if self.init_section and last_segment is None:
                output.append(str(self.init_section))
                output.append('\n')

        if self.discontinuity:
            output.append('#EXT-X-DISCONTINUITY\n')
        if self.program_date_time:
            output.append('#EXT-X-PROGRAM-DATE-TIME:%s\n' %
                          format_date_time(self.program_date_time, timespec=timespec))

        if len(self.dateranges):
            output.append(str(self.dateranges))
            output.append('\n')

        if self.cue_out_start:
            if self.oatcls_scte35:
                output.append(f'{ext_oatcls_scte35}:{self.oatcls_scte35}\n')

            if self.asset_metadata:
                asset_suffix = []
                for metadata_key, metadata_value in self.asset_metadata.items():
                    asset_suffix.append(f'{metadata_key.upper()}={metadata_value}')
                output.append(f"{ext_x_asset}:{','.join(asset_suffix)}\n")

            output.append('#EXT-X-CUE-OUT{}\n'.format(
                (':' + self.scte35_duration) if self.scte35_duration else ''))
        elif self.cue_out:
            cue_out_cont_suffix = []
            if self.scte35_elapsedtime:
                cue_out_cont_suffix.append(f'ElapsedTime={self.scte35_elapsedtime}')
            if self.scte35_duration:
                cue_out_cont_suffix.append(f'Duration={self.scte35_duration}')
            if self.scte35:
                cue_out_cont_suffix.append(f'SCTE35={self.scte35}')
            if cue_out_cont_suffix:
                cue_out_cont_suffix = ':' + ','.join(cue_out_cont_suffix)
            else:
                cue_out_cont_suffix = ''
            output.append(f'#EXT-X-CUE-OUT-CONT{cue_out_cont_suffix}\n')
        if self.cue_in:
            output.append('#EXT-X-CUE-IN\n')

        if self.parts:
            output.append(str(self.parts))
            output.append('\n')

        if self.uri:
            if self.duration is not None:
                output.append('#EXTINF:%s,' % number_to_string(self.duration))
                if self.title:
                    output.append(self.title)
                output.append('\n')

            if self.byterange:
                output.append('#EXT-X-BYTERANGE:%s\n' % self.byterange)

            if self.bitrate:
                output.append('#EXT-X-BITRATE:%s\n' % self.bitrate)

            if self.gap_tag:
                output.append('#EXT-X-GAP\n')

            output.append(self.uri)

        return ''.join(output)

    def __str__(self):
        return self.dumps(None)

    @property
    def base_path(self):
        return super(Segment, self).base_path

    @base_path.setter
    def base_path(self, newbase_path):
        # Propagate to parts and init section in addition to the mixin's work.
        super(Segment, self.__class__).base_path.fset(self, newbase_path)
        self.parts.base_path = newbase_path
        if self.init_section is not None:
            self.init_section.base_path = newbase_path

    @property
    def base_uri(self):
        return self._base_uri

    @base_uri.setter
    def base_uri(self, newbase_uri):
        self._base_uri = newbase_uri
        self.parts.base_uri = newbase_uri
        if self.init_section is not None:
            self.init_section.base_uri = newbase_uri
class SegmentList(list, GroupedBasePathMixin):
    """A list of Segment objects that serializes them in playlist order,
    letting each segment elide tags already emitted by its predecessor."""

    def __str__(self):
        output = []
        last_segment = None
        for segment in self:
            output.append(segment.dumps(last_segment))
            last_segment = segment
        return '\n'.join(output)

    @property
    def uri(self):
        """URIs of all segments, in order."""
        return [seg.uri for seg in self]

    def by_key(self, key):
        """Return the segments encrypted with *key*.

        FIX: segments whose ``keys`` attribute is None (possible when a
        Segment is built without ``keyobjects``) are skipped instead of
        raising ``TypeError: argument of type 'NoneType' is not iterable``.
        Passing ``key=None`` still matches unencrypted segments whose keys
        list contains a None entry.
        """
        return [segment for segment in self
                if segment.keys and key in segment.keys]
class PartialSegment(BasePathMixin):
    '''
    A partial segment (EXT-X-PART) from an M3U8 playlist.

    `uri`
      a string with the partial segment uri
    `program_date_time`
      the EXT-X-PROGRAM-DATE-TIME as a datetime, set only when the tag is
      present for this partial segment
    `current_program_date_time`
      datetime of this partial segment — either `program_date_time` or a
      value derived from earlier EXT-X-PROGRAM-DATE-TIME / EXTINF entries
    `duration`
      DURATION attribute
    `byterange`
      BYTERANGE attribute
    `independent`
      set when the partial segment contains an independent frame
    `gap`
      GAP attribute: the partial segment is not available
    `dateranges`
      dateranges that should precede the partial segment
    `gap_tag`
      EXT-X-GAP tag emitted before this part (one or more sibling parts
      carry GAP=YES)
    '''

    def __init__(self, base_uri, uri, duration, program_date_time=None,
                 current_program_date_time=None, byterange=None,
                 independent=None, gap=None, dateranges=None, gap_tag=None):
        self.base_uri = base_uri
        self.uri = uri
        self.duration = duration
        self.byterange = byterange
        self.program_date_time = program_date_time
        self.current_program_date_time = current_program_date_time
        self.independent = independent
        self.gap = gap
        self.gap_tag = gap_tag
        self.dateranges = DateRangeList(
            [DateRange(**daterange) for daterange in dateranges]
            if dateranges else []
        )

    def dumps(self, last_segment):
        chunks = []

        if len(self.dateranges):
            chunks.append(str(self.dateranges))
            chunks.append('\n')

        if self.gap_tag:
            chunks.append('#EXT-X-GAP\n')

        chunks.append('#EXT-X-PART:DURATION=%s,URI="%s"' % (
            number_to_string(self.duration), self.uri))

        # Optional attributes, in mandated output order.
        for attribute, value in (('INDEPENDENT', self.independent),
                                 ('BYTERANGE', self.byterange),
                                 ('GAP', self.gap)):
            if value:
                chunks.append(',%s=%s' % (attribute, value))

        return ''.join(chunks)

    def __str__(self):
        return self.dumps(None)
class PartialSegmentList(list, GroupedBasePathMixin):
    # Collection of PartialSegment entries; renders one EXT-X-PART per line.

    def __str__(self):
        return '\n'.join(str(part) for part in self)
class Key(BasePathMixin):
    '''
    Key used to encrypt the segments in a m3u8 playlist (EXT-X-KEY)

    `method`
      is a string. ex.: "AES-128"
    `uri`
      is a string. ex:: "https://priv.example.com/key.php?r=52"
    `base_uri`
      uri the key comes from in URI hierarchy. ex.: http://example.com/path/to
    `iv`
      initialization vector. a string representing a hexadecimal number. ex.: 0X12A
    '''
    tag = ext_x_key

    # Attributes that participate in equality, in comparison order.
    _compared_attributes = ('method', 'uri', 'iv', 'base_uri',
                            'keyformat', 'keyformatversions')

    def __init__(self, method, base_uri, uri=None, iv=None, keyformat=None, keyformatversions=None, **kwargs):
        self.method = method
        self.base_uri = base_uri
        self.uri = uri
        self.iv = iv
        self.keyformat = keyformat
        self.keyformatversions = keyformatversions
        self._extra_params = kwargs

    def __str__(self):
        attributes = ['METHOD=%s' % self.method]
        if self.uri:
            attributes.append('URI="%s"' % self.uri)
        if self.iv:
            attributes.append('IV=%s' % self.iv)
        if self.keyformat:
            attributes.append('KEYFORMAT="%s"' % self.keyformat)
        if self.keyformatversions:
            attributes.append('KEYFORMATVERSIONS="%s"' % self.keyformatversions)
        return '%s:%s' % (self.tag, ','.join(attributes))

    def __eq__(self, other):
        if not other:
            return False
        # Lazy generator keeps the original short-circuit semantics.
        return all(getattr(self, name) == getattr(other, name)
                   for name in self._compared_attributes)

    def __ne__(self, other):
        return not self.__eq__(other)
class InitializationSection(BasePathMixin):
    '''
    Media Initialization Section required to parse the applicable
    Media Segments (EXT-X-MAP)

    `uri`
      is a string. ex:: "https://priv.example.com/key.php?r=52"
    `byterange`
      value of BYTERANGE attribute
    `base_uri`
      uri the segment comes from in URI hierarchy. ex.: http://example.com/path/to
    '''
    tag = ext_x_map

    def __init__(self, base_uri, uri, byterange=None):
        self.base_uri = base_uri
        self.uri = uri
        self.byterange = byterange

    def __str__(self):
        attributes = []
        if self.uri:
            attributes.append('URI=' + quoted(self.uri))
        if self.byterange:
            attributes.append('BYTERANGE=' + quoted(self.byterange))
        return '%s:%s' % (self.tag, ','.join(attributes))

    def __eq__(self, other):
        if not other:
            return False
        # Lazy generator keeps the original short-circuit semantics.
        return all(getattr(self, name) == getattr(other, name)
                   for name in ('uri', 'byterange', 'base_uri'))

    def __ne__(self, other):
        return not self.__eq__(other)
class SessionKey(Key):
    # EXT-X-SESSION-KEY: identical attributes/serialization to Key,
    # only the tag differs.
    tag = ext_x_session_key
class Playlist(BasePathMixin):
    '''
    Playlist object representing a link to a variant M3U8 with a specific bitrate.

    Attributes:

    `stream_info` is a named tuple containing the attributes: `program_id`,
    `bandwidth`, `average_bandwidth`, `resolution`, `codecs` and `resolution`
    which is a tuple (w, h) of integers

    `media` is a list of related Media entries.

    More info: http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.10
    '''

    def __init__(self, uri, stream_info, media, base_uri):
        self.uri = uri
        self.base_uri = base_uri

        # RESOLUTION may be quoted; normalize "WxH" to an (int, int) pair.
        resolution = stream_info.get('resolution')
        if resolution is not None:
            resolution = resolution.strip('"')
            values = resolution.split('x')
            resolution_pair = (int(values[0]), int(values[1]))
        else:
            resolution_pair = None

        self.stream_info = StreamInfo(
            bandwidth=stream_info['bandwidth'],
            video=stream_info.get('video'),
            audio=stream_info.get('audio'),
            subtitles=stream_info.get('subtitles'),
            closed_captions=stream_info.get('closed_captions'),
            average_bandwidth=stream_info.get('average_bandwidth'),
            program_id=stream_info.get('program_id'),
            resolution=resolution_pair,
            codecs=stream_info.get('codecs'),
            frame_rate=stream_info.get('frame_rate'),
            video_range=stream_info.get('video_range'),
            hdcp_level=stream_info.get('hdcp_level'),
            pathway_id=stream_info.get('pathway_id'),
            stable_variant_id=stream_info.get('stable_variant_id')
        )
        # Collect Media entries whose GROUP-ID matches this variant's
        # audio/video/subtitles group references.
        self.media = []
        for media_type in ('audio', 'video', 'subtitles'):
            group_id = stream_info.get(media_type)
            if not group_id:
                continue

            self.media += filter(lambda m: m.group_id == group_id, media)

    def __str__(self):
        media_types = []
        stream_inf = [str(self.stream_info)]
        # Emit each related media TYPE at most once, e.g. AUDIO="group-id".
        for media in self.media:
            if media.type in media_types:
                continue
            else:
                media_types += [media.type]
                media_type = media.type.upper()
                stream_inf.append('%s="%s"' % (media_type, media.group_id))
        return '#EXT-X-STREAM-INF:' + ','.join(stream_inf) + '\n' + self.uri
class IFramePlaylist(BasePathMixin):
    '''
    IFramePlaylist object representing a link to a
    variant M3U8 i-frame playlist with a specific bitrate.

    Attributes:

    `iframe_stream_info` is a named tuple containing the attributes:
    `program_id`, `bandwidth`, `average_bandwidth`, `codecs`, `video_range`,
    `hdcp_level` and `resolution` which is a tuple (w, h) of integers

    More info: http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.13
    '''

    def __init__(self, base_uri, uri, iframe_stream_info):
        self.uri = uri
        self.base_uri = base_uri

        resolution = iframe_stream_info.get('resolution')
        if resolution is not None:
            # FIX: strip surrounding quotes before parsing, consistent with
            # Playlist — a quoted RESOLUTION value previously crashed int().
            resolution = resolution.strip('"')
            values = resolution.split('x')
            resolution_pair = (int(values[0]), int(values[1]))
        else:
            resolution_pair = None

        self.iframe_stream_info = StreamInfo(
            bandwidth=iframe_stream_info.get('bandwidth'),
            average_bandwidth=iframe_stream_info.get('average_bandwidth'),
            video=iframe_stream_info.get('video'),
            # Audio, subtitles, and closed captions should not exist in
            # EXT-X-I-FRAME-STREAM-INF, so just hardcode them to None.
            audio=None,
            subtitles=None,
            closed_captions=None,
            program_id=iframe_stream_info.get('program_id'),
            resolution=resolution_pair,
            codecs=iframe_stream_info.get('codecs'),
            video_range=iframe_stream_info.get('video_range'),
            hdcp_level=iframe_stream_info.get('hdcp_level'),
            frame_rate=None,
            pathway_id=iframe_stream_info.get('pathway_id'),
            stable_variant_id=iframe_stream_info.get('stable_variant_id')
        )

    def __str__(self):
        # Attributes are emitted in a fixed, spec-friendly order.
        iframe_stream_inf = []
        if self.iframe_stream_info.program_id:
            iframe_stream_inf.append('PROGRAM-ID=%d' %
                                     self.iframe_stream_info.program_id)
        if self.iframe_stream_info.bandwidth:
            iframe_stream_inf.append('BANDWIDTH=%d' %
                                     self.iframe_stream_info.bandwidth)
        if self.iframe_stream_info.average_bandwidth:
            iframe_stream_inf.append('AVERAGE-BANDWIDTH=%d' %
                                     self.iframe_stream_info.average_bandwidth)
        if self.iframe_stream_info.resolution:
            res = (str(self.iframe_stream_info.resolution[0]) + 'x' +
                   str(self.iframe_stream_info.resolution[1]))
            iframe_stream_inf.append('RESOLUTION=' + res)
        if self.iframe_stream_info.codecs:
            iframe_stream_inf.append('CODECS=' +
                                     quoted(self.iframe_stream_info.codecs))
        if self.iframe_stream_info.video_range:
            iframe_stream_inf.append('VIDEO-RANGE=%s' %
                                     self.iframe_stream_info.video_range)
        if self.iframe_stream_info.hdcp_level:
            iframe_stream_inf.append('HDCP-LEVEL=%s' %
                                     self.iframe_stream_info.hdcp_level)
        if self.uri:
            iframe_stream_inf.append('URI=' + quoted(self.uri))
        if self.iframe_stream_info.pathway_id:
            iframe_stream_inf.append(
                'PATHWAY-ID=' + quoted(self.iframe_stream_info.pathway_id)
            )
        if self.iframe_stream_info.stable_variant_id:
            iframe_stream_inf.append(
                'STABLE-VARIANT-ID=' + quoted(self.iframe_stream_info.stable_variant_id)
            )
        return '#EXT-X-I-FRAME-STREAM-INF:' + ','.join(iframe_stream_inf)
class StreamInfo(object):
    # Attribute container for EXT-X-STREAM-INF / EXT-X-I-FRAME-STREAM-INF.
    # Class-level defaults double as a catalogue of the known attributes.
    bandwidth = None
    closed_captions = None
    average_bandwidth = None
    program_id = None
    resolution = None
    codecs = None
    audio = None
    video = None
    subtitles = None
    frame_rate = None
    video_range = None
    hdcp_level = None
    pathway_id = None
    stable_variant_id = None

    def __init__(self, **kwargs):
        # Any attribute not supplied stays None (falls back to class default).
        self.bandwidth = kwargs.get("bandwidth")
        self.closed_captions = kwargs.get("closed_captions")
        self.average_bandwidth = kwargs.get("average_bandwidth")
        self.program_id = kwargs.get("program_id")
        self.resolution = kwargs.get("resolution")
        self.codecs = kwargs.get("codecs")
        self.audio = kwargs.get("audio")
        self.video = kwargs.get("video")
        self.subtitles = kwargs.get("subtitles")
        self.frame_rate = kwargs.get("frame_rate")
        self.video_range = kwargs.get("video_range")
        self.hdcp_level = kwargs.get("hdcp_level")
        self.pathway_id = kwargs.get("pathway_id")
        self.stable_variant_id = kwargs.get("stable_variant_id")

    def __str__(self):
        # Serialize only the attributes that are set, in a fixed order.
        stream_inf = []
        if self.program_id is not None:
            stream_inf.append('PROGRAM-ID=%d' % self.program_id)
        if self.closed_captions is not None:
            stream_inf.append('CLOSED-CAPTIONS=%s' % self.closed_captions)
        if self.bandwidth is not None:
            stream_inf.append('BANDWIDTH=%d' % self.bandwidth)
        if self.average_bandwidth is not None:
            stream_inf.append('AVERAGE-BANDWIDTH=%d' %
                              self.average_bandwidth)
        if self.resolution is not None:
            res = str(self.resolution[
                0]) + 'x' + str(self.resolution[1])
            stream_inf.append('RESOLUTION=' + res)
        if self.frame_rate is not None:
            # Quantize to 3 decimal places; %g trims trailing zeros.
            stream_inf.append('FRAME-RATE=%g' % decimal.Decimal(self.frame_rate).quantize(decimal.Decimal('1.000')))
        if self.codecs is not None:
            stream_inf.append('CODECS=' + quoted(self.codecs))
        if self.video_range is not None:
            stream_inf.append('VIDEO-RANGE=%s' % self.video_range)
        if self.hdcp_level is not None:
            stream_inf.append('HDCP-LEVEL=%s' % self.hdcp_level)
        if self.pathway_id is not None:
            stream_inf.append('PATHWAY-ID=' + quoted(self.pathway_id))
        if self.stable_variant_id is not None:
            stream_inf.append('STABLE-VARIANT-ID=' + quoted(self.stable_variant_id))
        return ",".join(stream_inf)
class Media(BasePathMixin):
    '''
    A media object from a M3U8 playlist
    https://tools.ietf.org/html/draft-pantos-http-live-streaming-16#section-4.3.4.1

    `uri`
      a string with the media uri

    `type`
    `group_id`
    `language`
    `assoc-language`
    `name`
    `default`
    `autoselect`
    `forced`
    `instream_id`
    `characteristics`
    `channels`
    `stable_rendition_id`
      attributes in the EXT-MEDIA tag

    `base_uri`
      uri the media comes from in URI hierarchy. ex.: http://example.com/path/to
    '''

    def __init__(self, uri=None, type=None, group_id=None, language=None,
                 name=None, default=None, autoselect=None, forced=None,
                 characteristics=None, channels=None, stable_rendition_id=None,
                 assoc_language=None, instream_id=None, base_uri=None, **extras):
        self.base_uri = base_uri
        self.uri = uri
        self.type = type
        self.group_id = group_id
        self.language = language
        self.assoc_language = assoc_language
        self.name = name
        self.default = default
        self.autoselect = autoselect
        self.forced = forced
        self.instream_id = instream_id
        self.characteristics = characteristics
        self.channels = channels
        self.stable_rendition_id = stable_rendition_id
        self.extras = extras

    def dumps(self):
        # (attribute name, value, needs quoting) in the mandated output order.
        attribute_spec = (
            ('URI', self.uri, True),
            ('TYPE', self.type, False),
            ('GROUP-ID', self.group_id, True),
            ('LANGUAGE', self.language, True),
            ('ASSOC-LANGUAGE', self.assoc_language, True),
            ('NAME', self.name, True),
            ('DEFAULT', self.default, False),
            ('AUTOSELECT', self.autoselect, False),
            ('FORCED', self.forced, False),
            ('INSTREAM-ID', self.instream_id, True),
            ('CHARACTERISTICS', self.characteristics, True),
            ('CHANNELS', self.channels, True),
            ('STABLE-RENDITION-ID', self.stable_rendition_id, True),
        )
        media_out = [
            '%s=%s' % (attribute, quoted(value) if quote else value)
            for attribute, value, quote in attribute_spec
            if value
        ]
        return '#EXT-X-MEDIA:' + ','.join(media_out)

    def __str__(self):
        return self.dumps()
class TagList(list):
    """A list of renderable tag objects; stringifies one tag per line."""

    def __str__(self):
        return '\n'.join(str(item) for item in self)
class MediaList(TagList, GroupedBasePathMixin):
    # TagList of Media objects; the mixin propagates base_uri/base_path.

    @property
    def uri(self):
        # URIs of all EXT-X-MEDIA entries, in order.
        return [media.uri for media in self]
class PlaylistList(TagList, GroupedBasePathMixin):
    # TagList of Playlist/IFramePlaylist objects; renders one per line.
    pass
class SessionDataList(TagList):
    # TagList of SessionData objects; renders one per line.
    pass
class Start(object):
    """Represents the EXT-X-START tag (preferred playback start point)."""

    def __init__(self, time_offset, precise=None):
        self.time_offset = float(time_offset)
        self.precise = precise

    def __str__(self):
        attributes = ['TIME-OFFSET=' + str(self.time_offset)]
        # PRECISE is only serialized for the two spec-legal values.
        if self.precise in ('YES', 'NO'):
            attributes.append('PRECISE=' + str(self.precise))
        return ext_x_start + ':' + ','.join(attributes)
class RenditionReport(BasePathMixin):
    """Represents the EXT-X-RENDITION-REPORT tag (low-latency HLS)."""

    def __init__(self, base_uri, uri, last_msn, last_part=None):
        self.base_uri = base_uri
        self.uri = uri
        self.last_msn = last_msn
        self.last_part = last_part

    def dumps(self):
        attributes = [
            'URI=' + quoted(self.uri),
            'LAST-MSN=' + number_to_string(self.last_msn),
        ]
        if self.last_part is not None:
            attributes.append('LAST-PART=' + number_to_string(self.last_part))
        return '#EXT-X-RENDITION-REPORT:' + ','.join(attributes)

    def __str__(self):
        return self.dumps()
class RenditionReportList(list, GroupedBasePathMixin):
    # Collection of RenditionReport entries; renders one per line.

    def __str__(self):
        return '\n'.join(str(report) for report in self)
class ServerControl(object):
    """Represents the EXT-X-SERVER-CONTROL tag (low-latency HLS)."""

    def __init__(self, can_skip_until=None, can_block_reload=None,
                 hold_back=None, part_hold_back=None,
                 can_skip_dateranges=None):
        self.can_skip_until = can_skip_until
        self.can_block_reload = can_block_reload
        self.hold_back = hold_back
        self.part_hold_back = part_hold_back
        self.can_skip_dateranges = can_skip_dateranges

    def __getitem__(self, item):
        # Dict-style access to attributes (used by dumps()).
        return getattr(self, item)

    def dumps(self):
        attributes = []
        if self.can_block_reload:
            attributes.append('CAN-BLOCK-RELOAD=%s' % self.can_block_reload)
        for name in ('hold_back', 'part_hold_back'):
            value = self[name]
            if value:
                attributes.append('%s=%s' % (denormalize_attribute(name),
                                             number_to_string(value)))
        if self.can_skip_until:
            attributes.append('CAN-SKIP-UNTIL=%s' %
                              number_to_string(self.can_skip_until))
        if self.can_skip_dateranges:
            attributes.append('CAN-SKIP-DATERANGES=%s' %
                              self.can_skip_dateranges)
        return '#EXT-X-SERVER-CONTROL:' + ','.join(attributes)

    def __str__(self):
        return self.dumps()
class Skip(object):
    """Represents the EXT-X-SKIP tag of a delta playlist."""

    def __init__(self, skipped_segments, recently_removed_dateranges=None):
        self.skipped_segments = skipped_segments
        self.recently_removed_dateranges = recently_removed_dateranges

    def dumps(self):
        attributes = ['SKIPPED-SEGMENTS=%s' %
                      number_to_string(self.skipped_segments)]
        if self.recently_removed_dateranges is not None:
            attributes.append('RECENTLY-REMOVED-DATERANGES=%s' %
                              quoted(self.recently_removed_dateranges))
        return '#EXT-X-SKIP:' + ','.join(attributes)

    def __str__(self):
        return self.dumps()
class PartInformation(object):
    """Represents the EXT-X-PART-INF tag (partial-segment target duration)."""

    def __init__(self, part_target=None):
        self.part_target = part_target

    def dumps(self):
        return '#EXT-X-PART-INF:PART-TARGET=%s' % number_to_string(self.part_target)

    def __str__(self):
        return self.dumps()
class PreloadHint(BasePathMixin):
    """Represents the EXT-X-PRELOAD-HINT tag (low-latency HLS)."""

    def __init__(self, type, base_uri, uri, byterange_start=None, byterange_length=None):
        # `type` is stored as hint_type to avoid shadowing the builtin.
        self.hint_type = type
        self.base_uri = base_uri
        self.uri = uri
        self.byterange_start = byterange_start
        self.byterange_length = byterange_length

    def __getitem__(self, item):
        # Dict-style access to attributes (used by dumps()).
        return getattr(self, item)

    def dumps(self):
        attributes = ['TYPE=' + self.hint_type, 'URI=' + quoted(self.uri)]
        for name in ('byterange_start', 'byterange_length'):
            value = self[name]
            if value is not None:
                attributes.append('%s=%s' % (denormalize_attribute(name),
                                             number_to_string(value)))
        return '#EXT-X-PRELOAD-HINT:' + ','.join(attributes)

    def __str__(self):
        return self.dumps()
class SessionData(object):
    """Represents the EXT-X-SESSION-DATA tag.

    When both value and uri are set, VALUE takes precedence and URI is
    omitted from the output.
    """

    def __init__(self, data_id, value=None, uri=None, language=None):
        self.data_id = data_id
        self.value = value
        self.uri = uri
        self.language = language

    def dumps(self):
        attributes = ['DATA-ID=' + quoted(self.data_id)]
        if self.value:
            attributes.append('VALUE=' + quoted(self.value))
        elif self.uri:
            attributes.append('URI=' + quoted(self.uri))
        if self.language:
            attributes.append('LANGUAGE=' + quoted(self.language))
        return '#EXT-X-SESSION-DATA:' + ','.join(attributes)

    def __str__(self):
        return self.dumps()
class DateRangeList(TagList):
    # TagList of DateRange objects; renders one per line.
    pass
class DateRange(object):
    """Represents the EXT-X-DATERANGE tag.

    ``id`` is required (KeyError if absent); everything else is optional.
    Client-defined ``X-...`` attributes arrive as keyword arguments with an
    ``x_`` prefix and are preserved in ``x_client_attrs``.
    """

    def __init__(self, **kwargs):
        self.id = kwargs['id']
        self.start_date = kwargs.get('start_date')
        # 'class' is a Python keyword, hence the trailing underscore.
        self.class_ = kwargs.get('class')
        self.end_date = kwargs.get('end_date')
        self.duration = kwargs.get('duration')
        self.planned_duration = kwargs.get('planned_duration')
        self.scte35_cmd = kwargs.get('scte35_cmd')
        self.scte35_out = kwargs.get('scte35_out')
        self.scte35_in = kwargs.get('scte35_in')
        self.end_on_next = kwargs.get('end_on_next')
        self.x_client_attrs = [ (attr, kwargs.get(attr)) for attr in kwargs if attr.startswith('x_') ]

    def dumps(self):
        daterange = []
        daterange.append('ID=' + quoted(self.id))

        # whilst START-DATE is technically REQUIRED by the spec, this is
        # contradicted by an example in the same document (see
        # https://tools.ietf.org/html/rfc8216#section-8.10), and also by
        # real-world implementations, so we make it optional here
        if (self.start_date):
            daterange.append('START-DATE=' + quoted(self.start_date))
        if (self.class_):
            daterange.append('CLASS=' + quoted(self.class_))
        if (self.end_date):
            daterange.append('END-DATE=' + quoted(self.end_date))
        if (self.duration):
            daterange.append('DURATION=' + number_to_string(self.duration))
        if (self.planned_duration):
            daterange.append('PLANNED-DURATION=' + number_to_string(self.planned_duration))
        if (self.scte35_cmd):
            daterange.append('SCTE35-CMD=' + self.scte35_cmd)
        if (self.scte35_out):
            daterange.append('SCTE35-OUT=' + self.scte35_out)
        if (self.scte35_in):
            daterange.append('SCTE35-IN=' + self.scte35_in)
        if (self.end_on_next):
            daterange.append('END-ON-NEXT=' + self.end_on_next)

        # client attributes sorted alphabetically output order is predictable
        for attr, value in sorted(self.x_client_attrs):
            daterange.append('%s=%s' % (
                denormalize_attribute(attr),
                value
            ))

        return '#EXT-X-DATERANGE:' + ','.join(daterange)

    def __str__(self):
        return self.dumps()
class ContentSteering(BasePathMixin):
    """Represents an #EXT-X-CONTENT-STEERING tag."""

    def __init__(self, base_uri, server_uri, pathway_id=None):
        self.base_uri = base_uri
        self.uri = server_uri
        self.pathway_id = pathway_id

    def dumps(self):
        """Serialize the tag; PATHWAY-ID is emitted only when present."""
        parts = ['SERVER-URI=' + quoted(self.uri)]
        if self.pathway_id is not None:
            parts.append('PATHWAY-ID=' + quoted(self.pathway_id))
        return '#EXT-X-CONTENT-STEERING:' + ','.join(parts)

    def __str__(self):
        return self.dumps()
def find_key(keydata, keylist):
    """Return the entry of *keylist* matching *keydata*, or None.

    A falsy *keydata* means no key applies, so None is returned.  Raises
    KeyError when key data is present but nothing in *keylist* matches
    on uri, method and iv.
    """
    if not keydata:
        return None
    wanted = (keydata.get('uri', None),
              keydata.get('method', 'NONE'),
              keydata.get('iv', None))
    for candidate in keylist:
        # Skip placeholder entries; compare the identifying triple at once.
        if candidate and (candidate.uri, candidate.method, candidate.iv) == wanted:
            return candidate
    raise KeyError("No key found for key data")
def denormalize_attribute(attribute):
    """Map a pythonic attribute name to HLS form (dashes, upper case)."""
    return '-'.join(attribute.split('_')).upper()
def quoted(string):
    """Wrap *string* in double quotes for use in an attribute list."""
    return '"{}"'.format(string)
def number_to_string(number):
    """Render *number* for an HLS attribute.

    Integral values drop the fractional part ('5', not '5.0'); other
    values are normalized to strip trailing zeros ('2.50' -> '2.5').
    """
    with decimal.localcontext() as ctx:
        ctx.prec = 20  # set floating point precision
        value = decimal.Decimal(str(number))
        if value == value.to_integral_value():
            # quantize (rather than normalize) avoids '1E+2'-style output.
            return str(value.quantize(decimal.Decimal(1)))
        return str(value.normalize())
"""A simple double-DQN agent trained to play BSuite's Catch env."""
import collections
import random
from absl import app
from absl import flags
from bsuite.environments import catch
import haiku as hk
from haiku import nets
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
from rlax.examples import experiment
# Named containers threaded through the actor/learner loop.
Params = collections.namedtuple("Params", "online target")  # online + target network params
ActorState = collections.namedtuple("ActorState", "count")  # actor step counter (drives epsilon schedule)
ActorOutput = collections.namedtuple("ActorOutput", "actions q_values")
LearnerState = collections.namedtuple("LearnerState", "count opt_state")  # learner step counter + optimiser state
Data = collections.namedtuple("Data", "obs_tm1 a_tm1 r_t discount_t obs_t")  # one batch of transitions
# Command-line flags controlling training, exploration and evaluation.
FLAGS = flags.FLAGS
flags.DEFINE_integer("seed", 42, "Random seed.")
flags.DEFINE_integer("train_episodes", 301, "Number of train episodes.")
flags.DEFINE_integer("batch_size", 32, "Size of the training batch")
# NOTE(review): declared as a float but used as a step period in
# optax.periodic_update -- consider DEFINE_integer; confirm intent.
flags.DEFINE_float("target_period", 50, "How often to update the target net.")
flags.DEFINE_integer("replay_capacity", 2000, "Capacity of the replay buffer.")
flags.DEFINE_integer("hidden_units", 50, "Number of network hidden units.")
flags.DEFINE_float("epsilon_begin", 1., "Initial epsilon-greedy exploration.")
flags.DEFINE_float("epsilon_end", 0.01, "Final epsilon-greedy exploration.")
flags.DEFINE_integer("epsilon_steps", 1000, "Steps over which to anneal eps.")
flags.DEFINE_float("discount_factor", 0.99, "Q-learning discount factor.")
flags.DEFINE_float("learning_rate", 0.005, "Optimizer learning rate.")
flags.DEFINE_integer("eval_episodes", 100, "Number of evaluation episodes.")
flags.DEFINE_integer("evaluate_every", 50,
                     "Number of episodes between evaluations.")
def build_network(num_actions: int) -> hk.Transformed:
    """Factory for a simple MLP network for approximating Q-values."""

    def q_net(obs):
        model = hk.Sequential([
            hk.Flatten(),
            nets.MLP([FLAGS.hidden_units, num_actions]),
        ])
        return model(obs)

    return hk.without_apply_rng(hk.transform(q_net))
class ReplayBuffer(object):
    """A simple Python replay buffer."""

    def __init__(self, capacity):
        # Track the last two environment outputs and the action in between.
        self._prev = None
        self._action = None
        self._latest = None
        self.buffer = collections.deque(maxlen=capacity)

    def push(self, env_output, action):
        """Record a step; stores a transition once two outputs are known."""
        self._prev, self._latest = self._latest, env_output
        self._action = action
        if action is not None:
            transition = (self._prev.observation, self._action,
                          self._latest.reward, self._latest.discount,
                          self._latest.observation)
            self.buffer.append(transition)

    def sample(self, batch_size):
        """Sample a batch; discounts are pre-multiplied by the discount factor."""
        batch = random.sample(self.buffer, batch_size)
        obs_tm1, a_tm1, r_t, discount_t, obs_t = zip(*batch)
        return (np.stack(obs_tm1), np.asarray(a_tm1), np.asarray(r_t),
                np.asarray(discount_t) * FLAGS.discount_factor, np.stack(obs_t))

    def is_ready(self, batch_size):
        return len(self.buffer) >= batch_size
class DQN:
    """A simple DQN agent.

    Holds online/target Q-network parameters (see ``Params``) and exposes
    jitted ``actor_step``/``learner_step`` plus initialisers for their state.
    """

    def __init__(self, observation_spec, action_spec, epsilon_cfg, target_period,
                 learning_rate):
        self._observation_spec = observation_spec
        self._action_spec = action_spec
        # The target parameters are refreshed every `target_period` learner steps.
        self._target_period = target_period
        # Neural net and optimiser.
        self._network = build_network(action_spec.num_values)
        self._optimizer = optax.adam(learning_rate)
        # Schedule mapping the actor's step count to the exploration epsilon.
        self._epsilon_by_frame = optax.polynomial_schedule(**epsilon_cfg)
        # Jitting for speed.
        self.actor_step = jax.jit(self.actor_step)
        self.learner_step = jax.jit(self.learner_step)

    def initial_params(self, key):
        """Initialise parameters; the target starts as a copy of the online net."""
        sample_input = self._observation_spec.generate_value()
        sample_input = jnp.expand_dims(sample_input, 0)  # add batch dimension
        online_params = self._network.init(key, sample_input)
        return Params(online_params, online_params)

    def initial_actor_state(self):
        """Actor state is a scalar step counter used by the epsilon schedule."""
        actor_count = jnp.zeros((), dtype=jnp.float32)
        return ActorState(actor_count)

    def initial_learner_state(self, params):
        """Learner state: a step counter plus the optimiser state."""
        learner_count = jnp.zeros((), dtype=jnp.float32)
        opt_state = self._optimizer.init(params.online)
        return LearnerState(learner_count, opt_state)

    def actor_step(self, params, env_output, actor_state, key, evaluation):
        """Pick an action: epsilon-greedy when training, greedy when evaluating."""
        obs = jnp.expand_dims(env_output.observation, 0)  # add dummy batch
        q = self._network.apply(params.online, obs)[0]  # remove dummy batch
        epsilon = self._epsilon_by_frame(actor_state.count)
        train_a = rlax.epsilon_greedy(epsilon).sample(key, q)
        eval_a = rlax.greedy().sample(key, q)
        # lax.select keeps both branches traceable under jit.
        a = jax.lax.select(evaluation, eval_a, train_a)
        return ActorOutput(actions=a, q_values=q), ActorState(actor_state.count + 1)

    def learner_step(self, params, data, learner_state, unused_key):
        """One SGD step on the double-Q loss; periodically syncs the target net."""
        # Copies online -> target only when count hits a multiple of the period.
        target_params = optax.periodic_update(params.online, params.target,
                                              learner_state.count,
                                              self._target_period)
        dloss_dtheta = jax.grad(self._loss)(params.online, target_params, *data)
        updates, opt_state = self._optimizer.update(dloss_dtheta,
                                                    learner_state.opt_state)
        online_params = optax.apply_updates(params.online, updates)
        return (Params(online_params, target_params),
                LearnerState(learner_state.count + 1, opt_state))

    def _loss(self, online_params, target_params, obs_tm1, a_tm1, r_t, discount_t,
              obs_t):
        """Double-Q loss: the online net selects the next action, the target
        net evaluates it (via rlax.double_q_learning)."""
        q_tm1 = self._network.apply(online_params, obs_tm1)
        q_t_val = self._network.apply(target_params, obs_t)
        q_t_select = self._network.apply(online_params, obs_t)
        batched_loss = jax.vmap(rlax.double_q_learning)
        td_error = batched_loss(q_tm1, a_tm1, r_t, discount_t, q_t_val, q_t_select)
        return jnp.mean(rlax.l2_loss(td_error))
def main(unused_arg):
    """Build the Catch environment and a DQN agent, then run training."""
    env = catch.Catch(seed=FLAGS.seed)
    # Linear (power=1) annealing schedule for epsilon-greedy exploration.
    epsilon_cfg = {
        "init_value": FLAGS.epsilon_begin,
        "end_value": FLAGS.epsilon_end,
        "transition_steps": FLAGS.epsilon_steps,
        "power": 1.,
    }
    agent = DQN(
        observation_spec=env.observation_spec(),
        action_spec=env.action_spec(),
        epsilon_cfg=epsilon_cfg,
        target_period=FLAGS.target_period,
        learning_rate=FLAGS.learning_rate,
    )
    replay = ReplayBuffer(FLAGS.replay_capacity)
    experiment.run_loop(
        agent=agent,
        environment=env,
        accumulator=replay,
        seed=FLAGS.seed,
        batch_size=FLAGS.batch_size,
        train_episodes=FLAGS.train_episodes,
        evaluate_every=FLAGS.evaluate_every,
        eval_episodes=FLAGS.eval_episodes,
    )
# Entry point: let absl parse FLAGS, then run main().
if __name__ == "__main__":
app.run(main) | /rlax-0.1.6-py3-none-any.whl/examples/simple_dqn.py | 0.863377 | 0.473779 | simple_dqn.py | pypi |
import collections
from absl import app
from absl import flags
from bsuite.environments import catch
from bsuite.utils import wrappers
import haiku as hk
from haiku import nets
import jax
import jax.numpy as jnp
import optax
import rlax
from rlax.examples import experiment
# Named containers: the actor's chosen action, and a single environment
# transition (s, a, r, discount, s').
ActorOutput = collections.namedtuple("ActorOutput", "actions")
Transition = collections.namedtuple("Transition",
                                    "obs_tm1 a_tm1 r_t discount_t obs_t")
# Command-line flags controlling the environment, agent and evaluation.
FLAGS = flags.FLAGS
flags.DEFINE_integer("seed", 42, "Random seed.")
# Large reward scale stresses value normalization -- the point of PopArt.
flags.DEFINE_float("reward_scale", 10000, "Reward scale on Catch.")
flags.DEFINE_integer("train_episodes", 1000, "Number of train episodes.")
flags.DEFINE_integer("num_hidden_units", 50, "Number of network hidden units.")
flags.DEFINE_float("epsilon", 0.01, "Epsilon-greedy exploration probability.")
flags.DEFINE_float("discount_factor", 0.99, "Q-learning discount factor.")
flags.DEFINE_float("learning_rate", 0.005, "Optimizer learning rate.")
flags.DEFINE_float("pop_art_step_size", 3e-3, "PopArt normalization step size.")
flags.DEFINE_integer("eval_episodes", 100, "Number of evaluation episodes.")
flags.DEFINE_integer("evaluate_every", 50,
                     "Number of episodes between evaluations.")
def build_network(num_hidden_units: int, num_actions: int) -> hk.Transformed:
    """Factory for a simple MLP network for approximating Q-values."""

    def q_net(obs):
        def flatten(x):
            return jnp.reshape(x, (-1,))
        model = hk.Sequential(
            [flatten, nets.MLP([num_hidden_units, num_actions])])
        return model(obs)

    return hk.without_apply_rng(hk.transform(q_net))
class TransitionAccumulator:
    """Simple Python accumulator holding only the most recent transition."""

    def __init__(self):
        self._prev = None
        self._action = None
        self._latest = None

    def push(self, env_output, action):
        """Record a step: what was latest becomes the previous timestep."""
        self._prev, self._latest = self._latest, env_output
        self._action = action

    def sample(self, batch_size):
        """Return the single stored transition (only batch_size == 1 supported)."""
        assert batch_size == 1
        return Transition(self._prev.observation, self._action,
                          self._latest.reward, self._latest.discount,
                          self._latest.observation)

    def is_ready(self, batch_size):
        assert batch_size == 1
        return self._prev is not None
class PopArtAgent:
    """An online Q-learning deep RL agent with PopArt.

    PopArt adaptively normalises value targets and rescales the final
    linear layer so the network's unnormalized outputs are preserved
    across statistics updates.
    """

    def __init__(self, observation_spec, action_spec, num_hidden_units, epsilon,
                 learning_rate, pop_art_step_size):
        self._observation_spec = observation_spec
        self._action_spec = action_spec
        self._epsilon = epsilon  # fixed epsilon-greedy exploration rate
        # Neural net and optimiser.
        self._network = build_network(num_hidden_units, action_spec.num_values)
        self._optimizer = optax.adam(learning_rate)
        # Jitting for speed.
        self.actor_step = jax.jit(self.actor_step)
        self.learner_step = jax.jit(self.learner_step)
        # PopArt state initialiser and update fn; the scale statistics are
        # clipped to [scale_lb, scale_ub] for numerical stability.
        self._initial_pop_art_state, self._pop_art_update = rlax.popart(
            num_outputs=1, step_size=pop_art_step_size, scale_lb=1e-5, scale_ub=1e5)

    def initial_params(self, key):
        """Initialise the Q-network parameters from the observation spec."""
        sample_input = self._observation_spec.generate_value()
        return self._network.init(key, sample_input)

    def initial_actor_state(self):
        # The actor is stateless.
        return ()

    def initial_learner_state(self, params):
        """Learner state: optimiser state plus PopArt statistics."""
        return self._optimizer.init(params), self._initial_pop_art_state()

    def actor_step(self, params, env_output, actor_state, key, evaluation):
        """Pick an action from the normalized Q-values."""
        norm_q = self._network.apply(params, env_output.observation)
        # This is equivalent to epsilon-greedy on the (unnormalized) Q-values
        # because normalization is linear, therefore the argmaxes are the same.
        train_a = rlax.epsilon_greedy(self._epsilon).sample(key, norm_q)
        eval_a = rlax.greedy().sample(key, norm_q)
        a = jax.lax.select(evaluation, eval_a, train_a)
        return ActorOutput(actions=a), actor_state

    def learner_step(self, params, data, learner_state, unused_key):
        """One SGD step; _loss also returns the updated PopArt statistics."""
        opt_state, pop_art_state = learner_state
        dloss_dtheta, pop_art_state = jax.grad(
            self._loss, has_aux=True)(params, pop_art_state, *data)
        updates, opt_state = self._optimizer.update(dloss_dtheta, opt_state)
        params = optax.apply_updates(params, updates)
        return params, (opt_state, pop_art_state)

    def _loss(self, params, pop_art_state, obs_tm1, a_tm1, r_t, discount_t,
              obs_t):
        """Loss function."""
        indices = jnp.array(0)  # Only one output for normalization.
        # Calculate targets by unnormalizing Q-values output by network.
        norm_q_t = self._network.apply(params, obs_t)
        q_t = rlax.unnormalize(pop_art_state, norm_q_t, indices)
        target_tm1 = r_t + discount_t * jnp.max(q_t)
        # Update PopArt statistics and use them to update the network parameters to
        # POP (preserve outputs precisely). If there were target networks, the
        # parameters for these would also need to be updated.
        final_linear_module_name = "mlp/~/linear_1"
        mutable_params = hk.data_structures.to_mutable_dict(params)
        linear_params = mutable_params[final_linear_module_name]
        popped_linear_params, new_pop_art_state = self._pop_art_update(
            params=linear_params, state=pop_art_state, targets=target_tm1,
            indices=indices)
        mutable_params[final_linear_module_name] = popped_linear_params
        popped_params = hk.data_structures.to_immutable_dict(mutable_params)
        # Normalize target with updated PopArt statistics.
        norm_target_tm1 = rlax.normalize(new_pop_art_state, target_tm1, indices)
        # Calculate parameter update with normalized target and popped parameters.
        norm_q_t = self._network.apply(popped_params, obs_t)
        norm_q_tm1 = self._network.apply(popped_params, obs_tm1)
        td_error = jax.lax.stop_gradient(norm_target_tm1) - norm_q_tm1[a_tm1]
        return rlax.l2_loss(td_error), new_pop_art_state
def main(unused_arg):
    """Train a PopArt Q-learning agent on reward-scaled Catch."""
    env = wrappers.RewardScale(
        catch.Catch(seed=FLAGS.seed), reward_scale=FLAGS.reward_scale)
    agent = PopArtAgent(
        observation_spec=env.observation_spec(),
        action_spec=env.action_spec(),
        num_hidden_units=FLAGS.num_hidden_units,
        epsilon=FLAGS.epsilon,
        learning_rate=FLAGS.learning_rate,
        pop_art_step_size=FLAGS.pop_art_step_size,
    )
    experiment.run_loop(
        agent=agent,
        environment=env,
        accumulator=TransitionAccumulator(),
        seed=FLAGS.seed,
        batch_size=1,
        train_episodes=FLAGS.train_episodes,
        evaluate_every=FLAGS.evaluate_every,
        eval_episodes=FLAGS.eval_episodes,
    )
# Entry point: let absl parse FLAGS, then run main().
if __name__ == "__main__":
app.run(main) | /rlax-0.1.6-py3-none-any.whl/examples/pop_art.py | 0.822759 | 0.498901 | pop_art.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.