code stringlengths 17 6.64M |
|---|
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
|
class IgnoreInput(nn.Module):
    """Learnable mixture weights that disregard the input.

    Returns the softmax of a learned weight vector, replicated once per
    batch element of ``x`` (only ``x.shape[0]`` is read).
    """

    def __init__(self, n_experts):
        super().__init__()
        self.weights = Parameter(torch.Tensor(n_experts))

    def forward(self, x):
        batch_size = x.shape[0]
        mixture = F.softmax(self.weights, dim=0)
        return torch.stack([mixture] * batch_size, dim=0)
|
class TaskonomyFeaturesOnlyNet(nn.Module):
    """Policy perception network consuming precomputed taskonomy features.

    Runs a small conv/FC tower over stacked taskonomy feature maps,
    optionally concatenating a 3-channel target encoding and a separate
    map tower.

    :param n_frames: number of stacked observation frames
    :param n_map_channels: channels per map frame; 0 disables the map tower
    :param use_target: concatenate the target encoding if True
    :param output_size: dimensionality of the final feature vector
    :param num_tasks: number of taskonomy tasks (8 channels each)
    :param extra_kwargs: optional flags (``features_double``,
        ``normalize_taskonomy``); default None (was a mutable ``{}`` default)
    """

    def __init__(self, n_frames, n_map_channels=0, use_target=True,
                 output_size=512, num_tasks=1, extra_kwargs=None):
        super(TaskonomyFeaturesOnlyNet, self).__init__()
        extra_kwargs = {} if extra_kwargs is None else extra_kwargs
        self.n_frames = n_frames
        self.use_target = use_target
        self.use_map = n_map_channels > 0
        self.map_channels = n_map_channels
        self.output_size = output_size
        if self.use_map:
            self.map_tower = atari_conv(self.n_frames * self.map_channels)
        # Target is encoded as a 3-channel image when present.
        self.target_channels = 3 if self.use_target else 0
        self.conv1 = nn.Conv2d(self.n_frames * (8 * num_tasks + self.target_channels), 32, 4, stride=2)
        self.flatten = Flatten()
        # Each conv tower yields a 32x7x7 map; the map tower doubles the FC input.
        self.fc1 = init_(nn.Linear(32 * 7 * 7 * (self.use_map + 1), 1024))
        self.fc2 = init_(nn.Linear(1024, self.output_size))
        self.groupnorm = nn.GroupNorm(8, 8, affine=False)
        self.extra_kwargs = extra_kwargs
        # features_double rescales features stored at half range — TODO confirm.
        self.features_scaling = 2 if extra_kwargs.get('features_double', False) else 1
        self.normalize_taskonomy = extra_kwargs.get('normalize_taskonomy', True)
        if not self.normalize_taskonomy:
            warnings.warn('Not normalizing taskonomy features in TaskonomyFeaturesOnlyNet, are you sure?')

    def forward(self, x, cache=None):
        """Map an observation dict with keys 'taskonomy' (and optionally
        'target', 'map') to a flat feature vector of size ``output_size``.

        ``cache`` is accepted for interface compatibility and unused
        (was a mutable ``{}`` default).
        """
        # The AttributeError fallbacks keep checkpoints saved before
        # `features_scaling` / `normalize_taskonomy` existed loadable.
        try:
            x_taskonomy = x['taskonomy'] * self.features_scaling
        except AttributeError:
            x_taskonomy = x['taskonomy']
        try:
            if self.normalize_taskonomy:
                x_taskonomy = self.groupnorm(x_taskonomy)
        except AttributeError:
            x_taskonomy = self.groupnorm(x_taskonomy)
        if self.use_target:
            x_taskonomy = torch.cat([x_taskonomy, x['target']], dim=1)
        x_taskonomy = F.relu(self.conv1(x_taskonomy))
        if self.use_map:
            x_map = x['map']
            x_map = self.map_tower(x_map)
            x_taskonomy = torch.cat([x_map, x_taskonomy], dim=1)
        x = self.flatten(x_taskonomy)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return x
|
class Expert():
    """Policy-like object that replays expert actions saved on disk.

    Each episode's trajectory lives under ``data_dir/<split>/<scene>/<episode>``
    as per-step ``.npz``/``.png`` dumps.  ``act`` returns the stored action for
    the current step; it can optionally diff live observations against the
    stored ones, and/or run a trained "follower" policy for disagreement
    logging.

    :param data_dir: root directory of the saved trajectories
    :param compare_with_saved_trajs: if True, compare live observations with
        the saved ones on every step
    :param follower: optional path to an imitation checkpoint whose model is
        run alongside the expert for comparison
    """

    def __init__(self, data_dir, compare_with_saved_trajs=False, follower=None):
        self.data_dir = data_dir
        self.compare_with_saved_trajs = compare_with_saved_trajs
        self.traj_dir = None    # directory of the current episode (set in reset)
        self.action_idx = 0     # index of the next saved step to replay
        self.same_as_il = True  # cleared once live data diverges from the saved run
        self.follower = None
        if follower is not None:
            # NOTE(review): torch.load unpickles arbitrary objects; only load
            # trusted checkpoints.
            checkpoint_obj = torch.load(follower)
            start_epoch = checkpoint_obj['epoch']
            print('Loaded imitator (epoch {}) from {}'.format(start_epoch, follower))
            self.follower = checkpoint_obj['model']
            self.follower.eval()

    def reset(self, envs):
        """Point the replay at the trajectory matching envs' current episode."""
        # Unwraps several nested env wrappers to reach the underlying episode.
        scene_id = envs.env.env.env._env.current_episode.scene_id
        scene_id = scene_id.split('/')[-1].split('.')[0]
        episode_id = envs.env.env.env._env.current_episode.episode_id
        self.traj_dir = self._find_traj_dir(scene_id, episode_id)
        self.action_idx = 0
        self.same_as_il = True
        if self.follower is not None:
            self.follower.reset()

    def _find_traj_dir(self, scene_id, episode_id):
        """Locate the saved trajectory directory for (scene, episode)."""
        for split in SPLITS:
            for building in os.listdir(os.path.join(self.data_dir, split)):
                if building == scene_id:
                    for episode in os.listdir(os.path.join(self.data_dir, split, building)):
                        if str(episode_id) in episode:
                            return os.path.join(self.data_dir, split, building, episode)
        assert False, f'Could not find scene {scene_id}, episode {episode_id} in {self.data_dir}'

    def _load_npz(self, fn):
        """Load array 'arr_0' from an .npz file inside the current trajectory."""
        path = os.path.join(self.traj_dir, fn)
        return np.load(path)['arr_0']

    def _cmp_with_il(self, k, observations, img=False, save_png=False, printer=True):
        """Compare live observation ``k`` against the saved step's version.

        Returns the summed elementwise difference (0 if the key is absent).
        """
        if k not in observations:
            if printer:
                print(f'Key {k}: cannot find')
            return 0
        if img:
            il_obj = Image.open(os.path.join(self.traj_dir, f'{k}_{self.action_idx:03d}.png'))
            il_obj = rgb_t(il_obj).cuda()
        else:
            il_obj = torch.Tensor(self._load_npz(f'{k}_{self.action_idx:03d}.npz')).cuda()
        # Live observations stack 4 frames; compare only the most recent one.
        num_channels = observations[k].shape[1] // 4
        rl_obj = observations[k][0][-num_channels:]
        if save_png:
            debug_dir = os.path.join('/mnt/data/debug/', str(self.action_idx))
            os.makedirs(debug_dir, exist_ok=True)
            show(il_obj).save(os.path.join(debug_dir, f'il_{k}.png'))
            show(rl_obj).save(os.path.join(debug_dir, f'rl_{k}.png'))
        diff = torch.sum(il_obj - rl_obj)
        if printer:
            print(f'Key {k}: {diff}')
        return diff

    def _debug(self, observations):
        """Dump comparisons for all observation keys (verbose until first divergence)."""
        self._cmp_with_il('map', observations, save_png=self.same_as_il, printer=self.same_as_il)
        self._cmp_with_il('target', observations, printer=self.same_as_il)
        self._cmp_with_il('rgb_filled', observations, img=True, save_png=self.same_as_il, printer=self.same_as_il)
        self._cmp_with_il('taskonomy', observations, printer=self.same_as_il)

    def act(self, observations, states, mask_done, deterministic=True):
        """Return the saved expert action for the current step.

        Mirrors the (value, action, log_prob, states) tuple interface of
        learned policies; value and log_prob are dummy zeros.
        """
        action = self._load_npz(f'action_{self.action_idx:03d}.npz')
        if len(action.shape) == 0:
            action = int(action)        # stored as a scalar action id
        else:
            action = np.argmax(action)  # stored as a one-hot/logit vector
        # Fix: previously mapd/taskonomyd were only bound inside the
        # compare_with_saved_trajs branch, so the follower branch raised
        # NameError when comparison was disabled.
        mapd = taskonomyd = targetd = 0
        if self.compare_with_saved_trajs:
            mapd = self._cmp_with_il('map', observations, printer=False)
            taskonomyd = self._cmp_with_il('taskonomy', observations, printer=False)
            targetd = self._cmp_with_il('target', observations, printer=False)
            if (abs(mapd) > 0.1) or (abs(taskonomyd) > 0.1) or (abs(targetd) > 1e-06):
                print('-' * 50)
                print(f'Running {self.traj_dir} on step {self.action_idx}. expert: {action}, targetd {targetd} mapdif {mapd}, taskdif {taskonomyd}')
                self._debug(observations)
                self.same_as_il = False
        if self.follower is not None:
            (_, follower_action, _, _) = self.follower.act(observations, states, mask_done, True)
            follower_action = follower_action.squeeze(1).cpu().numpy()[0]
            if (follower_action != action) and (action != 3):
                print('-' * 50)
                print(f'Running {self.traj_dir} on step {self.action_idx}. expert: {action}, follower: {follower_action}, mapdif {mapd}, taskdif {taskonomyd}')
                self._debug(observations)
                self.same_as_il = False
        if action == 3:
            # Remap saved action id 3 to 2 — presumably aligning the recorded
            # action space with the env's; TODO confirm mapping.
            action = 2
        self.action_idx += 1
        return (0, torch.Tensor([action]).unsqueeze(1), 0, states)

    def cuda(self, *inputs, **kwargs):
        """No-op: the replayed expert has no parameters to move."""
        pass

    def eval(self, *inputs, **kwargs):
        """No-op: the replayed expert has no training mode."""
        pass
|
class ForwardModel(nn.Module):
    """Predict the next state embedding from (state, action)."""

    def __init__(self, state_shape, action_shape, hidden_size):
        super().__init__()
        in_features = state_shape + action_shape[1]
        self.fc1 = init_(nn.Linear(in_features, hidden_size))
        self.fc2 = init_(nn.Linear(hidden_size, state_shape))

    def forward(self, state, action):
        joint = torch.cat([state, action], 1)
        hidden = F.relu(self.fc1(joint))
        return self.fc2(hidden)
|
class InverseModel(nn.Module):
    """Predict action logits linking two consecutive state embeddings."""

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.fc1 = init_(nn.Linear(input_size * 2, hidden_size))
        self.fc2 = init_(nn.Linear(hidden_size, output_size))

    def forward(self, phi_t, phi_t_plus_1):
        pair = torch.cat([phi_t, phi_t_plus_1], 1)
        hidden = F.relu(self.fc1(pair))
        return self.fc2(hidden)
|
def action_to_one_hot(action: int) -> np.ndarray:
    """Return a one-hot float32 vector over the simulator action space.

    Fixed the return annotation: ``np.array`` is a function, the proper
    type is ``np.ndarray``.
    """
    one_hot = np.zeros(len(SimulatorActions), dtype=np.float32)
    one_hot[action] = 1
    return one_hot
|
class ShortestPathFollower():
    """Utility class for extracting the action on the shortest path to the
    goal.

    Args:
        sim: HabitatSim instance.
        goal_radius: Distance between the agent and the goal for it to be
            considered successful.
        return_one_hot: If true, returns a one-hot encoding of the action
            (useful for training ML agents). If false, returns the
            SimulatorAction.
    """

    def __init__(self, sim: HabitatSim, goal_radius: float, return_one_hot: bool=True):
        assert (getattr(sim, 'geodesic_distance', None) is not None), '{} must have a method called geodesic_distance'.format(type(sim).__name__)
        self._sim = sim
        # Largest geodesic improvement achievable by a single forward step.
        self._max_delta = (self._sim.config.FORWARD_STEP_SIZE - EPSILON)
        self._goal_radius = goal_radius
        self._step_size = self._sim.config.FORWARD_STEP_SIZE
        # Prefer the simulator's exact shortest-path API when available.
        self._mode = ('geodesic_path' if (getattr(sim, 'get_straight_shortest_path_points', None) is not None) else 'greedy')
        self._return_one_hot = return_one_hot

    def _get_return_value(self, action: SimulatorActions) -> Union[(SimulatorActions, np.array)]:
        # Convert to one-hot when configured, else pass the enum through.
        if self._return_one_hot:
            return action_to_one_hot(action.value)
        else:
            return action

    def get_next_action(self, goal_pos: np.array) -> Union[(SimulatorActions, np.array)]:
        'Returns the next action along the shortest path.'
        # Within the success radius: stop.
        if (self._geo_dist(goal_pos) <= self._goal_radius):
            return self._get_return_value(SimulatorActions.STOP)
        max_grad_dir = self._est_max_grad_dir(goal_pos)
        # No usable gradient direction — default to moving forward.
        if (max_grad_dir is None):
            return self._get_return_value(SimulatorActions.FORWARD)
        return self._step_along_grad(max_grad_dir)

    def _step_along_grad(self, grad_dir: np.quaternion) -> Union[(SimulatorActions, np.array)]:
        """Move forward if roughly facing grad_dir, else turn towards it."""
        current_state = self._sim.get_agent_state()
        alpha = angle_between_quaternions(grad_dir, current_state.rotation)
        if (alpha <= (np.deg2rad(self._sim.config.TURN_ANGLE) + EPSILON)):
            return self._get_return_value(SimulatorActions.FORWARD)
        else:
            # Probe a LEFT turn in the simulator to see whether it reduces
            # the angle to grad_dir, then restore the agent's state.
            sim_action = SimulatorActions.LEFT.value
            self._sim.step(sim_action)
            best_turn = (SimulatorActions.LEFT if (angle_between_quaternions(grad_dir, self._sim.get_agent_state().rotation) < alpha) else SimulatorActions.RIGHT)
            self._reset_agent_state(current_state)
            return self._get_return_value(best_turn)

    def _reset_agent_state(self, state: habitat_sim.AgentState) -> None:
        # Restore position/rotation without re-rendering sensors.
        self._sim.set_agent_state(state.position, state.rotation, reset_sensors=False)

    def _geo_dist(self, goal_pos: np.array) -> float:
        """Geodesic distance from the agent's current position to the goal."""
        return self._sim.geodesic_distance(self._sim.get_agent_state().position, goal_pos)

    def _est_max_grad_dir(self, goal_pos: np.array) -> np.array:
        """Estimate the rotation that maximally decreases geodesic distance."""
        current_state = self._sim.get_agent_state()
        current_pos = current_state.position
        if (self.mode == 'geodesic_path'):
            points = self._sim.get_straight_shortest_path_points(self._sim.get_agent_state().position, goal_pos)
            if (len(points) < 2):
                return None
            # Quaternion rotating the forward vector onto the first path
            # segment; the EPSILON cross-term breaks ties for 180-degree turns.
            max_grad_dir = quaternion_from_two_vectors(self._sim.forward_vector, ((points[1] - points[0]) + (EPSILON * np.cross(self._sim.up_vector, self._sim.forward_vector))))
            max_grad_dir.x = 0
            max_grad_dir = np.normalized(max_grad_dir)
        else:
            # Greedy probing: simulate a forward step at every heading and
            # keep the rotation with the biggest geodesic improvement.
            current_rotation = self._sim.get_agent_state().rotation
            current_dist = self._geo_dist(goal_pos)
            best_geodesic_delta = ((- 2) * self._max_delta)
            best_rotation = current_rotation
            for _ in range(0, 360, self._sim.config.TURN_ANGLE):
                sim_action = SimulatorActions.FORWARD.value
                self._sim.step(sim_action)
                new_delta = (current_dist - self._geo_dist(goal_pos))
                if (new_delta > best_geodesic_delta):
                    best_rotation = self._sim.get_agent_state().rotation
                    best_geodesic_delta = new_delta
                # Early exit once the improvement is within tolerance of the
                # best possible single-step improvement.
                if np.isclose(best_geodesic_delta, self._max_delta, rtol=(1 - np.cos(np.deg2rad(self._sim.config.TURN_ANGLE)))):
                    break
                # Undo the forward step, then rotate to probe the next heading.
                self._sim.set_agent_state(current_pos, self._sim.get_agent_state().rotation, reset_sensors=False)
                sim_action = SimulatorActions.LEFT.value
                self._sim.step(sim_action)
            self._reset_agent_state(current_state)
            max_grad_dir = best_rotation
        return max_grad_dir

    @property
    def mode(self):
        # Current planning mode: 'geodesic_path' or 'greedy'.
        return self._mode

    @mode.setter
    def mode(self, new_mode: str):
        """Sets the mode for how the greedy follower determines the best next
        step.

        Args:
            new_mode: geodesic_path indicates using the simulator's shortest
                path algorithm to find points on the map to navigate between.
                greedy indicates trying to move forward at all possible
                orientations and selecting the one which reduces the geodesic
                distance the most.
        """
        assert (new_mode in {'geodesic_path', 'greedy'})
        if (new_mode == 'geodesic_path'):
            assert (getattr(self._sim, 'get_straight_shortest_path_points', None) is not None)
        self._mode = new_mode
|
class RLSidetuneWrapper(nn.Module):
    """Wraps a main perception network, replacing its 'taskonomy' input with
    features computed online by a sidetuned encoder.

    ``extra_kwargs`` must contain:
        main_perception_network: name (evaluated) of the wrapped class
        sidetune_kwargs: kwargs for GenericSidetuneNetwork
    and may contain ``attrs_to_remember`` for MemoryFrameStacked.
    """

    def __init__(self, n_frames, blind=False, **kwargs):
        super(RLSidetuneWrapper, self).__init__()
        extra_kwargs = kwargs.pop('extra_kwargs')
        assert ('main_perception_network' in extra_kwargs), 'For RLSidetuneWrapper, need to include main class'
        assert ('sidetune_kwargs' in extra_kwargs), 'Cannot use sidetune network without kwargs'
        # NOTE(review): eval() of a config-supplied class name — only safe
        # with trusted configuration files.
        self.main_perception = eval(extra_kwargs['main_perception_network'])(n_frames, **kwargs)
        self.sidetuner = GenericSidetuneNetwork(**extra_kwargs['sidetune_kwargs'])
        attrs_to_remember = (extra_kwargs['attrs_to_remember'] if ('attrs_to_remember' in extra_kwargs) else [])
        # Wrap the sidetuner so its outputs are frame-stacked with memory.
        self.sidetuner = MemoryFrameStacked(self.sidetuner, n_frames, attrs_to_remember=attrs_to_remember)
        self.blind = blind
        assert (not (self.blind and (count_trainable_parameters(self.sidetuner) > 5))), 'Cannot be blind and have many sidetuner parameters'
        self.n_frames = n_frames

    def forward(self, x, cache=None):
        """Compute sidetune features, splice them into x['taskonomy'], run
        the main network, then restore x to its original contents."""
        # hasattr guard keeps checkpoints saved before `blind` existed loadable.
        if (hasattr(self, 'blind') and self.blind):
            sample = x[list(x.keys())[0]]
            batch_size = sample.shape[0]
            # Blind mode: zero features of the expected (8*n_frames, 16, 16) shape.
            x_sidetune = torch.zeros((batch_size, (8 * self.n_frames), 16, 16)).to(sample.device)
        else:
            x_sidetune = self.sidetuner(x, cache)
        # Temporarily overwrite x['taskonomy'] and undo the mutation afterwards.
        contains_taskonomy = ('taskonomy' in x)
        if contains_taskonomy:
            taskonomy_copy = x['taskonomy']
        x['taskonomy'] = x_sidetune
        # Some main networks do not accept a cache argument.
        try:
            ret = self.main_perception(x, cache)
        except TypeError:
            ret = self.main_perception(x)
        if contains_taskonomy:
            x['taskonomy'] = taskonomy_copy
        else:
            del x['taskonomy']
        return ret
|
class RLSidetuneNetwork(nn.Module):
    """Perception network that computes taskonomy-style features with a
    sidetuned encoder instead of consuming them precomputed.

    :param n_frames: number of stacked observation frames
    :param n_map_channels: channels per map frame; 0 disables the map tower
    :param use_target: concatenate the 3-channel target encoding if True
    :param output_size: dimensionality of the final feature vector
    :param num_tasks: number of taskonomy tasks (8 channels each)
    :param extra_kwargs: must contain 'sidetune_kwargs'; may contain
        'attrs_to_remember'; default None (was a mutable ``{}`` default)
    """

    def __init__(self, n_frames, n_map_channels=0, use_target=True,
                 output_size=512, num_tasks=1, extra_kwargs=None):
        super(RLSidetuneNetwork, self).__init__()
        extra_kwargs = {} if extra_kwargs is None else extra_kwargs
        assert 'sidetune_kwargs' in extra_kwargs, 'Cannot use sidetune network without kwargs'
        self.sidetuner = GenericSidetuneNetwork(**extra_kwargs['sidetune_kwargs'])
        attrs_to_remember = extra_kwargs.get('attrs_to_remember', [])
        # Wrap the sidetuner so its outputs are frame-stacked with memory.
        self.sidetuner = MemoryFrameStacked(self.sidetuner, n_frames, attrs_to_remember=attrs_to_remember)
        self.n_frames = n_frames
        self.use_target = use_target
        self.use_map = n_map_channels > 0
        self.map_channels = n_map_channels
        self.output_size = output_size
        if self.use_map:
            self.map_tower = atari_conv(self.n_frames * self.map_channels)
        # Target is encoded as a 3-channel image when present.
        self.target_channels = 3 if self.use_target else 0
        self.conv1 = nn.Conv2d(self.n_frames * (8 * num_tasks + self.target_channels), 32, 4, stride=2)
        self.flatten = Flatten()
        # Each conv tower yields a 32x7x7 map; the map tower doubles the FC input.
        self.fc1 = init_(nn.Linear(32 * 7 * 7 * (self.use_map + 1), 1024))
        self.fc2 = init_(nn.Linear(1024, self.output_size))
        self.groupnorm = nn.GroupNorm(8, 8, affine=False)

    def forward(self, x, cache):
        """Encode observations with the sidetuner, then run the conv/FC tower."""
        x_sidetune = self.sidetuner(x, cache)
        x_sidetune = self.groupnorm(x_sidetune)
        if self.use_target:
            x_sidetune = torch.cat([x_sidetune, x['target']], dim=1)
        x_sidetune = F.relu(self.conv1(x_sidetune))
        if self.use_map:
            x_map = x['map']
            x_map = self.map_tower(x_map)
            x_sidetune = torch.cat([x_map, x_sidetune], dim=1)
        x = self.flatten(x_sidetune)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return x
|
def getNChannels():
    """Return the module-level N_CHANNELS constant (input image channel count)."""
    return N_CHANNELS
|
class BaseModelSRL(nn.Module):
    """Base class for state-representation-learning (SRL) networks.

    Provides ``getStates`` mapping observations to states via ``forward``;
    subclasses must implement ``forward``.
    """

    def __init__(self):
        super(BaseModelSRL, self).__init__()

    def getStates(self, observations):
        """Return the state representation for the given observations.

        :param observations: (th.Tensor)
        :return: (th.Tensor)
        """
        return self.forward(observations)

    def forward(self, x):
        raise NotImplementedError
|
class BaseModelAutoEncoder(BaseModelSRL):
    """SRL network of the autoencoder family.

    ``encoder_conv``/``decoder_conv`` implement the autoencoder itself;
    ``forward`` combines the 'taskonomy' observation with optional target and
    map towers into a flat feature vector.

    Fixes over the original: ``encode``/``decode`` were missing ``return``
    statements (so ``getStates`` always returned None); unreachable code after
    ``forward``'s return and duplicated ``n_frames``/``output_size``
    assignments were removed.

    :param n_frames: number of stacked observation frames
    :param n_map_channels: map channels per frame; 0 disables the map tower
    :param use_target: concatenate the 3-channel target encoding if True
    :param output_size: size of the final feature vector
    """

    def __init__(self, n_frames, n_map_channels=0, use_target=True, output_size=512):
        super(BaseModelAutoEncoder, self).__init__()
        self.n_frames = n_frames
        self.output_size = output_size
        self.n_map_channels = n_map_channels
        self.use_target = use_target
        self.use_map = n_map_channels > 0
        if self.use_map:
            self.map_tower = nn.Sequential(
                atari_conv(self.n_frames * self.n_map_channels),
                nn.Conv2d(32, 64, kernel_size=4, stride=1),
                nn.ReLU(inplace=True))
        # Target is encoded as a 3-channel image when present.
        self.target_channels = 3 if self.use_target else 0
        # ResNet-style strided conv encoder ...
        self.encoder_conv = nn.Sequential(
            nn.Conv2d(getNChannels(), 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(64), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            conv3x3(in_planes=64, out_planes=64, stride=1),
            nn.BatchNorm2d(64), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            conv3x3(in_planes=64, out_planes=64, stride=2),
            nn.BatchNorm2d(64), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2))
        # ... and its transposed-conv decoder.
        self.decoder_conv = nn.Sequential(
            nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2),
            nn.BatchNorm2d(64), nn.ReLU(True),
            nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2),
            nn.BatchNorm2d(64), nn.ReLU(True),
            nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2),
            nn.BatchNorm2d(64), nn.ReLU(True),
            nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2),
            nn.BatchNorm2d(64), nn.ReLU(True),
            nn.ConvTranspose2d(64, getNChannels(), kernel_size=4, stride=2))
        self.encoder = FrameStacked(self.encoder_conv, self.n_frames)
        self.conv1 = nn.Conv2d(self.n_frames * (64 + self.target_channels), 64, 3, stride=1)
        self.flatten = Flatten()
        # Each tower yields a 64x4x4 map; the map tower adds a second one.
        self.fc1 = init_(nn.Linear((64 * 4 * 4 * self.use_map) + (64 * 4 * 4 * 1), 1024))
        self.fc2 = init_(nn.Linear(1024, self.output_size))

    def getStates(self, observations):
        """Return the encoded state for the observations.

        :param observations: (th.Tensor)
        :return: (th.Tensor)
        """
        return self.encode(observations)

    def encode(self, x):
        """Encode x with the conv encoder (fixed: now returns the result).

        :param x: (th.Tensor)
        :return: (th.Tensor)
        """
        return self.encoder_conv(x)

    def decode(self, x):
        """Decode x with the transposed-conv decoder (fixed: now returns).

        :param x: (th.Tensor)
        :return: (th.Tensor)
        """
        return self.decoder_conv(x)

    def forward(self, x):
        """Map an observation dict to a flat feature vector.

        :param x: dict with 'taskonomy' and optionally 'target', 'map'
        :return: (th.Tensor)
        """
        x_taskonomy = x['taskonomy']
        if self.use_target:
            x_taskonomy = torch.cat([x_taskonomy, x['target']], dim=1)
        x_taskonomy = F.relu(self.conv1(x_taskonomy))
        if self.use_map:
            x_map = x['map']
            x_map = self.map_tower(x_map)
            x_taskonomy = torch.cat([x_map, x_taskonomy], dim=1)
        x = self.flatten(x_taskonomy)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return x
|
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 padded convolution without bias (ResNet-style).

    :param in_planes: (int) input channel count
    :param out_planes: (int) output channel count
    :param stride: (int) convolution stride
    """
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3, stride=stride, padding=1, bias=False)
|
def srl_features_transform(task_path, dtype=np.float32):
    """Build an observation transform encoding pixels with a pretrained SRL
    conv encoder.

    Args:
        task_path: checkpoint path, 'None' for random encoder weights, or
            'pixels_as_state' (currently unsupported downstream)
        dtype: dtype of the output space (must be np, not torch)

    Returns:
        a function which takes an observation space and returns
        (pipeline, output_space)
    """
    _rescale_thunk = transforms.rescale_centercrop_resize((3, 224, 224))
    if task_path != 'pixels_as_state':
        # Same conv stack as BaseModelAutoEncoder.encoder_conv.
        net = nn.Sequential(
            nn.Conv2d(getNChannels(), 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(64), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            conv3x3(in_planes=64, out_planes=64, stride=1),
            nn.BatchNorm2d(64), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            conv3x3(in_planes=64, out_planes=64, stride=2),
            nn.BatchNorm2d(64), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2)).cuda()
        net.eval()
        if task_path != 'None':
            # NOTE(review): torch.load unpickles arbitrary objects; only load
            # trusted checkpoints.
            checkpoint = torch.load(task_path)
            # Keep only encoder weights, stripping the wrapping module prefixes.
            checkpoint = {k.replace('model.conv_layers.', '').replace('model.encoder_conv.', ''): v
                          for k, v in checkpoint.items()
                          if ('encoder_conv' in k) or ('conv_layers' in k)}
            net.load_state_dict(checkpoint)

    def encode(x):
        if task_path == 'pixels_as_state':
            return x
        with torch.no_grad():
            return net(x)

    def _features_transform_thunk(obs_space):
        (rescale, _) = _rescale_thunk(obs_space)

        def pipeline(x):
            x = torch.Tensor(x).cuda()
            x = encode(x)
            return x.cpu()

        if task_path == 'pixels_as_state':
            # Fixed: the unreachable `return` that followed this raise
            # referenced an undefined name and was removed.
            raise NotImplementedError
        return (pipeline, spaces.Box(-1, 1, (64, 6, 6), dtype))

    return _features_transform_thunk
|
class TriangleModel(nn.Module):
    """Container that runs several independent network chains side by side.

    Fixed the mutable default argument ``universal_kwargses=[{}]`` (shared
    list/dict across instances) by defaulting to None.
    """

    def __init__(self, network_constructors, n_channels_lists, universal_kwargses=None):
        super().__init__()
        if universal_kwargses is None:
            universal_kwargses = [{}]
        self.chains = nn.ModuleList()
        for network_constructor, n_channels_list, universal_kwargs in zip(
                network_constructors, n_channels_lists, universal_kwargses):
            print(network_constructor)
            chain = network_constructor(n_channels_list=n_channels_list,
                                        universal_kwargs=universal_kwargs)
            self.chains.append(chain)

    def initialize_from_checkpoints(self, checkpoint_paths, logger=None):
        """Load one checkpoint per chain, stripping DataParallel 'module.' prefixes.

        Returns self for chaining.
        """
        for i, (chain, ckpt_fpath) in enumerate(zip(self.chains, checkpoint_paths)):
            if logger is not None:
                logger.info(f'Loading step {i} from {ckpt_fpath}')
            checkpoint = torch.load(ckpt_fpath)
            sd = {k.replace('module.', ''): v for k, v in checkpoint['state_dict'].items()}
            chain.load_state_dict(sd)
        return self

    def forward(self, x):
        """Run every chain on the same input; return the list of outputs."""
        return [chain(x) for chain in self.chains]
|
class UNet_up_block(nn.Module):
    """UNet decoder stage: optional 2x bilinear upsample, skip concat, 3 convs."""

    def __init__(self, prev_channel, input_channel, output_channel, up_sample=True):
        super().__init__()
        self.up_sampling = nn.Upsample(scale_factor=2, mode='bilinear')
        self.conv1 = nn.Conv2d(prev_channel + input_channel, output_channel, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, output_channel)
        self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, output_channel)
        self.conv3 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, output_channel)
        self.relu = torch.nn.ReLU()
        self.up_sample = up_sample

    def forward(self, prev_feature_map, x):
        if self.up_sample:
            x = self.up_sampling(x)
        merged = torch.cat((x, prev_feature_map), dim=1)
        out = self.relu(self.bn1(self.conv1(merged)))
        out = self.relu(self.bn2(self.conv2(out)))
        return self.relu(self.bn3(self.conv3(out)))
|
class UNet_down_block(nn.Module):
    """UNet encoder stage: three GroupNorm'd 3x3 convs, optional 2x max-pool."""

    def __init__(self, input_channel, output_channel, down_size=True):
        super().__init__()
        self.conv1 = nn.Conv2d(input_channel, output_channel, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, output_channel)
        self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, output_channel)
        self.conv3 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, output_channel)
        self.max_pool = nn.MaxPool2d(2, 2)
        self.relu = nn.ReLU()
        self.down_size = down_size

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.relu(self.bn3(self.conv3(out)))
        if self.down_size:
            out = self.max_pool(out)
        return out
|
class UNet(nn.Module):
    """Symmetric encoder-decoder UNet with GroupNorm and skip connections.

    Output is clamped to [-1, 1]; ``loss`` is a zero placeholder.
    """

    def __init__(self, downsample=6, in_channels=3, out_channels=3):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        self.down1 = UNet_down_block(in_channels, 16, False)
        self.down_blocks = nn.ModuleList(
            [UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(downsample)])
        bottleneck = 2 ** (4 + downsample)
        self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, bottleneck)
        self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, bottleneck)
        self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, bottleneck)
        self.up_blocks = nn.ModuleList(
            [UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(downsample)])
        self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 16)
        self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.down1(x)
        skips = [x]
        for block in self.down_blocks:
            x = block(x)
            skips.append(x)
        x = self.relu(self.bn1(self.mid_conv1(x)))
        x = self.relu(self.bn2(self.mid_conv2(x)))
        x = self.relu(self.bn3(self.mid_conv3(x)))
        for i in reversed(range(self.downsample)):
            x = self.up_blocks[i](skips[i], x)
        x = self.relu(self.last_bn(self.last_conv1(x)))
        x = self.last_conv2(x)
        return x.clamp(min=-1.0, max=1.0)

    def loss(self, pred, target):
        """Placeholder loss: always zero (with detached copy for logging)."""
        loss = torch.tensor(0.0, device=pred.device)
        return (loss, (loss.detach(),))
|
class UNetHeteroscedasticFull(nn.Module):
    """UNet predicting a mean image plus a full-covariance uncertainty head.

    Fixed: the uncertainty head's channel count used true division
    ``(out_channels + 1) * out_channels / 2`` which yields a float and makes
    ``nn.Conv2d`` raise — it must be integer division (number of
    lower-triangular covariance entries).
    """

    def __init__(self, downsample=6, in_channels=3, out_channels=3, eps=1e-05):
        super().__init__()
        (self.in_channels, self.out_channels, self.downsample) = (in_channels, out_channels, downsample)
        self.down1 = UNet_down_block(in_channels, 16, False)
        self.down_blocks = nn.ModuleList(
            [UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(0, downsample)])
        bottleneck = 2 ** (4 + downsample)
        self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, bottleneck)
        self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, bottleneck)
        self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, bottleneck)
        self.up_blocks = nn.ModuleList(
            [UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(0, downsample)])
        # Mean head.
        self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 16)
        self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
        # Uncertainty head: one channel per lower-triangular covariance entry.
        self.last_conv1_uncert = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn_uncert = nn.GroupNorm(8, 16)
        self.last_conv2_uncert = nn.Conv2d(16, (out_channels + 1) * out_channels // 2, 1, padding=0)
        self.relu = nn.ReLU()
        self.eps = eps

    def forward(self, x):
        """Return (mu, scale): clamped mean and softplus'd uncertainty maps."""
        x = self.down1(x)
        xvals = [x]
        for i in range(0, self.downsample):
            x = self.down_blocks[i](x)
            xvals.append(x)
        x = self.relu(self.bn1(self.mid_conv1(x)))
        x = self.relu(self.bn2(self.mid_conv2(x)))
        x = self.relu(self.bn3(self.mid_conv3(x)))
        for i in reversed(range(self.downsample)):
            x = self.up_blocks[i](xvals[i], x)
        mu = self.relu(self.last_bn(self.last_conv1(x)))
        mu = self.last_conv2(mu)
        mu = mu.clamp(min=-1.0, max=1.0)
        scale = self.relu(self.last_bn_uncert(self.last_conv1_uncert(x)))
        scale = self.last_conv2_uncert(scale)
        scale = F.softplus(scale)
        return (mu, scale)
|
class UNetHeteroscedasticIndep(nn.Module):
    """UNet predicting a mean image plus a per-channel independent
    uncertainty (scale) map."""

    def __init__(self, downsample=6, in_channels=3, out_channels=3, eps=1e-05):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        self.down1 = UNet_down_block(in_channels, 16, False)
        self.down_blocks = nn.ModuleList(
            [UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(downsample)])
        bottleneck = 2 ** (4 + downsample)
        self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, bottleneck)
        self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, bottleneck)
        self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, bottleneck)
        self.up_blocks = nn.ModuleList(
            [UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(downsample)])
        # Mean head.
        self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 16)
        self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
        # Uncertainty head: one scale channel per output channel.
        self.last_conv1_uncert = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn_uncert = nn.GroupNorm(8, 16)
        self.last_conv2_uncert = nn.Conv2d(16, out_channels, 1, padding=0)
        self.relu = nn.ReLU()
        self.eps = eps

    def forward(self, x):
        """Return (mu, scale): clamped mean and softplus'd uncertainty maps."""
        x = self.down1(x)
        skips = [x]
        for block in self.down_blocks:
            x = block(x)
            skips.append(x)
        x = self.relu(self.bn1(self.mid_conv1(x)))
        x = self.relu(self.bn2(self.mid_conv2(x)))
        x = self.relu(self.bn3(self.mid_conv3(x)))
        for i in reversed(range(self.downsample)):
            x = self.up_blocks[i](skips[i], x)
        mu = self.last_conv2(self.relu(self.last_bn(self.last_conv1(x))))
        mu = mu.clamp(min=-1.0, max=1.0)
        scale = self.last_conv2_uncert(self.relu(self.last_bn_uncert(self.last_conv1_uncert(x))))
        scale = F.softplus(scale)
        return (mu, scale)
|
class UNetHeteroscedasticPooled(nn.Module):
    """UNet predicting a mean image plus a single pooled uncertainty channel.

    Fixed: deprecated ``F.tanh`` replaced with ``torch.tanh`` (numerically
    identical; ``torch.nn.functional.tanh`` only emits a deprecation warning).
    """

    def __init__(self, downsample=6, in_channels=3, out_channels=3, eps=1e-05, use_clamp=False):
        super().__init__()
        (self.in_channels, self.out_channels, self.downsample) = (in_channels, out_channels, downsample)
        self.down1 = UNet_down_block(in_channels, 16, False)
        self.down_blocks = nn.ModuleList(
            [UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(0, downsample)])
        bottleneck = 2 ** (4 + downsample)
        self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, bottleneck)
        self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, bottleneck)
        self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, bottleneck)
        self.up_blocks = nn.ModuleList(
            [UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(0, downsample)])
        # Mean head.
        self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 16)
        self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
        # Uncertainty head pooled to a single channel.
        self.last_conv1_uncert = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn_uncert = nn.GroupNorm(8, 16)
        self.last_conv2_uncert = nn.Conv2d(16, 1, 1, padding=0)
        self.relu = nn.ReLU()
        self.eps = eps
        self.use_clamp = use_clamp  # clamp vs tanh squashing of the mean

    def forward(self, x):
        """Return (mu, scale): squashed mean and softplus'd uncertainty map."""
        x = self.down1(x)
        xvals = [x]
        for i in range(0, self.downsample):
            x = self.down_blocks[i](x)
            xvals.append(x)
        x = self.relu(self.bn1(self.mid_conv1(x)))
        x = self.relu(self.bn2(self.mid_conv2(x)))
        x = self.relu(self.bn3(self.mid_conv3(x)))
        for i in reversed(range(self.downsample)):
            x = self.up_blocks[i](xvals[i], x)
        mu = self.relu(self.last_bn(self.last_conv1(x)))
        mu = self.last_conv2(mu)
        if self.use_clamp:
            mu = mu.clamp(min=-1.0, max=1.0)
        else:
            mu = torch.tanh(mu)
        scale = self.relu(self.last_bn_uncert(self.last_conv1_uncert(x)))
        scale = self.last_conv2_uncert(scale)
        scale = F.softplus(scale)
        return (mu, scale)
|
class UNetReshade(nn.Module):
    """UNet variant for reshading: outputs a single intensity channel,
    clamped to [0, 1] and tiled to 3 channels."""

    def __init__(self, downsample=6, in_channels=3, out_channels=3):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        self.down1 = UNet_down_block(in_channels, 16, False)
        self.down_blocks = nn.ModuleList(
            [UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(downsample)])
        bottleneck = 2 ** (4 + downsample)
        self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, bottleneck)
        self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, bottleneck)
        self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, bottleneck)
        self.up_blocks = nn.ModuleList(
            [UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(downsample)])
        self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 16)
        self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.down1(x)
        skips = [x]
        for block in self.down_blocks:
            x = block(x)
            skips.append(x)
        x = self.relu(self.bn1(self.mid_conv1(x)))
        x = self.relu(self.bn2(self.mid_conv2(x)))
        x = self.relu(self.bn3(self.mid_conv3(x)))
        for i in reversed(range(self.downsample)):
            x = self.up_blocks[i](skips[i], x)
        x = self.relu(self.last_bn(self.last_conv1(x)))
        x = self.relu(self.last_conv2(x))
        # Average channels into one intensity, then tile back to 3 channels.
        x = x.clamp(max=1, min=0).mean(dim=1, keepdim=True)
        return x.expand(-1, 3, -1, -1)

    def loss(self, pred, target):
        """Placeholder loss: always zero (with detached copy for logging)."""
        loss = torch.tensor(0.0, device=pred.device)
        return (loss, (loss.detach(),))
|
class ConvBlock(nn.Module):
    """Norm -> (optional transposed-conv 2x upsample) -> conv, with ReLUs."""

    def __init__(self, f1, f2, kernel_size=3, padding=1, use_groupnorm=True,
                 groups=8, dilation=1, transpose=False):
        super().__init__()
        self.transpose = transpose
        self.conv = nn.Conv2d(f1, f2, (kernel_size, kernel_size),
                              dilation=dilation, padding=padding * dilation)
        if self.transpose:
            # Doubles spatial size before the main convolution.
            self.convt = nn.ConvTranspose2d(f1, f1, (3, 3), dilation=dilation,
                                            stride=2, padding=dilation,
                                            output_padding=1)
        self.bn = nn.GroupNorm(groups, f1) if use_groupnorm else nn.BatchNorm2d(f1)

    def forward(self, x):
        out = self.bn(x)
        if self.transpose:
            out = F.relu(self.convt(out))
        return F.relu(self.conv(out))
|
def load_from_file(net, checkpoint_path):
    """Load a checkpoint's state_dict into net and freeze all parameters.

    Strips DataParallel 'module.' prefixes from state-dict keys and sets
    requires_grad=False on every parameter. Returns net.
    """
    checkpoint = torch.load(checkpoint_path)
    state_dict = {key.replace('module.', ''): value
                  for key, value in checkpoint['state_dict'].items()}
    net.load_state_dict(state_dict)
    for param in net.parameters():
        param.requires_grad = False
    return net
|
def blind(output_size, dtype=np.float32):
    """Observation transform that discards the input entirely.

    Args:
        output_size: A tuple CxWxH
        dtype: of the output (must be np, not torch)

    Returns:
        a function which takes an observation space and returns
        (transform, output_space); the transform always yields zeros.
    """
    def _thunk(obs_space):
        def pipeline(x):
            return torch.zeros(output_size)
        return (pipeline, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
def pixels_as_state(output_size, dtype=np.float32):
    """Pack raw (center-cropped, resized) pixels into a feature-shaped tensor.

    The output's first output_size[0] // 3 channel-triples are RGB copies
    and the remaining output_size[0] % 3 channels are grayscale copies, all
    rescaled to [-1, 1].

    Args:
        output_size: tuple CxWxH of the produced tensor.
        dtype: numpy dtype of the advertised observation space.

    Returns:
        a thunk: obs_space -> (transform, spaces.Box).
    """
    def _thunk(obs_space):
        crop_side = min(obs_space.shape[:2])
        target_wh = output_size[-2:]
        to_square = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.CenterCrop([crop_side, crop_side]),
            vision.transforms.Resize(target_wh)])
        to_gray = vision.transforms.Compose([
            vision.transforms.Grayscale(),
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1])
        to_rgb = vision.transforms.Compose([
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1])
        def pipeline(x):
            squared = to_square(x)
            rgb = to_rgb(squared)
            gray = to_gray(squared)
            n_rgb, n_gray = divmod(output_size[0], 3)
            return torch.cat([rgb] * n_rgb + [gray] * n_gray)
        return (pipeline, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
class GaussianSmoothing(nn.Module):
    """
    Apply gaussian smoothing on a 1d, 2d or 3d tensor. Filtering is performed
    separately for each channel in the input using a depthwise convolution.
    Arguments:
        channels (int, sequence): Number of channels of the input tensors.
            Output will have this number of channels as well.
        kernel_size (int, sequence): Size of the gaussian kernel.
        sigma (float, sequence): Standard deviation of the gaussian kernel.
        dim (int, optional): The number of dimensions of the data.
            Default value is 2 (spatial).
    """
    def __init__(self, channels, kernel_size, sigma, dim=2):
        super(GaussianSmoothing, self).__init__()
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim
        self.kernel_size = kernel_size
        # Build the kernel as the outer product of per-dimension gaussians.
        kernel = 1
        meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
        for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
            mean = (size - 1) / 2
            kernel *= (1 / (std * math.sqrt(2 * math.pi))) * torch.exp((-(((mgrid - mean) / std) ** 2)) / 2)
        # Normalize so a constant input is preserved exactly.
        kernel = kernel / torch.sum(kernel)
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *([1] * (kernel.dim() - 1)))
        self.register_buffer('weight', kernel)
        self.groups = channels  # depthwise: one filter per channel
        if dim == 1:
            self.conv = F.conv1d
        elif dim == 2:
            self.conv = F.conv2d
        elif dim == 3:
            self.conv = F.conv3d
        else:
            raise RuntimeError('Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim))

    def forward(self, input):
        """
        Apply gaussian filter to input.
        Arguments:
            input (torch.Tensor): Input to apply gaussian filter on.
        Returns:
            filtered (torch.Tensor): Filtered output (same shape as input).
        """
        # Bug fix: this previously used `len(input) == 3`, which is the size
        # of dim 0 (batch/channel count), not the tensor rank -- so an
        # unbatched (C, H, W) input was only detected when C happened to be 3.
        # Use the rank instead.
        input_was_3 = (input.dim() == 3)
        if input_was_3:
            input = input.unsqueeze(0)
        # NOTE(review): the 4-element reflect pad matches dim == 2 only;
        # confirm before using this class with dim != 2.
        input = F.pad(input, ([(self.kernel_size[0] // 2)] * 4), mode='reflect')
        res = self.conv(input, weight=self.weight, groups=self.groups)
        return (res.squeeze(0) if input_was_3 else res)
|
class GaussianSmoothing(nn.Module):
    """Depthwise gaussian blur for 1d/2d/3d tensors (batched input).

    Each channel is convolved with the same separable-product gaussian
    kernel via a grouped convolution, after reflect-padding so the spatial
    size is preserved.

    Arguments:
        channels: number of input (and output) channels.
        kernel_size: int or per-dimension sequence of kernel sizes.
        sigma: float or per-dimension sequence of standard deviations.
        dim: dimensionality of the data (default 2).
    """
    def __init__(self, channels, kernel_size, sigma, dim=2):
        super(GaussianSmoothing, self).__init__()
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        # Only the first entry drives the padding in forward().
        self.kernel_size = kernel_size[0]
        self.dim = dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim
        grids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
        kernel = 1
        for size, std, grid in zip(kernel_size, sigma, grids):
            center = (size - 1) / 2
            kernel *= (1 / (std * math.sqrt(2 * math.pi))) * torch.exp((-(((grid - center) / std) ** 2)) / 2)
        kernel = kernel / torch.sum(kernel)
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *([1] * (kernel.dim() - 1)))
        self.register_buffer('weight', kernel)
        self.groups = channels
        conv_by_dim = {1: F.conv1d, 2: F.conv2d, 3: F.conv3d}
        if dim not in conv_by_dim:
            raise RuntimeError('Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim))
        self.conv = conv_by_dim[dim]

    def forward(self, input):
        """Reflect-pad and apply the gaussian filter; output shape == input shape."""
        pad_spec = ([self.kernel_size // 2] * 2) * self.dim
        padded = F.pad(input, pad_spec, mode='reflect')
        return self.conv(padded, weight=self.weight, groups=self.groups)
|
class TransformFactory(object):
    """Builders for observation-dict preprocessing functions.

    Each static method returns a ``processing_fn(obs_space)`` that yields
    ``(transform_thunk, spaces.Dict)`` for a 1-layer-deep spaces.Dict.
    """

    @staticmethod
    def independent(names_to_transforms, multithread=False, keep_unnamed=True):
        """Apply one transform per named sensor, independently.

        Args:
            names_to_transforms: {sensor_name: transform_factory}; each
                factory maps an obs (sub)space to (transform, new_space).
            multithread: run the transforms through a pool.  NOTE(review):
                this assumes the module-level `mp` provides a thread-backed
                Pool -- transforms that are lambdas/closures or hold CUDA
                state will not survive pickling into a process pool.
            keep_unnamed: pass sensors without a transform through unchanged.
        """
        def processing_fn(obs_space):
            ' Obs_space is expected to be a 1-layer deep spaces.Dict '
            transforms = {}
            sensor_space = {}
            transform_names = set(names_to_transforms.keys())
            obs_space_names = set(obs_space.spaces.keys())
            assert transform_names.issubset(obs_space_names), 'Trying to transform observations that are not present ({})'.format((transform_names - obs_space_names))
            for name in obs_space_names:
                if name in names_to_transforms:
                    transform = names_to_transforms[name]
                    (transforms[name], sensor_space[name]) = transform(obs_space.spaces[name])
                elif keep_unnamed:
                    sensor_space[name] = obs_space.spaces[name]
                else:
                    print(f'Did not transform {name}, removing from obs')
            def _independent_tranform_thunk(obs):
                results = {}
                if multithread:
                    # Bug fix: this branch used `mp.pool` (the module, not the
                    # Pool class), called `pool.map()` with no arguments, and
                    # referenced an undefined `sensor_shapes`, so it could
                    # never run.  Map each named transform over its own
                    # observation, mirroring the serial branch below.
                    pool = mp.Pool(min(mp.cpu_count(), len(transforms)))
                    mapped = pool.map((lambda item: (item[0], item[1](obs[item[0]]))), list(transforms.items()))
                    pool.close()
                    pool.join()
                    results.update(dict(mapped))
                else:
                    for (name, transform) in transforms.items():
                        try:
                            results[name] = transform(obs[name])
                        except Exception as e:
                            print(f'Problem applying preprocess transform to {name}.', e)
                            raise e
                # Untransformed sensors are forwarded as-is when requested.
                for (name, val) in obs.items():
                    if ((name not in results) and keep_unnamed):
                        results[name] = val
                return SensorPack(results)
            return (_independent_tranform_thunk, spaces.Dict(sensor_space))
        return processing_fn

    @staticmethod
    def splitting(names_to_transforms, multithread=False, keep_unnamed=True):
        """Split each named sensor into several transformed outputs.

        Args:
            names_to_transforms: {old_name: {new_name: transform_factory}}.
            multithread: run transforms through mp.Pool.  NOTE(review): the
                mapped callable is a lambda, so this requires a thread-backed
                pool (a process pool cannot pickle it).
            keep_unnamed: pass sensors that were not split through unchanged.
        """
        def processing_fn(obs_space):
            ' Obs_space is expected to be a 1-layer deep spaces.Dict '
            old_name_to_new_name_to_transform = defaultdict(dict)
            sensor_space = {}
            transform_names = set(names_to_transforms.keys())
            obs_space_names = set(obs_space.spaces.keys())
            assert transform_names.issubset(obs_space_names), 'Trying to transform observations that are not present ({})'.format((transform_names - obs_space_names))
            for old_name in obs_space_names:
                if old_name in names_to_transforms:
                    # Bug fix: the assert previously inspected the outer dict
                    # (always true); it must validate the per-sensor mapping.
                    assert hasattr(names_to_transforms[old_name], 'items'), 'each sensor must map to a dict of transforms'
                    for (new_name, transform_maker) in names_to_transforms[old_name].items():
                        (transform, sensor_space[new_name]) = transform_maker(obs_space.spaces[old_name])
                        old_name_to_new_name_to_transform[old_name][new_name] = transform
                elif keep_unnamed:
                    sensor_space[old_name] = obs_space.spaces[old_name]
            def _transform_thunk(obs):
                results = {}
                transforms_to_run = [(old_name, new_name, transform)
                                     for (old_name, new_names) in old_name_to_new_name_to_transform.items()
                                     for (new_name, transform) in new_names.items()]
                if multithread:
                    pool = mp.Pool(min(multiprocessing.cpu_count(), len(transforms_to_run)))
                    res = pool.map((lambda t_o: t_o[0](t_o[1])),
                                   zip([t for (_, _, t) in transforms_to_run],
                                       [obs[old_name] for (old_name, _, _) in transforms_to_run]))
                    pool.close()
                    pool.join()
                    for (transformed, (_, new_name, _)) in zip(res, transforms_to_run):
                        results[new_name] = transformed
                else:
                    for (old_name, new_name, transform) in transforms_to_run:
                        results[new_name] = transform(obs[old_name])
                if keep_unnamed:
                    for (name, val) in obs.items():
                        if ((name not in results) and (name not in old_name_to_new_name_to_transform)):
                            results[name] = val
                return SensorPack(results)
            return (_transform_thunk, spaces.Dict(sensor_space))
        return processing_fn
|
class Pipeline(object):
    # Placeholder/stub: neither the constructor nor forward() does anything
    # yet; instances accept their arguments and discard them.
    def __init__(self, env_or_pipeline):
        # `env_or_pipeline` is accepted but currently ignored.
        pass
    def forward(self):
        # No-op; presumably meant to run composed transforms -- TODO confirm.
        pass
|
def identity_transform():
    """Transform factory that leaves observations untouched.

    Returns:
        a thunk: obs_space -> (identity function, unchanged obs_space).
    """
    def _thunk(obs_space):
        def passthrough(x):
            return x
        return (passthrough, obs_space)
    return _thunk
|
def fill_like(output_size, fill_value=0.0, dtype=torch.float32):
    """Transform factory producing a constant array regardless of input.

    Args:
        output_size: shape of the produced numpy array.
        fill_value: constant value the output is filled with.
        dtype: torch dtype used to build the array (converted to numpy).

    Returns:
        a thunk: obs_space -> (transform, spaces.Box).
    """
    def _thunk(obs_space):
        prototype = torch.ones((1,), dtype=dtype)
        def _process(_ignored):
            return prototype.new_full(output_size, fill_value).numpy()
        return (_process, spaces.Box(-1, 1, output_size, prototype.numpy().dtype))
    return _thunk
|
def rescale_centercrop_resize(output_size, dtype=np.float32):
    """Center-crop to a square, resize, and rescale pixels to [-1, 1].

    Args:
        output_size: tuple CxWxH of the produced tensor.
        dtype: numpy dtype of the advertised observation space (np, not torch).

    Returns:
        a thunk: obs_space (WxHxC) -> (transform, spaces.Box).
    """
    def _rescale_centercrop_resize_thunk(obs_space):
        square_side = min(obs_space.shape[:2])
        target_wh = output_size[-2:]
        pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.CenterCrop([square_side, square_side]),
            vision.transforms.Resize(target_wh),
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1,
        ])
        return (pipeline, spaces.Box(-1, 1, output_size, dtype))
    return _rescale_centercrop_resize_thunk
|
def rescale_centercrop_resize_collated(output_size, dtype=np.float32):
    """Batched variant of rescale_centercrop_resize.

    NOTE(review): the torchvision pipeline assembled below is never applied
    by the returned runner -- the runner only permutes NxHxWxC to NxCxHxW on
    the GPU and rescales to [-1, 1]; presumably crop/resize happens upstream.
    TODO confirm.

    Args:
        output_size: tuple CxWxH of the advertised observation space.
        dtype: numpy dtype of the advertised observation space.

    Returns:
        a thunk: obs_space -> (runner, spaces.Box).
    """
    def _rescale_centercrop_resize_thunk(obs_space):
        square_side = min(obs_space.shape[:2])
        pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.CenterCrop([square_side, square_side]),
            vision.transforms.Resize(output_size[-2:]),
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1,
        ])
        def iterative_pipeline(pipeline):
            # `pipeline` is intentionally unused here (see NOTE above).
            def runner(batch):
                if isinstance(batch, torch.Tensor):
                    batch = torch.cuda.FloatTensor(batch.cuda())
                else:
                    batch = torch.cuda.FloatTensor(batch).cuda()
                batch = batch.permute(0, 3, 1, 2) / 255.0
                return (2.0 * batch) - 1.0
            return runner
        return (iterative_pipeline(pipeline), spaces.Box(-1, 1, output_size, dtype))
    return _rescale_centercrop_resize_thunk
|
def rescale():
    """Rescale observations to [-1, 1] without cropping or resizing.

    numpy inputs go through ToTensor (assumed 0-255/0-1 images); tensor
    inputs are assumed to already be in 0-255.

    Returns:
        a thunk: obs_space -> (transform, spaces.Box).
    """
    def _rescale_thunk(obs_space):
        np_pipeline = vision.transforms.Compose([
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1,
        ])
        def pipeline(im):
            if isinstance(im, np.ndarray):
                return np_pipeline(im)
            return RESCALE_0_255_NEG1_POS1(im)
        return (pipeline, spaces.Box(-1.0, 1.0, obs_space.shape, np.float32))
    return _rescale_thunk
|
def grayscale_rescale():
    """Convert to single-channel grayscale and rescale to [-1, 1].

    Returns:
        a thunk: obs_space (HxWxC) -> (transform, spaces.Box of 1xHxW).
    """
    def _grayscale_rescale_thunk(obs_space):
        height, width = obs_space.shape[0], obs_space.shape[1]
        pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.Grayscale(),
            vision.transforms.ToTensor(),
            vision.transforms.Normalize([0.5], [0.5]),
        ])
        return (pipeline, spaces.Box(-1.0, 1.0, (1, height, width), dtype=np.float32))
    return _grayscale_rescale_thunk
|
def cross_modal_transform(eval_to_get_net, output_shape=(3, 84, 84), dtype=np.float32):
    """Run a single observation through an image-to-image network.

    The observation is center-cropped/resized to 3x256x256, encoded by
    `eval_to_get_net` on the GPU, mapped from [-1, 1] back to [0, 1], and
    finally resized to `output_shape` in [-1, 1].

    Args:
        eval_to_get_net: the network to apply (already in eval mode).
        output_shape: tuple CxWxH of the produced tensor.
        dtype: numpy dtype of the advertised observation space.

    Returns:
        a thunk: obs_space -> (transform, spaces.Box).
    """
    _rescale_thunk = rescale_centercrop_resize((3, 256, 256))
    net = eval_to_get_net
    resize_fn = vision.transforms.Compose([
        vision.transforms.ToPILImage(),
        vision.transforms.Resize(output_shape[-1]),
        vision.transforms.ToTensor(),
        RESCALE_0_1_NEG1_POS1,
    ])
    def encode(x):
        with torch.no_grad():
            return net(x)
    def _transform_thunk(obs_space):
        rescale, _ = _rescale_thunk(obs_space)
        def pipeline(x):
            with torch.no_grad():
                batch = rescale(x).view(1, 3, 256, 256)
                batch = torch.Tensor(batch).cuda()
                encoded = encode(batch)
                zero_one = (encoded + 1.0) / 2
                return resize_fn(zero_one[0].cpu())
        return (pipeline, spaces.Box(-1, 1, output_shape, dtype))
    return _transform_thunk
|
def cross_modal_transform_collated(eval_to_get_net, output_shape=(3, 84, 84), dtype=np.float32):
    """Batched variant of cross_modal_transform.

    An NxHxWxC uint8 batch is normalized to [-1, 1] NxCxHxW on the GPU,
    encoded by `eval_to_get_net`, mapped back to [0, 1], and each result is
    resized to `output_shape` in [-1, 1].

    Args:
        eval_to_get_net: the network to apply (already in eval mode).
        output_shape: tuple CxWxH of each produced tensor.
        dtype: numpy dtype of the advertised observation space.

    Returns:
        a thunk: obs_space -> (transform, spaces.Box).
    """
    _rescale_thunk = rescale_centercrop_resize((3, 256, 256))
    net = eval_to_get_net
    resize_fn = vision.transforms.Compose([
        vision.transforms.ToPILImage(),
        vision.transforms.Resize(output_shape[-1]),
        vision.transforms.ToTensor(),
        RESCALE_0_1_NEG1_POS1,
    ])
    def encode(x):
        with torch.no_grad():
            return net(x)
    def _transform_thunk(obs_space):
        rescale, _ = _rescale_thunk(obs_space)
        def pipeline(x):
            with torch.no_grad():
                batch = torch.FloatTensor(x).cuda().permute(0, 3, 1, 2) / 255.0
                batch = (2.0 * batch) - 1.0
                encoded = encode(batch)
                zero_one = (encoded + 1.0) / 2
                return torch.stack([resize_fn(img.cpu()) for img in zero_one])
        return (pipeline, spaces.Box(-1, 1, output_shape, dtype))
    return _transform_thunk
|
def pixels_as_state(output_size, dtype=np.float32):
    """Pack raw (center-cropped, resized) pixels into a feature-shaped tensor.

    Channels are filled with output_size[0] // 3 RGB copies followed by
    output_size[0] % 3 grayscale copies, all rescaled to [-1, 1].

    Args:
        output_size: tuple CxWxH of the produced tensor.
        dtype: numpy dtype of the advertised observation space.

    Returns:
        a thunk: obs_space -> (transform, spaces.Box).
    """
    def _thunk(obs_space):
        crop_side = min(obs_space.shape[:2])
        square_pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.CenterCrop([crop_side, crop_side]),
            vision.transforms.Resize(output_size[-2:])])
        gray_pipeline = vision.transforms.Compose([
            vision.transforms.Grayscale(),
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1])
        rgb_pipeline = vision.transforms.Compose([
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1])
        def pipeline(x):
            base = square_pipeline(x)
            n_rgb = output_size[0] // 3
            n_gray = output_size[0] % 3
            channels = [rgb_pipeline(base)] * n_rgb + [gray_pipeline(base)] * n_gray
            return torch.cat(channels)
        return (pipeline, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
def taskonomy_features_transform_collated(task_path, encoder_type='taskonomy', dtype=np.float32):
    """Build a batched transform mapping NxHxWxC uint8 frames to taskonomy
    encoder features (8x16x16), or to sentinel outputs.

    Args:
        task_path: checkpoint path; 'None' keeps a randomly initialized
            encoder; the sentinels 'pixels_as_state' / 'blind' skip the
            encoder entirely.
        encoder_type: only 'taskonomy' constructs an encoder here.
        dtype: numpy dtype of the advertised observation space.

    Returns:
        a thunk: obs_space -> (transform, spaces.Box of 8x16x16).
    """
    _rescale_thunk = rescale_centercrop_resize((3, 256, 256))
    _pixels_as_state_thunk = pixels_as_state((8, 16, 16))
    if ((task_path != 'pixels_as_state') and (task_path != 'blind')):
        if (encoder_type == 'taskonomy'):
            net = TaskonomyEncoder(normalize_outputs=False)
        if (task_path != 'None'):
            checkpoint = torch.load(task_path)
            # A checkpoint may store either a whole nn.Module or a state_dict.
            if any([isinstance(v, nn.Module) for v in checkpoint.values()]):
                net = [v for v in checkpoint.values() if isinstance(v, nn.Module)][0]
            elif ('state_dict' in checkpoint.keys()):
                net.load_state_dict(checkpoint['state_dict'])
            else:
                assert False, f'Cannot read task_path {task_path}, no nn.Module or state_dict found. Encoder_type is {encoder_type}'
        # NOTE(review): if encoder_type != 'taskonomy' and the checkpoint holds
        # no nn.Module, `net` is never bound and the next line raises
        # NameError -- presumably encoder_type is always 'taskonomy' here.
        net = net.cuda()
        net.eval()
    def encode(x):
        # Sentinel modes bypass the encoder and return the input unchanged.
        if ((task_path == 'pixels_as_state') or (task_path == 'blind')):
            return x
        with torch.no_grad():
            return net(x)
    def _taskonomy_features_transform_thunk(obs_space):
        (rescale, _) = _rescale_thunk(obs_space)
        (pixels_as_state, _) = _pixels_as_state_thunk(obs_space)
        def pipeline(x):
            # NxHxWxC (uint8) -> NxCxHxW float in [-1, 1] on the GPU, then encode.
            with torch.no_grad():
                if isinstance(x, torch.Tensor):
                    x = torch.cuda.FloatTensor(x.cuda())
                else:
                    x = torch.cuda.FloatTensor(x).cuda()
                x = (x.permute(0, 3, 1, 2) / 255.0)
                x = ((2.0 * x) - 1.0)
                x = encode(x)
                return x
        def pixels_as_state_pipeline(x):
            # `pixels_as_state` here is the transform unpacked from the thunk
            # above (it shadows the module-level factory of the same name).
            return pixels_as_state(x).cpu()
        def blind_pipeline(x):
            # Feature-shaped zeros, one per batch element.
            batch_size = x.shape[0]
            return torch.zeros((batch_size, 8, 16, 16))
        if (task_path == 'blind'):
            return (blind_pipeline, spaces.Box((- 1), 1, (8, 16, 16), dtype))
        elif (task_path == 'pixels_as_state'):
            return (pixels_as_state_pipeline, spaces.Box((- 1), 1, (8, 16, 16), dtype))
        else:
            return (pipeline, spaces.Box((- 1), 1, (8, 16, 16), dtype))
    return _taskonomy_features_transform_thunk
|
def taskonomy_features_transforms_collated(task_paths, encoder_type='taskonomy', dtype=np.float32):
    """Batched transform concatenating features from several taskonomy
    encoders (one per comma-separated checkpoint path).

    Args:
        task_paths: comma-separated checkpoint paths, or the sentinels
            'pixels_as_state' / 'blind' (no encoders are loaded then).
        encoder_type: only 'taskonomy' is supported.
        dtype: numpy dtype of the advertised observation space.

    Returns:
        a thunk: obs_space -> (transform, spaces.Box of (8*num_tasks)x16x16).
    """
    num_tasks = 0
    if ((task_paths != 'pixels_as_state') and (task_paths != 'blind')):
        task_path_list = [tp.strip() for tp in task_paths.split(',')]
        num_tasks = len(task_path_list)
        assert (num_tasks > 0), 'at least need one path'
        if (encoder_type == 'taskonomy'):
            nets = [TaskonomyEncoder(normalize_outputs=False) for _ in range(num_tasks)]
        else:
            assert False, f'do not recognize encoder type {encoder_type}'
        for (i, task_path) in enumerate(task_path_list):
            checkpoint = torch.load(task_path)
            # A checkpoint may store either a whole nn.Module or a state_dict.
            net_in_ckpt = [v for v in checkpoint.values() if isinstance(v, nn.Module)]
            if (len(net_in_ckpt) > 0):
                nets[i] = net_in_ckpt[0]
            elif ('state_dict' in checkpoint.keys()):
                nets[i].load_state_dict(checkpoint['state_dict'])
            else:
                assert False, f'Cannot read task_path {task_path}, no nn.Module or state_dict found. Encoder_type is {encoder_type}'
            nets[i] = nets[i].cuda()
            nets[i].eval()
    def encode(x):
        # Sentinel modes pass the (already normalized) pixels straight through.
        if ((task_paths == 'pixels_as_state') or (task_paths == 'blind')):
            return x
        with torch.no_grad():
            feats = [net(x) for net in nets]
            return torch.cat(feats, dim=1)
    def _taskonomy_features_transform_thunk(obs_space):
        def pipeline(x):
            # NxHxWxC (uint8) -> NxCxHxW float in [-1, 1] on the GPU, then encode.
            with torch.no_grad():
                if isinstance(x, torch.Tensor):
                    x = torch.cuda.FloatTensor(x.cuda())
                else:
                    x = torch.cuda.FloatTensor(x).cuda()
                x = (x.permute(0, 3, 1, 2) / 255.0)
                x = ((2.0 * x) - 1.0)
                x = encode(x)
                return x
        def pixels_as_state_pipeline(x):
            # NOTE(review): `pixels_as_state` resolves to the module-level
            # transform *factory*, which cannot be applied to a batch
            # directly; this sentinel branch looks unfinished -- TODO confirm.
            return pixels_as_state(x).cpu()
        # Bug fix: this previously compared `task_path` (the loop variable of
        # the checkpoint-loading loop above), which is undefined exactly when
        # task_paths == 'pixels_as_state' and raised NameError.  Compare the
        # function argument instead.
        if (task_paths == 'pixels_as_state'):
            return (pixels_as_state_pipeline, spaces.Box((- 1), 1, (8, 16, 16), dtype))
        else:
            return (pipeline, spaces.Box((- 1), 1, ((8 * num_tasks), 16, 16), dtype))
    return _taskonomy_features_transform_thunk
|
def image_to_input_collated(output_size, dtype=np.float32):
    """Convert a collated NxHxWxC uint8 batch to NxCxHxW in [-1, 1] on the GPU.

    Args:
        output_size: shape advertised in the returned spaces.Box.
        dtype: numpy dtype of the advertised observation space.

    Returns:
        a thunk: obs_space -> (runner, spaces.Box).
    """
    def _thunk(obs_space):
        def runner(batch):
            assert batch.shape[2] == batch.shape[1], 'we are only using square data, data format: N,H,W,C'
            if isinstance(batch, torch.Tensor):
                batch = torch.cuda.FloatTensor(batch.cuda())
            else:
                batch = torch.cuda.FloatTensor(batch.copy()).cuda()
            batch = batch.permute(0, 3, 1, 2) / 255.0
            return (2.0 * batch) - 1.0
        return (runner, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
def map_pool_collated(output_size, dtype=np.float32):
    """Batched map preprocessing on the GPU.

    NxHxWxC uint8 -> NxCxHxW float, max-pooled (3x3, stride 1), rotated
    180 degrees, and rescaled to [-1, 1].

    Args:
        output_size: shape advertised in the returned spaces.Box.
        dtype: numpy dtype of the advertised observation space.
    """
    def _thunk(obs_space):
        def runner(batch):
            with torch.no_grad():
                assert batch.shape[2] == batch.shape[1], 'we are only using square data, data format: N,H,W,C'
                if isinstance(batch, torch.Tensor):
                    batch = torch.cuda.FloatTensor(batch.cuda())
                else:
                    batch = torch.cuda.FloatTensor(batch.copy()).cuda()
                batch = batch.permute(0, 3, 1, 2) / 255.0
                batch = F.max_pool2d(batch, kernel_size=3, stride=1, padding=1)
                batch = torch.rot90(batch, k=2, dims=(2, 3))
                return (2.0 * batch) - 1.0
        return (runner, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
def map_pool(output_size, dtype=np.float32):
    """Single-observation map preprocessing (HxWxC input).

    The image is temporarily batched, converted to CxHxW float, max-pooled
    (3x3, stride 1), rotated 180 degrees, rescaled to [-1, 1], and returned
    on the CPU.
    """
    def _thunk(obs_space):
        def runner(obs):
            with torch.no_grad():
                assert obs.shape[0] == obs.shape[1], 'we are only using square data, data format: N,H,W,C'
                if isinstance(obs, torch.Tensor):
                    obs = torch.cuda.FloatTensor(obs.cuda())
                else:
                    obs = torch.cuda.FloatTensor(obs.copy()).cuda()
                obs = obs.unsqueeze(0).permute(0, 3, 1, 2) / 255.0
                obs = F.max_pool2d(obs, kernel_size=3, stride=1, padding=1)
                obs = torch.rot90(obs, k=2, dims=(2, 3))
                obs = (2.0 * obs) - 1.0
                return obs.squeeze(0).cpu()
        return (runner, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
class Pipeline(object):
    # Placeholder/stub (duplicate of the earlier Pipeline definition): nothing
    # is implemented yet.
    def __init__(self, env_or_pipeline):
        # `env_or_pipeline` is accepted but currently ignored.
        pass
    def forward(self):
        # No-op; presumably meant to run composed transforms -- TODO confirm.
        pass
|
def identity_transform():
    """Transform factory that passes observations through unchanged.

    Returns:
        a thunk: obs_space -> (identity function, unchanged obs_space).
    """
    def _thunk(obs_space):
        identity = lambda observation: observation
        return (identity, obs_space)
    return _thunk
|
def fill_like(output_size, fill_value=0.0, dtype=torch.float32):
    """Transform factory producing a constant numpy array for any input.

    Args:
        output_size: shape of the produced array.
        fill_value: constant the output is filled with.
        dtype: torch dtype used to build the array (converted to numpy).
    """
    def _thunk(obs_space):
        template = torch.ones((1,), dtype=dtype)
        def _process(_unused):
            return template.new_full(output_size, fill_value).numpy()
        return (_process, spaces.Box(-1, 1, output_size, template.numpy().dtype))
    return _thunk
|
def rescale_centercrop_resize(output_size, dtype=np.float32):
    """Center-crop to a square, resize, and rescale pixels to [-1, 1].

    Args:
        output_size: tuple CxWxH of the produced tensor.
        dtype: numpy dtype of the advertised observation space (np, not torch).

    Returns:
        a thunk: obs_space (WxHxC) -> (transform, spaces.Box).
    """
    def _rescale_centercrop_resize_thunk(obs_space):
        square_side = min(obs_space.shape[:2])
        # Guards against passing a CxWxH space where a WxHxC space is expected.
        assert square_side > 10, 'are you sure your data format is correct? is your min wh really < 10?'
        pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.CenterCrop([square_side, square_side]),
            vision.transforms.Resize(output_size[-2:]),
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1,
        ])
        return (pipeline, spaces.Box(-1, 1, output_size, dtype))
    return _rescale_centercrop_resize_thunk
|
def rescale_centercrop_resize_collated(output_size, dtype=np.float32):
    """Batched variant of rescale_centercrop_resize.

    NOTE(review): the torchvision pipeline assembled below is never applied
    by the returned runner -- the runner only permutes NxHxWxC to NxCxHxW on
    the GPU and rescales to [-1, 1]; presumably crop/resize happens upstream.
    TODO confirm.

    Args:
        output_size: tuple CxWxH of the advertised observation space.
        dtype: numpy dtype of the advertised observation space.
    """
    def _rescale_centercrop_resize_thunk(obs_space):
        crop_side = min(obs_space.shape[:2])
        pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.CenterCrop([crop_side, crop_side]),
            vision.transforms.Resize(output_size[-2:]),
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1,
        ])
        def iterative_pipeline(pipeline):
            # `pipeline` is intentionally unused (see NOTE above).
            def runner(frames):
                if isinstance(frames, torch.Tensor):
                    frames = torch.cuda.FloatTensor(frames.cuda())
                else:
                    frames = torch.cuda.FloatTensor(frames).cuda()
                frames = frames.permute(0, 3, 1, 2) / 255.0
                return (2.0 * frames) - 1.0
            return runner
        return (iterative_pipeline(pipeline), spaces.Box(-1, 1, output_size, dtype))
    return _rescale_centercrop_resize_thunk
|
def rescale():
    """Rescale observations to [-1, 1] without cropping or resizing.

    numpy inputs go through ToTensor (assumed 0-255/0-1 images); tensor
    inputs are assumed to already be in 0-255.
    """
    def _rescale_thunk(obs_space):
        numpy_pipeline = vision.transforms.Compose([
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1,
        ])
        def pipeline(im):
            if isinstance(im, np.ndarray):
                return numpy_pipeline(im)
            return RESCALE_0_255_NEG1_POS1(im)
        return (pipeline, spaces.Box(-1.0, 1.0, obs_space.shape, np.float32))
    return _rescale_thunk
|
def grayscale_rescale():
    """Convert to single-channel grayscale normalized to [-1, 1].

    Returns:
        a thunk: obs_space (HxWxC) -> (transform, spaces.Box of 1xHxW).
    """
    def _grayscale_rescale_thunk(obs_space):
        pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.Grayscale(),
            vision.transforms.ToTensor(),
            vision.transforms.Normalize([0.5], [0.5]),
        ])
        shape = obs_space.shape
        return (pipeline, spaces.Box(-1.0, 1.0, (1, shape[0], shape[1]), dtype=np.float32))
    return _grayscale_rescale_thunk
|
def cross_modal_transform(eval_to_get_net, output_shape=(3, 84, 84), dtype=np.float32):
    """Encode a collated batch with `eval_to_get_net` and resize each output.

    NOTE(review): unlike the earlier single-observation variant, this one
    assumes the batch is already in the network's expected range -- the
    rescale transform is built but never applied.  TODO confirm.

    Args:
        eval_to_get_net: the network to apply (already in eval mode).
        output_shape: tuple CxWxH of each produced tensor.
        dtype: numpy dtype of the advertised observation space.
    """
    _rescale_thunk = rescale_centercrop_resize((3, 256, 256))
    net = eval_to_get_net
    resize_fn = vision.transforms.Compose([
        vision.transforms.ToPILImage(),
        vision.transforms.Resize(output_shape[-1]),
        vision.transforms.ToTensor(),
        RESCALE_0_1_NEG1_POS1,
    ])
    def encode(x):
        with torch.no_grad():
            return net(x)
    def _transform_thunk(obs_space):
        rescale, _ = _rescale_thunk(obs_space)
        def pipeline(x):
            with torch.no_grad():
                if isinstance(x, torch.Tensor):
                    batch = torch.cuda.FloatTensor(x.cuda())
                else:
                    batch = torch.cuda.FloatTensor(x).cuda()
                encoded = encode(batch)
                zero_one = (encoded + 1.0) / 2
                return torch.stack([resize_fn(img.cpu()) for img in zero_one])
        return (pipeline, spaces.Box(-1, 1, output_shape, dtype))
    return _transform_thunk
|
def image_to_input_collated(output_size, dtype=np.float32):
    """Convert a collated NxHxWxC uint8 batch to NxCxHxW in [-1, 1] on the GPU.

    Args:
        output_size: shape advertised in the returned spaces.Box.
        dtype: numpy dtype of the advertised observation space.
    """
    def _thunk(obs_space):
        def runner(frames):
            assert frames.shape[2] == frames.shape[1], 'Input image must be square, of the form: N,H,W,C'
            if isinstance(frames, torch.Tensor):
                frames = torch.cuda.FloatTensor(frames.cuda())
            else:
                frames = torch.cuda.FloatTensor(frames.copy()).cuda()
            frames = frames.permute(0, 3, 1, 2) / 255.0
            return (2.0 * frames) - 1.0
        return (runner, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
def map_pool(output_size, dtype=np.float32):
    """Single-observation map preprocessing (HxWxC input).

    Temporarily batches the image, converts to CxHxW float, max-pools
    (3x3, stride 1), rotates 180 degrees, rescales to [-1, 1], and returns
    the CPU tensor.
    """
    def _thunk(obs_space):
        def runner(obs):
            with torch.no_grad():
                assert obs.shape[0] == obs.shape[1], 'we are only using square data, data format: N,H,W,C'
                if isinstance(obs, torch.Tensor):
                    obs = torch.cuda.FloatTensor(obs.cuda())
                else:
                    obs = torch.cuda.FloatTensor(obs.copy()).cuda()
                obs = obs.unsqueeze(0).permute(0, 3, 1, 2) / 255.0
                obs = F.max_pool2d(obs, kernel_size=3, stride=1, padding=1)
                obs = torch.rot90(obs, k=2, dims=(2, 3))
                obs = (2.0 * obs) - 1.0
                return obs.squeeze(0).cpu()
        return (runner, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
def map_pool_collated(output_size, dtype=np.float32):
    """Batched map preprocessing on the GPU.

    NxHxWxC uint8 -> NxCxHxW float, max-pooled (3x3, stride 1), rotated
    180 degrees, and rescaled to [-1, 1].

    Args:
        output_size: shape advertised in the returned spaces.Box.
        dtype: numpy dtype of the advertised observation space.
    """
    def _thunk(obs_space):
        def runner(x):
            with torch.no_grad():
                assert (x.shape[2] == x.shape[1]), 'we are only using square data, data format: N,H,W,C'
                if isinstance(x, torch.Tensor):
                    x = torch.cuda.FloatTensor(x.cuda())
                else:
                    x = torch.cuda.FloatTensor(x.copy()).cuda()
                x = (x.permute(0, 3, 1, 2) / 255.0)
                # Fix: a second, nested `with torch.no_grad()` wrapped the next
                # three lines; it was redundant (the whole body already runs
                # under no_grad) and has been removed.
                x = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
                x = torch.rot90(x, k=2, dims=(2, 3))
                x = ((2.0 * x) - 1.0)
                return x
        return (runner, spaces.Box((- 1), 1, output_size, dtype))
    return _thunk
|
def taskonomy_features_transform(task_path, model='TaskonomyEncoder', dtype=np.float32, device=None, normalize_outputs=False):
    """Single-batch taskonomy feature transform from one checkpoint.

    Args:
        task_path: checkpoint containing a 'state_dict' for the encoder.
        model: name of the encoder class to instantiate -- resolved via
            eval(); only pass trusted, code-controlled values.
        dtype: numpy dtype of the advertised observation space.
        device: CUDA device for the encoder and the inputs.
        normalize_outputs: forwarded to the encoder constructor.

    Returns:
        a thunk: obs_space -> (transform, spaces.Box of 8x16x16).
    """
    # SECURITY NOTE: eval() on `model` executes arbitrary code if the string
    # is ever user-controlled; keep it to internal constants.
    net = eval(model)(normalize_outputs=normalize_outputs, eval_only=True).cuda(device=device)
    net.eval()
    checkpoint = torch.load(task_path)
    net.load_state_dict(checkpoint['state_dict'])
    print(f'Loaded taskonomy transform with {model} from {task_path}')
    def encode(x):
        # Inference only; no gradients are tracked through the encoder.
        with torch.no_grad():
            return net(x)
    def _taskonomy_features_transform_thunk(obs_space):
        def pipeline(x):
            # assumes x is already a normalized NxCxHxW batch -- TODO confirm
            x = torch.Tensor(x).cuda(device=device)
            x = encode(x)
            return x
        return (pipeline, spaces.Box((- 1), 1, (8, 16, 16), dtype))
    return _taskonomy_features_transform_thunk
|
def _load_encoder(encoder_path):
    """Load a frozen feature encoder from `encoder_path`.

    Paths containing 'student' or 'distil' are treated as distilled FCN5
    models; anything else is loaded as a TaskonomyEncoder.  Weights load
    strictly when possible, falling back to strict=False with a diagnostic.
    All parameters end up with requires_grad=False.
    """
    if (('student' in encoder_path) or ('distil' in encoder_path)):
        net = FCN5(normalize_outputs=True, eval_only=True, train=False)
    else:
        net = TaskonomyEncoder()
    net.eval()
    checkpoint = torch.load(encoder_path)
    state_dict = checkpoint['state_dict']
    try:
        net.load_state_dict(state_dict, strict=True)
    except RuntimeError as e:
        # Retry tolerantly.  Newer torch returns a NamedTuple of
        # missing/unexpected keys; older versions returned None -- TODO
        # confirm which torch versions this must support.
        incompatible = net.load_state_dict(state_dict, strict=False)
        if (incompatible is None):
            warnings.warn('load_state_dict not showing missing/unexpected keys!')
        else:
            print(f'''{e}, reloaded with strict=False
            Num matches: {len([k for k in net.state_dict() if (k in state_dict)])}
            Num missing: {len(incompatible.missing_keys)}
            Num unexpected: {len(incompatible.unexpected_keys)}''')
    # Freeze: encoders are used as fixed feature extractors.
    for p in net.parameters():
        p.requires_grad = False
    return net
|
def _load_encoders_seq(encoder_paths):
    """Load each encoder checkpoint in sequence, then move them all to the GPU.

    Raises:
        RuntimeError: re-raised from _load_encoder after emitting a warning.
    """
    experts = []
    for encoder_path in encoder_paths:
        try:
            experts.append(_load_encoder(encoder_path))
        except RuntimeError as e:
            warnings.warn(f'Unable to load {encoder_path} due to {e}')
            raise e
    return [expert.cuda() for expert in experts]
|
def _load_encoders_parallel(encoder_paths, n_processes=None):
    """Load encoder checkpoints through a process pool, then move them to GPU.

    Args:
        encoder_paths: checkpoint paths handed to _load_encoder.
        n_processes: cap on worker processes (defaults to one per path, and is
            always bounded by the CPU count).
    """
    wanted = len(encoder_paths) if n_processes is None else min(len(encoder_paths), n_processes)
    workers = min(multiprocessing.cpu_count(), wanted)
    pool = multiprocessing.Pool(min(workers, wanted))
    experts = pool.map(_load_encoder, encoder_paths)
    pool.close()
    pool.join()
    return [expert.cuda() for expert in experts]
|
def taskonomy_multi_features_transform(task_paths, dtype=np.float32):
    """Concatenate features from several frozen encoders along channels.

    Args:
        task_paths: checkpoint paths, loaded sequentially via _load_encoders_seq.
        dtype: numpy dtype of the advertised observation space.

    Returns:
        a thunk: obs_space -> (transform, spaces.Box of (8*len(nets))x16x16).
    """
    nets = _load_encoders_seq(task_paths)
    def encode(x):
        with torch.no_grad():
            return torch.cat([net(x) for net in nets], dim=1)
    def _taskonomy_features_transform_thunk(obs_space):
        def pipeline(x):
            features = encode(torch.Tensor(x).cuda())
            return features.cpu()
        return (pipeline, spaces.Box(-1, 1, (8 * len(nets), 16, 16), dtype))
    return _taskonomy_features_transform_thunk
|
def taskonomy_features_transform_collated(task_path, dtype=np.float32):
    """Batched taskonomy-feature transform from a single checkpoint.

    Loads a TaskonomyEncoder onto the GPU and maps NxHxWxC uint8 batches to
    CPU feature tensors (inputs normalized to [-1, 1] before encoding).
    """
    net = TaskonomyEncoder().cuda()
    net.eval()
    checkpoint = torch.load(task_path)
    net.load_state_dict(checkpoint['state_dict'])
    def encode(batch):
        with torch.no_grad():
            batch = torch.Tensor(batch).cuda()
            # The isinstance check below always holds after the conversion
            # above; kept for parity with the sibling implementations.
            if isinstance(batch, torch.Tensor):
                batch = torch.cuda.FloatTensor(batch.cuda())
            else:
                batch = torch.cuda.FloatTensor(batch).cuda()
            batch = batch.permute(0, 3, 1, 2) / 255.0
            batch = (2.0 * batch) - 1.0
            return net(batch)
    def _taskonomy_features_transform_thunk(obs_space):
        def pipeline(x):
            return encode(x).cpu()
        return (pipeline, spaces.Box(-1, 1, (8, 16, 16), dtype))
    return _taskonomy_features_transform_thunk
|
def taskonomy_features_transforms_collated(task_paths, encoder_type='taskonomy', dtype=np.float32):
    """Batched transform concatenating features from several taskonomy
    encoders (one per comma-separated checkpoint path).

    Args:
        task_paths: comma-separated checkpoint paths, or the sentinels
            'pixels_as_state' / 'blind' (no encoders are loaded then).
        encoder_type: only 'taskonomy' is supported.
        dtype: numpy dtype of the advertised observation space.

    Returns:
        a thunk: obs_space -> (transform, spaces.Box of (8*num_tasks)x16x16).
    """
    num_tasks = 0
    if ((task_paths != 'pixels_as_state') and (task_paths != 'blind')):
        task_path_list = [tp.strip() for tp in task_paths.split(',')]
        num_tasks = len(task_path_list)
        assert (num_tasks > 0), 'at least need one path'
        if (encoder_type == 'taskonomy'):
            nets = [TaskonomyEncoder(normalize_outputs=False) for _ in range(num_tasks)]
        else:
            assert False, f'do not recognize encoder type {encoder_type}'
        for (i, task_path) in enumerate(task_path_list):
            checkpoint = torch.load(task_path)
            # A checkpoint may store either a whole nn.Module or a state_dict.
            net_in_ckpt = [v for v in checkpoint.values() if isinstance(v, nn.Module)]
            if (len(net_in_ckpt) > 0):
                nets[i] = net_in_ckpt[0]
            elif ('state_dict' in checkpoint.keys()):
                nets[i].load_state_dict(checkpoint['state_dict'])
            else:
                assert False, f'Cannot read task_path {task_path}, no nn.Module or state_dict found. Encoder_type is {encoder_type}'
            nets[i] = nets[i].cuda()
            nets[i].eval()
    def encode(x):
        # Sentinel modes pass the (already normalized) pixels straight through.
        if ((task_paths == 'pixels_as_state') or (task_paths == 'blind')):
            return x
        with torch.no_grad():
            feats = [net(x) for net in nets]
            return torch.cat(feats, dim=1)
    def _taskonomy_features_transform_thunk(obs_space):
        def pipeline(x):
            # NxHxWxC (uint8) -> NxCxHxW float in [-1, 1] on the GPU, then encode.
            with torch.no_grad():
                if isinstance(x, torch.Tensor):
                    x = torch.cuda.FloatTensor(x.cuda())
                else:
                    x = torch.cuda.FloatTensor(x).cuda()
                x = (x.permute(0, 3, 1, 2) / 255.0)
                x = ((2.0 * x) - 1.0)
                x = encode(x)
                return x
        def pixels_as_state_pipeline(x):
            # NOTE(review): `pixels_as_state` resolves to the module-level
            # transform *factory*, which cannot be applied to a batch
            # directly; this sentinel branch looks unfinished -- TODO confirm.
            return pixels_as_state(x).cpu()
        # Bug fix: this previously compared `task_path` (the loop variable of
        # the checkpoint-loading loop above), which is undefined exactly when
        # task_paths == 'pixels_as_state' and raised NameError.  Compare the
        # function argument instead.
        if (task_paths == 'pixels_as_state'):
            return (pixels_as_state_pipeline, spaces.Box((- 1), 1, (8, 16, 16), dtype))
        else:
            return (pipeline, spaces.Box((- 1), 1, ((8 * num_tasks), 16, 16), dtype))
    return _taskonomy_features_transform_thunk
|
class A2C_ACKTR(object):
    """A2C (and ACKTR when ``acktr=True``) policy-gradient updater.

    With ``acktr=True`` a KFACOptimizer is used and its Fisher statistics
    are refreshed every ``optimizer.Ts`` steps via an extra backward pass.
    """
    def __init__(self, actor_critic, value_loss_coef, entropy_coef, lr=None, eps=None, alpha=None, max_grad_norm=None, acktr=False):
        self.actor_critic = actor_critic
        self.acktr = acktr
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.max_grad_norm = max_grad_norm
        if acktr:
            # K-FAC carries its own step-size control; lr/eps/alpha are unused here.
            self.optimizer = KFACOptimizer(actor_critic)
        else:
            self.optimizer = optim.RMSprop(actor_critic.parameters(), lr, eps=eps, alpha=alpha)
    def update(self, rollouts):
        """Perform one A2C/ACKTR update over the whole rollout.

        Returns (value_loss, action_loss, dist_entropy) as Python floats.
        """
        obs_shape = rollouts.observations.size()[2:]
        action_shape = rollouts.actions.size()[(- 1)]
        (num_steps, num_processes, _) = rollouts.rewards.size()
        # Flatten (time, process) into one batch for a single evaluate_actions call.
        (values, action_log_probs, dist_entropy, states) = self.actor_critic.evaluate_actions(rollouts.observations[:(- 1)].view((- 1), *obs_shape), rollouts.states[0].view((- 1), self.actor_critic.state_size), rollouts.masks[:(- 1)].view((- 1), 1), rollouts.actions.view((- 1), action_shape))
        values = values.view(num_steps, num_processes, 1)
        action_log_probs = action_log_probs.view(num_steps, num_processes, 1)
        advantages = (rollouts.returns[:(- 1)] - values)
        value_loss = advantages.pow(2).mean()
        # Advantages are detached so the policy gradient does not flow into the critic.
        action_loss = (- (advantages.detach() * action_log_probs).mean())
        if (self.acktr and ((self.optimizer.steps % self.optimizer.Ts) == 0)):
            # Periodically refresh the K-FAC Fisher statistics with a separate
            # backward pass through a sampled Fisher loss (policy term plus a
            # Gaussian-perturbed value term).
            self.actor_critic.zero_grad()
            pg_fisher_loss = (- action_log_probs.mean())
            value_noise = torch.randn(values.size())
            if values.is_cuda:
                value_noise = value_noise.cuda()
            sample_values = (values + value_noise)
            vf_fisher_loss = (- (values - sample_values.detach()).pow(2).mean())
            fisher_loss = (pg_fisher_loss + vf_fisher_loss)
            # acc_stats gates the optimizer's backward hooks; retain_graph is
            # required because the main loss backward reuses the same graph.
            self.optimizer.acc_stats = True
            fisher_loss.backward(retain_graph=True)
            self.optimizer.acc_stats = False
        self.optimizer.zero_grad()
        (((value_loss * self.value_loss_coef) + action_loss) - (dist_entropy * self.entropy_coef)).backward()
        if (self.acktr == False):
            # Gradient clipping only for plain A2C; K-FAC rescales internally.
            nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
        self.optimizer.step()
        return (value_loss.item(), action_loss.item(), dist_entropy.item())
|
class QLearner(nn.Module):
    """DQN-style learner: epsilon-greedy acting plus TD updates against a
    periodically-synchronized target network, with optional prioritized
    replay (importance weights and priority updates)."""

    def __init__(self, actor_network, target_network, action_dim, batch_size, lr, eps, gamma, copy_frequency, start_schedule, schedule_timesteps, initial_p, final_p):
        super(QLearner, self).__init__()
        self.actor_network = actor_network
        self.target_network = target_network
        # Exploration schedule plus a separate schedule for the prioritized
        # replay importance-sampling exponent beta (annealed 0.4 -> 1.0).
        self.learning_schedule = LearningSchedule(start_schedule, schedule_timesteps, initial_p, final_p)
        self.beta_schedule = LearningSchedule(start_schedule, schedule_timesteps, 0.4, 1.0)
        self.action_dim = action_dim
        self.batch_size = batch_size
        self.gamma = gamma
        self.copy_frequency = copy_frequency
        self.optimizer = optim.Adam(actor_network.parameters(), lr=lr, eps=eps)
        self.step = 0

    def cuda(self):
        self.target_network = self.target_network.cuda()
        self.actor_network = self.actor_network.cuda()

    def act(self, observation, greedy=False):
        """Epsilon-greedy action selection; also syncs the target network
        every copy_frequency steps."""
        self.step += 1
        if self.step % self.copy_frequency == 1:
            self.target_network.load_state_dict(self.actor_network.state_dict())
        # Always draw from the RNG so the random stream is step-aligned.
        explore = random.random() <= self.learning_schedule.value(self.step)
        if greedy or not explore:
            with torch.no_grad():
                q_values = self.actor_network(observation)
                return q_values.max(1)[1].view(1, 1)
        return torch.tensor([[random.randrange(self.action_dim)]])

    def update(self, rollouts):
        """One TD update on a sampled batch; returns the scalar loss."""
        beta = self.beta_schedule.value(self.step)
        (observations, actions, rewards, masks, next_observations, weights, indices) = rollouts.sample(self.batch_size, beta=beta)
        # Bootstrap target: r + gamma * max_a' Q_target(s', a'), masked at terminals.
        bootstrap = self.target_network(next_observations).detach().max(1)[0].unsqueeze(1)
        q_taken = self.actor_network(observations).gather(1, actions)
        targets = rewards + self.gamma * masks * bootstrap
        if rollouts.use_priority:
            with torch.no_grad():
                td_errors = torch.abs(targets - q_taken).detach() + 1e-06
            rollouts.update_priorities(indices, td_errors)
        # Importance-weighted squared TD error.
        loss = (weights * (targets - q_taken) ** 2).sum()
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def get_epsilon(self):
        """Current value of the exploration schedule."""
        return self.learning_schedule.value(self.step)
|
class LearningSchedule(object):
    """Linear interpolation from ``initial_p`` to ``final_p`` over
    ``schedule_timesteps`` steps, starting after ``start_schedule`` steps
    and clamped at both ends."""

    def __init__(self, start_schedule, schedule_timesteps, initial_p=1.0, final_p=0.05):
        self.start_schedule = start_schedule
        self.schedule_timesteps = schedule_timesteps
        self.initial_p = initial_p
        self.final_p = final_p

    def value(self, t):
        """Return the scheduled value at timestep ``t``."""
        elapsed = max(0.0, float(t - self.start_schedule))
        fraction = min(elapsed / self.schedule_timesteps, 1.0)
        return self.initial_p + fraction * (self.final_p - self.initial_p)
|
def _extract_patches(x, kernel_size, stride, padding):
if ((padding[0] + padding[1]) > 0):
x = F.pad(x, (padding[1], padding[1], padding[0], padding[0])).data
x = x.unfold(2, kernel_size[0], stride[0])
x = x.unfold(3, kernel_size[1], stride[1])
x = x.transpose_(1, 2).transpose_(2, 3).contiguous()
x = x.view(x.size(0), x.size(1), x.size(2), ((x.size(3) * x.size(4)) * x.size(5)))
return x
|
def compute_cov_a(a, classname, layer_info, fast_cnn):
    """K-FAC input-activation covariance factor A = a^T a / batch for one layer.

    Conv2d activations are expanded into receptive-field patches first; an
    AddBias layer contributes a constant-one activation. Any other classname
    (e.g. Linear) uses the activations as-is.
    """
    batch_size = a.size(0)
    if classname == 'Conv2d':
        a = _extract_patches(a, *layer_info)
        if fast_cnn:
            # Average the patches over spatial locations, keeping the batch dim.
            a = a.view(a.size(0), -1, a.size(-1)).mean(1)
        else:
            # Treat every spatial location as a sample, scaled down by the
            # number of locations (oh * ow); capture sizes before the view.
            oh, ow = a.size(1), a.size(2)
            a = a.view(-1, a.size(-1))
            a.div_(oh)
            a.div_(ow)
    elif classname == 'AddBias':
        on_gpu = a.is_cuda
        a = torch.ones(a.size(0), 1)
        if on_gpu:
            a = a.cuda()
    return a.t() @ (a / batch_size)
|
def compute_cov_g(g, classname, layer_info, fast_cnn):
    """K-FAC pre-activation gradient covariance factor G for one layer.

    Conv2d gradients are reduced over (fast_cnn) or expanded across spatial
    locations; AddBias gradients are summed over the trailing dimensions.
    """
    batch_size = g.size(0)
    if classname == 'Conv2d':
        if fast_cnn:
            # Sum gradients over all spatial locations.
            g = g.view(g.size(0), g.size(1), -1).sum(-1)
        else:
            # (N, C, oh, ow) -> (N, oh, ow, C); every location is a sample,
            # scaled up by the number of locations (oh * ow).
            g = g.transpose(1, 2).transpose(2, 3).contiguous()
            oh, ow = g.size(1), g.size(2)
            g = g.view(-1, g.size(-1))
            g.mul_(oh)
            g.mul_(ow)
    elif classname == 'AddBias':
        g = g.view(g.size(0), g.size(1), -1).sum(-1)
    g_ = g * batch_size
    return g_.t() @ (g_ / g.size(0))
|
def update_running_stat(aa, m_aa, momentum):
    """Exponential moving average, in place:
    ``m_aa <- momentum * m_aa + (1 - momentum) * aa``.

    Written as scale / accumulate / rescale so that no temporary tensor is
    allocated for ``aa``.
    """
    decay = 1 - momentum
    m_aa.mul_(momentum / decay)
    m_aa.add_(aa)
    m_aa.mul_(decay)
|
class SplitBias(nn.Module):
    """Wrap a layer so its bias is applied by a separate AddBias module,
    letting K-FAC treat the weight and the bias as distinct layers."""

    def __init__(self, module):
        super(SplitBias, self).__init__()
        self.module = module
        # Move the bias into its own module, then strip it from the wrapped layer.
        self.add_bias = AddBias(module.bias.data)
        self.module.bias = None

    def forward(self, input):
        return self.add_bias(self.module(input))
|
class KFACOptimizer(optim.Optimizer):
    """Kronecker-Factored Approximate Curvature (K-FAC) optimizer.

    Registers forward/backward hooks on Linear, Conv2d and AddBias modules to
    accumulate the Kronecker factors A (input activations) and G (output
    gradients), periodically eigendecomposes them, and rewrites each
    parameter's gradient with the preconditioned update before delegating the
    actual step to an internal SGD-with-momentum optimizer.

    Ts: interval (in steps) between statistics updates.
    Tf: interval (in steps) between eigendecomposition refreshes.
    """
    def __init__(self, model, lr=0.25, momentum=0.9, stat_decay=0.99, kl_clip=0.001, damping=0.01, weight_decay=0, fast_cnn=False, Ts=1, Tf=10):
        defaults = dict()
        # Rewrite every layer that owns a bias so the bias becomes a separate
        # AddBias "layer" — K-FAC factors weight and bias independently.
        def split_bias(module):
            for (mname, child) in module.named_children():
                if (hasattr(child, 'bias') and (child.bias is not None)):
                    module._modules[mname] = SplitBias(child)
                else:
                    split_bias(child)
        split_bias(model)
        super(KFACOptimizer, self).__init__(model.parameters(), defaults)
        self.known_modules = {'Linear', 'Conv2d', 'AddBias'}
        self.modules = []
        self.grad_outputs = {}
        self.model = model
        self._prepare_model()
        self.steps = 0
        # Running Kronecker factors and their eigendecompositions, per module.
        (self.m_aa, self.m_gg) = ({}, {})
        (self.Q_a, self.Q_g) = ({}, {})
        (self.d_a, self.d_g) = ({}, {})
        self.momentum = momentum
        self.stat_decay = stat_decay
        self.lr = lr
        self.kl_clip = kl_clip
        self.damping = damping
        self.weight_decay = weight_decay
        self.fast_cnn = fast_cnn
        self.Ts = Ts
        self.Tf = Tf
        # Inner SGD applies the (already preconditioned) gradients; lr is
        # rescaled by (1 - momentum) to compensate for momentum accumulation.
        self.optim = optim.SGD(model.parameters(), lr=(self.lr * (1 - self.momentum)), momentum=self.momentum)
    def _save_input(self, module, input):
        # Forward pre-hook: update the running input-activation covariance A
        # every Ts steps (only while grad is enabled, i.e. during training).
        if (torch.is_grad_enabled() and ((self.steps % self.Ts) == 0)):
            classname = module.__class__.__name__
            layer_info = None
            if (classname == 'Conv2d'):
                layer_info = (module.kernel_size, module.stride, module.padding)
            aa = compute_cov_a(input[0].data, classname, layer_info, self.fast_cnn)
            if (self.steps == 0):
                self.m_aa[module] = aa.clone()
            update_running_stat(aa, self.m_aa[module], self.stat_decay)
    def _save_grad_output(self, module, grad_input, grad_output):
        # Backward hook: update the running output-gradient covariance G.
        # NOTE(review): self.acc_stats is set externally (e.g. by A2C_ACKTR
        # around the Fisher-loss backward pass), not in __init__.
        if self.acc_stats:
            classname = module.__class__.__name__
            layer_info = None
            if (classname == 'Conv2d'):
                layer_info = (module.kernel_size, module.stride, module.padding)
            gg = compute_cov_g(grad_output[0].data, classname, layer_info, self.fast_cnn)
            if (self.steps == 0):
                self.m_gg[module] = gg.clone()
            update_running_stat(gg, self.m_gg[module], self.stat_decay)
    def _prepare_model(self):
        # Register hooks on every supported module; biases must already have
        # been split out (see split_bias in __init__).
        for module in self.model.modules():
            classname = module.__class__.__name__
            if (classname in self.known_modules):
                assert (not ((classname in ['Linear', 'Conv2d']) and (module.bias is not None))), 'You must have a bias as a separate layer'
                self.modules.append(module)
                module.register_forward_pre_hook(self._save_input)
                module.register_backward_hook(self._save_grad_output)
    def step(self):
        """Precondition all gradients with the K-FAC inverse, clip by the
        KL-based trust region, then take the inner SGD step."""
        if (self.weight_decay > 0):
            for p in self.model.parameters():
                p.grad.data.add_(self.weight_decay, p.data)
        updates = {}
        for (i, m) in enumerate(self.modules):
            assert (len(list(m.parameters())) == 1), 'Can handle only one parameter at the moment'
            classname = m.__class__.__name__
            p = next(m.parameters())
            la = (self.damping + self.weight_decay)
            if ((self.steps % self.Tf) == 0):
                # Refresh eigendecompositions of the running factors.
                # NOTE(review): torch.symeig is deprecated in newer PyTorch
                # (torch.linalg.eigh is the replacement) — confirm the pinned
                # torch version before migrating.
                (self.d_a[m], self.Q_a[m]) = torch.symeig(self.m_aa[m], eigenvectors=True)
                (self.d_g[m], self.Q_g[m]) = torch.symeig(self.m_gg[m], eigenvectors=True)
                # Zero out numerically-tiny eigenvalues.
                self.d_a[m].mul_((self.d_a[m] > 1e-06).float())
                self.d_g[m].mul_((self.d_g[m] > 1e-06).float())
            if (classname == 'Conv2d'):
                p_grad_mat = p.grad.data.view(p.grad.data.size(0), (- 1))
            else:
                p_grad_mat = p.grad.data
            # Natural-gradient update in the Kronecker eigenbasis with damping la.
            v1 = ((self.Q_g[m].t() @ p_grad_mat) @ self.Q_a[m])
            v2 = (v1 / ((self.d_g[m].unsqueeze(1) * self.d_a[m].unsqueeze(0)) + la))
            v = ((self.Q_g[m] @ v2) @ self.Q_a[m].t())
            v = v.view(p.grad.data.size())
            updates[p] = v
        # Scale the whole update so the estimated KL change stays within kl_clip.
        vg_sum = 0
        for p in self.model.parameters():
            v = updates[p]
            vg_sum += (((v * p.grad.data) * self.lr) * self.lr).sum()
        nu = min(1, math.sqrt((self.kl_clip / vg_sum)))
        for p in self.model.parameters():
            v = updates[p]
            p.grad.data.copy_(v)
            p.grad.data.mul_(nu)
        self.optim.step()
        self.steps += 1
|
class PPO(object):
    """Proximal Policy Optimization with the clipped surrogate objective."""

    def __init__(self, actor_critic, clip_param, ppo_epoch, num_mini_batch, value_loss_coef, entropy_coef, lr=None, eps=None, max_grad_norm=None, amsgrad=True, weight_decay=0.0):
        self.actor_critic = actor_critic
        self.clip_param = clip_param
        self.ppo_epoch = ppo_epoch
        self.num_mini_batch = num_mini_batch
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.max_grad_norm = max_grad_norm
        self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        self.last_grad_norm = None

    def update(self, rollouts):
        """Run ``ppo_epoch`` passes of clipped-PPO minibatch updates.

        Returns (value_loss, action_loss, dist_entropy,
        max_importance_weight, info) where the first three are averaged over
        all minibatch updates and info is an empty dict.
        """
        # Normalize advantages once, over the whole rollout.
        advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-05)
        total_value_loss = 0
        total_action_loss = 0
        total_entropy = 0
        max_ratio = 0
        for _ in range(self.ppo_epoch):
            # Recurrent policies need temporally-coherent minibatches.
            if hasattr(self.actor_critic.base, 'gru'):
                generator = rollouts.recurrent_generator(advantages, self.num_mini_batch)
            else:
                generator = rollouts.feed_forward_generator(advantages, self.num_mini_batch)
            for sample in generator:
                (obs_batch, states_batch, actions_batch, return_batch, masks_batch, old_log_probs_batch, adv_targ) = sample
                (values, log_probs, dist_entropy, states) = self.actor_critic.evaluate_actions(obs_batch, states_batch, masks_batch, actions_batch)
                # Importance ratio between the current and behavior policies.
                ratio = torch.exp(log_probs - old_log_probs_batch)
                unclipped = ratio * adv_targ
                clipped = torch.clamp(ratio, 1.0 - self.clip_param, 1.0 + self.clip_param) * adv_targ
                action_loss = -torch.min(unclipped, clipped).mean()
                value_loss = F.mse_loss(values, return_batch)
                self.optimizer.zero_grad()
                loss = value_loss * self.value_loss_coef + action_loss - dist_entropy * self.entropy_coef
                loss.backward()
                self.last_grad_norm = nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
                self.optimizer.step()
                total_value_loss += value_loss.item()
                total_action_loss += action_loss.item()
                total_entropy += dist_entropy.item()
                max_ratio = max(torch.max(ratio).item(), max_ratio)
        num_updates = self.ppo_epoch * self.num_mini_batch
        return (total_value_loss / num_updates, total_action_loss / num_updates, total_entropy / num_updates, max_ratio, {})
|
class PPOCuriosity(object):
    """PPO with an ICM-style curiosity bonus: an inverse model predicts the
    action between consecutive state features and a forward model predicts
    the next state features; the forward-model error is used as an intrinsic
    reward added to the returns/advantages."""

    def __init__(self, actor_critic, clip_param, ppo_epoch, num_mini_batch, value_loss_coef, entropy_coef, optimizer=None, lr=None, eps=None, max_grad_norm=None, amsgrad=True, weight_decay=0.0):
        self.actor_critic = actor_critic
        self.clip_param = clip_param
        self.ppo_epoch = ppo_epoch
        self.num_mini_batch = num_mini_batch
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        # Fixed curiosity mixing weights.
        self.forward_loss_coef = 0.2
        self.inverse_loss_coef = 0.8
        self.curiosity_coef = 0.2
        self.original_task_reward_proportion = 1.0
        self.max_grad_norm = max_grad_norm
        self.optimizer = optimizer
        if (self.optimizer is None):
            self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        self.last_grad_norm = None

    def update(self, rollouts):
        """Run PPO epochs with curiosity-augmented returns and advantages.

        Returns (value_loss, action_loss, dist_entropy, info_dict) averaged
        over all minibatch updates; per-loss epoch sums are also stored on
        self (forward_loss_epoch, inverse_loss_epoch).
        """
        advantages = ((rollouts.returns * self.original_task_reward_proportion) - rollouts.value_preds)
        value_loss_epoch = 0
        action_loss_epoch = 0
        dist_entropy_epoch = 0
        max_importance_weight_epoch = 0
        self.forward_loss_epoch = 0
        self.inverse_loss_epoch = 0
        for e in range(self.ppo_epoch):
            if hasattr(self.actor_critic.base, 'gru'):
                data_generator = rollouts.recurrent_generator(advantages, self.num_mini_batch)
                raise NotImplementedError('PPOCuriosity has not implemented for recurrent networks because masking is undefined')
            else:
                data_generator = rollouts.feed_forward_generator_with_next_state(advantages, self.num_mini_batch)
            for sample in data_generator:
                (observations_batch, next_observations_batch, rnn_history_state, actions_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ) = sample
                (values, action_log_probs, dist_entropy, next_rnn_history_state, state_features) = self.actor_critic.evaluate_actions(observations_batch, rnn_history_state, masks_batch, actions_batch)
                (value, next_state_features, _) = self.actor_critic.base(next_observations_batch, next_rnn_history_state, masks_batch)
                # Inverse model: predict the taken action from (s, s') features.
                pred_action = self.actor_critic.base.inverse_model(state_features.detach(), next_state_features)
                self.inverse_loss = F.cross_entropy(pred_action, actions_batch.squeeze(1))
                one_hot_actions = torch.zeros((actions_batch.shape[0], self.actor_critic.dist.num_outputs), device=actions_batch.device)
                one_hot_actions.scatter_(1, actions_batch, 1.0)
                # Forward model: predict next-state features from (s, a).
                pred_next_state = self.actor_critic.base.forward_model(state_features.detach(), one_hot_actions)
                self.forward_loss = F.mse_loss(pred_next_state, next_state_features.detach())
                curiosity_bonus = (((1.0 - self.original_task_reward_proportion) * self.curiosity_coef) * self.forward_loss)
                return_batch += curiosity_bonus
                adv_targ += curiosity_bonus
                adv_targ = ((adv_targ - adv_targ.mean()) / (adv_targ.std() + 1e-05))
                ratio = torch.exp((action_log_probs - old_action_log_probs_batch))
                clipped_ratio = torch.clamp(ratio, (1.0 - self.clip_param), (1.0 + self.clip_param))
                surr1 = (ratio * adv_targ)
                surr2 = (clipped_ratio * adv_targ)
                self.action_loss = (- torch.min(surr1, surr2).mean())
                self.value_loss = F.mse_loss(values, return_batch)
                self.dist_entropy = dist_entropy
                self.optimizer.zero_grad()
                self.get_loss().backward()
                # BUG FIX: previously called .parameters() on the loss tensors
                # (F.mse_loss / F.cross_entropy outputs have no such method,
                # raising AttributeError); clip the curiosity models' gradients
                # instead, matching PPOReplayCuriosity.
                nn.utils.clip_grad_norm_(self.actor_critic.base.forward_model.parameters(), self.max_grad_norm)
                nn.utils.clip_grad_norm_(self.actor_critic.base.inverse_model.parameters(), self.max_grad_norm)
                self.last_grad_norm = nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
                self.optimizer.step()
                value_loss_epoch += self.value_loss.item()
                action_loss_epoch += self.action_loss.item()
                dist_entropy_epoch += self.dist_entropy.item()
                self.forward_loss_epoch += self.forward_loss.item()
                self.inverse_loss_epoch += self.inverse_loss.item()
                max_importance_weight_epoch = max(torch.max(ratio).item(), max_importance_weight_epoch)
        num_updates = (self.ppo_epoch * self.num_mini_batch)
        value_loss_epoch /= num_updates
        action_loss_epoch /= num_updates
        dist_entropy_epoch /= num_updates
        self.forward_loss_epoch /= num_updates
        self.inverse_loss_epoch /= num_updates
        self.last_update_max_importance_weight = max_importance_weight_epoch
        return (value_loss_epoch, action_loss_epoch, dist_entropy_epoch, {})

    def get_loss(self):
        """Combined PPO + curiosity loss (value, policy, entropy, forward, inverse)."""
        return (((((self.value_loss * self.value_loss_coef) + self.action_loss) - (self.dist_entropy * self.entropy_coef)) + (self.forward_loss * self.forward_loss_coef)) + (self.inverse_loss * self.inverse_loss_coef))
|
class PPOReplayCuriosity(object):
    """PPO with replay (shuffled on/off-policy epochs) plus an ICM-style
    curiosity bonus from forward/inverse dynamics models."""

    def __init__(self, actor_critic, clip_param, ppo_epoch, num_mini_batch, value_loss_coef, entropy_coef, on_policy_epoch, off_policy_epoch, lr=None, eps=None, max_grad_norm=None, amsgrad=True, weight_decay=0.0, curiosity_reward_coef=0.1, forward_loss_coef=0.2, inverse_loss_coef=0.8):
        self.actor_critic = actor_critic
        self.clip_param = clip_param
        self.ppo_epoch = ppo_epoch
        self.on_policy_epoch = on_policy_epoch
        self.off_policy_epoch = off_policy_epoch
        self.num_mini_batch = num_mini_batch
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.forward_loss_coef = forward_loss_coef
        self.inverse_loss_coef = inverse_loss_coef
        self.curiosity_reward_coef = curiosity_reward_coef
        self.max_grad_norm = max_grad_norm
        self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        self.last_grad_norm = None

    def update(self, rollouts):
        """Run shuffled on/off-policy epochs of curiosity-augmented PPO.

        Returns (value_loss, action_loss, dist_entropy,
        max_importance_weight, info) averaged over all minibatch updates.
        """
        value_loss_epoch = 0
        action_loss_epoch = 0
        dist_entropy_epoch = 0
        self.forward_loss_epoch = 0
        self.inverse_loss_epoch = 0
        max_importance_weight_epoch = 0
        # Interleave on-policy (0) and off-policy (1) epochs in random order.
        on_policy = ([0] * self.on_policy_epoch)
        off_policy = ([1] * self.off_policy_epoch)
        epochs = (on_policy + off_policy)
        random.shuffle(epochs)
        for e in epochs:
            if (e == 0):
                data_generator = rollouts.feed_forward_generator_with_next_state(None, self.num_mini_batch, on_policy=True)
            else:
                data_generator = rollouts.feed_forward_generator_with_next_state(None, self.num_mini_batch, on_policy=False)
            for sample in data_generator:
                (observations_batch, next_observations_batch, states_batch, actions_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ) = sample
                actions_batch_long = actions_batch.type(torch.cuda.LongTensor)
                (values, action_log_probs, dist_entropy, next_states_batch) = self.actor_critic.evaluate_actions(observations_batch, states_batch, masks_batch, actions_batch)
                state_feats = self.actor_critic.base.perception_unit(observations_batch)
                next_state_feats = self.actor_critic.base.perception_unit(next_observations_batch)
                # Inverse model: predict the taken action from (s, s') features.
                pred_action = self.actor_critic.base.inverse_model(state_feats, next_state_feats)
                self.inverse_loss = F.cross_entropy(pred_action, actions_batch_long.squeeze(1))
                one_hot_actions = torch.zeros((actions_batch.shape[0], self.actor_critic.dist.num_outputs), device=actions_batch.device)
                one_hot_actions.scatter_(1, actions_batch_long, 1.0)
                # Forward model: predict next-state features from (s, a).
                pred_next_state = self.actor_critic.base.forward_model(state_feats, one_hot_actions)
                self.forward_loss = F.mse_loss(pred_next_state, next_state_feats)
                # Curiosity bonus is detached before augmenting the advantages.
                curiosity_bonus = (self.curiosity_reward_coef * self.forward_loss)
                adv_targ += curiosity_bonus.detach()
                adv_targ = ((adv_targ - adv_targ.mean()) / (adv_targ.std() + 1e-05))
                ratio = torch.exp((action_log_probs - old_action_log_probs_batch))
                surr1 = (ratio * adv_targ)
                surr2 = (torch.clamp(ratio, (1.0 - self.clip_param), (1.0 + self.clip_param)) * adv_targ)
                self.action_loss = (- torch.min(surr1, surr2).mean())
                self.value_loss = F.mse_loss(values, return_batch)
                self.dist_entropy = dist_entropy
                self.optimizer.zero_grad()
                self.get_loss().backward()
                nn.utils.clip_grad_norm_(self.actor_critic.base.forward_model.parameters(), self.max_grad_norm)
                nn.utils.clip_grad_norm_(self.actor_critic.base.inverse_model.parameters(), self.max_grad_norm)
                self.last_grad_norm = nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
                self.optimizer.step()
                value_loss_epoch += self.value_loss.item()
                action_loss_epoch += self.action_loss.item()
                dist_entropy_epoch += dist_entropy.item()
                self.forward_loss_epoch += self.forward_loss.item()
                self.inverse_loss_epoch += self.inverse_loss.item()
                max_importance_weight_epoch = max(torch.max(ratio).item(), max_importance_weight_epoch)
        self.last_update_max_importance_weight = max_importance_weight_epoch
        # BUG FIX: the loop performs (on_policy_epoch + off_policy_epoch)
        # epochs, not ppo_epoch, so average over the actual number of updates
        # (consistent with PPOReplay).
        num_updates = ((self.on_policy_epoch + self.off_policy_epoch) * self.num_mini_batch)
        value_loss_epoch /= num_updates
        action_loss_epoch /= num_updates
        dist_entropy_epoch /= num_updates
        # Average the curiosity losses too, consistent with PPOCuriosity.
        self.forward_loss_epoch /= num_updates
        self.inverse_loss_epoch /= num_updates
        return (value_loss_epoch, action_loss_epoch, dist_entropy_epoch, max_importance_weight_epoch, {})

    def get_loss(self):
        """Combined PPO + curiosity loss (value, policy, entropy, forward, inverse)."""
        return (((((self.value_loss * self.value_loss_coef) + self.action_loss) - (self.dist_entropy * self.entropy_coef)) + (self.forward_loss * self.forward_loss_coef)) + (self.inverse_loss * self.inverse_loss_coef))
|
class PPOReplay(object):
    """PPO with experience replay: mixes on-policy and off-policy epochs in
    random order, supports optional intrinsic losses (via loss_kwargs) and
    multi-GPU device placement for minibatches."""
    def __init__(self, actor_critic: BasePolicy, clip_param, ppo_epoch, num_mini_batch, value_loss_coef, entropy_coef, on_policy_epoch, off_policy_epoch, num_steps, n_frames, lr=None, eps=None, max_grad_norm=None, amsgrad=True, weight_decay=0.0, gpu_devices=None, loss_kwargs={}, cache_kwargs={}, optimizer_class='optim.Adam', optimizer_kwargs={}):
        self.actor_critic = actor_critic
        self.clip_param = clip_param
        self.on_policy_epoch = on_policy_epoch
        self.off_policy_epoch = off_policy_epoch
        self.num_mini_batch = num_mini_batch
        self.num_steps = num_steps
        self.n_frames = n_frames
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.loss_kwargs = loss_kwargs
        # Default to no intrinsic losses; coefs and types must stay paired.
        self.loss_kwargs['intrinsic_loss_coefs'] = (self.loss_kwargs['intrinsic_loss_coefs'] if ('intrinsic_loss_coefs' in loss_kwargs) else [])
        self.loss_kwargs['intrinsic_loss_types'] = (self.loss_kwargs['intrinsic_loss_types'] if ('intrinsic_loss_types' in loss_kwargs) else [])
        assert (len(loss_kwargs['intrinsic_loss_coefs']) == len(loss_kwargs['intrinsic_loss_types'])), 'must have same number of losses as loss_coefs'
        self.max_grad_norm = max_grad_norm
        # NOTE(review): eval() on a config-supplied optimizer class string —
        # acceptable only for trusted configuration. 'alpha' parameters get
        # their own param group with weight decay disabled.
        self.optimizer = eval(optimizer_class)([{'params': [param for (name, param) in actor_critic.named_parameters() if ('alpha' in name)], 'weight_decay': 0.0}, {'params': [param for (name, param) in actor_critic.named_parameters() if ('alpha' not in name)]}], lr=lr, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, **optimizer_kwargs)
        self.last_grad_norm = None
        self.gpu_devices = gpu_devices
    def update(self, rollouts):
        """Run shuffled on/off-policy PPO epochs over the rollouts.

        Returns (value_loss, action_loss, dist_entropy,
        max_importance_weight, info) averaged over all minibatch updates;
        info also carries averaged intrinsic losses and concatenated cache
        tensors produced by evaluate_actions.
        """
        value_loss_epoch = 0
        action_loss_epoch = 0
        dist_entropy_epoch = 0
        max_importance_weight_epoch = 0
        # Interleave on-policy (0) and off-policy (1) epochs in random order.
        on_policy = ([0] * self.on_policy_epoch)
        off_policy = ([1] * self.off_policy_epoch)
        epochs = (on_policy + off_policy)
        random.shuffle(epochs)
        info = {}
        # Keep batches on CPU only when they will be scattered across GPUs.
        yield_cuda = (not ((torch.cuda.device_count() > 1) and ((self.gpu_devices is None) or (len(self.gpu_devices) > 1))))
        for e in epochs:
            if (e == 0):
                data_generator = rollouts.feed_forward_generator(None, self.num_mini_batch, on_policy=True, device=self.gpu_devices[0], yield_cuda=yield_cuda)
            else:
                data_generator = rollouts.feed_forward_generator(None, self.num_mini_batch, on_policy=False, device=self.gpu_devices[0], yield_cuda=yield_cuda)
            for sample in data_generator:
                (observations_batch, states_batch, actions_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ) = sample
                cache = {}
                (values, action_log_probs, dist_entropy, states) = self.actor_critic.evaluate_actions(observations_batch, states_batch, masks_batch, actions_batch, cache)
                intrinsic_loss_dict = self.actor_critic.compute_intrinsic_losses(self.loss_kwargs, observations_batch, states_batch, masks_batch, actions_batch, cache)
                # Clipped PPO surrogate; stale tensors moved to the lead device.
                ratio = torch.exp((action_log_probs - old_action_log_probs_batch.to(self.gpu_devices[0])))
                surr1 = (ratio * adv_targ.to(self.gpu_devices[0]))
                surr2 = (torch.clamp(ratio, (1.0 - self.clip_param), (1.0 + self.clip_param)) * adv_targ.to(self.gpu_devices[0]))
                action_loss = (- torch.min(surr1, surr2).mean())
                value_loss = F.mse_loss(values, return_batch.to(self.gpu_devices[0]))
                self.optimizer.zero_grad()
                total_loss = (((value_loss * self.value_loss_coef) + action_loss) - (dist_entropy * self.entropy_coef))
                for (iloss, iloss_coef) in zip(self.loss_kwargs['intrinsic_loss_types'], self.loss_kwargs['intrinsic_loss_coefs']):
                    total_loss += (intrinsic_loss_dict[iloss] * iloss_coef)
                total_loss.backward()
                self.last_grad_norm = nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
                self.optimizer.step()
                value_loss_epoch += value_loss.item()
                action_loss_epoch += action_loss.item()
                dist_entropy_epoch += dist_entropy.item()
                # Accumulate intrinsic losses (scalars) into info.
                for iloss in self.loss_kwargs['intrinsic_loss_types']:
                    if (iloss in info):
                        info[iloss] += intrinsic_loss_dict[iloss].item()
                    else:
                        info[iloss] = intrinsic_loss_dict[iloss].item()
                # Accumulate flattened cache tensors (e.g. diagnostics) into info.
                for key in cache:
                    key_flat = torch.cat(cache[key]).view((- 1)).detach()
                    if (key in info):
                        info[key] = torch.cat((info[key], key_flat))
                    else:
                        info[key] = key_flat
                max_importance_weight_epoch = max(torch.max(ratio).item(), max_importance_weight_epoch)
        num_updates = ((self.on_policy_epoch + self.off_policy_epoch) * self.num_mini_batch)
        value_loss_epoch /= num_updates
        action_loss_epoch /= num_updates
        dist_entropy_epoch /= num_updates
        for iloss in self.loss_kwargs['intrinsic_loss_types']:
            info[iloss] /= num_updates
        return (value_loss_epoch, action_loss_epoch, dist_entropy_epoch, max_importance_weight_epoch, info)
|
class Categorical(nn.Module):
    """Linear head producing a FixedCategorical action distribution."""

    def __init__(self, num_inputs, num_outputs):
        super(Categorical, self).__init__()
        self.num_outputs = num_outputs
        # Small orthogonal gain keeps the initial action logits near uniform.
        init_ = (lambda m: init(m, nn.init.orthogonal_, (lambda x: nn.init.constant_(x, 0)), gain=0.01))
        self.linear = init_(nn.Linear(num_inputs, num_outputs))

    def forward(self, x):
        logits = self.linear(x)
        return FixedCategorical(logits=logits)
|
class DiagGaussian(nn.Module):
    """Linear mean head plus a state-independent learned log-std, producing
    a FixedNormal action distribution."""

    def __init__(self, num_inputs, num_outputs):
        super(DiagGaussian, self).__init__()
        self.num_outputs = num_outputs
        init_ = (lambda m: init(m, init_normc_, (lambda x: nn.init.constant_(x, 0))))
        self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
        # AddBias over zeros acts as a learned, input-independent log-std.
        self.logstd = AddBias(torch.zeros(num_outputs))

    def forward(self, x):
        mean = self.fc_mean(x)
        zeros = torch.zeros(mean.size())
        if x.is_cuda:
            zeros = zeros.cuda()
        log_std = self.logstd(zeros)
        return FixedNormal(mean, log_std.exp())
|
class Flatten(nn.Module):
    """Collapse every non-batch dimension into one."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
|
class LearnerModel(nn.Module):
    """Abstract interface for policy/value network bases."""
    def __init__(self, num_inputs):
        # num_inputs is accepted for interface parity but not used here.
        super().__init__()
    @property
    def state_size(self):
        # Size of the recurrent state (siblings return 1 when not recurrent).
        raise NotImplementedError('state_size not implemented in abstract class LearnerModel')
    @property
    def output_size(self):
        # Dimensionality of the feature output.
        raise NotImplementedError('output_size not implemented in abstract class LearnerModel')
    def forward(self, inputs, states, masks):
        # Concrete bases (e.g. CNNBase/MLPBase) return (value, features, states).
        raise NotImplementedError('forward not implemented in abstract class LearnerModel')
|
class CNNModel(nn.Module):
    """Stub CNN model; only stores input_transforms (appears unfinished)."""
    def __init__(self, num_inputs, use_gru, input_transforms=None):
        super().__init__()
        # num_inputs and use_gru are currently unused; only transforms are kept.
        self.input_transforms = input_transforms
|
class CNNBase(nn.Module):
    """CNN trunk (optionally recurrent via a GRU cell) producing 512-d
    features and a scalar value estimate."""
    def __init__(self, num_inputs, use_gru):
        super(CNNBase, self).__init__()
        init_ = (lambda m: init(m, nn.init.orthogonal_, (lambda x: nn.init.constant_(x, 0)), nn.init.calculate_gain('relu')))
        # Conv stack flattens to 32*7*7 — presumably sized for 84x84 inputs;
        # TODO confirm against callers.
        self.main = nn.Sequential(init_(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(), init_(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(), init_(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(), Flatten(), init_(nn.Linear(((32 * 7) * 7), 512)), nn.ReLU())
        if use_gru:
            self.gru = nn.GRUCell(512, 512)
            nn.init.orthogonal_(self.gru.weight_ih.data)
            nn.init.orthogonal_(self.gru.weight_hh.data)
            self.gru.bias_ih.data.fill_(0)
            self.gru.bias_hh.data.fill_(0)
        init_ = (lambda m: init(m, nn.init.orthogonal_, (lambda x: nn.init.constant_(x, 0))))
        self.critic_linear = init_(nn.Linear(512, 1))
        self.train()
    @property
    def state_size(self):
        # GRU hidden size when recurrent, else a placeholder of 1.
        if hasattr(self, 'gru'):
            return 512
        else:
            return 1
    @property
    def output_size(self):
        return 512
    def forward(self, inputs, states, masks):
        """Return (value, features, states).

        When recurrent and inputs carry T*N rows while states carry only N,
        the batch is unrolled over time with masks zeroing the hidden state
        at episode boundaries.
        """
        x = self.main(inputs)
        if hasattr(self, 'gru'):
            if (inputs.size(0) == states.size(0)):
                # Single step: one GRU update per environment.
                x = states = self.gru(x, (states * masks))
            else:
                # Unroll: inputs are (T*N, ...), states are (N, ...).
                N = states.size(0)
                T = int((x.size(0) / N))
                x = x.view(T, N, x.size(1))
                masks = masks.view(T, N, 1)
                outputs = []
                for i in range(T):
                    hx = states = self.gru(x[i], (states * masks[i]))
                    outputs.append(hx)
                x = torch.stack(outputs, dim=0)
                x = x.view((T * N), (- 1))
        return (self.critic_linear(x), x, states)
|
class MLPBase(nn.Module):
    """Two-tower MLP base (separate actor and critic towers), 64 hidden
    units each with tanh activations; the critic tower feeds a scalar head."""

    def __init__(self, num_inputs):
        super(MLPBase, self).__init__()
        init_ = (lambda m: init(m, init_normc_, (lambda x: nn.init.constant_(x, 0))))
        def make_tower():
            # Fresh two-layer tanh MLP; called once per tower.
            return nn.Sequential(init_(nn.Linear(num_inputs, 64)), nn.Tanh(), init_(nn.Linear(64, 64)), nn.Tanh())
        self.actor = make_tower()
        self.critic = make_tower()
        self.critic_linear = init_(nn.Linear(64, 1))
        self.train()

    @property
    def state_size(self):
        # Not recurrent; placeholder state of size 1.
        return 1

    @property
    def output_size(self):
        return 64

    def forward(self, inputs, states, masks):
        value = self.critic_linear(self.critic(inputs))
        return (value, self.actor(inputs), states)
|
class PreprocessingTranforms(object):
    """Placeholder for observation preprocessing; both methods are no-op stubs."""
    # NOTE(review): the class name is misspelled ("Tranforms"); kept as-is
    # because renaming would break any existing references.
    def __init__(self, input_dims):
        # input_dims is currently ignored.
        pass
    def forward(self, batch):
        # Intentionally unimplemented (returns None).
        pass
|
class SegmentTree():
    """Fixed-size sum tree for prioritized replay: priorities live in the
    leaves so that sampling proportionally to priority is a log-time descent
    from the root."""

    def __init__(self, size):
        self.index = 0  # next circular write slot
        self.size = size
        self.full = False  # becomes True once the buffer has wrapped
        self.sum_tree = [0] * (2 * size - 1)  # internal nodes followed by leaves
        self.data = [None] * size
        self.max = 1  # running max priority, used for new appends

    def _propagate(self, index, value):
        """Recompute sums on the path from index's parent up to the root."""
        parent = (index - 1) // 2
        left, right = 2 * parent + 1, 2 * parent + 2
        self.sum_tree[parent] = self.sum_tree[left] + self.sum_tree[right]
        if parent != 0:
            self._propagate(parent, value)

    def update(self, index, value):
        """Set the priority stored at tree position ``index``."""
        self.sum_tree[index] = value
        self._propagate(index, value)
        self.max = max(value, self.max)

    def append(self, data, value):
        """Write ``data`` at the next circular slot with priority ``value``."""
        self.data[self.index] = data
        self.update(self.index + self.size - 1, value)  # leaf offset
        self.index = (self.index + 1) % self.size
        self.full = self.full or self.index == 0
        self.max = max(value, self.max)

    def _retrieve(self, index, value):
        """Descend iteratively to the leaf whose cumulative span holds value."""
        while True:
            left = 2 * index + 1
            if left >= len(self.sum_tree):
                return index
            if value <= self.sum_tree[left]:
                index = left
            else:
                value -= self.sum_tree[left]
                index = left + 1

    def find(self, value):
        """Return (priority, data index, tree index) for cumulative ``value``."""
        tree_index = self._retrieve(0, value)
        data_index = tree_index - self.size + 1
        return (self.sum_tree[tree_index], data_index, tree_index)

    def get(self, data_index):
        return self.data[data_index % self.size]

    def total(self):
        """Sum of all priorities (the root of the tree)."""
        return self.sum_tree[0]
|
class ReplayMemory():
    """Prioritized n-step replay memory over dict-valued (multi-sensor) states.

    Transitions live in a sum-tree (`SegmentTree`) so sampling is proportional
    to priority; `sample` also returns normalized importance-sampling weights.

    Fix vs. original: `__next__` referenced an undefined module-level
    `blank_trans`; it now uses `self.blank_trans`.
    """

    def __init__(self, device, history_length, discount, multi_step, priority_weight, priority_exponent, capacity, blank_state):
        self.device = device
        self.capacity = capacity
        self.history = history_length  # number of stacked frames per state
        self.discount = discount  # per-step discount factor
        self.n = multi_step  # n-step return horizon
        self.priority_weight = priority_weight  # beta: importance-sampling exponent
        self.priority_exponent = priority_exponent  # alpha applied to priorities
        self.t = 0  # timestep index within the current episode
        self.transitions = SegmentTree(capacity)
        self.keys = blank_state.keys()
        # Padding transition used before episode start / past termination.
        self.blank_trans = Transition(0, blank_state, None, None, 0, False)

    def append(self, state, action, action_log_probs, reward, terminal):
        """Store one transition; resets the episode clock on terminal."""
        # Keep only the newest frame of each sensor, as float32 on CPU.
        state = {k: state[k].peek()[-1].to(dtype=torch.float32, device=torch.device('cpu')) for k in state}
        # New transitions get the current max priority so they are sampled at least once.
        self.transitions.append(Transition(self.t, state, action, action_log_probs, reward, not terminal), self.transitions.max)
        self.t = 0 if terminal else self.t + 1

    def _get_transition(self, idx):
        """Gather the frame history ending at `idx` plus the n-step lookahead,
        padding with the blank transition across episode boundaries."""
        transition = [None] * (self.history + self.n)
        transition[self.history - 1] = self.transitions.get(idx)
        # Walk backwards to fill the frame history; stop at an episode start.
        for t in range(self.history - 2, -1, -1):
            if transition[t + 1].timestep == 0:
                transition[t] = self.blank_trans
            else:
                transition[t] = self.transitions.get(idx - self.history + 1 + t)
        # Walk forwards for the n-step return; stop after termination.
        for t in range(self.history, self.history + self.n):
            if transition[t - 1].nonterminal:
                transition[t] = self.transitions.get(idx - self.history + 1 + t)
            else:
                transition[t] = self.blank_trans
        return transition

    def _get_sample_from_segment(self, segment, i):
        """Sample one transition from the i-th priority segment, rejecting
        indices too close to the write head to form valid stacks."""
        valid = False
        while not valid:
            sample = random.uniform(i * segment, (i + 1) * segment)
            (prob, idx, tree_idx) = self.transitions.find(sample)
            if (((self.transitions.index - idx) % self.capacity) > self.n) and (((idx - self.transitions.index) % self.capacity) >= self.history) and (prob != 0):
                valid = True
        transition = self._get_transition(idx)
        state = {k: torch.stack([trans.state[k] for trans in transition[:self.history]]).to(dtype=torch.float32, device=self.device) for k in self.keys}
        next_state = {k: torch.stack([trans.state[k] for trans in transition[self.n:(self.n + self.history)]]).to(dtype=torch.float32, device=self.device) for k in self.keys}
        action = torch.tensor([transition[self.history - 1].action], dtype=torch.int64, device=self.device)
        action_log_prob = torch.tensor([transition[self.history - 1].action_log_prob], dtype=torch.float32, device=self.device)
        # Discounted n-step return; rewards past termination are 0 via blank_trans.
        R = torch.tensor([sum((self.discount ** n) * transition[self.history + n - 1].reward for n in range(self.n))], dtype=torch.float32, device=self.device)
        nonterminal = torch.tensor([transition[self.history + self.n - 1].nonterminal], dtype=torch.float32, device=self.device)
        return (prob, idx, tree_idx, state, action, action_log_prob, R, next_state, nonterminal)

    def sample(self, batch_size):
        """Draw a prioritized batch and its normalized importance weights."""
        p_total = self.transitions.total()
        segment = p_total / batch_size  # one equal-mass segment per sample
        batch = [self._get_sample_from_segment(segment, i) for i in range(batch_size)]
        (probs, idxs, tree_idxs, states, actions, action_log_probs, returns, next_states, nonterminals) = zip(*batch)
        states = {k: torch.stack([state[k] for state in states]).squeeze_() for k in self.keys}
        next_states = {k: torch.stack([state[k] for state in next_states]).squeeze_() for k in self.keys}
        (actions, action_log_probs, returns, nonterminals) = (torch.cat(actions), torch.cat(action_log_probs), torch.cat(returns), torch.stack(nonterminals))
        probs = torch.tensor(probs, dtype=torch.float32, device=self.device) / p_total
        capacity = self.capacity if self.transitions.full else self.transitions.index
        # Importance-sampling weights, normalized by the max for stability.
        weights = (capacity * probs) ** (-self.priority_weight)
        weights = weights / weights.max()
        return (tree_idxs, states, actions, action_log_probs, returns, next_states, nonterminals, weights)

    def update_priorities(self, idxs, priorities):
        """Raise priorities by alpha (in place) and write them into the tree."""
        priorities.pow_(self.priority_exponent)
        # Plain loop: the original used a list comprehension purely for side effects.
        for (idx, priority) in zip(idxs, priorities):
            self.transitions.update(idx, priority)

    def __iter__(self):
        self.current_idx = 0
        return self

    def __next__(self):
        """Yield stacked states in insertion order (e.g. for validation passes)."""
        if self.current_idx == self.capacity:
            raise StopIteration
        state_stack = [None] * self.history
        state_stack[-1] = self.transitions.data[self.current_idx].state
        prev_timestep = self.transitions.data[self.current_idx].timestep
        for t in reversed(range(self.history - 1)):
            if prev_timestep == 0:
                # Fixed: original referenced the undefined global `blank_trans`.
                state_stack[t] = self.blank_trans.state
            else:
                state_stack[t] = self.transitions.data[self.current_idx + t - self.history + 1].state
                prev_timestep -= 1
        # NOTE(review): append() stores dict-valued states, but torch.stack here
        # assumes tensor-valued states — confirm which callers use this iterator.
        state = torch.stack(state_stack, 0).to(dtype=torch.float32, device=self.device).div_(255)
        self.current_idx += 1
        return state
|
class RolloutSensorDictCuriosityReplayBuffer(object):
    """Circular rollout storage over dict-valued (multi-sensor) observations.

    Holds up to `memory_size` timesteps per process. Mini-batch generators can
    run on-policy (most recent `num_steps` window) or off-policy (a random
    contiguous window), recomputing values and returns with the current
    `actor_critic`.

    Fixes vs. original: the non-GAE return computation referenced undefined
    names (`returns`, `masks_batch`) and iterated the whole buffer instead of
    the sampled window; the per-step value was computed twice, the second time
    outside `torch.no_grad()`.
    """

    def __init__(self, num_steps, num_processes, obs_shape, action_space, state_size, actor_critic, use_gae, gamma, tau, memory_size=10000):
        self.num_steps = num_steps
        self.num_processes = num_processes
        self.state_size = state_size
        self.memory_size = memory_size
        self.obs_shape = obs_shape
        self.sensor_names = set(obs_shape.keys())
        self.observations = SensorDict({k: torch.zeros(memory_size, num_processes, *ob_shape) for (k, ob_shape) in obs_shape.items()})
        self.states = torch.zeros(memory_size, num_processes, state_size)
        self.rewards = torch.zeros(memory_size, num_processes, 1)
        self.value_preds = torch.zeros(memory_size, num_processes, 1)
        self.returns = torch.zeros(memory_size, num_processes, 1)
        self.action_log_probs = torch.zeros(memory_size, num_processes, 1)
        self.actions = torch.zeros(memory_size, num_processes, 1)
        self.masks = torch.ones(memory_size, num_processes, 1)
        self.actor_critic = actor_critic
        self.use_gae = use_gae
        self.gamma = gamma  # discount factor
        self.tau = tau  # GAE lambda
        self.step = 0  # next write position in the circular buffer
        self.memory_occupied = 0  # valid entries so far (saturates at memory_size)

    def cuda(self):
        """Move all storage and the actor-critic to the default CUDA device."""
        self.observations = self.observations.apply(lambda k, v: v.cuda())
        self.states = self.states.cuda()
        self.rewards = self.rewards.cuda()
        self.value_preds = self.value_preds.cuda()
        self.returns = self.returns.cuda()
        self.action_log_probs = self.action_log_probs.cuda()
        self.actions = self.actions.cuda()
        self.masks = self.masks.cuda()
        self.actor_critic = self.actor_critic.cuda()

    def insert(self, current_obs, state, action, action_log_prob, value_pred, reward, mask):
        """Write one timestep at the head.

        Observations/states/masks land at `step + 1` (they describe the
        post-action timestep); actions/log-probs/values/rewards at `step`.
        """
        next_step = (self.step + 1) % self.memory_size
        # Copy each sensor's observation concurrently via parallel_apply.
        modules = [self.observations[k][next_step].copy_ for k in self.observations]
        inputs = tuple([(current_obs[k].peek(),) for k in self.observations])
        nn.parallel.parallel_apply(modules, inputs)
        self.states[next_step].copy_(state)
        self.actions[self.step].copy_(action)
        self.action_log_probs[self.step].copy_(action_log_prob)
        self.value_preds[self.step].copy_(value_pred)
        self.rewards[self.step].copy_(reward)
        self.masks[next_step].copy_(mask)
        self.step = (self.step + 1) % self.memory_size
        if self.memory_occupied < self.memory_size:
            self.memory_occupied += 1

    def get_current_observation(self):
        return self.observations.at(self.step)

    def get_current_state(self):
        return self.states[self.step]

    def get_current_mask(self):
        return self.masks[self.step]

    def after_update(self):
        # Nothing to roll over: the circular buffer keeps history in place.
        pass

    def feed_forward_generator_with_next_state(self, advantages, num_mini_batch, on_policy=True):
        """Yield mini-batches that also include next-step observations.

        On-policy (or while the buffer is still filling): use the most recent
        `num_steps` window; otherwise pick a random contiguous window. Values
        and returns are recomputed with the current critic. Each yield is
        (obs, next_obs, states, actions, returns, masks, old_log_probs, adv).
        """
        if on_policy or (self.memory_occupied < self.memory_size):
            stop_idx = self.step - 1
            start_idx = (self.step - self.num_steps - 1) % self.memory_size
        else:
            start_idx = (self.step - 1 - np.random.randint(self.num_steps + 1, self.memory_size)) % self.memory_size
            stop_idx = (start_idx + self.num_steps - 1) % self.memory_size
        observations_sample = SensorDict({k: torch.zeros(self.num_steps + 1, self.num_processes, *ob_shape) for (k, ob_shape) in self.obs_shape.items()}).apply(lambda k, v: v.cuda())
        next_observations_sample = SensorDict({k: torch.zeros(self.num_steps + 1, self.num_processes, *ob_shape) for (k, ob_shape) in self.obs_shape.items()}).apply(lambda k, v: v.cuda())
        states_sample = torch.zeros(self.num_steps + 1, self.num_processes, self.state_size).cuda()
        rewards_sample = torch.zeros(self.num_steps, self.num_processes, 1).cuda()
        values_sample = torch.zeros(self.num_steps + 1, self.num_processes, 1).cuda()
        returns_sample = torch.zeros(self.num_steps + 1, self.num_processes, 1).cuda()
        action_log_probs_sample = torch.zeros(self.num_steps, self.num_processes, 1).cuda()
        actions_sample = torch.zeros(self.num_steps, self.num_processes, 1).cuda()
        masks_sample = torch.ones(self.num_steps + 1, self.num_processes, 1).cuda()
        idx = start_idx
        sample_idx = 0
        while idx != (stop_idx % self.memory_size):
            next_idx = (idx + 1) % self.memory_size
            for k in self.observations:
                observations_sample[k][sample_idx] = self.observations[k][idx]
                # Per process: if the episode ended, reuse the current
                # observation so the (obs, next_obs) pair stays within-episode.
                for (j, not_done) in enumerate(self.masks[next_idx]):
                    if not_done > 0.5:
                        next_observations_sample[k][sample_idx][j] = self.observations[k][next_idx][j]
                    else:
                        next_observations_sample[k][sample_idx][j] = self.observations[k][idx][j]
            states_sample[sample_idx] = self.states[idx]
            try:
                rewards_sample[sample_idx] = self.rewards[idx]
            except:
                # Debug aid kept from original: dump indices, then re-raise.
                print(rewards_sample, self.rewards)
                print(sample_idx, idx, next_idx, start_idx, stop_idx)
                raise
            action_log_probs_sample[sample_idx] = self.action_log_probs[idx]
            actions_sample[sample_idx] = self.actions[idx]
            masks_sample[sample_idx] = self.masks[idx]
            with torch.no_grad():
                # Fixed: original called get_value twice per step, the second
                # time outside no_grad (building an unused autograd graph).
                values_sample[sample_idx] = self.actor_critic.get_value(self.observations.at(idx), self.states[idx], self.masks[idx])
            idx = next_idx
            sample_idx += 1
        with torch.no_grad():
            next_value = self.actor_critic.get_value(self.observations.at(stop_idx), self.states[stop_idx], self.masks[stop_idx])
        if self.use_gae:
            values_sample[-1] = next_value
            gae = 0
            for step in reversed(range(rewards_sample.size(0))):
                delta = rewards_sample[step] + self.gamma * values_sample[step + 1] * masks_sample[step + 1] - values_sample[step]
                gae = delta + self.gamma * self.tau * masks_sample[step + 1] * gae
                returns_sample[step] = gae + values_sample[step]
        else:
            # Fixed: original referenced undefined `returns`/`masks_batch` and
            # iterated self.rewards (the whole buffer) instead of the window.
            returns_sample[-1] = next_value
            for step in reversed(range(rewards_sample.size(0))):
                returns_sample[step] = returns_sample[step + 1] * self.gamma * masks_sample[step + 1] + rewards_sample[step]
        mini_batch_size = self.num_steps // num_mini_batch
        observations_batch = {}
        next_observations_batch = {}
        sampler = BatchSampler(SubsetRandomSampler(range(self.num_steps)), mini_batch_size, drop_last=False)
        advantages = returns_sample[:-1] - values_sample[:-1]
        for indices in sampler:
            for (k, sensor_ob) in observations_sample.items():
                observations_batch[k] = sensor_ob[:-1].view(-1, *sensor_ob.size()[2:])[indices]
                next_observations_batch[k] = next_observations_sample[k][:-1].view(-1, *sensor_ob.size()[2:])[indices]
            states_batch = states_sample[:-1].view(-1, states_sample.size(-1))[indices]
            actions_batch = actions_sample.view(-1, actions_sample.size(-1))[indices]
            return_batch = returns_sample[:-1].view(-1, 1)[indices]
            masks_batch = masks_sample[:-1].view(-1, 1)[indices]
            old_action_log_probs_batch = action_log_probs_sample.view(-1, 1)[indices]
            adv_targ = advantages.view(-1, 1)[indices]
            yield (observations_batch, next_observations_batch, states_batch, actions_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ)

    def feed_forward_generator(self, advantages, num_mini_batch, on_policy=True):
        """Yield PPO-style mini-batches (without next-step observations).

        Same sampling scheme as `feed_forward_generator_with_next_state`;
        advantages are additionally normalized here, as in the original.
        """
        if on_policy or (self.memory_occupied < self.memory_size):
            stop_idx = self.step
            start_idx = (self.step - self.num_steps) % self.memory_size
        else:
            start_idx = (self.step - np.random.randint(self.num_steps + 1, self.memory_size)) % self.memory_size
            stop_idx = (start_idx + self.num_steps) % self.memory_size
        observations_sample = SensorDict({k: torch.zeros(self.num_steps + 1, self.num_processes, *ob_shape) for (k, ob_shape) in self.obs_shape.items()}).apply(lambda k, v: v.cuda())
        states_sample = torch.zeros(self.num_steps + 1, self.num_processes, self.state_size).cuda()
        rewards_sample = torch.zeros(self.num_steps, self.num_processes, 1).cuda()
        values_sample = torch.zeros(self.num_steps + 1, self.num_processes, 1).cuda()
        returns_sample = torch.zeros(self.num_steps + 1, self.num_processes, 1).cuda()
        action_log_probs_sample = torch.zeros(self.num_steps, self.num_processes, 1).cuda()
        actions_sample = torch.zeros(self.num_steps, self.num_processes, 1).cuda()
        masks_sample = torch.ones(self.num_steps + 1, self.num_processes, 1).cuda()
        idx = start_idx
        sample_idx = 0
        while idx != stop_idx:
            for k in self.observations:
                observations_sample[k][sample_idx] = self.observations[k][idx]
            states_sample[sample_idx] = self.states[idx]
            rewards_sample[sample_idx] = self.rewards[idx]
            action_log_probs_sample[sample_idx] = self.action_log_probs[idx]
            actions_sample[sample_idx] = self.actions[idx]
            masks_sample[sample_idx] = self.masks[idx]
            with torch.no_grad():
                # Fixed: single no_grad value computation (see class docstring).
                values_sample[sample_idx] = self.actor_critic.get_value(self.observations.at(idx), self.states[idx], self.masks[idx])
            idx = (idx + 1) % self.memory_size
            sample_idx += 1
        with torch.no_grad():
            next_value = self.actor_critic.get_value(self.observations.at(stop_idx), self.states[stop_idx], self.masks[stop_idx])
        if self.use_gae:
            values_sample[-1] = next_value
            gae = 0
            for step in reversed(range(rewards_sample.size(0))):
                delta = rewards_sample[step] + self.gamma * values_sample[step + 1] * masks_sample[step + 1] - values_sample[step]
                gae = delta + self.gamma * self.tau * masks_sample[step + 1] * gae
                returns_sample[step] = gae + values_sample[step]
        else:
            # Fixed: original referenced undefined `returns`/`masks_batch` and
            # iterated self.rewards (the whole buffer) instead of the window.
            returns_sample[-1] = next_value
            for step in reversed(range(rewards_sample.size(0))):
                returns_sample[step] = returns_sample[step + 1] * self.gamma * masks_sample[step + 1] + rewards_sample[step]
        mini_batch_size = self.num_steps // num_mini_batch
        observations_batch = {}
        sampler = BatchSampler(SubsetRandomSampler(range(self.num_steps)), mini_batch_size, drop_last=False)
        advantages = returns_sample[:-1] - values_sample[:-1]
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-05)
        for indices in sampler:
            for (k, sensor_ob) in observations_sample.items():
                observations_batch[k] = sensor_ob[:-1].view(-1, *sensor_ob.size()[2:])[indices]
            states_batch = states_sample[:-1].view(-1, states_sample.size(-1))[indices]
            actions_batch = actions_sample.view(-1, actions_sample.size(-1))[indices]
            return_batch = returns_sample[:-1].view(-1, 1)[indices]
            masks_batch = masks_sample[:-1].view(-1, 1)[indices]
            old_action_log_probs_batch = action_log_probs_sample.view(-1, 1)[indices]
            adv_targ = advantages.view(-1, 1)[indices]
            yield (observations_batch, states_batch, actions_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ)
|
class SegmentTree(object):
    """Array-like structure supporting O(log n) range reductions.

    Behaves as a fixed-size array of `capacity` elements (capacity must be a
    power of two), but `reduce(start, end)` applies `operation` over a
    contiguous slice in O(log n), at the cost of O(log n) item assignment.
    """

    def __init__(self, capacity, operation, neutral_element):
        """
        Parameters
        ----------
        capacity : int
            Total number of addressable elements; must be a power of two.
        operation : callable(obj, obj) -> obj
            Associative combiner (e.g. ``operator.add``, ``min``).
        neutral_element : obj
            Identity element for `operation` (0 for sum, +inf for min).
        """
        assert ((capacity > 0) and ((capacity & (capacity - 1)) == 0)), 'capacity must be positive and a power of 2.'
        self._capacity = capacity
        # Heap layout: node 1 is the root; leaves live at [capacity, 2*capacity).
        self._value = [neutral_element] * (2 * capacity)
        self._operation = operation

    def _reduce_helper(self, start, end, node, node_start, node_end):
        # `node` covers the inclusive range [node_start, node_end].
        if (start == node_start) and (end == node_end):
            return self._value[node]
        mid = (node_start + node_end) // 2
        if end <= mid:
            # Query lies entirely within the left child.
            return self._reduce_helper(start, end, 2 * node, node_start, mid)
        if mid + 1 <= start:
            # Query lies entirely within the right child.
            return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
        # Query straddles both children: reduce each half, then combine.
        left_result = self._reduce_helper(start, mid, 2 * node, node_start, mid)
        right_result = self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
        return self._operation(left_result, right_result)

    def reduce(self, start=0, end=None):
        """Reduce `operation` over arr[start:end] (Python-slice semantics).

        `end` defaults to `capacity` and may be negative (counted from the end).
        """
        if end is None:
            end = self._capacity
        if end < 0:
            end += self._capacity
        end -= 1  # convert to an inclusive right bound for the recursion
        return self._reduce_helper(start, end, 1, 0, self._capacity - 1)

    def __setitem__(self, idx, val):
        # Write the leaf, then rebuild every ancestor on the path to the root.
        idx += self._capacity
        self._value[idx] = val
        idx //= 2
        while idx >= 1:
            self._value[idx] = self._operation(self._value[2 * idx], self._value[2 * idx + 1])
            idx //= 2

    def __getitem__(self, idx):
        assert (0 <= idx < self._capacity)
        return self._value[self._capacity + idx]
|
class SumSegmentTree(SegmentTree):
    """Segment tree specialized to addition; supports prefix-sum search."""

    def __init__(self, capacity):
        super(SumSegmentTree, self).__init__(capacity=capacity, operation=operator.add, neutral_element=0.0)

    def sum(self, start=0, end=None):
        """Return arr[start] + ... + arr[end]."""
        return super(SumSegmentTree, self).reduce(start, end)

    def find_prefixsum_idx(self, prefixsum):
        """Return the highest index i such that sum(arr[0..i-1]) <= prefixsum.

        When the array holds (unnormalized) probabilities, this samples an
        index proportionally to its mass.
        """
        assert (0 <= prefixsum <= (self.sum() + 1e-05))
        node = 1
        # Walk down the tree, going right whenever the left mass is exhausted.
        while node < self._capacity:
            left = 2 * node
            if self._value[left] > prefixsum:
                node = left
            else:
                prefixsum -= self._value[left]
                node = left + 1
        return node - self._capacity
|
class MinSegmentTree(SegmentTree):
    """Segment tree specialized to the `min` operation."""

    def __init__(self, capacity):
        super(MinSegmentTree, self).__init__(capacity=capacity, operation=min, neutral_element=float('inf'))

    def min(self, start=0, end=None):
        """Return min(arr[start], ..., arr[end])."""
        return super(MinSegmentTree, self).reduce(start, end)
|
class AddBias(nn.Module):
    """Module that adds a learnable bias vector to its input.

    The bias is stored as an (n, 1) parameter and broadcast over 2-D
    (batch, n) or 4-D (batch, n, H, W) inputs.
    """

    def __init__(self, bias):
        super(AddBias, self).__init__()
        self._bias = nn.Parameter(bias.unsqueeze(1))

    def forward(self, x):
        # Pick the broadcast shape that matches the input's rank.
        view_shape = (1, -1) if x.dim() == 2 else (1, -1, 1, 1)
        return x + self._bias.t().view(*view_shape)
|
def init(module, weight_init, bias_init, gain=1):
    """Initialize a module's weight and bias in place; return the module (fluent helper)."""
    weight_data, bias_data = module.weight.data, module.bias.data
    weight_init(weight_data, gain=gain)
    bias_init(bias_data)
    return module
|
def init_normc_(weight, gain=1):
    """In-place init: draw N(0, 1) entries, then rescale so every slice along
    dim 1 has Euclidean norm `gain` (normalized-columns initialization)."""
    weight.normal_(0, 1)
    norms = torch.sqrt(weight.pow(2).sum(1, keepdim=True))
    weight *= gain / norms
|
def load_experiment_configs(log_dir, uuid=None):
    """Load every experiment config.json found in `log_dir`'s subdirectories.

    Parameters
    ----------
    log_dir : str
        Directory whose immediate subdirectories may each hold a config.json.
    uuid : optional
        If given, only configs whose 'uuid' field equals this value are kept.
        (Configs lacking a 'uuid' key are skipped rather than raising, which
        the original's append-then-pop pattern would have done.)

    Returns
    -------
    list of dict
        The parsed configs, in `os.listdir` order.
    """
    results = []
    for d in os.listdir(log_dir):
        if not os.path.isdir(os.path.join(log_dir, d)):
            continue
        cfg_path = os.path.join(log_dir, d, 'config.json')
        if not os.path.exists(cfg_path):
            continue
        # Reuse cfg_path (the original re-joined and reopened the same path).
        with open(cfg_path, 'r') as f:
            cfg = json.load(f)
        if uuid is None or cfg.get('uuid') == uuid:
            results.append(cfg)
    return results
|
def load_experiment_config_paths(log_dir, uuid=None):
    """Return the paths of config.json files under `log_dir`'s subdirectories,
    optionally keeping only those whose 'uuid' field equals `uuid`."""
    paths = []
    subdirs = (f for f in os.listdir(log_dir) if os.path.isdir(os.path.join(log_dir, f)))
    for d in subdirs:
        cfg_path = os.path.join(log_dir, d, 'config.json')
        if not os.path.exists(cfg_path):
            continue
        with open(cfg_path, 'r') as f:
            cfg = json.load(f)
        paths.append(cfg_path)
        # Drop the path again if a uuid filter is set and doesn't match.
        if uuid is not None and cfg['uuid'] != uuid:
            paths.pop()
    return paths
|
def checkpoint_name(checkpoint_dir, epoch='latest'):
    """Path of the checkpoint file for `epoch` inside `checkpoint_dir`."""
    filename = 'ckpt-{}.dat'.format(epoch)
    return os.path.join(checkpoint_dir, filename)
|
def last_archived_run(base_dir, uuid):
    """Return the path of the most recent archived run for `uuid`.

    Looks for directories named '<base_dir>/archive/<uuid>_run_K' and returns
    the one with the largest K, or None if none exist.

    Fix vs. original: the `... if len(existing_runs) > 0 else 0` fallback was
    unreachable (we return None earlier when the list is empty); removed.
    """
    archive_dir = os.path.join(base_dir, 'archive')
    pattern = os.path.join(archive_dir, uuid + '_run_*')
    existing_runs = glob.glob(pattern)
    print(pattern)  # kept from original (debug output)
    if len(existing_runs) == 0:
        return None
    # The run number is the suffix after the final underscore.
    current_run_number = max(int(run.split('_')[-1]) for run in existing_runs)
    return os.path.join(archive_dir, '{}_run_{}'.format(uuid, current_run_number))
|
def archive_current_run(base_dir, uuid):
    """Archive the current run: move every base_dir/*uuid* entry into
    base_dir/archive/<uuid>_run_K, where K is one past the largest existing run."""
    matching_files = glob.glob(os.path.join(base_dir, ('*' + uuid) + '*'))
    if len(matching_files) == 0:
        return
    archive_dir = os.path.join(base_dir, 'archive')
    os.makedirs(archive_dir, exist_ok=True)
    previous_runs = glob.glob(os.path.join(archive_dir, uuid + '_run_*'))
    if len(previous_runs) > 0:
        # Run number is the suffix after the final underscore.
        next_run_number = max(int(run.split('_')[-1]) for run in previous_runs) + 1
    else:
        next_run_number = 0
    destination = os.path.join(archive_dir, '{}_run_{}'.format(uuid, next_run_number))
    os.makedirs(destination)
    for f in matching_files:
        shutil.move(f, destination)
    return
|
def save_checkpoint(obj, directory, step_num, use_thread=False):
    """Serialize `obj` twice under `directory`: once as the 'latest' checkpoint
    and once under its step number. `use_thread` is accepted for API
    compatibility but saving is still synchronous."""
    if use_thread:
        warnings.warn('use_threads set to True, but done synchronously still')
    os.makedirs(directory, exist_ok=True)
    for target in (checkpoint_name(directory), checkpoint_name(directory, step_num)):
        torch.save(obj, target, pickle_module=pickle)
|
class VisdomMonitor(Monitor):
    """Gym `Monitor` subclass intended to also report to a Visdom server.

    Fixes vs. original:
    - The constructor declared the parameter `env` twice (the gym environment
      and the Visdom environment name), which is a SyntaxError; the Visdom one
      is renamed `visdom_env`.
    - `_close_video_recorder` was the bare expression `video_recorder` (a
      NameError when called); it now delegates to the base implementation.

    NOTE(review): `server`, `port`, and `visdom_env` are currently unused —
    presumably the Visdom wiring was unfinished; confirm before relying on it.
    """

    def __init__(self, env, directory, video_callable=None, force=False, resume=False, write_upon_reset=False, uid=None, mode=None, server='localhost', visdom_env='main', port=8097):
        super(VisdomMonitor, self).__init__(env, directory, video_callable=video_callable, force=force, resume=resume, write_upon_reset=write_upon_reset, uid=uid, mode=mode)

    def _close_video_recorder(self):
        super(VisdomMonitor, self)._close_video_recorder()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.