code stringlengths 17 6.64M |
|---|
class UNetHeteroscedasticFull(nn.Module):
    """UNet that predicts a per-pixel mean and a full-covariance
    uncertainty parameterization.

    The uncertainty head emits ``out_channels * (out_channels + 1) / 2``
    maps — enough for the lower-triangular factor of a per-pixel
    covariance matrix.

    Args:
        downsample: number of pooling encoder/decoder stages.
        in_channels: channels of the input image.
        out_channels: channels of the predicted mean.
        eps: numerical-stability constant (stored; not used in forward).

    Returns (from forward):
        (mu, scale): mean clamped to [-1, 1] and a softplus-positive
        scale tensor.
    """

    def __init__(self, downsample=6, in_channels=3, out_channels=3, eps=1e-05):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        # Encoder: a stem block followed by `downsample` pooling blocks.
        self.down1 = UNet_down_block(in_channels, 16, False)
        self.down_blocks = nn.ModuleList(
            [UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(downsample)])
        bottleneck = 2 ** (4 + downsample)
        # Bottleneck: three conv + GroupNorm stages.
        self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, bottleneck)
        self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, bottleneck)
        self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, bottleneck)
        # Decoder blocks mirror the encoder.
        self.up_blocks = nn.ModuleList(
            [UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(downsample)])
        # Mean head.
        self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 16)
        self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
        # Uncertainty head.
        self.last_conv1_uncert = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn_uncert = nn.GroupNorm(8, 16)
        # BUG FIX: the original used true division `/`, which produces a
        # float; nn.Conv2d requires an integer channel count. The product
        # (c+1)*c is always even, so `// 2` is exact.
        self.last_conv2_uncert = nn.Conv2d(16, ((out_channels + 1) * out_channels) // 2, 1, padding=0)
        self.relu = nn.ReLU()
        self.eps = eps

    def forward(self, x):
        feat = self.down1(x)
        skips = [feat]
        for down in self.down_blocks:
            feat = down(feat)
            skips.append(feat)
        feat = self.relu(self.bn1(self.mid_conv1(feat)))
        feat = self.relu(self.bn2(self.mid_conv2(feat)))
        feat = self.relu(self.bn3(self.mid_conv3(feat)))
        # Walk the decoder from deepest to shallowest, fusing skip features.
        for idx in reversed(range(self.downsample)):
            feat = self.up_blocks[idx](skips[idx], feat)
        mu = self.relu(self.last_bn(self.last_conv1(feat)))
        mu = self.last_conv2(mu)
        mu = mu.clamp(min=-1.0, max=1.0)
        scale = self.relu(self.last_bn_uncert(self.last_conv1_uncert(feat)))
        scale = self.last_conv2_uncert(scale)
        scale = F.softplus(scale)
        return (mu, scale)
|
class UNetHeteroscedasticIndep(nn.Module):
    """UNet predicting a per-pixel mean plus an independent (per-channel)
    uncertainty estimate.

    Forward returns ``(mu, scale)``: the mean is clamped to [-1, 1] and
    the scale is made positive with softplus.
    """

    def __init__(self, downsample=6, in_channels=3, out_channels=3, eps=1e-05):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        # Encoder: stem block + `downsample` pooling blocks.
        self.down1 = UNet_down_block(in_channels, 16, False)
        self.down_blocks = nn.ModuleList(
            [UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(downsample)])
        bottleneck = 2 ** (4 + downsample)
        # Three bottleneck conv + GroupNorm stages.
        self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, bottleneck)
        self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, bottleneck)
        self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, bottleneck)
        # Decoder blocks mirror the encoder.
        self.up_blocks = nn.ModuleList(
            [UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(downsample)])
        # Mean head.
        self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 16)
        self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
        # Uncertainty head: one scale map per output channel.
        self.last_conv1_uncert = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn_uncert = nn.GroupNorm(8, 16)
        self.last_conv2_uncert = nn.Conv2d(16, out_channels, 1, padding=0)
        self.relu = nn.ReLU()
        self.eps = eps

    def forward(self, x):
        feat = self.down1(x)
        skips = [feat]
        for down in self.down_blocks:
            feat = down(feat)
            skips.append(feat)
        feat = self.relu(self.bn1(self.mid_conv1(feat)))
        feat = self.relu(self.bn2(self.mid_conv2(feat)))
        feat = self.relu(self.bn3(self.mid_conv3(feat)))
        # Decode deepest-first, fusing the stored skip connections.
        for idx in reversed(range(self.downsample)):
            feat = self.up_blocks[idx](skips[idx], feat)
        mu = self.relu(self.last_bn(self.last_conv1(feat)))
        mu = self.last_conv2(mu).clamp(min=-1.0, max=1.0)
        scale = self.relu(self.last_bn_uncert(self.last_conv1_uncert(feat)))
        scale = F.softplus(self.last_conv2_uncert(scale))
        return (mu, scale)
|
class UNetHeteroscedasticPooled(nn.Module):
    """UNet predicting a per-pixel mean plus a single pooled (one-channel)
    uncertainty map shared across output channels.

    Args:
        downsample: number of pooling encoder/decoder stages.
        in_channels: channels of the input image.
        out_channels: channels of the predicted mean.
        eps: numerical-stability constant (stored; not used in forward).
        use_clamp: if True the mean is hard-clamped to [-1, 1]; otherwise
            it is squashed with tanh.

    Returns (from forward):
        (mu, scale): mean in [-1, 1] and a softplus-positive 1-channel scale.
    """

    def __init__(self, downsample=6, in_channels=3, out_channels=3, eps=1e-05, use_clamp=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        # Encoder: stem block + `downsample` pooling blocks.
        self.down1 = UNet_down_block(in_channels, 16, False)
        self.down_blocks = nn.ModuleList(
            [UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(downsample)])
        bottleneck = 2 ** (4 + downsample)
        # Three bottleneck conv + GroupNorm stages.
        self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, bottleneck)
        self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, bottleneck)
        self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, bottleneck)
        # Decoder blocks mirror the encoder.
        self.up_blocks = nn.ModuleList(
            [UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(downsample)])
        # Mean head.
        self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 16)
        self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
        # Uncertainty head: a single pooled scale channel.
        self.last_conv1_uncert = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn_uncert = nn.GroupNorm(8, 16)
        self.last_conv2_uncert = nn.Conv2d(16, 1, 1, padding=0)
        self.relu = nn.ReLU()
        self.eps = eps
        self.use_clamp = use_clamp

    def forward(self, x):
        feat = self.down1(x)
        skips = [feat]
        for down in self.down_blocks:
            feat = down(feat)
            skips.append(feat)
        feat = self.relu(self.bn1(self.mid_conv1(feat)))
        feat = self.relu(self.bn2(self.mid_conv2(feat)))
        feat = self.relu(self.bn3(self.mid_conv3(feat)))
        # Decode deepest-first, fusing the stored skip connections.
        for idx in reversed(range(self.downsample)):
            feat = self.up_blocks[idx](skips[idx], feat)
        mu = self.relu(self.last_bn(self.last_conv1(feat)))
        mu = self.last_conv2(mu)
        if self.use_clamp:
            mu = mu.clamp(min=-1.0, max=1.0)
        else:
            # FIX: F.tanh is deprecated in favor of torch.tanh (same math).
            mu = torch.tanh(mu)
        scale = self.relu(self.last_bn_uncert(self.last_conv1_uncert(feat)))
        scale = self.last_conv2_uncert(scale)
        scale = F.softplus(scale)
        return (mu, scale)
|
class UNetReshade(nn.Module):
    """UNet that predicts a single-channel reshading map, replicated to
    three channels for downstream consumers.
    """

    def __init__(self, downsample=6, in_channels=3, out_channels=3):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        # Encoder: stem block + `downsample` pooling blocks.
        self.down1 = UNet_down_block(in_channels, 16, False)
        self.down_blocks = nn.ModuleList(
            [UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(downsample)])
        bottleneck = 2 ** (4 + downsample)
        # Three bottleneck conv + GroupNorm stages.
        self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, bottleneck)
        self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, bottleneck)
        self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, bottleneck)
        # Decoder blocks mirror the encoder.
        self.up_blocks = nn.ModuleList(
            [UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(downsample)])
        self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 16)
        self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
        self.relu = nn.ReLU()

    def forward(self, x):
        feat = self.down1(x)
        skips = [feat]
        for down in self.down_blocks:
            feat = down(feat)
            skips.append(feat)
        feat = self.relu(self.bn1(self.mid_conv1(feat)))
        feat = self.relu(self.bn2(self.mid_conv2(feat)))
        feat = self.relu(self.bn3(self.mid_conv3(feat)))
        for idx in reversed(range(self.downsample)):
            feat = self.up_blocks[idx](skips[idx], feat)
        out = self.relu(self.last_bn(self.last_conv1(feat)))
        out = self.relu(self.last_conv2(out))
        # Collapse channels to one grayscale map, then replicate to 3 channels.
        out = out.clamp(max=1, min=0).mean(dim=1, keepdim=True)
        return out.expand(-1, 3, -1, -1)

    def loss(self, pred, target):
        # Constant zero loss — this network is used frozen/pretrained here.
        loss = torch.tensor(0.0, device=pred.device)
        return (loss, (loss.detach(),))
|
class ConvBlock(nn.Module):
    """Norm -> (optional stride-2 transposed-conv upsample) -> conv,
    with ReLU after each convolution.
    """

    def __init__(self, f1, f2, kernel_size=3, padding=1, use_groupnorm=True, groups=8, dilation=1, transpose=False):
        super().__init__()
        self.transpose = transpose
        # Padding scales with dilation so the spatial size is preserved.
        self.conv = nn.Conv2d(f1, f2, (kernel_size, kernel_size),
                              dilation=dilation, padding=padding * dilation)
        if self.transpose:
            # Stride-2 transposed conv doubles spatial resolution before the conv.
            self.convt = nn.ConvTranspose2d(f1, f1, (3, 3), dilation=dilation,
                                            stride=2, padding=dilation, output_padding=1)
        self.bn = nn.GroupNorm(groups, f1) if use_groupnorm else nn.BatchNorm2d(f1)

    def forward(self, x):
        out = self.bn(x)
        if self.transpose:
            out = F.relu(self.convt(out))
        return F.relu(self.conv(out))
|
def load_from_file(net, checkpoint_path):
    """Load weights from ``checkpoint_path`` into ``net`` and freeze it.

    The checkpoint must contain a ``'state_dict'`` entry; any
    ``'module.'`` prefixes (left by nn.DataParallel) are stripped first.

    Args:
        net: module whose parameters are overwritten in place.
        checkpoint_path: path to a torch checkpoint file.

    Returns:
        The same ``net``, with every parameter set to requires_grad=False.
    """
    # FIX: map_location='cpu' lets GPU-saved checkpoints load on CPU-only
    # hosts; load_state_dict copies into net's existing (device-resident)
    # parameters, so placement for callers is unchanged.
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    state_dict = {k.replace('module.', ''): v for k, v in checkpoint['state_dict'].items()}
    net.load_state_dict(state_dict)
    # Freeze: the network is used as a fixed feature extractor.
    for param in net.parameters():
        param.requires_grad = False
    return net
|
def blind(output_size, dtype=np.float32):
    """Observation transform that discards its input entirely.

    Args:
        output_size: tuple CxWxH of the (zero-filled) output.
        dtype: numpy dtype of the advertised observation space.

    Returns:
        A thunk mapping an observation space to (transform, space); the
        transform always returns a zero tensor of `output_size`.
    """
    def _thunk(obs_space):
        def pipeline(x):
            return torch.zeros(output_size)
        return (pipeline, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
def pixels_as_state(output_size, dtype=np.float32):
    """Center-crop + resize raw pixels and tile them to `output_size` channels.

    The first 3*(C//3) output channels are RGB copies of the frame; the
    remaining C%3 channels are grayscale copies.

    Args:
        output_size: tuple CxWxH.
        dtype: numpy dtype of the output space (np, not torch).

    Returns:
        A thunk mapping obs_space -> (transform, spaces.Box).
    """
    def _thunk(obs_space):
        crop_side = min(obs_space.shape[:2])
        target_wh = output_size[-2:]
        base_pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.CenterCrop([crop_side, crop_side]),
            vision.transforms.Resize(target_wh)])
        grayscale_pipeline = vision.transforms.Compose([
            vision.transforms.Grayscale(),
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1])
        rgb_pipeline = vision.transforms.Compose([
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1])

        def pipeline(x):
            base = base_pipeline(x)
            n_rgb, n_gray = divmod(output_size[0], 3)
            tiles = [rgb_pipeline(base)] * n_rgb + [grayscale_pipeline(base)] * n_gray
            return torch.cat(tiles)
        return (pipeline, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
class GaussianSmoothing(nn.Module):
    """
    Apply gaussian smoothing on a 1d, 2d or 3d tensor. Filtering is
    performed separately for each channel in the input using a depthwise
    convolution.

    Arguments:
        channels (int, sequence): Number of channels of the input tensors.
            Output will have this number of channels as well.
        kernel_size (int, sequence): Size of the gaussian kernel.
        sigma (float, sequence): Standard deviation of the gaussian kernel.
        dim (int, optional): The number of dimensions of the data.
            Default value is 2 (spatial).
    """

    def __init__(self, channels, kernel_size, sigma, dim=2):
        super(GaussianSmoothing, self).__init__()
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim
        self.kernel_size = kernel_size
        # Build a separable gaussian as the product of 1-D gaussians along
        # each dimension, then normalize so the kernel sums to 1.
        kernel = 1
        meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
        for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
            mean = (size - 1) / 2
            kernel *= (1 / (std * math.sqrt(2 * math.pi))) * torch.exp(-(((mgrid - mean) / std) ** 2) / 2)
        kernel = kernel / torch.sum(kernel)
        # Shape (channels, 1, *kernel_size) for a depthwise (grouped) conv.
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *([1] * (kernel.dim() - 1)))
        self.register_buffer('weight', kernel)
        self.groups = channels
        if dim == 1:
            self.conv = F.conv1d
        elif dim == 2:
            self.conv = F.conv2d
        elif dim == 3:
            self.conv = F.conv3d
        else:
            raise RuntimeError('Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim))

    def forward(self, input):
        """
        Apply gaussian filter to input.
        Arguments:
            input (torch.Tensor): Input to apply gaussian filter on.
        Returns:
            filtered (torch.Tensor): Filtered output.
        """
        # BUG FIX: the original tested `len(input) == 3`, which is the size
        # of the *first* dimension (the batch size), not the tensor rank; a
        # batch of 3 already-4-d images was wrongly unsqueezed to 5-d.
        input_was_3d = (input.dim() == 3)
        if input_was_3d:
            input = input.unsqueeze(0)
        input = F.pad(input, [self.kernel_size[0] // 2] * 4, mode='reflect')
        res = self.conv(input, weight=self.weight, groups=self.groups)
        return res.squeeze(0) if input_was_3d else res
|
class GaussianSmoothing(nn.Module):
    """
    Apply gaussian smoothing on a 1d, 2d or 3d tensor. Filtering is
    performed separately for each channel in the input using a depthwise
    convolution.

    Arguments:
        channels (int, sequence): Number of channels of the input tensors.
            Output will have this number of channels as well.
        kernel_size (int, sequence): Size of the gaussian kernel.
        sigma (float, sequence): Standard deviation of the gaussian kernel.
        dim (int, optional): The number of dimensions of the data.
            Default value is 2 (spatial).
    """

    def __init__(self, channels, kernel_size, sigma, dim=2):
        super(GaussianSmoothing, self).__init__()
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        self.kernel_size = kernel_size[0]
        self.dim = dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim
        # Separable gaussian: product of 1-D gaussians, normalized to sum 1.
        weight = 1
        grids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
        for size, std, grid in zip(kernel_size, sigma, grids):
            center = (size - 1) / 2
            weight *= (1 / (std * math.sqrt(2 * math.pi))) * torch.exp(-(((grid - center) / std) ** 2) / 2)
        weight = weight / torch.sum(weight)
        # Reshape to (channels, 1, *kernel_size) for a grouped depthwise conv.
        weight = weight.view(1, 1, *weight.size())
        weight = weight.repeat(channels, *([1] * (weight.dim() - 1)))
        self.register_buffer('weight', weight)
        self.groups = channels
        if dim == 1:
            self.conv = F.conv1d
        elif dim == 2:
            self.conv = F.conv2d
        elif dim == 3:
            self.conv = F.conv3d
        else:
            raise RuntimeError('Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim))

    def forward(self, input):
        """
        Apply gaussian filter to input.
        Arguments:
            input (torch.Tensor): Input to apply gaussian filter on.
        Returns:
            filtered (torch.Tensor): Filtered output.
        """
        # Reflect-pad every spatial dim by half the kernel so the output
        # keeps the input's spatial size.
        half = self.kernel_size // 2
        input = F.pad(input, ([half] * 2) * self.dim, mode='reflect')
        return self.conv(input, weight=self.weight, groups=self.groups)
|
class TransformFactory(object):
    """Builders for observation-dict preprocessing functions.

    Each factory returns a ``processing_fn(obs_space)`` producing a
    ``(transform, spaces.Dict)`` pair for a 1-layer-deep ``spaces.Dict``.
    """

    @staticmethod
    def independent(names_to_transforms, multithread=False, keep_unnamed=True):
        """Apply one transform per named sensor, each independently."""
        def processing_fn(obs_space):
            ' Obs_space is expected to be a 1-layer deep spaces.Dict '
            transforms = {}
            sensor_space = {}
            transform_names = set(names_to_transforms.keys())
            obs_space_names = set(obs_space.spaces.keys())
            assert transform_names.issubset(obs_space_names), 'Trying to transform observations that are not present ({})'.format(transform_names - obs_space_names)
            for name in obs_space_names:
                if name in names_to_transforms:
                    transform = names_to_transforms[name]
                    (transforms[name], sensor_space[name]) = transform(obs_space.spaces[name])
                elif keep_unnamed:
                    sensor_space[name] = obs_space.spaces[name]
                else:
                    print(f'Did not transform {name}, removing from obs')

            def _independent_tranform_thunk(obs):
                results = {}
                if multithread:
                    # BUG FIX: the original called `mp.pool(...)` (lowercase),
                    # sized it with an undefined `sensor_shapes`, and called
                    # `pool.map()` with no arguments — this branch always
                    # crashed. Mirror the working pattern from `splitting`.
                    items = list(transforms.items())
                    pool = mp.Pool(min(mp.cpu_count(), len(items)))
                    mapped = pool.map((lambda t_o: t_o[0](t_o[1])),
                                      [(transform, obs[name]) for (name, transform) in items])
                    for ((name, _), transformed) in zip(items, mapped):
                        results[name] = transformed
                else:
                    for (name, transform) in transforms.items():
                        try:
                            results[name] = transform(obs[name])
                        except Exception as e:
                            print(f'Problem applying preproces transform to {name}.', e)
                            raise e
                # Pass untouched sensors through when requested.
                for (name, val) in obs.items():
                    if (name not in results) and keep_unnamed:
                        results[name] = val
                return SensorPack(results)
            return (_independent_tranform_thunk, spaces.Dict(sensor_space))
        return processing_fn

    @staticmethod
    def splitting(names_to_transforms, multithread=False, keep_unnamed=True):
        """Split each named sensor into several transformed outputs.

        ``names_to_transforms`` maps old sensor name -> dict of
        (new name -> transform maker).
        """
        def processing_fn(obs_space):
            ' Obs_space is expected to be a 1-layer deep spaces.Dict '
            old_name_to_new_name_to_transform = defaultdict(dict)
            sensor_space = {}
            transform_names = set(names_to_transforms.keys())
            obs_space_names = set(obs_space.spaces.keys())
            assert transform_names.issubset(obs_space_names), 'Trying to transform observations that are not present ({})'.format(transform_names - obs_space_names)
            for old_name in obs_space_names:
                if old_name in names_to_transforms:
                    # BUG FIX: the original checked `names_to_transforms`
                    # itself (always a dict, so always true); the intent is
                    # to validate the per-sensor *value*.
                    assert hasattr(names_to_transforms[old_name], 'items'), 'each sensor must map to a dict of transfors'
                    for (new_name, transform_maker) in names_to_transforms[old_name].items():
                        (transform, sensor_space[new_name]) = transform_maker(obs_space.spaces[old_name])
                        old_name_to_new_name_to_transform[old_name][new_name] = transform
                elif keep_unnamed:
                    sensor_space[old_name] = obs_space.spaces[old_name]

            def _transform_thunk(obs):
                results = {}
                transforms_to_run = []
                for (old_name, new_names_to_transform) in old_name_to_new_name_to_transform.items():
                    for (new_name, transform) in new_names_to_transform.items():
                        transforms_to_run.append((old_name, new_name, transform))
                if multithread:
                    # CONSISTENCY FIX: use the `mp` alias for cpu_count like
                    # the rest of this class (was `multiprocessing.cpu_count`).
                    pool = mp.Pool(min(mp.cpu_count(), len(transforms_to_run)))
                    res = pool.map((lambda t_o: t_o[0](t_o[1])),
                                   zip([t for (_, _, t) in transforms_to_run],
                                       [obs[old_name] for (old_name, _, _) in transforms_to_run]))
                    for (transformed, (old_name, new_name, _)) in zip(res, transforms_to_run):
                        results[new_name] = transformed
                else:
                    for (old_name, new_names_to_transform) in old_name_to_new_name_to_transform.items():
                        for (new_name, transform) in new_names_to_transform.items():
                            results[new_name] = transform(obs[old_name])
                if keep_unnamed:
                    # Keep sensors that were neither split nor renamed.
                    for (name, val) in obs.items():
                        if (name not in results) and (name not in old_name_to_new_name_to_transform):
                            results[name] = val
                return SensorPack(results)
            return (_transform_thunk, spaces.Dict(sensor_space))
        return processing_fn
|
class Pipeline(object):
    """Placeholder for a composable observation pipeline (not implemented)."""
    def __init__(self, env_or_pipeline):
        # env_or_pipeline is accepted but currently unused.
        pass
    def forward(self):
        # No-op stub.
        pass
|
def identity_transform():
    """Thunk factory whose transform passes observations through unchanged."""
    def _thunk(obs_space):
        def passthrough(x):
            return x
        return (passthrough, obs_space)
    return _thunk
|
def fill_like(output_size, fill_value=0.0, dtype=torch.float32):
    """Thunk factory: the transform ignores its input and returns a
    constant-filled numpy array of `output_size`.
    """
    def _thunk(obs_space):
        # Template tensor fixes the dtype for both the output and the space.
        template = torch.ones((1,), dtype=dtype)
        def _process(x):
            return template.new_full(output_size, fill_value).numpy()
        return (_process, spaces.Box(-1, 1, output_size, template.numpy().dtype))
    return _thunk
|
def rescale_centercrop_resize(output_size, dtype=np.float32):
    """Center-crop to a square, resize, and rescale pixels to [-1, 1].

    Args:
        output_size: tuple CxWxH.
        dtype: numpy dtype of the output space (np, not torch).

    obs_space is expected in WxHxC form.

    Returns:
        A thunk mapping obs_space -> (transform, spaces.Box).
    """
    def _rescale_centercrop_resize_thunk(obs_space):
        crop_side = min(obs_space.shape[:2])
        target_wh = output_size[-2:]
        pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.CenterCrop([crop_side, crop_side]),
            vision.transforms.Resize(target_wh),
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1,
        ])
        return (pipeline, spaces.Box(-1, 1, output_size, dtype))
    return _rescale_centercrop_resize_thunk
|
def rescale_centercrop_resize_collated(output_size, dtype=np.float32):
    """Batched variant of rescale_centercrop_resize: rescales a collated
    N,H,W,C uint8 batch to N,C,H,W floats in [-1, 1] on the GPU.

    Args:
        output_size: tuple CxWxH.
        dtype: numpy dtype of the output space (np, not torch).

    Returns:
        A thunk mapping obs_space -> (transform, spaces.Box).
    """
    def _rescale_centercrop_resize_thunk(obs_space):
        crop_side = min(obs_space.shape[:2])
        target_wh = output_size[-2:]
        # NOTE(review): this PIL pipeline is constructed but never applied —
        # the runner below does the rescale directly on the batch.
        pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.CenterCrop([crop_side, crop_side]),
            vision.transforms.Resize(target_wh),
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1,
        ])

        def iterative_pipeline(pipeline):
            def runner(x):
                if isinstance(x, torch.Tensor):
                    batch = torch.cuda.FloatTensor(x.cuda())
                else:
                    batch = torch.cuda.FloatTensor(x).cuda()
                batch = batch.permute(0, 3, 1, 2) / 255.0  # N,H,W,C -> N,C,H,W in [0, 1]
                return (2.0 * batch) - 1.0
            return runner
        return (iterative_pipeline(pipeline), spaces.Box(-1, 1, output_size, dtype))
    return _rescale_centercrop_resize_thunk
|
def rescale():
    """Rescale observations into [-1, 1].

    Returns:
        A thunk mapping obs_space -> (transform, spaces.Box).
    """
    def _rescale_thunk(obs_space):
        np_pipeline = vision.transforms.Compose([
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1,
        ])

        def pipeline(im):
            # numpy frames go through ToTensor (0-1 range); tensors are
            # assumed to already be 0-255.
            if isinstance(im, np.ndarray):
                return np_pipeline(im)
            return RESCALE_0_255_NEG1_POS1(im)
        return (pipeline, spaces.Box(-1.0, 1.0, obs_space.shape, np.float32))
    return _rescale_thunk
|
def grayscale_rescale():
    """Convert observations to grayscale and rescale into [-1, 1].

    Returns:
        A thunk mapping obs_space -> (transform, spaces.Box).
    """
    def _grayscale_rescale_thunk(obs_space):
        pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.Grayscale(),
            vision.transforms.ToTensor(),
            vision.transforms.Normalize([0.5], [0.5]),
        ])
        h, w = obs_space.shape[0], obs_space.shape[1]
        return (pipeline, spaces.Box(-1.0, 1.0, (1, h, w), dtype=np.float32))
    return _grayscale_rescale_thunk
|
def cross_modal_transform(eval_to_get_net, output_shape=(3, 84, 84), dtype=np.float32):
    """Encode a single observation with a cross-modal network, then resize
    the network's output to `output_shape`.

    Args:
        eval_to_get_net: the (already-evaluated) network to run.
        output_shape: tuple CxWxH of the final output.
        dtype: numpy dtype of the output space (np, not torch).

    Returns:
        A thunk mapping obs_space -> (transform, spaces.Box).
    """
    _rescale_thunk = rescale_centercrop_resize((3, 256, 256))
    target_size = output_shape[-1]
    net = eval_to_get_net
    resize_fn = vision.transforms.Compose([
        vision.transforms.ToPILImage(),
        vision.transforms.Resize(target_size),
        vision.transforms.ToTensor(),
        RESCALE_0_1_NEG1_POS1,
    ])

    def encode(x):
        with torch.no_grad():
            return net(x)

    def _transform_thunk(obs_space):
        rescale, _ = _rescale_thunk(obs_space)

        def pipeline(x):
            with torch.no_grad():
                batch = rescale(x).view(1, 3, 256, 256)
                batch = torch.Tensor(batch).cuda()
                encoded = encode(batch)
                zero_one = (encoded + 1.0) / 2  # net output is in [-1, 1]
                return resize_fn(zero_one[0].cpu())
        return (pipeline, spaces.Box(-1, 1, output_shape, dtype))
    return _transform_thunk
|
def cross_modal_transform_collated(eval_to_get_net, output_shape=(3, 84, 84), dtype=np.float32):
    """Batched variant of cross_modal_transform: encodes a collated
    N,H,W,C uint8 batch and resizes each encoded frame.

    Args:
        eval_to_get_net: the (already-evaluated) network to run.
        output_shape: tuple CxWxH of the final per-frame output.
        dtype: numpy dtype of the output space (np, not torch).

    Returns:
        A thunk mapping obs_space -> (transform, spaces.Box).
    """
    _rescale_thunk = rescale_centercrop_resize((3, 256, 256))
    target_size = output_shape[-1]
    net = eval_to_get_net
    resize_fn = vision.transforms.Compose([
        vision.transforms.ToPILImage(),
        vision.transforms.Resize(target_size),
        vision.transforms.ToTensor(),
        RESCALE_0_1_NEG1_POS1,
    ])

    def encode(x):
        with torch.no_grad():
            return net(x)

    def _transform_thunk(obs_space):
        rescale, _ = _rescale_thunk(obs_space)

        def pipeline(x):
            with torch.no_grad():
                batch = torch.FloatTensor(x).cuda().permute(0, 3, 1, 2) / 255.0
                batch = (2.0 * batch) - 1.0
                encoded = encode(batch)
                zero_one = (encoded + 1.0) / 2  # net output is in [-1, 1]
                return torch.stack([resize_fn(frame.cpu()) for frame in zero_one])
        return (pipeline, spaces.Box(-1, 1, output_shape, dtype))
    return _transform_thunk
|
def pixels_as_state(output_size, dtype=np.float32):
    """Center-crop + resize raw pixels and tile them to fill
    `output_size` channels: 3*(C//3) RGB copies followed by C%3 grayscale
    copies.

    Args:
        output_size: tuple CxWxH.
        dtype: numpy dtype of the output space (np, not torch).

    Returns:
        A thunk mapping obs_space -> (transform, spaces.Box).
    """
    def _thunk(obs_space):
        square_side = min(obs_space.shape[:2])
        resize_wh = output_size[-2:]
        base_pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.CenterCrop([square_side, square_side]),
            vision.transforms.Resize(resize_wh)])
        grayscale_pipeline = vision.transforms.Compose([
            vision.transforms.Grayscale(),
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1])
        rgb_pipeline = vision.transforms.Compose([
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1])

        def pipeline(x):
            cropped = base_pipeline(x)
            rgb = rgb_pipeline(cropped)
            gray = grayscale_pipeline(cropped)
            copies_rgb = output_size[0] // 3
            copies_gray = output_size[0] % 3
            return torch.cat([rgb] * copies_rgb + [gray] * copies_gray)
        return (pipeline, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
def taskonomy_features_transform_collated(task_path, encoder_type='taskonomy', dtype=np.float32):
    """Build a transform thunk that encodes a collated batch of frames with
    a Taskonomy encoder, or handles two special modes.

    Args:
        task_path: checkpoint path; 'None' keeps random encoder weights;
            the special strings 'pixels_as_state' and 'blind' bypass the
            encoder entirely.
        encoder_type: only 'taskonomy' instantiates an encoder here.
        dtype: numpy dtype of the advertised observation space.

    Returns:
        A thunk: obs_space -> (pipeline, spaces.Box). The box is
        (8, 16, 16) — presumably the encoder's feature-map shape; TODO
        confirm against TaskonomyEncoder.
    """
    _rescale_thunk = rescale_centercrop_resize((3, 256, 256))
    _pixels_as_state_thunk = pixels_as_state((8, 16, 16))
    if ((task_path != 'pixels_as_state') and (task_path != 'blind')):
        if (encoder_type == 'taskonomy'):
            net = TaskonomyEncoder(normalize_outputs=False)
        if (task_path != 'None'):
            # A checkpoint may hold either a whole nn.Module or a state_dict.
            checkpoint = torch.load(task_path)
            if any([isinstance(v, nn.Module) for v in checkpoint.values()]):
                net = [v for v in checkpoint.values() if isinstance(v, nn.Module)][0]
            elif ('state_dict' in checkpoint.keys()):
                net.load_state_dict(checkpoint['state_dict'])
            else:
                assert False, f'Cannot read task_path {task_path}, no nn.Module or state_dict found. Encoder_type is {encoder_type}'
        net = net.cuda()
        net.eval()
    def encode(x):
        # Pass-through in the special modes; otherwise encode without grads.
        if ((task_path == 'pixels_as_state') or (task_path == 'blind')):
            return x
        with torch.no_grad():
            return net(x)
    def _taskonomy_features_transform_thunk(obs_space):
        (rescale, _) = _rescale_thunk(obs_space)
        (pixels_as_state, _) = _pixels_as_state_thunk(obs_space)
        def pipeline(x):
            # Assumes a collated batch in N,H,W,C uint8 layout — TODO confirm.
            with torch.no_grad():
                if isinstance(x, torch.Tensor):
                    x = torch.cuda.FloatTensor(x.cuda())
                else:
                    x = torch.cuda.FloatTensor(x).cuda()
                x = (x.permute(0, 3, 1, 2) / 255.0)  # N,H,W,C -> N,C,H,W in [0, 1]
                x = ((2.0 * x) - 1.0)  # rescale to [-1, 1]
                x = encode(x)
                return x
        def pixels_as_state_pipeline(x):
            return pixels_as_state(x).cpu()
        def blind_pipeline(x):
            # Zero feature map per batch element.
            batch_size = x.shape[0]
            return torch.zeros((batch_size, 8, 16, 16))
        if (task_path == 'blind'):
            return (blind_pipeline, spaces.Box((- 1), 1, (8, 16, 16), dtype))
        elif (task_path == 'pixels_as_state'):
            return (pixels_as_state_pipeline, spaces.Box((- 1), 1, (8, 16, 16), dtype))
        else:
            return (pipeline, spaces.Box((- 1), 1, (8, 16, 16), dtype))
    return _taskonomy_features_transform_thunk
|
def taskonomy_features_transforms_collated(task_paths, encoder_type='taskonomy', dtype=np.float32):
    """Like `taskonomy_features_transform_collated`, but concatenates the
    features of several encoders along the channel dimension.

    Args:
        task_paths: comma-separated checkpoint paths, or one of the
            special strings 'pixels_as_state' / 'blind'.
        encoder_type: only 'taskonomy' is supported.
        dtype: numpy dtype of the advertised observation space.

    Returns:
        A thunk: obs_space -> (pipeline, spaces.Box).
    """
    num_tasks = 0
    if (task_paths != 'pixels_as_state') and (task_paths != 'blind'):
        task_path_list = [tp.strip() for tp in task_paths.split(',')]
        num_tasks = len(task_path_list)
        assert num_tasks > 0, 'at least need one path'
        if encoder_type == 'taskonomy':
            nets = [TaskonomyEncoder(normalize_outputs=False) for _ in range(num_tasks)]
        else:
            assert False, f'do not recongize encoder type {encoder_type}'
        for i, task_path in enumerate(task_path_list):
            # A checkpoint may hold either a whole nn.Module or a state_dict.
            checkpoint = torch.load(task_path)
            net_in_ckpt = [v for v in checkpoint.values() if isinstance(v, nn.Module)]
            if len(net_in_ckpt) > 0:
                nets[i] = net_in_ckpt[0]
            elif 'state_dict' in checkpoint.keys():
                nets[i].load_state_dict(checkpoint['state_dict'])
            else:
                assert False, f'Cannot read task_path {task_path}, no nn.Module or state_dict found. Encoder_type is {encoder_type}'
            nets[i] = nets[i].cuda()
            nets[i].eval()

    # Mirror the single-encoder sibling: instantiate the pixels_as_state
    # transform rather than calling the factory at pipeline time.
    _pixels_as_state_thunk = pixels_as_state((8, 16, 16))

    def encode(x):
        if (task_paths == 'pixels_as_state') or (task_paths == 'blind'):
            return x
        with torch.no_grad():
            feats = []
            for net in nets:
                feats.append(net(x))
            return torch.cat(feats, dim=1)

    def _taskonomy_features_transform_thunk(obs_space):
        (pixels_as_state_fn, _) = _pixels_as_state_thunk(obs_space)

        def pipeline(x):
            with torch.no_grad():
                if isinstance(x, torch.Tensor):
                    x = torch.cuda.FloatTensor(x.cuda())
                else:
                    x = torch.cuda.FloatTensor(x).cuda()
                x = x.permute(0, 3, 1, 2) / 255.0  # N,H,W,C -> N,C,H,W in [0, 1]
                x = (2.0 * x) - 1.0  # rescale to [-1, 1]
                x = encode(x)
                return x

        def pixels_as_state_pipeline(x):
            # BUG FIX: the original called the module-level factory
            # `pixels_as_state(x)` instead of an instantiated transform.
            return pixels_as_state_fn(x).cpu()

        def blind_pipeline(x):
            batch_size = x.shape[0]
            return torch.zeros((batch_size, 8, 16, 16))

        # BUG FIX: the original tested the loop variable `task_path`
        # (undefined when no checkpoints were loaded, raising NameError)
        # instead of `task_paths`, and never handled 'blind'.
        if task_paths == 'blind':
            return (blind_pipeline, spaces.Box(-1, 1, (8, 16, 16), dtype))
        elif task_paths == 'pixels_as_state':
            return (pixels_as_state_pipeline, spaces.Box(-1, 1, (8, 16, 16), dtype))
        else:
            return (pipeline, spaces.Box(-1, 1, (8 * num_tasks, 16, 16), dtype))
    return _taskonomy_features_transform_thunk
|
def image_to_input_collated(output_size, dtype=np.float32):
    """Move a collated N,H,W,C uint8 batch to the GPU as N,C,H,W floats
    rescaled to [-1, 1].

    Returns:
        A thunk mapping obs_space -> (transform, spaces.Box).
    """
    def _thunk(obs_space):
        def runner(x):
            assert (x.shape[2] == x.shape[1]), 'we are only using square data, data format: N,H,W,C'
            if isinstance(x, torch.Tensor):
                batch = torch.cuda.FloatTensor(x.cuda())
            else:
                batch = torch.cuda.FloatTensor(x.copy()).cuda()
            batch = batch.permute(0, 3, 1, 2) / 255.0
            return (2.0 * batch) - 1.0
        return (runner, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
def map_pool_collated(output_size, dtype=np.float32):
    """Max-pool and rotate a collated N,H,W,C map batch, returning N,C,H,W
    floats in [-1, 1].

    Returns:
        A thunk mapping obs_space -> (transform, spaces.Box).
    """
    def _thunk(obs_space):
        def runner(x):
            with torch.no_grad():
                assert (x.shape[2] == x.shape[1]), 'we are only using square data, data format: N,H,W,C'
                if isinstance(x, torch.Tensor):
                    batch = torch.cuda.FloatTensor(x.cuda())
                else:
                    batch = torch.cuda.FloatTensor(x.copy()).cuda()
                batch = batch.permute(0, 3, 1, 2) / 255.0
                # 3x3 stride-1 max pool dilates the map without resizing it.
                batch = F.max_pool2d(batch, kernel_size=3, stride=1, padding=1)
                # 180-degree rotation over the spatial dims.
                batch = torch.rot90(batch, k=2, dims=(2, 3))
                return (2.0 * batch) - 1.0
        return (runner, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
def map_pool(output_size, dtype=np.float32):
    """Single-frame variant of map_pool_collated: max-pool and rotate one
    H,W,C map, returning a CPU C,H,W float tensor in [-1, 1].

    Returns:
        A thunk mapping obs_space -> (transform, spaces.Box).
    """
    def _thunk(obs_space):
        def runner(x):
            with torch.no_grad():
                assert (x.shape[0] == x.shape[1]), 'we are only using square data, data format: N,H,W,C'
                if isinstance(x, torch.Tensor):
                    frame = torch.cuda.FloatTensor(x.cuda())
                else:
                    frame = torch.cuda.FloatTensor(x.copy()).cuda()
                # Add a batch dim so the pooling/rotation ops apply uniformly.
                frame.unsqueeze_(0)
                frame = frame.permute(0, 3, 1, 2) / 255.0
                frame = F.max_pool2d(frame, kernel_size=3, stride=1, padding=1)
                frame = torch.rot90(frame, k=2, dims=(2, 3))
                frame = (2.0 * frame) - 1.0
                frame.squeeze_(0)
                return frame.cpu()
        return (runner, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
class Pipeline(object):
    """Placeholder for a composable observation pipeline (not implemented)."""
    def __init__(self, env_or_pipeline):
        # env_or_pipeline is accepted but currently unused.
        pass
    def forward(self):
        # No-op stub.
        pass
|
def identity_transform():
    """Thunk factory: the produced transform returns its input unchanged
    and the observation space is passed through as-is.
    """
    def _thunk(obs_space):
        def identity(obs):
            return obs
        return (identity, obs_space)
    return _thunk
|
def fill_like(output_size, fill_value=0.0, dtype=torch.float32):
    """Thunk factory: the transform discards its input and returns a
    numpy array of `output_size` filled with `fill_value`.
    """
    def _thunk(obs_space):
        proto = torch.ones((1,), dtype=dtype)  # fixes the output dtype
        def _process(x):
            return proto.new_full(output_size, fill_value).numpy()
        return (_process, spaces.Box(-1, 1, output_size, proto.numpy().dtype))
    return _thunk
|
def rescale_centercrop_resize(output_size, dtype=np.float32):
    """Center-crop to a square, resize, and rescale pixels to [-1, 1].

    Args:
        output_size: tuple CxWxH.
        dtype: numpy dtype of the output space (np, not torch).

    obs_space is expected in WxHxC form.

    Returns:
        A thunk mapping obs_space -> (transform, spaces.Box).
    """
    def _rescale_centercrop_resize_thunk(obs_space):
        square_side = min(obs_space.shape[:2])
        # Guard against channel-first input being mistaken for W/H.
        assert (square_side > 10), 'are you sure your data format is correct? is your min wh really < 10?'
        resize_wh = output_size[-2:]
        pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.CenterCrop([square_side, square_side]),
            vision.transforms.Resize(resize_wh),
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1,
        ])
        return (pipeline, spaces.Box(-1, 1, output_size, dtype))
    return _rescale_centercrop_resize_thunk
|
def rescale_centercrop_resize_collated(output_size, dtype=np.float32):
    """Batched variant: rescales a collated N,H,W,C uint8 batch to
    N,C,H,W floats in [-1, 1] on the GPU.

    Args:
        output_size: tuple CxWxH.
        dtype: numpy dtype of the output space (np, not torch).

    Returns:
        A thunk mapping obs_space -> (transform, spaces.Box).
    """
    def _rescale_centercrop_resize_thunk(obs_space):
        square_side = min(obs_space.shape[:2])
        resize_wh = output_size[-2:]
        # NOTE(review): this PIL pipeline is constructed but never applied —
        # the runner below rescales the whole batch directly.
        pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.CenterCrop([square_side, square_side]),
            vision.transforms.Resize(resize_wh),
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1,
        ])

        def iterative_pipeline(pipeline):
            def runner(x):
                if isinstance(x, torch.Tensor):
                    frames = torch.cuda.FloatTensor(x.cuda())
                else:
                    frames = torch.cuda.FloatTensor(x).cuda()
                frames = frames.permute(0, 3, 1, 2) / 255.0
                return (2.0 * frames) - 1.0
            return runner
        return (iterative_pipeline(pipeline), spaces.Box(-1, 1, output_size, dtype))
    return _rescale_centercrop_resize_thunk
|
def rescale():
    """Rescale observations into [-1, 1].

    Returns:
        A thunk mapping obs_space -> (transform, spaces.Box).
    """
    def _rescale_thunk(obs_space):
        ndarray_pipeline = vision.transforms.Compose([
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1,
        ])

        def pipeline(im):
            # numpy frames go through ToTensor (0-1 range); anything else
            # is assumed to be a 0-255 tensor.
            if isinstance(im, np.ndarray):
                return ndarray_pipeline(im)
            return RESCALE_0_255_NEG1_POS1(im)
        return (pipeline, spaces.Box(-1.0, 1.0, obs_space.shape, np.float32))
    return _rescale_thunk
|
def grayscale_rescale():
    """Convert observations to grayscale and rescale into [-1, 1].

    Returns:
        A thunk mapping obs_space -> (transform, spaces.Box).
    """
    def _grayscale_rescale_thunk(obs_space):
        pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.Grayscale(),
            vision.transforms.ToTensor(),
            vision.transforms.Normalize([0.5], [0.5]),
        ])
        height, width = obs_space.shape[0], obs_space.shape[1]
        return (pipeline, spaces.Box(-1.0, 1.0, (1, height, width), dtype=np.float32))
    return _grayscale_rescale_thunk
|
def cross_modal_transform(eval_to_get_net, output_shape=(3, 84, 84), dtype=np.float32):
    """Build a thunk that maps observations through a cross-modal network.

    Args:
        eval_to_get_net: A network (callable) applied to the rescaled input.
        output_shape: Tuple CxHxW of the transformed observation.
        dtype: numpy dtype of the output space (must be np, not torch).

    Returns:
        A function taking ``obs_space`` and returning ``(transform, space)``.
    """
    _rescale_thunk = rescale_centercrop_resize((3, 256, 256))
    output_size = output_shape[-1]
    net = eval_to_get_net
    # (The original had a no-op `output_shape = output_shape`; removed.)
    resize_fn = vision.transforms.Compose([
        vision.transforms.ToPILImage(),
        vision.transforms.Resize(output_size),
        vision.transforms.ToTensor(),
        RESCALE_0_1_NEG1_POS1,
    ])
    def encode(x):
        # Inference only -- no gradients through the cross-modal net.
        with torch.no_grad():
            return net(x)
    def _transform_thunk(obs_space):
        (rescale, _) = _rescale_thunk(obs_space)
        def pipeline(x):
            with torch.no_grad():
                if isinstance(x, torch.Tensor):
                    x = torch.cuda.FloatTensor(x.cuda())
                else:
                    x = torch.cuda.FloatTensor(x).cuda()
                x = encode(x)
                y = (x + 1.0) / 2   # [-1, 1] -> [0, 1] for ToPILImage
                z = torch.stack([resize_fn(y_.cpu()) for y_ in y])
                return z
        return (pipeline, spaces.Box(-1, 1, output_shape, dtype))
    return _transform_thunk
|
def image_to_input_collated(output_size, dtype=np.float32):
    """Thunk factory: batched uint8 NHWC images -> float NCHW in [-1, 1]."""
    def _thunk(obs_space):
        def runner(batch):
            assert (batch.shape[2] == batch.shape[1]), 'Input image must be square, of the form: N,H,W,C'
            if isinstance(batch, torch.Tensor):
                gpu = torch.cuda.FloatTensor(batch.cuda())
            else:
                gpu = torch.cuda.FloatTensor(batch.copy()).cuda()
            scaled = gpu.permute(0, 3, 1, 2) / 255.0
            return (2.0 * scaled) - 1.0
        return (runner, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
def map_pool(output_size, dtype=np.float32):
    """Thunk factory for single (uncollated) map observations.

    The returned transform max-pools, rotates 180 degrees, and rescales a
    single HxWxC image into a CxHxW tensor in [-1, 1], returned on the CPU.
    """
    def _thunk(obs_space):
        def runner(x):
            with torch.no_grad():
                assert (x.shape[0] == x.shape[1]), 'we are only using square data, data format: N,H,W,C'
                if isinstance(x, torch.Tensor):
                    x = torch.cuda.FloatTensor(x.cuda())
                else:
                    x = torch.cuda.FloatTensor(x.copy()).cuda()
                # Add a batch dimension so pooling works on a single image.
                x.unsqueeze_(0)
                x = (x.permute(0, 3, 1, 2) / 255.0)
                # Smooth the map with a stride-1 max pool (shape preserved).
                x = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
                # Rotate the map 180 degrees in the spatial dims.
                x = torch.rot90(x, k=2, dims=(2, 3))
                x = ((2.0 * x) - 1.0)
                x.squeeze_(0)
                return x.cpu()
        return (runner, spaces.Box((- 1), 1, output_size, dtype))
    return _thunk
|
def map_pool_collated(output_size, dtype=np.float32):
    """Thunk factory for batched map observations (N,H,W,C).

    The returned transform max-pools, rotates 180 degrees, and rescales a
    batch of images into NCHW tensors in [-1, 1] on the GPU.

    Args:
        output_size: Shape of the processed observation space.
        dtype: numpy dtype of the output space (must be np, not torch).
    """
    def _thunk(obs_space):
        def runner(x):
            # One no_grad scope suffices (the original nested a second,
            # redundant one around the pooling).
            with torch.no_grad():
                assert (x.shape[2] == x.shape[1]), 'we are only using square data, data format: N,H,W,C'
                if isinstance(x, torch.Tensor):
                    x = torch.cuda.FloatTensor(x.cuda())
                else:
                    x = torch.cuda.FloatTensor(x.copy()).cuda()
                x = x.permute(0, 3, 1, 2) / 255.0
                # Smooth with a stride-1 max pool (shape preserved), then
                # rotate the maps 180 degrees in the spatial dims.
                x = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
                x = torch.rot90(x, k=2, dims=(2, 3))
                return (2.0 * x) - 1.0
        return (runner, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
|
def taskonomy_features_transform(task_path, model='TaskonomyEncoder', dtype=np.float32, device=None, normalize_outputs=False):
    """Build a thunk that encodes observations with a frozen encoder network.

    Args:
        task_path: Checkpoint file containing the encoder's ``state_dict``.
        model: Class name of the encoder to instantiate (resolved by name).
        dtype: numpy dtype of the output space (must be np, not torch).
        device: CUDA device for the network and inputs.
        normalize_outputs: Forwarded to the encoder constructor.

    Returns:
        A function taking ``obs_space`` and returning ``(transform, space)``.
    """
    # SECURITY NOTE(review): eval() on `model` executes arbitrary code; only
    # acceptable because the value comes from trusted configuration.
    net = eval(model)(normalize_outputs=normalize_outputs, eval_only=True).cuda(device=device)
    net.eval()
    checkpoint = torch.load(task_path)
    net.load_state_dict(checkpoint['state_dict'])
    print(f'Loaded taskonomy transform with {model} from {task_path}')
    def encode(x):
        # Inference only -- no gradients through the frozen encoder.
        with torch.no_grad():
            return net(x)
    def _taskonomy_features_transform_thunk(obs_space):
        def pipeline(x):
            x = torch.Tensor(x).cuda(device=device)
            x = encode(x)
            return x
        # Feature space is hard-coded to 8x16x16 -- assumes the encoder's
        # output shape; TODO confirm for non-default `model` values.
        return (pipeline, spaces.Box((- 1), 1, (8, 16, 16), dtype))
    return _taskonomy_features_transform_thunk
|
def _load_encoder(encoder_path):
    """Load a frozen encoder from ``encoder_path``.

    Checkpoints whose path mentions 'student' or 'distil' get an FCN5
    architecture; anything else is assumed to be a TaskonomyEncoder.
    All parameters are frozen before returning.
    """
    # Architecture is inferred from the checkpoint path naming convention.
    if (('student' in encoder_path) or ('distil' in encoder_path)):
        net = FCN5(normalize_outputs=True, eval_only=True, train=False)
    else:
        net = TaskonomyEncoder()
    net.eval()
    checkpoint = torch.load(encoder_path)
    state_dict = checkpoint['state_dict']
    try:
        net.load_state_dict(state_dict, strict=True)
    except RuntimeError as e:
        # Fall back to a partial (non-strict) load and report what matched.
        incompatible = net.load_state_dict(state_dict, strict=False)
        if (incompatible is None):
            # Some torch versions return None instead of a report object.
            warnings.warn('load_state_dict not showing missing/unexpected keys!')
        else:
            print(f'''{e}, reloaded with strict=False
Num matches: {len([k for k in net.state_dict() if (k in state_dict)])}
Num missing: {len(incompatible.missing_keys)}
Num unexpected: {len(incompatible.unexpected_keys)}''')
    # Freeze: encoders are used as fixed feature extractors.
    for p in net.parameters():
        p.requires_grad = False
    return net
|
def _load_encoders_seq(encoder_paths):
    """Load encoders one at a time, then move them all to the GPU."""
    loaded = []
    for encoder_path in encoder_paths:
        try:
            loaded.append(_load_encoder(encoder_path))
        except RuntimeError as e:
            # Surface which checkpoint failed before propagating the error.
            warnings.warn(f'Unable to load {encoder_path} due to {e}')
            raise e
    return [net.cuda() for net in loaded]
|
def _load_encoders_parallel(encoder_paths, n_processes=None):
    """Load encoders in a process pool, then move them all to the GPU.

    Args:
        encoder_paths: Checkpoint paths, one per encoder.
        n_processes: Upper bound on worker processes; defaults to one
            process per path.

    Returns:
        List of encoders on the GPU, in the same order as ``encoder_paths``.
    """
    if n_processes is None:
        n_processes = len(encoder_paths)
    else:
        n_processes = min(len(encoder_paths), n_processes)
    # Also cap by available cores. (The original applied this min twice --
    # `min(n_parallel, n_processes)` was redundant since n_parallel was
    # already bounded by n_processes.)
    n_parallel = min(multiprocessing.cpu_count(), n_processes)
    pool = multiprocessing.Pool(n_parallel)
    experts = pool.map(_load_encoder, encoder_paths)
    pool.close()
    pool.join()
    # .cuda() happens in the parent; workers only build CPU-side networks.
    experts = [e.cuda() for e in experts]
    return experts
|
def taskonomy_multi_features_transform(task_paths, dtype=np.float32):
    """Thunk factory: concatenate features from several frozen encoders.

    Args:
        task_paths: Checkpoint paths, one per encoder.
        dtype: numpy dtype of the output space (must be np, not torch).

    Returns:
        A function taking ``obs_space`` and returning ``(transform, space)``.
    """
    nets = _load_encoders_seq(task_paths)
    def encode(x):
        with torch.no_grad():
            # Channel-concatenate each encoder's feature maps.
            return torch.cat([net(x) for net in nets], dim=1)
    def _taskonomy_features_transform_thunk(obs_space):
        def pipeline(x):
            features = encode(torch.Tensor(x).cuda())
            return features.cpu()
        return (pipeline, spaces.Box(-1, 1, (8 * len(nets), 16, 16), dtype))
    return _taskonomy_features_transform_thunk
|
def taskonomy_features_transform_collated(task_path, dtype=np.float32):
    """Thunk factory: encode collated uint8 NHWC batches with a frozen
    TaskonomyEncoder.

    Args:
        task_path: Checkpoint file containing the encoder ``state_dict``.
        dtype: numpy dtype of the output space (must be np, not torch).

    Returns:
        A function taking ``obs_space`` and returning ``(transform, space)``.
    """
    net = TaskonomyEncoder().cuda()
    net.eval()
    checkpoint = torch.load(task_path)
    net.load_state_dict(checkpoint['state_dict'])
    def encode(x):
        with torch.no_grad():
            # The original converted to a Tensor first, which made its
            # subsequent isinstance() branch dead code -- one conversion
            # to a CUDA float tensor suffices.
            x = torch.cuda.FloatTensor(torch.Tensor(x).cuda())
            x = x.permute(0, 3, 1, 2) / 255.0   # NHWC uint8 -> NCHW in [0, 1]
            x = (2.0 * x) - 1.0                 # [0, 1] -> [-1, 1]
            return net(x)
    def _taskonomy_features_transform_thunk(obs_space):
        pipeline = (lambda x: encode(x).cpu())
        return (pipeline, spaces.Box(-1, 1, (8, 16, 16), dtype))
    return _taskonomy_features_transform_thunk
|
def taskonomy_features_transforms_collated(task_paths, encoder_type='taskonomy', dtype=np.float32):
    """Thunk factory: encode collated batches with one or more encoders.

    Args:
        task_paths: Comma-separated checkpoint paths, or the special values
            'pixels_as_state' / 'blind' (no encoders are loaded).
        encoder_type: Only 'taskonomy' is supported.
        dtype: numpy dtype of the output space (must be np, not torch).

    Returns:
        A function taking ``obs_space`` and returning ``(transform, space)``.
    """
    num_tasks = 0
    if ((task_paths != 'pixels_as_state') and (task_paths != 'blind')):
        task_path_list = [tp.strip() for tp in task_paths.split(',')]
        num_tasks = len(task_path_list)
        assert (num_tasks > 0), 'at least need one path'
        if (encoder_type == 'taskonomy'):
            nets = [TaskonomyEncoder(normalize_outputs=False) for _ in range(num_tasks)]
        else:
            assert False, f'do not recongize encoder type {encoder_type}'
        for (i, task_path) in enumerate(task_path_list):
            checkpoint = torch.load(task_path)
            # Checkpoints may store either a whole nn.Module or a state_dict.
            net_in_ckpt = [v for v in checkpoint.values() if isinstance(v, nn.Module)]
            if (len(net_in_ckpt) > 0):
                nets[i] = net_in_ckpt[0]
            elif ('state_dict' in checkpoint.keys()):
                nets[i].load_state_dict(checkpoint['state_dict'])
            else:
                assert False, f'Cannot read task_path {task_path}, no nn.Module or state_dict found. Encoder_type is {encoder_type}'
            nets[i] = nets[i].cuda()
            nets[i].eval()
    def encode(x):
        if ((task_paths == 'pixels_as_state') or (task_paths == 'blind')):
            return x
        with torch.no_grad():
            feats = []
            for net in nets:
                feats.append(net(x))
            return torch.cat(feats, dim=1)
    def _taskonomy_features_transform_thunk(obs_space):
        def pipeline(x):
            with torch.no_grad():
                if isinstance(x, torch.Tensor):
                    x = torch.cuda.FloatTensor(x.cuda())
                else:
                    x = torch.cuda.FloatTensor(x).cuda()
                x = (x.permute(0, 3, 1, 2) / 255.0)
                x = ((2.0 * x) - 1.0)
                x = encode(x)
                return x
        def pixels_as_state_pipeline(x):
            return pixels_as_state(x).cpu()
        # BUGFIX: this previously tested `task_path` -- the loop variable of
        # the checkpoint loop above -- which is undefined exactly when
        # task_paths == 'pixels_as_state' (the loop never ran), raising a
        # NameError. Test the function argument instead.
        if (task_paths == 'pixels_as_state'):
            return (pixels_as_state_pipeline, spaces.Box((- 1), 1, (8, 16, 16), dtype))
        else:
            return (pipeline, spaces.Box((- 1), 1, ((8 * num_tasks), 16, 16), dtype))
    return _taskonomy_features_transform_thunk
|
class A2C_ACKTR(object):
    """Advantage Actor-Critic (A2C), optionally with ACKTR's K-FAC optimizer."""

    def __init__(self, actor_critic, value_loss_coef, entropy_coef, lr=None, eps=None, alpha=None, max_grad_norm=None, acktr=False):
        """
        Args:
            actor_critic: Policy exposing ``evaluate_actions``.
            value_loss_coef: Weight of the value-function loss.
            entropy_coef: Weight of the entropy bonus.
            lr, eps, alpha: RMSprop hyperparameters (unused when acktr=True).
            max_grad_norm: Gradient clipping threshold (A2C only).
            acktr: Use the K-FAC natural-gradient optimizer instead of RMSprop.
        """
        self.actor_critic = actor_critic
        self.acktr = acktr
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.max_grad_norm = max_grad_norm
        if acktr:
            self.optimizer = KFACOptimizer(actor_critic)
        else:
            self.optimizer = optim.RMSprop(actor_critic.parameters(), lr, eps=eps, alpha=alpha)

    def update(self, rollouts):
        """Run one A2C/ACKTR update over ``rollouts``.

        Returns:
            (value_loss, action_loss, dist_entropy) as Python floats.
        """
        obs_shape = rollouts.observations.size()[2:]
        action_shape = rollouts.actions.size()[-1]
        (num_steps, num_processes, _) = rollouts.rewards.size()
        (values, action_log_probs, dist_entropy, states) = self.actor_critic.evaluate_actions(rollouts.observations[:(- 1)].view((- 1), *obs_shape), rollouts.states[0].view((- 1), self.actor_critic.state_size), rollouts.masks[:(- 1)].view((- 1), 1), rollouts.actions.view((- 1), action_shape))
        values = values.view(num_steps, num_processes, 1)
        action_log_probs = action_log_probs.view(num_steps, num_processes, 1)
        advantages = rollouts.returns[:(- 1)] - values
        value_loss = advantages.pow(2).mean()
        action_loss = -(advantages.detach() * action_log_probs).mean()
        if self.acktr and ((self.optimizer.steps % self.optimizer.Ts) == 0):
            # Periodically refresh the K-FAC Fisher statistics with an extra
            # backward pass through sampled outputs.
            self.actor_critic.zero_grad()
            pg_fisher_loss = -action_log_probs.mean()
            value_noise = torch.randn(values.size())
            if values.is_cuda:
                value_noise = value_noise.cuda()
            sample_values = values + value_noise
            vf_fisher_loss = -(values - sample_values.detach()).pow(2).mean()
            fisher_loss = pg_fisher_loss + vf_fisher_loss
            self.optimizer.acc_stats = True
            fisher_loss.backward(retain_graph=True)
            self.optimizer.acc_stats = False
        self.optimizer.zero_grad()
        (((value_loss * self.value_loss_coef) + action_loss) - (dist_entropy * self.entropy_coef)).backward()
        # Idiom fix: `if (self.acktr == False)` -> `if not self.acktr`.
        # K-FAC performs its own update scaling, so only clip for plain A2C.
        if not self.acktr:
            nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
        self.optimizer.step()
        return (value_loss.item(), action_loss.item(), dist_entropy.item())
|
class QLearner(nn.Module):
    """DQN-style Q-learner with a target network, epsilon-greedy exploration,
    and support for prioritized replay via the rollout buffer."""

    def __init__(self, actor_network, target_network, action_dim, batch_size, lr, eps, gamma, copy_frequency, start_schedule, schedule_timesteps, initial_p, final_p):
        super(QLearner, self).__init__()
        self.actor_network = actor_network
        self.target_network = target_network
        # Epsilon schedule for exploration; beta schedule for importance
        # sampling in prioritized replay (annealed 0.4 -> 1.0).
        self.learning_schedule = LearningSchedule(start_schedule, schedule_timesteps, initial_p, final_p)
        self.beta_schedule = LearningSchedule(start_schedule, schedule_timesteps, 0.4, 1.0)
        self.action_dim = action_dim
        self.copy_frequency = copy_frequency
        self.batch_size = batch_size
        self.gamma = gamma
        self.optimizer = optim.Adam(actor_network.parameters(), lr=lr, eps=eps)
        self.step = 0

    def cuda(self):
        # NOTE(review): overrides nn.Module.cuda; only the two networks are
        # moved, and nothing is returned (unlike nn.Module.cuda).
        self.actor_network = self.actor_network.cuda()
        self.target_network = self.target_network.cuda()

    def act(self, observation, greedy=False):
        """Epsilon-greedy action selection; also syncs the target network."""
        self.step += 1
        # Periodically copy actor weights into the frozen target network.
        if ((self.step % self.copy_frequency) == 1):
            self.target_network.load_state_dict(self.actor_network.state_dict())
        if ((random.random() > self.learning_schedule.value(self.step)) or greedy):
            with torch.no_grad():
                return self.actor_network(observation).max(1)[1].view(1, 1)
        else:
            return torch.tensor([[random.randrange(self.action_dim)]])

    def update(self, rollouts):
        """One TD update on a sampled minibatch; returns the scalar loss."""
        loss_epoch = 0
        (observations, actions, rewards, masks, next_observations, weights, indices) = rollouts.sample(self.batch_size, beta=self.beta_schedule.value(self.step))
        # Bootstrapped target from the (detached) target network.
        next_state_values = self.target_network(next_observations).detach().max(1)[0].unsqueeze(1)
        state_action_values = self.actor_network(observations).gather(1, actions)
        targets = (rewards + ((self.gamma * masks) * next_state_values))
        if rollouts.use_priority:
            with torch.no_grad():
                # Small constant keeps priorities strictly positive.
                td_errors = (torch.abs((targets - state_action_values)).detach() + 1e-06)
                rollouts.update_priorities(indices, td_errors)
        # Importance-weighted squared TD error.
        loss = torch.sum((weights * ((targets - state_action_values) ** 2)))
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        loss_epoch += loss.item()
        return loss_epoch

    def get_epsilon(self):
        # Current exploration rate under the epsilon schedule.
        return self.learning_schedule.value(self.step)
|
class LearningSchedule(object):
    """Linear interpolation schedule with a delayed start.

    Before ``start_schedule`` steps the value stays at ``initial_p``; it then
    moves linearly to ``final_p`` over ``schedule_timesteps`` steps and stays
    there afterwards.
    """

    def __init__(self, start_schedule, schedule_timesteps, initial_p=1.0, final_p=0.05):
        self.initial_p = initial_p
        self.final_p = final_p
        self.schedule_timesteps = schedule_timesteps
        self.start_schedule = start_schedule

    def value(self, t):
        """Return the scheduled value at timestep ``t``."""
        elapsed = max(0.0, float(t - self.start_schedule))
        fraction = min(elapsed / self.schedule_timesteps, 1.0)
        return self.initial_p + fraction * (self.final_p - self.initial_p)
|
def _extract_patches(x, kernel_size, stride, padding):
if ((padding[0] + padding[1]) > 0):
x = F.pad(x, (padding[1], padding[1], padding[0], padding[0])).data
x = x.unfold(2, kernel_size[0], stride[0])
x = x.unfold(3, kernel_size[1], stride[1])
x = x.transpose_(1, 2).transpose_(2, 3).contiguous()
x = x.view(x.size(0), x.size(1), x.size(2), ((x.size(3) * x.size(4)) * x.size(5)))
return x
|
def compute_cov_a(a, classname, layer_info, fast_cnn):
    """Estimate the K-FAC activation covariance factor a^T a / batch_size."""
    batch_size = a.size(0)
    if (classname == 'Conv2d'):
        patches = _extract_patches(a, *layer_info)
        if fast_cnn:
            # Average over all spatial positions.
            a = patches.view(patches.size(0), (- 1), patches.size((- 1))).mean(1)
        else:
            # Treat every spatial position as a sample, scaled down by the
            # number of positions (oh * ow).
            a = patches.view((- 1), patches.size((- 1))).div_(patches.size(1)).div_(patches.size(2))
    elif (classname == 'AddBias'):
        # A bias layer effectively sees a constant input of ones.
        ones = torch.ones(a.size(0), 1)
        a = ones.cuda() if a.is_cuda else ones
    return (a.t() @ (a / batch_size))
|
def compute_cov_g(g, classname, layer_info, fast_cnn):
    """Estimate the K-FAC gradient covariance factor from output gradients."""
    batch_size = g.size(0)
    if (classname == 'Conv2d'):
        if fast_cnn:
            # Sum the gradients over all spatial positions.
            g = g.view(g.size(0), g.size(1), (- 1)).sum((- 1))
        else:
            nhwc = g.transpose(1, 2).transpose(2, 3).contiguous()
            # Each spatial position is a sample, scaled up by their count.
            g = nhwc.view((- 1), nhwc.size((- 1))).mul_(nhwc.size(1)).mul_(nhwc.size(2))
    elif (classname == 'AddBias'):
        # Bias gradients: sum over everything but batch and channel dims.
        g = g.view(g.size(0), g.size(1), (- 1)).sum((- 1))
    scaled = g * batch_size
    return (scaled.t() @ (scaled / g.size(0)))
|
def update_running_stat(aa, m_aa, momentum):
    """In-place EMA update: m_aa <- momentum * m_aa + (1 - momentum) * aa.

    Written multiplicatively so the update happens entirely in place on
    ``m_aa`` (the caller's tensor is mutated).
    """
    blend = 1 - momentum
    m_aa.mul_(momentum / blend)
    m_aa.add_(aa)
    m_aa.mul_(blend)
|
class SplitBias(nn.Module):
    """Wrap a layer so its bias is applied by a separate AddBias module.

    K-FAC treats weights and biases as distinct layers; this wrapper strips
    the bias from ``module`` and re-applies it after the forward pass.
    """

    def __init__(self, module):
        super(SplitBias, self).__init__()
        self.module = module
        # Capture the bias before detaching it from the wrapped module.
        self.add_bias = AddBias(module.bias.data)
        self.module.bias = None

    def forward(self, input):
        return self.add_bias(self.module(input))
|
class KFACOptimizer(optim.Optimizer):
    """K-FAC (Kronecker-Factored Approximate Curvature) optimizer.

    Maintains per-layer Kronecker factors of the curvature -- input
    covariances ``m_aa`` and output-gradient covariances ``m_gg`` -- via
    forward/backward hooks, preconditions gradients with their
    eigendecompositions, and delegates the actual parameter update to an
    inner SGD-with-momentum optimizer.
    """

    def __init__(self, model, lr=0.25, momentum=0.9, stat_decay=0.99, kl_clip=0.001, damping=0.01, weight_decay=0, fast_cnn=False, Ts=1, Tf=10):
        defaults = dict()
        # Replace every layer that owns a bias with a SplitBias wrapper so
        # that weights and biases are tracked as separate K-FAC layers.
        def split_bias(module):
            for (mname, child) in module.named_children():
                if (hasattr(child, 'bias') and (child.bias is not None)):
                    module._modules[mname] = SplitBias(child)
                else:
                    split_bias(child)
        split_bias(model)
        super(KFACOptimizer, self).__init__(model.parameters(), defaults)
        self.known_modules = {'Linear', 'Conv2d', 'AddBias'}
        self.modules = []
        self.grad_outputs = {}
        self.model = model
        self._prepare_model()
        self.steps = 0
        # Running Kronecker factors and their eigendecompositions, keyed by
        # module.
        (self.m_aa, self.m_gg) = ({}, {})
        (self.Q_a, self.Q_g) = ({}, {})
        (self.d_a, self.d_g) = ({}, {})
        self.momentum = momentum
        self.stat_decay = stat_decay
        self.lr = lr
        self.kl_clip = kl_clip
        self.damping = damping
        self.weight_decay = weight_decay
        self.fast_cnn = fast_cnn
        # Ts: statistics update interval; Tf: eigendecomposition interval.
        self.Ts = Ts
        self.Tf = Tf
        self.optim = optim.SGD(model.parameters(), lr=(self.lr * (1 - self.momentum)), momentum=self.momentum)

    def _save_input(self, module, input):
        # Forward pre-hook: accumulate the input covariance every Ts steps.
        if (torch.is_grad_enabled() and ((self.steps % self.Ts) == 0)):
            classname = module.__class__.__name__
            layer_info = None
            if (classname == 'Conv2d'):
                layer_info = (module.kernel_size, module.stride, module.padding)
            aa = compute_cov_a(input[0].data, classname, layer_info, self.fast_cnn)
            if (self.steps == 0):
                self.m_aa[module] = aa.clone()
            update_running_stat(aa, self.m_aa[module], self.stat_decay)

    def _save_grad_output(self, module, grad_input, grad_output):
        # Backward hook: accumulate the output-gradient covariance, but only
        # during the dedicated Fisher-estimation backward pass (acc_stats
        # is toggled by the caller, e.g. A2C_ACKTR.update).
        if self.acc_stats:
            classname = module.__class__.__name__
            layer_info = None
            if (classname == 'Conv2d'):
                layer_info = (module.kernel_size, module.stride, module.padding)
            gg = compute_cov_g(grad_output[0].data, classname, layer_info, self.fast_cnn)
            if (self.steps == 0):
                self.m_gg[module] = gg.clone()
            update_running_stat(gg, self.m_gg[module], self.stat_decay)

    def _prepare_model(self):
        # Register hooks on every supported layer type.
        # NOTE(review): register_backward_hook is deprecated in newer torch
        # in favor of register_full_backward_hook -- confirm torch version.
        for module in self.model.modules():
            classname = module.__class__.__name__
            if (classname in self.known_modules):
                assert (not ((classname in ['Linear', 'Conv2d']) and (module.bias is not None))), 'You must have a bias as a separate layer'
                self.modules.append(module)
                module.register_forward_pre_hook(self._save_input)
                module.register_backward_hook(self._save_grad_output)

    def step(self):
        """Precondition all gradients with the K-FAC factors, rescale to the
        KL trust region, then take an SGD-with-momentum step."""
        if (self.weight_decay > 0):
            # NOTE(review): add_(scalar, tensor) is the deprecated torch
            # signature; newer torch expects add_(tensor, alpha=scalar).
            for p in self.model.parameters():
                p.grad.data.add_(self.weight_decay, p.data)
        updates = {}
        for (i, m) in enumerate(self.modules):
            assert (len(list(m.parameters())) == 1), 'Can handle only one parameter at the moment'
            classname = m.__class__.__name__
            p = next(m.parameters())
            la = (self.damping + self.weight_decay)
            if ((self.steps % self.Tf) == 0):
                # Refresh eigendecompositions of both factors.
                # NOTE(review): torch.symeig was removed in recent torch;
                # torch.linalg.eigh is the replacement.
                (self.d_a[m], self.Q_a[m]) = torch.symeig(self.m_aa[m], eigenvectors=True)
                (self.d_g[m], self.Q_g[m]) = torch.symeig(self.m_gg[m], eigenvectors=True)
                # Zero out numerically negligible eigenvalues.
                self.d_a[m].mul_((self.d_a[m] > 1e-06).float())
                self.d_g[m].mul_((self.d_g[m] > 1e-06).float())
            if (classname == 'Conv2d'):
                # Flatten conv kernels to (out_channels, everything else).
                p_grad_mat = p.grad.data.view(p.grad.data.size(0), (- 1))
            else:
                p_grad_mat = p.grad.data
            # Preconditioned direction in the eigenbasis:
            # v = Q_g (Q_g^T G Q_a ./ (d_g d_a^T + la)) Q_a^T
            v1 = ((self.Q_g[m].t() @ p_grad_mat) @ self.Q_a[m])
            v2 = (v1 / ((self.d_g[m].unsqueeze(1) * self.d_a[m].unsqueeze(0)) + la))
            v = ((self.Q_g[m] @ v2) @ self.Q_a[m].t())
            v = v.view(p.grad.data.size())
            updates[p] = v
        vg_sum = 0
        for p in self.model.parameters():
            v = updates[p]
            vg_sum += (((v * p.grad.data) * self.lr) * self.lr).sum()
        # Scale the whole update so the estimated KL change stays <= kl_clip.
        nu = min(1, math.sqrt((self.kl_clip / vg_sum)))
        for p in self.model.parameters():
            v = updates[p]
            p.grad.data.copy_(v)
            p.grad.data.mul_(nu)
        self.optim.step()
        self.steps += 1
|
class PPO(object):
    """Proximal Policy Optimization with the clipped surrogate objective."""

    def __init__(self, actor_critic, clip_param, ppo_epoch, num_mini_batch, value_loss_coef, entropy_coef, lr=None, eps=None, max_grad_norm=None, amsgrad=True, weight_decay=0.0):
        self.actor_critic = actor_critic
        self.clip_param = clip_param
        self.ppo_epoch = ppo_epoch
        self.num_mini_batch = num_mini_batch
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.max_grad_norm = max_grad_norm
        self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        self.last_grad_norm = None

    def update(self, rollouts):
        """Run ``ppo_epoch`` epochs of clipped-surrogate minibatch updates.

        Returns:
            (value_loss, action_loss, dist_entropy, max_importance_weight, {})
            -- averages over all minibatches (max for the importance weight).
        """
        advantages = (rollouts.returns[:(- 1)] - rollouts.value_preds[:(- 1)])
        # Normalize advantages across the whole rollout.
        advantages = ((advantages - advantages.mean()) / (advantages.std() + 1e-05))
        value_loss_epoch = 0
        action_loss_epoch = 0
        dist_entropy_epoch = 0
        max_importance_weight_epoch = 0
        for e in range(self.ppo_epoch):
            # Recurrent policies need sequence-preserving minibatches.
            if hasattr(self.actor_critic.base, 'gru'):
                data_generator = rollouts.recurrent_generator(advantages, self.num_mini_batch)
            else:
                data_generator = rollouts.feed_forward_generator(advantages, self.num_mini_batch)
            for sample in data_generator:
                (observations_batch, states_batch, actions_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ) = sample
                (values, action_log_probs, dist_entropy, states) = self.actor_critic.evaluate_actions(observations_batch, states_batch, masks_batch, actions_batch)
                # Importance ratio between current and behavior policies.
                ratio = torch.exp((action_log_probs - old_action_log_probs_batch))
                surr1 = (ratio * adv_targ)
                surr2 = (torch.clamp(ratio, (1.0 - self.clip_param), (1.0 + self.clip_param)) * adv_targ)
                # Pessimistic (clipped) policy objective.
                action_loss = (- torch.min(surr1, surr2).mean())
                value_loss = F.mse_loss(values, return_batch)
                self.optimizer.zero_grad()
                (((value_loss * self.value_loss_coef) + action_loss) - (dist_entropy * self.entropy_coef)).backward()
                self.last_grad_norm = nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
                self.optimizer.step()
                value_loss_epoch += value_loss.item()
                action_loss_epoch += action_loss.item()
                dist_entropy_epoch += dist_entropy.item()
                max_importance_weight_epoch = max(torch.max(ratio).item(), max_importance_weight_epoch)
        num_updates = (self.ppo_epoch * self.num_mini_batch)
        value_loss_epoch /= num_updates
        action_loss_epoch /= num_updates
        dist_entropy_epoch /= num_updates
        return (value_loss_epoch, action_loss_epoch, dist_entropy_epoch, max_importance_weight_epoch, {})
|
class PPOCuriosity(object):
    """PPO with an intrinsic-curiosity module (forward/inverse models).

    The forward-model prediction error is added as an intrinsic reward, and
    both curiosity models are trained jointly with the policy.
    """

    def __init__(self, actor_critic, clip_param, ppo_epoch, num_mini_batch, value_loss_coef, entropy_coef, optimizer=None, lr=None, eps=None, max_grad_norm=None, amsgrad=True, weight_decay=0.0):
        self.actor_critic = actor_critic
        self.clip_param = clip_param
        self.ppo_epoch = ppo_epoch
        self.num_mini_batch = num_mini_batch
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        # Hard-coded curiosity loss weights and reward mixing proportion.
        self.forward_loss_coef = 0.2
        self.inverse_loss_coef = 0.8
        self.curiosity_coef = 0.2
        self.original_task_reward_proportion = 1.0
        self.max_grad_norm = max_grad_norm
        self.optimizer = optimizer
        if (self.optimizer is None):
            self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        self.last_grad_norm = None

    def update(self, rollouts):
        """Run PPO epochs with curiosity bonuses added to returns/advantages.

        Returns:
            (value_loss, action_loss, dist_entropy, {}) averaged over all
            minibatch updates.
        """
        advantages = ((rollouts.returns * self.original_task_reward_proportion) - rollouts.value_preds)
        value_loss_epoch = 0
        action_loss_epoch = 0
        dist_entropy_epoch = 0
        max_importance_weight_epoch = 0
        self.forward_loss_epoch = 0
        self.inverse_loss_epoch = 0
        for e in range(self.ppo_epoch):
            if hasattr(self.actor_critic.base, 'gru'):
                data_generator = rollouts.recurrent_generator(advantages, self.num_mini_batch)
                raise NotImplementedError('PPOCuriosity has not implemented for recurrent networks because masking is undefined')
            else:
                data_generator = rollouts.feed_forward_generator_with_next_state(advantages, self.num_mini_batch)
            for sample in data_generator:
                (observations_batch, next_observations_batch, rnn_history_state, actions_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ) = sample
                (values, action_log_probs, dist_entropy, next_rnn_history_state, state_features) = self.actor_critic.evaluate_actions(observations_batch, rnn_history_state, masks_batch, actions_batch)
                (value, next_state_features, _) = self.actor_critic.base(next_observations_batch, next_rnn_history_state, masks_batch)
                # Inverse model: predict the action from (s, s').
                pred_action = self.actor_critic.base.inverse_model(state_features.detach(), next_state_features)
                self.inverse_loss = F.cross_entropy(pred_action, actions_batch.squeeze(1))
                one_hot_actions = torch.zeros((actions_batch.shape[0], self.actor_critic.dist.num_outputs), device=actions_batch.device)
                one_hot_actions.scatter_(1, actions_batch, 1.0)
                # Forward model: predict phi(s') from (phi(s), a); its error
                # is the curiosity signal.
                pred_next_state = self.actor_critic.base.forward_model(state_features.detach(), one_hot_actions)
                self.forward_loss = F.mse_loss(pred_next_state, next_state_features.detach())
                curiosity_bonus = (((1.0 - self.original_task_reward_proportion) * self.curiosity_coef) * self.forward_loss)
                return_batch += curiosity_bonus
                adv_targ += curiosity_bonus
                adv_targ = ((adv_targ - adv_targ.mean()) / (adv_targ.std() + 1e-05))
                ratio = torch.exp((action_log_probs - old_action_log_probs_batch))
                clipped_ratio = torch.clamp(ratio, (1.0 - self.clip_param), (1.0 + self.clip_param))
                surr1 = (ratio * adv_targ)
                surr2 = (clipped_ratio * adv_targ)
                self.action_loss = (- torch.min(surr1, surr2).mean())
                self.value_loss = F.mse_loss(values, return_batch)
                self.dist_entropy = dist_entropy
                self.optimizer.zero_grad()
                self.get_loss().backward()
                # BUGFIX: the original called `.parameters()` on
                # self.forward_loss / self.inverse_loss, which are loss
                # tensors (no .parameters()) -- an AttributeError at runtime.
                # Clip the curiosity submodules instead, mirroring
                # PPOReplayCuriosity.
                nn.utils.clip_grad_norm_(self.actor_critic.base.forward_model.parameters(), self.max_grad_norm)
                nn.utils.clip_grad_norm_(self.actor_critic.base.inverse_model.parameters(), self.max_grad_norm)
                self.last_grad_norm = nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
                self.optimizer.step()
                value_loss_epoch += self.value_loss.item()
                action_loss_epoch += self.action_loss.item()
                dist_entropy_epoch += self.dist_entropy.item()
                self.forward_loss_epoch += self.forward_loss.item()
                self.inverse_loss_epoch += self.inverse_loss.item()
                max_importance_weight_epoch = max(torch.max(ratio).item(), max_importance_weight_epoch)
        num_updates = (self.ppo_epoch * self.num_mini_batch)
        value_loss_epoch /= num_updates
        action_loss_epoch /= num_updates
        dist_entropy_epoch /= num_updates
        self.forward_loss_epoch /= num_updates
        self.inverse_loss_epoch /= num_updates
        self.last_update_max_importance_weight = max_importance_weight_epoch
        return (value_loss_epoch, action_loss_epoch, dist_entropy_epoch, {})

    def get_loss(self):
        """Combined PPO + curiosity loss from the most recent minibatch."""
        return (((((self.value_loss * self.value_loss_coef) + self.action_loss) - (self.dist_entropy * self.entropy_coef)) + (self.forward_loss * self.forward_loss_coef)) + (self.inverse_loss * self.inverse_loss_coef))
|
class PPOReplayCuriosity(object):
    """PPO with experience replay and an intrinsic-curiosity module.

    Runs a shuffled mix of on-policy and off-policy epochs; the forward-model
    prediction error is added (detached) to the advantage targets as a
    curiosity bonus, and the forward/inverse models are trained jointly.
    """

    def __init__(self, actor_critic, clip_param, ppo_epoch, num_mini_batch, value_loss_coef, entropy_coef, on_policy_epoch, off_policy_epoch, lr=None, eps=None, max_grad_norm=None, amsgrad=True, weight_decay=0.0, curiosity_reward_coef=0.1, forward_loss_coef=0.2, inverse_loss_coef=0.8):
        self.actor_critic = actor_critic
        self.clip_param = clip_param
        self.ppo_epoch = ppo_epoch
        self.on_policy_epoch = on_policy_epoch
        self.off_policy_epoch = off_policy_epoch
        self.num_mini_batch = num_mini_batch
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.forward_loss_coef = forward_loss_coef
        self.inverse_loss_coef = inverse_loss_coef
        self.curiosity_reward_coef = curiosity_reward_coef
        self.max_grad_norm = max_grad_norm
        self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        self.last_grad_norm = None

    def update(self, rollouts):
        """Run shuffled on/off-policy epochs of curiosity-augmented PPO.

        Returns:
            (value_loss, action_loss, dist_entropy, max_importance_weight, {})
        """
        value_loss_epoch = 0
        action_loss_epoch = 0
        dist_entropy_epoch = 0
        self.forward_loss_epoch = 0
        self.inverse_loss_epoch = 0
        max_importance_weight_epoch = 0
        # Interleave on-policy (0) and off-policy (1) epochs in random order.
        on_policy = ([0] * self.on_policy_epoch)
        off_policy = ([1] * self.off_policy_epoch)
        epochs = (on_policy + off_policy)
        random.shuffle(epochs)
        for e in epochs:
            if (e == 0):
                data_generator = rollouts.feed_forward_generator_with_next_state(None, self.num_mini_batch, on_policy=True)
            else:
                data_generator = rollouts.feed_forward_generator_with_next_state(None, self.num_mini_batch, on_policy=False)
            for sample in data_generator:
                (observations_batch, next_observations_batch, states_batch, actions_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ) = sample
                actions_batch_long = actions_batch.type(torch.cuda.LongTensor)
                (values, action_log_probs, dist_entropy, next_states_batch) = self.actor_critic.evaluate_actions(observations_batch, states_batch, masks_batch, actions_batch)
                state_feats = self.actor_critic.base.perception_unit(observations_batch)
                next_state_feats = self.actor_critic.base.perception_unit(next_observations_batch)
                # Inverse model: predict the action from (phi(s), phi(s')).
                pred_action = self.actor_critic.base.inverse_model(state_feats, next_state_feats)
                self.inverse_loss = F.cross_entropy(pred_action, actions_batch_long.squeeze(1))
                one_hot_actions = torch.zeros((actions_batch.shape[0], self.actor_critic.dist.num_outputs), device=actions_batch.device)
                one_hot_actions.scatter_(1, actions_batch_long, 1.0)
                # Forward model: predict phi(s') from (phi(s), a); its error
                # is the curiosity signal.
                pred_next_state = self.actor_critic.base.forward_model(state_feats, one_hot_actions)
                self.forward_loss = F.mse_loss(pred_next_state, next_state_feats)
                curiosity_bonus = (self.curiosity_reward_coef * self.forward_loss)
                # Detached: the bonus shapes advantages but does not
                # backpropagate into the forward model through adv_targ.
                adv_targ += curiosity_bonus.detach()
                adv_targ = ((adv_targ - adv_targ.mean()) / (adv_targ.std() + 1e-05))
                ratio = torch.exp((action_log_probs - old_action_log_probs_batch))
                surr1 = (ratio * adv_targ)
                surr2 = (torch.clamp(ratio, (1.0 - self.clip_param), (1.0 + self.clip_param)) * adv_targ)
                self.action_loss = (- torch.min(surr1, surr2).mean())
                self.value_loss = F.mse_loss(values, return_batch)
                self.dist_entropy = dist_entropy
                self.optimizer.zero_grad()
                self.get_loss().backward()
                # Clip the curiosity submodules and the full policy.
                nn.utils.clip_grad_norm_(self.actor_critic.base.forward_model.parameters(), self.max_grad_norm)
                nn.utils.clip_grad_norm_(self.actor_critic.base.inverse_model.parameters(), self.max_grad_norm)
                self.last_grad_norm = nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
                self.optimizer.step()
                value_loss_epoch += self.value_loss.item()
                action_loss_epoch += self.action_loss.item()
                dist_entropy_epoch += dist_entropy.item()
                self.forward_loss_epoch += self.forward_loss.item()
                self.inverse_loss_epoch += self.inverse_loss.item()
                max_importance_weight_epoch = max(torch.max(ratio).item(), max_importance_weight_epoch)
        self.last_update_max_importance_weight = max_importance_weight_epoch
        # NOTE(review): normalization uses self.ppo_epoch, but the loop runs
        # on_policy_epoch + off_policy_epoch epochs -- confirm these match.
        num_updates = (self.ppo_epoch * self.num_mini_batch)
        value_loss_epoch /= num_updates
        action_loss_epoch /= num_updates
        dist_entropy_epoch /= num_updates
        return (value_loss_epoch, action_loss_epoch, dist_entropy_epoch, max_importance_weight_epoch, {})

    def get_loss(self):
        """Combined PPO + curiosity loss from the most recent minibatch."""
        return (((((self.value_loss * self.value_loss_coef) + self.action_loss) - (self.dist_entropy * self.entropy_coef)) + (self.forward_loss * self.forward_loss_coef)) + (self.inverse_loss * self.inverse_loss_coef))
|
class PPOReplay(object):
    """PPO trainer whose optimization epochs mix on-policy and off-policy
    minibatches (replayed from the rollout storage) in shuffled order.
    """
    def __init__(self, actor_critic: BasePolicy, clip_param, ppo_epoch, num_mini_batch, value_loss_coef, entropy_coef, on_policy_epoch, off_policy_epoch, num_steps, n_frames, lr=None, eps=None, max_grad_norm=None, amsgrad=True, weight_decay=0.0, gpu_devices=None, loss_kwargs={}, cache_kwargs={}, optimizer_class='optim.Adam', optimizer_kwargs={}):
        # NOTE(review): loss_kwargs/cache_kwargs/optimizer_kwargs are mutable
        # default arguments shared across instances, and loss_kwargs is
        # mutated below — confirm no two trainers share the default dict.
        self.actor_critic = actor_critic
        self.clip_param = clip_param
        self.on_policy_epoch = on_policy_epoch
        self.off_policy_epoch = off_policy_epoch
        self.num_mini_batch = num_mini_batch
        self.num_steps = num_steps
        self.n_frames = n_frames
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.loss_kwargs = loss_kwargs
        # Default the intrinsic-loss configuration to "no intrinsic losses".
        self.loss_kwargs['intrinsic_loss_coefs'] = (self.loss_kwargs['intrinsic_loss_coefs'] if ('intrinsic_loss_coefs' in loss_kwargs) else [])
        self.loss_kwargs['intrinsic_loss_types'] = (self.loss_kwargs['intrinsic_loss_types'] if ('intrinsic_loss_types' in loss_kwargs) else [])
        assert (len(loss_kwargs['intrinsic_loss_coefs']) == len(loss_kwargs['intrinsic_loss_types'])), 'must have same number of losses as loss_coefs'
        self.max_grad_norm = max_grad_norm
        # NOTE(review): `eval` on optimizer_class executes arbitrary config
        # text — only safe with trusted configs. Parameters whose name
        # contains 'alpha' are exempted from weight decay.
        self.optimizer = eval(optimizer_class)([{'params': [param for (name, param) in actor_critic.named_parameters() if ('alpha' in name)], 'weight_decay': 0.0}, {'params': [param for (name, param) in actor_critic.named_parameters() if ('alpha' not in name)]}], lr=lr, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, **optimizer_kwargs)
        self.last_grad_norm = None
        self.gpu_devices = gpu_devices
    def update(self, rollouts):
        """Run one full optimization pass over `rollouts`.

        Performs on_policy_epoch + off_policy_epoch epochs in shuffled
        order, each split into num_mini_batch clipped-PPO updates plus any
        configured intrinsic losses.

        Returns (value_loss, action_loss, dist_entropy,
        max_importance_weight, info): losses averaged per update; info
        aggregates intrinsic losses and cached policy diagnostics.
        """
        value_loss_epoch = 0
        action_loss_epoch = 0
        dist_entropy_epoch = 0
        max_importance_weight_epoch = 0
        # Build a shuffled schedule: 0 = on-policy epoch, 1 = off-policy epoch.
        on_policy = ([0] * self.on_policy_epoch)
        off_policy = ([1] * self.off_policy_epoch)
        epochs = (on_policy + off_policy)
        random.shuffle(epochs)
        info = {}
        # Yield CPU tensors when multiple GPUs will consume the minibatches.
        yield_cuda = (not ((torch.cuda.device_count() > 1) and ((self.gpu_devices is None) or (len(self.gpu_devices) > 1))))
        for e in epochs:
            if (e == 0):
                data_generator = rollouts.feed_forward_generator(None, self.num_mini_batch, on_policy=True, device=self.gpu_devices[0], yield_cuda=yield_cuda)
            else:
                data_generator = rollouts.feed_forward_generator(None, self.num_mini_batch, on_policy=False, device=self.gpu_devices[0], yield_cuda=yield_cuda)
            for sample in data_generator:
                (observations_batch, states_batch, actions_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ) = sample
                cache = {}
                (values, action_log_probs, dist_entropy, states) = self.actor_critic.evaluate_actions(observations_batch, states_batch, masks_batch, actions_batch, cache)
                intrinsic_loss_dict = self.actor_critic.compute_intrinsic_losses(self.loss_kwargs, observations_batch, states_batch, masks_batch, actions_batch, cache)
                # Clipped PPO surrogate objective.
                ratio = torch.exp((action_log_probs - old_action_log_probs_batch.to(self.gpu_devices[0])))
                surr1 = (ratio * adv_targ.to(self.gpu_devices[0]))
                surr2 = (torch.clamp(ratio, (1.0 - self.clip_param), (1.0 + self.clip_param)) * adv_targ.to(self.gpu_devices[0]))
                action_loss = (- torch.min(surr1, surr2).mean())
                value_loss = F.mse_loss(values, return_batch.to(self.gpu_devices[0]))
                self.optimizer.zero_grad()
                total_loss = (((value_loss * self.value_loss_coef) + action_loss) - (dist_entropy * self.entropy_coef))
                for (iloss, iloss_coef) in zip(self.loss_kwargs['intrinsic_loss_types'], self.loss_kwargs['intrinsic_loss_coefs']):
                    total_loss += (intrinsic_loss_dict[iloss] * iloss_coef)
                total_loss.backward()
                self.last_grad_norm = nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
                self.optimizer.step()
                value_loss_epoch += value_loss.item()
                action_loss_epoch += action_loss.item()
                dist_entropy_epoch += dist_entropy.item()
                # Accumulate intrinsic-loss magnitudes for logging.
                for iloss in self.loss_kwargs['intrinsic_loss_types']:
                    if (iloss in info):
                        info[iloss] += intrinsic_loss_dict[iloss].item()
                    else:
                        info[iloss] = intrinsic_loss_dict[iloss].item()
                # Flatten and concatenate any tensors the policy cached.
                for key in cache:
                    key_flat = torch.cat(cache[key]).view((- 1)).detach()
                    if (key in info):
                        info[key] = torch.cat((info[key], key_flat))
                    else:
                        info[key] = key_flat
                max_importance_weight_epoch = max(torch.max(ratio).item(), max_importance_weight_epoch)
        num_updates = ((self.on_policy_epoch + self.off_policy_epoch) * self.num_mini_batch)
        value_loss_epoch /= num_updates
        action_loss_epoch /= num_updates
        dist_entropy_epoch /= num_updates
        for iloss in self.loss_kwargs['intrinsic_loss_types']:
            info[iloss] /= num_updates
        return (value_loss_epoch, action_loss_epoch, dist_entropy_epoch, max_importance_weight_epoch, info)
|
class Categorical(nn.Module):
    """Categorical action head: a linear layer producing logits."""
    def __init__(self, num_inputs, num_outputs):
        super(Categorical, self).__init__()
        self.num_outputs = num_outputs

        def init_(m):
            # Orthogonal weights with a small gain, zero biases.
            return init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0), gain=0.01)

        self.linear = init_(nn.Linear(num_inputs, num_outputs))

    def forward(self, x):
        logits = self.linear(x)
        return FixedCategorical(logits=logits)
|
class DiagGaussian(nn.Module):
    """Diagonal-Gaussian action head.

    The mean is a learned linear function of the input; the log standard
    deviation is a state-independent learned bias (via AddBias on zeros).
    """
    def __init__(self, num_inputs, num_outputs):
        super(DiagGaussian, self).__init__()
        self.num_outputs = num_outputs
        init_ = (lambda m: init(m, init_normc_, (lambda x: nn.init.constant_(x, 0))))
        self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
        # Feeding zeros through AddBias returns the learned log-std vector.
        self.logstd = AddBias(torch.zeros(num_outputs))

    def forward(self, x):
        action_mean = self.fc_mean(x)
        # zeros_like keeps dtype/device in sync with action_mean; the old
        # torch.zeros(...).cuda() pattern broke on non-default CUDA devices.
        zeros = torch.zeros_like(action_mean)
        action_logstd = self.logstd(zeros)
        return FixedNormal(action_mean, action_logstd.exp())
|
class Flatten(nn.Module):
    """Flatten every dimension except the batch dimension."""
    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
|
class LearnerModel(nn.Module):
    """Abstract base class for policy/value network bodies.

    Subclasses must provide `state_size`, `output_size` and `forward`.
    """
    def __init__(self, num_inputs):
        super().__init__()

    @property
    def state_size(self):
        """Width of the recurrent state (abstract)."""
        raise NotImplementedError('state_size not implemented in abstract class LearnerModel')

    @property
    def output_size(self):
        """Width of the feature output (abstract)."""
        raise NotImplementedError('output_size not implemented in abstract class LearnerModel')

    def forward(self, inputs, states, masks):
        raise NotImplementedError('forward not implemented in abstract class LearnerModel')
|
class CNNModel(nn.Module):
    """Stub CNN model wrapper; currently only records its input transforms."""
    def __init__(self, num_inputs, use_gru, input_transforms=None):
        super().__init__()
        # num_inputs and use_gru are accepted but not yet used.
        self.input_transforms = input_transforms
|
class CNNBase(nn.Module):
    """Convolutional torso with optional GRU and a scalar value head.

    The conv stack feeds a Linear(32*7*7, 512), so inputs must produce a
    7x7x32 feature map — presumably 84x84 observations; TODO confirm.
    """
    def __init__(self, num_inputs, use_gru):
        super(CNNBase, self).__init__()
        # Orthogonal init with ReLU gain for the torso, zero biases.
        init_ = (lambda m: init(m, nn.init.orthogonal_, (lambda x: nn.init.constant_(x, 0)), nn.init.calculate_gain('relu')))
        self.main = nn.Sequential(init_(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(), init_(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(), init_(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(), Flatten(), init_(nn.Linear(((32 * 7) * 7), 512)), nn.ReLU())
        if use_gru:
            self.gru = nn.GRUCell(512, 512)
            nn.init.orthogonal_(self.gru.weight_ih.data)
            nn.init.orthogonal_(self.gru.weight_hh.data)
            self.gru.bias_ih.data.fill_(0)
            self.gru.bias_hh.data.fill_(0)
        # Value head uses the default gain (1.0).
        init_ = (lambda m: init(m, nn.init.orthogonal_, (lambda x: nn.init.constant_(x, 0))))
        self.critic_linear = init_(nn.Linear(512, 1))
        self.train()
    @property
    def state_size(self):
        # Recurrent state width when a GRU exists; 1 acts as a placeholder.
        if hasattr(self, 'gru'):
            return 512
        else:
            return 1
    @property
    def output_size(self):
        return 512
    def forward(self, inputs, states, masks):
        """Return (value, features, recurrent_states) for `inputs`."""
        x = self.main(inputs)
        if hasattr(self, 'gru'):
            if (inputs.size(0) == states.size(0)):
                # Single-step case: one GRU step; masks zero the state at
                # episode boundaries.
                x = states = self.gru(x, (states * masks))
            else:
                # Batched T*N sequence: unroll the GRU one timestep at a time.
                N = states.size(0)
                T = int((x.size(0) / N))
                x = x.view(T, N, x.size(1))
                masks = masks.view(T, N, 1)
                outputs = []
                for i in range(T):
                    hx = states = self.gru(x[i], (states * masks[i]))
                    outputs.append(hx)
                x = torch.stack(outputs, dim=0)
                x = x.view((T * N), (- 1))
        return (self.critic_linear(x), x, states)
|
class MLPBase(nn.Module):
    """Two-tower MLP body: separate 64-wide actor and critic trunks with tanh."""
    def __init__(self, num_inputs):
        super(MLPBase, self).__init__()

        def init_(m):
            return init(m, init_normc_, lambda x: nn.init.constant_(x, 0))

        hidden = 64
        self.actor = nn.Sequential(
            init_(nn.Linear(num_inputs, hidden)), nn.Tanh(),
            init_(nn.Linear(hidden, hidden)), nn.Tanh())
        self.critic = nn.Sequential(
            init_(nn.Linear(num_inputs, hidden)), nn.Tanh(),
            init_(nn.Linear(hidden, hidden)), nn.Tanh())
        self.critic_linear = init_(nn.Linear(hidden, 1))
        self.train()

    @property
    def state_size(self):
        # Non-recurrent body: placeholder state of size 1.
        return 1

    @property
    def output_size(self):
        return 64

    def forward(self, inputs, states, masks):
        """Return (value, actor features, states); states pass through unchanged."""
        value = self.critic_linear(self.critic(inputs))
        actor_features = self.actor(inputs)
        return (value, actor_features, states)
|
class PreprocessingTranforms(object):
    """Placeholder for input-preprocessing transforms (not yet implemented).

    NOTE(review): class name is misspelled ("Tranforms") but kept for
    compatibility with existing callers.
    """
    def __init__(self, input_dims):
        # Intentionally a no-op for now.
        pass

    def forward(self, batch):
        # No transform applied; returns None like the original stub.
        pass
|
class SegmentTree():
    """Sum tree backing a prioritized replay ring buffer.

    Holds `size` items alongside a binary tree of their priorities,
    giving O(log n) priority updates and proportional sampling via `find`.
    """
    def __init__(self, size):
        self.index = 0
        self.size = size
        self.full = False
        # Flat heap layout: size-1 internal nodes followed by `size` leaves.
        self.sum_tree = [0] * (2 * size - 1)
        self.data = [None] * size
        self.max = 1

    def _propagate(self, index, value):
        # Recompute every ancestor of `index` from its two children.
        node = index
        while node != 0:
            node = (node - 1) // 2
            self.sum_tree[node] = self.sum_tree[2 * node + 1] + self.sum_tree[2 * node + 2]

    def update(self, index, value):
        """Set the priority stored at tree position `index`."""
        self.sum_tree[index] = value
        self._propagate(index, value)
        self.max = max(value, self.max)

    def append(self, data, value):
        """Store `data` with priority `value`, overwriting the oldest slot."""
        self.data[self.index] = data
        self.update(self.index + self.size - 1, value)
        self.index = (self.index + 1) % self.size
        self.full = self.full or self.index == 0
        self.max = max(value, self.max)

    def _retrieve(self, index, value):
        # Descend to the leaf whose prefix-sum interval contains `value`.
        left = 2 * index + 1
        if left >= len(self.sum_tree):
            return index
        if value <= self.sum_tree[left]:
            return self._retrieve(left, value)
        return self._retrieve(left + 1, value - self.sum_tree[left])

    def find(self, value):
        """Return (priority, data index, tree index) for prefix-sum `value`."""
        tree_index = self._retrieve(0, value)
        data_index = tree_index - self.size + 1
        return (self.sum_tree[tree_index], data_index, tree_index)

    def get(self, data_index):
        """Fetch stored data, wrapping around the ring buffer."""
        return self.data[data_index % self.size]

    def total(self):
        """Sum of all stored priorities (the tree root)."""
        return self.sum_tree[0]
|
class ReplayMemory():
    """Prioritized replay buffer for dict-valued states (Rainbow-style).

    Transitions live in a SegmentTree ring buffer; `sample` draws
    proportionally to stored priorities and returns importance-sampling
    weights normalized by their maximum.
    """
    def __init__(self, device, history_length, discount, multi_step, priority_weight, priority_exponent, capacity, blank_state):
        self.device = device
        self.capacity = capacity
        self.history = history_length
        self.discount = discount
        self.n = multi_step  # n-step return horizon
        self.priority_weight = priority_weight  # IS-weight exponent (beta)
        self.priority_exponent = priority_exponent  # priority exponent (omega)
        self.t = 0  # timestep within the current episode
        self.transitions = SegmentTree(capacity)
        self.keys = blank_state.keys()
        # Padding transition used before episode starts / after terminals.
        self.blank_trans = Transition(0, blank_state, None, None, 0, False)
    def append(self, state, action, action_log_probs, reward, terminal):
        """Store a transition with maximal priority; reset episode clock on terminal."""
        # Keep only the latest frame, as float32 on the CPU, to bound memory.
        state = {k: state[k].peek()[(- 1)].to(dtype=torch.float32, device=torch.device('cpu')) for k in state}
        self.transitions.append(Transition(self.t, state, action, action_log_probs, reward, (not terminal)), self.transitions.max)
        self.t = (0 if terminal else (self.t + 1))
    def _get_transition(self, idx):
        # Reassemble `history` frames of context plus `n` lookahead steps,
        # padding with blanks across episode boundaries.
        transition = ([None] * (self.history + self.n))
        transition[(self.history - 1)] = self.transitions.get(idx)
        for t in range((self.history - 2), (- 1), (- 1)):
            if (transition[(t + 1)].timestep == 0):
                transition[t] = self.blank_trans
            else:
                transition[t] = self.transitions.get((((idx - self.history) + 1) + t))
        for t in range(self.history, (self.history + self.n)):
            if transition[(t - 1)].nonterminal:
                transition[t] = self.transitions.get((((idx - self.history) + 1) + t))
            else:
                transition[t] = self.blank_trans
        return transition
    def _get_sample_from_segment(self, segment, i):
        """Rejection-sample a valid transition from the i-th priority segment."""
        valid = False
        while (not valid):
            sample = random.uniform((i * segment), ((i + 1) * segment))
            (prob, idx, tree_idx) = self.transitions.find(sample)
            # Reject indices too close to the write head or with zero priority.
            if ((((self.transitions.index - idx) % self.capacity) > self.n) and (((idx - self.transitions.index) % self.capacity) >= self.history) and (prob != 0)):
                valid = True
        transition = self._get_transition(idx)
        state = {k: torch.stack([trans.state[k] for trans in transition[:self.history]]).to(dtype=torch.float32, device=self.device) for k in self.keys}
        next_state = {k: torch.stack([trans.state[k] for trans in transition[self.n:(self.n + self.history)]]).to(dtype=torch.float32, device=self.device) for k in self.keys}
        action = torch.tensor([transition[(self.history - 1)].action], dtype=torch.int64, device=self.device)
        action_log_prob = torch.tensor([transition[(self.history - 1)].action_log_prob], dtype=torch.float32, device=self.device)
        # Discounted n-step return.
        R = torch.tensor([sum((((self.discount ** n) * transition[((self.history + n) - 1)].reward) for n in range(self.n)))], dtype=torch.float32, device=self.device)
        nonterminal = torch.tensor([transition[((self.history + self.n) - 1)].nonterminal], dtype=torch.float32, device=self.device)
        return (prob, idx, tree_idx, state, action, action_log_prob, R, next_state, nonterminal)
    def sample(self, batch_size):
        """Sample a prioritized batch; returns batched tensors plus IS weights."""
        p_total = self.transitions.total()
        segment = (p_total / batch_size)
        batch = [self._get_sample_from_segment(segment, i) for i in range(batch_size)]
        (probs, idxs, tree_idxs, states, actions, action_log_probs, returns, next_states, nonterminals) = zip(*batch)
        states = {k: torch.stack([state[k] for state in states]).squeeze_() for k in self.keys}
        next_states = {k: torch.stack([state[k] for state in next_states]).squeeze_() for k in self.keys}
        (actions, action_log_probs, returns, nonterminals) = (torch.cat(actions), torch.cat(action_log_probs), torch.cat(returns), torch.stack(nonterminals))
        probs = (torch.tensor(probs, dtype=torch.float32, device=self.device) / p_total)
        capacity = (self.capacity if self.transitions.full else self.transitions.index)
        # Importance-sampling weights, normalized by the batch maximum.
        weights = ((capacity * probs) ** (- self.priority_weight))
        weights = (weights / weights.max())
        return (tree_idxs, states, actions, action_log_probs, returns, next_states, nonterminals, weights)
    def update_priorities(self, idxs, priorities):
        """Raise new priorities to `priority_exponent` and write them back."""
        priorities.pow_(self.priority_exponent)
        [self.transitions.update(idx, priority) for (idx, priority) in zip(idxs, priorities)]
    def __iter__(self):
        # Iterate stored states in insertion order (e.g. for validation).
        self.current_idx = 0
        return self
    def __next__(self):
        if (self.current_idx == self.capacity):
            raise StopIteration
        state_stack = ([None] * self.history)
        state_stack[(- 1)] = self.transitions.data[self.current_idx].state
        prev_timestep = self.transitions.data[self.current_idx].timestep
        for t in reversed(range((self.history - 1))):
            if (prev_timestep == 0):
                # Fix: the original referenced an undefined bare `blank_trans`;
                # use the instance's padding transition (as _get_transition does).
                state_stack[t] = self.blank_trans.state
            else:
                state_stack[t] = self.transitions.data[(((self.current_idx + t) - self.history) + 1)].state
                prev_timestep -= 1
        # NOTE(review): stacking assumes tensor-valued states, while `append`
        # stores dicts — confirm __iter__ is only used with tensor states.
        state = torch.stack(state_stack, 0).to(dtype=torch.float32, device=self.device).div_(255)
        self.current_idx += 1
        return state
|
class RolloutSensorDictCuriosityReplayBuffer(object):
    """Ring-buffer rollout storage over dict observations.

    Supports on-policy (most recent window) and off-policy (random window)
    minibatch generation, computing returns with GAE or plain n-step
    bootstrapping on the sampled window.
    """
    def __init__(self, num_steps, num_processes, obs_shape, action_space, state_size, actor_critic, use_gae, gamma, tau, memory_size=10000):
        self.num_steps = num_steps
        self.num_processes = num_processes
        self.state_size = state_size
        self.memory_size = memory_size
        self.obs_shape = obs_shape
        self.sensor_names = set(obs_shape.keys())
        self.observations = SensorDict({k: torch.zeros(memory_size, num_processes, *ob_shape) for (k, ob_shape) in obs_shape.items()})
        self.states = torch.zeros(memory_size, num_processes, state_size)
        self.rewards = torch.zeros(memory_size, num_processes, 1)
        self.value_preds = torch.zeros(memory_size, num_processes, 1)
        self.returns = torch.zeros(memory_size, num_processes, 1)
        self.action_log_probs = torch.zeros(memory_size, num_processes, 1)
        self.actions = torch.zeros(memory_size, num_processes, 1)
        self.masks = torch.ones(memory_size, num_processes, 1)
        self.actor_critic = actor_critic
        self.use_gae = use_gae
        self.gamma = gamma
        self.tau = tau
        # (redundant duplicate assignment of self.num_steps removed)
        self.step = 0
        self.memory_occupied = 0
    def cuda(self):
        """Move all storage tensors and the policy to the default CUDA device."""
        self.observations = self.observations.apply((lambda k, v: v.cuda()))
        self.states = self.states.cuda()
        self.rewards = self.rewards.cuda()
        self.value_preds = self.value_preds.cuda()
        self.returns = self.returns.cuda()
        self.action_log_probs = self.action_log_probs.cuda()
        self.actions = self.actions.cuda()
        self.masks = self.masks.cuda()
        self.actor_critic = self.actor_critic.cuda()
    def insert(self, current_obs, state, action, action_log_prob, value_pred, reward, mask):
        """Write one environment step into the ring buffer.

        Observations/states/masks are written at the NEXT slot;
        actions/log-probs/values/rewards belong to the current slot.
        """
        next_step = ((self.step + 1) % self.memory_size)
        # Copy each sensor tensor in parallel.
        modules = [self.observations[k][next_step].copy_ for k in self.observations]
        inputs = tuple([(current_obs[k].peek(),) for k in self.observations])
        nn.parallel.parallel_apply(modules, inputs)
        self.states[next_step].copy_(state)
        self.actions[self.step].copy_(action)
        self.action_log_probs[self.step].copy_(action_log_prob)
        self.value_preds[self.step].copy_(value_pred)
        self.rewards[self.step].copy_(reward)
        self.masks[next_step].copy_(mask)
        self.step = ((self.step + 1) % self.memory_size)
        if (self.memory_occupied < self.memory_size):
            self.memory_occupied += 1
    def get_current_observation(self):
        return self.observations.at(self.step)
    def get_current_state(self):
        return self.states[self.step]
    def get_current_mask(self):
        return self.masks[self.step]
    def after_update(self):
        pass
    def feed_forward_generator_with_next_state(self, advantages, num_mini_batch, on_policy=True):
        """Yield minibatches including next-state observations (for curiosity)."""
        if (on_policy or (self.memory_occupied < self.memory_size)):
            stop_idx = (self.step - 1)
            start_idx = (((self.step - self.num_steps) - 1) % self.memory_size)
        else:
            # Random historical window of num_steps transitions.
            start_idx = (((self.step - 1) - np.random.randint((self.num_steps + 1), self.memory_size)) % self.memory_size)
            stop_idx = (((start_idx + self.num_steps) - 1) % self.memory_size)
        observations_sample = SensorDict({k: torch.zeros((self.num_steps + 1), self.num_processes, *ob_shape) for (k, ob_shape) in self.obs_shape.items()}).apply((lambda k, v: v.cuda()))
        next_observations_sample = SensorDict({k: torch.zeros((self.num_steps + 1), self.num_processes, *ob_shape) for (k, ob_shape) in self.obs_shape.items()}).apply((lambda k, v: v.cuda()))
        states_sample = torch.zeros((self.num_steps + 1), self.num_processes, self.state_size).cuda()
        rewards_sample = torch.zeros(self.num_steps, self.num_processes, 1).cuda()
        values_sample = torch.zeros((self.num_steps + 1), self.num_processes, 1).cuda()
        returns_sample = torch.zeros((self.num_steps + 1), self.num_processes, 1).cuda()
        action_log_probs_sample = torch.zeros(self.num_steps, self.num_processes, 1).cuda()
        actions_sample = torch.zeros(self.num_steps, self.num_processes, 1).cuda()
        masks_sample = torch.ones((self.num_steps + 1), self.num_processes, 1).cuda()
        idx = start_idx
        sample_idx = 0
        while (idx != (stop_idx % self.memory_size)):
            next_idx = ((idx + 1) % self.memory_size)
            for k in self.observations:
                observations_sample[k][sample_idx] = self.observations[k][idx]
                # Per-process next observation; repeat the current one at
                # episode boundaries so the pair stays within one episode.
                for (j, not_done) in enumerate(self.masks[next_idx]):
                    if (not_done > 0.5):
                        next_observations_sample[k][sample_idx][j] = self.observations[k][next_idx][j]
                    else:
                        next_observations_sample[k][sample_idx][j] = self.observations[k][idx][j]
            states_sample[sample_idx] = self.states[idx]
            try:
                rewards_sample[sample_idx] = self.rewards[idx]
            except:
                # Debug aid: dump indexing context, then re-raise.
                print(rewards_sample, self.rewards)
                print(sample_idx, idx, next_idx, start_idx, stop_idx)
                raise
            action_log_probs_sample[sample_idx] = self.action_log_probs[idx]
            actions_sample[sample_idx] = self.actions[idx]
            masks_sample[sample_idx] = self.masks[idx]
            with torch.no_grad():
                # Single no-grad critic call (the original ran the critic
                # twice per step, discarding one result and building an
                # unused autograd graph with the other).
                values_sample[sample_idx] = self.actor_critic.get_value(self.observations.at(idx), self.states[idx], self.masks[idx])
            idx = next_idx
            sample_idx += 1
        with torch.no_grad():
            next_value = self.actor_critic.get_value(self.observations.at(stop_idx), self.states[stop_idx], self.masks[stop_idx])
        if self.use_gae:
            values_sample[(- 1)] = next_value
            gae = 0
            for step in reversed(range(rewards_sample.size(0))):
                delta = ((rewards_sample[step] + ((self.gamma * values_sample[(step + 1)]) * masks_sample[(step + 1)])) - values_sample[step])
                gae = (delta + (((self.gamma * self.tau) * masks_sample[(step + 1)]) * gae))
                returns_sample[step] = (gae + values_sample[step])
        else:
            # Fix: original referenced undefined `returns`/`masks_batch` and
            # iterated over the whole buffer; use the sampled window, mirroring
            # the GAE branch.
            returns_sample[(- 1)] = next_value
            for step in reversed(range(rewards_sample.size(0))):
                returns_sample[step] = (((returns_sample[(step + 1)] * self.gamma) * masks_sample[(step + 1)]) + rewards_sample[step])
        mini_batch_size = (self.num_steps // num_mini_batch)
        observations_batch = {}
        next_observations_batch = {}
        sampler = BatchSampler(SubsetRandomSampler(range(self.num_steps)), mini_batch_size, drop_last=False)
        advantages = (returns_sample[:(- 1)] - values_sample[:(- 1)])
        for indices in sampler:
            for (k, sensor_ob) in observations_sample.items():
                observations_batch[k] = sensor_ob[:(- 1)].view((- 1), *sensor_ob.size()[2:])[indices]
                next_observations_batch[k] = next_observations_sample[k][:(- 1)].view((- 1), *sensor_ob.size()[2:])[indices]
            states_batch = states_sample[:(- 1)].view((- 1), states_sample.size((- 1)))[indices]
            actions_batch = actions_sample.view((- 1), actions_sample.size((- 1)))[indices]
            return_batch = returns_sample[:(- 1)].view((- 1), 1)[indices]
            masks_batch = masks_sample[:(- 1)].view((- 1), 1)[indices]
            old_action_log_probs_batch = action_log_probs_sample.view((- 1), 1)[indices]
            adv_targ = advantages.view((- 1), 1)[indices]
            yield (observations_batch, next_observations_batch, states_batch, actions_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ)
    def feed_forward_generator(self, advantages, num_mini_batch, on_policy=True):
        """Yield minibatches with normalized advantages (no next-state obs)."""
        if (on_policy or (self.memory_occupied < self.memory_size)):
            stop_idx = self.step
            start_idx = ((self.step - self.num_steps) % self.memory_size)
        else:
            start_idx = ((self.step - np.random.randint((self.num_steps + 1), self.memory_size)) % self.memory_size)
            stop_idx = ((start_idx + self.num_steps) % self.memory_size)
        observations_sample = SensorDict({k: torch.zeros((self.num_steps + 1), self.num_processes, *ob_shape) for (k, ob_shape) in self.obs_shape.items()}).apply((lambda k, v: v.cuda()))
        states_sample = torch.zeros((self.num_steps + 1), self.num_processes, self.state_size).cuda()
        rewards_sample = torch.zeros(self.num_steps, self.num_processes, 1).cuda()
        values_sample = torch.zeros((self.num_steps + 1), self.num_processes, 1).cuda()
        returns_sample = torch.zeros((self.num_steps + 1), self.num_processes, 1).cuda()
        action_log_probs_sample = torch.zeros(self.num_steps, self.num_processes, 1).cuda()
        actions_sample = torch.zeros(self.num_steps, self.num_processes, 1).cuda()
        masks_sample = torch.ones((self.num_steps + 1), self.num_processes, 1).cuda()
        idx = start_idx
        sample_idx = 0
        while (idx != stop_idx):
            for k in self.observations:
                observations_sample[k][sample_idx] = self.observations[k][idx]
            states_sample[sample_idx] = self.states[idx]
            rewards_sample[sample_idx] = self.rewards[idx]
            action_log_probs_sample[sample_idx] = self.action_log_probs[idx]
            actions_sample[sample_idx] = self.actions[idx]
            masks_sample[sample_idx] = self.masks[idx]
            with torch.no_grad():
                # Single no-grad critic call (see note in the generator above).
                values_sample[sample_idx] = self.actor_critic.get_value(self.observations.at(idx), self.states[idx], self.masks[idx])
            idx = ((idx + 1) % self.memory_size)
            sample_idx += 1
        with torch.no_grad():
            next_value = self.actor_critic.get_value(self.observations.at(stop_idx), self.states[stop_idx], self.masks[stop_idx])
        if self.use_gae:
            values_sample[(- 1)] = next_value
            gae = 0
            for step in reversed(range(rewards_sample.size(0))):
                delta = ((rewards_sample[step] + ((self.gamma * values_sample[(step + 1)]) * masks_sample[(step + 1)])) - values_sample[step])
                gae = (delta + (((self.gamma * self.tau) * masks_sample[(step + 1)]) * gae))
                returns_sample[step] = (gae + values_sample[step])
        else:
            # Fix: same undefined-name repairs as in the generator above.
            returns_sample[(- 1)] = next_value
            for step in reversed(range(rewards_sample.size(0))):
                returns_sample[step] = (((returns_sample[(step + 1)] * self.gamma) * masks_sample[(step + 1)]) + rewards_sample[step])
        mini_batch_size = (self.num_steps // num_mini_batch)
        observations_batch = {}
        sampler = BatchSampler(SubsetRandomSampler(range(self.num_steps)), mini_batch_size, drop_last=False)
        advantages = (returns_sample[:(- 1)] - values_sample[:(- 1)])
        advantages = ((advantages - advantages.mean()) / (advantages.std() + 1e-05))
        for indices in sampler:
            for (k, sensor_ob) in observations_sample.items():
                observations_batch[k] = sensor_ob[:(- 1)].view((- 1), *sensor_ob.size()[2:])[indices]
            states_batch = states_sample[:(- 1)].view((- 1), states_sample.size((- 1)))[indices]
            actions_batch = actions_sample.view((- 1), actions_sample.size((- 1)))[indices]
            return_batch = returns_sample[:(- 1)].view((- 1), 1)[indices]
            masks_batch = masks_sample[:(- 1)].view((- 1), 1)[indices]
            old_action_log_probs_batch = action_log_probs_sample.view((- 1), 1)[indices]
            adv_targ = advantages.view((- 1), 1)[indices]
            yield (observations_batch, states_batch, actions_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ)
|
class SegmentTree(object):
    """Segment tree over a fixed-capacity array (1-indexed heap layout).

    Item assignment costs O(log capacity); `reduce` applies an associative
    `operation` over a contiguous range in O(log capacity).  `capacity`
    must be a power of two, and `neutral_element` the identity of
    `operation` (0 for sum, +inf for min, ...).
    """
    def __init__(self, capacity, operation, neutral_element):
        assert capacity > 0 and (capacity & (capacity - 1)) == 0, 'capacity must be positive and a power of 2.'
        self._capacity = capacity
        # Node 1 is the root; leaves occupy [capacity, 2 * capacity).
        self._value = [neutral_element] * (2 * capacity)
        self._operation = operation

    def _reduce_helper(self, start, end, node, node_start, node_end):
        # Reduce over [start, end] inside the node covering [node_start, node_end].
        if start == node_start and end == node_end:
            return self._value[node]
        mid = (node_start + node_end) // 2
        if end <= mid:
            return self._reduce_helper(start, end, 2 * node, node_start, mid)
        if start >= mid + 1:
            return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
        left_part = self._reduce_helper(start, mid, 2 * node, node_start, mid)
        right_part = self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
        return self._operation(left_part, right_part)

    def reduce(self, start=0, end=None):
        """Reduce `operation` over arr[start:end] (end exclusive; None = capacity)."""
        if end is None:
            end = self._capacity
        if end < 0:
            end += self._capacity
        end -= 1
        return self._reduce_helper(start, end, 1, 0, self._capacity - 1)

    def __setitem__(self, idx, val):
        # Write the leaf, then rebuild each ancestor from its two children.
        idx += self._capacity
        self._value[idx] = val
        idx //= 2
        while idx >= 1:
            self._value[idx] = self._operation(self._value[2 * idx], self._value[2 * idx + 1])
            idx //= 2

    def __getitem__(self, idx):
        assert 0 <= idx < self._capacity
        return self._value[self._capacity + idx]
|
class SumSegmentTree(SegmentTree):
    """SegmentTree specialized to addition, with prefix-sum search."""
    def __init__(self, capacity):
        super(SumSegmentTree, self).__init__(capacity=capacity, operation=operator.add, neutral_element=0.0)

    def sum(self, start=0, end=None):
        """Sum of the elements in [start, end)."""
        return super(SumSegmentTree, self).reduce(start, end)

    def find_prefixsum_idx(self, prefixsum):
        """Highest index i with sum(arr[:i]) <= prefixsum.

        With priorities as values this samples an index proportionally to
        its priority in O(log capacity).
        """
        assert 0 <= prefixsum <= self.sum() + 1e-05
        node = 1
        # Walk down, subtracting the left subtree whenever we branch right.
        while node < self._capacity:
            left = 2 * node
            if self._value[left] > prefixsum:
                node = left
            else:
                prefixsum -= self._value[left]
                node = left + 1
        return node - self._capacity
|
class MinSegmentTree(SegmentTree):
    """SegmentTree specialized to `min`, with +inf as the neutral element."""
    def __init__(self, capacity):
        super(MinSegmentTree, self).__init__(capacity=capacity, operation=min, neutral_element=float('inf'))

    def min(self, start=0, end=None):
        """Minimum of the elements in [start, end)."""
        return super(MinSegmentTree, self).reduce(start, end)
|
class AddBias(nn.Module):
    """Add a learned per-channel bias to the input.

    Feeding zeros returns the bias itself (used for state-independent
    log-std parameters).
    """
    def __init__(self, bias):
        super(AddBias, self).__init__()
        # Stored as (num_channels, 1) so it can be reshaped for 2-D or 4-D inputs.
        self._bias = nn.Parameter(bias.unsqueeze(1))

    def forward(self, x):
        if x.dim() == 2:
            shaped = self._bias.t().view(1, -1)
        else:
            # Broadcast over the spatial dimensions of conv feature maps.
            shaped = self._bias.t().view(1, -1, 1, 1)
        return x + shaped
|
def init(module, weight_init, bias_init, gain=1):
    """Initialize `module`'s weight and bias in place; return the module.

    `weight_init` receives the weight tensor plus `gain`; `bias_init`
    receives the bias tensor.
    """
    w, b = module.weight.data, module.bias.data
    weight_init(w, gain=gain)
    bias_init(b)
    return module
|
def init_normc_(weight, gain=1):
    """In-place init: sample N(0, 1), then rescale each row (dim-1 slice)
    to Euclidean norm `gain`."""
    weight.normal_(0, 1)
    row_norms = torch.sqrt(weight.pow(2).sum(1, keepdim=True))
    weight *= gain / row_norms
|
def load_experiment_configs(log_dir, uuid=None):
    """Load every config.json found one directory level under `log_dir`.

    If `uuid` is given, keep only configs whose 'uuid' field matches.
    """
    results = []
    for entry in os.listdir(log_dir):
        if not os.path.isdir(os.path.join(log_dir, entry)):
            continue
        cfg_path = os.path.join(log_dir, entry, 'config.json')
        if not os.path.exists(cfg_path):
            continue
        with open(cfg_path, 'r') as f:
            cfg = json.load(f)
        if uuid is None or cfg['uuid'] == uuid:
            results.append(cfg)
    return results
|
def load_experiment_config_paths(log_dir, uuid=None):
    """Return paths of config.json files one level under `log_dir`,
    optionally restricted to configs whose 'uuid' matches."""
    paths = []
    for entry in os.listdir(log_dir):
        if not os.path.isdir(os.path.join(log_dir, entry)):
            continue
        cfg_path = os.path.join(log_dir, entry, 'config.json')
        if not os.path.exists(cfg_path):
            continue
        with open(cfg_path, 'r') as f:
            cfg = json.load(f)
        if uuid is None or cfg['uuid'] == uuid:
            paths.append(cfg_path)
    return paths
|
def checkpoint_name(checkpoint_dir, epoch='latest'):
    """Path of the checkpoint file for `epoch` inside `checkpoint_dir`."""
    filename = 'ckpt-{}.dat'.format(epoch)
    return os.path.join(checkpoint_dir, filename)
|
def last_archived_run(base_dir, uuid):
    """Return the directory of the most recent archived run for `uuid`
    ('<base>/archive/<uuid>_run_<K>' with the largest K), or None."""
    archive_dir = os.path.join(base_dir, 'archive')
    pattern = os.path.join(archive_dir, uuid + '_run_*')
    existing_runs = glob.glob(pattern)
    print(pattern)
    if not existing_runs:
        return None
    # Run number is the suffix after the last underscore.
    latest = max(int(run.split('_')[-1]) for run in existing_runs)
    return os.path.join(archive_dir, '{}_run_{}'.format(uuid, latest))
|
def archive_current_run(base_dir, uuid):
    """Archive the current run: move base_dir/*uuid* into
    base_dir/archive/<uuid>_run_<K>, with K one past the highest existing
    run number (or 0)."""
    to_move = glob.glob(os.path.join(base_dir, (('*' + uuid) + '*')))
    if not to_move:
        return
    archive_dir = os.path.join(base_dir, 'archive')
    os.makedirs(archive_dir, exist_ok=True)
    previous = glob.glob(os.path.join(archive_dir, (uuid + '_run_*')))
    next_run = max((int(p.split('_')[-1]) for p in previous), default=-1) + 1
    dest = os.path.join(archive_dir, '{}_run_{}'.format(uuid, next_run))
    os.makedirs(dest)
    for path in to_move:
        shutil.move(path, dest)
    return
|
def save_checkpoint(obj, directory, step_num, use_thread=False):
    """Write `obj` to both the 'latest' checkpoint and a step-numbered one.

    `use_thread` is accepted for API compatibility, but saving remains
    synchronous.
    """
    if use_thread:
        warnings.warn('use_threads set to True, but done synchronously still')
    os.makedirs(directory, exist_ok=True)
    latest_path = checkpoint_name(directory)
    step_path = checkpoint_name(directory, step_num)
    for path in (latest_path, step_path):
        torch.save(obj, path, pickle_module=pickle)
|
class VisdomMonitor(Monitor):
    """gym Monitor subclass intended to mirror recordings to a Visdom server.

    NOTE(review): the original signature declared `env` twice (once for the
    wrapped gym env, once for the Visdom environment name), which is a
    SyntaxError; the Visdom one is renamed `visdom_env`.  The `server`,
    `visdom_env` and `port` arguments are currently unused — presumably
    hooks for streaming video to Visdom; confirm intended behavior.
    """
    def __init__(self, env, directory, video_callable=None, force=False, resume=False, write_upon_reset=False, uid=None, mode=None, server='localhost', visdom_env='main', port=8097):
        super(VisdomMonitor, self).__init__(env, directory, video_callable=video_callable, force=force, resume=resume, write_upon_reset=write_upon_reset, uid=uid, mode=mode)

    def _close_video_recorder(self):
        # NOTE(review): the original body was the bare expression
        # `video_recorder` (a NameError when called); defer to the base
        # class behavior instead.
        super(VisdomMonitor, self)._close_video_recorder()
|
def checkpoint_name(checkpoint_dir, epoch='latest'):
    """Build the checkpoint filename for `epoch` under `checkpoint_dir`.

    NOTE(review): duplicate of the earlier `checkpoint_name` definition in
    this file — consider consolidating.
    """
    return os.path.join(checkpoint_dir, 'ckpt-{}.dat'.format(epoch))
|
class FileStorageObserverWithExUuid(FileStorageObserver):
    """FileStorageObserver variant that derives the run `_id` from the
    config's 'uuid', so experiments are saved into meaningfully-named
    subdirectories instead of the standard incrementing counter."""
    UNUSED_VALUE = (- 1)

    def started_event(self, ex_info, command, host_info, start_time, config, meta_info, _id):
        # Consistency fix: enforce the same precondition as queued_event —
        # the uuid drives the directory name.
        assert ('uuid' in config), "The config must contain a key 'uuid'"
        _id = (config['uuid'] + '_metadata')
        super().started_event(ex_info, command, host_info, start_time, config, meta_info, _id=_id)

    def queued_event(self, ex_info, command, host_info, queue_time, config, meta_info, _id):
        assert ('uuid' in config), "The config must contain a key 'uuid'"
        _id = (config['uuid'] + '_metadata')
        super().queued_event(ex_info, command, host_info, queue_time, config, meta_info, _id=_id)
|
class VideoLogger(object):
    """Frame-by-frame video writer backed by FFmpeg (scikit-video).

    All frames must share the same dimensions.

    Example:
        >>> logger = VideoLogger("output.mp4")
        >>> for i in range(30):
        ...     logger.log(frame)
        >>> del logger  # or just let it go out of scope
    """
    def __init__(self, save_path, fps=30):
        rate = str(fps)
        self.writer = skvideo.io.FFmpegWriter(save_path, inputdict={'-r': rate}, outputdict={'-vcodec': 'libx264', '-r': rate})
        self.f_open = False

    def log(self, frame):
        """Append one WxHxC uint8 frame to the file."""
        self.writer.writeFrame(frame)

    def close(self):
        # `writer` may be missing if __init__ failed part-way through.
        try:
            self.writer.close()
        except AttributeError:
            pass

    def __del__(self):
        self.close()
|
def color_transitions_(i, k, width, height):
    """Frame ``i`` of ``k``: an HxWx3 uint8 gradient that sweeps over time.

    The first two channels blend from a coordinate gradient toward its
    inverse as i goes 0 -> k; the third channel is the constant ramp i/k.
    """
    t = i / k
    xs = np.linspace(0, 1.0, width)
    ys = np.linspace(0, 1.0, height)
    gradient = np.array(np.meshgrid(xs, ys))              # (2, height, width)
    gradient = (1.0 - t) * gradient + t * (1 - gradient)  # blend toward inverse
    ramp = np.ones_like(gradient[0][np.newaxis, ...]) * i / k
    stacked = np.concatenate([gradient, ramp], axis=0)    # (3, height, width)
    return np.uint8(np.rollaxis(stacked, 0, 3) * 255)     # channels-last
|
class SensorPack(dict):
    """A dict of named sensor tensors with batch-wise helpers.

    Fun fact, you can slice using np.s_. E.g.
        sensors.at(np.s_[:2])
    """

    def at(self, val):
        """Index every sensor with ``val`` and return the resulting pack."""
        sliced = {name: tensor[val] for name, tensor in self.items()}
        return SensorPack(sliced)

    def apply(self, lambda_fn):
        """Map ``lambda_fn(key, value)`` over every entry, keeping keys."""
        mapped = {name: lambda_fn(name, tensor) for name, tensor in self.items()}
        return SensorPack(mapped)

    def size(self, idx, key=None):
        """Batch size shared by the sensors; only ``idx == 0`` is supported."""
        assert (idx == 0), 'can only get batch size for SensorPack'
        if key is None:
            # Any sensor works: all are assumed to share the batch dim -- TODO confirm.
            key = list(self.keys())[0]
        return self[key].size(idx)
|
def replay_logs(existing_log_paths, mlog):
    """Re-log every training result pickle found under the given experiment paths."""
    result_pkls = combined_paths(existing_log_paths, 'result_log.pkl')
    save_training_logs(result_pkls, mlog)
|
def move_metadata_file(old_log_dir, new_log_dir, uuid):
    """Move the metadata entry for ``uuid`` from ``old_log_dir`` into ``new_log_dir``.

    ``get_subdir`` may return None, a single path string, or a list of paths;
    the original code assumed a list, so a single string was iterated
    character-by-character and ``None`` raised a TypeError. Normalize first.
    """
    candidates = get_subdir(old_log_dir, 'metadata')
    if candidates is None:
        candidates = []
    elif not isinstance(candidates, list):
        candidates = [candidates]
    fp_metadata_old = [fp for fp in candidates if (uuid in fp)]
    if (len(fp_metadata_old) == 0):
        logger.info(f'No metadata for new experiment found at {old_log_dir} for {uuid}')
    else:
        fp_metadata_new = new_log_dir
        logger.info(f'Moving logs from {fp_metadata_old[0]} to {fp_metadata_new}')
        # BUG FIX: the original passed the whole *list* to shutil.move, which
        # fails; move the first (logged) match, as the log message implies.
        shutil.move(fp_metadata_old[0], fp_metadata_new)
|
def checkpoint_name(checkpoint_dir, epoch='latest'):
    """Return the checkpoint file path for ``epoch`` under ``checkpoint_dir``.

    NOTE(review): duplicates an identical definition earlier in this file.
    """
    return os.path.join(checkpoint_dir, f'ckpt-{epoch}.dat')
|
def get_parent_dirname(path):
    """Return the name of the directory that directly contains ``path``."""
    parent = os.path.dirname(path)
    return os.path.basename(parent)
|
def get_subdir(training_directory, subdir_name):
    """
    look through all files/directories in training_directory
    return all files/subdirectories whose basename have subdir_name
    if 0, return none
    if 1, return it
    if more, return list of them

    e.g. training_directory: '/path/to/exp'
         subdir_name: 'checkpoints' (directory)
         subdir_name: 'rewards' (files)
    """
    root = training_directory.strip()
    # Substring match against each entry's basename, as listed by the OS.
    matches = [os.path.join(root, entry)
               for entry in os.listdir(root)
               if subdir_name in entry]
    if not matches:
        return None
    if len(matches) == 1:
        return matches[0]
    return matches
|
def read_pkl(pkl_name):
    """Load and return the object pickled in ``pkl_name``."""
    with open(pkl_name, 'rb') as handle:
        return pickle.load(handle)
|
def unused_dir_name(output_dir):
    """Return an output directory name that is not yet taken.

    If ``output_dir`` does not exist it is returned unchanged; otherwise a
    sibling name is produced by bumping the numeric suffix, e.g.
    '/eval/' -> '/eval1/' -> '/eval2/'.

    Returns:
        (output_dir, existing_output_paths): the chosen (possibly renamed)
        directory plus every sibling path already sharing the prefix.
    """
    existing_output_paths = []
    if os.path.exists(output_dir):
        # Strip a trailing separator so basename/dirname behave predictably.
        if os.path.basename(output_dir) == '':
            output_dir = os.path.dirname(output_dir)
        parent = os.path.dirname(output_dir)
        # Prefix = basename with any trailing digits removed.
        base_name_prefix = re.sub('\\d+$', '', os.path.basename(output_dir))
        existing_output_paths = get_subdir(parent, base_name_prefix)
        assert (existing_output_paths is not None), f'Bug, cannot find output_dir {output_dir}'
        if not isinstance(existing_output_paths, list):
            existing_output_paths = [existing_output_paths]
        # The last 5 characters are assumed to cover any numeric suffix -- TODO confirm.
        suffixes = [get_number(os.path.basename(path)[-5:]) for path in existing_output_paths]
        next_num = max(max(suffixes), 0) + 1
        output_dir = os.path.join(parent, f'{base_name_prefix}{next_num}', '')
    # Printed unconditionally, matching the original control flow.
    print('New output dir', output_dir)
    return (output_dir, existing_output_paths)
|
def combined_paths(paths, name):
    """
    Runs get_subdir on every path in paths then flattens
    Finds all files/directories in all paths whose basename includes name
    Returns all these in a one-dimensional list
    """
    collected = []
    for exp_path in paths:
        found = get_subdir(exp_path, name)
        if found is None:
            continue
        # get_subdir returns either a single path or a list of them.
        if isinstance(found, list):
            collected.extend(found)
        else:
            collected.append(found)
    return collected
|
def read_logs(pkl_name):
    """Return the first entry of the 'results' list stored in a result-log pickle."""
    data = read_pkl(pkl_name)
    return data['results'][0]
|
def save_training_logs(results_paths, mlog):
    """
    results_path is a list of experiment's result pkl file paths
    e.g. results_path = ['exp1/results_log.pkl', 'exp2/results_log.pkl']
    """
    # De-duplicate across experiments: only the first occurrence of a
    # step number gets logged.
    seen_steps = set()
    for results_path in results_paths:
        print(f'logging {results_path}')
        try:
            results = read_logs(results_path)
        except Exception as e:
            print(f'Could not read {results_path}. could be empty', e)
            continue
        for result in results:
            step = result['step_num']
            if step in seen_steps:
                continue
            seen_steps.add(step)
            # Remove the step marker so the remaining keys are all metrics.
            del result['step_num']
            for metric_name, metric_value in result.items():
                log(mlog, metric_name, metric_value, phase='train')
            reset_log(mlog, None, step, phase='train')
|
def save_testing_logs(eval_paths, mlog):
    """
    eval_paths is a list of eval runs path
    e.g. eval_paths = ['exp1/eval', 'exp1/eval1', 'exp2/eval']
    """
    per_epoch = []
    seen_epochs = set()
    # Phase 1: collect (epoch, (rewards, lengths)) from every 'rewards' pickle,
    # keeping only the first occurrence of each epoch number.
    for eval_path in eval_paths:
        for subdir in os.listdir(eval_path):
            if 'rewards' not in subdir:
                continue
            print(f'logging {eval_path}/{subdir}')
            epoch_num = get_number(subdir)
            if epoch_num in seen_epochs:
                continue
            seen_epochs.add(epoch_num)
            rewards_pkl = os.path.join(eval_path, subdir)
            try:
                records = read_logs(rewards_pkl)
                rewards = [r['reward'] for r in records]
                lengths = [r['length'] for r in records]
            except Exception as e:
                print(f'Could not read {rewards_pkl}', e)
                continue
            per_epoch.append((epoch_num, (rewards, lengths)))
    # Phase 2: log epochs in ascending order.
    per_epoch.sort(key=lambda item: item[0])
    for (epoch_num, (reward, length)) in per_epoch:
        reward = np.array(reward)
        avg_reward = np.mean(reward)
        length = np.array(length)
        avg_length = np.mean(length)
        print(f'logging epoch {epoch_num} with r={avg_reward} of length {avg_length}')
        log(mlog, 'rewards_all_epochs', avg_reward, phase='val')
        log(mlog, 'rewards_histogram', reward, phase='val')
        log(mlog, 'lengths_all_epochs', avg_length, phase='val')
        log(mlog, 'lengths_histogram', length, phase='val')
        reset_log(mlog, None, epoch_num, phase='val')
|
def save_train_testing(exp_paths, mlog):
    """Replay both the training and evaluation logs of the given experiments into mlog."""
    save_training_logs(combined_paths(exp_paths, 'result_log.pkl'), mlog)
    save_testing_logs(combined_paths(exp_paths, 'eval'), mlog)
|
class EpisodeTracker(object):
    """
    Provides a method for tracking important metrics with a simultaneous batch of episodes
    """

    def __init__(self, n_to_track):
        # One (observation, action) history per tracked episode.
        self.episodes = [[] for _ in range(n_to_track)]

    def append(self, obs, actions):
        """Record the latest (global_pos, action) pair for every episode in the batch."""
        for idx, (pos, act) in enumerate(zip(obs['global_pos'], actions)):
            self.episodes[idx].append((np.array(pos), act))

    def clear_episode(self, k):
        """Reset episode ``k``, keeping only its most recent entry."""
        # Note: this keeps the *last* entry (it becomes the first of the new episode).
        last_entry = self.episodes[k][-1]
        self.episodes[k] = [last_entry]
|
def softmax_cross_entropy(inputs, target, weight=None, cache=None, size_average=None,
                          ignore_index=(-100), reduce=None, reduction='mean'):
    """Cross-entropy loss that also stashes raw predictions/labels for inspection.

    Accepts either class-index targets of shape (N,) or one-hot/probability
    targets of shape (N, C), which are converted via argmax.

    Args:
        inputs: (N, C) unnormalized logits.
        target: (N,) class indices or (N, C) one-hot/soft labels.
        weight: optional per-class weights, forwarded to F.cross_entropy.
        cache: optional dict; if given, receives the raw ``inputs`` under
            'predictions' and the raw ``target`` under 'labels'.
        size_average, reduce: kept for signature compatibility (deprecated
            torch args); not forwarded.
        ignore_index, reduction: forwarded to F.cross_entropy.

    Returns:
        dict with 'total' and 'xentropy' both holding the same loss tensor.
    """
    # BUG FIX: the original used a mutable default ``cache={}`` (one dict
    # shared across all calls); use None and only fill a caller-supplied dict.
    if cache is not None:
        cache['predictions'] = inputs
        cache['labels'] = target
    if len(target.shape) == 2:
        # One-hot / soft labels -> hard class indices.
        target = torch.argmax(target, dim=1)
    # BUG FIX: ignore_index and reduction were accepted but silently ignored.
    loss = F.cross_entropy(inputs, target, weight,
                           ignore_index=ignore_index, reduction=reduction)
    return {'total': loss, 'xentropy': loss}
|
# NOTE: the following lines are a dataset-viewer scraping artifact, not code:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.