# (export artifact, kept as comment) code stringlengths 17 6.64M |
# |---|
class SegmentationDataSet1(data.Dataset):
    """Most basic image segmentation dataset.

    Loads an input/target image pair from disk on every access, optionally
    applies a joint transform, and converts both to tensors.
    """

    def __init__(self, inputs: list, targets: list, transform=None):
        self.inputs = inputs
        self.targets = targets
        self.transform = transform
        # Fixed output dtypes: float inputs, integer class labels.
        self.inputs_dtype = torch.float32
        self.targets_dtype = torch.long

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, index: int):
        # Read the image pair from disk.
        x = imread(str(self.inputs[index]))
        y = imread(str(self.targets[index]))
        # Optional joint transformation of input and target.
        if self.transform is not None:
            x, y = self.transform(x, y)
        x = torch.from_numpy(x).type(self.inputs_dtype)
        y = torch.from_numpy(y).type(self.targets_dtype)
        return (x, y)
|
class SegmentationDataSet2(data.Dataset):
    """Image segmentation dataset with caching and pretransforms.

    With ``use_cache=True`` every (optionally pre-transformed) image pair is
    loaded into memory once at construction time; ``transform`` is still
    applied on every access.
    """

    def __init__(self, inputs: list, targets: list, transform=None, use_cache: bool=False, pre_transform=None):
        self.inputs = inputs
        self.targets = targets
        self.transform = transform
        self.inputs_dtype = torch.float32
        self.targets_dtype = torch.long
        self.use_cache = use_cache
        self.pre_transform = pre_transform
        if self.use_cache:
            # Eagerly read (and pre-transform) every pair, with a progress bar.
            self.cached_data = []
            pairs = zip(self.inputs, self.targets)
            for img_name, tar_name in tqdm(pairs, desc='Caching', total=len(self.inputs)):
                img, tar = imread(str(img_name)), imread(str(tar_name))
                if self.pre_transform is not None:
                    img, tar = self.pre_transform(img, tar)
                self.cached_data.append((img, tar))

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, index: int):
        if self.use_cache:
            x, y = self.cached_data[index]
        else:
            x = imread(str(self.inputs[index]))
            y = imread(str(self.targets[index]))
        if self.transform is not None:
            x, y = self.transform(x, y)
        return (torch.from_numpy(x).type(self.inputs_dtype),
                torch.from_numpy(y).type(self.targets_dtype))
|
class SegmentationDataSet3(data.Dataset):
    """Image segmentation dataset with caching, pretransforms and multiprocessing.

    Caching reads all pairs in parallel worker processes at construction time.
    """

    def __init__(self, inputs: list, targets: list, transform=None, use_cache: bool=False, pre_transform=None):
        self.inputs = inputs
        self.targets = targets
        self.transform = transform
        self.inputs_dtype = torch.float32
        self.targets_dtype = torch.long
        self.use_cache = use_cache
        self.pre_transform = pre_transform
        if self.use_cache:
            from itertools import repeat
            from multiprocessing import Pool
            # Fan the disk reads out over a process pool; each worker gets
            # (input path, target path, pre_transform).
            with Pool() as pool:
                jobs = zip(inputs, targets, repeat(self.pre_transform))
                self.cached_data = pool.starmap(self.read_images, jobs)

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, index: int):
        if self.use_cache:
            x, y = self.cached_data[index]
        else:
            x = imread(str(self.inputs[index]))
            y = imread(str(self.targets[index]))
        if self.transform is not None:
            x, y = self.transform(x, y)
        return (torch.from_numpy(x).type(self.inputs_dtype),
                torch.from_numpy(y).type(self.targets_dtype))

    @staticmethod
    def read_images(inp, tar, pre_transform):
        """Load one image pair from disk, applying the pre-transform if given."""
        inp, tar = imread(str(inp)), imread(str(tar))
        if pre_transform:
            inp, tar = pre_transform(inp, tar)
        return (inp, tar)
|
class SegmentationDataSet4(data.Dataset):
    """Image segmentation dataset with caching, pretransforms and multiprocessing.

    Output is a dict: {'x', 'y', 'x_name', 'y_name'}. The name fields assume
    ``inputs``/``targets`` hold pathlib.Path objects (``.name`` attribute).
    """

    def __init__(self, inputs: list, targets: list, transform=None, use_cache: bool=False, pre_transform=None):
        self.inputs = inputs
        self.targets = targets
        self.transform = transform
        self.inputs_dtype = torch.float32
        self.targets_dtype = torch.long
        self.use_cache = use_cache
        self.pre_transform = pre_transform
        if self.use_cache:
            from itertools import repeat
            from multiprocessing import Pool
            # Parallel eager load, identical to SegmentationDataSet3.
            with Pool() as pool:
                jobs = zip(inputs, targets, repeat(self.pre_transform))
                self.cached_data = pool.starmap(self.read_images, jobs)

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, index: int):
        if self.use_cache:
            x, y = self.cached_data[index]
        else:
            x = imread(str(self.inputs[index]))
            y = imread(str(self.targets[index]))
        if self.transform is not None:
            x, y = self.transform(x, y)
        x = torch.from_numpy(x).type(self.inputs_dtype)
        y = torch.from_numpy(y).type(self.targets_dtype)
        return {'x': x, 'y': y, 'x_name': self.inputs[index].name, 'y_name': self.targets[index].name}

    @staticmethod
    def read_images(inp, tar, pre_transform):
        """Load one image pair from disk, applying the pre-transform if given."""
        inp, tar = imread(str(inp)), imread(str(tar))
        if pre_transform:
            inp, tar = pre_transform(inp, tar)
        return (inp, tar)
|
class SegmentationDataSetRandom(data.Dataset):
    """Random image segmentation dataset for testing purposes.

    ``size`` is the full input shape (C, *spatial); targets get shape
    ``size[1:]`` (no channel dimension) with integer labels in
    [0, num_classes).
    """

    def __init__(self, num_samples, size, num_classes: int=4, inputs_dtype=torch.float32, targets_dtype=torch.long):
        self.num_samples = num_samples
        self.size = size
        self.num_classes = num_classes
        self.inputs_dtype = inputs_dtype
        self.targets_dtype = targets_dtype
        # Generate every sample up front; __getitem__ only casts dtypes.
        self.cached_data = [
            (torch.from_numpy(np.random.uniform(low=0, high=1, size=size)),
             torch.randint(low=0, high=num_classes, size=size[1:]))
            for _ in range(self.num_samples)
        ]

    def __len__(self):
        return self.num_samples

    def __getitem__(self, index: int):
        inp, tar = self.cached_data[index]
        return {'x': inp.type(self.inputs_dtype),
                'y': tar.type(self.targets_dtype),
                'x_name': f'x_name_{index}',
                'y_name': f'y_name_{index}'}
|
def predict(img: np.ndarray, model: torch.nn.Module, preprocess: Callable, postprocess: Callable, device: str) -> np.ndarray:
    """Single-image inference: preprocess -> forward -> softmax -> postprocess.

    ``preprocess`` must return a numpy array whose dim 1 is the class/channel
    axis after batching; ``postprocess`` receives the softmax probabilities.
    """
    model.eval()  # inference mode for dropout / batch-norm layers
    batch = torch.from_numpy(preprocess(img)).to(device)
    with torch.no_grad():  # no autograd graph needed for inference
        logits = model(batch)
        probabilities = torch.softmax(logits, dim=1)
        result = postprocess(probabilities)
    return result
|
class CombinedLoss(torch.nn.Module):
    """Defines a loss function as a weighted sum of combinable loss criteria.

    Args:
        criteria: List of loss criterion modules that should be combined.
        weight: Weight assigned to the individual loss criteria (in the same
            order as ``criteria``); defaults to all ones.
        device: The device on which the loss should be computed. This needs
            to be set to the device that the loss arguments are allocated on.
    """

    def __init__(self, criteria: Sequence[torch.nn.Module], weight: Optional[Sequence[float]]=None, device: Optional[torch.device]=None):
        super().__init__()
        self.criteria = torch.nn.ModuleList(criteria)
        self.device = device
        if weight is None:
            weight = torch.ones(len(criteria))
        else:
            weight = torch.as_tensor(weight, dtype=torch.float32)
            assert weight.shape == (len(criteria),)
        # Buffer so the weights are serialized with state_dict and follow .to().
        self.register_buffer('weight', weight.to(self.device))

    def forward(self, *args):
        # Accumulate w_i * criterion_i(*args) over all criteria.
        total = torch.tensor(0.0, device=self.device)
        for w, criterion in zip(self.weight, self.criteria):
            total = total + w * criterion(*args)
        return total
|
def _channelwise_sum(x: torch.Tensor) -> torch.Tensor:
    'Sum-reduce all dimensions of a tensor except dimension 1 (C)'
    # Reduce over batch (0) and all spatial dims (2, 3, ...), keep channels.
    reduce_over = tuple(d for d in range(x.dim()) if d != 1)
    return x.sum(dim=reduce_over)

def dice_loss(probs: torch.Tensor, target: torch.Tensor, weight: float=1.0, eps: float=0.0001, smooth: float=0.0):
    """Soft Dice loss, averaged over channels.

    ``target`` may either match ``probs`` exactly (already one-hot / soft) or
    be an integer label map of shape (N, *spatial), which is one-hot encoded
    here. ``eps`` guards against division by zero for empty channels.
    """
    tsh, psh = target.shape, probs.shape
    if tsh == psh:
        # Target already has a channel dimension: use it as-is.
        onehot_target = target.to(probs.dtype)
    elif tsh[0] == psh[0] and tsh[1:] == psh[2:]:
        # Dense label map: scatter into a one-hot tensor along dim 1.
        onehot_target = torch.zeros_like(probs)
        onehot_target.scatter_(1, target.unsqueeze(1), 1)
    else:
        raise ValueError(f'Target shape {target.shape} is not compatible with output shape {probs.shape}.')
    numerator = 2 * _channelwise_sum(probs * onehot_target) + smooth
    denominator = _channelwise_sum(probs + onehot_target) + smooth + eps
    loss_per_channel = 1 - numerator / denominator
    return (weight * loss_per_channel).mean()
|
class DiceLoss(torch.nn.Module):
    """Dice loss module; optionally applies a channel softmax to the logits."""

    def __init__(self, apply_softmax: bool=True, weight: Optional[torch.Tensor]=None, smooth: float=0.0):
        super().__init__()
        # Identity passthrough when the caller already provides probabilities.
        self.softmax = torch.nn.Softmax(dim=1) if apply_softmax else (lambda x: x)
        self.dice = dice_loss
        if weight is None:
            weight = torch.tensor(1.0)
        # Buffer so the weight tensor is serialized and moved with the module.
        self.register_buffer('weight', weight)
        self.smooth = smooth

    def forward(self, output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        return self.dice(probs=self.softmax(output), target=target, weight=self.weight, smooth=self.smooth)
|
class LearningRateFinder():
    """
    Train a model using different learning rates within a range to find the
    optimal learning rate (classic LR range test).
    """

    def __init__(self, model: nn.Module, criterion, optimizer, device):
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.loss_history = {}  # maps tried learning rate -> observed loss
        # Snapshots taken at construction time so reset() can restore them.
        self._model_init = model.state_dict()
        self._opt_init = optimizer.state_dict()
        self.device = device

    def fit(self, data_loader: DataLoader, steps: int=100, min_lr: float=1e-07, max_lr: float=1, constant_increment: bool=False):
        """
        Trains the model for `steps` optimization steps while sweeping the
        learning rate from min_lr to max_lr, recording the loss at each rate.
        """
        self.loss_history = {}
        self.model.train()
        current_lr = min_lr
        steps_counter = 0
        epochs = math.ceil(steps / len(data_loader))
        progressbar = trange(epochs, desc='Progress')
        for epoch in progressbar:
            batch_iter = tqdm(enumerate(data_loader), 'Training', total=len(data_loader), leave=False)
            for i, (x, y) in batch_iter:
                x, y = x.to(self.device), y.to(self.device)
                # Apply the learning rate for this step to every param group.
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] = current_lr
                self.optimizer.zero_grad()
                out = self.model(x)
                loss = self.criterion(out, y)
                loss.backward()
                self.optimizer.step()
                self.loss_history[current_lr] = loss.item()
                steps_counter += 1
                # BUG FIX: the original used `if steps_counter > steps: break`,
                # which only left the inner batch loop and therefore trained
                # one extra step per remaining epoch. Stop exactly at `steps`
                # and leave both loops.
                if steps_counter >= steps:
                    break
                # Increase the learning rate linearly or exponentially.
                if constant_increment:
                    current_lr += (max_lr - min_lr) / steps
                else:
                    current_lr = current_lr * ((max_lr / min_lr) ** (1 / steps))
            if steps_counter >= steps:
                break

    def plot(self, smoothing: bool=True, clipping: bool=True, smoothing_factor: float=0.1):
        """
        Shows loss vs learning rate (log scale) in a matplotlib plot.
        """
        loss_data = pd.Series(list(self.loss_history.values()))
        lr_list = list(self.loss_history.keys())
        if smoothing:
            # Exponential moving average with bias correction of the early terms.
            loss_data = loss_data.ewm(alpha=smoothing_factor).mean()
            loss_data = loss_data.divide(pd.Series([(1 - ((1.0 - smoothing_factor) ** i)) for i in range(1, (loss_data.shape[0] + 1))]))
        if clipping:
            # Drop the noisy warm-up and blow-up ends of the sweep.
            loss_data = loss_data[10:(- 5)]
            lr_list = lr_list[10:(- 5)]
        plt.plot(lr_list, loss_data)
        plt.xscale('log')
        plt.title('Loss vs Learning rate')
        plt.xlabel('Learning rate (log scale)')
        plt.ylabel('Loss (exponential moving average)')
        plt.show()

    def reset(self):
        """
        Resets the model and optimizer to its initial state
        """
        self.model.load_state_dict(self._model_init)
        self.optimizer.load_state_dict(self._opt_init)
        print('Model and optimizer in initial state.')
|
def test_unet_2d():
    """A 2D UNet with SAME convolutions must keep the spatial size and map
    the channel count to the number of output classes."""
    batch_size, in_channels, out_channels = 1, 1, 2
    height, width = 256, 256
    unet = UNet(in_channels=in_channels, out_channels=out_channels, n_blocks=4, start_filters=32, activation=ActivationFunction.RELU, normalization=NormalizationLayer.BATCH, conv_mode=ConvMode.SAME, dim=Dimensions.TWO, up_mode=UpMode.TRANSPOSED)
    inp = torch.rand(size=(batch_size, in_channels, height, width), dtype=torch.float32)
    assert unet(inp).shape == (batch_size, out_channels, height, width)
|
def test_unet_3d():
    """A 3D UNet with SAME convolutions must keep all three spatial dims and
    map the channel count to the number of output classes."""
    batch_size, in_channels, out_channels = 1, 1, 2
    depth, height, width = 64, 64, 64
    unet = UNet(in_channels=in_channels, out_channels=out_channels, n_blocks=4, start_filters=32, activation=ActivationFunction.RELU, normalization=NormalizationLayer.BATCH, conv_mode=ConvMode.SAME, dim=Dimensions.THREE, up_mode=UpMode.TRANSPOSED)
    inp = torch.rand(size=(batch_size, in_channels, depth, height, width), dtype=torch.float32)
    assert unet(inp).shape == (batch_size, out_channels, depth, height, width)
|
def test_unet_valid():
    """
    Same settings & input as in U-Net: Convolutional Networks for Biomedical Image Segmentation: https://arxiv.org/abs/1505.04597
    """
    batch_size, in_channels, out_channels = 1, 1, 2
    input_spatial_dim = 572
    expected_spatial_dim = 388  # VALID convolutions shrink 572 -> 388
    unet = UNet(in_channels=in_channels, out_channels=out_channels, n_blocks=5, start_filters=32, activation=ActivationFunction.RELU, normalization=NormalizationLayer.BATCH, conv_mode=ConvMode.VALID, dim=Dimensions.TWO, up_mode=UpMode.TRANSPOSED)
    inp = torch.rand(size=(batch_size, in_channels, input_spatial_dim, input_spatial_dim), dtype=torch.float32)
    assert unet(inp).shape == (batch_size, out_channels, expected_spatial_dim, expected_spatial_dim)
|
@pytest.mark.parametrize(argnames='up_mode', argvalues=[UpMode.BILINEAR, UpMode.BICUBIC])
def test_unet_2d_up_modes(up_mode):
    """Interpolation-based 2D upsampling modes must also preserve spatial size."""
    batch_size, in_channels, out_channels = 1, 1, 2
    height, width = 256, 256
    unet = UNet(in_channels=in_channels, out_channels=out_channels, n_blocks=4, start_filters=32, activation=ActivationFunction.RELU, normalization=NormalizationLayer.BATCH, conv_mode=ConvMode.SAME, dim=Dimensions.TWO, up_mode=up_mode)
    inp = torch.rand(size=(batch_size, in_channels, height, width), dtype=torch.float32)
    assert unet(inp).shape == (batch_size, out_channels, height, width)
|
@pytest.mark.parametrize(argnames='up_mode', argvalues=[UpMode.TRILINEAR])
def test_unet_3d_up_modes(up_mode):
    """Interpolation-based 3D upsampling modes must also preserve spatial size."""
    batch_size, in_channels, out_channels = 1, 1, 2
    depth, height, width = 64, 64, 64
    unet = UNet(in_channels=in_channels, out_channels=out_channels, n_blocks=4, start_filters=32, activation=ActivationFunction.RELU, normalization=NormalizationLayer.BATCH, conv_mode=ConvMode.SAME, dim=Dimensions.THREE, up_mode=up_mode)
    inp = torch.rand(size=(batch_size, in_channels, depth, height, width), dtype=torch.float32)
    assert unet(inp).shape == (batch_size, out_channels, depth, height, width)
|
class Trainer():
    """Minimal epoch-based training loop with optional validation and LR
    scheduling.

    ``run_trainer()`` returns ``(training_loss, validation_loss,
    learning_rate)`` with one entry appended per epoch.
    """

    def __init__(self, model: torch.nn.Module, device: torch.device, criterion: torch.nn.Module, optimizer: torch.optim.Optimizer, training_dataloader: Dataset, validation_dataloader: Optional[Dataset]=None, lr_scheduler: Optional[torch.optim.lr_scheduler]=None, epochs: int=100, epoch: int=0, notebook: bool=False):
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.training_dataloader = training_dataloader
        self.validation_dataloader = validation_dataloader
        self.device = device
        self.epochs = epochs
        self.epoch = epoch
        self.notebook = notebook
        # Per-epoch statistics collected during run_trainer().
        self.training_loss = []
        self.validation_loss = []
        self.learning_rate = []

    def run_trainer(self):
        """Run the full loop: train, optionally validate, then step the scheduler."""
        # Local import so the notebook-friendly progress bars are used on demand.
        if self.notebook:
            from tqdm.notebook import tqdm, trange
        else:
            from tqdm import tqdm, trange
        progressbar = trange(self.epochs, desc='Progress')
        for i in progressbar:
            # Epoch counter
            self.epoch += 1
            # Training block
            self._train()
            # Validation block
            if self.validation_dataloader is not None:
                self._validate()
            # Learning rate scheduler block
            if self.lr_scheduler is not None:
                # BUG FIX: the original called `self.lr_scheduler.batch(...)`;
                # PyTorch schedulers expose `step()`, so `batch()` raised
                # AttributeError. ReduceLROnPlateau additionally requires the
                # monitored metric (this epoch's validation loss).
                if (self.validation_dataloader is not None) and (self.lr_scheduler.__class__.__name__ == 'ReduceLROnPlateau'):
                    self.lr_scheduler.step(self.validation_loss[i])
                else:
                    self.lr_scheduler.step()
        return (self.training_loss, self.validation_loss, self.learning_rate)

    def _train(self):
        """One training epoch; appends the mean loss and current LR to the stats."""
        if self.notebook:
            from tqdm.notebook import tqdm, trange
        else:
            from tqdm import tqdm, trange
        self.model.train()
        train_losses = []
        batch_iter = tqdm(enumerate(self.training_dataloader), 'Training', total=len(self.training_dataloader), leave=False)
        for (i, (x, y)) in batch_iter:
            (input_x, target_y) = (x.to(self.device), y.to(self.device))
            self.optimizer.zero_grad()
            out = self.model(input_x)
            loss = self.criterion(out, target_y)
            loss_value = loss.item()
            train_losses.append(loss_value)
            loss.backward()
            self.optimizer.step()
            batch_iter.set_description(f'Training: (loss {loss_value:.4f})')
        self.training_loss.append(np.mean(train_losses))
        self.learning_rate.append(self.optimizer.param_groups[0]['lr'])
        batch_iter.close()

    def _validate(self):
        """One validation epoch under no_grad; appends the mean loss to the stats."""
        if self.notebook:
            from tqdm.notebook import tqdm, trange
        else:
            from tqdm import tqdm, trange
        self.model.eval()
        valid_losses = []
        batch_iter = tqdm(enumerate(self.validation_dataloader), 'Validation', total=len(self.validation_dataloader), leave=False)
        for (i, (x, y)) in batch_iter:
            (input, target) = (x.to(self.device), y.to(self.device))
            with torch.no_grad():
                out = self.model(input)
                loss = self.criterion(out, target)
                loss_value = loss.item()
                valid_losses.append(loss_value)
                batch_iter.set_description(f'Validation: (loss {loss_value:.4f})')
        self.validation_loss.append(np.mean(valid_losses))
        batch_iter.close()
|
def normalize_01(inp: np.ndarray):
    'Squash image input to the value range [0, 1] (no clipping)'
    # Shift so the minimum becomes 0, then divide by the peak-to-peak range.
    # NOTE(review): a constant-valued input makes np.ptp == 0 and divides by
    # zero — callers are assumed to pass non-constant images.
    lowest = np.min(inp)
    value_range = np.ptp(inp)
    return (inp - lowest) / value_range
|
def normalize(inp: np.ndarray, mean: float, std: float):
    'Normalize based on mean and standard deviation.'
    centered = inp - mean
    return centered / std
|
def create_dense_target(tar: np.ndarray):
    """Relabel a mask so its class values become consecutive ints 0..K-1.

    np.unique returns the class values sorted, so the rank order of the
    original labels is preserved.
    """
    dense = np.zeros_like(tar)
    for new_label, old_label in enumerate(np.unique(tar)):
        dense[tar == old_label] = new_label
    return dense
|
def center_crop_to_size(x: np.ndarray, size: Tuple, copy: bool=False) -> np.ndarray:
    """
    Center crops a given array x to the size passed in the function.
    Expects even spatial dimensions!

    Returns a view of x by default; a copy when ``copy=True``.
    """
    # BUG FIX: the original cast the margins with `np.int`, an alias that was
    # removed in NumPy 1.24+. The margins are computed as integers directly,
    # and the crop is done with plain slicing (equivalent to
    # skimage.util.crop with symmetric crop widths).
    margins = (np.asarray(x.shape) - np.asarray(size)) // 2
    window = tuple(slice(int(m), int(dim - m)) for m, dim in zip(margins, x.shape))
    cropped_image = x[window]
    return cropped_image.copy() if copy else cropped_image
|
def re_normalize(inp: np.ndarray, low: int=0, high: int=255):
    'Normalize the data to a certain range. Default: [0-255]'
    # NOTE(review): `low`/`high` are accepted for API symmetry but are not
    # used — img_as_ubyte always rescales to the full uint8 range [0, 255].
    return img_as_ubyte(inp)
|
def random_flip(inp: np.ndarray, tar: np.ndarray, ndim_spatial: int):
    """Randomly flip each spatial axis of an input/target pair (p=0.5, iid).

    The input carries a leading channel axis that the target lacks, so
    target axis i corresponds to input axis i + 1; both are flipped together.
    """
    coin_flips = [np.random.randint(low=0, high=2) for _ in range(ndim_spatial)]
    tar_axes = tuple(axis for axis, flip in enumerate(coin_flips) if flip == 1)
    inp_axes = tuple(axis + 1 for axis in tar_axes)
    return (np.flip(inp, axis=inp_axes), np.flip(tar, axis=tar_axes))
|
class Repr():
    'Evaluable string representation of an object'

    def __repr__(self):
        # Render as "<ClassName>: <attribute dict>" so subclass state is visible.
        class_name = type(self).__name__
        return f'{class_name}: {self.__dict__}'
|
class FunctionWrapperSingle(Repr):
    'A function wrapper that returns a partial for input only.'

    def __init__(self, function: Callable, *args, **kwargs):
        # Freeze the extra arguments now; only the input varies per call.
        self.function = partial(function, *args, **kwargs)

    def __call__(self, inp: np.ndarray):
        return self.function(inp)
|
class FunctionWrapperDouble(Repr):
    'A function wrapper that returns a partial for an input-target pair.'

    def __init__(self, function: Callable, input: bool=True, target: bool=False, *args, **kwargs):
        # Freeze the extra arguments; the flags pick which of the pair the
        # function is applied to (input by default, target opt-in).
        self.function = partial(function, *args, **kwargs)
        self.input = input
        self.target = target

    def __call__(self, inp: np.ndarray, tar: dict):
        transformed_inp = self.function(inp) if self.input else inp
        transformed_tar = self.function(tar) if self.target else tar
        return (transformed_inp, transformed_tar)
|
class Compose():
    'Baseclass - composes several transforms together.'

    def __init__(self, transforms: List[Callable]):
        self.transforms = transforms

    def __repr__(self):
        # Render the plain list of transform objects.
        return str(list(self.transforms))
|
class ComposeDouble(Compose):
    'Composes transforms for input-target pairs.'

    def __call__(self, inp: np.ndarray, target: dict):
        # Thread the pair through every transform in order.
        for transform in self.transforms:
            inp, target = transform(inp, target)
        return (inp, target)
|
class ComposeSingle(Compose):
    'Composes transforms for input only.'

    def __call__(self, inp: np.ndarray):
        # Thread the input through every transform in order.
        for transform in self.transforms:
            inp = transform(inp)
        return inp
|
class AlbuSeg2d(Repr):
    """
    Wrapper for albumentations' segmentation-compatible 2D augmentations.
    Wraps an augmentation, so it can be used within the provided transform pipeline.
    See https://github.com/albu/albumentations for more information.
    Expected input: (C, spatial_dims)
    Expected target: (spatial_dims) -> No (C)hannel dimension
    """

    def __init__(self, albumentation: Callable):
        self.albumentation = albumentation

    def __call__(self, inp: np.ndarray, tar: np.ndarray):
        # albumentations applies the same spatial transform to image and mask.
        augmented = self.albumentation(image=inp, mask=tar)
        return (augmented['image'], augmented['mask'])
|
class AlbuSeg3d(Repr):
    """
    Wrapper for albumentations' segmentation-compatible 2D augmentations.
    Wraps an augmentation, so it can be used within the provided transform pipeline.
    See https://github.com/albu/albumentations for more information.
    Expected input: (spatial_dims) -> No (C)hannel dimension
    Expected target: (spatial_dims) -> No (C)hannel dimension
    Iterates over the slices of an input-target pair stack and performs the same albumentation function.
    """

    def __init__(self, albumentation: Callable):
        # ReplayCompose records the sampled parameters so the exact same
        # augmentation can be replayed on every slice of the stack.
        self.albumentation = A.ReplayCompose([albumentation])

    def __call__(self, inp: np.ndarray, tar: np.ndarray):
        tar = tar.astype(np.uint8)  # albumentations masks must be uint8
        out_inp = np.copy(inp)
        out_tar = np.copy(tar)
        # Sample the augmentation parameters once on the first slice, then
        # replay them identically on every slice of the stack.
        replay = self.albumentation(image=inp[0])['replay']
        for slice_idx, (inp_slice, tar_slice) in enumerate(zip(inp, tar)):
            augmented = A.ReplayCompose.replay(replay, image=inp_slice, mask=tar_slice)
            out_inp[slice_idx] = augmented['image']
            out_tar[slice_idx] = augmented['mask']
        return (out_inp, out_tar)
|
class RandomFlip(Repr):
    """
    Randomly flips spatial input and target dimensions respectively. Spatial
    dimensions are considered to occur last in the input/target shape and are
    flipped with probability p=0.5 (iid).
    Works for 2D and 3D image-target pairs.
    Expected input: (C, spatial_dims)
    Expected target: (spatial_dims) -> No (C)hannel dimension
    Args:
        ndim_spatial: Number of spatial dimension in input, e.g.
            ndim_spatial=2 for input shape (C, H, W)
            ndim_spatial=3 for input shape (C, D, H, W)
    """

    def __init__(self, ndim_spatial):
        self.ndim_spatial = ndim_spatial

    def __call__(self, inp, target):
        # One independent coin flip per spatial axis.
        coin_flips = [np.random.randint(low=0, high=2) for _ in range(self.ndim_spatial)]
        target_axes = tuple(axis for axis, flip in enumerate(coin_flips) if flip == 1)
        # Input carries a leading channel axis, so its spatial axes are shifted by 1.
        input_axes = tuple(axis + 1 for axis in target_axes)
        flipped_inp = np.flip(inp, axis=input_axes)
        flipped_target = np.flip(target, axis=target_axes)
        # Copy so downstream torch.from_numpy gets contiguous, owned arrays.
        return (np.copy(flipped_inp), np.copy(flipped_target))
|
class Segmentation_UNET(pl.LightningModule):
    """LightningModule for semantic segmentation.

    Optimizes the mean of weighted cross-entropy and Dice loss and, when
    ``metrics`` is enabled, tracks per-class F1 and IoU separately for the
    Train/Valid/Test phases.
    """

    def __init__(self, model, lr, num_classes, weight_ce, weight_dice, metrics=True):
        super().__init__()
        self.model = model
        self.lr = lr
        self.num_classes = num_classes
        # Buffers so the class-weight tensors follow the module across devices.
        self.register_buffer('weight_ce', weight_ce)
        self.register_buffer('weight_dice', weight_dice)
        self.dice_loss = DiceLoss(weight=self.weight_dice)
        self.ce_loss = CrossEntropyLoss(weight=self.weight_ce)
        self.save_hyperparameters()
        self.metrics = metrics
        if self.metrics:
            # One tracker per phase so running state never mixes across phases.
            self.f1_train = CustomMetric(metric=torchmetrics.functional.f1, metric_name='F1', num_classes=self.num_classes, average='none', mdmc_average='samplewise')
            self.f1_valid = CustomMetric(metric=torchmetrics.functional.f1, metric_name='F1', num_classes=self.num_classes, average='none', mdmc_average='samplewise')
            self.f1_test = CustomMetric(metric=torchmetrics.functional.f1, metric_name='F1', num_classes=self.num_classes, average='none', mdmc_average='samplewise')
            self.iou_train = CustomMetric(metric=torchmetrics.functional.iou, metric_name='IoU', num_classes=self.num_classes, reduction='none')
            self.iou_valid = CustomMetric(metric=torchmetrics.functional.iou, metric_name='IoU', num_classes=self.num_classes, reduction='none')
            self.iou_test = CustomMetric(metric=torchmetrics.functional.iou, metric_name='IoU', num_classes=self.num_classes, reduction='none')

    def shared_step(self, batch):
        """Forward pass + combined loss; shared by train/valid/test steps."""
        (x, y, x_name, y_name) = (batch['x'], batch['y'], batch['x_name'], batch['y_name'])
        out = self.model(x)
        out_soft = torch.nn.functional.softmax(out, dim=1)
        ce_loss = self.ce_loss(out, y)
        dice_loss = self.dice_loss(out, y)
        loss = ((ce_loss + dice_loss) / 2)
        return {**batch, 'pred': out_soft, 'loss': loss}

    def training_step(self, batch, batch_idx):
        shared_step = self.shared_step(batch)
        if self.metrics:
            self.compute_and_log_metrics_batch(pred=shared_step['pred'], tar=shared_step['y'], name_phase='Train', metrics_module=self.f1_train)
            # IoU expects hard labels, so convert soft predictions first.
            self.compute_and_log_metrics_batch(pred=torchmetrics.utilities.data.to_categorical(shared_step['pred']), tar=shared_step['y'], name_phase='Train', metrics_module=self.iou_train)
        return shared_step['loss']

    def training_epoch_end(self, outputs):
        if self.metrics:
            self.compute_and_log_metrics_epoch(name_phase='Train', metrics_module=self.f1_train)
            self.compute_and_log_metrics_epoch(name_phase='Train', metrics_module=self.iou_train)

    def validation_step(self, batch, batch_idx):
        shared_step = self.shared_step(batch)
        if self.metrics:
            self.compute_and_log_metrics_batch(pred=shared_step['pred'], tar=shared_step['y'], name_phase='Valid', metrics_module=self.f1_valid)
            # CONSISTENCY FIX: was pl.metrics.utils.to_categorical (removed
            # from pytorch-lightning); use the torchmetrics helper like the
            # training step does.
            self.compute_and_log_metrics_batch(pred=torchmetrics.utilities.data.to_categorical(shared_step['pred']), tar=shared_step['y'], name_phase='Valid', metrics_module=self.iou_valid)
            # Logged so the checkpoint callback / LR scheduler can monitor it.
            self.log('checkpoint_valid_f1_epoch', self.f1_valid.get_metrics_batch(mean=True))

    def validation_epoch_end(self, outputs):
        if self.metrics:
            self.compute_and_log_metrics_epoch(name_phase='Valid', metrics_module=self.f1_valid)
            self.compute_and_log_metrics_epoch(name_phase='Valid', metrics_module=self.iou_valid)

    def test_step(self, batch, batch_idx):
        shared_step = self.shared_step(batch)
        if self.metrics:
            self.compute_and_log_metrics_batch(pred=shared_step['pred'], tar=shared_step['y'], name_phase='Test', metrics_module=self.f1_test, name=shared_step['x_name'])
            # CONSISTENCY FIX: same to_categorical unification as validation_step.
            self.compute_and_log_metrics_batch(pred=torchmetrics.utilities.data.to_categorical(shared_step['pred']), tar=shared_step['y'], name_phase='Test', metrics_module=self.iou_test, name=shared_step['x_name'])
            if (shared_step['y'].shape[0] == 1):
                self.log_names_batch(name_phase='Test', name=shared_step['x_name'][0])

    def test_epoch_end(self, outputs):
        if self.metrics:
            # BUG FIX: these epoch metrics were logged under 'Valid'
            # (copy-paste from validation_epoch_end); log them under 'Test'.
            self.compute_and_log_metrics_epoch(name_phase='Test', metrics_module=self.f1_test)
            self.compute_and_log_metrics_epoch(name_phase='Test', metrics_module=self.iou_test)

    def configure_optimizers(self):
        """Adam + ReduceLROnPlateau monitoring the validation F1 metric."""
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
        lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.75, patience=10, min_lr=0)
        return {'optimizer': optimizer, 'lr_scheduler': lr_scheduler, 'monitor': 'checkpoint_valid_f1_epoch'}

    def compute_and_log_metrics_batch(self, pred, tar, name_phase, metrics_module, name=None):
        """Update the tracker with this batch and log overall + per-class values."""
        metrics_module.batch(pred, tar, name=name)
        self.logger.experiment.log_metric(f'{name_phase}/{metrics_module}/Batch', metrics_module.get_metrics_batch(mean=True))
        for (class_idx, metric) in zip(metrics_module.valid_class, metrics_module.get_metrics_batch(mean=False)):
            self.logger.experiment.log_metric(f'{name_phase}/{metrics_module}/Batch/Class/{class_idx}', metric)

    def compute_and_log_metrics_epoch(self, name_phase, metrics_module):
        """Log per-class epoch metrics, then the epoch mean (which resets the tracker)."""
        for (class_idx, value) in enumerate(metrics_module.get_metrics_epoch()):
            self.logger.experiment.log_metric(f'{name_phase}/{metrics_module}/Epoch/Class/{class_idx}', value)
        self.logger.experiment.log_metric(f'{name_phase}/{metrics_module}/Epoch', metrics_module.epoch())

    def log_names_batch(self, name_phase, name):
        self.logger.experiment.log_text(f'{name_phase}/Batch/Names', name)
|
class CustomMetric():
    """Accumulates a per-class metric over batches and aggregates per epoch.

    ``metric`` is a callable ``metric(prediction, target, **kwargs)`` that
    returns one score per class; classes absent from a batch's target are
    masked out when aggregating.
    """

    def __init__(self, metric, metric_name, **kwargs):
        self.metric = metric
        self.metric_name = metric_name
        self.kwargs = kwargs
        # Running per-batch state for the current epoch.
        self.scores = []
        self.valid_classes = []
        self.valid_matrices = []
        self.names = []
        # Most recent batch results.
        self.score = None
        self.valid_class = None
        self.valid_matrix = None
        self.name = None
        # Snapshot of the previous epoch (filled by epoch()).
        self.last_scores = None
        self.last_valid_classes = None
        self.last_valid_matrices = None
        self.last_names = None

    def batch(self, prediction, target, name=None):
        """Evaluate the metric on one batch and record which classes occurred."""
        self.score = self.metric(prediction, target, **self.kwargs).to('cpu')
        self.valid_class = target.unique().to('cpu')
        # Boolean mask flagging the classes actually present in this target.
        mask = torch.zeros_like(self.score).to('cpu')
        mask[self.valid_class] = 1
        self.valid_matrix = mask.type(torch.bool).to('cpu')
        self.scores.append(self.score)
        self.valid_classes.append(self.valid_class)
        self.valid_matrices.append(self.valid_matrix)
        if name:
            self.name = name
            self.names.append(self.name)

    def get_metrics_batch(self, mean=True):
        """Scores of the last batch, restricted to the classes present in it."""
        present = self.score[self.valid_class]
        return present.mean() if mean else present

    def get_metrics_epoch(self, last=False, transpose=True):
        """Per-class means over the epoch (NaN for classes never observed)."""
        score_list = self.last_scores if last else self.scores
        mask_list = self.last_valid_matrices if last else self.valid_matrices
        scores = torch.stack(score_list)
        masks = torch.stack(mask_list)
        if transpose:
            # Rows become classes instead of batches.
            scores, masks = (scores.T, masks.T)
        per_row = [row[keep] for (row, keep) in zip(scores, masks)]
        return torch.stack([values.mean() for values in per_row])

    def epoch(self):
        """Finish the epoch: snapshot state, reset, and return the mean score."""
        self.last_scores = self.scores
        self.last_valid_classes = self.valid_classes
        self.last_valid_matrices = self.valid_matrices
        self.last_names = self.names
        result = self.get_metrics_epoch()
        self.reset()
        return result.mean()

    def reset(self):
        """Clear the running per-batch state for a new epoch."""
        self.scores = []
        self.valid_classes = []
        self.valid_matrices = []
        self.names = []

    def __repr__(self):
        return self.metric_name
|
def enable_gui_qt():
    'Performs the magic command %gui qt'
    from IPython import get_ipython
    ipython = get_ipython()
    # BUG FIX / MODERNIZATION: outside an IPython session get_ipython()
    # returns None, so the original raised AttributeError; also,
    # `InteractiveShell.magic()` is deprecated in favor of run_line_magic().
    if ipython is not None:
        ipython.run_line_magic('gui', 'qt')
|
class DatasetViewer():
    """Interactive napari-based browser for a segmentation dataset.

    Keybindings inside the viewer: 'n' shows the next sample, 'b' the
    previous one; the index wraps around at both ends.
    """

    def __init__(self, dataset):
        # dataset yields (x, y) tensor pairs and exposes .inputs/.targets
        # path lists (see get_names_dataset) — assumed pathlib.Paths; TODO confirm.
        self.dataset = dataset
        self.index = 0
        # Created lazily in napari(); layers are reused across show_sample() calls.
        self.viewer = None
        self.image_layer = None
        self.label_layer = None

    def napari(self):
        """Open (or reopen) the napari viewer and display the first sample."""
        # Run the %gui qt magic so the Qt event loop does not block the shell.
        enable_gui_qt()
        # Drop any previous viewer instance before creating a fresh one.
        if self.viewer:
            try:
                del self.viewer
            except AttributeError:
                pass
        self.index = 0
        self.viewer = napari.Viewer()
        self.show_sample()
        # Keybindings: 'n' -> next sample, 'b' -> previous sample.
        @self.viewer.bind_key('n')
        def next(viewer):
            self.increase_index()
            self.show_sample()
        @self.viewer.bind_key('b')
        def prev(viewer):
            self.decrease_index()
            self.show_sample()

    def increase_index(self):
        # Advance with wrap-around to the first sample.
        self.index += 1
        if (self.index >= len(self.dataset)):
            self.index = 0

    def decrease_index(self):
        # Step back with wrap-around to the last sample.
        self.index -= 1
        if (self.index < 0):
            self.index = (len(self.dataset) - 1)

    def show_sample(self):
        """Render the sample at self.index, reusing existing layers when possible."""
        sample = self.get_sample_dataset(self.index)
        (x, y) = sample
        names = self.get_names_dataset(self.index)
        (x_name, y_name) = names
        # .name assumes the dataset stores pathlib.Path objects.
        (x_name, y_name) = (x_name.name, y_name.name)
        x = self.transform_x(x)
        y = self.transform_y(y)
        # Create the layers on first use; afterwards update them in place.
        if (self.image_layer not in self.viewer.layers):
            self.image_layer = self.create_image_layer(x, x_name)
        else:
            self.update_image_layer(self.image_layer, x, x_name)
        if (self.label_layer not in self.viewer.layers):
            self.label_layer = self.create_label_layer(y, y_name)
        else:
            self.update_label_layer(self.label_layer, y, y_name)
        self.viewer.reset_view()

    def create_image_layer(self, x, x_name):
        # New napari image layer for the input.
        return self.viewer.add_image(x, name=str(x_name))

    def update_image_layer(self, image_layer, x, x_name):
        'Replace the data and the name of a given image_layer'
        image_layer.data = x
        image_layer.name = str(x_name)

    def create_label_layer(self, y, y_name):
        # New napari labels layer for the segmentation target.
        return self.viewer.add_labels(y, name=str(y_name))

    def update_label_layer(self, target_layer, y, y_name):
        'Replace the data and the name of a given image_layer'
        target_layer.data = y
        target_layer.name = str(y_name)

    def get_sample_dataset(self, index):
        # One (x, y) pair from the dataset.
        return self.dataset[index]

    def get_names_dataset(self, index):
        # Matching input/target path objects for the sample.
        return (self.dataset.inputs[index], self.dataset.targets[index])

    def transform_x(self, x):
        """Tensor -> displayable numpy image (channel-last, uint8)."""
        x = x.cpu().numpy()
        # Move a leading RGB channel axis to the end for napari display.
        if self.check_if_rgb(x):
            x = np.moveaxis(x, source=0, destination=(- 1))
        # Rescale to uint8 [0, 255] for display.
        x = re_normalize(x)
        return x

    def transform_y(self, y):
        # Labels only need the tensor -> numpy conversion.
        y = y.cpu().numpy()
        return y

    def check_if_rgb(self, x):
        # Heuristic: a leading dim of size 3 is treated as RGB channels.
        return (True if (x.shape[0] == 3) else False)
|
def plot_training(training_losses, validation_losses, learning_rate, gaussian=True, sigma=2, figsize=(8, 6)):
    """
    Returns a loss plot with training loss, validation loss and learning rate.

    With ``gaussian=True`` the raw losses are drawn as faint dots and a
    Gaussian-smoothed curve is overlaid; otherwise the raw losses are drawn
    as solid lines.
    """
    import matplotlib.pyplot as plt
    from matplotlib import gridspec
    from scipy.ndimage import gaussian_filter
    epochs = list(range(1, len(training_losses) + 1))
    fig = plt.figure(figsize=figsize)
    grid = gridspec.GridSpec(ncols=2, nrows=1, figure=fig)
    loss_ax = fig.add_subplot(grid[0, 0])
    lr_ax = fig.add_subplot(grid[0, 1])
    # Hide the top/right spines on both panels.
    for ax in fig.get_axes():
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
    if gaussian:
        train_smooth = gaussian_filter(training_losses, sigma=sigma)
        valid_smooth = gaussian_filter(validation_losses, sigma=sigma)
        raw_style = '.'
        raw_train_color, raw_valid_color = 'lightcoral', 'lightgreen'
        smooth_train_color, smooth_valid_color = 'red', 'green'
        alpha = 0.25
    else:
        raw_style = '-'
        raw_train_color, raw_valid_color = 'red', 'green'
        alpha = 1.0
    loss_ax.plot(epochs, training_losses, raw_style, color=raw_train_color, label='Training', alpha=alpha)
    loss_ax.plot(epochs, validation_losses, raw_style, color=raw_valid_color, label='Validation', alpha=alpha)
    if gaussian:
        loss_ax.plot(epochs, train_smooth, '-', color=smooth_train_color, label='Training', alpha=0.75)
        loss_ax.plot(epochs, valid_smooth, '-', color=smooth_valid_color, label='Validation', alpha=0.75)
    loss_ax.title.set_text('Training & validation loss')
    loss_ax.set_xlabel('Epoch')
    loss_ax.set_ylabel('Loss')
    loss_ax.legend(loc='upper right')
    lr_ax.plot(epochs, learning_rate, color='black')
    lr_ax.title.set_text('Learning rate')
    lr_ax.set_xlabel('Epoch')
    lr_ax.set_ylabel('LR')
    return fig
|
def connect(PORT):
    """Listen on *PORT* and block until a single client connects.

    Returns the accepted connection socket. SO_REUSEADDR is set so the
    port can be rebound immediately after a restart (without it, a socket
    lingering in TIME_WAIT makes bind() fail). The listening socket is
    intentionally left open, matching the original single-client design.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(('', PORT))
    sock.listen(1)
    (conn, addr) = sock.accept()
    return conn
|
def send(conn, data):
    """Serialize *data* (an ``array.array`` or numpy array) and send it.

    Uses ``tobytes()``: the old ``tostring()`` alias was removed from
    ``array.array`` in Python 3.9 (and deprecated in numpy), so the
    original call crashes on modern runtimes.
    """
    coded_data = data.tobytes()
    conn.sendall(coded_data)
|
def recv(conn):
    """Receive up to 1024 bytes from *conn* and decode them as float32s."""
    raw = conn.recv(1024)
    return array.array('f', raw)
|
def disconnect(conn):
    """Close the given connection socket."""
    conn.close()
|
def play_game(model_name, env_name, game_queue, reward_queue, index):
    """Worker process: play games pulled from *game_queue* and push each
    final cumulative reward onto *reward_queue*.

    Relies on module-level ``args`` (CLI options) and ``device``. Loads the
    model unless ``args.random`` is set, in which case actions are drawn
    uniformly from the 18-action full Atari action space.
    """
    print('Starting process #{}..'.format(index))
    if (not args.random):
        # NOTE(review): torch.load of a full model object — assumes the model
        # class is importable in this worker; confirm for the 'spawn' start method.
        model = torch.load(model_name, map_location=device)
        model.eval()
    env = gym.make(env_name, full_action_space=True)
    rng = np.random.default_rng()
    # Keep playing until the shared queue of game indices is drained.
    while (not game_queue.empty()):
        try:
            game = game_queue.get(False, None)
        except Empty:
            # Another worker emptied the queue between empty() and get().
            print('Game queue empty')
            return
        # Random number of initial no-op steps for start-state diversity.
        no_ops = randint(0, args.no_op)
        no_ops_done = 0
        o = env.reset()
        (r, d, i) = (0.0, False, None)
        total_reward = 0
        total_frames = 0
        # Frame stack starts as black frames and fills up as the game runs.
        stack = []
        for _ in range(args.framestack):
            stack.append(np.zeros((args.width, args.height, 3), dtype=np.uint8))
        while True:
            if args.display:
                env.render()
            img = Image.fromarray(o)
            img = img.resize((args.width, args.height), Image.BILINEAR)
            img = np.asarray(img)
            stack.insert(0, img)  # newest frame first
            while (len(stack) > args.framestack):
                stack.pop()
            if (len(stack) != args.framestack):
                continue
            if args.merge:
                # Merge the stack into one frame via per-pixel maximum.
                image_stack = map(Image.fromarray, stack)
                img = reduce(ImageChops.lighter, image_stack)
                np_stack = np.asarray(img, dtype=np.float32)
                np_stack = np.expand_dims(np_stack, axis=0)
            else:
                # Stack frames along the channel axis instead.
                np_stack = np.concatenate(stack, axis=2)
                np_stack = np.expand_dims(np_stack, axis=0)
                np_stack = np_stack.astype(np.float32)
            np_stack /= 255
            if (no_ops_done < no_ops):
                (o, r, d, i) = env.step(0)
                no_ops_done += 1
            elif (not args.random):
                # NHWC -> NCHW via swapaxes before feeding the network.
                prediction = model(torch.Tensor(np.swapaxes(np_stack, 1, 3)).to(device)).detach().cpu()
                prediction = F.softmax(prediction, dim=1)
                if (args.action == 'argmax'):
                    prediction = np.argmax(prediction)
                elif (args.action == 'sampling'):
                    # Re-normalize and sample an action from the softmax output.
                    prediction = np.array(prediction[0])
                    p = (prediction / np.sum(prediction))
                    prediction = rng.choice(list(range(len(prediction))), p=p)
                (o, r, d, i) = env.step(prediction)
            elif args.random:
                (o, r, d, i) = env.step(np.random.randint(18))
            total_reward += r
            total_frames += 1
            # Stop on terminal state or the frame budget.
            if (d or (total_frames > args.max_frames)):
                reward_queue.put(total_reward)
                break
        print('#{} finished game {}'.format(index, game))
|
def main():
    """Evaluate each model in ``args.models``: play ``args.games`` games
    across ``args.processes`` workers, then write per-game rewards plus a
    summary line to <args.save>/<model_name>.txt.

    NOTE(review): this module defines ``main`` several times; the last
    definition executed is the one that wins at call time.
    """
    set_start_method('spawn')
    for model in args.models:
        model_name = os.path.basename(os.path.normpath(model))
        results_path = os.path.normpath(args.save)
        if (not os.path.exists(results_path)):
            os.mkdir(results_path)
        results_name = '{}.txt'.format(model_name)
        results_file = os.path.normpath(os.path.join(results_path, results_name))
        print('Evaluating model {}'.format(model))
        # Manager queues so 'spawn'-started workers can share them.
        rewards = multiprocessing.Manager().Queue(1000000)
        games = multiprocessing.Manager().Queue(1000000)
        for i in range(args.games):
            games.put(i)
        procs = []
        for i in range(args.processes):
            proc = Process(target=play_game, args=(model, args.env, games, rewards, i))
            proc.start()
            procs.append(proc)
        print('Processes started')
        for (k, proc) in enumerate(procs):
            print('Waiting to join process #{}'.format(k))
            proc.join()
            print('Joined process #{}'.format(k))
        print('Processes joined')
        with open(results_file, 'w') as f:
            rewards_list = []
            while (not rewards.empty()):
                r = rewards.get()
                rewards_list.append(r)
                f.write('{}\n'.format(r))
                print(r)
            # stdev needs at least two samples; zero everything out otherwise.
            if (len(rewards_list) <= 1):
                avg = 0
                std = 0
                minim = 0
                maxim = 0
            else:
                avg = round(statistics.mean(rewards_list), 1)
                std = round(statistics.stdev(rewards_list), 1)
                minim = min(rewards_list)
                maxim = max(rewards_list)
            f.write('Avg: {}'.format(avg))
            print('Avg: {}, std: {}, min: {}, max: {}'.format(avg, std, minim, maxim))
|
def main():
    """Interactive play loop: stream frames from the game binary over a
    ``Connection``, run the model (or a random policy) on a frame stack and
    send key presses back; optionally record frames and actions to disk.

    Runtime hotkeys: Page Up + p starts playing, Page Up + s stops.
    Uses module-level ``args``, ``device`` and ``KEY_MAPPING``.

    Fixes: ``astype(np.int)`` — the ``np.int`` alias was removed in
    NumPy 1.24, so the original crashed on modern numpy; also renamed the
    comprehension variable that shadowed ``buttons``.
    """
    model = None
    if (not args.random):
        model = torch.load(args.model, map_location=device)
        model.eval()
    c = Connection(start_binary=(not args.dont_start_binary), binary_path=args.binary)
    # Representative (first) button of each key group for the selected game.
    buttons = [key_pair[0] for key_pair in KEY_MAPPING[args.game]]
    num_buttons = len(buttons)
    is_playing = False
    target_time_per_frame = (1.0 / args.framerate)
    frame_time = None
    stack = []
    for _ in range(args.framestack):
        stack.append(np.zeros((args.width, args.height, 3), dtype=np.float32))
    print('Ready to play (Page Up + p)...')
    recording_id = None
    image_directory = None
    recorded_data = []
    recording_index = 0
    while True:
        frame_time = time.time()
        c.req.allow_user_override = True
        c.req.get_keys = True
        c.req.get_image = True
        c.req.quality = args.quality
        c.req.process_name = args.process
        response = c.send_request()
        if ('page up' in response.pressed_keys):
            if (('p' in response.pressed_keys) and (not is_playing)):
                is_playing = True
                print('Starting to play (stop with Page Up + s)')
                print(('Currently playing: ' + str(is_playing)))
                # Pad the stack with blank frames on (re)start.
                for _ in range(args.framestack):
                    stack.append(np.zeros((args.width, args.height, 3), dtype=np.uint8))
                if (args.output is not None):
                    recording_index = 0
                    (recording_id, image_directory) = start_recording(args.output, args.game)
                    recorded_data = []
            elif (('s' in response.pressed_keys) and is_playing):
                print('Stopped playing. Start with Page Up + p')
                is_playing = False
                if (args.output is not None):
                    finish_recording(args.output, args.game, recording_id, recorded_data)
        if is_playing:
            img = Image.open(io.BytesIO(response.image))
            img = img.resize((args.width, args.height), Image.BILINEAR)
            img = np.asarray(img, dtype=np.float32)
            stack.insert(0, img)  # newest frame first
            while (len(stack) > args.framestack):
                stack.pop()
            if (len(stack) != args.framestack):
                continue
            np_stack = np.concatenate(stack, axis=2)
            np_stack = np.expand_dims(np_stack, axis=0)
            np_stack = np_stack.astype(np.float32)
            np_stack /= 255
            prediction = None
            if (not args.random):
                # NHWC -> NCHW before the forward pass; per-button sigmoid
                # probabilities are then sampled into 0/1 presses.
                prediction = model(torch.Tensor(np.swapaxes(np_stack, 1, 3)).to(device)).detach().cpu()[0]
                prediction = torch.sigmoid(prediction).numpy()
                prediction = (np.random.random(size=prediction.shape) < prediction).astype(int)
                prediction = prediction.tolist()
            else:
                prediction = np.random.randint(2, size=num_buttons).tolist()
            for i in range(len(buttons)):
                if (prediction[i] == 1):
                    c.req.press_keys.append(buttons[i])
                else:
                    c.req.release_keys.append(buttons[i])
            c.req.get_image = False
            c.req.get_keys = False
            _ = c.send_request()
            if (args.output is not None):
                image = response.image
                with open(os.path.join(image_directory, '{}.jpg'.format(recording_index)), 'wb') as f:
                    f.write(image)
                recorded_data.append({'b': [buttons[i] for i in range(len(buttons)) if prediction[i]]})
                recording_index += 1
        # Sleep off whatever is left of this frame's time budget.
        sleep_time = ((target_time_per_frame - time.time()) + frame_time)
        if (sleep_time <= 0.0):
            print('[Warning] Can not keep up with the desired framerate.')
            sleep_time = 0.0
        else:
            time.sleep(sleep_time)
|
def play_game(model_name, queue, index):
    """Worker process: play ``args.games`` ViZDoom episodes with the given
    model (or random actions when ``args.random``) and push each episode's
    total reward onto *queue*.

    Uses module-level ``args`` and ``device``.

    Fixes: ``astype(np.int)`` — the ``np.int`` alias was removed in
    NumPy 1.24; also dropped an unused local RNG.
    """
    print('Starting process #{}..'.format(index))
    if (not args.random):
        model = torch.load(model_name, map_location=device)
        model.eval()
    env = vzd.DoomGame()
    env.load_config(args.config)
    if args.display:
        env.set_window_visible(True)
        env.set_mode(vzd.Mode.ASYNC_PLAYER)
    else:
        env.set_mode(vzd.Mode.PLAYER)
    env.init()
    for game in range(args.games):
        env.new_episode()
        o = env.get_state()
        (r, d, i) = (0.0, False, None)
        total_reward = 0
        # Frame stack starts as black frames and fills as the episode runs.
        stack = []
        for _ in range(args.framestack):
            stack.append(np.zeros((args.width, args.height, 3), dtype=np.uint8))
        while True:
            img = o.screen_buffer
            img = img.transpose([1, 2, 0])  # CHW -> HWC for PIL
            img = Image.fromarray(img)
            img = img.resize((args.width, args.height), Image.BILINEAR)
            img = np.asarray(img, dtype=np.float32)
            stack.insert(0, img)  # newest frame first
            while (len(stack) > args.framestack):
                stack.pop()
            if (len(stack) != args.framestack):
                continue
            np_stack = np.concatenate(stack, axis=2)
            np_stack = np.expand_dims(np_stack, axis=0)
            np_stack = np_stack.astype(np.float32)
            np_stack /= 255
            if args.random:
                actions_num = env.get_available_buttons_size()
                prediction = np.random.randint(2, size=actions_num).tolist()
            else:
                # Per-button sigmoid probabilities, sampled into 0/1 presses.
                prediction = model(torch.Tensor(np.swapaxes(np_stack, 1, 3)).to(device)).detach().cpu()[0]
                prediction = torch.sigmoid(prediction).numpy()
                prediction = (np.random.random(size=prediction.shape) < prediction).astype(int)
                prediction = prediction.tolist()
            r = env.make_action(prediction, args.rate)
            d = env.is_episode_finished()
            total_reward += r
            if d:
                queue.put(total_reward)
                break
            else:
                o = env.get_state()
        print('#{} finished game {}'.format(index, game))
|
def main():
    """Evaluate each model in ``args.models`` by running ``args.processes``
    ViZDoom workers, then write per-episode rewards and the average to
    <args.save>/<model_name>.txt.

    NOTE(review): this module defines ``main`` several times; the last
    definition executed is the one that wins at call time.
    """
    set_start_method('spawn')
    for model in args.models:
        model_name = os.path.basename(os.path.normpath(model))
        results_path = os.path.normpath(args.save)
        if (not os.path.exists(results_path)):
            os.mkdir(results_path)
        results_name = '{}.txt'.format(model_name)
        results_file = os.path.normpath(os.path.join(results_path, results_name))
        print('Evaluating model {}'.format(model))
        rewards = Queue()
        procs = []
        for i in range(args.processes):
            proc = Process(target=play_game, args=(model, rewards, i))
            proc.start()
            procs.append(proc)
        for proc in procs:
            proc.join()
        with open(results_file, 'w') as f:
            rewards_list = []
            while (not rewards.empty()):
                r = rewards.get()
                rewards_list.append(r)
                f.write('{}\n'.format(r))
                print(r)
            if (len(rewards_list) < 1):
                avg = 0
            else:
                avg = (sum(rewards_list) / len(rewards_list))
            f.write('Avg: {}'.format(avg))
            print('Avg: {}'.format(avg))
|
def get_avg_from_file(file_path):
    """Parse the trailing 'Avg: <value>' line of a results file."""
    with open(file_path) as handle:
        last_line = handle.readlines()[-1]
    return float(re.match('Avg: (.*)', last_line).group(1))
|
def get_stdev_from_file(file_path):
    """Sample standard deviation of the numeric lines in a results file."""
    return statistics.stdev(get_datapoints_from_file(file_path))
|
def get_datapoints_from_file(file_path):
    """Return every line of *file_path* that parses as a float, in order.

    Non-numeric lines (e.g. the trailing 'Avg: ...' summary) are skipped.
    """
    values = []
    with open(file_path) as handle:
        for line in handle:
            try:
                values.append(float(line))
            except ValueError:
                continue
    return values
|
def finish_recording(recording_path, env_name, unique_id, data):
    """Dump recorded button data to
    <recording_path>/trajectories_pressed_buttons/<env_name>/<unique_id>.json.
    """
    out_dir = os.path.join(recording_path, 'trajectories_pressed_buttons', '{}'.format(env_name))
    trajectory_file = os.path.join(out_dir, '{}.json'.format(unique_id))
    with open(trajectory_file, 'w') as f:
        json.dump(data, f)
|
def start_recording(recording_path, env_name):
    """Create the directories for a new recording session.

    Returns (unique_id, screens_dir) where unique_id is the current Unix
    timestamp in seconds; screens go under screens/<env>/<id> and the
    trajectory directory is created alongside.
    """
    unique_id = str(int(time.time()))
    env_dir = '{}'.format(env_name)
    screens_dir = os.path.join(recording_path, 'screens', env_dir, unique_id)
    os.makedirs(screens_dir)
    os.makedirs(os.path.join(recording_path, 'trajectories_pressed_buttons', env_dir), exist_ok=True)
    return (unique_id, screens_dir)
|
def main(args):
    """Record gameplay: poll the game binary for keys/mouse/frames at
    ``args.framerate`` and save JPEG frames plus per-frame input data.

    Hotkeys: Page Up + r starts (or checkpoints) a recording,
    Page Up + s stops and saves, Page Up + q saves and quits.
    """
    c = Connection(start_binary=(not args.dont_start_binary), binary_path=args.binary)
    record = False
    recording_id = None
    image_directory = None
    recorded_data = []
    recording_index = 0
    recording_start_time = None
    # Inputs are stored one frame late so each saved frame is paired with
    # the state captured for the previous request.
    previous_response = None
    previous_frame_time = None
    frame_time = None
    target_time_per_frame = (1.0 / args.framerate)
    print('Ready to record (Page Up + r)...')
    try:
        while True:
            frame_time = time.time()
            c.req.get_keys = True
            c.req.get_mouse = True
            c.req.get_image = True
            c.req.quality = args.quality
            c.req.process_name = args.process_name
            response = c.send_request()
            if ('page up' in response.pressed_keys):
                if ('q' in response.pressed_keys):
                    if record:
                        finish_recording(args.output, args.env_name, recording_id, recorded_data)
                    exit()
                if ('r' in response.pressed_keys):
                    # Checkpoint only once at least one second's worth of
                    # frames has been collected (debounces the hotkey).
                    if (record and (recording_index > args.framerate)):
                        finish_recording(args.output, args.env_name, recording_id, recorded_data)
                        print('Saved {} frames'.format(recording_index))
                    elif (record and (recording_index < args.framerate)):
                        continue
                    if (not record):
                        print('Recording started (Page Up + s to stop)...')
                        print('Or Page Up + r to save current frames.')
                    record = True
                    recorded_data = []
                    previous_response = None
                    previous_frame_time = None
                    recording_id = None
                    recording_index = 0
                    recording_start_time = time.time()
                    (recording_id, image_directory) = start_recording(args.output, args.env_name)
                    continue
                elif ('s' in response.pressed_keys):
                    if record:
                        record = False
                        finish_recording(args.output, args.env_name, recording_id, recorded_data)
                        print('Recording done with {} frames'.format(recording_index))
            if record:
                image = response.image
                with open(os.path.join(image_directory, '{}.jpg'.format(recording_index)), 'wb') as f:
                    f.write(image)
                recording_index += 1
                if previous_response:
                    (x, y) = (previous_response.mouse.x, previous_response.mouse.y)
                    pressed_keys = tuple(previous_response.pressed_keys)
                    recording_time_ms = int(((previous_frame_time - recording_start_time) * 1000))
                    recorded_data.append({'m': (x, y), 'b': pressed_keys, 't': recording_time_ms})
                previous_frame_time = frame_time
                previous_response = response
            # Sleep off whatever remains of this frame's time budget.
            sleep_time = ((target_time_per_frame - time.time()) + frame_time)
            if (sleep_time <= 0.0):
                print('[Warning] Can not keep up with the desired framerate.')
                sleep_time = 0.0
            else:
                time.sleep(sleep_time)
    except KeyboardInterrupt:
        # Best-effort save on Ctrl-C.
        # NOTE(review): passes args.process_name where the other calls pass
        # args.env_name — confirm this is intentional.
        if record:
            print('Saving current data to disk...')
            finish_recording(args.output, args.process_name, recording_id, recorded_data)
|
def compress_to_bytes(compress=True, **kwargs):
    """
    Serialize numpy arrays with ``np.savez`` and return the bytes,
    optionally LZ4-compressed.

    Parameters:
        compress: If True, compress the serialized bytes with LZ4.
        kwargs: Numpy arrays to store; forwarded directly to ``np.savez``.
    """
    # Both branches of the original performed the identical np.savez call;
    # hoist it and branch only on the compression step.
    bytes_buffer = BytesIO()
    np.savez(bytes_buffer, **kwargs)
    raw = bytes_buffer.getvalue()
    return lz4.frame.compress(raw) if compress else raw
|
def decompress_to_arrays(array_bytes, compress=True):
    """
    Restore numpy arrays from bytes; inverse of `compress_to_bytes`.

    Parameters:
        array_bytes: Serialized (optionally LZ4-compressed) bytes.
        compress: If True, LZ4-decompress before loading.
    """
    raw = lz4.frame.decompress(array_bytes) if compress else array_bytes
    return np.load(BytesIO(raw))
|
class AtariDataLoader():
    """Batch loader for an Atari demonstration dataset laid out as
    <dir>/trajectories/<game>/ (txt or json metadata) and
    <dir>/screens/<game>/<traj>/ (frame images).

    Exposes ``__len__`` (number of batches) and ``get_batch`` (a list of
    sample indices -> numpy batch). Despite the original docstring, this is
    not a keras Sequence subclass — it is a plain class.
    """
    def __init__(self, directory, game, batch_size=32, stack=3, controls=18, size=(84, 84), percentile=None, top_n=None, augment=False, preload=False, merge=False, dqn=False, json=False, fileformat='png', action_delay=0):
        # percentile / top_n optionally restrict the usable trajectories to
        # the best-scoring ones; json switches the metadata format.
        self.dir = directory
        self.game = game
        self.fileformat = fileformat
        self.batch_size = batch_size
        self.stack = stack
        self.controls = controls
        self.size = size
        self.traj_path = os.path.join(self.dir, 'trajectories', self.game)
        self.screen_path = os.path.join(self.dir, 'screens', self.game)
        self.all_trajs = self._get_trajectory_list()
        self.n_traj = len(self.all_trajs)
        self.augment = augment
        self.merge = merge
        self.dqn = dqn
        self.json = json
        self.action_delay = action_delay
        self.traj_len = []
        self.scores = []
        self.total_len = 0
        for i in range(self.n_traj):
            self.traj_len.append(self._get_samples_in_trajectory(i))
            self.scores.append(self._get_sample_score(i))
            self.total_len += self.traj_len[i]
        # Unfiltered copy of all lengths; used for delayed-action clamping.
        self.traj_len_all = self.traj_len[:]
        if (percentile is not None):
            # Keep only trajectories scoring at or above the given percentile.
            p = np.percentile(self.scores, percentile)
            top = filter((lambda x: (x[1] >= p)), zip(range(self.n_traj), self.scores))
            self.traj_len = list(map((lambda x: (x[0], self.traj_len[x[0]])), top))
            self.total_len = sum(map((lambda x: x[1]), self.traj_len))
        elif (top_n is not None):
            # Keep only the top_n best-scoring trajectories.
            top = sorted(zip(range(self.n_traj), self.scores), key=(lambda x: x[1]), reverse=True)[:top_n]
            self.traj_len = list(map((lambda x: (x[0], self.traj_len[x[0]])), top))
            self.total_len = sum(map((lambda x: x[1]), self.traj_len))
        else:
            # NOTE(review): range(self.total_len) is truncated by zip to
            # n_traj entries, so this effectively enumerates trajectories.
            self.traj_len = list(zip(range(self.total_len), self.traj_len))
        self.cache = []
        if preload:
            # NOTE(review): get_batch expects an iterable of sample indices,
            # but receives a bare int here — this preload path looks broken;
            # confirm before relying on it.
            for batch in range(len(self)):
                data = self.get_batch(batch)
                b = compress_to_bytes(img=data[0], action=data[1])
                self.cache.append(b)
                print('Cached {}/{}'.format(batch, len(self)))
            print('Preload done!')
    def _get_image_stacked(self, traj, id, augments=None):
        'Returns time-stacked or merged images from\n        the given trajectory and sample ID,\n        depending on the value of self.merge\n        '
        stack = []
        shape = None
        # Collect the current frame plus (stack - 1) preceding frames,
        # padding with zeros before the start of the trajectory.
        for i in range(self.stack):
            ix = (id - i)
            if (ix >= 0):
                stack.insert(0, self._get_image(traj, ix, augments))
                if (shape is None):
                    shape = stack[0].shape
            else:
                stack.insert(0, np.zeros(shape, dtype=np.uint8))
        if self.merge:
            # Merge into one frame via per-pixel maximum.
            stack = map(Image.fromarray, stack)
            img = reduce(ImageChops.lighter, stack)
            return np.asarray(img, dtype=np.uint8)
        else:
            # Stack frames along the channel axis.
            return np.concatenate(stack, axis=2)
    # NOTE(review): lru_cache on an instance method keys on self and keeps
    # every instance alive for the cache's lifetime (flake8-bugbear B019).
    @lru_cache(maxsize=int(1000000.0))
    def _get_image(self, traj, id, augments=None):
        'Returns image from the given trajectory and sample ID as\n        a numpy array\n        '
        traj_name = self.all_trajs[traj]
        filename = '{}.{}'.format(id, self.fileformat)
        path = os.path.join(self.dir, 'screens', self.game, traj_name, filename)
        img = Image.open(path)
        img.load()
        if (augments is not None):
            # Augments: random shadow rectangle, brightness, rotation,
            # translation, horizontal flip (shear/zoom unimplemented).
            if ('shadow' in augments):
                draw = Draw(img, 'RGBA')
                rect_color = (0, 0, 0, augments['shadow'][0])
                rect_w = augments['shadow'][3]
                rect_h = augments['shadow'][4]
                rect_x = augments['shadow'][1]
                rect_y = augments['shadow'][2]
                draw.rectangle([(rect_x - (rect_w / 2)), (rect_y - (rect_h / 2)), (rect_x + (rect_w / 2)), (rect_y + (rect_h / 2))], rect_color)
            if ('brightness' in augments):
                enhancer = ImageEnhance.Brightness(img)
                img = enhancer.enhance(augments['brightness'])
            if ('rotate' in augments):
                img = img.rotate(augments['rotate'])
            if ('shear' in augments):
                raise NotImplementedError
            if (('tx' in augments) and ('ty' in augments)):
                img = ImageChops.offset(img, xoffset=augments['tx'], yoffset=augments['ty'])
            if (('zx' in augments) and ('zy' in augments)):
                raise NotImplementedError
            if (('flip' in augments) and augments['flip']):
                img = img.transpose(Image.FLIP_LEFT_RIGHT)
        img = img.resize(self.size, Image.BILINEAR)
        img = np.asarray(img, dtype=np.uint8)
        return img
    def _flip_controls(self, control, game=None):
        "Flips the controls horizontally, i.e. switches left and right buttons.\n        Since qbert has diagonal movement, flipping the controls is\n        more complicated, and the 'game' parameter must be set to 'qbert'."
        controls = ['NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT', 'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE', 'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE']
        control_name = controls[control]
        new_control_name = control_name
        if (game == 'qbert'):
            # Qbert's axes are diagonal on screen, so a horizontal flip maps
            # between adjacent diagonals instead of swapping LEFT/RIGHT.
            if (control_name == 'UP'):
                new_control_name = 'LEFT'
            elif (control_name == 'RIGHT'):
                new_control_name = 'DOWN'
            elif (control_name == 'LEFT'):
                new_control_name = 'UP'
            elif (control_name == 'DOWN'):
                new_control_name = 'RIGHT'
            elif (control_name == 'UPRIGHT'):
                new_control_name = 'DOWNLEFT'
            elif (control_name == 'DOWNLEFT'):
                new_control_name = 'UPRIGHT'
            elif (control_name == 'UPFIRE'):
                new_control_name = 'LEFTFIRE'
            elif (control_name == 'RIGHTFIRE'):
                new_control_name = 'DOWNFIRE'
            elif (control_name == 'LEFTFIRE'):
                new_control_name = 'UPFIRE'
            elif (control_name == 'DOWNFIRE'):
                new_control_name = 'RIGHTFIRE'
            elif (control_name == 'UPRIGHTFIRE'):
                new_control_name = 'DOWNLEFTFIRE'
            elif (control_name == 'DOWNLEFTFIRE'):
                new_control_name = 'UPRIGHTFIRE'
        else:
            # Generic games: swap the RIGHT/LEFT component of the name.
            control_name = controls[control]
            if ('RIGHT' in control_name):
                new_control_name = control_name.replace('RIGHT', 'LEFT')
            elif ('LEFT' in control_name):
                new_control_name = control_name.replace('LEFT', 'RIGHT')
        return controls.index(new_control_name)
    # NOTE(review): lru_cache on a method — see note on _get_image (B019).
    @lru_cache(maxsize=128)
    def _get_data_lines(self, traj):
        # Read and cache the raw metadata lines of one trajectory file.
        traj_name = '{}.txt'.format(self.all_trajs[traj])
        with open(os.path.join(self.traj_path, traj_name)) as f:
            return f.read().splitlines()
    def _get_data(self, traj, id, flip=False):
        'Returns a list with the following contents:\n        [frame, reward, score, terminal, action, last]\n        '
        lines = self._get_data_lines(traj)
        # The first two lines of the txt format are header lines.
        num_frames = (len(lines) - 2)
        data = lines[(id + 2)].split(',')
        data = [s.strip() for s in data]
        for i in range(5):
            if (i == 3):
                # Column 3 is the terminal flag, stored as 'true'/'false'.
                data[i] = (True if (data[i].lower() == 'true') else False)
            else:
                data[i] = int(float(data[i]))
        if flip:
            data[4] = self._flip_controls(data[4], game=('qbert' if (self.game == 'qbert') else None))
        # Mark the final sample of the trajectory.
        if (id >= (num_frames - 1)):
            last = 1
        else:
            last = 0
        data.append(last)
        return list(data)
    @lru_cache(maxsize=128)
    def _get_json(self, traj):
        # Read and cache the parsed json metadata of one trajectory.
        traj_name = '{}.json'.format(self.all_trajs[traj])
        with open(os.path.join(self.traj_path, traj_name)) as f:
            return json.load(f)
    def _get_data_json(self, traj, id, flip=False):
        'Returns a list with the following contents:\n        [frame, reward, score, terminal, action, last]\n        '
        # NOTE(review): 'flip' is accepted but ignored in the json path.
        data = self._get_json(traj)['steps']
        last_id = (len(data) - 1)
        data = data[id]
        return [id, data['r'], 0, (id == last_id), data['a'], (id == last_id)]
    def _get_trajectory_list(self):
        'Returns a sorted list of all trajectory names'
        # Only trajectories that have both metadata and screens are usable.
        traj_datas = set(map((lambda x: x.split('.')[0]), os.listdir(self.traj_path)))
        traj_screens = set(os.listdir(self.screen_path))
        trajs = (traj_datas & traj_screens)
        return list(sorted(trajs, key=int))
    def _get_num_of_trajectories(self):
        'Returns the number of trajectories in this dataset'
        trajectories = os.listdir(os.path.join(self.dir, 'trajectories', self.game))
        return len(trajectories)
    def _get_sample_score(self, traj):
        'Returns the final score of the given trajectory ID'
        if self.json:
            # json format: sum the per-step rewards.
            traj_name = '{}.json'.format(self.all_trajs[traj])
            with open(os.path.join(self.traj_path, traj_name)) as f:
                steps = json.load(f)['steps']
            score = 0
            for step in steps:
                score += step['r']
            return int(score)
        else:
            # txt format: score column of the last line.
            traj_name = '{}.txt'.format(self.all_trajs[traj])
            with open(os.path.join(self.traj_path, traj_name)) as f:
                lines = f.read().splitlines()
            return int(float(lines[(- 1)].split(',')[2]))
    def _get_samples_in_trajectory(self, traj):
        'Returns the number of samples in the given trajectory ID'
        if self.json:
            traj_name = '{}.json'.format(self.all_trajs[traj])
            with open(os.path.join(self.traj_path, traj_name)) as f:
                lines = len(json.load(f)['steps'])
            return lines
        else:
            # txt format: frame index of the last line, plus one.
            traj_name = '{}.txt'.format(self.all_trajs[traj])
            with open(os.path.join(self.traj_path, traj_name)) as f:
                lines = f.read().splitlines()
            return (int(lines[(- 1)].split(',')[0]) + 1)
    def _get_index_traj_and_sample(self, index):
        'Returns the corresponding trajectory ID and sample ID for the\n        given index'
        # Walk the (traj_id, length) pairs until the flat index falls inside.
        total = 0
        for (i, t_len) in self.traj_len:
            if (index < (total + t_len)):
                return (i, (index - total))
            total += t_len
    def __len__(self):
        # Number of full batches; the remainder is dropped.
        return int((self.total_len / self.batch_size))
    def get_batch(self, samples):
        """Build a batch from an iterable of flat sample indices.

        Returns (x, y) arrays, or (x, y, x_next, reward, done) when
        ``self.dqn`` is set.
        """
        batch_x = []
        batch_y = []
        if self.dqn:
            batch_x_next = []
            batch_reward = []
            batch_done = []
        for sample in samples:
            (traj, ix) = self._get_index_traj_and_sample(sample)
            flip = False
            if self.augment:
                # Randomized augmentation parameters, drawn per sample.
                flip = choice([True, False])
                augments = {'shadow': (randrange(128, 255), uniform(0, ATARI_W), uniform(0, ATARI_H), uniform(10, 100), uniform(10, 100)), 'brightness': uniform(0.5, 1.5), 'rotate': uniform((- 2), 2), 'tx': randrange((- 5), 5), 'ty': randrange((- 5), 5), 'flip': flip}
            else:
                augments = None
            if self.json:
                data = self._get_data_json(traj, ix, flip)
            else:
                data = self._get_data(traj, ix, flip)
            if (self.action_delay != 0):
                # Label the frame with the action taken action_delay steps
                # later (clamped to the trajectory bounds).
                action_ix = (ix + self.action_delay)
                if (action_ix >= self.traj_len_all[traj]):
                    action_ix = (self.traj_len_all[traj] - 1)
                if (action_ix < 0):
                    action_ix = 0
                if self.json:
                    delayed_data = self._get_data_json(traj, action_ix, flip)
                else:
                    delayed_data = self._get_data(traj, action_ix, flip)
                data[4] = delayed_data[4]
            # DQN needs a next frame: skip non-terminal trajectory-final samples.
            if (self.dqn and (data[5] == 1) and (data[3] == 0)):
                continue
            batch_x.append(self._get_image_stacked(traj, ix, augments))
            if self.json:
                batch_y.append(np.array([data[4]]))
            else:
                # One-hot encode the action for the txt format.
                batch_y.append(np.eye(self.controls)[data[4]])
            if self.dqn:
                batch_reward.append(data[1])
                batch_done.append(data[5])
                if (not data[5]):
                    batch_x_next.append(self._get_image_stacked(traj, (ix + 1), augments))
                else:
                    batch_x_next.append(np.zeros(batch_x[0].shape, dtype=np.uint8))
        if (self.dqn == False):
            return (np.array(batch_x, dtype=np.uint8), np.array(batch_y, dtype=np.uint8))
        else:
            return (np.array(batch_x, dtype=np.uint8), np.array(batch_y, dtype=np.uint8), np.array(batch_x_next, dtype=np.uint8), np.array(batch_reward), np.array(batch_done, dtype=np.uint8))
|
class AtariDataLoaderProcess(multiprocessing.Process):
    """Worker process wrapping one AtariDataLoader: pulls sample-index lists
    from *request_queue* and pushes the built batches onto *response_queue*.

    NOTE(review): a second class with this exact name is defined later in
    this file (wrapping AtariHeadDataloader); the later definition shadows
    this one once executed — confirm which loader is actually intended.
    """
    def __init__(self, request_queue, response_queue, dataloader_args):
        # The loader is constructed in the parent process; it travels to the
        # child via the Process pickling machinery.
        self.loader = AtariDataLoader(**dataloader_args)
        self.request_queue = request_queue
        self.response_queue = response_queue
        super().__init__()
    def __len__(self):
        # Number of batches the wrapped loader can produce.
        return len(self.loader)
    def run(self):
        # Serve batch requests forever; terminated externally via terminate().
        while True:
            response = self.loader.get_batch(self.request_queue.get())
            self.response_queue.put(response)
|
class MultiprocessAtariDataLoader():
    """Fan a dataset out over several AtariDataLoaderProcess workers and
    serve their batches as an iterator.

    Note: batches can arrive in any order, but each is returned exactly
    once per pass. ``__iter__`` must be called before ``__next__`` (it
    initializes the counter and enqueues all batch requests up front).
    """
    def __init__(self, dataloader_args, workers):
        super().__init__()
        self.request_queue = multiprocessing.Manager().Queue()
        # Bounded so workers block instead of piling finished batches in RAM.
        self.queue = multiprocessing.Queue(maxsize=workers)
        # A throwaway local loader supplies the metadata (sizes, image shape).
        loader = AtariDataLoader(**dataloader_args)
        self.batch_size = loader.batch_size
        self.sample_length = loader.total_len
        self.length = len(loader)
        self.shape = loader._get_image(0, 0).shape
        self.loaders = []
        for i in range(workers):
            new_loader = AtariDataLoaderProcess(self.request_queue, self.queue, dataloader_args)
            self.loaders.append(new_loader)
        for i in self.loaders:
            i.start()
    def stop(self):
        # Hard-kill the worker processes.
        for i in self.loaders:
            i.terminate()
    def __len__(self):
        return self.length
    def __next__(self):
        if (self.iters < self.length):
            response = self.queue.get()
            self.iters += 1
            return response
        else:
            raise StopIteration
    def __iter__(self):
        self.iters = 0
        # Shuffle all sample indices, then enqueue every batch request now;
        # workers drain the request queue as the consumer iterates.
        samples = list(range(self.sample_length))
        shuffle(samples)
        for i in range(self.length):
            if ((i % 1000) == 0):
                print('Adding batch {} to queue'.format(i))
            batch = []
            for _ in range(self.batch_size):
                batch.append(samples.pop())
            self.request_queue.put(batch)
        return self
|
class AtariHeadDataloader():
    """Batch loader for an Atari-HEAD-style dataset: one <traj>.txt metadata
    file plus a <traj>/ directory of frame PNGs per trajectory, all under a
    single directory.

    Mirrors AtariDataLoader's interface (__len__, get_batch) but without
    score filtering or augmentation.
    """
    def __init__(self, directory, batch_size=32, stack=3, controls=18, size=(84, 84), percentile=None, top_n=None, augment=False, preload=False, merge=False, dqn=False, action_delay=0, print_stats=False):
        # NOTE(review): percentile, top_n, augment, preload and print_stats
        # are accepted for signature parity but never used here.
        self.batch_size = batch_size
        self.stack = stack
        self.controls = controls
        self.size = size
        self.merge = merge
        self.dqn = dqn
        self.action_delay = action_delay
        self.directory = directory
        self.all_trajs = self._get_trajectory_list()
        self.n_traj = len(self.all_trajs)
        self.traj_len = []
        for traj in range(len(self.all_trajs)):
            self.traj_len.append(self._get_samples_in_trajectory(traj))
        self.total_len = sum(self.traj_len)
    def _get_trajectory_list(self):
        'Returns a sorted list of all trajectory names'
        # Trajectory names are the .txt metadata files without the extension.
        names = os.listdir(self.directory)
        names = list(filter((lambda x: x.endswith('.txt')), names))
        names = list(map((lambda x: x[:(- 4)]), names))
        names = sorted(names)
        return names
    def _get_samples_in_trajectory(self, traj):
        'Returns the number of samples in the given trajectory ID'
        # First line of the metadata file is a header.
        lines = self._get_data_lines(traj)
        return (len(lines) - 1)
    def _get_index_traj_and_sample(self, index):
        'Returns the corresponding trajectory ID and sample ID for the\n        given index'
        total = 0
        for (i, t_len) in enumerate(self.traj_len):
            if (index < (total + t_len)):
                return (i, (index - total))
            total += t_len
    def _get_frame_id(self, traj, index):
        'Returns the frame_id field for the given trajectory and index'
        lines = self._get_data_lines(traj)
        return lines[(index + 1)].split(',')[0]
    def _get_image_stacked(self, traj, id):
        'Returns time-stacked or merged images from\n        the given trajectory and sample ID,\n        depending on the value of self.merge\n        '
        stack = []
        shape = None
        # Current frame plus preceding frames, zero-padded before the start.
        for i in range(self.stack):
            ix = (id - i)
            if (ix >= 0):
                stack.insert(0, self._get_image(traj, ix))
                if (shape is None):
                    shape = stack[0].shape
            else:
                stack.insert(0, np.zeros(shape, dtype=np.uint8))
        if self.merge:
            # Merge into one frame via per-pixel maximum.
            stack = map(Image.fromarray, stack)
            img = reduce(ImageChops.lighter, stack)
            return np.asarray(img, dtype=np.uint8)
        else:
            return np.concatenate(stack, axis=2)
    def _get_image(self, traj, index):
        # Load and resize the frame whose name is the sample's frame_id.
        traj_name = self.all_trajs[traj]
        frame_id = self._get_frame_id(traj, index)
        filename = '{}.png'.format(frame_id)
        path = os.path.join(self.directory, traj_name, filename)
        img = Image.open(path)
        img.load()
        img = img.resize(self.size, Image.BILINEAR)
        img = np.asarray(img, dtype=np.uint8)
        return img
    # NOTE(review): lru_cache on an instance method keeps the instance alive
    # for the cache's lifetime (flake8-bugbear B019).
    @lru_cache(maxsize=128)
    def _get_data_lines(self, traj):
        traj_name = '{}.txt'.format(self.all_trajs[traj])
        with open(os.path.join(self.directory, traj_name)) as f:
            return f.read().splitlines()
    def _get_data(self, traj, id):
        'Returns a list with the following contents:\n        [frame, reward, score, terminal, action, last]\n        '
        lines = self._get_data_lines(traj)
        num_frames = (len(lines) - 1)
        data = lines[(id + 1)].split(',')[:6]
        data = [s.strip() for s in data]
        # Missing/'null' fields fall back to sentinel values.
        try:
            data[2] = int(data[2])
        except ValueError:
            data[2] = (- 1)
        try:
            data[4] = int(data[4])
        except ValueError:
            data[4] = 0
        try:
            data[5] = int(data[5])
        except ValueError:
            data[5] = 0
        if (id >= (num_frames - 1)):
            last = 1
        else:
            last = 0
        data.append(last)
        # Column mapping assumed from the Atari-HEAD csv layout
        # (reward=col 4, score=col 2, action=col 5) — TODO confirm.
        return [id, data[4], data[2], False, data[5], last]
    def __len__(self):
        # Number of full batches; the remainder is dropped.
        return int((self.total_len / self.batch_size))
    def get_batch(self, samples):
        """Build a batch from an iterable of flat sample indices.

        Returns (x, y) arrays, or (x, y, x_next, reward, done) when
        ``self.dqn`` is set.
        """
        batch_x = []
        batch_y = []
        if self.dqn:
            batch_x_next = []
            batch_reward = []
            batch_done = []
        for sample in samples:
            (traj, ix) = self._get_index_traj_and_sample(sample)
            data = self._get_data(traj, ix)
            if (self.action_delay != 0):
                # Label the frame with the action taken action_delay steps
                # later (clamped to the trajectory bounds).
                action_ix = (ix + self.action_delay)
                if (action_ix >= self.traj_len[traj]):
                    action_ix = (self.traj_len[traj] - 1)
                if (action_ix < 0):
                    action_ix = 0
                delayed_data = self._get_data(traj, action_ix)
                data[4] = delayed_data[4]
            # DQN needs a next frame: skip non-terminal trajectory-final samples.
            if (self.dqn and (data[5] == 1) and (data[3] == 0)):
                continue
            batch_x.append(self._get_image_stacked(traj, ix))
            batch_y.append(np.eye(self.controls)[data[4]])
            if self.dqn:
                batch_reward.append(data[1])
                batch_done.append(data[5])
                if (not data[5]):
                    batch_x_next.append(self._get_image_stacked(traj, (ix + 1)))
                else:
                    batch_x_next.append(np.zeros(batch_x[0].shape, dtype=np.uint8))
        if (self.dqn == False):
            return (np.array(batch_x, dtype=np.uint8), np.array(batch_y, dtype=np.uint8))
        else:
            return (np.array(batch_x, dtype=np.uint8), np.array(batch_y, dtype=np.uint8), np.array(batch_x_next, dtype=np.uint8), np.array(batch_reward), np.array(batch_done, dtype=np.uint8))
|
class AtariDataLoaderProcess(multiprocessing.Process):
    """Process that runs a single AtariHeadDataloader instance, serving
    batch requests from a queue."""

    def __init__(self, request_queue, response_queue, dataloader_args):
        # NOTE(review): the dataloader is constructed in the parent process
        # and must be picklable for the 'spawn' start method (Windows /
        # macOS) — confirm, or construct it inside run() instead.
        self.loader = AtariHeadDataloader(**dataloader_args)
        self.request_queue = request_queue
        self.response_queue = response_queue
        super().__init__()

    def __len__(self):
        # Number of batches the wrapped loader serves per epoch.
        return len(self.loader)

    def run(self):
        # Serve forever: pull a list of sample indices, push the batch.
        # The process is expected to be terminate()d by its owner.
        while True:
            response = self.loader.get_batch(self.request_queue.get())
            self.response_queue.put(response)
|
class MultiprocessAtariHeadDataLoader():
    """Creates multiple dataloader processes and serves data from them
    as an iterator.

    Note: The iterator can return batches in any order, but is guaranteed
    to return every batch exactly once.
    """

    def __init__(self, dataloader_args, workers):
        super().__init__()
        # Shared request queue feeds work to all workers; the bounded
        # response queue (maxsize=workers) provides backpressure.
        self.request_queue = multiprocessing.Manager().Queue()
        self.queue = multiprocessing.Queue(maxsize=workers)
        # Build one throwaway in-process loader just to read metadata.
        loader = AtariHeadDataloader(**dataloader_args)
        self.batch_size = loader.batch_size
        self.sample_length = loader.total_len
        self.length = len(loader)
        self.shape = loader._get_image(0, 0).shape
        self.loaders = []
        for i in range(workers):
            new_loader = AtariDataLoaderProcess(self.request_queue, self.queue, dataloader_args)
            self.loaders.append(new_loader)
        for i in self.loaders:
            i.start()

    def stop(self):
        # Hard-kill the workers (their run() loops forever).
        for i in self.loaders:
            i.terminate()

    def __len__(self):
        return self.length

    def __next__(self):
        # NOTE(review): self.iters is initialized only in __iter__; calling
        # __next__ before iter(self) raises AttributeError.
        if (self.iters < self.length):
            response = self.queue.get()
            self.iters += 1
            return response
        else:
            raise StopIteration

    def __iter__(self):
        # Enqueue every batch's shuffled sample indices up front; workers
        # return finished batches in whatever order they complete.
        self.iters = 0
        samples = list(range(self.sample_length))
        shuffle(samples)
        for i in range(self.length):
            if ((i % 1000) == 0):
                print('Adding batch {} to queue'.format(i))
            batch = []
            for _ in range(self.batch_size):
                batch.append(samples.pop())
            self.request_queue.put(batch)
        return self
|
def main(args):
    """Convert a recorded episode JSON (raw pressed buttons per step)
    into the reduced action format defined by KEY_MAPPING[args.game].
    """
    with open(args.input) as f:
        input_data = json.load(f)
    key_mapping = KEY_MAPPING[args.game]
    # One representative button names each action group in the output.
    button_representatives = [buttons[0] for buttons in key_mapping]
    new_steps = []
    for step in input_data:
        pressed_buttons = step['b']
        # Each group is active if any of its buttons was pressed.
        new_action = [
            int(any(button in pressed_buttons for button in buttons))
            for buttons in key_mapping
        ]
        new_steps.append({'r': 0.0, 't': step['t'], 'a': new_action})
    new_data = {'allowed_buttons': button_representatives, 'steps': new_steps}
    with open(args.output, 'w') as f:
        json.dump(new_data, f)
|
def human_normalized_score(score, random, human, stdev=None):
    """Express *score* as a percentage of human performance.

    Returns (normalized_score, upper_error, lower_error); the two error
    terms are 0 when *stdev* is None.
    """
    span = human - random
    norm_score = 100 * (score - random) / span
    if stdev is None:
        return (norm_score, 0, 0)
    upper = 100 * (score + stdev - random) / span
    lower = 100 * (score - stdev - random) / span
    return (norm_score, upper - norm_score, norm_score - lower)
|
def figure_nodelay_atari():
    """Plot human-normalized behavioral-cloning scores per dataset for
    the five Atari games from results.json and save
    figure_atari.{pdf,png}."""
    with open('results.json') as f:
        results = json.load(f)
    atari_games = ['Ms. Pac-Man', 'Video Pinball', 'Q*bert', "Montezuma's Revenge", 'Space Invaders']
    (_, axs) = plt.subplots(len(atari_games), 1, sharex=True, figsize=(6, 8))
    for (k, game) in enumerate(atari_games):
        labels = []
        means = []
        stdevs_low = []
        stdevs_high = []
        for dataset in ['Top 5%', 'Top 50%', 'All', 'Atari-HEAD']:
            # Not every game has every dataset variant.
            if (dataset not in results['bc'][game]):
                continue
            labels.append('{}'.format(dataset))
            mean = results['bc'][game][dataset]['mean']
            stdev = results['bc'][game][dataset]['stdev']
            (norm_mean, norm_upper, norm_lower) = human_normalized_score(mean, results['random'][game]['mean'], results['human'][game]['mean'], stdev)
            means.append(norm_mean)
            stdevs_low.append(norm_lower)
            stdevs_high.append(norm_upper)
        stdevs = [stdevs_low, stdevs_high]
        axs[k].invert_yaxis()
        axs[k].set_yticks(range(len(labels)))
        axs[k].set_yticklabels(labels)
        axs[k].tick_params(axis='both', which='major')
        # Only the bottom subplot shows x tick labels and the axis label.
        if (k != (len(atari_games) - 1)):
            plt.setp(axs[k].get_xticklabels(), visible=False)
        else:
            axs[k].set_xlabel('% of human score')
        axs[k].set_xlim(left=(-10), right=35)
        axs[k].set_title(game, fontsize='medium')
        axs[k].barh(range(len(labels)), means, xerr=stdevs)
        # Bug fix: grid(b=True, ...) — the `b` keyword was renamed to
        # `visible` in Matplotlib 3.5 and removed later; passing the flag
        # positionally works on both old and new releases.
        axs[k].grid(True, which='major', axis='x', color='#999999', linestyle='-', linewidth=0.25)
        axs[k].set_axisbelow(True)
    plt.tight_layout()
    plt.savefig('figure_atari.pdf', dpi=400, bbox_inches='tight', pad_inches=0)
    plt.savefig('figure_atari.png', dpi=400, bbox_inches='tight', pad_inches=0)
|
def figure_nodelay():
    """Plot human-normalized BC scores ('All' dataset) for every
    non-Atari game in results.json and save figure_all.{pdf,png}."""
    with open('results.json') as f:
        results = json.load(f)
    games = results['bc'].keys()
    atari_games = ['Ms. Pac-Man', 'Video Pinball', 'Q*bert', "Montezuma's Revenge", 'Space Invaders']
    games = [game for game in games if (game not in atari_games)]
    (_, ax) = plt.subplots(1, figsize=(6, 3.25))
    labels = []
    means = []
    stdevs_low = []
    stdevs_high = []
    for game in games:
        labels.append('{}'.format(game))
        mean = results['bc'][game]['All']['mean']
        stdev = results['bc'][game]['All']['stdev']
        (norm_mean, norm_upper, norm_lower) = human_normalized_score(mean, results['random'][game]['mean'], results['human'][game]['mean'], stdev)
        means.append(norm_mean)
        stdevs_low.append(norm_lower)
        stdevs_high.append(norm_upper)
    stdevs = [stdevs_low, stdevs_high]
    ax.invert_yaxis()
    ax.set_yticks(range(len(labels)))
    ax.set_yticklabels(labels, fontsize=8.5)
    ax.tick_params(axis='both', which='major')
    ax.set_xlabel('% of human score')
    ax.barh(range(len(labels)), means, xerr=stdevs)
    # Bug fix: grid(b=True, ...) — the `b` keyword was renamed to `visible`
    # in Matplotlib 3.5 and removed later; pass the flag positionally.
    ax.grid(True, which='major', axis='x', color='#999999', linestyle='-', linewidth=0.25)
    ax.set_axisbelow(True)
    plt.tight_layout()
    plt.savefig('figure_all.pdf', dpi=400, bbox_inches='tight', pad_inches=0)
    plt.savefig('figure_all.png', dpi=400, bbox_inches='tight', pad_inches=0)
|
def figure_delay():
    """Plot human-normalized scores versus action delay for two dataset
    groups (Atari GC top-5% and Atari-HEAD) and save
    figure_delay.{pdf,png}."""
    with open('results.json') as f:
        results = json.load(f)
    (_, axs) = plt.subplots(2, 5, figsize=(12, 5), sharex=True)
    # One color per delay value.
    # NOTE(review): cm.get_cmap(name, lut) was deprecated in Matplotlib 3.7
    # and removed in 3.9 — migrate to
    # matplotlib.colormaps['coolwarm'].resampled(9) when the Matplotlib
    # floor allows.
    coolwarm = cm.get_cmap('coolwarm', 9)
    colors = [coolwarm(x) for x in np.linspace(0, 1, 9)]
    for row in range(2):
        # Row 0: Atari GC top-5% results; row 1: Atari-HEAD results.
        if (row == 0):
            dataset = 'atarigc_95'
        else:
            dataset = 'atarihead'
        games = results['delay_{}'.format(dataset)].keys()
        for (k, game) in enumerate(games):
            # Strip the dataset suffix to look up the random/human baselines.
            game_name = game.replace('\n(Atari-HEAD)', '').replace('\n(Atari GC)', '')
            labels = []
            means = []
            stdevs_low = []
            stdevs_high = []
            for delay in ['-100', '-10', '-5', '-2', '0', '2', '5', '10', '100']:
                mean = results['delay_{}'.format(dataset)][game][delay]['mean']
                stdev = results['delay_{}'.format(dataset)][game][delay]['stdev']
                (norm_mean, norm_upper, norm_lower) = human_normalized_score(mean, results['random'][game_name]['mean'], results['human'][game_name]['mean'], stdev)
                means.append(norm_mean)
                stdevs_low.append(norm_lower)
                stdevs_high.append(norm_upper)
                labels.append(delay)
            axs[(row, k)].bar(range(len(labels)), means, yerr=[stdevs_low, stdevs_high], width=1.0, color=colors)
            axs[(row, k)].set_xticks(range(len(labels)))
            axs[(row, k)].set_xticklabels(labels, rotation='vertical')
            axs[(row, k)].tick_params(axis='x', which='major')
            # Only the leftmost column carries the y-axis label.
            if (k == 0):
                axs[(row, k)].set_ylabel('% of human score')
            axs[(row, k)].set_title(game, fontsize='medium')
    plt.tight_layout()
    plt.savefig('figure_delay.pdf', dpi=400, bbox_inches='tight', pad_inches=0)
    plt.savefig('figure_delay.png', dpi=400, bbox_inches='tight', pad_inches=0)
|
def figure_learning():
    """Plot training loss and per-epoch evaluation scores for the
    space_invaders_all run and save figure_learning.{pdf,png}."""
    def get_avg_from_file(file_path):
        # The last line of an eval file is 'Avg: <score>'.
        with open(file_path) as f:
            avg_line = f.readlines()[(- 1)]
        match = re.match('Avg: (.*)', avg_line)
        return float(match.group(1))

    def get_stdev_from_file(file_path):
        # Std over the individual episode scores in the file.
        values = get_datapoints_from_file(file_path)
        return statistics.stdev(values)

    def get_datapoints_from_file(file_path):
        # Any line that parses as a float is one episode score.
        with open(file_path) as f:
            lines = f.readlines()
        values = []
        for line in lines:
            try:
                values.append(float(line))
            except ValueError:
                pass
        return values

    with open('results/space_invaders_all_2-history.json', 'r') as f:
        history = json.load(f)
    (_, axs) = plt.subplots(1, 2, figsize=(6, 3))
    axs[0].plot(history['loss'], label='loss')
    axs[0].set_title('Training loss', fontsize='medium')
    axs[0].set_ylabel('Loss')
    axs[0].set_xlabel('Epoch')
    axs[0].set_xticks(range(10))
    axs[0].set_xticklabels([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    repeat = 2
    # Match eval files named '<run>_<repeat>_<epoch>.pt.txt'.
    r = re.compile('(.*)_{}_([0-9]{{1,4}})\\.pt\\.txt'.format(repeat))
    files = []
    path = os.path.normpath('results/')
    for entry in os.listdir(path):
        full_entry = os.path.join(path, entry)
        if os.path.isfile(full_entry):
            match = r.match(entry)
            if ((match is not None) and (match.group(1) == 'space_invaders_all')):
                epoch = int(match.group(2))
                files.append((epoch, get_avg_from_file(full_entry), get_stdev_from_file(full_entry), get_datapoints_from_file(full_entry)))
    files.sort(key=(lambda x: x[0]))
    (x, y, yerr, points) = zip(*files)
    x = list(x)
    y = list(y)
    yerr = list(yerr)
    for (epoch, entry, stdev, _) in files:
        print('{}: {} (std {})'.format(epoch, entry, stdev))
    for (i, v) in enumerate(x):
        for _y in points[i]:
            # Fix: draw the raw episode scores on the evaluation axes
            # explicitly; the original used plt.scatter, which targets
            # whatever axes happen to be "current" and is fragile.
            axs[1].scatter(v, _y, marker='_', c='#00000028', linewidths=1)
    axs[1].errorbar(x, y, yerr=yerr)
    axs[1].set_title('Evaluation score', fontsize='medium')
    axs[1].set_xlabel('Epoch')
    axs[1].set_ylabel('Score')
    axs[1].set_xticks([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    axs[1].set_xticklabels([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    plt.tight_layout()
    plt.savefig('figure_learning.pdf', dpi=400, bbox_inches='tight', pad_inches=0)
    plt.savefig('figure_learning.png', dpi=400, bbox_inches='tight', pad_inches=0)
|
def main(args):
    """Print per-episode scores for the given recording files, followed
    by their mean and standard deviation."""
    scores = []
    for filepath in args.inputs:
        with open(filepath) as f:
            episode = json.load(f)
        # Episode score = sum of per-step rewards.
        scores.append(sum(step['r'] for step in episode['steps']))
    print('Individual scores: ')
    pprint(scores)
    print('Mean: {:.3f}. Std: {:.3f}'.format(np.mean(scores), np.std(scores)))
|
class Mnih2015(nn.Module):
    """CNN head similar to the one used in Mnih et al. 2015
    (Human-level control through deep reinforcement learning)."""

    def __init__(self, image_shape, num_channels, num_actions):
        super(Mnih2015, self).__init__()
        self.num_actions = num_actions
        self.conv1 = nn.Conv2d(num_channels, 32, 8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, 3, stride=1)
        # Probe the conv stack with a dummy input to size the first
        # fully-connected layer.
        probe = torch.randn(1, num_channels, *image_shape)
        probe = self.conv3(self.conv2(self.conv1(probe)))
        self.conv3_size = np.prod(probe.shape)
        print('conv3: {}'.format(self.conv3_size))
        self.fc1 = nn.Linear(self.conv3_size, 512)
        self.fc2 = nn.Linear(512, num_actions)

    def forward(self, x):
        # Three ReLU conv layers, flatten, then the two FC layers.
        for conv in (self.conv1, self.conv2, self.conv3):
            x = F.relu(conv(x))
        x = x.view(-1, self.conv3_size)
        return self.fc2(F.relu(self.fc1(x)))
|
class Connection():
    """Automatically starts the binary and creates a socket connection to it.

    When started with the default arguments, will start the binary on an open
    port and connect to it.

    If start_binary is set to False, the binary will not be automatically
    started, and connection will instead be made to the given address and
    port.

    Once connection has been made, the req member will be a protobuf Request
    class as defined in messages.proto. This member can be edited to set the
    message fields for the next request.

    The send_request() method will send the current request to the binary.
    """

    def __init__(self, address='localhost', port=None, start_binary=True, binary_path='main'):
        self.req = messages_pb2.Request()
        if (port is None):
            # Ask the OS for a free port by binding port 0, then release it.
            # (Small race: another process could grab the port before the
            # binary binds it.)
            tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tcp.bind(('', 0))
            (_, port) = tcp.getsockname()
            tcp.close()
        if start_binary:
            try:
                # Both branches are identical today; kept split in case the
                # Windows invocation ever needs to differ.
                if (platform.system() == 'Windows'):
                    subprocess.Popen([binary_path, '-p', str(port)], stdout=subprocess.DEVNULL)
                else:
                    subprocess.Popen([binary_path, '-p', str(port)], stdout=subprocess.DEVNULL)
            except OSError:
                print('Starting the binary failed')
                sys.exit()
        # Retry for ~1 second while the binary starts up.
        # NOTE(review): if all 10 attempts fail, self.s is never assigned
        # and the setsockopt below raises AttributeError.
        for _ in range(10):
            try:
                self.s = socket.create_connection((address, port))
                break
            except ConnectionRefusedError:
                time.sleep(0.1)
                continue
        # Disable Nagle's algorithm for low-latency request/response.
        self.s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)

    def send_request(self):
        """Send the Request message stored in this.req and resets it to
        default values.

        Returns the Response object received from the binary, or False if
        decoding the incoming message failed.
        """
        serialized = self.req.SerializeToString()
        self.req = messages_pb2.Request()
        # Wire format: 4-byte big-endian length prefix, then the protobuf.
        msg_len = len(serialized)
        sent = self.s.send(msg_len.to_bytes(4, 'big'))
        total_sent = 0
        # Loop until the whole payload is sent (send may be partial).
        while (total_sent < msg_len):
            sent = self.s.send(serialized[total_sent:])
            total_sent += sent
        # Read the 4-byte length prefix of the response.
        data = b''
        while (len(data) < 4):
            received = self.s.recv((4 - len(data)))
            if (len(received) == 0):
                raise ConnectionResetError('Connection was closed')
            data += received
        msg_len = int.from_bytes(data, 'big')
        # Read the full response body.
        data = b''
        while (len(data) < msg_len):
            received = self.s.recv((msg_len - len(data)))
            if (len(received) == 0):
                raise ConnectionResetError('Connection was closed')
            data += received
        try:
            resp_msg = messages_pb2.Response()
            resp_msg.ParseFromString(data)
            return resp_msg
        except google.protobuf.message.DecodeError as e:
            print('DecodeError in reponse: {}'.format(e))
            return False
|
def predict(q):
    """Answer a single natural-language question *q* with the trained
    key-value memory network.

    Relies on module-level globals: vocab, w2i, i2w_label, max_query_len,
    max_mem_size, kv_pairs, stopwords and model.
    """
    q_tokens = word_tokenize(q)
    q_tokens = lower_list(q_tokens)
    # Merge multi-word entities present in the vocab into single tokens.
    q_tokens = find_ngrams(vocab, q_tokens, 100000)
    print('q_tokens:', q_tokens)
    vec_q = [w2i[w] for w in q_tokens if (w in w2i)]
    # Zero-pad the query to the fixed model input length.
    q_pad_len = max(0, (max_query_len - len(vec_q)))
    vec_q += ([0] * q_pad_len)
    vec_q = np.array(vec_q)
    vec_q = np.reshape(vec_q, (1, len(vec_q)))
    # Retrieve candidate KB memories for this question.
    # NOTE(review): a 3-tuple is passed here while the visible
    # load_kv_dataset unpacks 2-tuples (q, _) — confirm which variant of
    # load_kv_dataset this script actually pairs with.
    (data_k, data_v) = load_kv_dataset([(None, q_tokens, None)], kv_pairs, stopwords)
    vec_k = vectorize_kv(data_k, 2, max_mem_size, w2i)
    vec_v = vectorize_kv(data_v, 1, max_mem_size, w2i)
    int_predict = model.predict([vec_k, vec_v, vec_q], batch_size=1, verbose=0)
    # Report the highest-probability answer label.
    print('A:', i2w_label[np.argmax(int_predict[0])])
|
def MemNNKV(mem_key_len, mem_val_len, mem_size, query_maxlen, vocab_size, embd_size, answer_size):
    """Build and compile a Key-Value Memory Network.

    Keys, values and the query share a single embedding matrix; two hops
    of softmax attention over the keys read out value encodings, and a
    dense softmax over the final controller state predicts the answer.
    """
    print('mem_size:', mem_size)
    print('q_max', query_maxlen)
    print('embd_size', embd_size)
    print('vocab_size', vocab_size)
    print('-----------')
    key = Input((mem_size, mem_key_len), name='Key_Input')
    val = Input((mem_size, mem_val_len), name='Val_Input')
    question = Input((query_maxlen,), name='Question_Input')
    # One shared embedding ("A" matrix) for keys, values and the question.
    shared_embd_A = Embedding(input_dim=vocab_size, output_dim=embd_size)
    key_encoded = shared_embd_A(key)
    key_encoded = BatchNormalization()(key_encoded)
    # Bag-of-words encoding: sum token embeddings within each memory slot.
    key_encoded = Lambda((lambda x: K.sum(x, axis=2)))(key_encoded)
    val_encoded = shared_embd_A(val)
    val_encoded = BatchNormalization()(val_encoded)
    val_encoded = Lambda((lambda x: K.sum(x, axis=2)))(val_encoded)
    question_encoded = shared_embd_A(question)
    question_encoded = BatchNormalization()(question_encoded)
    # The question is a single bag of words (sum over its tokens).
    question_encoded = Lambda((lambda x: K.sum(x, axis=1)))(question_encoded)
    q = question_encoded
    # Two attention hops over the key-value memory.
    for h in range(2):
        # Attention weights of the controller state against each key slot.
        ph = dot([q, key_encoded], axes=(1, 2))
        ph = Activation('softmax')(ph)
        # Weighted sum of value encodings.
        o = dot([ph, val_encoded], axes=(1, 1))
        print('o', o.shape)
        # Hop-specific linear update of the controller state.
        R = Dense(embd_size, input_shape=(embd_size,), name=('R_Dense_h' + str((h + 1))))
        q = R(add([q, o]))
        q = BatchNormalization()(q)
    answer = Dense(answer_size, name='last_Dense')(q)
    answer = BatchNormalization()(answer)
    preds = Activation('softmax')(answer)
    model = Model([key, val, question], preds)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
|
def save_pickle(d, path):
    """Serialize *d* to *path* with pickle."""
    print('save pickle to', path)
    with open(path, mode='wb') as handle:
        pickle.dump(d, handle)
|
def load_pickle(path):
    """Deserialize and return the pickled object stored at *path*."""
    print('load', path)
    with open(path, mode='rb') as handle:
        return pickle.load(handle)
|
def lower_list(word_list):
    """Return a copy of *word_list* with every word lowercased."""
    return list(map(str.lower, word_list))
|
def load_entities(path):
    """Load one entity per line from *path*, lowercased and deduplicated.

    Order of the returned list is unspecified (set iteration order).
    """
    with open(path, 'r') as f:
        unique = {line.lower().rstrip() for line in f}
    return list(unique)
|
def find_ngrams(token_dict, text, n):
    """Break *text* into ngrams that appear in ``token_dict``.

    See: https://github.com/facebookresearch/ParlAI/blob/master/parlai/core/dict.py#L31
    token_dict: {'hello world', 'ol boy'}
    text: ['hello', 'world', 'buddy', 'ol', 'boy']
    n: max n of n-gram
    ret: ['hello world', 'buddy', 'ol boy']
    """
    if n <= 1:
        return text
    result = []
    pending = []           # tokens that did not start a known n-gram yet
    remaining = text[:]
    while len(remaining) >= n:
        candidate = ' '.join(remaining[:n])
        if candidate in token_dict:
            # Resolve the skipped prefix with smaller n-grams first.
            result.extend(find_ngrams(token_dict, pending, min(len(pending), n - 1)))
            pending = []
            result.append(candidate)
            remaining = remaining[n:]
        else:
            pending.append(remaining.pop(0))
    leftover = pending + remaining
    result.extend(find_ngrams(token_dict, leftover, min(len(leftover), n - 1)))
    return result
|
def load_task(fpath):
    """Load (question, answer) pairs from a QA task file.

    Lines look like '<turn> <text>'; QA lines have a tab separating the
    question from comma-separated answers.
    """
    print('load', fpath)
    with open(fpath, encoding='utf-8') as f:
        lines = f.readlines()
    data = []
    for (i, l) in enumerate(lines):
        l = l.rstrip()
        (turn, left) = l.split(' ', 1)
        # Only lines containing a tab carry an answer; others are skipped.
        if ('\t' in left):
            (q, a) = left.split('\t', 1)
            # Keep only the text after the first '?'.
            q = q.split('?', 1)[1]
            # NOTE(review): splitting on ' 1:' yields a list of string
            # fragments, not word tokens — confirm this matches the
            # question encoding produced upstream.
            q = q.split(' 1:')[1:]
            q = lower_list(q)
            a = a.split(', ')
            a = lower_list(a)
            data.append((q, a))
    return data
|
def vectorize(data, w2i, query_maxlen, w2i_label, use_multi_label=False):
    """Vectorize (question, answer) pairs.

    Questions become zero-padded id sequences of length *query_maxlen*;
    answers become one-hot (or multi-hot) label vectors.
    """
    questions, answers = [], []
    for question, answer in data:
        # Map known words to ids, truncate, then right-pad with zeros.
        ids = [w2i[w] for w in question if w in w2i][:query_maxlen]
        ids.extend([0] * (query_maxlen - len(ids)))
        label = np.zeros(len(w2i_label))
        if use_multi_label:
            for a in answer:
                label[w2i_label[a]] = 1
        else:
            label[w2i_label[answer[0]]] = 1
        questions.append(ids)
        answers.append(label)
    return (np.array(questions, dtype=np.uint32), np.array(answers, dtype='byte'))
|
def load_kv_pairs(path, token_dict, max_token_length, is_save_pickle=False):
    """Load (key, value) pairs from a movie KB text file.

    Each relation line yields one forward pair per value,
    ((entity, relation), [value]), plus the reverse pair
    ((value, !relation), [entity]).
    """
    relations = ['directed_by', 'written_by', 'starred_actors', 'release_year', 'has_genre', 'has_tags', 'in_language']
    kv_pairs = []
    with open(path, 'r') as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        if (i % 5000) == 0:
            print('load_kv_pairs:', i, '/', len(lines))
        if line == '\n':
            continue
        # Drop the leading turn number.
        _, left = line.rstrip().split(' ', 1)
        for rel in relations:
            if rel not in left:
                continue
            parts = left.split(rel)
            lhs = parts[0].rstrip().lower()
            key = [lhs, rel]
            for val in parts[1].strip().lower().split(', '):
                kv_pairs.append((key, [val]))
                # Reverse relation for answering inverted questions.
                kv_pairs.append(([val, '!' + rel], [lhs]))
            break
    if is_save_pickle:
        save_pickle(kv_pairs, 'pickle/mov_kv_pairs.pickle')
    return kv_pairs
|
def vectorize_kv(data, max_mem_len, max_mem_size, w2i):
    """Vectorize key (or value) token lists into a fixed-shape uint32
    array of shape (len(data), max_mem_size, max_mem_len)."""
    all_vec_list = []
    for i, kv_list in enumerate(data):
        if (i % 5000) == 0:
            print('vectorize_kv:', i, '/', len(data))
        vec_list = []
        # Small headroom before the hard max_mem_size cut below.
        for kv in kv_list[:max_mem_len + 100]:
            # Known words -> ids, truncated and zero-padded per slot.
            ids = [w2i[tok] for tok in kv if tok in w2i][:max_mem_len]
            ids += [0] * (max_mem_len - len(ids))
            vec_list.append(ids)
        vec_list = vec_list[:max_mem_size]
        # Pad with all-zero slots up to the fixed memory size.
        vec_list += [[0] * max_mem_len for _ in range(max_mem_size - len(vec_list))]
        all_vec_list.append(vec_list)
    return np.array(all_vec_list, dtype=np.uint32)
|
def load_kv_dataset(data, kv_pairs, stopwords):
    """For every question in *data*, collect the KB (key, value) pairs
    whose key mentions a non-stopword question token."""
    print('---', len(data), len(kv_pairs))
    data_k, data_v = [], []
    for i, (q, _) in enumerate(data):
        if (i % 100) == 0:
            print('load_kv_dataset:', i, '/', len(data))
        k_list, v_list = [], []
        for w in q:
            if w in stopwords:
                continue
            for k, v in kv_pairs:
                if w in k:
                    k_list.append(k)
                    v_list.append(v)
        # Diagnostics for degenerate retrievals.
        if not k_list:
            print('==================no kv!')
            print(q)
        if len(k_list) > 100:
            print('==================too many kv! > 100')
            print(q)
            print(len(k_list))
        data_k.append(k_list)
        data_v.append(v_list)
    return (data_k, data_v)
|
def get_stop_words(data, freq, token_dict, max_token_length, is_save_pickle):
    """Collect question tokens whose counted frequency reaches *freq*.

    NOTE(review): the first occurrence of a word sets bow[w] = 0 rather
    than 1, so every count is off by one — a word must actually occur
    freq + 1 times to be selected. Confirm whether the threshold was
    tuned to this behavior before changing it.
    """
    bow = {}
    for (i, (q, _)) in enumerate(data):
        if ((i % 2000) == 0):
            print(i, '/', len(data))
        for qq in q:
            # Tokenize with the same n-gram vocabulary used elsewhere.
            q_tokens = find_ngrams(token_dict, qq.split(' '), max_token_length)
            for w in q_tokens:
                if (w not in bow):
                    bow[w] = 0
                else:
                    bow[w] += 1
    stopwords = [k for (k, v) in bow.items() if (v >= freq)]
    if is_save_pickle:
        save_pickle(stopwords, 'pickle/mov_stopwords.pickle')
    return stopwords
|
def filter_data(data, data_k, data_v, kv_min, kv_max):
    """Keep only the samples whose retrieved-key count is in
    (kv_min, kv_max]; filters the three parallel lists together."""
    keep = [i for i, k in enumerate(data_k) if kv_min < len(k) <= kv_max]
    return (
        [data[i] for i in keep],
        [data_k[i] for i in keep],
        [data_v[i] for i in keep],
    )
|
class History(Callback):
    """Keras callback that appends each epoch's logs to a JSON file at
    the module-level ``log_path``."""

    # Bug fix: the original used mutable default arguments (logs={}),
    # which are shared across calls (flake8-bugbear B006); Keras'
    # Callback API uses logs=None.
    def on_train_begin(self, logs=None):
        # Accumulated per-epoch log dicts for the whole run.
        self.result = []

    def on_epoch_end(self, epoch, logs=None):
        global log_path
        logs = {} if logs is None else logs
        logs['epoch'] = epoch
        self.result.append(logs)
        # Rewrite the full history each epoch so a crash loses nothing.
        with open(log_path, 'wt') as f:
            f.write(json.dumps(self.result, indent=4, sort_keys=True))
|
class TrainingInstance(object):
    """A single training instance (sentence pair)."""

    def __init__(self, tokens):
        self.tokens = tokens
        # Input and target are the same sequence (LM-style objective).
        self.input_tokens = tokens
        self.target_tokens = tokens

    def __str__(self):
        printable = ' '.join(tokenization.printable_text(x) for x in self.tokens)
        return 'tokens: %s\n' % printable + '\n'

    def __repr__(self):
        return self.__str__()
|
def write_instance_to_example_files(instances, word_to_id, max_seq_length, output_files):
    """Create TF example files from `TrainingInstance`s, sharding the
    examples round-robin across *output_files*."""
    writers = []
    for output_file in output_files:
        writers.append(tf.python_io.TFRecordWriter(output_file))
    writer_index = 0
    total_written = 0
    for (inst_index, instance) in enumerate(instances):
        input_ids = [word_to_id[token] for token in instance.input_tokens]
        target_ids = [word_to_id[token] for token in instance.target_tokens]
        # 1 for real tokens; padding positions get 0 below.
        input_mask = ([1] * len(input_ids))
        assert (len(input_ids) <= max_seq_length)
        # Zero-pad all three sequences to the fixed length.
        while (len(input_ids) < max_seq_length):
            input_ids.append(0)
            target_ids.append(0)
            input_mask.append(0)
        assert (len(input_ids) == max_seq_length)
        assert (len(target_ids) == max_seq_length)
        assert (len(input_mask) == max_seq_length)
        features = collections.OrderedDict()
        features['input_ids'] = create_int_feature(input_ids)
        features['target_ids'] = create_int_feature(target_ids)
        features['input_mask'] = create_int_feature(input_mask)
        tf_example = tf.train.Example(features=tf.train.Features(feature=features))
        writers[writer_index].write(tf_example.SerializeToString())
        # Round-robin sharding across the output files.
        writer_index = ((writer_index + 1) % len(writers))
        total_written += 1
        # Log the first 20 examples for manual inspection.
        if (inst_index < 20):
            tf.logging.info('*** Example ***')
            tf.logging.info(('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in instance.tokens])))
            for feature_name in features.keys():
                feature = features[feature_name]
                values = []
                if feature.int64_list.value:
                    values = feature.int64_list.value
                elif feature.float_list.value:
                    values = feature.float_list.value
                tf.logging.info(('%s: %s' % (feature_name, ' '.join([str(x) for x in values]))))
    for writer in writers:
        writer.close()
    tf.logging.info('Wrote %d total instances', total_written)
|
def create_int_feature(values):
    """Wrap an iterable of ints in a tf.train.Feature (Int64List)."""
    feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
    return feature
|
def create_float_feature(values):
    """Wrap an iterable of floats in a tf.train.Feature (FloatList)."""
    feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
    return feature
|
def create_training_instances(all_tokens, vocab_words, max_seq_length, rng):
    """Create `TrainingInstance`s from raw text.

    Shuffles the sentences, builds one instance per sentence, then
    shuffles the instances. *vocab_words* is currently unused but kept
    for interface compatibility.
    """
    rng.shuffle(all_tokens)
    print('Process of "create_training_instances"')
    instances = [
        create_instances_from_sentence(tokens, max_seq_length, rng)
        for tokens in all_tokens
    ]
    rng.shuffle(instances)
    print('finished')  # fixed typo: the message previously read 'finised'
    return instances
|
def create_instances_from_sentence(tokens, max_seq_length, rng):
    """Creates a `TrainingInstance` for a single sentence.

    Truncates *tokens* (in place) to max_seq_length - 2 to leave room
    for the [SOS]/[EOS] markers, then prepends/appends them if missing.
    """
    max_num_tokens = (max_seq_length - 2)
    assert (len(tokens) >= 1)
    if (len(tokens) >= max_num_tokens):
        truncate_seq(tokens, max_num_tokens, rng)
    # Bug fix: the original compared strings with `is not`, which tests
    # object identity rather than equality and is unreliable for str
    # values (and a SyntaxWarning on CPython 3.8+).
    if (tokens[0] != '[SOS]'):
        tokens.insert(0, '[SOS]')
    if (tokens[(- 1)] != '[EOS]'):
        tokens.append('[EOS]')
    instance = TrainingInstance(tokens)
    return instance
|
def truncate_seq(tokens, max_num_tokens, rng):
    """Truncate *tokens* in place to at most *max_num_tokens*, randomly
    dropping from the front or the back each step."""
    while len(tokens) > max_num_tokens:
        assert len(tokens) >= 1
        # Coin flip: drop the first or the last token.
        if rng.random() < 0.5:
            del tokens[0]
        else:
            del tokens[-1]
|
def read_all_sentences(input_files):
    """Read the non-empty, whitespace-stripped lines of every file in
    *input_files*, preserving file and line order."""
    all_sentences = []
    for input_file in input_files:
        with open(input_file, 'r') as reader:
            for raw in reader:
                sentence = raw.strip()
                # Skip blank lines.
                if sentence:
                    all_sentences.append(sentence)
    return all_sentences
|
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
    """Creates an optimizer training op: AdamW with linear LR decay and
    optional linear warmup (mirrors the reference BERT training setup).

    Returns (train_op, learning_rate tensor).
    """
    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
    # Linear decay from init_lr to 0 over num_train_steps.
    learning_rate = tf.train.polynomial_decay(learning_rate, global_step, num_train_steps, end_learning_rate=0.0, power=1.0, cycle=False)
    if num_warmup_steps:
        # During warmup the LR ramps linearly from 0 to init_lr; afterwards
        # the decayed schedule above takes over.
        global_steps_int = tf.cast(global_step, tf.int32)
        warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
        global_steps_float = tf.cast(global_steps_int, tf.float32)
        warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
        warmup_percent_done = (global_steps_float / warmup_steps_float)
        warmup_learning_rate = (init_lr * warmup_percent_done)
        is_warmup = tf.cast((global_steps_int < warmup_steps_int), tf.float32)
        learning_rate = (((1.0 - is_warmup) * learning_rate) + (is_warmup * warmup_learning_rate))
    optimizer = AdamWeightDecayOptimizer(learning_rate=learning_rate, weight_decay_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
    if use_tpu:
        optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
    tvars = tf.trainable_variables()
    grads = tf.gradients(loss, tvars)
    # Global-norm gradient clipping at 1.0, as in BERT.
    (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
    train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=global_step)
    # AdamWeightDecayOptimizer does not advance global_step itself, so it
    # is incremented manually here (matching the reference BERT code).
    new_global_step = (global_step + 1)
    train_op = tf.group(train_op, [global_step.assign(new_global_step)])
    return (train_op, learning_rate)
|
class AdamWeightDecayOptimizer(tf.train.Optimizer):
    """A basic Adam optimizer that includes "correct" L2 weight decay.

    Weight decay is applied directly to the parameters (decoupled from
    the Adam moment estimates), as in the original BERT implementation.
    """

    def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, name='AdamWeightDecayOptimizer'):
        """Constructs a AdamWeightDecayOptimizer."""
        super(AdamWeightDecayOptimizer, self).__init__(False, name)
        self.learning_rate = learning_rate
        self.weight_decay_rate = weight_decay_rate
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon
        # Regex patterns for parameters excluded from weight decay.
        self.exclude_from_weight_decay = exclude_from_weight_decay

    def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        """See base class.

        Note: does not increment *global_step*; the caller is expected
        to advance it (see create_optimizer).
        """
        assignments = []
        for (grad, param) in grads_and_vars:
            if ((grad is None) or (param is None)):
                continue
            param_name = self._get_variable_name(param.name)
            # First/second moment slots for this parameter.
            m = tf.get_variable(name=(param_name + '/adam_m'), shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())
            v = tf.get_variable(name=(param_name + '/adam_v'), shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())
            # Standard Adam moment updates. No bias correction is applied,
            # matching the reference BERT implementation.
            next_m = (tf.multiply(self.beta_1, m) + tf.multiply((1.0 - self.beta_1), grad))
            next_v = (tf.multiply(self.beta_2, v) + tf.multiply((1.0 - self.beta_2), tf.square(grad)))
            update = (next_m / (tf.sqrt(next_v) + self.epsilon))
            # Decoupled weight decay, skipped for excluded parameters.
            if self._do_use_weight_decay(param_name):
                update += (self.weight_decay_rate * param)
            update_with_lr = (self.learning_rate * update)
            next_param = (param - update_with_lr)
            assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v)])
        return tf.group(*assignments, name=name)

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if (not self.weight_decay_rate):
            return False
        if self.exclude_from_weight_decay:
            for r in self.exclude_from_weight_decay:
                if (re.search(r, param_name) is not None):
                    return False
        return True

    def _get_variable_name(self, param_name):
        """Get the variable name from the tensor name (strips ':0')."""
        m = re.match('^(.*):\\d+$', param_name)
        if (m is not None):
            param_name = m.group(1)
        return param_name
|
def model_fn_builder(config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings):
    """Returns `model_fn` closure for TPUEstimator."""

    def model_fn(features, labels, mode, params):
        """The `model_fn` for TPUEstimator.

        Builds the BERT-style model, attaches the LM loss, optionally
        restores from *init_checkpoint*, and returns a TPUEstimatorSpec
        for TRAIN or EVAL mode.
        """
        tf.logging.info('*** Features ***')
        for name in sorted(features.keys()):
            tf.logging.info((' name = %s, shape = %s' % (name, features[name].shape)))
        input_ids = features['input_ids']
        target_ids = features['target_ids']
        input_mask = features['input_mask']
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        model = modeling.BertModel(config=config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, use_one_hot_embeddings=use_one_hot_embeddings)
        (lm_loss, lm_example_loss, lm_log_probs) = get_lm_output(config, model.get_sequence_output(), model.get_embedding_table(), target_ids, input_mask)
        total_loss = lm_loss
        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            # Restore matching variables from the checkpoint; on TPU this
            # must happen inside the scaffold function.
            (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
            if use_tpu:

                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
                    return tf.train.Scaffold()
                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        tf.logging.info('**** Trainable Variables ****')
        for var in tvars:
            init_string = ''
            if (var.name in initialized_variable_names):
                init_string = ', *INIT_FROM_CKPT*'
            tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string)
        output_spec = None
        if (mode == tf.estimator.ModeKeys.TRAIN):
            (train_op, _lr) = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
            tf.summary.scalar('learning_rate', _lr)
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn)
        elif (mode == tf.estimator.ModeKeys.EVAL):

            def metric_fn(lm_example_loss, lm_log_probs):
                """Computes the loss and accuracy of the model."""
                lm_log_probs = tf.reshape(lm_log_probs, [(- 1), lm_log_probs.shape[(- 1)]])
                lm_predictions = tf.argmax(lm_log_probs, axis=(- 1), output_type=tf.int32)
                lm_example_loss = tf.reshape(lm_example_loss, [(- 1)])
                lm_mean_loss = tf.metrics.mean(values=lm_example_loss)
                return {'lm_loss': lm_mean_loss}
            eval_metrics = (metric_fn, [lm_example_loss, lm_log_probs])
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn)
        else:
            raise ValueError(('Only TRAIN and EVAL modes are supported: %s' % mode))
        return output_spec
    return model_fn
|
def get_lm_output(config, input_tensor, output_weights, label_ids, label_mask):
    """Get loss and log probs for the LM.

    *output_weights* is the (tied) embedding table used as the output
    projection. Returns (mean loss, per-position loss, log probs).
    """
    input_shape = modeling.get_shape_list(input_tensor, expected_rank=3)
    # Flatten (batch, seq, hidden) -> (batch * seq, hidden).
    input_tensor = tf.reshape(input_tensor, [(input_shape[0] * input_shape[1]), input_shape[2]])
    with tf.variable_scope('cls/predictions'):
        with tf.variable_scope('transform'):
            input_tensor = tf.layers.dense(input_tensor, units=config.hidden_size, activation=modeling.get_activation(config.hidden_act), kernel_initializer=modeling.create_initializer(config.initializer_range))
            input_tensor = modeling.layer_norm(input_tensor)
        output_bias = tf.get_variable('output_bias', shape=[config.vocab_size], initializer=tf.zeros_initializer())
        # Project to vocabulary logits with the tied embedding weights.
        logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        log_probs = tf.nn.log_softmax(logits, axis=(- 1))
        label_ids = tf.reshape(label_ids, [(- 1)])
        one_hot_labels = tf.one_hot(label_ids, depth=config.vocab_size, dtype=tf.float32)
        per_example_loss = (- tf.reduce_sum((log_probs * one_hot_labels), axis=[(- 1)]))
        # Zero out the loss at padded positions.
        label_mask = tf.reshape(label_mask, [(input_shape[0] * input_shape[1])])
        loss_mask = tf.dtypes.cast(label_mask, tf.float32)
        per_example_loss = tf.math.multiply(per_example_loss, loss_mask)
        # NOTE(review): the mean is taken over ALL positions, including the
        # masked-out padding, which scales the loss down for short
        # sequences — confirm whether dividing by the mask sum was intended.
        loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, log_probs)
|
def input_fn_builder(input_files, max_seq_length, is_training, num_cpu_threads=4):
    """Creates an `input_fn` closure to be passed to TPUEstimator."""

    def input_fn(params):
        """The actual input function."""
        batch_size = params['batch_size']
        name_to_features = {'input_ids': tf.FixedLenFeature([max_seq_length], tf.int64), 'target_ids': tf.FixedLenFeature([max_seq_length], tf.int64), 'input_mask': tf.FixedLenFeature([max_seq_length], tf.int64)}
        if is_training:
            # Shuffle files, interleave reads across them in parallel, and
            # shuffle records for training.
            d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
            d = d.repeat()
            d = d.shuffle(buffer_size=len(input_files))
            cycle_length = min(num_cpu_threads, len(input_files))
            d = d.apply(tf.contrib.data.parallel_interleave(tf.data.TFRecordDataset, sloppy=is_training, cycle_length=cycle_length))
            d = d.shuffle(buffer_size=100)
        else:
            # Eval: sequential read. The dataset also repeats here, so the
            # number of eval steps must be bounded by the caller.
            d = tf.data.TFRecordDataset(input_files)
            d = d.repeat()
        # Parse and batch; drop_remainder keeps shapes static for TPU.
        d = d.apply(tf.contrib.data.map_and_batch((lambda record: _decode_record(record, name_to_features)), batch_size=batch_size, num_parallel_batches=num_cpu_threads, drop_remainder=True))
        return d
    return input_fn
|
def _decode_record(record, name_to_features):
    """Decode a serialized tf.Example into a dict of feature tensors.

    int64 features are downcast to int32 — presumably for TPU
    compatibility; TODO confirm.
    """
    example = tf.parse_single_example(record, name_to_features)
    # Materialize the key list up front since we mutate the dict in the loop.
    for key in list(example.keys()):
        value = example[key]
        if value.dtype == tf.int64:
            example[key] = tf.to_int32(value)
    return example
|
def main(_):
    """Train and/or evaluate the LM according to FLAGS.

    Side effects: creates FLAGS.output_dir, snapshots the config and vocab
    files into it, builds a TPUEstimator, trains and/or evaluates it, and
    writes evaluation results to `eval_results.txt` in the output directory.

    Raises:
        ValueError: if neither FLAGS.do_train nor FLAGS.do_eval is set.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    if (not FLAGS.do_train) and (not FLAGS.do_eval):
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')
    config = modeling.BertConfig.from_json_file(FLAGS.config_file)
    tf.gfile.MakeDirs(FLAGS.output_dir)

    # Snapshot config + vocab into the output dir so the run is self-describing.
    # tf.gfile.Copy replaces the original os.system(f'cp {src} {dst}'): it is
    # portable, works on GCS paths, and is not vulnerable to shell injection
    # through flag values.
    src = FLAGS.config_file
    dst = os.path.join(FLAGS.output_dir, FLAGS.config_file.split('/')[-1])
    tf.gfile.Copy(src, dst, overwrite=True)
    # NOTE(review): split('/')[0] keeps only the FIRST path component of
    # config_file; for nested config paths os.path.dirname() was probably
    # intended — behavior preserved here, confirm before changing.
    src = os.path.join(FLAGS.config_file.split('/')[0], config.vocab_file)
    dst = os.path.join(FLAGS.output_dir, config.vocab_file)
    tf.gfile.Copy(src, dst, overwrite=True)

    # Expand the comma-separated glob patterns into concrete file lists.
    input_files = []
    for input_pattern in FLAGS.input_file.split(','):
        input_files.extend(tf.gfile.Glob(input_pattern))
    tf.logging.info('*** Input Files ***')
    for input_file in input_files:
        tf.logging.info('  %s' % input_file)
    eval_input_files = []
    for eval_input_pattern in FLAGS.eval_input_file.split(','):
        eval_input_files.extend(tf.gfile.Glob(eval_input_pattern))
    tf.logging.info('*** Eval Files ***')
    for eval_input_file in eval_input_files:
        tf.logging.info('  %s' % eval_input_file)

    tpu_cluster_resolver = None
    if FLAGS.use_tpu and FLAGS.tpu_name:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        master=FLAGS.master,
        model_dir=FLAGS.output_dir,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=FLAGS.iterations_per_loop,
            num_shards=FLAGS.num_tpu_cores,
            per_host_input_for_training=is_per_host))
    model_fn = model_fn_builder(
        config=config,
        init_checkpoint=FLAGS.init_checkpoint,
        learning_rate=FLAGS.learning_rate,
        num_train_steps=FLAGS.num_train_steps,
        num_warmup_steps=FLAGS.num_warmup_steps,
        use_tpu=FLAGS.use_tpu,
        use_one_hot_embeddings=FLAGS.use_tpu)
    # TPUEstimator falls back to normal CPU/GPU execution when use_tpu=False.
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=FLAGS.use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size)

    if FLAGS.do_train:
        tf.logging.info('***** Running training *****')
        tf.logging.info('  Batch size = %d', FLAGS.train_batch_size)
        train_input_fn = input_fn_builder(
            input_files=input_files,
            max_seq_length=FLAGS.max_seq_length,
            is_training=True)
        if FLAGS.do_eval:
            # Interleave periodic evaluation with training.
            train_spec = tf.estimator.TrainSpec(
                input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
            eval_input_fn = input_fn_builder(
                input_files=eval_input_files,
                max_seq_length=FLAGS.max_seq_length,
                is_training=False)
            eval_spec = tf.estimator.EvalSpec(
                input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
            tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
        else:
            estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)

    if FLAGS.do_eval:
        # Final standalone evaluation (runs even after train_and_evaluate,
        # matching the original control flow).
        tf.logging.info('***** Running evaluation *****')
        tf.logging.info('  Batch size = %d', FLAGS.eval_batch_size)
        eval_input_fn = input_fn_builder(
            input_files=eval_input_files,
            max_seq_length=FLAGS.max_seq_length,
            is_training=False)
        result = estimator.evaluate(input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
        output_eval_input_file = os.path.join(FLAGS.output_dir, 'eval_results.txt')
        with tf.gfile.GFile(output_eval_input_file, 'w') as writer:
            tf.logging.info('***** Eval results *****')
            for key in sorted(result.keys()):
                tf.logging.info('  %s = %s', key, str(result[key]))
                writer.write('%s = %s\n' % (key, str(result[key])))
|
class TestingInstance(object):
    'A single test instance (sentence pair).'

    def __init__(self, tokens):
        # At test time the inputs and targets are the same token sequence.
        self.tokens = tokens
        self.input_tokens = tokens
        self.target_tokens = tokens

    def __str__(self):
        printable = ' '.join(
            tokenization.printable_text(tok) for tok in self.tokens)
        return ('tokens: %s\n' % printable) + '\n'

    def __repr__(self):
        return self.__str__()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.