repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
STR | STR-master/utils/conv_type.py | from torch.nn import init
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import math
from args import args as parser_args
import numpy as np
DenseConv = nn.Conv2d
def sparseFunction(x, s, activation=torch.relu, f=torch.sigmoid):
    """Soft-threshold ``x`` by ``f(s)`` while preserving sign.

    Computes sign(x) * activation(|x| - f(s)): entries whose magnitude is
    below the (learnable) threshold f(s) are zeroed out by the ReLU-style
    activation, and larger entries are shrunk toward zero.
    """
    shrunk_magnitude = activation(torch.abs(x) - f(s))
    return torch.sign(x) * shrunk_magnitude
def initialize_sInit():
    """Build the initial value of the learnable sparsity threshold.

    Only the "constant" scheme is handled: a 1x1 tensor filled with
    ``parser_args.sInit_value``. Any other ``sInit_type`` falls through
    and yields None (matching the original implicit-return behavior).
    """
    if parser_args.sInit_type != "constant":
        return None
    return parser_args.sInit_value * torch.ones([1, 1])
class STRConv(nn.Conv2d):
    """Conv2d with Soft Threshold Reparameterization (STR) sparsity.

    The effective weight is sparseFunction(weight, sparseThreshold):
    entries whose magnitude falls below f(sparseThreshold) are zeroed, so
    sparsity is learned jointly with the weights via the trainable
    threshold parameter.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.activation = torch.relu
        if parser_args.sparse_function == 'sigmoid':
            self.f = torch.sigmoid
            self.sparseThreshold = nn.Parameter(initialize_sInit())
        else:
            # NOTE(review): self.f is never assigned on this branch, but
            # forward()/getSparsity() read self.f unconditionally — any
            # sparse_function other than 'sigmoid' would raise
            # AttributeError here. Confirm whether other functions are
            # meant to be supported.
            self.sparseThreshold = nn.Parameter(initialize_sInit())
    def forward(self, x):
        # In case STR is not training for the hyperparameters given in the paper, change sparseWeight to self.sparseWeight if it is a problem of backprop.
        # However, that should not be the case according to graph computation.
        sparseWeight = sparseFunction(self.weight, self.sparseThreshold, self.activation, self.f)
        x = F.conv2d(
            x, sparseWeight, self.bias, self.stride, self.padding, self.dilation, self.groups
        )
        return x
    def getSparsity(self, f=torch.sigmoid):
        # Returns (sparsity in percent, element count, threshold value).
        # NOTE(review): the `f` argument is used only to report the
        # threshold in the return value; the sparse weight itself is
        # computed with self.f.
        sparseWeight = sparseFunction(self.weight, self.sparseThreshold, self.activation, self.f)
        temp = sparseWeight.detach().cpu()
        temp[temp!=0] = 1
        return (100 - temp.mean().item()*100), temp.numel(), f(self.sparseThreshold).item()
class ChooseEdges(autograd.Function):
    """Straight-through magnitude pruning.

    Forward zeroes the ``prune_rate`` fraction of smallest-magnitude
    weights; backward passes the gradient through unchanged
    (straight-through estimator).
    """
    @staticmethod
    def forward(ctx, weight, prune_rate):
        num_pruned = int(prune_rate * weight.numel())
        # Indices of weights ranked by absolute value, smallest first.
        _, order = weight.flatten().abs().sort()
        pruned = weight.clone()
        # The flattened view shares storage with `pruned`, so writing
        # through it zeroes the selected entries in place.
        pruned.flatten()[order[:num_pruned]] = 0
        return pruned
    @staticmethod
    def backward(ctx, grad_output):
        # Identity gradient for the weight; no gradient for prune_rate.
        return grad_output, None
class DNWConv(nn.Conv2d):
    """Conv2d that prunes its weight with ChooseEdges on every forward
    (Discovering Neural Wirings style magnitude pruning)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def set_prune_rate(self, prune_rate):
        # Fraction of smallest-magnitude weights zeroed at each forward.
        self.prune_rate = prune_rate
        print(f"=> Setting prune rate to {prune_rate}")
    def forward(self, x):
        pruned_weight = ChooseEdges.apply(self.weight, self.prune_rate)
        return F.conv2d(
            x, pruned_weight, self.bias, self.stride, self.padding, self.dilation, self.groups
        )
def GMPChooseEdges(weight, prune_rate):
    """Return a copy of ``weight`` with the ``prune_rate`` fraction of its
    smallest-magnitude entries set to zero (gradual magnitude pruning)."""
    num_pruned = int(prune_rate * weight.numel())
    _, order = weight.flatten().abs().sort()
    pruned = weight.clone()
    # The flattened view aliases `pruned`, so this zeroes entries in place.
    pruned.flatten()[order[:num_pruned]] = 0
    return pruned
class GMPConv(nn.Conv2d):
    """Conv2d with gradual magnitude pruning (GMP).

    The pruning rate ramps from 0 toward ``prune_rate`` over training via
    set_curr_prune_rate; forward always prunes at the current rate.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def set_prune_rate(self, prune_rate):
        # Target (final) prune rate; the schedule starts from zero.
        self.prune_rate = prune_rate
        self.curr_prune_rate = 0.0
        print(f"=> Setting prune rate to {prune_rate}")
    def set_curr_prune_rate(self, curr_prune_rate):
        # Called by the training loop as the pruning schedule advances.
        self.curr_prune_rate = curr_prune_rate
    def forward(self, x):
        pruned_weight = GMPChooseEdges(self.weight, self.curr_prune_rate)
        return F.conv2d(
            x, pruned_weight, self.bias, self.stride, self.padding, self.dilation, self.groups
        )
| 3,471 | 30.853211 | 154 | py |
STR | STR-master/utils/logging.py | import abc
import tqdm
from torch.utils.tensorboard import SummaryWriter
class ProgressMeter(object):
    """Formats and reports a collection of meters for a given batch index."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch, tqdm_writer=True):
        """Emit '<prefix>[batch/total]<TAB><meter>...' via tqdm or stdout."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        line = "\t".join(parts)
        if tqdm_writer:
            # tqdm.write keeps the output from clobbering progress bars.
            tqdm.tqdm.write(line)
        else:
            print(line)

    def write_to_tensorboard(
        self, writer: SummaryWriter, prefix="train", global_step=None
    ):
        """Log each meter's current and/or average value to TensorBoard,
        honoring the meter's write_val/write_avg flags."""
        for meter in self.meters:
            if meter.write_val:
                writer.add_scalar(
                    f"{prefix}/{meter.name}_val", meter.val, global_step=global_step
                )
            if meter.write_avg:
                writer.add_scalar(
                    f"{prefix}/{meter.name}_avg", meter.avg, global_step=global_step
                )

    def _get_batch_fmtstr(self, num_batches):
        # Counter width matches the digit count of the total, e.g.
        # 100 -> "[{:3d}/100]".
        width = len(str(num_batches // 1))
        field = "{:" + str(width) + "d}"
        return "[" + field + "/" + field.format(num_batches) + "]"
class Meter(object):
    """Abstract interface for metric trackers consumed by ProgressMeter.

    NOTE(review): the class derives from ``object`` rather than ``abc.ABC``
    (and declares no ABCMeta metaclass), so ``@abc.abstractmethod`` is not
    enforced at instantiation time — it is documentation only here.
    """
    @abc.abstractmethod
    def __init__(self, name, fmt=":f"):
        pass
    @abc.abstractmethod
    def reset(self):
        # Clear all accumulated statistics.
        pass
    @abc.abstractmethod
    def update(self, val, n=1):
        # Record `val` observed `n` times.
        pass
    @abc.abstractmethod
    def __str__(self):
        pass
class AverageMeter(Meter):
    """ Computes and stores the average and current value """

    def __init__(self, name, fmt=":f", write_val=True, write_avg=True):
        self.name = name
        self.fmt = fmt
        self.reset()
        # Flags consumed by ProgressMeter.write_to_tensorboard.
        self.write_val = write_val
        self.write_avg = write_avg

    def reset(self):
        """Zero out all running statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
        return template.format(**self.__dict__)
class VarianceMeter(Meter):
    """Tracks the running variance of a metric via E[x^2] - (E[x])^2.

    Internally keeps two AverageMeters: one over squared samples and one
    over the raw samples.
    """

    def __init__(self, name, fmt=":f", write_val=False):
        self.name = name
        # Running mean of x**2 and running mean of x, respectively.
        self._mean_of_sq = AverageMeter(name="_subvariance_1", fmt=":.02f")
        self._mean = AverageMeter(name="_subvariance_2", fmt=":.02f")
        self.fmt = fmt
        self.reset()
        # Only the average (the variance estimate) is logged; the "val"
        # of a single sample is always zero and not meaningful.
        self.write_val = False
        self.write_avg = True

    @property
    def val(self):
        return self._mean_of_sq.val - self._mean.val ** 2

    @property
    def avg(self):
        return self._mean_of_sq.avg - self._mean.avg ** 2

    def reset(self):
        self._mean_of_sq.reset()
        self._mean.reset()

    def update(self, val, n=1):
        self._mean_of_sq.update(val ** 2, n=n)
        self._mean.update(val, n=n)

    def __str__(self):
        return ("{name} (var {avg" + self.fmt + "})").format(
            name=self.name, avg=self.avg
        )
| 3,167 | 25.621849 | 78 | py |
STR | STR-master/utils/builder.py | from args import args
import math
import torch
import torch.nn as nn
import utils.conv_type
import utils.bn_type
class Builder(object):
    """Constructs conv / batch-norm / activation layers from the layer
    types selected by the global ``args`` configuration."""

    # Padding that preserves spatial size for each supported kernel size.
    _PADDING = {1: 0, 3: 1, 5: 2, 7: 3}

    def __init__(self, conv_layer, bn_layer, first_layer=None):
        self.conv_layer = conv_layer
        self.bn_layer = bn_layer
        # The first layer may use a distinct conv type (e.g. dense).
        self.first_layer = first_layer or conv_layer

    def conv(self, kernel_size, in_planes, out_planes, stride=1, first_layer=False):
        """Build and initialize a bias-free conv of kernel size 1/3/5/7.

        Returns None for unsupported kernel sizes.
        """
        layer_cls = self.first_layer if first_layer else self.conv_layer
        if first_layer:
            print(f"==> Building first layer with {args.first_layer_type}")
        if kernel_size not in self._PADDING:
            return None
        conv = layer_cls(
            in_planes,
            out_planes,
            kernel_size=kernel_size,
            stride=stride,
            padding=self._PADDING[kernel_size],
            bias=False,
        )
        self._init_conv(conv)
        return conv

    def conv2d(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=True,
        padding_mode="zeros",
    ):
        """Raw pass-through to the configured conv layer (no init applied)."""
        return self.conv_layer(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            padding_mode,
        )

    def conv3x3(self, in_planes, out_planes, stride=1, first_layer=False):
        """3x3 convolution with padding"""
        return self.conv(3, in_planes, out_planes, stride=stride, first_layer=first_layer)

    def conv1x1(self, in_planes, out_planes, stride=1, first_layer=False):
        """1x1 convolution with padding"""
        return self.conv(1, in_planes, out_planes, stride=stride, first_layer=first_layer)

    def conv7x7(self, in_planes, out_planes, stride=1, first_layer=False):
        """7x7 convolution with padding"""
        return self.conv(7, in_planes, out_planes, stride=stride, first_layer=first_layer)

    def conv5x5(self, in_planes, out_planes, stride=1, first_layer=False):
        """5x5 convolution with padding"""
        return self.conv(5, in_planes, out_planes, stride=stride, first_layer=first_layer)

    def batchnorm(self, planes, last_bn=False, first_layer=False):
        # last_bn / first_layer are accepted for interface parity with
        # other builders but are unused by this bn layer.
        return self.bn_layer(planes)

    def activation(self):
        """Return a fresh activation module per args.nonlinearity."""
        if args.nonlinearity == "relu":
            return nn.ReLU(inplace=True)
        raise ValueError(f"{args.nonlinearity} is not an initialization option!")

    def _init_conv(self, conv):
        """Initialize conv.weight according to args.init.

        With args.scale_fan, the fan is shrunk by (1 - prune_rate) so the
        initialization variance accounts for pruned connections.
        """
        def scaled_std():
            fan = nn.init._calculate_correct_fan(conv.weight, args.mode)
            if args.scale_fan:
                fan = fan * (1 - args.prune_rate)
            gain = nn.init.calculate_gain(args.nonlinearity)
            return gain / math.sqrt(fan)

        if args.init == "signed_constant":
            # Constant magnitude, sign taken from the random init.
            conv.weight.data = conv.weight.data.sign() * scaled_std()
        elif args.init == "unsigned_constant":
            conv.weight.data = torch.ones_like(conv.weight.data) * scaled_std()
        elif args.init == "kaiming_normal":
            if args.scale_fan:
                with torch.no_grad():
                    conv.weight.data.normal_(0, scaled_std())
            else:
                nn.init.kaiming_normal_(
                    conv.weight, mode=args.mode, nonlinearity=args.nonlinearity
                )
        elif args.init == "standard":
            nn.init.kaiming_uniform_(conv.weight, a=math.sqrt(5))
        else:
            raise ValueError(f"{args.init} is not an initialization option!")
def get_builder():
    """Resolve the configured conv/bn (and optional first-layer) classes
    by name and return a Builder wired with them."""
    print("==> Conv Type: {}".format(args.conv_type))
    print("==> BN Type: {}".format(args.bn_type))
    conv_layer = getattr(utils.conv_type, args.conv_type)
    bn_layer = getattr(utils.bn_type, args.bn_type)
    first_layer = None
    if args.first_layer_type is not None:
        first_layer = getattr(utils.conv_type, args.first_layer_type)
        print(f"==> First Layer Type {args.first_layer_type}")
    return Builder(conv_layer=conv_layer, bn_layer=bn_layer, first_layer=first_layer)
| 5,356 | 30.327485 | 88 | py |
STR | STR-master/utils/eval_utils.py | import torch
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: (N, C) tensor of per-class scores.
        target: (N,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of one-element tensors, each the top-k accuracy in percent.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # (N, maxk) highest-scoring class indices, transposed to (maxk, N)
        # so row r holds every sample's rank-r prediction.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # Bug fix: correct[:k] is a non-contiguous slice (correct comes
            # from a transpose), and Tensor.view raises on it in modern
            # PyTorch — reshape handles the copy when needed.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
| 563 | 28.684211 | 88 | py |
STR | STR-master/utils/net_utils.py | from functools import partial
import os
import pathlib
import shutil
import math
import torch
import torch.nn as nn
def save_checkpoint(state, is_best, filename="checkpoint.pth", save=False):
    """Serialize ``state`` to ``filename``; mirror it to model_best.pth if best.

    Args:
        state: picklable checkpoint dict (handed to torch.save).
        is_best: when True, copy the checkpoint to '<dir>/model_best.pth'.
        save: keep ``filename`` on disk; when False and is_best, the raw
            checkpoint is removed after the best-model copy so only
            model_best.pth persists.
    """
    filename = pathlib.Path(filename)
    # exist_ok avoids the check-then-create race of the original
    # `if not exists(): makedirs(...)` pattern.
    filename.parent.mkdir(parents=True, exist_ok=True)
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, str(filename.parent / "model_best.pth"))
        if not save:
            filename.unlink()
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first parameter group."""
    first_group = optimizer.param_groups[0]
    return first_group["lr"]
def accumulate(model, f):
    """Recursively sum ``f(module)`` over ``model`` and all descendants."""
    total = 0.0
    for submodule in model.children():
        total += accumulate(submodule, f)
    # Count this module itself after its subtree.
    return total + f(model)
class LabelSmoothing(nn.Module):
    """
    NLL loss with label smoothing.
    """

    def __init__(self, smoothing=0.0):
        """
        Constructor for the LabelSmoothing module.
        :param smoothing: label smoothing factor
        """
        super(LabelSmoothing, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing

    def forward(self, x, target):
        """Return the mean smoothed NLL of logits ``x`` against ``target``."""
        log_probs = torch.nn.functional.log_softmax(x, dim=-1)
        # Standard NLL term: -log p(target).
        target_logprob = log_probs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
        nll = -target_logprob
        # Smoothing term: mean of -log p over all classes (uniform prior).
        uniform = -log_probs.mean(dim=-1)
        combined = self.confidence * nll + self.smoothing * uniform
        return combined.mean()
class MaskL1RegLoss(nn.Module):
    """L1-style regularizer over sigmoid-activated ``mask`` attributes.

    Sums sigmoid(temperature * m.mask) over every submodule of the model
    that exposes a ``mask`` attribute (via ``accumulate``).
    """

    def __init__(self, temperature=1.0):
        super().__init__()
        # Sharpness of the sigmoid applied to each mask before summing.
        self.temperature = temperature

    def forward(self, model):
        # Walk the module tree and sum the per-module penalty.
        return accumulate(model, self.l1_of_mask)

    def l1_of_mask(self, m):
        if not hasattr(m, "mask"):
            return 0.0
        return (self.temperature * m.mask).sigmoid().sum()
| 1,847 | 21.536585 | 75 | py |
STR | STR-master/data/utils.py | import torch
from torch.utils.data.dataset import Dataset
def one_batch_dataset(dataset, batch_size):
    """Wrap ``dataset`` into a fixed, randomly chosen single batch.

    Draws ``batch_size`` examples once, without replacement, and returns a
    Dataset that always serves exactly those examples — useful for
    single-batch overfitting/debugging runs.
    """
    print("==> Grabbing a single batch")
    chosen = torch.randperm(len(dataset))[:batch_size]
    cached_batch = [dataset[i.item()] for i in chosen]

    class _OneBatchWrapper(Dataset):
        def __init__(self):
            self.batch = cached_batch

        def __getitem__(self, index):
            return self.batch[index]

        def __len__(self):
            return len(self.batch)

    return _OneBatchWrapper()
| 525 | 21.869565 | 66 | py |
STR | STR-master/data/imagenet.py | import os
import torch
from torchvision import datasets, transforms
import torch.multiprocessing
import h5py
import os
import numpy as np
torch.multiprocessing.set_sharing_strategy("file_system")
class ImageNet:
    """ImageNet train/val loaders with the standard augmentation recipe.

    Expects ImageFolder layouts at `<args.data>/imagenet/{train,val}` and
    exposes ``train_loader`` and ``val_loader``.
    """

    def __init__(self, args):
        super(ImageNet, self).__init__()
        data_root = os.path.join(args.data, "imagenet")
        use_cuda = torch.cuda.is_available()
        # Worker/pin-memory settings only apply when CUDA is available.
        loader_kwargs = (
            {"num_workers": args.workers, "pin_memory": True} if use_cuda else {}
        )
        normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )
        train_transform = transforms.Compose(
            [
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]
        )
        val_transform = transforms.Compose(
            [
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ]
        )
        train_set = datasets.ImageFolder(os.path.join(data_root, "train"), train_transform)
        val_set = datasets.ImageFolder(os.path.join(data_root, "val"), val_transform)
        self.train_loader = torch.utils.data.DataLoader(
            train_set, batch_size=args.batch_size, shuffle=True, **loader_kwargs
        )
        self.val_loader = torch.utils.data.DataLoader(
            val_set, batch_size=args.batch_size, shuffle=False, **loader_kwargs
        )
class TinyImageNet:
    """TinyImageNet train/val loaders backed by HDF5 files.

    Expects `<args.data>/tiny_imagenet/{train,val}.h5`. num_workers is
    pinned to 0 — presumably because H5DatasetOld holds an open h5py
    handle that cannot be shared with worker processes; confirm before
    raising the worker count.
    """

    def __init__(self, args):
        super(TinyImageNet, self).__init__()
        data_root = os.path.join(args.data, "tiny_imagenet")
        use_cuda = torch.cuda.is_available()
        loader_kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}
        normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )
        # H5 samples arrive as arrays, so train transforms start from PIL.
        train_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        val_transform = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
        train_set = H5DatasetOld(data_root + '/train.h5', transform=train_transform)
        val_set = H5DatasetOld(data_root + '/val.h5', transform=val_transform)
        self.train_loader = torch.utils.data.DataLoader(
            train_set, batch_size=args.batch_size, shuffle=True, **loader_kwargs
        )
        self.val_loader = torch.utils.data.DataLoader(
            val_set, batch_size=args.batch_size, shuffle=False, **loader_kwargs
        )
class H5Dataset(torch.utils.data.Dataset):
    """HDF5-backed dataset that opens its file lazily.

    The h5py handle is created on first access rather than in __init__, so
    the dataset object can be pickled into DataLoader worker processes.
    Assumes the file's first dataset key holds inputs and the second holds
    labels (TODO confirm against the files actually produced).
    """

    def __init__(self, h5_file, transform=None):
        self.transform = transform
        self.dataFile = None          # opened on first use
        self.h5_file = h5_file

    def _file(self):
        # Open lazily (and independently in each worker process).
        if self.dataFile is None:
            self.dataFile = h5py.File(self.h5_file, 'r')
        return self.dataFile

    def __len__(self):
        # Bug fix: the original read self.dataFile directly, which is None
        # until __getitem__ first runs, so len(dataset) — which DataLoader
        # calls up front — raised AttributeError.
        datasetNames = list(self._file().keys())
        return len(self._file()[datasetNames[0]])

    def __getitem__(self, idx):
        dataFile = self._file()
        keys = list(dataFile.keys())
        data = dataFile[keys[0]][idx]
        label = dataFile[keys[1]][idx]
        if self.transform:
            data = self.transform(data)
        return (data, label)
class H5DatasetOld(torch.utils.data.Dataset):
    """HDF5-backed dataset that opens its file eagerly in __init__.

    NOTE(review): the open h5py handle lives on the instance, so this
    object is presumably not picklable into DataLoader worker processes —
    which would explain TinyImageNet pinning num_workers to 0. Confirm
    before increasing the worker count.
    """
    def __init__(self, h5_file, transform=None):
        self.transform = transform
        # Eager open; compare H5Dataset above, which defers opening.
        self.dataFile = h5py.File(h5_file, 'r')
        # self.h5_file = h5_file
    def __len__(self):
        # Length of the first dataset in the file (assumed to be inputs).
        datasetNames = list(self.dataFile.keys())
        return len(self.dataFile[datasetNames[0]])
    def __getitem__(self, idx):
        # if self.dataFile is None:
        #     self.dataFile = h5py.File(self.h5_file, 'r')
        # First key is assumed to hold inputs, second labels — depends on
        # h5py's key ordering matching the writer; TODO confirm.
        data = self.dataFile[list(self.dataFile.keys())[0]][idx]
        label = self.dataFile[list(self.dataFile.keys())[1]][idx]
        if self.transform:
            data = self.transform(data)
return (data, label) | 4,309 | 30.691176 | 89 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/severity_scan.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
import numpy as np
import torch
import pickle
import os
import omegaconf
from overlap.extract_features import extract_features
import submitit
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/severity_scan.yaml")
def run(cfg):
if cfg.num_gpus > 1:
job_env = submitit.JobEnvironment()
rank = job_env.global_rank
world_size = job_env.num_tasks
if rank != 0:
logging.root.handlers = []
try:
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
backend='nccl',
init_method="tcp://{}:{}".format('localhost', 10001),
world_size=world_size,
rank=rank
)
train(cfg, is_leader=(rank==0))
except KeyboardInterrupt:
pass
finally:
torch.distributed.destroy_process_group()
else:
train(cfg, is_leader=True)
def train(cfg, is_leader=True):
    """Train/evaluate a model, then sweep corruptions at cfg.severity.

    For each corruption in cfg.aug_string (severity-weighted below), this
    measures test error and extracts averaged features, accumulating the
    features into cfg.feature_file so interrupted runs can resume.
    """
    np.random.seed(cfg.rng_seed)
    torch.manual_seed(cfg.rng_seed)
    log.info(cfg.pretty())
    cur_device = torch.cuda.current_device()
    model = instantiate(cfg.model).cuda(device=cur_device)
    if cfg.num_gpus > 1:
        model = torch.nn.parallel.DistributedDataParallel(
            module=model,
            device_ids=[cur_device],
            output_device=cur_device
        )
    optimizer = instantiate(cfg.optim, model.parameters())
    # Skip building the training set when no training will happen.
    if cfg.optim.max_epoch > 0:
        train_dataset = instantiate(cfg.train)
    else:
        train_dataset = None
    test_dataset = instantiate(cfg.test)
    lr_policy = instantiate(cfg.optim.lr_policy)
    # open_dict: allow injecting keys not declared in the ft config node.
    with omegaconf.open_dict(cfg):
        feature_extractor = instantiate(cfg.ft, num_gpus=cfg.num_gpus, is_leader=is_leader)
    feature_extractor.train()
    train_net(model=model,
            optimizer=optimizer,
            train_dataset=train_dataset,
            batch_size=cfg.train.batch_size,
            max_epoch=cfg.optim.max_epoch,
            loader_params=cfg.data_loader,
            lr_policy=lr_policy,
            save_period=cfg.train.checkpoint_period,
            weights=cfg.train.weights,
            num_gpus=cfg.num_gpus,
            is_leader=is_leader
            )
    err = test_net(model=model,
            test_dataset=test_dataset,
            batch_size=cfg.test.batch_size,
            loader_params=cfg.data_loader,
            output_name='test_epoch',
            num_gpus=cfg.num_gpus)
    # Resume support: previously computed features are loaded and skipped.
    if os.path.exists(cfg.feature_file):
        feature_dict = {k : v for k, v in np.load(cfg.feature_file).items()}
    else:
        feature_dict = {}
    indices = np.load(cfg.ft_corrupt.indices_file)
    for aug in cfg.aug_string.split("--"):
        # "name-low_high" or "name-high": map cfg.severity in [0,10] onto
        # [low, high] linearly; a bare "name" uses cfg.severity directly.
        if len(aug.split("-")) > 1:
            #log.info("Severity provided in corrupt.aug_string will be weighted by given severity.")
            sev = aug.split("-")[1]
            if len(sev.split("_")) > 1:
                low = float(sev.split("_")[0])
                high = float(sev.split("_")[1])
            else:
                low = 0.0
                high = float(sev)
            sev_factor = (high - low) * cfg.severity / 10 + low
        else:
            sev_factor = cfg.severity
        aug = aug.split("-")[0]
        aug_string = "{}-{}".format(aug, sev_factor)
        if aug_string in feature_dict:
            continue
        with omegaconf.open_dict(cfg.corrupt):
            corrupt_dataset = instantiate(cfg.corrupt, aug_string=aug_string)
        err = test_net(model=model,
                test_dataset=corrupt_dataset,
                batch_size=cfg.corrupt.batch_size,
                loader_params=cfg.data_loader,
                output_name=aug_string,
                num_gpus=cfg.num_gpus)
        with omegaconf.open_dict(cfg.ft_corrupt):
            ft_corrupt_dataset = instantiate(cfg.ft_corrupt, aug_string=aug_string)
        # Restrict feature extraction to the fixed, pre-sampled indices.
        ft_corrupt_dataset = ft_corrupt_dataset.serialize(indices)
        feature = extract_features(feature_extractor=feature_extractor,
                dataset=ft_corrupt_dataset,
                batch_size=cfg.ft_corrupt.batch_size,
                loader_params=cfg.data_loader,
                average=True,
                num_gpus=cfg.num_gpus)
        feature_dict[aug_string] = feature
        # Checkpoint features after every corruption (leader only).
        if is_leader:
            np.savez(cfg.feature_file, **feature_dict)
if __name__=="__main__":
run()
| 4,768 | 33.810219 | 100 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/test_cifar10.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/test_cifar10.yaml")
def train(cfg):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
model = instantiate(cfg.model).cuda()
test_dataset = instantiate(cfg.test)
checkpoint = torch.load(cfg.weights, map_location='cpu')
model.load_state_dict(checkpoint['model_state'])
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader)
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file)
if __name__=="__main__":
train()
| 1,308 | 25.18 | 65 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/train_imagenet.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
import submitit
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/train_imagenet.yaml")
def run(cfg):
if cfg.num_gpus > 1:
job_env = submitit.JobEnvironment()
rank = job_env.global_rank
world_size = job_env.num_tasks
if rank != 0:
logging.root.handlers = []
try:
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
backend='nccl',
init_method="tcp://{}:{}".format('localhost', 10001),
world_size=world_size,
rank=rank
)
train(cfg, is_leader=(rank==0))
except KeyboardInterrupt:
pass
finally:
torch.distributed.destroy_process_group()
else:
train(cfg, is_leader=True)
def train(cfg, is_leader):
    """Train an ImageNet model, evaluate it clean, then on corruptions.

    The clean test error is passed into test_corrupt_net so relative
    corruption metrics (mCE) can be computed against the baseline file.
    """
    np.random.seed(cfg.rng_seed)
    torch.manual_seed(cfg.rng_seed)
    log.info(cfg.pretty())
    cur_device = torch.cuda.current_device()
    model = instantiate(cfg.model).cuda(device=cur_device)
    if cfg.num_gpus > 1:
        model = torch.nn.parallel.DistributedDataParallel(
            module=model,
            device_ids=[cur_device],
            output_device=cur_device
        )
    optimizer = instantiate(cfg.optim, model.parameters())
    # The training set is only needed when actually training from scratch.
    if cfg.optim.max_epoch > 0 and cfg.train.weights is None:
        print("Loading training set...")
        train_dataset = instantiate(cfg.train)
    else:
        print("Skipping loading the training dataset, 0 epochs of training to perform "
              " or pre-trained weights provided.")
        train_dataset = None
    print("Loading test set...")
    test_dataset = instantiate(cfg.test)
    lr_policy = instantiate(cfg.optim.lr_policy)
    print("Training...")
    train_net(model=model,
            optimizer=optimizer,
            train_dataset=train_dataset,
            batch_size=cfg.train.batch_size,
            max_epoch=cfg.optim.max_epoch,
            loader_params=cfg.data_loader,
            lr_policy=lr_policy,
            save_period=cfg.train.checkpoint_period,
            weights=cfg.train.weights,
            num_gpus=cfg.num_gpus,
            is_leader=is_leader
            )
    print("Testing...")
    err = test_net(model=model,
            test_dataset=test_dataset,
            batch_size=cfg.test.batch_size,
            loader_params=cfg.data_loader,
            num_gpus=cfg.num_gpus)
    test_corrupt_net(model=model,
            corrupt_cfg=cfg.corrupt,
            batch_size=cfg.corrupt.batch_size,
            loader_params=cfg.data_loader,
            aug_string=cfg.corrupt.aug_string,
            clean_err=err,
            mCE_denom=cfg.corrupt.mCE_baseline_file,
            num_gpus=cfg.num_gpus,
            log_name='train_imagenet.log')
if __name__=="__main__":
run()
| 3,301 | 29.859813 | 87 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/closest_augs.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
import os
import omegaconf
from overlap.extract_features import extract_features
from overlap.utils import logging as lu
log = logging.getLogger(__name__)
def individual_sort(aug_dists):
    """Interleaved ranking of augmentations across corruption targets.

    ``aug_dists`` has shape (num_augs, num_corruptions). Rank level 0
    visits each corruption's closest augmentation (ordered globally by
    distance), level 1 the next-closest per corruption, and so on; each
    augmentation index is emitted only once, at its first appearance.
    Returns the resulting array of augmentation indices.
    """
    ordering = []
    seen = set()
    args_by_rank = np.argsort(aug_dists, axis=0)
    dists_by_rank = np.sort(aug_dists, axis=0)
    for rank in range(len(args_by_rank)):
        rank_args = args_by_rank[rank]
        rank_dists = dists_by_rank[rank]
        # Within this rank level, visit corruptions from smallest distance.
        for aug_idx in rank_args[np.argsort(rank_dists)]:
            if aug_idx not in seen:
                seen.add(aug_idx)
                ordering.append(aug_idx)
    return np.array(ordering)
@hydra.main(config_path="conf/closest_augs.yaml")
def train(cfg):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
model = instantiate(cfg.model).cuda()
optimizer = instantiate(cfg.optim, model.parameters())
lr_policy = instantiate(cfg.optim.lr_policy)
if cfg.transform_file and os.path.exists(cfg.transform_file):
log.info("Transforms found, loading feature extractor is unnecessary. Skipping.")
else:
feature_extractor = instantiate(cfg.ft)
feature_extractor.train()
if cfg.transform_file and os.path.exists(cfg.transform_file):
log.info("Transforms found, feature extraction is unnecessary. Skipping.")
elif cfg.aug_feature_file and os.path.exists(cfg.aug_feature_file):
log.info("Found feature file. Loading from {}".format(cfg.aug_feature_file))
data = np.load(cfg.aug_feature_file)
augmentation_features = data['features']
indices = data['indices']
transforms = data['transforms']
else:
ft_augmentation_dataset = instantiate(cfg.ft_augmentation)
transforms = ft_augmentation_dataset.transform_list
indices = np.random.choice(np.arange(len(ft_augmentation_dataset)), size=cfg.num_images, replace=False)
ft_augmentation_dataset = ft_augmentation_dataset.serialize(indices)
augmentation_features = extract_features(feature_extractor,
ft_augmentation_dataset,
cfg.ft_augmentation.batch_size,
cfg.data_loader,
average=True,
average_num=len(indices))
if cfg.aug_feature_file:
np.savez(cfg.aug_feature_file,
features=augmentation_features,
indices=indices,
transforms=transforms)
if cfg.transform_file and os.path.exists(cfg.transform_file):
log.info("Found transform file. Loading from {}.".format(cfg.transform_file))
sorted_transforms = np.load(cfg.transform_file)
else:
aug_strings = cfg.ft_corrupt.aug_string.split("--")
distances = np.zeros((len(augmentation_features), len(aug_strings)))
for i, aug in enumerate(aug_strings):
with omegaconf.open_dict(cfg):
ft_corrupt_dataset = instantiate(cfg.ft_corrupt, aug_string=aug)
if cfg.num_corrupt_images and i==0:
indices = np.random.choice(np.arange(len(ft_corrupt_dataset)), size=cfg.num_corrupt_images, replace=False)
ft_corrupt_dataset = ft_corrupt_dataset.serialize(indices)
corruption_features = extract_features(feature_extractor,
ft_corrupt_dataset,
cfg.ft_corrupt.batch_size,
cfg.data_loader,
average=True)
corruption_features = corruption_features.reshape(1, -1)
dists = np.linalg.norm(augmentation_features - corruption_features, axis=-1)
distances[:,i] = dists
sorted_dist_args = individual_sort(distances)
sorted_transforms = transforms[sorted_dist_args]
if cfg.transform_file:
np.save(cfg.transform_file, sorted_transforms)
train_dataset = instantiate(cfg.train)
if cfg.selection_type == 'closest':
train_dataset.transform_list = sorted_transforms[cfg.offset:cfg.offset+cfg.num_transforms]
elif cfg.selection_type == 'farthest':
train_dataset.transform_list = sorted_transforms[-cfg.offset-cfg.num_transforms:-cfg.offset]\
if cfg.offset != 0 else sorted_transforms[-cfg.num_transforms:]
else:
train_dataset.transform_list = sorted_transforms[np.random.choice(np.arange(len(sorted_transforms)), size=cfg.num_transforms, replace=False)]
test_dataset = instantiate(cfg.test)
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights
)
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader,
output_name='test_epoch')
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file)
if __name__=="__main__":
train()
| 6,088 | 41.284722 | 149 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/severity_scan_imagenet.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
import numpy as np
import torch
import pickle
import os
import omegaconf
from overlap.extract_features import extract_features
import submitit
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/severity_scan_imagenet.yaml")
def run(cfg):
if cfg.num_gpus > 1:
job_env = submitit.JobEnvironment()
rank = job_env.global_rank
world_size = job_env.num_tasks
if rank != 0:
logging.root.handlers = []
try:
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
backend='nccl',
init_method="tcp://{}:{}".format('localhost', 10001),
world_size=world_size,
rank=rank
)
train(cfg, is_leader=(rank==0))
except KeyboardInterrupt:
pass
finally:
torch.distributed.destroy_process_group()
else:
train(cfg, is_leader=True)
def train(cfg, is_leader=True):
    """Train/evaluate a model, then sweep ImageNet corruptions at
    cfg.severity, accumulating averaged features into cfg.feature_file.

    Mirrors the CIFAR severity scan, but chooses between serialize() and
    torch Subset for the feature-extraction dataset depending on whether
    per-image transforms are configured.
    """
    np.random.seed(cfg.rng_seed)
    torch.manual_seed(cfg.rng_seed)
    log.info(cfg.pretty())
    cur_device = torch.cuda.current_device()
    model = instantiate(cfg.model).cuda(device=cur_device)
    if cfg.num_gpus > 1:
        model = torch.nn.parallel.DistributedDataParallel(
            module=model,
            device_ids=[cur_device],
            output_device=cur_device
        )
    optimizer = instantiate(cfg.optim, model.parameters())
    # Skip building the training set when no training will happen.
    if cfg.optim.max_epoch > 0:
        train_dataset = instantiate(cfg.train)
    else:
        train_dataset = None
    test_dataset = instantiate(cfg.test)
    lr_policy = instantiate(cfg.optim.lr_policy)
    # open_dict: allow injecting keys not declared in the ft config node.
    with omegaconf.open_dict(cfg):
        feature_extractor = instantiate(cfg.ft, num_gpus=cfg.num_gpus, is_leader=is_leader)
    feature_extractor.train()
    train_net(model=model,
            optimizer=optimizer,
            train_dataset=train_dataset,
            batch_size=cfg.train.batch_size,
            max_epoch=cfg.optim.max_epoch,
            loader_params=cfg.data_loader,
            lr_policy=lr_policy,
            save_period=cfg.train.checkpoint_period,
            weights=cfg.train.weights,
            num_gpus=cfg.num_gpus,
            is_leader=is_leader
            )
    err = test_net(model=model,
            test_dataset=test_dataset,
            batch_size=cfg.test.batch_size,
            loader_params=cfg.data_loader,
            output_name='test_epoch',
            num_gpus=cfg.num_gpus)
    # Resume support: previously computed features are loaded and skipped.
    if os.path.exists(cfg.feature_file):
        feature_dict = {k : v for k, v in np.load(cfg.feature_file).items()}
    else:
        feature_dict = {}
    indices = np.load(cfg.ft_corrupt.indices_file)
    for aug in cfg.aug_string.split("--"):
        # "name-low_high" or "name-high": map cfg.severity in [0,10] onto
        # [low, high] linearly; a bare "name" uses cfg.severity directly.
        if len(aug.split("-")) > 1:
            #log.info("Severity provided in corrupt.aug_string will be weighted by given severity.")
            sev = aug.split("-")[1]
            if len(sev.split("_")) > 1:
                low = float(sev.split("_")[0])
                high = float(sev.split("_")[1])
            else:
                low = 0.0
                high = float(sev)
            sev_factor = (high - low) * cfg.severity / 10 + low
        else:
            sev_factor = cfg.severity
        aug = aug.split("-")[0]
        aug_string = "{}-{}".format(aug, sev_factor)
        if aug_string in feature_dict:
            continue
        with omegaconf.open_dict(cfg.corrupt):
            corrupt_dataset = instantiate(cfg.corrupt, aug_string=aug_string)
        err = test_net(model=model,
                test_dataset=corrupt_dataset,
                batch_size=cfg.corrupt.batch_size,
                loader_params=cfg.data_loader,
                output_name=aug_string,
                num_gpus=cfg.num_gpus)
        with omegaconf.open_dict(cfg.ft_corrupt):
            ft_corrupt_dataset = instantiate(cfg.ft_corrupt, aug_string=aug_string)
        # serialize() pins per-image transforms; otherwise a plain Subset
        # restricted to the fixed indices suffices.
        if cfg.ft_corrupt.params.num_transforms is not None:
            ft_corrupt_dataset = ft_corrupt_dataset.serialize(indices)
        else:
            ft_corrupt_dataset = torch.utils.data.Subset(ft_corrupt_dataset, indices)
        feature = extract_features(feature_extractor=feature_extractor,
                dataset=ft_corrupt_dataset,
                batch_size=cfg.ft_corrupt.batch_size,
                loader_params=cfg.data_loader,
                average=True,
                num_gpus=cfg.num_gpus)
        feature_dict[aug_string] = feature
        # Checkpoint features after every corruption (leader only).
        if is_leader:
            np.savez(cfg.feature_file, **feature_dict)
if __name__=="__main__":
run()
| 4,942 | 34.307143 | 100 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/train_imagenet_jsd.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net_jsd import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
import submitit
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/train_imagenet_jsd.yaml")
def run(cfg):
    """Hydra entry point: dispatch to single- or multi-GPU JSD training."""
    if cfg.num_gpus <= 1:
        # Single-process path: no distributed setup required.
        train(cfg, is_leader=True)
        return
    env = submitit.JobEnvironment()
    rank, world_size = env.global_rank, env.num_tasks
    if rank != 0:
        # Silence logging on non-leader ranks.
        logging.root.handlers = []
    try:
        torch.cuda.set_device(rank)
        torch.distributed.init_process_group(
            backend='nccl',
            init_method="tcp://{}:{}".format('localhost', 10001),
            world_size=world_size,
            rank=rank,
        )
        train(cfg, is_leader=(rank == 0))
    except KeyboardInterrupt:
        pass
    finally:
        torch.distributed.destroy_process_group()
def train(cfg, is_leader):
    """Build model/optimizer/datasets from the hydra config, train with the
    JSD consistency loss, then evaluate on the clean and corrupted test sets.

    Args:
        cfg: hydra config with model/optim/train/test/corrupt/data_loader nodes.
        is_leader: True only on the rank-0 process; only the leader checkpoints.
    """
    # Seed numpy and torch so runs are reproducible per cfg.rng_seed.
    np.random.seed(cfg.rng_seed)
    torch.manual_seed(cfg.rng_seed)
    log.info(cfg.pretty())
    cur_device = torch.cuda.current_device()
    model = instantiate(cfg.model).cuda(device=cur_device)
    if cfg.num_gpus > 1:
        # Wrap for multi-GPU training; gradients are synchronized across ranks.
        model = torch.nn.parallel.DistributedDataParallel(
            module=model,
            device_ids=[cur_device],
            output_device=cur_device
        )
    optimizer = instantiate(cfg.optim, model.parameters())
    if cfg.optim.max_epoch > 0 and cfg.train.weights is None:
        print("Loading training set...")
        train_dataset = instantiate(cfg.train)
    else:
        # Nothing to train (0 epochs or pretrained weights), so skip the
        # expensive training-dataset construction.
        print("Skipping loading the training dataset, 0 epochs of training to perform "
                " or pre-trained weights provided.")
        train_dataset = None
    print("Loading test set...")
    test_dataset = instantiate(cfg.test)
    lr_policy = instantiate(cfg.optim.lr_policy)
    print("Training...")
    train_net(model=model,
            optimizer=optimizer,
            train_dataset=train_dataset,
            batch_size=cfg.train.batch_size,
            max_epoch=cfg.optim.max_epoch,
            loader_params=cfg.data_loader,
            lr_policy=lr_policy,
            save_period=cfg.train.checkpoint_period,
            weights=cfg.train.weights,
            num_gpus=cfg.num_gpus,
            is_leader=is_leader,
            jsd_num=cfg.train.params.jsd_num,
            jsd_alpha=cfg.train.jsd_alpha
            )
    print("Testing...")
    # Clean test error is also passed to the corruption evaluation for rmCE.
    err = test_net(model=model,
            test_dataset=test_dataset,
            batch_size=cfg.test.batch_size,
            loader_params=cfg.data_loader,
            num_gpus=cfg.num_gpus)
    test_corrupt_net(model=model,
            corrupt_cfg=cfg.corrupt,
            batch_size=cfg.corrupt.batch_size,
            loader_params=cfg.data_loader,
            aug_string=cfg.corrupt.aug_string,
            clean_err=err,
            mCE_denom=cfg.corrupt.mCE_baseline_file,
            num_gpus=cfg.num_gpus,
            log_name='train_imagenet.log')
# Script entry point: hydra parses CLI overrides/config before calling run().
if __name__=="__main__":
    run()
| 3,398 | 30.183486 | 87 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/feature_corrupt_error.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
import os
import omegaconf
from overlap.extract_features import extract_features
from overlap.utils import logging as lu
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/feature_corrupt_error.yaml")
def train(cfg):
    """Compare augmentation vs. corruption feature statistics, then train
    and evaluate a model on clean and corrupted data.

    For each corruption in cfg.ft_corrupt.aug_string, logs:
      * mmd: distance between mean augmentation and mean corruption features.
      * msd: distance from the closest augmentation feature to the corruption mean.
    """
    np.random.seed(cfg.rng_seed)
    torch.manual_seed(cfg.rng_seed)
    log.info(cfg.pretty())
    model = instantiate(cfg.model).cuda()
    optimizer = instantiate(cfg.optim, model.parameters())
    train_dataset = instantiate(cfg.train)
    test_dataset = instantiate(cfg.test)
    lr_policy = instantiate(cfg.optim.lr_policy)
    feature_extractor = instantiate(cfg.ft)
    feature_extractor.train()
    if cfg.aug_feature_file and os.path.exists(cfg.aug_feature_file):
        # Reuse previously computed augmentation features.
        log.info("Found feature file. Loading from {}".format(cfg.aug_feature_file))
        data = np.load(cfg.aug_feature_file)
        augmentation_features = data['features']
        indices = data['indices']
    else:
        # Sample cfg.num_images images and extract averaged augmentation features.
        ft_augmentation_dataset = instantiate(cfg.ft_augmentation)
        indices = np.random.choice(np.arange(len(ft_augmentation_dataset)), size=cfg.num_images, replace=False)
        ft_augmentation_dataset = ft_augmentation_dataset.serialize(indices)
        augmentation_features = extract_features(feature_extractor,
                ft_augmentation_dataset,
                cfg.ft_augmentation.batch_size,
                cfg.data_loader,
                average=True,
                average_num=len(indices))
        #nf, lf = augmentation_features.shape
        #augmentation_features = np.mean(augmentation_features.reshape(len(indices), nf//len(indices), lf), axis=0)
        if cfg.aug_feature_file:
            np.savez(cfg.aug_feature_file, features=augmentation_features, indices=indices)
    aug_strings = cfg.ft_corrupt.aug_string.split("--")
    for aug in aug_strings:
        # Extract features for the same image indices under this corruption.
        with omegaconf.open_dict(cfg):
            ft_corrupt_dataset = instantiate(cfg.ft_corrupt, aug_string=aug)
        ft_corrupt_dataset = ft_corrupt_dataset.serialize(indices)
        corruption_features = extract_features(feature_extractor,
                ft_corrupt_dataset,
                cfg.ft_corrupt.batch_size,
                cfg.data_loader,
                average=True,
                average_num=len(indices))
        nf, lf = corruption_features.shape
        #corruption_features = np.mean(corruption_features.reshape(len(indices), nf//len(indices), lf), axis=0)
        augmentation_features = augmentation_features.reshape(-1, 1, lf)
        corruption_features = corruption_features.reshape(1, -1, lf)
        # mmd: distance between the two feature-set means.
        mean_aug = np.mean(augmentation_features.reshape(-1,lf), axis=0)
        mean_corr = np.mean(corruption_features.reshape(-1,lf), axis=0)
        mmd = np.linalg.norm(mean_aug-mean_corr, axis=0)
        # msd: distance from the nearest augmentation feature to the corruption mean.
        msd = np.min(np.linalg.norm(augmentation_features.reshape(-1,lf)-mean_corr.reshape(1,lf),axis=1),axis=0)
        stats = {"_type" : aug,
                "mmd" : str(mmd),
                "msd" : str(msd),
                }
        lu.log_json_stats(stats)
    train_net(model=model,
            optimizer=optimizer,
            train_dataset=train_dataset,
            batch_size=cfg.train.batch_size,
            max_epoch=cfg.optim.max_epoch,
            loader_params=cfg.data_loader,
            lr_policy=lr_policy,
            save_period=cfg.train.checkpoint_period,
            weights=cfg.train.weights
            )
    err = test_net(model=model,
            test_dataset=test_dataset,
            batch_size=cfg.test.batch_size,
            loader_params=cfg.data_loader,
            output_name='test_epoch')
    test_corrupt_net(model=model,
            corrupt_cfg=cfg.corrupt,
            batch_size=cfg.corrupt.batch_size,
            loader_params=cfg.data_loader,
            aug_string=cfg.corrupt.aug_string,
            clean_err=err,
            mCE_denom=cfg.corrupt.mCE_baseline_file)
# Script entry point: hydra parses CLI overrides/config before calling train().
if __name__=="__main__":
    train()
| 4,670 | 40.336283 | 115 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/train_cifar10_jsd.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net_jsd import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
from pathlib import Path
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/train_cifar10_jsd.yaml")
def train(cfg):
    """Train a CIFAR-10 model with the JSD consistency loss, then evaluate
    on the clean test set and on the configured corruptions."""
    # Seed numpy and torch so runs are reproducible per cfg.rng_seed.
    np.random.seed(cfg.rng_seed)
    torch.manual_seed(cfg.rng_seed)
    log.info(cfg.pretty())
    model = instantiate(cfg.model).cuda()
    optimizer = instantiate(cfg.optim, model.parameters())
    train_dataset = instantiate(cfg.train)
    test_dataset = instantiate(cfg.test)
    lr_policy = instantiate(cfg.optim.lr_policy)
    train_net(model=model,
            optimizer=optimizer,
            train_dataset=train_dataset,
            batch_size=cfg.train.batch_size,
            max_epoch=cfg.optim.max_epoch,
            loader_params=cfg.data_loader,
            lr_policy=lr_policy,
            save_period=cfg.train.checkpoint_period,
            weights=cfg.train.weights,
            jsd_num=cfg.train.params.jsd_num,
            jsd_alpha=cfg.train.jsd_alpha
            )
    # Clean test error feeds the relative-mCE computation below.
    err = test_net(model=model,
            test_dataset=test_dataset,
            batch_size=cfg.test.batch_size,
            loader_params=cfg.data_loader)
    test_corrupt_net(model=model,
            corrupt_cfg=cfg.corrupt,
            batch_size=cfg.corrupt.batch_size,
            loader_params=cfg.data_loader,
            aug_string=cfg.corrupt.aug_string,
            clean_err=err,
            mCE_denom=cfg.corrupt.mCE_baseline_file)
# Script entry point: hydra parses CLI overrides/config before calling train().
if __name__=="__main__":
    train()
| 1,841 | 27.338462 | 65 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/train_cifar10.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
from pathlib import Path
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/train_cifar10.yaml")
def train(cfg):
    """Train a CIFAR-10 model with standard cross-entropy, then evaluate
    on the clean test set and on the configured corruptions."""
    # Seed numpy and torch so runs are reproducible per cfg.rng_seed.
    np.random.seed(cfg.rng_seed)
    torch.manual_seed(cfg.rng_seed)
    log.info(cfg.pretty())
    model = instantiate(cfg.model).cuda()
    optimizer = instantiate(cfg.optim, model.parameters())
    train_dataset = instantiate(cfg.train)
    test_dataset = instantiate(cfg.test)
    lr_policy = instantiate(cfg.optim.lr_policy)
    train_net(model=model,
            optimizer=optimizer,
            train_dataset=train_dataset,
            batch_size=cfg.train.batch_size,
            max_epoch=cfg.optim.max_epoch,
            loader_params=cfg.data_loader,
            lr_policy=lr_policy,
            save_period=cfg.train.checkpoint_period,
            weights=cfg.train.weights
            )
    # Clean test error feeds the relative-mCE computation below.
    err = test_net(model=model,
            test_dataset=test_dataset,
            batch_size=cfg.test.batch_size,
            loader_params=cfg.data_loader)
    test_corrupt_net(model=model,
            corrupt_cfg=cfg.corrupt,
            batch_size=cfg.corrupt.batch_size,
            loader_params=cfg.data_loader,
            aug_string=cfg.corrupt.aug_string,
            clean_err=err,
            mCE_denom=cfg.corrupt.mCE_baseline_file)
# Script entry point: hydra parses CLI overrides/config before calling train().
if __name__=="__main__":
    train()
| 1,744 | 26.698413 | 65 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/test_imagenet.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
import submitit
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/test_imagenet.yaml")
def run(cfg):
    """Hydra entry point: dispatch to single- or multi-GPU evaluation."""
    if cfg.num_gpus <= 1:
        # Single-process path: no distributed setup required.
        train(cfg, is_leader=True)
        return
    env = submitit.JobEnvironment()
    rank, world_size = env.global_rank, env.num_tasks
    if rank != 0:
        # Silence logging on non-leader ranks.
        logging.root.handlers = []
    try:
        torch.cuda.set_device(rank)
        torch.distributed.init_process_group(
            backend='nccl',
            init_method="tcp://{}:{}".format('localhost', 10001),
            world_size=world_size,
            rank=rank,
        )
        train(cfg, is_leader=(rank == 0))
    except KeyboardInterrupt:
        pass
    finally:
        torch.distributed.destroy_process_group()
def train(cfg, is_leader):
    """Evaluate a pretrained model on clean and corrupted ImageNet.

    Despite the name, no training occurs here: weights are loaded from
    cfg.weights and the model is only evaluated.
    """
    np.random.seed(cfg.rng_seed)
    torch.manual_seed(cfg.rng_seed)
    log.info(cfg.pretty())
    cur_device = torch.cuda.current_device()
    model = instantiate(cfg.model).cuda(device=cur_device)
    if cfg.num_gpus > 1:
        model = torch.nn.parallel.DistributedDataParallel(
            module=model,
            device_ids=[cur_device],
            output_device=cur_device
        )
    print("Loading test set...")
    test_dataset = instantiate(cfg.test)
    # Load the checkpoint on CPU first to avoid GPU memory spikes.
    checkpoint = torch.load(cfg.weights, map_location='cpu')
    if cfg.num_gpus > 1:
        # The DDP wrapper holds the real model under .module.
        model.module.load_state_dict(checkpoint['model_state'])
    else:
        model.load_state_dict(checkpoint['model_state'])
    print("Testing...")
    err = test_net(model=model,
            test_dataset=test_dataset,
            batch_size=cfg.test.batch_size,
            loader_params=cfg.data_loader,
            num_gpus=cfg.num_gpus)
    test_corrupt_net(model=model,
            corrupt_cfg=cfg.corrupt,
            batch_size=cfg.corrupt.batch_size,
            loader_params=cfg.data_loader,
            aug_string=cfg.corrupt.aug_string,
            clean_err=err,
            mCE_denom=cfg.corrupt.mCE_baseline_file,
            num_gpus=cfg.num_gpus,
            log_name='train_imagenet.log')
# Script entry point: hydra parses CLI overrides/config before calling run().
if __name__=="__main__":
    run()
| 2,623 | 27.835165 | 73 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/overlap/train_net_jsd.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
import os
import time
import datetime
import torch.nn as nn
import torch.nn.functional as F
log = logging.getLogger(__name__)
def eta_str(eta_td):
    """Format a timedelta ETA as 'DD,HH:MM:SS' with zero-padded fields."""
    hours, remainder = divmod(eta_td.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '{0:02},{1:02}:{2:02}:{3:02}'.format(eta_td.days, hours, minutes, seconds)
def train_net(model, optimizer, train_dataset,
        batch_size,
        max_epoch,
        loader_params,
        lr_policy,
        checkpoint_folder='checkpoints',
        name=None,
        save_period=1,
        weights=None,
        num_gpus=1,
        is_leader=True,
        jsd_num=3,
        jsd_alpha=12.0):
    """Train `model` with the JSD consistency loss, checkpointing periodically.

    Resumes from the newest matching checkpoint in `checkpoint_folder` if one
    exists, or loads `weights` and skips training entirely when provided.

    Args:
        model: network to train (possibly DistributedDataParallel-wrapped).
        optimizer: optimizer over the model's parameters.
        train_dataset: dataset yielding (inputs, labels); None skips training.
        batch_size: per-process batch size.
        max_epoch: last epoch number (inclusive, 1-based).
        loader_params: config providing `num_workers` and `pin_memory`.
        lr_policy: callable mapping 0-based epoch index -> learning rate.
        checkpoint_folder: directory for periodic checkpoints.
        name: optional prefix for checkpoint filenames.
        save_period: checkpoint every this many epochs (and at max_epoch).
        weights: optional pretrained checkpoint path; if set, training is skipped.
        num_gpus: number of distributed processes.
        is_leader: only the leader process writes checkpoints.
        jsd_num: number of views packed per example (see train_epoch).
        jsd_alpha: weight of the JSD consistency term.

    Returns:
        The model when pretrained weights are loaded or there is no training
        dataset; otherwise trains in place and implicitly returns None.
    """
    chpk_pre = 'model_epoch_'
    if name is not None:
        chpk_pre = name + "_" + chpk_pre
    chpk_post = '.pyth'
    # Collect existing checkpoints with a matching prefix so we can resume.
    if os.path.exists(checkpoint_folder):
        checkpoints = [c for c in os.listdir(checkpoint_folder) if chpk_post in c and chpk_pre == "_".join(c.split("_")[:-1]) +"_"]
    else:
        checkpoints = []
    if weights:
        checkpoint = torch.load(weights, map_location='cpu')
        log.info("Pretrained weights provided. Loading model from {} and skipping training.".format(weights))
        if num_gpus > 1:
            model.module.load_state_dict(checkpoint['model_state'])
        else:
            model.load_state_dict(checkpoint['model_state'])
        return model
    elif checkpoints:
        # Resume: restore model/optimizer state and continue after the saved epoch.
        last_checkpoint_name = os.path.join(checkpoint_folder, sorted(checkpoints)[-1])
        checkpoint = torch.load(last_checkpoint_name, map_location='cpu')
        log.info("Loading model from {}".format(last_checkpoint_name))
        if num_gpus > 1:
            model.module.load_state_dict(checkpoint['model_state'])
        else:
            model.load_state_dict(checkpoint['model_state'])
        optimizer.load_state_dict(checkpoint['optimizer_state'])
        start_epoch = checkpoint['epoch'] + 1
    else:
        start_epoch = 1
    if train_dataset is None:
        return model
    sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\
        if num_gpus > 1 else None
    loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True if sampler is None else False,
        sampler=sampler,
        num_workers=loader_params.num_workers,
        pin_memory=loader_params.pin_memory,
        drop_last=True
    )
    for i in range(start_epoch, max_epoch+1):
        log.info("Starting epoch {}/{}".format(i, max_epoch))
        time_start = time.time()
        if sampler:
            # Reshuffle the distributed sampler differently each epoch.
            sampler.set_epoch(i)
        train_epoch(model, optimizer, loader, lr_policy, i, num_gpus, jsd_num=jsd_num, jsd_alpha=jsd_alpha)
        time_stop = time.time()
        seconds_taken = (time_stop - time_start)
        # Linear ETA estimate from this epoch's duration.
        eta_td = datetime.timedelta(seconds=int(seconds_taken*(max_epoch-i)))
        log.info("Seconds taken: {:.2f}, Time remaining: {}".format(seconds_taken, eta_str(eta_td)))
        if (i % save_period == 0 or i == max_epoch) and is_leader:
            # Unwrap DDP before serializing so checkpoints load without DDP.
            if num_gpus > 1:
                m = model.module
            else:
                m = model
            checkpoint = {
                'epoch' : i,
                'model_state' : m.state_dict(),
                'optimizer_state' : optimizer.state_dict()
            }
            checkpoint_file = "{:s}{:04d}{:s}".format(chpk_pre, i, chpk_post)
            if not os.path.exists(checkpoint_folder):
                os.mkdir(checkpoint_folder)
            checkpoint_file = os.path.join(checkpoint_folder, checkpoint_file)
            log.info("Saving model to {}".format(checkpoint_file))
            torch.save(checkpoint, checkpoint_file)
class JSDLoss(nn.Module):
    """Cross-entropy on the clean view plus a Jensen-Shannon consistency term.

    Expects `num` views of each image packed consecutively along the batch
    dimension, view 0 being the clean image. In eval mode this reduces to
    plain cross-entropy on the raw predictions.
    """

    def __init__(self, alpha=12.0, num=3):
        super().__init__()
        self.cross_entropy = nn.CrossEntropyLoss()
        self.alpha = alpha
        self.num = num

    def forward(self, preds, labels):
        if not self.training:
            # Evaluation: no packed views, just standard cross-entropy.
            return self.cross_entropy(preds, labels)
        batch, classes = preds.size()
        grouped = preds.view(batch // self.num, self.num, classes)
        # Supervised loss uses only the clean (first) view of each group.
        clean_loss = self.cross_entropy(grouped[:, 0, :], labels)
        probs = F.softmax(grouped, dim=2)
        # KL of each view against the (clamped) log-mean mixture distribution.
        mixture = torch.mean(probs, dim=1, keepdim=True)
        log_mixture = torch.clamp(mixture, 1e-7, 1).log().repeat(1, self.num, 1)
        consistency = F.kl_div(log_mixture, probs, reduction='batchmean') / self.num
        return clean_loss + self.alpha * consistency
def train_epoch(model, optimizer, loader, lr_policy, epoch, num_gpus=1, jsd_num=3, jsd_alpha=12.0):
    """Run one training epoch with the JSD loss and log average loss/error."""
    # Apply this epoch's learning rate to every parameter group.
    lr = lr_policy(epoch-1)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    model.train()
    loss_fun = JSDLoss(alpha=jsd_alpha,num=jsd_num).cuda()
    loss_fun.train()
    avg_loss = 0.0
    num_correct = 0
    num_total = 0
    num_batches = 0
    for cur_iter, (inputs, labels) in enumerate(loader):
        inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
        bs, c, h, w = inputs.size()
        # The loader packs jsd_num views per image along the channel axis;
        # unpack them into the batch dimension for the forward pass.
        inputs = inputs.view(bs*jsd_num, c//jsd_num, h, w) # Unpack jsd images
        preds = model(inputs)
        loss = loss_fun(preds, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Accuracy is measured on the clean (first) view only.
        preds = preds.view(bs, jsd_num, -1)
        preds = preds[:,0,:]
        correct = torch.sum(torch.argmax(preds, dim=1)==labels)
        if num_gpus > 1:
            # Sum correct counts and losses across all replicas.
            torch.distributed.all_reduce(correct)
            torch.distributed.all_reduce(loss)
        avg_loss += loss.item()
        num_correct += correct.item()
        num_total += labels.size(0) * num_gpus
        num_batches += num_gpus
    avg_loss /= num_batches
    err = 100 * (1 - num_correct / num_total)
    log.info("Avg loss: {:.3f}, Avg err: {:.3f}".format(avg_loss, err))
| 6,417 | 37.202381 | 135 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/overlap/feature_extractor.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
from hydra.utils import instantiate
from .train_net import train_net
class Network(object):
    """Wraps a hydra-configured model as a trainable feature extractor."""

    def __init__(self, model_cfg, optim_cfg, dataset_cfg, data_loader, num_gpus=1, is_leader=True):
        """Instantiate the model (optionally DDP-wrapped) together with its
        optimizer, training dataset, and schedule from hydra configs."""
        cur_device = torch.cuda.current_device()
        self.model = instantiate(model_cfg).cuda(device=cur_device)
        if num_gpus > 1:
            self.model = torch.nn.parallel.DistributedDataParallel(
                module=self.model,
                device_ids=[cur_device],
                output_device=cur_device
            )
        self.optimizer = instantiate(optim_cfg, self.model.parameters())
        # Only build the dataset when there is training to do.
        if optim_cfg.max_epoch > 0:
            self.dataset = instantiate(dataset_cfg)
        else:
            self.dataset = None
        self.batch_size = dataset_cfg.batch_size
        self.max_epoch = optim_cfg.max_epoch
        self.loader_params = data_loader
        self.lr_policy = instantiate(optim_cfg.lr_policy)
        self.save_period = dataset_cfg.checkpoint_period
        self.weights = dataset_cfg.weights
        self.num_gpus = num_gpus
        self.is_leader = is_leader

    def train(self):
        """Train (or load) the underlying model, then switch it to eval mode."""
        train_net(self.model,
                self.optimizer,
                self.dataset,
                self.batch_size,
                self.max_epoch,
                self.loader_params,
                self.lr_policy,
                save_period=self.save_period,
                name='ft',
                weights=self.weights,
                num_gpus=self.num_gpus,
                is_leader=self.is_leader
                )
        self.model.eval()

    def extract(self, x):
        """Forward `x` and return the model's `features` attribute, which the
        underlying model populates during the forward pass."""
        preds = self.model(x)
        if self.num_gpus > 1:
            # DDP hides the real module (and its features) under .module.
            return self.model.module.features
        else:
            return self.model.features
| 2,017 | 32.081967 | 99 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/overlap/test_net.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
from .utils import logging as lu
log = logging.getLogger(__name__)
def test_net(model, test_dataset, batch_size, loader_params, output_name='test_epoch', num_gpus=1):
    """Evaluate `model` on `test_dataset`, log and return the top-1 error (%).

    Args:
        model: network to evaluate (possibly DistributedDataParallel-wrapped).
        test_dataset: dataset yielding (inputs, labels).
        batch_size: per-process batch size.
        loader_params: config providing `num_workers` and `pin_memory`.
        output_name: '_type' tag used in the JSON stats log line.
        num_gpus: number of distributed processes; >1 shards the dataset.

    Returns:
        Top-1 error as a percentage over the full dataset.
    """
    model.eval()
    # Shard the evaluation set across processes when running multi-GPU.
    sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)\
        if num_gpus > 1 else None
    loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=batch_size,
        shuffle=False,
        sampler=sampler,
        num_workers=loader_params.num_workers,
        pin_memory=loader_params.pin_memory,
        drop_last=False
    )
    num_correct = 0
    # Cleanup: dropped the unused `num_total` accumulator and the unused
    # enumerate() index from the original implementation.
    for inputs, labels in loader:
        inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
        preds = model(inputs)
        correct = torch.sum(torch.argmax(preds, dim=1)==labels)
        if num_gpus > 1:
            # Sum correct counts over all replicas so every rank sees the total.
            torch.distributed.all_reduce(correct)
        num_correct += correct.item()
    err = 100 * (1 - num_correct / len(test_dataset))
    stats = {'_type' : output_name, 'top1_err' : err}
    lu.log_json_stats(stats)
    return err
| 1,338 | 30.880952 | 99 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/overlap/extract_features.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
from .utils import logging as lu
import numpy as np
import os
log = logging.getLogger(__name__)
def distributed_gather_features(curr_features, batch_size, num_gpus):
    """Gather per-process feature batches onto every rank, preserving the
    global sample order imposed by the distributed sampler.

    Pads the local batch to `batch_size` so all_gather sees equal shapes on
    every rank, then interleaves the gathered rows rank-major and trims the
    zero padding.
    """
    gather_list = [torch.zeros((batch_size, curr_features.size(-1)), device=curr_features.device)\
        for i in range(num_gpus)]
    count = curr_features.size(0)
    if count < batch_size:
        # The last batch may be short; pad with zeros so shapes match.
        curr_features = torch.cat((curr_features, torch.zeros((batch_size - count, curr_features.size(-1)), device=curr_features.device)), dim=0)
    torch.distributed.all_gather(gather_list, curr_features)
    # Total number of real (unpadded) rows across all ranks.
    count = torch.Tensor([count]).cuda()
    torch.distributed.all_reduce(count)
    count = int(count.item())
    # Here we use that the distributed data sampler interleaves sampling across replicas
    curr_features = torch.stack(gather_list, dim=1).reshape(-1, curr_features.size(-1))
    curr_features = curr_features[:count,:]
    return curr_features
def extract_features(feature_extractor, dataset, batch_size, loader_params, average=True, num_gpus=1, average_num=None, preemption_protection=False, is_leader=True):
    """Run `feature_extractor.extract` over `dataset` and accumulate features.

    Accumulation modes:
      * average=True, average_num=None: one feature vector averaged over the
        whole dataset.
      * average=True, average_num=k: the dataset is treated as k sequential
        passes over len(dataset)//k items; returns one averaged row per item.
      * average=False: one feature row per example, in dataset order.

    Args:
        feature_extractor: object with `extract(inputs) -> (batch, dim)` tensor.
        dataset: dataset yielding (inputs, labels) pairs.
        batch_size: per-process batch size.
        loader_params: config providing `num_workers` and `pin_memory`.
        average: enable averaging (see above).
        num_gpus: number of distributed processes; >1 gathers across ranks.
        average_num: number of passes/transforms per underlying item.
        preemption_protection: periodically save partial results to
            'feature_extraction.tmp.npz' so an interrupted job can resume.
        is_leader: only the leader process writes the resume file.

    Returns:
        numpy.ndarray of extracted (possibly averaged) features.
    """
    sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=False)\
        if num_gpus > 1 else None
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        sampler=sampler,
        num_workers=loader_params.num_workers,
        pin_memory=loader_params.pin_memory,
        drop_last=False
    )
    features = None
    count = 0
    starting_iter = -1
    if preemption_protection and os.path.exists('feature_extraction.tmp.npz'):
        # Resume from a partial run. Bug fixes vs. the original: np.loadz does
        # not exist (was an AttributeError), and the accumulator must stay on
        # CPU because all updates below add CPU tensors into it.
        data = np.load('feature_extraction.tmp.npz')
        features = torch.Tensor(data['features'])
        count = int(data['count'])
        starting_iter = int(data['curr_iter'])
    for curr_iter, (inputs, labels) in enumerate(loader):
        if preemption_protection and curr_iter <= starting_iter:
            # Skip batches already folded into the restored accumulator.
            continue
        inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
        curr_features = feature_extractor.extract(inputs)
        if average and average_num is None:
            # Global average: sum everything now, divide once at the end.
            curr_features = torch.sum(curr_features, dim=0)
            if num_gpus > 1:
                torch.distributed.all_reduce(curr_features)
            features = (features + curr_features.detach().cpu()) if features is not None else curr_features.detach().cpu()
        elif average:
            # Per-item average over average_num passes: fold the stream into a
            # circular buffer of num_features rows.
            num_features = len(dataset) // average_num
            if num_gpus > 1:
                curr_features = distributed_gather_features(curr_features, batch_size, num_gpus)
            if features is None:
                features = torch.zeros(num_features, curr_features.size(-1))
            if count + curr_features.size(0) > num_features:
                # The batch wraps past the end of the buffer, possibly multiple times.
                remainder = count + curr_features.size(0) - num_features
                features[count:, :] += curr_features[:num_features-count,:].detach().cpu()
                offset = 0
                while remainder > num_features:
                    features += curr_features[offset+num_features-count:2*num_features-count+offset].detach().cpu()
                    offset += num_features
                    remainder -= num_features
                features[:remainder,:] += curr_features[offset+num_features-count:,:].detach().cpu()
                count = remainder
            else:
                features[count:count+curr_features.size(0),:] += curr_features.detach().cpu()
                count += curr_features.size(0)
                count = count % num_features
        else:
            # No averaging: store each gathered row in order.
            if num_gpus > 1:
                curr_features = distributed_gather_features(curr_features, batch_size, num_gpus)
            if features is None:
                features = torch.zeros(len(dataset), curr_features.size(-1))
            features[count:count+curr_features.size(0),:] = curr_features.detach().cpu()
            count += curr_features.size(0)
        if preemption_protection and curr_iter % 5000 == 0 and is_leader:
            np.savez('feature_extraction.tmp.npz', features=features.detach().cpu().numpy(), count=count, curr_iter=curr_iter)
    if average and average_num is None:
        features /= len(dataset)
    elif average:
        features /= average_num
    return features.detach().cpu().numpy()
| 4,564 | 43.320388 | 165 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/overlap/test_corrupt_net.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
from .utils import logging as lu
from omegaconf import open_dict
from .augmentations.utils import aug_finder
from hydra.utils import instantiate
import numpy as np
import os
import pickle
log = logging.getLogger(__name__)
def test_corrupt_net(model, corrupt_cfg, batch_size, loader_params, aug_string=None, mCE_denom=None, clean_err=None, imagenetc_grouping=True, num_gpus=1, log_name=None):
    """Evaluate `model` on a suite of corruptions and log per-corruption,
    per-category, and overall error statistics (plus mCE/rmCE when a
    baseline file is given).

    Args:
        model: network to evaluate.
        corrupt_cfg: hydra config instantiated once per corruption string.
        batch_size: per-process batch size.
        loader_params: config providing `num_workers` and `pin_memory`.
        aug_string: '--'-separated 'name-severity' list; defaults to the full
            ImageNet-C suite at severities 1-5.
        mCE_denom: pickle filename (under ../baseline_data/) of baseline
            errors used as the mCE denominator.
        clean_err: clean-test error, required for relative mCE (rmCE).
        imagenetc_grouping: also aggregate by ImageNet-C category, and exclude
            'extra' corruptions from the overall average.
        num_gpus: number of distributed processes.
        log_name: existing JSON log to consult so already-evaluated
            corruptions are skipped on resume.
    """
    model.eval()
    if aug_string is None:
        # Default: the full ImageNet-C corruption suite at severities 1-5.
        augs = aug_finder.get_augs_by_tag(['imagenet_c'])
        severities = [1,2,3,4,5]
        augs = ["{}-{}".format(a.name, s) for a in augs for s in severities]
    else:
        augs = aug_string.split("--")
    if log_name is not None and os.path.exists(log_name):
        prestats = lu.load_json_stats(log_name)
    else:
        prestats = None
    errs = []
    for aug in augs:
        # Skip corruptions already present in a previous run's log.
        # NOTE(review): skipped augs are not appended to `errs`, so on resume
        # the per-name averaging below can misalign errs with aug_names —
        # confirm resume behavior is as intended.
        if prestats is not None and len(lu.parse_json_stats(prestats, row_type=aug, key='top1_err')) > 0:
            continue
        with open_dict(corrupt_cfg):
            corrupt_dataset = instantiate(corrupt_cfg, aug_string=aug)
        sampler = torch.utils.data.distributed.DistributedSampler(corrupt_dataset)\
            if num_gpus > 1 else None
        loader = torch.utils.data.DataLoader(
            corrupt_dataset,
            batch_size=batch_size,
            shuffle=False,
            sampler=sampler,
            num_workers=loader_params.num_workers,
            pin_memory=loader_params.pin_memory,
            drop_last=False
        )
        num_correct = 0
        for curr_iter, (inputs, labels) in enumerate(loader):
            inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
            preds = model(inputs)
            correct = torch.sum(torch.argmax(preds, dim=1)==labels)
            if num_gpus > 1:
                torch.distributed.all_reduce(correct)
            num_correct += correct.item()
        err = 100 * (1 - num_correct / len(corrupt_dataset))
        stats = {'_type' : aug, 'top1_err' : err}
        lu.log_json_stats(stats)
        errs.append(err)
    # Calculating records
    if mCE_denom is not None:
        # Load baseline errors used as the mCE denominator.
        mCE_denom = pickle.load(open(os.path.join(os.path.dirname(__file__), '../baseline_data/', mCE_denom), 'rb'))
    errs = np.array(errs)
    # Average severities together for each corruption name.
    aug_names = [a.split("-")[0] for a in augs]
    unique_aug_names = list(set(aug_names))
    avg_errs = [np.mean(errs[[i for i, a in enumerate(aug_names) if a==u]]) for u in unique_aug_names]
    avg_errs = np.array(avg_errs)
    mCE = None
    rmCE = None
    if mCE_denom:
        mCE = [100 * avg_errs[i] / mCE_denom[a] for i, a in enumerate(unique_aug_names)]
        mCE = np.array(mCE)
        if clean_err:
            # Relative mCE: error increase over clean, normalized by baseline.
            rmCE = [100 * (avg_errs[i] - clean_err) / (mCE_denom[a] - mCE_denom['clean'])\
                for i, a in enumerate(unique_aug_names)]
            rmCE = np.array(rmCE)
    for i, a in enumerate(unique_aug_names):
        stats = {'_type' : a + '-avg', 'top1_err' : avg_errs[i]}
        if mCE is not None:
            stats['mCE'] = mCE[i]
        if rmCE is not None:
            stats['rmCE'] = rmCE[i]
        lu.log_json_stats(stats)
    if imagenetc_grouping:
        # Aggregate by ImageNet-C corruption category.
        for aug_type in ['blur', 'digital', 'noise', 'weather', 'extra']:
            aug_indices = [i for i, a in enumerate(unique_aug_names)\
                if aug_type in aug_finder.get_aug_by_name(a).tags]
            err_for_type = np.mean(avg_errs[aug_indices])
            stats = {'_type' : aug_type + '-avg', 'top1_err' : err_for_type}
            if mCE is not None:
                mCE_for_type = np.mean(mCE[aug_indices])
                stats['mCE'] = mCE_for_type
            if rmCE is not None:
                rmCE_for_type = np.mean(rmCE[aug_indices])
                stats['rmCE'] = rmCE_for_type
            lu.log_json_stats(stats)
    if imagenetc_grouping:
        # The 'extra' corruptions are excluded from the headline average.
        indices = [i for i, a in enumerate(unique_aug_names)\
            if 'extra' not in aug_finder.get_aug_by_name(a).tags]
    else:
        indices = [i for i, a in enumerate(unique_aug_names)]
    overall_avg = np.mean(avg_errs[indices])
    stats = {'_type' : 'overall-avg', 'top1_err' : overall_avg}
    if mCE is not None:
        overall_mCE = np.mean(mCE[indices])
        stats['mCE'] = overall_mCE
    if rmCE is not None:
        overall_rmCE = np.mean(rmCE[indices])
        stats['rmCE'] = overall_rmCE
    lu.log_json_stats(stats)
| 4,659 | 37.196721 | 169 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/overlap/datasets.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import augmentations as aug
from .augmentations.utils.converters import NumpyToTensor, PilToNumpy
from .augmentations.utils.aug_finder import get_augs_by_tag, parse_aug_string, get_aug_by_name
from .augmentations.utils.severity import sample_level, int_parameter, float_parameter
from .augmentations import pil, compositions, obscure, patch_gaussian, standard_augmentations
import torchvision as tv
import torch
import numpy as np
import os
from PIL import Image, ImageOps
# Per-channel normalization statistics for CIFAR-10 (pixel values in [0, 1]).
CIFAR_MEAN = [125.3/255, 123.0/255, 113.9/255]
CIFAR_STD = [63.0/255, 62.1/255, 66.7/255]

#This is in RGB order since that is the standard for PIL
# Per-channel normalization statistics for ImageNet.
IM_MEAN = [0.485, 0.456, 0.406]
IM_STD = [0.229, 0.224, 0.225]
class Cifar10Base(torch.utils.data.Dataset):
    """CIFAR-10 wrapper that applies a configurable augmentation pipeline.

    Each image goes through: optional train-time flip/crop -> numpy ->
    `self.aug.transform` with sampled parameters -> tensor + normalization.
    A fixed list of pre-sampled transform parameters can be loaded or
    generated so augmentations are reproducible across runs.
    """

    def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, augmentation=None, transform_file=None):
        """
        Args:
            data_path: root directory of the torchvision CIFAR-10 data.
            split: 'train' or 'test'.
            im_size: image side length used by the random crop.
            train_aug: force train-time flip/crop on/off; defaults to True
                for the train split and False otherwise.
            num_transforms: size of the fixed transform-parameter list.
            augmentation: augmentation object; identity when None.
            transform_file: .npy file of pre-sampled transform parameters.
        """
        assert split in ['train', 'test'], "Unknown split {}".format(split)
        self.train = True if split=='train' else False
        self.train_aug = self.train if train_aug is None else train_aug
        # Optional per-transform sampling weights (None = uniform draw).
        self.transform_weights = None
        if self.train_aug:
            train_transform = [
                tv.transforms.RandomHorizontalFlip(),
                tv.transforms.RandomCrop(im_size, padding=4)
            ]
        else:
            train_transform = []
        self.pretransform = tv.transforms.Compose(train_transform + [PilToNumpy()])
        if augmentation is None:
            self.aug = aug.identity.Identity()
        else:
            self.aug = augmentation
        self.posttransform = tv.transforms.Compose([
            NumpyToTensor(),
            tv.transforms.Normalize(CIFAR_MEAN, CIFAR_STD)
        ])
        if transform_file:
            # Load pre-sampled parameters, optionally truncated.
            transforms = np.load(transform_file)
            self.transform_list = transforms[:num_transforms]\
                if num_transforms is not None else transforms
        elif num_transforms:
            self.transform_list = self.build_transform_list(num_transforms)
        else:
            self.transform_list = None
        self.dataset = tv.datasets.CIFAR10(data_path, self.train, download=False)

    def build_transform_list(self, num_transforms):
        """Pre-sample `num_transforms` parameter sets in numpy form."""
        transforms = [self.aug.convert_to_numpy(self.aug.sample_parameters()) for i in range(num_transforms)]
        return np.stack(transforms, axis=0)

    def get_random_transform(self):
        """Sample transform parameters, either fresh or from the fixed list."""
        if self.transform_list is None:
            return self.aug.sample_parameters()
        elif self.transform_weights is None:
            params = self.transform_list[np.random.randint(low=0, high=len(self.transform_list))]
            return self.aug.convert_from_numpy(params)
        else:
            # Weighted draw over the fixed transform list.
            index = np.random.choice(np.arange(len(self.transform_list)), p=self.transform_weights)
            params = self.transform_list[index]
            return self.aug.convert_from_numpy(params)

    def __getitem__(self, index):
        """Return (augmented, normalized image tensor, label)."""
        pre_im, label = self.dataset[index]
        pre_im = self.pretransform(pre_im)
        params = self.get_random_transform()
        return self.posttransform(self.aug.transform(pre_im, **params)), label

    def __len__(self):
        return len(self.dataset)

    def fixed_transform(self, index, transform_index):
        """Apply fixed transform `transform_index` to image `index`."""
        assert self.transform_list is not None, "Must have a fixed transform list to generate fixed transforms."
        im, label = self.dataset[index]
        im = self.pretransform(im)
        params = self.aug.convert_from_numpy(self.transform_list[transform_index])
        im = self.aug.transform(im, **params)
        return self.posttransform(im), label

    def serialize(self, indices=None):
        '''
        Returns a new dataset that is all fixed transforms in order,
        applied to each index in order.
        '''
        class SerialDataset(torch.utils.data.Dataset):
            # Index layout: item i maps to image i // T and transform i % T,
            # where T = len(transform_list) of the wrapped dataset.
            def __init__(self, dataset, indices=None):
                self.dataset = dataset
                self.indices = indices

            def __getitem__(self, index):
                im_idx = index // len(self.dataset.transform_list)
                im_idx = self.indices[im_idx] if self.indices is not None else im_idx
                param_idx = index % len(self.dataset.transform_list)
                return self.dataset.fixed_transform(im_idx, param_idx)

            def __len__(self):
                if self.indices is not None:
                    return len(self.indices) * len(self.dataset.transform_list)
                else:
                    return len(self.dataset) * len(self.dataset.transform_list)
        return SerialDataset(self, indices)
class Cifar10Augmix(Cifar10Base):
    """CIFAR-10 dataset whose images are transformed by the AugMix composition."""

    def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None,
            aug_string=None, width=3, depth=3, random_depth=True, prob_coeff=1.0,
            severity=3, transform_file=None):
        # Record the composition hyperparameters for later inspection.
        self.aug_string = aug_string
        self.width = width
        self.depth = depth
        self.prob_coeff = prob_coeff
        self.random_depth = random_depth
        self.severity = severity
        if aug_string is None:
            # Default pool: every augmentation tagged for AugMix, instantiated
            # at the requested severity.
            ops = [a(severity=severity, im_size=im_size) for a in get_augs_by_tag(['augmix'])]
        else:
            ops = parse_aug_string(aug_string, im_size)
        mixer = compositions.Augmix(
            augmentation_list=ops,
            width=width,
            max_depth=depth,
            random_depth=random_depth,
            prob_coeff=prob_coeff
        )
        super().__init__(data_path, split, im_size, train_aug, num_transforms,
                mixer, transform_file=transform_file)
class Cifar10RandomSample(Cifar10Base):
    """CIFAR-10 with a single augmentation sampled per item (optionally
    with non-uniform sampling weights)."""
    def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None,
            aug_string=None, severity=3, weights=None):
        self.aug_string = aug_string
        # Parse an explicit augmentation string, or default to the
        # augmix-tagged augmentations at the given severity.
        if aug_string is None:
            aug_list = [a(severity=severity, im_size=im_size)
                    for a in get_augs_by_tag(['augmix'])]
        else:
            aug_list = parse_aug_string(aug_string, im_size)
        sampler = compositions.RandomSample(
                augmentation_list=aug_list,
                weights=weights
                )
        super(Cifar10RandomSample, self).__init__(data_path, split, im_size, train_aug, num_transforms, sampler)
class Cifar10Corruption(Cifar10Base):
    '''
    Corruptions are different in three ways: they sample at fixed max intensity
    of randomly between a low value and some maximum, they generate
    fixed transforms in order and balanced (and can give the corruption/severity
    of a given transform index), and have the metadata for the frost corruption.
    '''
    def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None,
            aug_string=None, frost_path=None, include_extra=True, random_transforms=False):
        self.include_extra = include_extra
        self.random_transforms = random_transforms
        if aug_string is not None:
            self.aug_list = parse_aug_string(aug_string, im_size, max_intensity=True, frost_path=frost_path)
        else:
            # Default: every imagenet_c-tagged corruption (optionally minus the
            # 'extra' set) instantiated at all five severity levels.
            augs = get_augs_by_tag(['imagenet_c'], [] if include_extra else ['extra'])
            severities = [1,2,3,4,5]
            self.aug_list = [a(severity=s, im_size=im_size, max_intensity=True, frost_path=frost_path)\
                    for a in augs for s in severities]
        augmentation = compositions.RandomSample(
                augmentation_list=self.aug_list
                )
        super(Cifar10Corruption, self).__init__(data_path, split, im_size, train_aug, num_transforms, augmentation)
    def build_transform_list(self, num_transforms):
        # Optionally fall back to the base class's unbalanced random sampling.
        if self.random_transforms:
            return super(Cifar10Corruption, self).build_transform_list(num_transforms)
        assert num_transforms % len(self.aug_list) == 0,\
                "The total number of augs needs to divide into the total number of transforms."
        transform_list = None
        for i in range(num_transforms):
            # Balanced layout: each corruption gets an equal contiguous run of
            # transform slots.
            transform_idx = i // (num_transforms // len(self.aug_list))
            transform_params = self.aug_list[transform_idx].sample_parameters()
            curr_record = self.aug.convert_to_numpy({
                'idx' : transform_idx,
                'transform_params' : transform_params
                }).reshape(1,-1)
            transform_list = np.concatenate((transform_list, curr_record), axis=0)\
                    if transform_list is not None else curr_record
        return transform_list
    def get_corruption(self, transform_index):
        # Recovers (name, severity) for a transform index; assumes the balanced
        # layout produced by build_transform_list above (not the random one).
        aug_type_index = transform_index // (len(self.transform_list) // len(self.aug_list))
        return self.aug_list[aug_type_index].name, self.aug_list[aug_type_index].severity
class Cifar10AutoAugment(Cifar10Base):
    """CIFAR-10 with a fixed AutoAugment subpolicy list (optionally followed
    by CutOut).

    Each subpolicy is a pair of (op, probability, magnitude, extra) tuples;
    the fourth tuple element is forwarded to the op (presumably a
    direction/bias flag -- TODO confirm against the `pil` op signatures).
    """
    def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, subpolicy_list=None, add_cutout=False, transform_file=None):
        # Map an AutoAugment severity level in [0, 10] linearly onto [low, high].
        def stom(low, high, sev):
            return sev / 10 * (high - low) + low
        size = im_size
        init = lambda transform : transform(0, size)
        # Translation magnitude scaled by image size; 150/331 looks like a
        # ratio taken from a 331px reference resolution -- TODO confirm.
        tn = 150/331 * im_size
        if subpolicy_list is None:
            subpolicy_list = [
                    [(init(pil.Invert), 0.1, None, None), (init(pil.Contrast), 0.2, stom(0,0.9,6), 1)],
                    [(init(pil.Rotate), 0.7, stom(0,30,2), 0), (init(pil.TranslateX), 0.3, stom(0,tn,9), 0)],
                    [(init(pil.Sharpness), 0.8, stom(0,0.9,1), 1), (init(pil.Sharpness), 0.9, stom(0,0.9,3), 1)],
                    [(init(pil.ShearY), 0.5, stom(0,0.3,8), 0), (init(pil.TranslateY), 0.7, stom(0,tn,9), 0)],
                    [(init(pil.AutoContrast), 0.5, None, None), (init(pil.Equalize), 0.9, None, None)],
                    [(init(pil.ShearY), 0.2, stom(0,0.3,7), 0), (init(pil.Posterize), 0.3, int(stom(4,8,7)), None)],
                    [(init(pil.ColorBalance), 0.4, stom(0,0.9,3),1), (init(pil.Brightness), 0.6, stom(0,0.9,7),1)],
                    [(init(pil.Sharpness), 0.3, stom(0,0.9,9),1), (init(pil.Brightness), 0.7, stom(0,0.9,9),1)],
                    [(init(pil.Equalize), 0.6, None, None), (init(pil.Equalize), 0.5, None, None)],
                    [(init(pil.Contrast), 0.6, stom(0,0.9,7),1), (init(pil.Sharpness), 0.6, stom(0,0.9,5),1)],
                    [(init(pil.ColorBalance), 0.7, stom(0,0.9,7),1), (init(pil.TranslateX), 0.5, stom(0,tn,8),0)],
                    [(init(pil.Equalize), 0.3, None, None), (init(pil.AutoContrast), 0.4, None, None)],
                    [(init(pil.TranslateY), 0.4, stom(0,tn,3),0), (init(pil.Sharpness), 0.2, stom(0,0.9,6),1)],
                    [(init(pil.Brightness), 0.9, stom(0,0.9,6),1), (init(pil.ColorBalance), 0.2, stom(0,0.9,8),1)],
                    [(init(pil.Solarize), 0.5, stom(256,0,2),None), (init(pil.Invert), 0.0, None,None)],
                    [(init(pil.Equalize), 0.2, None, None), (init(pil.AutoContrast), 0.6, None, None)],
                    [(init(pil.Equalize), 0.2, None, None), (init(pil.Equalize), 0.6, None, None)],
                    [(init(pil.ColorBalance), 0.9, stom(0,0.9,9),1), (init(pil.Equalize), 0.6, None, None)],
                    [(init(pil.AutoContrast), 0.8, None, None), (init(pil.Solarize), 0.2, stom(256,0,8), None)],
                    [(init(pil.Brightness), 0.1, stom(0,0.9,3),1), (init(pil.ColorBalance), 0.7, stom(0,0.9,0),1)],
                    [(init(pil.Solarize), 0.4, stom(256,0,5), None), (init(pil.AutoContrast), 0.9, None, None)],
                    [(init(pil.TranslateY), 0.9, stom(0,tn,9), None), (init(pil.TranslateY), 0.7, stom(0,tn,9),0)],
                    [(init(pil.AutoContrast), 0.9, None, None), (init(pil.Solarize), 0.8, stom(256,0,3), None)],
                    [(init(pil.Equalize), 0.8, None, None), (init(pil.Invert), 0.1, None, None)],
                    [(init(pil.TranslateY), 0.7, stom(0,tn,9), 0), (init(pil.AutoContrast), 0.9, None, None)]
                    ]
        aug = compositions.AutoAugment(subpolicy_list)
        if add_cutout:
            # CutOut is applied after the AutoAugment policy, at max intensity.
            cutout = obscure.CutOut(severity=10, im_size=im_size, max_intensity=True)
            aug = compositions.ComposeSerially([aug, cutout])
        super(Cifar10AutoAugment, self).__init__(data_path, split, im_size, train_aug, num_transforms,
                aug, transform_file=transform_file)
class Cifar10PatchGaussian(Cifar10Base):
    """CIFAR-10 with Patch Gaussian noise (or full-image Gaussian noise when
    `patch_width` is None), composed with the standard train crop/flip."""
    def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, transform_file=None, patch_width=25, patch_sigma=1.0, max_width=True):
        if patch_width is None:
            # No patch: apply Gaussian noise over the whole image.
            noise = patch_gaussian.Gaussian(
                    severity = patch_sigma * 10,
                    im_size=im_size,
                    max_intensity=max_width
                    )
        else:
            noise = patch_gaussian.PatchGaussian(
                    severity=None,
                    im_size=im_size,
                    max_intensity=max_width,
                    sigma=patch_sigma,
                    width=patch_width
                    )
        # The training crop/flip is composed here, so the base class is told
        # not to add its own train augmentation (train_aug=False below).
        wants_train_aug = train_aug or (split=='train' and train_aug is None)
        if wants_train_aug:
            crop_flip = standard_augmentations.Cifar10CropAndFlip(severity=None, im_size=im_size)
            noise = compositions.ComposeSerially([noise, crop_flip])
        super(Cifar10PatchGaussian, self).__init__(data_path, split, im_size, False, num_transforms,
                noise, transform_file=transform_file)
class ImageNetBase(torch.utils.data.Dataset):
    """ImageNet (ImageFolder) dataset with a pluggable augmentation applied
    between a standard pre-transform (crop/flip or resize/center-crop) and a
    normalize post-transform. Supports an optional fixed list of transform
    parameters for reproducible evaluation."""
    def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, augmentation=None, rgb_to_bgr=True):
        assert split in ['train', 'val'], "Unknown split {}".format(split)
        self.train = True if split=='train' else False
        # train_aug=None means "follow the split"; an explicit bool overrides.
        self.train_aug = self.train if train_aug is None else train_aug
        data_path = os.path.join(data_path, split)
        if self.train_aug:
            train_transform = [
                    tv.transforms.RandomResizedCrop(im_size, scale=(0.08,1.0)),
                    tv.transforms.RandomHorizontalFlip(),
                    ]
        else:
            train_transform = [
                    tv.transforms.Resize(256),
                    tv.transforms.CenterCrop(im_size)
                    ]
        def RGB_to_BGR(image):
            # Swap channel order of a CHW tensor.
            return image[[2,1,0],:,:]
        self.pretransform = tv.transforms.Compose(train_transform + [PilToNumpy()])
        if augmentation is None:
            self.aug = aug.identity.Identity()
        else:
            self.aug = augmentation
        self.posttransform = tv.transforms.Compose([
            NumpyToTensor(),
            tv.transforms.Normalize(IM_MEAN, IM_STD)] +
            ([RGB_to_BGR] if rgb_to_bgr else []) #PyCls imagenet models are trained in BGR input order
            )
        # When num_transforms is given, pre-sample a fixed transform list.
        self.transform_list = self.build_transform_list(num_transforms)\
                if num_transforms is not None else None
        self.dataset = tv.datasets.ImageFolder(data_path, None)
    def build_transform_list(self, num_transforms):
        # Sample num_transforms parameter sets and stack their numpy encodings.
        transforms = [self.aug.convert_to_numpy(self.aug.sample_parameters()) for i in range(num_transforms)]
        return np.stack(transforms, axis=0)
    def get_random_transform(self):
        # NOTE(review): unlike the CIFAR base class, there is no
        # transform_weights branch here; sampling is always uniform.
        if self.transform_list is None:
            return self.aug.sample_parameters()
        else:
            params = self.transform_list[np.random.randint(low=0, high=len(self.transform_list))]
            return self.aug.convert_from_numpy(params)
    def __getitem__(self, index):
        pre_im, label = self.dataset[index]
        pre_im = self.pretransform(pre_im)
        params = self.get_random_transform()
        return self.posttransform(self.aug.transform(pre_im, **params)), label
    def __len__(self):
        return len(self.dataset)
    def fixed_transform(self, index, transform_index):
        # Apply the fixed transform at transform_index to dataset item index.
        assert self.transform_list is not None, "Must have a fixed transform list to generate fixed transforms."
        im, label = self.dataset[index]
        im = self.pretransform(im)
        params = self.aug.convert_from_numpy(self.transform_list[transform_index])
        im = self.aug.transform(im, **params)
        return self.posttransform(im), label
    def serialize(self, indices=None):
        '''
        Returns a new dataset that is all fixed transforms in order,
        applied to each index in order.
        '''
        # Item k is image (k // T) under fixed transform (k % T), with
        # T = len(self.transform_list); `indices` optionally subsets images.
        class SerialDataset(torch.utils.data.Dataset):
            def __init__(self, dataset, indices=None):
                self.dataset = dataset
                self.indices = indices
            def __getitem__(self, index):
                im_idx = index // len(self.dataset.transform_list)
                im_idx = self.indices[im_idx] if self.indices is not None else im_idx
                param_idx = index % len(self.dataset.transform_list)
                return self.dataset.fixed_transform(im_idx, param_idx)
            def __len__(self):
                if self.indices is not None:
                    return len(self.indices) * len(self.dataset.transform_list)
                else:
                    return len(self.dataset) * len(self.dataset.transform_list)
        return SerialDataset(self, indices)
class ImageNetCorruption(ImageNetBase):
    '''
    Corruptions are different in three ways: they sample at fixed max intensity
    of randomly between a low value and some maximum, they generate
    fixed transforms in order and balanced (and can give the corruption/severity
    of a given transform index), and have the metadata for the frost corruption.
    '''
    def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None,
            aug_string=None, frost_path=None, include_extra=True, rgb_to_bgr=True):
        self.include_extra = include_extra
        if aug_string is not None:
            self.aug_list = parse_aug_string(aug_string, im_size, max_intensity=True, frost_path=frost_path)
        else:
            # Default: every imagenet_c-tagged corruption (optionally minus the
            # 'extra' set) at all five severity levels.
            augs = get_augs_by_tag(['imagenet_c'], [] if include_extra else ['extra'])
            severities = [1,2,3,4,5]
            self.aug_list = [a(severity=s, im_size=im_size, max_intensity=True, frost_path=frost_path)\
                    for a in augs for s in severities]
        augmentation = compositions.RandomSample(
                augmentation_list=self.aug_list
                )
        super(ImageNetCorruption, self).__init__(data_path, split, im_size, train_aug, num_transforms,
                augmentation, rgb_to_bgr)
    def build_transform_list(self, num_transforms):
        assert num_transforms % len(self.aug_list) == 0,\
                "The total number of augs needs to divide into the total number of transforms."
        transform_list = None
        for i in range(num_transforms):
            # Balanced layout: each corruption gets an equal contiguous run of
            # transform slots.
            transform_idx = i // (num_transforms // len(self.aug_list))
            transform_params = self.aug_list[transform_idx].sample_parameters()
            curr_record = self.aug.convert_to_numpy({
                'idx' : transform_idx,
                'transform_params' : transform_params
                }).reshape(1,-1)
            transform_list = np.concatenate((transform_list, curr_record), axis=0)\
                    if transform_list is not None else curr_record
        return transform_list
    def get_corruption(self, transform_index):
        # Recovers (name, severity) for a transform index; assumes the balanced
        # layout produced by build_transform_list above.
        aug_type_index = transform_index // (len(self.transform_list) // len(self.aug_list))
        return self.aug_list[aug_type_index].name, self.aug_list[aug_type_index].severity
class ImageNetPatchGaussian(ImageNetBase):
    """ImageNet with Patch Gaussian noise (or full-image Gaussian noise when
    `patch_width` is None)."""
    def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, patch_width=250, patch_sigma=1.0, max_width=False, rgb_to_bgr=True):
        if patch_width is None:
            # No patch: apply Gaussian noise over the whole image.
            noise = patch_gaussian.Gaussian(
                    severity = patch_sigma * 10,
                    im_size=im_size,
                    max_intensity=max_width
                    )
        else:
            noise = patch_gaussian.PatchGaussian(
                    severity=None,
                    im_size=im_size,
                    max_intensity=max_width,
                    sigma=patch_sigma,
                    width=patch_width
                    )
        super(ImageNetPatchGaussian, self).__init__(data_path, split, im_size, train_aug, num_transforms,
                noise, rgb_to_bgr=rgb_to_bgr)
class ImageNetAutoAugment(ImageNetBase):
    """ImageNet with a fixed AutoAugment subpolicy list.

    Each subpolicy is a pair of (op, probability, magnitude, extra) tuples;
    the fourth tuple element is forwarded to the op (presumably a
    direction/bias flag -- TODO confirm against the `pil` op signatures).
    """
    def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, subpolicy_list=None, rgb_to_bgr=True):
        # Map an AutoAugment severity level in [0, 10] linearly onto [low, high].
        def stom(low, high, sev):
            return sev / 10 * (high - low) + low
        size = im_size
        init = lambda transform : transform(0, size)
        # Translation magnitude scaled by image size; 150/331 looks like a
        # ratio taken from a 331px reference resolution -- TODO confirm.
        tn = 150/331 * im_size
        if subpolicy_list is None:
            subpolicy_list = [
                    [(init(pil.Posterize), 0.4, int(stom(4,8,8)), None), (init(pil.Rotate), 0.6, stom(0,30,9),0)],
                    [(init(pil.Solarize), 0.6, stom(256,0,5), None), (init(pil.AutoContrast), 0.6, None,None)],
                    [(init(pil.Equalize), 0.8, None, None), (init(pil.Equalize), 0.6, None, None)],
                    [(init(pil.Posterize), 0.6, int(stom(4,8,7)), None), (init(pil.Posterize), 0.6, int(stom(4,8,6)),None)],
                    [(init(pil.Equalize), 0.4, None, None), (init(pil.Solarize), 0.2, stom(256,0,4),None)],
                    [(init(pil.Equalize), 0.4, None, None), (init(pil.Rotate), 0.8, stom(0,30,8),0)],
                    [(init(pil.Solarize), 0.6, stom(256,0,3), None), (init(pil.Equalize), 0.6, None, None)],
                    [(init(pil.Posterize), 0.8, int(stom(4,8,5)), None), (init(pil.Equalize), 1.0, None, None)],
                    [(init(pil.Rotate), 0.2, stom(0,30,3), 0), (init(pil.Solarize), 0.6, stom(256,0,8),None)],
                    [(init(pil.Equalize), 0.6, None, None), (init(pil.Posterize), 0.4, int(stom(4,8,6)),None)],
                    [(init(pil.Rotate), 0.8, stom(0,30,8), 0), (init(pil.ColorBalance), 0.4, stom(0,0.9,0),1)],
                    [(init(pil.Rotate), 0.4, stom(0,30,9), 0), (init(pil.Equalize), 0.6, None, None)],
                    [(init(pil.Equalize), 0.0, None, None), (init(pil.Equalize), 0.8, None, None)],
                    [(init(pil.Invert), 0.6, None, None), (init(pil.Equalize), 1.0, None, None)],
                    [(init(pil.ColorBalance), 0.6, stom(0,0.9,4), 1), (init(pil.Contrast), 1.0, stom(0,0.9,8),1)],
                    [(init(pil.Rotate), 0.8, stom(0,30,8), 0), (init(pil.ColorBalance), 1.0, stom(0,0.9,2),1)],
                    [(init(pil.ColorBalance), 0.8, stom(0,0.9,8), 1), (init(pil.Solarize), 0.8, stom(256,0,7),None)],
                    [(init(pil.Sharpness), 0.4, stom(0,0.9,7), 1), (init(pil.Invert), 0.6, None, None)],
                    [(init(pil.ShearX), 0.6, stom(0,0.9,5), 1), (init(pil.Equalize), 1.0, None, None)],
                    [(init(pil.ColorBalance), 0.4, stom(0,0.9,0), 1), (init(pil.Equalize), 0.6, None, None)],
                    [(init(pil.Equalize), 0.4, None, None), (init(pil.Solarize), 0.2, stom(256,0,4),None)],
                    [(init(pil.Solarize), 0.6, stom(256,0,5), None), (init(pil.AutoContrast), 0.6, None, None)],
                    [(init(pil.Invert), 0.6, None, None), (init(pil.Equalize), 1.0, None, None)],
                    [(init(pil.ColorBalance), 0.6, stom(0,0.9,4), 1), (init(pil.Contrast), 1.0, stom(0,0.9,8),1)],
                    [(init(pil.Equalize), 0.8, None, None), (init(pil.Equalize), 0.6, None, None)],
                    ]
        aug = compositions.AutoAugment(subpolicy_list)
        super(ImageNetAutoAugment, self).__init__(data_path, split, im_size, train_aug, num_transforms,
                aug, rgb_to_bgr=rgb_to_bgr)
class ImageNetAugmix(ImageNetBase):
    """ImageNet wrapped with the Augmix composition of augmentations."""
    def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None,
            aug_string=None, width=3, depth=3, random_depth=True, prob_coeff=1.0,
            severity=3, rgb_to_bgr=True):
        # Record the Augmix configuration for later inspection.
        self.aug_string = aug_string
        self.severity = severity
        self.width = width
        self.depth = depth
        self.random_depth = random_depth
        self.prob_coeff = prob_coeff
        # Either parse an explicit augmentation string, or fall back to the
        # default set of augmix-tagged augmentations at the given severity.
        if aug_string is None:
            base_augs = [a(severity=severity, im_size=im_size)
                    for a in get_augs_by_tag(['augmix'])]
        else:
            base_augs = parse_aug_string(aug_string, im_size)
        mixer = compositions.Augmix(
                augmentation_list=base_augs,
                width=width,
                max_depth=depth,
                random_depth=random_depth,
                prob_coeff=prob_coeff
                )
        super(ImageNetAugmix, self).__init__(data_path, split, im_size, train_aug, num_transforms,
                mixer, rgb_to_bgr=rgb_to_bgr)
class Cifar10AugmixJSD(torch.utils.data.Dataset):
    """CIFAR-10 for Augmix training with a Jensen-Shannon consistency loss:
    each item yields one clean view plus (jsd_num - 1) Augmix views, stacked
    along the channel dimension."""
    def __init__(self, data_path, split, im_size, train_aug=True,
            augmix_width=3, augmix_depth=3, augmix_random_depth=True,
            augmix_prob_coeff=1.0, augmix_severity=3,
            jsd_num=3):
        self.jsd_num = jsd_num
        self.split = split
        self.train = True if split=='train' else False
        # Standard CIFAR crop/flip, only on the train split when enabled.
        train_transform = [tv.transforms.RandomHorizontalFlip(),
                tv.transforms.RandomCrop(im_size, padding=4)]\
                if (self.train and train_aug) else []
        self.pretransform = tv.transforms.Compose(train_transform + [PilToNumpy()])
        self.posttransform = tv.transforms.Compose([NumpyToTensor(), tv.transforms.Normalize(CIFAR_MEAN, CIFAR_STD)])
        # The nine PIL-based ops used by the Augmix mixture.
        aug_list = [
                pil.ShearX(augmix_severity, im_size=im_size),
                pil.ShearY(augmix_severity, im_size=im_size),
                pil.TranslateX(augmix_severity, im_size=im_size),
                pil.TranslateY(augmix_severity, im_size=im_size),
                pil.Rotate(augmix_severity, im_size=im_size),
                pil.Equalize(augmix_severity, im_size=im_size),
                pil.AutoContrast(augmix_severity, im_size=im_size),
                pil.Solarize(augmix_severity, im_size=im_size),
                pil.Posterize(augmix_severity, im_size=im_size)
                ]
        self.aug = compositions.Augmix(
                augmentation_list=aug_list,
                width=augmix_width,
                max_depth=augmix_depth,
                random_depth=augmix_random_depth,
                prob_coeff=augmix_prob_coeff
                )
        self.dataset = tv.datasets.CIFAR10(data_path, self.train, transform=None, download=False)
    def __getitem__(self, index):
        im, label = self.dataset[index]
        im = self.pretransform(im)
        # First view is the un-augmented image; the rest are Augmix samples.
        im_one = self.posttransform(im)
        ims = [self.posttransform(self.aug(im)) for i in range(self.jsd_num-1)]
        # Stack views along channels: (c * jsd_num, h, w).
        c, h, w = im_one.size()
        out = torch.stack([im_one] + ims, dim=0).view(c * self.jsd_num, h, w)
        return out, label
    def __len__(self):
        return len(self.dataset)
class ImageNetAugmixJSD(torch.utils.data.Dataset):
    """ImageNet for Augmix training with a Jensen-Shannon consistency loss.

    Each item returns one clean view plus (jsd_num - 1) Augmix views,
    concatenated along the channel dimension. Unlike the other classes in
    this file, the Augmix mixture is implemented inline with raw PIL ops.
    """
    def __init__(self, data_path, split, im_size, RGB_to_BGR=True, mixture_width=3, mixture_depth=-1, aug_severity=1, aug_prob_coeff=1, jsd_num=3):
        self.split = split
        self.train = True if split=='train' else False
        self.im_size = im_size
        self.RGB_to_BGR = RGB_to_BGR
        self.train_transform = tv.transforms.Compose(
                [tv.transforms.RandomResizedCrop(im_size, scale=(0.08,1.0)),
                tv.transforms.RandomHorizontalFlip()])
        self.test_transform = tv.transforms.Compose(
                [tv.transforms.Resize(256),
                tv.transforms.CenterCrop(im_size)])
        self.preprocess = tv.transforms.Compose(
                [tv.transforms.ToTensor(),
                tv.transforms.Normalize(IM_MEAN, IM_STD)])
        data_path = os.path.join(data_path, split)
        self.transform = self.train_transform if self.train else self.test_transform
        self.dataset = tv.datasets.ImageFolder(data_path, None)
        self.width = mixture_width
        # depth <= 0 means "random depth in [1, 3]" (see augment()).
        self.depth = mixture_depth
        self.severity = aug_severity
        self.prob_coeff = aug_prob_coeff
        self.im_size = im_size
        self.num = jsd_num
        self.augmentations = [
            self.rotate,
            self.shear_x,
            self.shear_y,
            self.translate_x,
            self.translate_y,
            self.autocontrast,
            self.posterize,
            self.equalize,
            self.solarize,
        ]
    def _prepare_im(self, im):
        # PIL -> normalized tensor, optionally channel-swapped to BGR.
        im = self.preprocess(im)
        if self.RGB_to_BGR:
            im = im[[2,1,0],:,:]
        return im
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, index):
        im, label = self.dataset[index]
        im = self.transform(im)
        # One clean view plus (num - 1) Augmix views.
        ims = [self._prepare_im(im)] + [self.augment(im) for i in range(1,self.num)]
        # NOTE(review): np.concatenate over torch tensors -- relies on numpy's
        # implicit tensor conversion; confirm the consumer expects this.
        im = np.concatenate(ims, axis=0)
        return im, label
    def augment(self, im):
        """Produce one Augmix view: a convex mixture of `width` randomly
        augmented chains, blended with the clean image by a Beta-sampled m."""
        ws = np.float32(
            np.random.dirichlet([self.prob_coeff] * self.width))
        m = np.float32(np.random.beta(self.prob_coeff, self.prob_coeff))
        mix = torch.zeros_like(self._prepare_im(im))
        for i in range(self.width):
            image_aug = im.copy()
            depth = self.depth if self.depth > 0 else np.random.randint(1, 4)
            for _ in range(depth):
                op = np.random.choice(self.augmentations)
                image_aug = op(image_aug, self.severity)
            # Preprocessing commutes since all coefficients are convex
            mix += ws[i] * self._prepare_im(image_aug)
        mixed = (1 - m) * self._prepare_im(im) + m * mix
        return mixed
    def autocontrast(self, pil_img, _):
        return ImageOps.autocontrast(pil_img)
    def equalize(self, pil_img, _):
        return ImageOps.equalize(pil_img)
    def posterize(self, pil_img, level):
        level = int_parameter(sample_level(level), 4)
        return ImageOps.posterize(pil_img, 4 - level)
    def rotate(self, pil_img, level):
        degrees = int_parameter(sample_level(level), 30)
        # Random sign: rotate either direction with equal probability.
        if np.random.uniform() > 0.5:
            degrees = -degrees
        return pil_img.rotate(degrees, resample=Image.BILINEAR)
    def solarize(self, pil_img, level):
        level = int_parameter(sample_level(level), 256)
        return ImageOps.solarize(pil_img, 256 - level)
    def shear_x(self, pil_img, level):
        level = float_parameter(sample_level(level), 0.3)
        if np.random.uniform() > 0.5:
            level = -level
        return pil_img.transform((self.im_size, self.im_size),
                                 Image.AFFINE, (1, level, 0, 0, 1, 0),
                                 resample=Image.BILINEAR)
    def shear_y(self, pil_img, level):
        level = float_parameter(sample_level(level), 0.3)
        if np.random.uniform() > 0.5:
            level = -level
        return pil_img.transform((self.im_size, self.im_size),
                                 Image.AFFINE, (1, 0, 0, level, 1, 0),
                                 resample=Image.BILINEAR)
    def translate_x(self, pil_img, level):
        level = int_parameter(sample_level(level), self.im_size / 3)
        if np.random.random() > 0.5:
            level = -level
        return pil_img.transform((self.im_size, self.im_size),
                                 Image.AFFINE, (1, 0, level, 0, 1, 0),
                                 resample=Image.BILINEAR)
    def translate_y(self, pil_img, level):
        level = int_parameter(sample_level(level), self.im_size / 3)
        if np.random.random() > 0.5:
            level = -level
        return pil_img.transform((self.im_size, self.im_size),
                                 Image.AFFINE, (1, 0, 0, 0, 1, level),
                                 resample=Image.BILINEAR)
class ImageNetSIN(torch.utils.data.Dataset):
    """Concatenation of ImageNet and Stylized-ImageNet (both ImageFolder
    layouts), sharing one crop/flip-or-resize plus normalize pipeline."""
    def __init__(self, in_data_path, sin_data_path, split, im_size, train_aug=None, rgb_to_bgr=True):
        assert split in ['train', 'val'], "Unknown split {}".format(split)
        self.train = True if split=='train' else False
        # train_aug=None means "follow the split"; an explicit bool overrides.
        self.train_aug = self.train if train_aug is None else train_aug
        in_data_path = os.path.join(in_data_path, split)
        sin_data_path = os.path.join(sin_data_path, split)
        if self.train_aug:
            train_transform = [
                    tv.transforms.RandomResizedCrop(im_size, scale=(0.08,1.0)),
                    tv.transforms.RandomHorizontalFlip(),
                    ]
        else:
            train_transform = [
                    tv.transforms.Resize(256),
                    tv.transforms.CenterCrop(im_size)
                    ]
        def RGB_to_BGR(image):
            # Swap channel order of a CHW tensor.
            return image[[2,1,0],:,:]
        self.pretransform = tv.transforms.Compose(train_transform + [PilToNumpy()])
        self.posttransform = tv.transforms.Compose([
            NumpyToTensor(),
            tv.transforms.Normalize(IM_MEAN, IM_STD)] +
            ([RGB_to_BGR] if rgb_to_bgr else []) #PyCls imagenet models are trained in BGR input order
            )
        self.transform = tv.transforms.Compose([
            self.pretransform,
            self.posttransform
            ])
        # Items from both folders are served through one concatenated dataset.
        self.dataset = torch.utils.data.ConcatDataset([
            tv.datasets.ImageFolder(in_data_path, self.transform),
            tv.datasets.ImageFolder(sin_data_path, self.transform)
            ])
    def __getitem__(self, idx):
        return self.dataset[idx]
    def __len__(self):
        return len(self.dataset)
| 33,803 | 45.117326 | 157 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/overlap/models.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResHead(nn.Module):
    """ResNet classification head: global average pool then a linear layer.
    The pooled features are saved on `self.features` at every forward."""
    def __init__(self, w_in, nc):
        super(ResHead, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(w_in, nc, bias=True)
    def forward(self, x):
        pooled = self.avg_pool(x).view(x.size(0), -1)
        # Stash pre-classifier features for downstream feature extraction.
        self.features = pooled
        return self.fc(pooled)
class BottleneckTransform(nn.Module):
    """Bottleneck residual transform: 1x1 -> grouped 3x3 -> 1x1 convs, each
    followed by BN, with ReLU after the first two stages."""
    def __init__(self, w_in, w_out, stride, w_b, num_gs,
                 bn_params, stride_1x1, relu_inplace):
        super(BottleneckTransform, self).__init__()
        self._construct(w_in, w_out, stride, w_b, num_gs,
                        bn_params, stride_1x1, relu_inplace)
    def _construct(self, w_in, w_out, stride, w_b, num_gs,
                   bn_params, stride_1x1, relu_inplace):
        # Put the stride on the 1x1 conv (MSRA style) or on the 3x3 conv
        # (Torch/Caffe2 style), depending on stride_1x1.
        (str1x1, str3x3) = (stride, 1) if stride_1x1 else (1, stride)
        self.a = nn.Conv2d(w_in, w_b, kernel_size=1,
                           stride=str1x1, padding=0, bias=False)
        self.a_bn = torch.nn.BatchNorm2d(w_b, **bn_params)
        self.a_relu = nn.ReLU(inplace=relu_inplace)
        self.b = nn.Conv2d(w_b, w_b, kernel_size=3,
                           stride=str3x3, padding=1, groups=num_gs, bias=False)
        self.b_bn = torch.nn.BatchNorm2d(w_b, **bn_params)
        self.b_relu = nn.ReLU(inplace=relu_inplace)
        self.c = nn.Conv2d(w_b, w_out, kernel_size=1,
                           stride=1, padding=0, bias=False)
        self.c_bn = torch.nn.BatchNorm2d(w_out, **bn_params)
        # Mark the last BN so network init can zero its gamma.
        self.c_bn.final_bn = True
    def forward(self, x):
        # Same order as registration: a -> a_bn -> a_relu -> b -> ... -> c_bn.
        x = self.a_relu(self.a_bn(self.a(x)))
        x = self.b_relu(self.b_bn(self.b(x)))
        return self.c_bn(self.c(x))
class ResBlock(nn.Module):
    """Residual block computing relu(shortcut(x) + F(x)); the shortcut is a
    1x1 conv + BN projection whenever channels or resolution change."""
    def __init__(
        self, w_in, w_out, stride, w_b, num_gs, bn_params, stride_1x1, relu_inplace
    ):
        super(ResBlock, self).__init__()
        self._construct(w_in, w_out, stride, w_b, num_gs,
                        bn_params, stride_1x1, relu_inplace)
    def _add_skip_proj(self, w_in, w_out, stride, bn_params):
        # Projection path to match the residual branch's output shape.
        self.proj = nn.Conv2d(w_in, w_out, kernel_size=1,
                              stride=stride, padding=0, bias=False)
        self.bn = torch.nn.BatchNorm2d(w_out, **bn_params)
    def _construct(self, w_in, w_out, stride, w_b, num_gs,
                   bn_params, stride_1x1, relu_inplace):
        # A projection shortcut is needed whenever the shape changes.
        self.proj_block = (w_in != w_out) or (stride != 1)
        if self.proj_block:
            self._add_skip_proj(w_in, w_out, stride, bn_params)
        self.f = BottleneckTransform(w_in, w_out, stride, w_b, num_gs,
                                     bn_params, stride_1x1, relu_inplace)
        self.relu = nn.ReLU(relu_inplace)
    def forward(self, x):
        residual = self.f(x)
        shortcut = self.bn(self.proj(x)) if self.proj_block else x
        return self.relu(shortcut + residual)
class ResStage(nn.Module):
    """A ResNet stage: `d` residual blocks; only the first block applies the
    stride and the channel change."""
    def __init__(self, w_in, w_out, stride, d, w_b, num_gs,
                 bn_params, stride_1x1, relu_inplace):
        super(ResStage, self).__init__()
        self._construct(w_in, w_out, stride, d, w_b, num_gs,
                        bn_params, stride_1x1, relu_inplace)
    def _construct(self, w_in, w_out, stride, d, w_b, num_gs, bn_params, stride_1x1, relu_inplace):
        for i in range(d):
            is_first = (i == 0)
            # Downsampling and channel widening happen only in block 1.
            block = ResBlock(
                w_in if is_first else w_out,
                w_out,
                stride if is_first else 1,
                w_b, num_gs,
                bn_params, stride_1x1, relu_inplace
            )
            self.add_module('b{}'.format(i + 1), block)
    def forward(self, x):
        # Run the blocks sequentially in registration order.
        for block in self.children():
            x = block(x)
        return x
class ResStem(nn.Module):
    """ImageNet ResNet stem: 7x7 stride-2 conv, BN, ReLU, 3x3 stride-2 max
    pool (overall 4x spatial downsampling)."""
    def __init__(self, w_in, w_out, bn_params, relu_inplace):
        super(ResStem, self).__init__()
        self._construct_imagenet(w_in, w_out, bn_params, relu_inplace)
    def _construct_imagenet(self, w_in, w_out, bn_params, relu_inplace):
        self.conv = nn.Conv2d(w_in, w_out, kernel_size=7,
                              stride=2, padding=3, bias=False)
        self.bn = torch.nn.BatchNorm2d(w_out, **bn_params)
        self.relu = nn.ReLU(relu_inplace)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    def forward(self, x):
        # Same order as registration: conv -> bn -> relu -> pool.
        return self.pool(self.relu(self.bn(self.conv(x))))
class ResNetPycls(nn.Module):
    """ResNeXt/ResNet model (pycls-style) for ImageNet.

    Args:
        depth: network depth key into the file-level `_IN_STAGE_DS` table.
        width_factor: multiplier on the per-stage output widths.
        num_groups: number of groups in the grouped 3x3 convs (ResNeXt).
        width_per_group: bottleneck width per group.
        num_classes: size of the classification head.
        bn_params: kwargs forwarded to every BatchNorm2d (read-only; the
            shared default dict is never mutated).
        stride_1x1: put the downsampling stride on the 1x1 conv (MSRA style)
            instead of the 3x3 conv.
        relu_inplace: use in-place ReLU.
        final_gamma: zero-initialize the gamma of each block's final BN so
            residual blocks start as identity.
    """
    def __init__(self, depth=50, width_factor=1, num_groups=1, width_per_group=64,
                 num_classes=1000, bn_params={'eps':1e-5, 'momentum':0.1, 'affine':True},
                 stride_1x1=False, relu_inplace=True, final_gamma=True
                 ):
        super(ResNetPycls, self).__init__()
        self.depth = depth
        self.width = width_factor
        self.ng = num_groups
        self.width_per_group = width_per_group
        self.num_classes = num_classes
        self.bn_params = bn_params
        self.stride_1x1 = stride_1x1
        self.relu_inplace = relu_inplace
        self._construct_imagenet()
        # Fix: the original declared this helper as init_weights(m, cfg) but
        # ignored `cfg` and read `final_gamma` from the closure, while the
        # call site passed `final_gamma` in as `cfg`. The parameter is now
        # named for what it is and actually used; behavior is unchanged.
        def init_weights(m, zero_final_gamma):
            """Performs ResNet-style weight initialization."""
            if isinstance(m, nn.Conv2d):
                # Note that there is no bias due to BN
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))
                if hasattr(m, 'bias') and m.bias is not None:
                    m.bias.data.fill_(0.0)
            elif isinstance(m, nn.BatchNorm2d):
                # Zero-init gamma only on BNs tagged `final_bn` (the last BN
                # of each residual branch) when requested.
                zero_init_gamma = (
                    hasattr(m, 'final_bn') and m.final_bn and
                    zero_final_gamma
                )
                m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(mean=0.0, std=0.01)
                m.bias.data.zero_()
        self.apply(lambda m: init_weights(m, final_gamma))
    def _construct_imagenet(self):
        # Retrieve the number of blocks per stage
        (d1, d2, d3, d4) = _IN_STAGE_DS[self.depth]
        # Compute the initial bottleneck width
        num_gs = self.ng
        w_b = self.width_per_group * num_gs
        w1, w2, w3, w4 = [self.width * w for w in [256, 512, 1024, 2048]]
        # Stem: (N, 3, 224, 224) -> (N, 64, 56, 56)
        self.stem = ResStem(w_in=3, w_out=64, bn_params=self.bn_params, relu_inplace=self.relu_inplace)
        # Stage 1: (N, 64, 56, 56) -> (N, 256, 56, 56)
        self.s1 = ResStage(
            w_in=64, w_out=w1, stride=1, d=d1,
            w_b=w_b, num_gs=num_gs,
            bn_params=self.bn_params, stride_1x1=self.stride_1x1, relu_inplace=self.relu_inplace
        )
        # Stage 2: (N, 256, 56, 56) -> (N, 512, 28, 28)
        self.s2 = ResStage(
            w_in=w1, w_out=w2, stride=2, d=d2,
            w_b=w_b * 2, num_gs=num_gs,
            bn_params=self.bn_params, stride_1x1=self.stride_1x1, relu_inplace=self.relu_inplace
        )
        # Stage 3: (N, 512, 28, 28) -> (N, 1024, 14, 14)
        self.s3 = ResStage(
            w_in=w2, w_out=w3, stride=2, d=d3,
            w_b=w_b * 4, num_gs=num_gs,
            bn_params=self.bn_params, stride_1x1=self.stride_1x1, relu_inplace=self.relu_inplace
        )
        # Stage 4: (N, 1024, 14, 14) -> (N, 2048, 7, 7)
        self.s4 = ResStage(
            w_in=w3, w_out=w4, stride=2, d=d4,
            w_b=w_b * 8, num_gs=num_gs,
            bn_params=self.bn_params, stride_1x1=self.stride_1x1, relu_inplace=self.relu_inplace
        )
        # Head: (N, 2048, 7, 7) -> (N, num_classes)
        self.head = ResHead(w_in=w4, nc=self.num_classes)
    def forward(self, x):
        for module in self.children():
            x = module(x)
            if isinstance(module, ResHead):
                # Mirror the head's saved pre-classifier features on the model.
                self.features = module.features
        return x
| 8,640 | 35.459916 | 103 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/overlap/train_net.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
import os
import time
import datetime
log = logging.getLogger(__name__)
def eta_str(eta_td):
    """Converts an eta timedelta to a fixed-width 'DD,HH:MM:SS' string."""
    hours, remainder = divmod(eta_td.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '{0:02},{1:02}:{2:02}:{3:02}'.format(eta_td.days, hours, minutes, seconds)
def train_net(model, optimizer, train_dataset,
              batch_size,
              max_epoch,
              loader_params,
              lr_policy,
              checkpoint_folder='checkpoints',
              name=None,
              save_period=1,
              weights=None,
              num_gpus=1,
              is_leader=True):
    """Train `model`, resuming from the newest checkpoint when one exists.

    If `weights` is given, the model is loaded from that file and training is
    skipped entirely. Otherwise, if `checkpoint_folder` contains checkpoints
    for this `name`, training resumes from the latest; else it starts at
    epoch 1. Only the leader process (`is_leader`) writes checkpoints, every
    `save_period` epochs and at the final epoch. Returns the model.
    """
    # Checkpoint files look like '<name>_model_epoch_0007.pyth'.
    chpk_pre = 'model_epoch_'
    if name is not None:
        chpk_pre = name + "_" + chpk_pre
    chpk_post = '.pyth'
    if os.path.exists(checkpoint_folder):
        # Keep only checkpoints whose prefix exactly matches this run's name.
        checkpoints = [c for c in os.listdir(checkpoint_folder) if chpk_post in c and chpk_pre == "_".join(c.split("_")[:-1]) +"_"]
    else:
        checkpoints = []
    if weights:
        checkpoint = torch.load(weights, map_location='cpu')
        log.info("Pretrained weights provided. Loading model from {} and skipping training.".format(weights))
        if num_gpus > 1:
            # DistributedDataParallel wraps the real model in `.module`.
            model.module.load_state_dict(checkpoint['model_state'])
        else:
            model.load_state_dict(checkpoint['model_state'])
        return model
    elif checkpoints:
        # Resume: zero-padded epoch numbers make lexicographic sort == newest.
        last_checkpoint_name = os.path.join(checkpoint_folder, sorted(checkpoints)[-1])
        checkpoint = torch.load(last_checkpoint_name, map_location='cpu')
        log.info("Loading model from {}".format(last_checkpoint_name))
        if num_gpus > 1:
            model.module.load_state_dict(checkpoint['model_state'])
        else:
            model.load_state_dict(checkpoint['model_state'])
        optimizer.load_state_dict(checkpoint['optimizer_state'])
        start_epoch = checkpoint['epoch'] + 1
    else:
        start_epoch = 1
    if train_dataset is None:
        return model
    # Distributed training shards the data via a sampler; shuffle is then
    # handled by the sampler itself.
    sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\
            if num_gpus > 1 else None
    loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=batch_size,
            shuffle=True if sampler is None else False,
            sampler=sampler,
            num_workers=loader_params.num_workers,
            pin_memory=loader_params.pin_memory,
            drop_last=True
            )
    for i in range(start_epoch, max_epoch+1):
        log.info("Starting epoch {}/{}".format(i, max_epoch))
        time_start = time.time()
        if sampler:
            # Reshuffle the distributed shards each epoch.
            sampler.set_epoch(i)
        train_epoch(model, optimizer, loader, lr_policy, i, num_gpus)
        time_stop = time.time()
        seconds_taken = (time_stop - time_start)
        eta_td = datetime.timedelta(seconds=int(seconds_taken*(max_epoch-i)))
        log.info("Seconds taken: {:.2f}, Time remaining: {}".format(seconds_taken, eta_str(eta_td)))
        if (i % save_period == 0 or i == max_epoch) and is_leader:
            if num_gpus > 1:
                m = model.module
            else:
                m = model
            checkpoint = {
                    'epoch' : i,
                    'model_state' : m.state_dict(),
                    'optimizer_state' : optimizer.state_dict()
                    }
            checkpoint_file = "{:s}{:04d}{:s}".format(chpk_pre, i, chpk_post)
            if not os.path.exists(checkpoint_folder):
                os.mkdir(checkpoint_folder)
            checkpoint_file = os.path.join(checkpoint_folder, checkpoint_file)
            log.info("Saving model to {}".format(checkpoint_file))
            torch.save(checkpoint, checkpoint_file)
def train_epoch(model, optimizer, loader, lr_policy, epoch, num_gpus=1):
    """Run one training epoch and log the average loss and top-1 error.

    The epoch's learning rate comes from ``lr_policy(epoch - 1)`` and is
    written into every optimizer parameter group before any batch is seen.
    With ``num_gpus > 1`` the per-batch loss and correct-count are summed
    across processes via ``all_reduce`` so the logged averages are global.
    """
    new_lr = lr_policy(epoch - 1)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    model.train()
    criterion = torch.nn.CrossEntropyLoss().cuda()
    total_loss = 0.0
    total_correct = 0
    total_examples = 0
    total_batches = 0
    for inputs, labels in loader:
        inputs = inputs.cuda()
        labels = labels.cuda(non_blocking=True)
        logits = model(inputs)
        loss = criterion(logits, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_correct = torch.sum(torch.argmax(logits, dim=1) == labels)
        if num_gpus > 1:
            # Sum counts over all workers so the statistics reflect the
            # global batch, not this process's shard.
            torch.distributed.all_reduce(batch_correct)
            torch.distributed.all_reduce(loss)
        total_loss += loss.item()
        total_correct += batch_correct.item()
        total_examples += labels.size(0) * num_gpus
        total_batches += num_gpus
    avg_loss = total_loss / total_batches
    err = 100 * (1 - total_correct / total_examples)
    log.info("Avg loss: {:.3f}, Avg err: {:.3f}".format(avg_loss, err))
| 5,251 | 35.727273 | 135 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/overlap/wideresnet.py | # This source code is adapted from code licensed under the MIT license
# found in third_party/wideresnet_license from the root directory of
# this source tree.
"""WideResNet implementation (https://arxiv.org/abs/1605.07146)."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
  """Basic ResNet block.

  Pre-activation residual block: BN -> ReLU -> 3x3 conv -> BN -> ReLU ->
  3x3 conv, added to a shortcut.  When the input and output channel counts
  differ, the shortcut is a strided 1x1 convolution; otherwise it is the
  identity.
  """

  def __init__(self, in_planes, out_planes, stride, drop_rate=0.0):
    super(BasicBlock, self).__init__()
    self.bn1 = nn.BatchNorm2d(in_planes)
    self.relu1 = nn.ReLU(inplace=True)
    self.conv1 = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False)
    self.bn2 = nn.BatchNorm2d(out_planes)
    self.relu2 = nn.ReLU(inplace=True)
    self.conv2 = nn.Conv2d(
        out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
    self.drop_rate = drop_rate
    self.is_in_equal_out = (in_planes == out_planes)
    # `A and B or C` idiom: conv_shortcut is a 1x1 conv when channel counts
    # differ, otherwise None (the identity path is used in forward()).
    self.conv_shortcut = (not self.is_in_equal_out) and nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        padding=0,
        bias=False) or None

  def forward(self, x):
    # When channels differ, the pre-activation output is shared by both the
    # residual branch and the conv shortcut, so `x` is overwritten in place.
    if not self.is_in_equal_out:
      x = self.relu1(self.bn1(x))
    else:
      out = self.relu1(self.bn1(x))
    if self.is_in_equal_out:
      out = self.relu2(self.bn2(self.conv1(out)))
    else:
      out = self.relu2(self.bn2(self.conv1(x)))
    if self.drop_rate > 0:
      out = F.dropout(out, p=self.drop_rate, training=self.training)
    out = self.conv2(out)
    if not self.is_in_equal_out:
      return torch.add(self.conv_shortcut(x), out)
    else:
      return torch.add(x, out)
class NetworkBlock(nn.Module):
  """Layer container for blocks.

  Stacks ``nb_layers`` instances of ``block`` into an ``nn.Sequential``.
  Only the first block performs the channel change (in_planes -> out_planes)
  and applies ``stride``; the remaining blocks map out_planes -> out_planes
  with stride 1.
  """

  def __init__(self,
               nb_layers,
               in_planes,
               out_planes,
               block,
               stride,
               drop_rate=0.0):
    super(NetworkBlock, self).__init__()
    self.layer = self._make_layer(block, in_planes, out_planes, nb_layers,
                                  stride, drop_rate)

  def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,
                  drop_rate):
    # Conditional expressions replace the fragile `cond and a or b` idiom,
    # which silently picks `b` whenever `a` is falsy (e.g. a zero stride).
    layers = []
    for i in range(nb_layers):
      layers.append(
          block(in_planes if i == 0 else out_planes, out_planes,
                stride if i == 0 else 1, drop_rate))
    return nn.Sequential(*layers)

  def forward(self, x):
    return self.layer(x)
class WideResNet(nn.Module):
  """WideResNet class.

  WideResNet-depth-widen_factor for 32x32 inputs: an initial 3x3 conv,
  three NetworkBlock stages (the last two downsample by stride 2), then
  BN + ReLU, 8x8 global average pooling and a linear classifier.
  """

  def __init__(self, depth, num_classes, widen_factor=1, drop_rate=0.0):
    super(WideResNet, self).__init__()
    self.depth = depth
    self.widen_factor = widen_factor
    n_channels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
    # depth = 6n + 4: n blocks per stage, 2 convs per block, plus 4 fixed layers.
    assert (depth - 4) % 6 == 0
    n = (depth - 4) // 6
    block = BasicBlock
    # 1st conv before any network block
    self.conv1 = nn.Conv2d(
        3, n_channels[0], kernel_size=3, stride=1, padding=1, bias=False)
    # 1st block
    self.block1 = NetworkBlock(n, n_channels[0], n_channels[1], block, 1,
                               drop_rate)
    # 2nd block
    self.block2 = NetworkBlock(n, n_channels[1], n_channels[2], block, 2,
                               drop_rate)
    # 3rd block
    self.block3 = NetworkBlock(n, n_channels[2], n_channels[3], block, 2,
                               drop_rate)
    # global average pooling and classifier
    self.bn1 = nn.BatchNorm2d(n_channels[3])
    self.relu = nn.ReLU(inplace=True)
    self.fc = nn.Linear(n_channels[3], num_classes)
    self.n_channels = n_channels[3]
    # He-style init for convs, identity init for BN, zero bias for linear.
    # Note: this loop reuses (shadows) the local `n` from above.
    for m in self.modules():
      if isinstance(m, nn.Conv2d):
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
      elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()
      elif isinstance(m, nn.Linear):
        m.bias.data.zero_()

  def forward(self, x):
    out = self.conv1(x)
    out = self.block1(out)
    out = self.block2(out)
    out = self.block3(out)
    out = self.relu(self.bn1(out))
    # Fixed 8x8 pool — assumes 32x32 inputs (8x8 feature map here); TODO confirm.
    out = F.avg_pool2d(out, 8)
    out = out.view(-1, self.n_channels)
    self.features = out #Expose penultimate layer for access as features
    return self.fc(out)
# Stage depths for ImageNet models
_IN_STAGE_DS = {
18: (2, 2, 2, 2),
50: (3, 4, 6, 3),
101: (3, 4, 23, 3),
152: (3, 8, 36, 3),
}
| 4,461 | 29.986111 | 79 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/overlap/augmentations/imagenetc.py | # This source code is adapted from code licensed under the license at
# third_party/imagenetc_license from the root directory of the repository
# Originally available: github.com/hendrycks/robustness
# Modifications Copyright (c) Facebook, Inc. and its affiliates,
# licensed under the MIT license found in the LICENSE file in the root
# directory of this source tree.
from .base import Augmentation
import pickle
import torch
import torch.utils.data
# Per-channel mean and SD values in RGB order (125.3/123.0/113.9 are the
# standard CIFAR R, G, B channel means)
_MEAN = [125.3, 123.0, 113.9]
_SD = [63.0, 62.1, 66.7]
import os
from PIL import Image
import os.path
import time
import torch
import torchvision.datasets as dset
import torchvision.transforms as trn
import torch.utils.data as data
import numpy as np
from PIL import Image
# /////////////// Distortion Helpers ///////////////
import skimage as sk
from skimage.filters import gaussian
from io import BytesIO
from wand.image import Image as WandImage
from wand.api import library as wandlibrary
import wand.color as WandColor
import ctypes
from PIL import Image as PILImage
import cv2
from scipy.ndimage import zoom as scizoom
from scipy.ndimage.interpolation import map_coordinates
import warnings
warnings.simplefilter("ignore", UserWarning)
def disk(radius, alias_blur=0.1, dtype=np.float32):
    """Build a normalized disk-shaped convolution kernel.

    The binary disk of the given radius is averaged to sum to one and then
    softened with a small Gaussian blur (``alias_blur``) to reduce aliasing.
    Small radii (<= 8) use a fixed 17x17 support and a 3x3 blur; larger
    radii use a tight support and a 5x5 blur.
    """
    if radius <= 8:
        coords = np.arange(-8, 8 + 1)
        ksize = (3, 3)
    else:
        coords = np.arange(-radius, radius + 1)
        ksize = (5, 5)
    xx, yy = np.meshgrid(coords, coords)
    kernel = np.array((xx ** 2 + yy ** 2) <= radius ** 2, dtype=dtype)
    kernel /= np.sum(kernel)
    # supersample disk to antialias
    return cv2.GaussianBlur(kernel, ksize=ksize, sigmaX=alias_blur)
# Tell Python about the C method
wandlibrary.MagickMotionBlurImage.argtypes = (ctypes.c_void_p, # wand
ctypes.c_double, # radius
ctypes.c_double, # sigma
ctypes.c_double) # angle
# Extend wand.image.Image class to include method signature
class MotionImage(WandImage):
    """Wand Image subclass exposing ImageMagick's MagickMotionBlurImage."""
    def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):
        # Calls straight into the ImageMagick C function whose argtypes were
        # registered on `wandlibrary` above; mutates this image in place.
        wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle)
# modification of https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
def plasma_fractal(seed, mapsize, wibbledecay=3):
    """
    Generate a heightmap using diamond-square algorithm.
    Return square 2d array, side length 'mapsize', of floats in range 0-255.
    'mapsize' must be a power of two.

    Deterministic for a fixed `seed`; `wibbledecay` controls how quickly the
    random perturbation shrinks at finer scales (larger = smoother map).
    Despite the docstring above, the final map is normalized to [0, 1].
    """
    assert (mapsize & (mapsize - 1) == 0)
    # NOTE(review): np.float_ is deprecated in recent NumPy; np.float64 is
    # the equivalent — consider switching.
    maparray = np.empty((mapsize, mapsize), dtype=np.float_)
    maparray[0, 0] = 0
    stepsize = mapsize
    wibble = 100
    random_state = np.random.RandomState(seed=seed)
    # Perturbed mean of four neighbors; reads `wibble` from the enclosing
    # scope, which decays each iteration of the loop below.
    def wibbledmean(array):
        return array / 4 + wibble * random_state.uniform(-wibble, wibble, array.shape)
    def fillsquares():
        """For each square of points stepsize apart,
        calculate middle value as mean of points + wibble"""
        cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
        squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
        squareaccum += np.roll(squareaccum, shift=-1, axis=1)
        maparray[stepsize // 2:mapsize:stepsize,
                 stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)
    def filldiamonds():
        """For each diamond of points stepsize apart,
        calculate middle value as mean of points + wibble"""
        mapsize = maparray.shape[0]
        drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]
        ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
        ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
        lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
        ltsum = ldrsum + lulsum
        maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
        tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
        tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
        ttsum = tdrsum + tulsum
        maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)
    # Alternate square and diamond passes at progressively finer scales.
    while stepsize >= 2:
        fillsquares()
        filldiamonds()
        stepsize //= 2
        wibble /= wibbledecay
    # Normalize to [0, 1].
    maparray -= maparray.min()
    return maparray / maparray.max()
def clipped_zoom(img, zoom_factor):
    """Zoom into the center of ``img`` by ``zoom_factor``.

    A central crop of side ``ceil(h / zoom_factor)`` is zoomed with bilinear
    interpolation and then trimmed so the output has the input's height and
    width (channels are left untouched).
    """
    h = img.shape[0]
    # ceil crop height(= crop width)
    crop_h = int(np.ceil(h / zoom_factor))
    top = (h - crop_h) // 2
    zoomed = scizoom(img[top:top + crop_h, top:top + crop_h],
                     (zoom_factor, zoom_factor, 1), order=1)
    # trim off any extra pixels
    excess = (zoomed.shape[0] - h) // 2
    return zoomed[excess:excess + h, excess:excess + h]
# /////////////// End Distortion Helpers ///////////////
# /////////////// Distortions ///////////////
def gaussian_noise(x, im_size, seed, severity=1):
    """Additive Gaussian noise; sigma chosen by severity and dataset size.

    Input is a uint8 HxWxC image; output is a float array in [0, 255].
    Deterministic for a fixed ``seed``.
    """
    if im_size == 32:
        sigmas = [0.04, 0.06, .08, .09, .10]
    else:
        sigmas = [.08, .12, 0.18, 0.26, 0.38]
    sigma = sigmas[int(severity) - 1]
    rng = np.random.RandomState(seed=seed)
    img = np.array(x) / 255.
    return np.clip(img + rng.normal(size=img.shape, scale=sigma), 0, 1) * 255
def shot_noise(x, im_size, seed, severity=1):
    """Poisson (shot) noise; lower photon count = stronger corruption.

    Each pixel intensity in [0, 1] is scaled by the photon count ``c``,
    Poisson-sampled, and rescaled.  Deterministic for a fixed ``seed``.
    """
    if im_size == 32:
        counts = [500, 250, 100, 75, 50]
    else:
        counts = [60, 25, 12, 5, 3]
    photons = counts[int(severity) - 1]
    rng = np.random.RandomState(seed=seed)
    img = np.array(x) / 255.
    return np.clip(rng.poisson(img * photons) / photons, 0, 1) * 255
def impulse_noise(x, im_size, seed, severity=1):
    """Salt-and-pepper noise via skimage; ``severity`` picks the amount."""
    if im_size == 32:
        amounts = [.01, .02, .03, .05, .07]
    else:
        amounts = [.03, .06, .09, 0.17, 0.27]
    amount = amounts[int(severity) - 1]
    noisy = sk.util.random_noise(np.array(x) / 255., mode='s&p',
                                 amount=amount, seed=seed)
    return np.clip(noisy, 0, 1) * 255
def speckle_noise(x, im_size, seed, severity=1):
    """Multiplicative (speckle) Gaussian noise: x + x * N(0, c).

    Deterministic for a fixed ``seed``; output is a float array in [0, 255].
    """
    if im_size == 32:
        scales = [.06, .1, .12, .16, .2]
    else:
        scales = [.15, .2, 0.35, 0.45, 0.6]
    scale = scales[int(severity) - 1]
    rng = np.random.RandomState(seed=seed)
    img = np.array(x) / 255.
    return np.clip(img + img * rng.normal(size=img.shape, scale=scale), 0, 1) * 255
def gaussian_blur(x, im_size, severity=1):
    """Gaussian blur with a per-severity sigma (skimage implementation)."""
    if im_size == 32:
        sigmas = [.4, .6, 0.7, .8, 1]
    else:
        sigmas = [1, 2, 3, 4, 6]
    sigma = sigmas[int(severity) - 1]
    blurred = gaussian(np.array(x) / 255., sigma=sigma, multichannel=True)
    return np.clip(blurred, 0, 1) * 255
def glass_blur(x, im_size, seed, severity=1):
    """Glass blur: Gaussian blur, local pixel shuffling, Gaussian blur again.

    Deterministic for a fixed ``seed``.  The triple loop swaps pixels in
    place in a fixed (bottom-right to top-left) order, so the exact result
    depends on that iteration order.
    """
    # sigma, max_delta, iterations
    if im_size == 32:
        c = [(0.05,1,1), (0.25,1,1), (0.4,1,1), (0.25,1,2), (0.4,1,2)][int(severity) - 1]
    else:
        c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][int(severity) - 1]
    random_state = np.random.RandomState(seed=seed)
    x = np.uint8(gaussian(np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
    # locally shuffle pixels
    for i in range(c[2]):
        for h in range(im_size - c[1], c[1], -1):
            for w in range(im_size - c[1], c[1], -1):
                # Swap each pixel with a random neighbor up to c[1] away.
                dx, dy = random_state.randint(-c[1], c[1], size=(2,))
                h_prime, w_prime = h + dy, w + dx
                # swap
                x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
    return np.clip(gaussian(x / 255., sigma=c[0], multichannel=True), 0, 1) * 255
def defocus_blur(x, im_size, severity=1):
    """Defocus blur: convolve each channel with an anti-aliased disk kernel.

    Severity selects (disk radius, alias blur) pairs; output is a float
    HxWx3 array in [0, 255].
    """
    if im_size == 32:
        params = [(0.3, 0.4), (0.4, 0.5), (0.5, 0.6), (1, 0.2), (1.5, 0.1)]
    else:
        params = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)]
    radius, alias = params[int(severity) - 1]
    img = np.array(x) / 255.
    kernel = disk(radius=radius, alias_blur=alias)
    # Filter the three channels independently, then restack to HxWxC.
    filtered = np.array([cv2.filter2D(img[:, :, d], -1, kernel)
                         for d in range(3)])
    return np.clip(filtered.transpose((1, 2, 0)), 0, 1) * 255
def motion_blur(x, im_size, angle, severity=1):
    """Motion blur at the given angle via ImageMagick's MotionBlurImage.

    The image is round-tripped through PNG into a wand MotionImage, blurred
    with per-severity (radius, sigma), and decoded back with OpenCV.  The
    decoded result is BGR (or greyscale), so channels are reordered to RGB.
    Returns a float array in [0, 255].
    """
    if im_size == 32:
        c = [(6,1), (6,1.5), (6,2), (8,2), (9,2.5)][int(severity) - 1]
    else:
        c = [(10, 3), (15, 5), (15, 8), (15, 12), (20, 15)][int(severity) - 1]
    output = BytesIO()
    x = Image.fromarray(x)
    x.save(output, format='PNG')
    x = MotionImage(blob=output.getvalue())
    x.motion_blur(radius=c[0], sigma=c[1], angle=angle)
    # np.frombuffer replaces np.fromstring, which is deprecated (and removed
    # for binary input in modern NumPy); it is equivalent here and avoids a
    # copy of the encoded blob.
    x = cv2.imdecode(np.frombuffer(x.make_blob(), np.uint8),
                     cv2.IMREAD_UNCHANGED)
    if x.shape != (im_size, im_size):
        return np.clip(x[..., [2, 1, 0]], 0, 255)  # BGR to RGB
    else:  # greyscale to RGB
        return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255)
def zoom_blur(x, im_size, severity=1):
    """Zoom blur: average the image with itself at several zoom factors."""
    if im_size == 32:
        zooms = [np.arange(1, 1.06, 0.01), np.arange(1, 1.11, 0.01),
                 np.arange(1, 1.16, 0.01), np.arange(1, 1.21, 0.01),
                 np.arange(1, 1.26, 0.01)][int(severity) - 1]
    else:
        zooms = [np.arange(1, 1.11, 0.01),
                 np.arange(1, 1.16, 0.01),
                 np.arange(1, 1.21, 0.02),
                 np.arange(1, 1.26, 0.02),
                 np.arange(1, 1.31, 0.03)][int(severity) - 1]
    img = (np.array(x) / 255.).astype(np.float32)
    accum = np.zeros_like(img)
    for factor in zooms:
        accum += clipped_zoom(img, factor)
    blended = (img + accum) / (len(zooms) + 1)
    return np.clip(blended, 0, 1) * 255
def fog(x, im_size, seed, severity=1):
    """Fog: add a diamond-square plasma fractal and renormalize brightness.

    ``c[0]`` scales the fog layer and ``c[1]`` is the fractal's wibble
    decay; deterministic for a fixed ``seed``.
    """
    if im_size == 32:
        c = [(.2,3), (.5,3), (0.75,2.5), (1,2), (1.5,1.75)][int(severity) - 1]
        mapsize = 32
    else:
        c = [(1.5, 2), (2, 2), (2.5, 1.7), (2.5, 1.5), (3, 1.4)][int(severity) - 1]
        mapsize = 256
    img = np.array(x) / 255.
    max_val = img.max()
    fog_layer = plasma_fractal(wibbledecay=c[1], seed=seed, mapsize=mapsize)
    img = img + c[0] * fog_layer[:im_size, :im_size][..., np.newaxis]
    # Rescale so the brightest pixel stays where it was before fogging.
    return np.clip(img * max_val / (max_val + c[0]), 0, 1) * 255
def frost(x, im_size, frost_path, image_idx, crop_pos, severity=1):
    """Blend a cropped frost texture photo into the image.

    ``image_idx`` selects one of six frost images bundled under
    ``frost_path``; ``crop_pos`` is the (row, col) of a precomputed crop so
    the corruption is reproducible from recorded parameters.  Severity picks
    the (image weight, frost weight) blend.  Returns a float array in
    [0, 255].
    """
    if im_size == 32:
        c = [(1, 0.2), (1, 0.3), (0.9, 0.4), (0.85, 0.4), (0.75, 0.45)][int(severity) - 1]
    else:
        c = [(1, 0.4),
             (0.8, 0.6),
             (0.7, 0.7),
             (0.65, 0.7),
             (0.6, 0.75)][int(severity) - 1]
    idx = image_idx
    filename = ['./frost1.png', './frost2.png', './frost3.png', './frost4.jpg', './frost5.jpg', './frost6.jpg'][idx]
    filename = os.path.join(frost_path, filename)
    frost = cv2.imread(filename)
    if im_size == 32:
        # Shrink the large texture before cropping for 32x32 images.
        frost = cv2.resize(frost, (0, 0), fx=0.2, fy=0.2)
    # randomly crop and convert to rgb
    #x_start, y_start = np.random.randint(0, frost.shape[0] - 32), np.random.randint(0, frost.shape[1] - 32)
    # The crop is supplied by the caller instead of sampled here, so the
    # same recorded parameters reproduce the same output.
    x_start, y_start = crop_pos[0], crop_pos[1]
    frost = frost[x_start:x_start + im_size, y_start:y_start + im_size][..., [2, 1, 0]]
    return np.clip(c[0] * np.array(x) + c[1] * frost, 0, 255)
def snow(x, im_size, seed, severity=1):
    """Snow: thresholded Gaussian noise zoomed and motion-blurred into
    streaks, composited over a partially greyed/brightened image.

    Severity tuple c = (noise mean, noise sd, zoom, threshold, blur radius,
    blur sigma, scene-blend weight).  Deterministic for a fixed ``seed``.
    """
    if im_size == 32:
        c = [(0.1,0.2,1,0.6,8,3,0.95),
             (0.1,0.2,1,0.5,10,4,0.9),
             (0.15,0.3,1.75,0.55,10,4,0.9),
             (0.25,0.3,2.25,0.6,12,6,0.85),
             (0.3,0.3,1.25,0.65,14,12,0.8)][int(severity) - 1]
    else:
        c = [(0.1, 0.3, 3, 0.5, 10, 4, 0.8),
             (0.2, 0.3, 2, 0.5, 12, 4, 0.7),
             (0.55, 0.3, 4, 0.9, 12, 8, 0.7),
             (0.55, 0.3, 4.5, 0.85, 12, 8, 0.65),
             (0.55, 0.3, 2.5, 0.85, 12, 12, 0.55)][int(severity) - 1]
    random_state = np.random.RandomState(seed=seed)
    x = np.array(x, dtype=np.float32) / 255.
    snow_layer = random_state.normal(size=x.shape[:2], loc=c[0], scale=c[1])  # [:2] for monochrome
    snow_layer = clipped_zoom(snow_layer[..., np.newaxis], c[2])
    # Keep only the brightest noise as snowflakes.
    snow_layer[snow_layer < c[3]] = 0
    snow_layer = PILImage.fromarray((np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8), mode='L')
    output = BytesIO()
    snow_layer.save(output, format='PNG')
    snow_layer = MotionImage(blob=output.getvalue())
    # Blur the flakes into streaks at a random downward angle.
    snow_layer.motion_blur(radius=c[4], sigma=c[5], angle=random_state.uniform(-135, -45))
    # NOTE(review): np.fromstring is deprecated for binary input in modern
    # NumPy; np.frombuffer is the equivalent replacement — consider switching.
    snow_layer = cv2.imdecode(np.fromstring(snow_layer.make_blob(), np.uint8),
                              cv2.IMREAD_UNCHANGED) / 255.
    snow_layer = snow_layer[..., np.newaxis]
    # Blend the scene toward a brightened greyscale version (snowy haze).
    x = c[6] * x + (1 - c[6]) * np.maximum(x, cv2.cvtColor(x, cv2.COLOR_RGB2GRAY).reshape(im_size, im_size, 1) * 1.5 + 0.5)
    # Composite the streaks and their 180-degree rotation for density.
    return np.clip(x + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255
def spatter(x, im_size, seed, severity=1):
    """Spatter: overlays water droplets (c[5]==0) or mud blobs (c[5]==1).

    c = (noise mean, noise sd, blur sigma, threshold, intensity/mask sigma,
    mode flag).  Deterministic for a fixed ``seed``.
    """
    if im_size == 32:
        c = [(0.62,0.1,0.7,0.7,0.5,0),
             (0.65,0.1,0.8,0.7,0.5,0),
             (0.65,0.3,1,0.69,0.5,0),
             (0.65,0.1,0.7,0.69,0.6,1),
             (0.65,0.1,0.5,0.68,0.6,1)][int(severity) - 1]
    else:
        c = [(0.65, 0.3, 4, 0.69, 0.6, 0),
             (0.65, 0.3, 3, 0.68, 0.6, 0),
             (0.65, 0.3, 2, 0.68, 0.5, 0),
             (0.65, 0.3, 1, 0.65, 1.5, 1),
             (0.67, 0.4, 1, 0.65, 1.5, 1)][int(severity) - 1]
    x = np.array(x, dtype=np.float32) / 255.
    random_state = np.random.RandomState(seed=seed)
    # Smooth random field thresholded into droplet/blob regions.
    liquid_layer = random_state.normal(size=x.shape[:2], loc=c[0], scale=c[1])
    liquid_layer = gaussian(liquid_layer, sigma=c[2])
    liquid_layer[liquid_layer < c[3]] = 0
    if c[5] == 0:
        # Water mode: shape the droplets with edge/distance transforms and
        # an emboss-like kernel, then composite a pale turquoise layer.
        liquid_layer = (liquid_layer * 255).astype(np.uint8)
        dist = 255 - cv2.Canny(liquid_layer, 50, 150)
        dist = cv2.distanceTransform(dist, cv2.DIST_L2, 5)
        _, dist = cv2.threshold(dist, 20, 20, cv2.THRESH_TRUNC)
        dist = cv2.blur(dist, (3, 3)).astype(np.uint8)
        dist = cv2.equalizeHist(dist)
        # ker = np.array([[-1,-2,-3],[-2,0,0],[-3,0,1]], dtype=np.float32)
        # ker -= np.mean(ker)
        ker = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])
        dist = cv2.filter2D(dist, cv2.CV_8U, ker)
        dist = cv2.blur(dist, (3, 3)).astype(np.float32)
        m = cv2.cvtColor(liquid_layer * dist, cv2.COLOR_GRAY2BGRA)
        m /= np.max(m, axis=(0, 1))
        m *= c[4]
        # water is pale turqouise
        color = np.concatenate((175 / 255. * np.ones_like(m[..., :1]),
                                238 / 255. * np.ones_like(m[..., :1]),
                                238 / 255. * np.ones_like(m[..., :1])), axis=2)
        color = cv2.cvtColor(color, cv2.COLOR_BGR2BGRA)
        x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA)
        return cv2.cvtColor(np.clip(x + m * color, 0, 1), cv2.COLOR_BGRA2BGR) * 255
    else:
        # Mud mode: binary mask softened with a Gaussian, replacing the
        # masked pixels with a mud-brown color.
        m = np.where(liquid_layer > c[3], 1, 0)
        m = gaussian(m.astype(np.float32), sigma=c[4])
        m[m < 0.8] = 0
        # m = np.abs(m) ** (1/c[4])
        # mud brown
        color = np.concatenate((63 / 255. * np.ones_like(x[..., :1]),
                                42 / 255. * np.ones_like(x[..., :1]),
                                20 / 255. * np.ones_like(x[..., :1])), axis=2)
        color *= m[..., np.newaxis]
        x *= (1 - m[..., np.newaxis])
        return np.clip(x + color, 0, 1) * 255
def contrast(x, im_size, severity=1):
    """Reduce contrast by shrinking each pixel toward the per-channel mean.

    Smaller factors (higher severity) pull pixels closer to the mean.
    Returns a float array in [0, 255].
    """
    if im_size == 32:
        factors = [.75, .5, .4, .3, 0.15]
    else:
        factors = [0.4, .3, .2, .1, .05]
    factor = factors[int(severity) - 1]
    img = np.array(x) / 255.
    channel_means = np.mean(img, axis=(0, 1), keepdims=True)
    return np.clip((img - channel_means) * factor + channel_means, 0, 1) * 255
def brightness(x, im_size, severity=1):
    """Increase brightness by adding a constant to the HSV value channel."""
    if im_size == 32:
        deltas = [.05, .1, .15, .2, .3]
    else:
        deltas = [.1, .2, .3, .4, .5]
    delta = deltas[int(severity) - 1]
    hsv = sk.color.rgb2hsv(np.array(x) / 255.)
    hsv[:, :, 2] = np.clip(hsv[:, :, 2] + delta, 0, 1)
    rgb = sk.color.hsv2rgb(hsv)
    return np.clip(rgb, 0, 1) * 255
def saturate(x, im_size, severity=1):
    """Scale/offset the HSV saturation channel; severity picks (scale, offset)."""
    if im_size == 32:
        params = [(0.3, 0), (0.1, 0), (1.5, 0), (2, 0.1), (2.5, 0.2)]
    else:
        params = [(0.3, 0), (0.1, 0), (2, 0), (5, 0.1), (20, 0.2)]
    scale, offset = params[int(severity) - 1]
    hsv = sk.color.rgb2hsv(np.array(x) / 255.)
    hsv[:, :, 1] = np.clip(hsv[:, :, 1] * scale + offset, 0, 1)
    rgb = sk.color.hsv2rgb(hsv)
    return np.clip(rgb, 0, 1) * 255
def jpeg_compression(x, im_size, severity=1):
    """JPEG-encode and decode at a per-severity quality; returns a PIL image."""
    if im_size == 32:
        qualities = [80, 65, 58, 50, 40]
    else:
        qualities = [25, 18, 15, 10, 7]
    quality = qualities[int(severity) - 1]
    buf = BytesIO()
    Image.fromarray(x).save(buf, 'JPEG', quality=quality)
    return PILImage.open(buf)
def pixelate(x, im_size, severity=1):
    """Pixelate by box-downscaling and box-upscaling back to ``im_size``."""
    if im_size == 32:
        fractions = [0.95, 0.9, 0.85, 0.75, 0.65]
    else:
        fractions = [0.6, 0.5, 0.4, 0.3, 0.25]
    frac = fractions[int(severity) - 1]
    side = int(im_size * frac)
    small = Image.fromarray(x).resize((side, side), PILImage.BOX)
    return small.resize((im_size, im_size), PILImage.BOX)
# mod of https://gist.github.com/erniejunior/601cdf56d2b424757de5
def elastic_transform(image, im_size, seed, severity=1):
    """Elastic deformation: a random affine warp followed by a smooth random
    displacement field (per erniejunior's gist).

    c = (displacement magnitude, displacement smoothing sigma, affine jitter).
    Deterministic for a fixed ``seed``.
    """
    IMSIZE = im_size
    if im_size == 32:
        c = [(IMSIZE*0, IMSIZE*0, IMSIZE*0.08),
             (IMSIZE*0.05, IMSIZE*0.2, IMSIZE*0.07),
             (IMSIZE*0.08, IMSIZE*0.06, IMSIZE*0.06),
             (IMSIZE*0.1, IMSIZE*0.04, IMSIZE*0.05),
             (IMSIZE*0.1, IMSIZE*0.03, IMSIZE*0.03)][int(severity) - 1]
    else:
        c = [(244 * 2, 244 * 0.7, 244 * 0.1), # 244 should have been 224, but ultimately nothing is incorrect
             (244 * 2, 244 * 0.08, 244 * 0.2),
             (244 * 0.05, 244 * 0.01, 244 * 0.02),
             (244 * 0.07, 244 * 0.01, 244 * 0.02),
             (244 * 0.12, 244 * 0.01, 244 * 0.02)][int(severity) - 1]
    random_state = np.random.RandomState(seed=seed)
    image = np.array(image, dtype=np.float32) / 255.
    shape = image.shape
    shape_size = shape[:2]
    # random affine
    # Three reference points around the image center are jittered by up to
    # c[2] pixels to define a random affine warp.
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([center_square + square_size,
                       [center_square[0] + square_size, center_square[1] - square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-c[2], c[2], size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
    # Smooth per-pixel displacement field: uniform noise blurred with sigma
    # c[1], scaled by c[0].
    dx = (gaussian(random_state.uniform(-1, 1, size=shape[:2]),
                   c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
    dy = (gaussian(random_state.uniform(-1, 1, size=shape[:2]),
                   c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
    dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
    x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
    indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))
    return np.clip(map_coordinates(image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255
class GaussianNoise(Augmentation):
    """Additive Gaussian noise corruption; records a fresh RNG seed per call."""
    tags = ['imagenet_c', 'noise']
    name = 'gaussian_noise'
    def sample_parameters(self):
        # A 32-bit seed makes the corruption reproducible from recorded params.
        seed = np.random.randint(0,2**32)
        return {'seed': seed}
    def transform(self, image, seed):
        return np.uint8(gaussian_noise(image, self.im_size, seed, severity=self.severity))
class ShotNoise(Augmentation):
    """Poisson (shot) noise corruption; records a fresh RNG seed per call."""
    tags = ['imagenet_c', 'noise']
    name = 'shot_noise'
    def sample_parameters(self):
        seed = np.random.randint(0,2**32)
        return {'seed': seed}
    def transform(self, image, seed):
        return np.uint8(shot_noise(image, self.im_size, seed, severity=self.severity))
class ImpulseNoise(Augmentation):
    """Salt-and-pepper noise corruption; records a fresh RNG seed per call."""
    tags = ['imagenet_c', 'noise']
    name = 'impulse_noise'
    def sample_parameters(self):
        seed = np.random.randint(0,2**32)
        return {'seed': seed}
    def transform(self, image, seed):
        return np.uint8(impulse_noise(image, self.im_size, seed, severity=self.severity))
class SpeckleNoise(Augmentation):
    """Multiplicative (speckle) noise corruption; 'extra' ImageNet-C set."""
    tags = ['imagenet_c', 'extra']
    name = 'speckle_noise'
    def sample_parameters(self):
        seed = np.random.randint(0,2**32)
        return {'seed': seed}
    def transform(self, image, seed):
        return np.uint8(speckle_noise(image, self.im_size, seed, severity=self.severity))
class ElasticTransform(Augmentation):
    """Elastic deformation corruption; records a fresh RNG seed per call."""
    tags = ['imagenet_c', 'digital']
    name = 'elastic_transform'
    def sample_parameters(self):
        seed = np.random.randint(0,2**32)
        return {'seed': seed}
    def transform(self, image, seed):
        return np.uint8(elastic_transform(image, self.im_size, seed, severity=self.severity))
class GlassBlur(Augmentation):
    """Glass blur corruption (blur + local pixel shuffle); seeded per call."""
    tags = ['imagenet_c', 'blur']
    name = 'glass_blur'
    def sample_parameters(self):
        seed = np.random.randint(0,2**32)
        return {'seed': seed}
    def transform(self, image, seed):
        return np.uint8(glass_blur(image, self.im_size, seed, severity=self.severity))
class Snow(Augmentation):
    """Snow corruption (motion-blurred noise streaks); seeded per call."""
    tags = ['imagenet_c', 'weather']
    name = 'snow'
    def sample_parameters(self):
        seed = np.random.randint(0,2**32)
        return {'seed': seed}
    def transform(self, image, seed):
        return np.uint8(snow(image, self.im_size, seed, severity=self.severity))
class Spatter(Augmentation):
    """Spatter corruption (water/mud overlay); 'extra' ImageNet-C set."""
    tags = ['imagenet_c', 'extra']
    name = 'spatter'
    def sample_parameters(self):
        seed = np.random.randint(0,2**32)
        return {'seed': seed}
    def transform(self, image, seed):
        return np.uint8(spatter(image, self.im_size, seed, severity=self.severity))
class Fog(Augmentation):
    """Fog corruption (plasma-fractal haze); seeded per call."""
    # NOTE(review): tagged 'blur' here although fog is a weather corruption
    # in the original ImageNet-C taxonomy — confirm this is intentional.
    tags = ['imagenet_c', 'blur']
    name = 'fog'
    def sample_parameters(self):
        seed = np.random.randint(0,2**32)
        return {'seed': seed}
    def transform(self, image, seed):
        return np.uint8(fog(image, self.im_size, seed, severity=self.severity))
class ZoomBlur(Augmentation):
    """Zoom blur corruption; deterministic, so no parameters are sampled."""
    tags = ['imagenet_c', 'blur']
    name = 'zoom_blur'
    def sample_parameters(self):
        return {}
    def transform(self, image):
        return np.uint8(zoom_blur(image, self.im_size, severity=self.severity))
class Pixelate(Augmentation):
    """Pixelation corruption; deterministic, so no parameters are sampled."""
    tags = ['imagenet_c', 'digital']
    name = 'pixelate'
    def sample_parameters(self):
        return {}
    def transform(self, image):
        return np.uint8(pixelate(image, self.im_size, severity=self.severity))
class JPEGCompression(Augmentation):
    """JPEG compression corruption; deterministic, no parameters sampled."""
    tags = ['imagenet_c', 'digital']
    name = 'jpeg_compression'
    def sample_parameters(self):
        return {}
    def transform(self, image):
        return np.uint8(jpeg_compression(image, self.im_size, severity=self.severity))
class Contrast(Augmentation):
    """Contrast reduction corruption; deterministic, no parameters sampled."""
    tags = ['imagenet_c', 'digital']
    name = 'contrast'
    def sample_parameters(self):
        return {}
    def transform(self, image):
        return np.uint8(contrast(image, self.im_size, severity=self.severity))
class Brightness(Augmentation):
    """Brightness increase corruption; deterministic, no parameters sampled."""
    tags = ['imagenet_c', 'weather']
    name = 'brightness'
    def sample_parameters(self):
        return {}
    def transform(self, image):
        return np.uint8(brightness(image, self.im_size, severity=self.severity))
class MotionBlur(Augmentation):
    """Motion blur corruption; records the sampled blur angle in degrees."""
    tags = ['imagenet_c', 'blur']
    name = 'motion_blur'
    def sample_parameters(self):
        angle = np.random.uniform(-45,45)
        return {'angle' : angle}
    def transform(self, image, angle):
        return np.uint8(motion_blur(image, self.im_size, angle=angle, severity=self.severity))
class GaussianBlur(Augmentation):
    """Gaussian blur corruption; deterministic, no parameters sampled."""
    tags = ['imagenet_c', 'extra']
    name = 'gaussian_blur'
    def sample_parameters(self):
        return {}
    def transform(self, image):
        return np.uint8(gaussian_blur(image, self.im_size, severity=self.severity))
class Frost(Augmentation):
    """Frost corruption: blends a cropped frost texture into the image.

    Requires ``frost_path``, a directory containing the six frost texture
    files shipped with ImageNet-C.  The sampled texture index and crop
    position are recorded so the corruption can be replayed exactly.
    """
    tags = ['imagenet_c', 'path_required', 'weather']
    name = 'frost'
    def __init__(self, severity, im_size, record=False, max_intensity=False, frost_path=None):
        super().__init__(severity, im_size, record, max_intensity)
        self.frost_path = frost_path
    def sample_parameters(self):
        # NOTE(review): only the first five of the six textures can be drawn
        # here (randint(5)); this mirrors the upstream ImageNet-C code.
        image_idx = np.random.randint(5)
        filename = ['./frost1.png', './frost2.png', './frost3.png', './frost4.jpg', './frost5.jpg', './frost6.jpg'][image_idx]
        filename = os.path.join(self.frost_path, filename)
        frost = cv2.imread(filename)
        if self.im_size == 32:
            frost = cv2.resize(frost, (0, 0), fx=0.2, fy=0.2)
        # Sample a random crop position that keeps the crop inside the texture.
        x_start, y_start = np.random.randint(0, frost.shape[0] - self.im_size), np.random.randint(0, frost.shape[1] - self.im_size)
        return {'image_idx' : image_idx, 'crop_pos' : (x_start, y_start)}
    def transform(self, image, image_idx, crop_pos):
        return np.uint8(frost(image, self.im_size, frost_path=self.frost_path, image_idx=image_idx, crop_pos=crop_pos, severity=self.severity))
    def convert_to_numpy(self, params):
        return np.array([params['image_idx']] + list( params['crop_pos']))
    def convert_from_numpy(self, numpy_record):
        # Builtin int replaces np.int, which was removed in NumPy 1.24.
        return {'image_idx' : int(numpy_record[0]), 'crop_pos' : tuple(numpy_record[1:].astype(int).tolist())}
class DefocusBlur(Augmentation):
    """Defocus blur corruption; deterministic, no parameters sampled."""
    tags = ['imagenet_c', 'blur']
    name = 'defocus_blur'
    def sample_parameters(self):
        return {}
    def transform(self, image):
        return np.uint8(defocus_blur(image, self.im_size, severity=self.severity))
class Saturate(Augmentation):
    """Saturation change corruption; deterministic, no parameters sampled."""
    tags = ['imagenet_c', 'extra']
    name = 'saturate'
    def sample_parameters(self):
        return {}
    def transform(self, image):
        return np.uint8(saturate(image, self.im_size, severity=self.severity))
| 25,528 | 31.940645 | 143 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/experiments/overlap/augmentations/utils/converters.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from PIL import Image
import torch
class PilToNumpy(object):
    """Convert a PIL image (or array-like) to a numpy array.

    Args:
        as_float: output float32 instead of uint8.
        scaled_to_one: additionally divide by 255 so values lie in [0, 1];
            only valid together with ``as_float``.
    """
    def __init__(self, as_float=False, scaled_to_one=False):
        self.as_float = as_float
        self.scaled_to_one = scaled_to_one
        assert (not scaled_to_one) or (as_float and scaled_to_one),\
            "Must output a float if rescaling to one."

    def __call__(self, image):
        if not self.as_float:
            return np.array(image).astype(np.uint8)
        elif not self.scaled_to_one:
            # Bug fix: this read `notself.scaled_to_one` (missing space),
            # which raised NameError whenever as_float=True was used.
            return np.array(image).astype(np.float32)
        else:
            return np.array(image).astype(np.float32) / 255
class NumpyToPil(object):
    """Convert a numpy HxWxC image array into a PIL Image."""
    def __init__(self):
        pass

    def __call__(self, image):
        return Image.fromarray(image)
class NumpyToTensor(object):
    """Convert an HxWxC numpy image into a float torch tensor.

    Args:
        HWC_to_CHW: transpose to the channel-first layout torch expects.
        bit_to_float: divide by 255 so uint8 pixel values map into [0, 1].
    """
    def __init__(self, HWC_to_CHW=True, bit_to_float=True):
        # (Removed a stray no-op `pass` that followed these assignments.)
        self.HWC_to_CHW = HWC_to_CHW
        self.bit_to_float = bit_to_float

    def __call__(self, image):
        image = image.astype(np.float32)
        if self.bit_to_float:
            image /= 255
        if self.HWC_to_CHW:
            image = image.transpose(2, 0, 1)
        return torch.Tensor(image)
| 1,351 | 29.044444 | 68 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/imagenet_c_bar/test_c_bar.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os

import torch
import torchvision as tv

from transform_finder import build_transform
from utils.converters import PilToNumpy, NumpyToTensor
CIFAR_MEAN = [125.3/255, 123.0/255, 113.9/255]
CIFAR_STD = [63.0/255, 62.1/255, 66.7/255]
#This is in RGB order since that is the standard for PIL
IM_MEAN = [0.485, 0.456, 0.406]
IM_STD = [0.229, 0.224, 0.225]
def read_corruption_csv(filename):
    """Parse a corruption csv into a list of (name, severity) pairs.

    Each line has the form ``name,sev1,sev2,...``; one pair is emitted per
    severity listed on that line.
    """
    with open(filename) as f:
        rows = [line.rstrip() for line in f.readlines()]
    pairs = []
    for row in rows:
        fields = row.split(",")
        if not fields:
            continue
        name = fields[0]
        pairs.extend((name, float(s)) for s in fields[1:])
    return pairs
@torch.no_grad()
def test_c_bar(
        model,
        dataset_type,
        dataset_path,
        batch_size,
        corruption_string=None,
        loader_kwargs={},
        logger=None,
        calculate_averages=True,
        distributed=False,
        num_gpus=1
        ):
    """Evaluate ``model`` on the C-Bar corruption benchmark.

    For every (corruption, severity) pair — read from the default csv or
    parsed from ``corruption_string`` ("name-sev--name-sev...") — the clean
    test set is corrupted on the fly and the top-1 error is computed.
    Returns a dict mapping "name-sev" to error, plus per-corruption and
    overall averages when ``calculate_averages`` is set.

    NOTE(review): mutable default ``loader_kwargs={}`` is safe here because
    it is only unpacked, never mutated.  NOTE(review): ``os`` is used below
    but is not imported at the top of this file — confirm.
    """
    assert dataset_type in ['imagenet', 'cifar'],\
        "Only ImageNet and CIFAR-10 are supported."
    if corruption_string is None:
        corruption_filename = 'imagenet_c_bar.csv' if dataset_type=='imagenet'\
                else 'cifar10_c_bar.csv'
        corruptions = read_corruption_csv(corruption_filename)
    else:
        # "name-sev--name-sev..." encoding of the corruption list.
        corruptions = [(c.split("-")[0], float(c.split("-")[1])) for c in corruption_string.split("--")]
    results = {}
    for name, severity in corruptions:
        if dataset_type=='imagenet':
            transform = tv.transforms.Compose([
                tv.transforms.Resize(256),
                tv.transforms.CenterCrop(224),
                PilToNumpy(),
                build_transform(name=name, severity=severity, dataset_type=dataset_type),
                NumpyToTensor(),
                tv.transforms.Normalize(IM_MEAN, IM_STD)
                ])
            path = os.path.join(dataset_path, 'val')
            dataset = tv.datasets.ImageFolder(path, transform=transform)
        elif dataset_type=='cifar':
            transform = tv.transforms.Compose([
                PilToNumpy(),
                build_transform(name=name, severity=severity, dataset_type=dataset_type),
                NumpyToTensor(),
                tv.transforms.Normalize(CIFAR_MEAN, CIFAR_STD)
                ])
            dataset = tv.datasets.CIFAR10(dataset_path, train=False, download=False, transform=transform)
        sampler = torch.utils.data.distributed.DistributedSampler(dataset)\
                if distributed and num_gpus > 1 else None
        loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=False,
            sampler=sampler,
            drop_last=False,
            **loader_kwargs
        )
        num_correct = 0
        for curr_iter, (inputs, labels) in enumerate(loader):
            inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
            preds = model(inputs)
            correct = torch.sum(torch.argmax(preds, dim=1)==labels)
            if distributed and num_gpus > 1:
                # Sum correct counts across workers before accumulating.
                torch.distributed.all_reduce(correct)
            num_correct += correct.item()
        err = 100 * (1 - num_correct / len(dataset))
        corruption_string = "{}-{:.2f}".format(name, severity)
        if logger:
            logger.info("Top1 Error for {}: {:.2f}".format(corruption_string, err))
        results[corruption_string] = err
    if calculate_averages:
        import numpy as np
        # Average severities within each corruption, then across corruptions.
        unique_corruption_names = list(set([c.split("-")[0] for c in results]))
        avg_errs = {"{}-avg".format(u) : np.mean([results[c] for c in results if c.split("-")[0]==u])
                for u in unique_corruption_names}
        overall_err = np.mean(list(results.values()))
        results.update(avg_errs)
        results['overall-avg'] = overall_err
        if logger:
            for k,v in avg_errs.items():
                logger.info("Top1 Error for {}: {:.2f}".format(k,v))
            logger.info("Average Top1 Error: {}".format(overall_err))
    return results
| 4,285 | 36.596491 | 105 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/imagenet_c_bar/make_cifar10_c_bar.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torchvision as tv
from transform_finder import build_transform
from utils.converters import PilToNumpy, NumpyToPil
import os
import numpy as np
import torch
# Command-line interface for building the CIFAR-10-C-Bar dataset.
parser = argparse.ArgumentParser(description="Make CIFAR-10-C-Bar")
parser.add_argument('--cifar_dir', type=str, required=True,
        help='The path to the CIFAR-10 dataset. This path should contain '
        'the folder cifar-10-batches-py/')
parser.add_argument('--out_dir', type=str, default='.',
        help='The path to where CIFAR-10-C will be saved.')
parser.add_argument('--num_workers', type=int, default=10,
        help='The number of workers to build images with.')
parser.add_argument('--batch_size', type=int, default=200,
        help='Batch size of torch data loader used to parallelize '
        'data processing.')
# NOTE(review): main() below never calls np.random.seed(args.seed) in this
# script (the ImageNet variant does) -- confirm whether --seed should be wired up.
parser.add_argument('--seed', type=int, default=0,
        help='The random seed used to generate corruptions.')
# NOTE(review): main() ignores args.corruption_file and uses the hard-coded
# 'cifar10_c_bar.csv' instead -- confirm intent (default here is the ImageNet csv).
parser.add_argument('--corruption_file', type=str, default='imagenet_c_bar.csv',
        help='A file that specifies which corruptions in which severities '
        'to produce. Path is relative to the script.')
def read_corruption_csv(filename):
    """Parse a corruption specification CSV.

    Each non-empty line has the form ``name,sev1,sev2,...`` where ``name`` is
    a corruption name and the remaining fields are float severities.

    :param filename: path to the CSV file.
    :returns: dict mapping corruption name -> list of float severities.
    """
    with open(filename) as f:
        lines = [line.rstrip() for line in f.readlines()]
    corruptions = {}
    for line in lines:
        # BUG FIX: the previous check ``if not vals`` was dead code --
        # str.split always returns at least one element, so a blank line
        # used to create a bogus {'': []} entry.  Skip blank lines instead.
        if not line:
            continue
        vals = line.split(",")
        corruptions[vals[0]] = [float(v) for v in vals[1:]]
    return corruptions
def main():
    """Build CIFAR-10-C-Bar.

    Applies every corruption listed in cifar10_c_bar.csv (shipped next to
    this script) to the CIFAR-10 test set.  For each corruption, images for
    all severities are stacked into one uint8 array of shape
    (num_severities * 10000, 32, 32, 3) and saved to
    ``<out_dir>/CIFAR-10-C-Bar/<name>.npy``; labels go to ``labels.npy``.
    """
    args = parser.parse_args()
    dataset_path = args.cifar_dir
    out_dir = os.path.join(args.out_dir, 'CIFAR-10-C-Bar')
    bs = args.batch_size
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    # NOTE(review): args.corruption_file and args.seed are ignored here;
    # the corruption list is always read from cifar10_c_bar.csv.
    file_dir = os.path.dirname(os.path.realpath(__file__))
    corruption_csv = os.path.join(file_dir, 'cifar10_c_bar.csv')
    corruptions = read_corruption_csv(corruption_csv)
    for name, severities in corruptions.items():
        data = np.zeros((len(severities)*10000, 32, 32, 3)).astype(np.uint8)
        # BUG FIX: np.int was deprecated and then removed in NumPy 1.24;
        # the builtin int is the documented replacement.
        labels = np.zeros(len(severities)*10000).astype(int)
        for i, severity in enumerate(severities):
            print("Starting {}-{:.2f}...".format(name, severity))
            transform = tv.transforms.Compose([
                PilToNumpy(),
                build_transform(name=name, severity=severity, dataset_type='cifar'),
                ])
            dataset = tv.datasets.CIFAR10(dataset_path, train=False, download=False, transform=transform)
            loader = torch.utils.data.DataLoader(
                    dataset,
                    shuffle=False,
                    sampler=None,
                    drop_last=False,
                    pin_memory=False,
                    num_workers=args.num_workers,
                    batch_size=bs
                    )
            for j, (im, label) in enumerate(loader):
                if im.size(0)==bs:
                    # Full batch: copy it into its slice of the output array.
                    data[i*10000+j*bs:i*10000+bs*(j+1),:,:,:] = im.numpy().astype(np.uint8)
                    labels[i*10000+j*bs:i*10000+bs*(j+1)] = label.numpy()
                else:
                    # BUG FIX: the final partial batch previously used
                    # ``i*10000+j`` as the slice start (missing the *bs
                    # factor), which mis-addressed the tail of the array.
                    start = i*10000 + j*bs
                    data[start:start+im.size(0),:,:,:] = im.numpy().astype(np.uint8)
                    labels[start:start+im.size(0)] = label.numpy()
        out_file = os.path.join(out_dir, name + ".npy")
        print("Saving {} to {}.".format(name, out_file))
        np.save(out_file, data)
        labels_file = os.path.join(out_dir, "labels.npy")
        np.save(labels_file, labels)
# Script entry point.
if __name__=="__main__":
    main()
| 3,650 | 37.840426 | 105 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/imagenet_c_bar/make_imagenet_c_bar.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torchvision as tv
from transform_finder import build_transform
from utils.converters import PilToNumpy, NumpyToPil
import os
import numpy as np
import torch
from PIL import Image
# Command-line interface for building the ImageNet-C-Bar dataset.
# BUG FIX: the description was copy-pasted from the CIFAR script.
parser = argparse.ArgumentParser(description="Make ImageNet-C-Bar")
parser.add_argument('--imagenet_dir', type=str, required=True,
        help='The path to the ImageNet dataset. This path should contain '
        'the folder val/')
parser.add_argument('--out_dir', type=str, default='.',
        help='The path to where ImageNet-C will be saved.')
parser.add_argument('--num_workers', type=int, default=10,
        help='The number of workers to build images with.')
parser.add_argument('--batch_size', type=int, default=100,
        help='Batch size of torch data loader used to parallelize '
        'data processing.')
parser.add_argument('--corruption_file', type=str, default='imagenet_c_bar.csv',
        help='A file that specifies which corruptions in which severities '
        'to produce. Path is relative to the script.')
parser.add_argument('--seed', type=int, default=0,
        help='The random seed used to generate corruptions.')
class SavingDataset(tv.datasets.ImageFolder):
    """An ImageFolder that also writes every accessed (transformed) image
    to ``out_dir``, mirroring the per-class folder layout."""

    def __init__(self, root, out_dir, transform=None):
        super(SavingDataset, self).__init__(root, transform=transform)
        self.out_dir = out_dir

    def __getitem__(self, index):
        image, label = super(SavingDataset, self).__getitem__(index)
        # Mirror the class sub-directory under the output root.
        target_dir = os.path.join(self.out_dir, self.classes[label])
        try:
            os.mkdir(target_dir)
        except FileExistsError:
            pass
        # Keep the source file's basename for the saved copy.
        basename = os.path.basename(self.samples[index][0])
        destination = os.path.join(target_dir, basename)
        Image.fromarray(np.uint8(image)).save(
            destination, quality=85, optimize=True)
        return image, label
def read_corruption_csv(filename):
    """Parse a corruption specification CSV into a dict.

    Lines look like ``name,sev1,sev2,...`` (corruption name followed by
    float severities).  Blank lines are skipped.

    :param filename: path to the CSV file.
    :returns: dict mapping corruption name -> list of float severities.
    """
    with open(filename) as f:
        lines = [line.rstrip() for line in f.readlines()]
    corruptions = {}
    for line in lines:
        # BUG FIX: ``if not vals`` after split was dead code (split never
        # returns an empty list), so blank lines produced a spurious
        # {'': []} entry.  Skip them before splitting.
        if not line:
            continue
        vals = line.split(",")
        corruptions[vals[0]] = [float(v) for v in vals[1:]]
    return corruptions
def main():
    """Build ImageNet-C-Bar: apply each corruption/severity in the
    corruption CSV to the ImageNet validation set, saving results under
    ``<out_dir>/ImageNet-C-Bar/<name>/<severity>/<class>/``.
    """
    args = parser.parse_args()
    dataset_path = args.imagenet_dir
    corruption_file = args.corruption_file
    out_dir = os.path.join(args.out_dir, 'ImageNet-C-Bar')
    np.random.seed(args.seed)
    bs = args.batch_size
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    # The corruption CSV path is resolved relative to this script file.
    file_dir = os.path.dirname(os.path.realpath(__file__))
    corruption_csv = os.path.join(file_dir, corruption_file)
    corruptions = read_corruption_csv(corruption_csv)
    for name, severities in corruptions.items():
        corruption_dir = os.path.join(out_dir, name)
        if not os.path.exists(corruption_dir):
            os.mkdir(corruption_dir)
        for i, severity in enumerate(severities):
            # One output folder per severity, e.g. "1.00".
            severity_dir = os.path.join(corruption_dir, "{:.2f}".format(severity))
            if not os.path.exists(severity_dir):
                os.mkdir(severity_dir)
            print("Starting {}-{:.2f}...".format(name, severity))
            transform = tv.transforms.Compose([
                tv.transforms.Resize(256),
                tv.transforms.CenterCrop(224),
                PilToNumpy(),
                build_transform(name=name, severity=severity, dataset_type='imagenet'),
                ])
            path = os.path.join(dataset_path, 'val')
            # SavingDataset writes each transformed image to severity_dir
            # as a side effect of __getitem__.
            dataset = SavingDataset(path, severity_dir, transform=transform)
            loader = torch.utils.data.DataLoader(
                    dataset,
                    shuffle=False,
                    sampler=None,
                    drop_last=False,
                    pin_memory=False,
                    num_workers=args.num_workers,
                    batch_size=bs
                    )
            # The loop body only reports progress; iterating the loader is
            # what triggers SavingDataset to write the images to disk.
            for j, (im, label) in enumerate(loader):
                if (j+1) % 10 == 0:
                    print("Completed {}/{}".format(j, len(loader)))
# Script entry point.
if __name__=="__main__":
    main()
| 4,256 | 36.342105 | 87 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/imagenet_c_bar/utils/converters.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from PIL import Image
import torch
class PilToNumpy(object):
    """Convert a PIL image to a numpy array.

    :param as_float: if True, output dtype is float32; otherwise uint8.
    :param scaled_to_one: if True (requires as_float), divide by 255 so
        values lie in [0, 1].
    """
    def __init__(self, as_float=False, scaled_to_one=False):
        self.as_float = as_float
        self.scaled_to_one = scaled_to_one
        assert (not scaled_to_one) or (as_float and scaled_to_one),\
                "Must output a float if rescaling to one."

    def __call__(self, image):
        if not self.as_float:
            return np.array(image).astype(np.uint8)
        # BUG FIX: this branch previously read ``elif notself.scaled_to_one:``
        # (a missing space), raising NameError for every unscaled float
        # conversion.
        elif not self.scaled_to_one:
            return np.array(image).astype(np.float32)
        else:
            return np.array(image).astype(np.float32) / 255
class NumpyToPil(object):
    """Wrap ``PIL.Image.fromarray`` as a callable transform object."""
    def __init__(self):
        pass

    def __call__(self, image):
        # Expects an HWC uint8 numpy array.
        pil_image = Image.fromarray(image)
        return pil_image
class NumpyToTensor(object):
    """Convert a numpy image array into a float torch tensor.

    :param HWC_to_CHW: transpose the (H, W, C) input into the (C, H, W)
        layout torch modules expect.
    :param bit_to_float: rescale 8-bit pixel values into [0, 1].
    """
    def __init__(self, HWC_to_CHW=True, bit_to_float=True):
        self.HWC_to_CHW = HWC_to_CHW
        self.bit_to_float = bit_to_float

    def __call__(self, image):
        array = image.astype(np.float32)
        if self.bit_to_float:
            array = array / 255
        if self.HWC_to_CHW:
            array = array.transpose(2, 0, 1)
        return torch.Tensor(array)
| 1,351 | 29.044444 | 68 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/notebook_utils/training_loop.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def train_model(model, dataset, num_workers, gpu_device):
    """Train ``model`` on ``dataset`` for 100 epochs with SGD and a cosine
    learning-rate schedule, then leave the model in eval mode.

    :param model: torch module, assumed already placed on ``gpu_device``.
    :param dataset: torch dataset yielding (image, label) pairs.
    :param num_workers: DataLoader worker count.
    :param gpu_device: CUDA device index used for inputs/labels/loss.
    """
    max_epochs = 100
    batch_size = 128
    base_lr = 0.1
    # Cosine learning rate decay
    def get_lr(cur_epoch):
        return 0.5 * base_lr * (1.0 + np.cos(np.pi * cur_epoch / max_epochs))
    optim = torch.optim.SGD(model.parameters(),
            lr=base_lr,
            nesterov=True,
            momentum=0.9,
            weight_decay=0.0005,
            dampening=0.0,
            )
    dataloader = torch.utils.data.DataLoader(
            dataset=dataset,
            # CONSISTENCY FIX: was a hard-coded 128 duplicating batch_size.
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
            pin_memory=True,
            drop_last=True
            )
    loss_fun = torch.nn.CrossEntropyLoss().cuda(device=gpu_device)
    model.train()
    for cur_epoch in range(max_epochs):
        # Set learning rate for current epoch
        for param_group in optim.param_groups:
            param_group['lr'] = get_lr(cur_epoch)
        # BUG FIX: epoch_loss was initialised once outside the epoch loop, so
        # every epoch after the first reported a running mixture of previous
        # (already averaged) losses rather than the current epoch's average.
        epoch_loss = 0
        for inputs, labels in dataloader:
            inputs = inputs.cuda(device=gpu_device)
            labels = labels.cuda(device=gpu_device, non_blocking=True)
            preds = model(inputs)
            loss = loss_fun(preds, labels)
            optim.zero_grad()
            loss.backward()
            optim.step()
            epoch_loss += loss.item()
        epoch_loss /= len(dataloader)
        print("Completed epoch {}. Average training loss: {}".format(cur_epoch+1, epoch_loss))
    model.eval()
| 1,811 | 27.3125 | 95 | py |
augmentation-corruption-fbr_main | augmentation-corruption-fbr_main/notebook_utils/wideresnet.py | # This source code is adapted from code licensed under the MIT license
# found in third_party/wideresnet_license from the root directory of
# this source tree.
"""WideResNet implementation (https://arxiv.org/abs/1605.07146)."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class BasicBlock(nn.Module):
  """Basic ResNet block.

  Pre-activation style: BN -> ReLU -> 3x3 conv, twice, plus a residual
  shortcut.  When the channel count changes, a 1x1 strided conv is used as
  the shortcut and the pre-activation is shared with it.
  """

  def __init__(self, in_planes, out_planes, stride, drop_rate=0.0):
    super(BasicBlock, self).__init__()
    self.bn1 = nn.BatchNorm2d(in_planes)
    self.relu1 = nn.ReLU(inplace=True)
    self.conv1 = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False)
    self.bn2 = nn.BatchNorm2d(out_planes)
    self.relu2 = nn.ReLU(inplace=True)
    self.conv2 = nn.Conv2d(
        out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
    self.drop_rate = drop_rate
    self.is_in_equal_out = (in_planes == out_planes)
    # 1x1 conv shortcut only when in/out widths differ; otherwise identity.
    self.conv_shortcut = (not self.is_in_equal_out) and nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        padding=0,
        bias=False) or None

  def forward(self, x):
    # When widths differ, the BN/ReLU pre-activation is applied to x itself
    # so the conv shortcut also sees the activated input; otherwise it is
    # kept in a separate `out` and the raw x feeds the identity shortcut.
    if not self.is_in_equal_out:
      x = self.relu1(self.bn1(x))
    else:
      out = self.relu1(self.bn1(x))
    if self.is_in_equal_out:
      out = self.relu2(self.bn2(self.conv1(out)))
    else:
      out = self.relu2(self.bn2(self.conv1(x)))
    if self.drop_rate > 0:
      out = F.dropout(out, p=self.drop_rate, training=self.training)
    out = self.conv2(out)
    if not self.is_in_equal_out:
      return torch.add(self.conv_shortcut(x), out)
    else:
      return torch.add(x, out)
class NetworkBlock(nn.Module):
  """Stacks ``nb_layers`` copies of ``block`` into one sequential stage."""

  def __init__(self,
               nb_layers,
               in_planes,
               out_planes,
               block,
               stride,
               drop_rate=0.0):
    super(NetworkBlock, self).__init__()
    self.layer = self._make_layer(block, in_planes, out_planes, nb_layers,
                                  stride, drop_rate)

  def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,
                  drop_rate):
    # Only the first block changes channel count and stride; every later
    # block maps out_planes -> out_planes with stride 1.
    blocks = []
    for idx in range(nb_layers):
      first = (idx == 0)
      blocks.append(
          block(in_planes if first else out_planes, out_planes,
                stride if first else 1, drop_rate))
    return nn.Sequential(*blocks)

  def forward(self, x):
    return self.layer(x)
class WideResNet(nn.Module):
  """WideResNet class (https://arxiv.org/abs/1605.07146).

  Three stages of BasicBlocks over a 3x3 stem conv, followed by BN/ReLU,
  global average pooling and a linear classifier.
  """

  def __init__(self, depth, num_classes, widen_factor=1, drop_rate=0.0):
    super(WideResNet, self).__init__()
    self.depth = depth
    self.widen_factor = widen_factor
    # Channel widths for the stem and the three stages.
    n_channels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
    # Depth must have the form 6n+4: n blocks per stage across 3 stages,
    # each block holding 2 convs, plus the stem conv and the classifier.
    assert (depth - 4) % 6 == 0
    n = (depth - 4) // 6
    block = BasicBlock
    # 1st conv before any network block
    self.conv1 = nn.Conv2d(
        3, n_channels[0], kernel_size=3, stride=1, padding=1, bias=False)
    # 1st block
    self.block1 = NetworkBlock(n, n_channels[0], n_channels[1], block, 1,
                               drop_rate)
    # 2nd block
    self.block2 = NetworkBlock(n, n_channels[1], n_channels[2], block, 2,
                               drop_rate)
    # 3rd block
    self.block3 = NetworkBlock(n, n_channels[2], n_channels[3], block, 2,
                               drop_rate)
    # global average pooling and classifier
    self.bn1 = nn.BatchNorm2d(n_channels[3])
    self.relu = nn.ReLU(inplace=True)
    self.fc = nn.Linear(n_channels[3], num_classes)
    self.n_channels = n_channels[3]
    # He-style init for convs, unit-weight/zero-bias for BN, zero FC bias.
    for m in self.modules():
      if isinstance(m, nn.Conv2d):
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
      elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()
      elif isinstance(m, nn.Linear):
        m.bias.data.zero_()

  def forward(self, x):
    out = self.conv1(x)
    out = self.block1(out)
    out = self.block2(out)
    out = self.block3(out)
    out = self.relu(self.bn1(out))
    # Fixed 8x8 pooling kernel -- presumably assumes 32x32 inputs (CIFAR);
    # confirm before feeding other resolutions.
    out = F.avg_pool2d(out, 8)
    out = out.view(-1, self.n_channels)
    self.features = out #Expose penultimate layer for access as features
    return self.fc(out)
# Stage depths for ImageNet models
# NOTE(review): not referenced anywhere in this module -- presumably kept
# for ImageNet ResNet variants; confirm before removing.
_IN_STAGE_DS = {
    18: (2, 2, 2, 2),
    50: (3, 4, 6, 3),
    101: (3, 4, 23, 3),
    152: (3, 8, 36, 3),
}
| 4,479 | 30.111111 | 79 | py |
jupyter-book | jupyter-book-master/tests/test_config.py | # from pathlib import Path
import jsonschema
import pytest
import sphinx as sphinx_build
from jupyter_book.cli.main import sphinx
from jupyter_book.config import get_final_config, validate_yaml
# Enables pytest's "pytester" fixture for tests in this module.
pytest_plugins = "pytester"
# Suffix for regression-data basenames that differ across Sphinx major versions.
SPHINX_VERSION = f".sphinx{sphinx_build.version_info[0]}"
@pytest.mark.parametrize(
    "user_config",
    [
        {},
        {"title": "hallo"},
        {"html": {"extra_footer": ""}},
        {"execute": {"execute_notebooks": "cache"}},
        {
            "parse": {
                "myst_enable_extensions": ["linkify"],
                "myst_dmath_double_inline": True,
            }
        },
        {"latex": {"latex_documents": {"targetname": "book.tex", "title": "other"}}},
        {"launch_buttons": {"binderhub_url": "other"}},
        {"repository": {"url": "other"}},
        {"exclude_patterns": ["new"]},
        {
            "sphinx": {
                "extra_extensions": ["other"],
                "local_extensions": {"helloworld": "./ext"},
                "config": {
                    "html_theme_options": {
                        "launch_buttons": {"binderhub_url": "other"},
                        "analytics": {"google_analytics_id": ""},
                    },
                    "html_theme": "other",
                    "new": "value",
                },
            }
        },
        {
            "sphinx": {
                "extra_extensions": ["other"],
                "local_extensions": {"helloworld": "./ext"},
                "recursive_update": True,
                "config": {
                    "html_theme_options": {
                        "launch_buttons": {"binderhub_url": "other"}
                    },
                    "html_theme": "other",
                    "new": "value",
                },
            }
        },
    ],
    ids=[
        "empty",
        "title",
        "html.extra_footer",
        "execute.method",
        "extended_syntax",
        "latex_doc",
        "launch_buttons",
        "repository",
        "exclude_patterns",
        "sphinx-default",
        "sphinx-recurse",
    ],
)
def test_get_final_config(user_config, data_regression):
    """Regression-test get_final_config across representative user configs.

    Each parametrized case exercises one section of _config.yml; the full
    (user_config, final_config, metadata) triple is checked against stored
    regression data.
    """
    cli_config = {"latex_individualpages": False}
    final_config, metadata = get_final_config(
        user_yaml=user_config,
        cli_config=cli_config,
        validate=True,
        raise_on_invalid=True,
    )
    data_regression.check(
        {"_user_config": user_config, "final": final_config, "metadata": metadata}
    )
def test_validate_yaml():
    """validate_yaml should raise only when asked, warn otherwise."""
    bad_config = {"title": 1}
    good_config = {"title": ""}
    with pytest.raises(jsonschema.ValidationError):
        validate_yaml(bad_config, raise_on_errors=True)
    warning = validate_yaml(bad_config, raise_on_errors=False)
    assert "Warning" in warning
    assert validate_yaml(good_config, raise_on_errors=False) is None
def test_config_sphinx_command_only_build_toc_files(
    cli, temp_with_override, file_regression
):
    """The generated conf.py should reflect both config options.

    BUG FIX: _config.yml was previously written twice -- Path.write_text
    truncates, so the second write discarded ``only_build_toc_files: True``
    and the option under test never reached the build.  Write both keys in
    a single file instead.
    """
    temp_with_override.joinpath("_config.yml").write_text(
        "only_build_toc_files: True\nexclude_patterns: [test_config/*]\n",
        encoding="utf8",
    )
    temp_with_override.joinpath("_toc.yml").write_text("root: intro\n", encoding="utf8")
    cli.invoke(sphinx, temp_with_override.as_posix())
    assert temp_with_override.joinpath("conf.py").exists()
    output = temp_with_override.joinpath("conf.py").read_text(encoding="utf8")
    file_regression.check(output, encoding="utf8")
def test_config_sphinx_command(cli, temp_with_override, file_regression):
    """`jb config sphinx` emits a conf.py matching the stored regression file."""
    book_dir = temp_with_override
    book_dir.joinpath("_config.yml").write_text("title: test\n", encoding="utf8")
    book_dir.joinpath("_toc.yml").write_text("root: intro\n", encoding="utf8")
    cli.invoke(sphinx, book_dir.as_posix())
    conf_path = book_dir.joinpath("conf.py")
    assert conf_path.exists()
    file_regression.check(conf_path.read_text(encoding="utf8"), encoding="utf8")
# TODO sphinx-external-toc now handles appending to exclude_patterns
# but we may want to add similar tests there, checking the output of exclude_patterns
# @pytest.mark.parametrize(
# "toc_file, filename",
# [("p.md", "p.md"), ("p", "p.md"), ("[]p", "[]p.md"), ("[t]p.md", "[t]p.md")],
# )
# def test_only_build_toc_files(testdir, toc_file, filename):
# cli_config = {"latex_individualpages": False}
# toc = Path("toc.yml")
# toc.write_text(f"- file: '{toc_file}'\n")
# Path(filename).write_text("")
# Path("exclude.md").write_text("")
# user_config = {"only_build_toc_files": True}
# final_config, metadata = get_final_config(
# user_yaml=user_config,
# cli_config=cli_config,
# validate=True,
# raise_on_invalid=True,
# )
# assert "exclude.md" in final_config["exclude_patterns"]
# assert filename not in final_config["exclude_patterns"]
# def test_only_build_toc_files_with_exclude_patterns(testdir):
# cli_config = {"latex_individualpages": False}
# toc = Path("toc.yml")
# toc.write_text("- file: landing\n")
# Path("landing.md").write_text("")
# Path("exclude.md").write_text("")
# user_config = {
# "only_build_toc_files": True,
# "exclude_patterns": ["my/*", "patterns"],
# }
# final_config, metadata = get_final_config(
# user_yaml=user_config,
# cli_config=cli_config,
# validate=True,
# raise_on_invalid=True,
# )
# assert "exclude.md" in final_config["exclude_patterns"]
# assert "my/*" in final_config["exclude_patterns"]
# assert "patterns" in final_config["exclude_patterns"]
# assert "landing.md" not in final_config["exclude_patterns"]
# def test_only_build_toc_files_non_default_source_dir(testdir):
# cli_config = {"latex_individualpages": False}
# toc = Path("toc.yml")
# toc.write_text("- file: landing\n")
# sourcedir = Path("s")
# subdir = sourcedir / "subdir"
# subdir.mkdir(parents=True)
# Path(sourcedir / "landing.md").write_text("")
# Path(sourcedir / "exclude.md").write_text("")
# Path(subdir / "sub.md").write_text("")
# user_config = {"only_build_toc_files": True}
# final_config, metadata = get_final_config(
# user_yaml=user_config,
# cli_config=cli_config,
# validate=True,
# raise_on_invalid=True,
# sourcedir=sourcedir,
# )
# assert "exclude.md" in final_config["exclude_patterns"]
# assert "subdir/sub.md" in final_config["exclude_patterns"]
# assert "landing.md" not in final_config["exclude_patterns"]
# def test_only_build_toc_files_missing_toc(testdir):
# cli_config = {"latex_individualpages": False}
# user_config = {"only_build_toc_files": True}
# with pytest.raises(ValueError, match=r".*you must have a toc.*"):
# get_final_config(
# user_yaml=user_config,
# cli_config=cli_config,
# validate=True,
# raise_on_invalid=True,
# )
def test_get_final_config_custom_myst_extensions(data_regression):
    """Extra MyST extensions from `parse` should land in the final config."""
    user_config = {"parse": {"myst_extra_extensions": ["linkify"]}}
    final_config, metadata = get_final_config(
        user_yaml=user_config,
        cli_config={"latex_individualpages": False},
        validate=True,
        raise_on_invalid=True,
    )
    data_regression.check(
        {"_user_config": user_config, "final": final_config, "metadata": metadata}
    )
def test_get_final_config_bibtex(data_regression):
    """Specifying bibtex_bibfiles must pull in the sphinxcontrib.bibtex extension."""
    user_config = {"bibtex_bibfiles": ["tmp.bib"]}
    final_config, _metadata = get_final_config(
        user_yaml=user_config,
        cli_config={"latex_individualpages": False},
        validate=True,
        raise_on_invalid=True,
    )
    assert "sphinxcontrib.bibtex" in final_config["extensions"]
def test_mathjax_config_warning(data_regression):
    """A legacy `mathjax_config` key is translated/warned about on Sphinx>=4."""
    user_config = {
        "sphinx": {
            "config": {
                "mathjax_config": {"TeX": {"Macros": {"argmax": "arg\\,max"}}},
            }
        }
    }
    final_config, metadata = get_final_config(
        user_yaml=user_config,
        cli_config={"latex_individualpages": False},
        validate=True,
        raise_on_invalid=True,
    )
    data_regression.check(
        {"_user_config": user_config, "final": final_config, "metadata": metadata},
        basename=f"test_mathjax_config_warning{SPHINX_VERSION}",
    )
def test_mathjax_config_warning_mathjax2path(data_regression):
    """With an explicit mathjax v2 path, mathjax_config is kept (no v3 rename)."""
    user_config = {
        "sphinx": {
            "config": {
                "mathjax_config": {"TeX": {"Macros": {"argmax": "arg\\,max"}}},
                "mathjax_path": "https://cdn.jsdelivr.net/npm/mathjax@2/MathJax.js?config=TeX-AMS-MML_CHTML",  # noqa: E501
            }
        }
    }
    final_config, metadata = get_final_config(
        user_yaml=user_config,
        cli_config={"latex_individualpages": False},
        validate=True,
        raise_on_invalid=True,
    )
    data_regression.check(
        {"_user_config": user_config, "final": final_config, "metadata": metadata},
        basename=f"test_mathjax_config_warning_mathjax2path{SPHINX_VERSION}",
    )
| 9,373 | 32.359431 | 123 | py |
jupyter-book | jupyter-book-master/jupyter_book/config.py | """A small sphinx extension to let you configure a site with YAML metadata."""
import json
import sys
from functools import lru_cache
from pathlib import Path
from typing import Optional, Union
import docutils
import jsonschema
import sphinx
import yaml
from sphinx.util import logging
from .utils import _message_box
# Module-level Sphinx logger for this extension.
logger = logging.getLogger(__name__)
# Transform a "Jupyter Book" YAML configuration file into a Sphinx configuration file.
# This is so that we can choose more user-friendly words for things than Sphinx uses.
# e.g., 'logo' instead of 'html_logo'.
# Note that this should only be used for **top level** keys.
# Default Jupyter Book YAML config shipped with the package.
PATH_YAML_DEFAULT = Path(__file__).parent.joinpath("default_config.yml")
# JSON schema used to validate user _config.yml files.
PATH_JSON_SCHEMA = Path(__file__).parent.joinpath("config_schema.json")
def get_default_sphinx_config():
    """Return the baseline Sphinx configuration used for every book build."""
    default_extensions = [
        "sphinx_togglebutton",
        "sphinx_copybutton",
        "myst_nb",
        "jupyter_book",
        "sphinx_thebe",
        "sphinx_comments",
        "sphinx_external_toc",
        "sphinx.ext.intersphinx",
        "sphinx_design",
        "sphinx_book_theme",
    ]
    return {
        "extensions": default_extensions,
        "pygments_style": "sphinx",
        "html_theme": "sphinx_book_theme",
        "html_theme_options": {"search_bar_text": "Search this book..."},
        "html_sourcelink_suffix": "",
        "numfig": True,
        # Consumed (and popped) by get_final_config, not passed to Sphinx.
        "recursive_update": False,
        "suppress_warnings": ["myst.domains"],
    }
@lru_cache(1)
def get_validator():
    """Build (once, cached) the jsonschema validator for _config.yml files."""
    schema = json.loads(PATH_JSON_SCHEMA.read_text("utf8"))
    validator_cls = jsonschema.validators.validator_for(schema)
    # Ensure the shipped schema itself is valid before using it.
    validator_cls.check_schema(schema)
    return validator_cls(schema=schema)
def validate_yaml(yaml: dict, raise_on_errors=False, print_func=print):
    """Validate the YAML configuration against a JSON schema.

    Returns None when the config is valid.  On errors, either raises a
    jsonschema.ValidationError (``raise_on_errors=True``) or prints a
    warning box and returns its value.
    """
    errors = sorted(get_validator().iter_errors(yaml), key=lambda e: e.path)
    if not errors:
        return
    formatted = [
        "- {} [key path: '{}']".format(
            error.message, "/".join([str(p) for p in error.path])
        )
        for error in errors
    ]
    error_msg = "\n".join(formatted)
    if raise_on_errors:
        raise jsonschema.ValidationError(error_msg)
    return _message_box(
        f"Warning: Validation errors in config:\n{error_msg}",
        color="orange",
        print_func=print_func,
    )
def get_final_config(
    *,
    user_yaml: Optional[Union[dict, Path]] = None,
    cli_config: Optional[dict] = None,
    sourcedir: Optional[Path] = None,
    validate: bool = True,
    raise_on_invalid: bool = False,
    use_external_toc: bool = True,
):
    """Create the final configuration dictionary, to pass to sphinx.

    :param user_yaml: the user's config, either as a dict or a path to a
        YAML file written by the user
    :param cli_config: Configuration coming directly from the CLI
    :param sourcedir: path to source directory.
        If it contains a `_static` folder, we add that to the final
        `html_static_path`
    :param validate: Validate user yaml against the data schema
    :param raise_on_invalid: Raise a ValidationError, or only log a warning
    :param use_external_toc: keep the sphinx_external_toc extension and its
        configuration (stripped when False)
    :returns: (sphinx_config, config_meta) tuple

    Order of precedence is:
    1. CLI Sphinx Configuration
    2. User JB(YAML) Configuration
    3. Default JB (YAML) Configuration
    4. Default Sphinx Configuration
    """
    # get the default sphinx configuration
    sphinx_config = get_default_sphinx_config()
    # get the default yaml configuration
    yaml_config, default_yaml_update, add_paths = yaml_to_sphinx(
        yaml.safe_load(PATH_YAML_DEFAULT.read_text(encoding="utf8"))
    )
    yaml_config.update(default_yaml_update)
    # if available, get the user defined configuration
    user_yaml_recurse, user_yaml_update = {}, {}
    user_yaml_path = None
    if user_yaml:
        if isinstance(user_yaml, Path):
            user_yaml_path = user_yaml
            user_yaml = yaml.safe_load(user_yaml.read_text(encoding="utf8"))
        else:
            user_yaml = user_yaml
        if validate:
            validate_yaml(user_yaml, raise_on_errors=raise_on_invalid)
        user_yaml_recurse, user_yaml_update, add_paths = yaml_to_sphinx(user_yaml)
    # add paths from yaml config (e.g. local extensions) to sys.path,
    # resolved relative to the _config.yml location
    if user_yaml_path:
        for path in add_paths:
            path = (user_yaml_path.parent / path).resolve()
            sys.path.append(path.as_posix())
    # first merge the user yaml into the default yaml
    _recursive_update(yaml_config, user_yaml_recurse)
    # then merge this into the default sphinx config
    _recursive_update(sphinx_config, yaml_config)
    # TODO: deprecate this in version 0.14
    # Check user specified mathjax_config for sphinx >= 4
    # https://github.com/executablebooks/jupyter-book/issues/1502
    if sphinx.version_info[0] >= 4 and "mathjax_config" in user_yaml_update:
        # Switch off warning if user has specified mathjax v2
        if (
            "mathjax_path" in user_yaml_update
            and "@2" in user_yaml_update["mathjax_path"]
        ):
            # use mathjax2_config so as not to trigger deprecation warning in future
            user_yaml_update["mathjax2_config"] = user_yaml_update.pop("mathjax_config")
        else:
            _message_box(
                (
                    f"[Warning] Mathjax configuration has changed for sphinx>=4.0 [Using sphinx: {sphinx.__version__}]\n"  # noqa: E501
                    "Your _config.yml needs to be updated:\n"  # noqa: E501
                    "mathjax_config -> mathjax3_config\n"  # noqa: E501
                    "To continue using `mathjax v2` you will need to use the `mathjax_path` configuration\n"  # noqa: E501
                    "\n"
                    "See Sphinx Documentation:\n"
                    "https://www.sphinx-doc.org/en/master/usage/extensions/math.html#module-sphinx.ext.mathjax"  # noqa: E501
                ),
                color="orange",
                print_func=print,
            )
            # Automatically make the configuration name substitution so older projects build
            user_yaml_update["mathjax3_config"] = user_yaml_update.pop("mathjax_config")
    # Recursively update sphinx config if option is specified,
    # otherwise forcefully override options non-recursively
    if sphinx_config.pop("recursive_update") is True:
        _recursive_update(sphinx_config, user_yaml_update)
    else:
        sphinx_config.update(user_yaml_update)
    # This is to deal with a special case, where the override needs to be applied after
    # the sphinx app is initialised (since the default is a function)
    # TODO I'm not sure if there is a better way to deal with this?
    config_meta = {
        "latex_doc_overrides": sphinx_config.pop("latex_doc_overrides"),
        "latex_individualpages": cli_config.pop("latex_individualpages"),
    }
    if sphinx_config.get("use_jupyterbook_latex"):
        sphinx_config["extensions"].append("sphinx_jupyterbook_latex")
    # finally merge in CLI configuration
    _recursive_update(sphinx_config, cli_config or {})
    # Initialize static files
    if sourcedir and Path(sourcedir).joinpath("_static").is_dir():
        # Add the `_static` folder to html_static_path, only if it exists
        paths_static = sphinx_config.get("html_static_path", [])
        paths_static.append("_static")
        sphinx_config["html_static_path"] = paths_static
        # Search the static files paths and initialize any CSS or JS files.
        for path in paths_static:
            path = Path(sourcedir).joinpath(path)
            for path_css in path.rglob("*.css"):
                css_files = sphinx_config.get("html_css_files", [])
                css_files.append((path_css.relative_to(path)).as_posix())
                sphinx_config["html_css_files"] = css_files
            for path_js in path.rglob("*.js"):
                js_files = sphinx_config.get("html_js_files", [])
                js_files.append((path_js.relative_to(path)).as_posix())
                sphinx_config["html_js_files"] = js_files
    if not use_external_toc:
        # TODO perhaps a better logic for this?
        # remove all configuration related to sphinx_external_toc
        try:
            idx = sphinx_config["extensions"].index("sphinx_external_toc")
        except ValueError:
            pass
        else:
            sphinx_config["extensions"].pop(idx)
        sphinx_config.pop("external_toc_path", None)
        sphinx_config.pop("external_toc_exclude_missing", None)
    return sphinx_config, config_meta
def yaml_to_sphinx(yaml: dict):
    """Convert a Jupyter Book style config structure into a Sphinx config dict.

    :returns: (recursive_updates, override_updates, add_paths)

    ``recursive_updates`` are merged recursively into the Sphinx config,
    ``override_updates`` (the user's ``sphinx.config`` section) replace keys
    outright, and ``add_paths`` collects paths that are specified in the
    _config.yml (such as those provided in local_extensions) and returns
    them for adding to sys.path in a context where the _config.yml path
    is known.
    """
    sphinx_config = {}
    # top-level, string type
    YAML_TRANSLATIONS = {
        "title": "html_title",
        "author": "author",
        "copyright": "copyright",
        "logo": "html_logo",
        "project": "project",
    }
    for key, newkey in YAML_TRANSLATIONS.items():
        if key in yaml:
            val = yaml.get(key)
            if val is None:
                val = ""
            sphinx_config[newkey] = val
    # exclude patterns
    if "exclude_patterns" in yaml:
        # we always include these excludes, so as not to break back-compatibility
        defaults = {"_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"}
        defaults.update(yaml["exclude_patterns"])
        sphinx_config["exclude_patterns"] = list(sorted(defaults))
    if "only_build_toc_files" in yaml:
        sphinx_config["external_toc_exclude_missing"] = yaml["only_build_toc_files"]
    # Theme
    sphinx_config["html_theme_options"] = theme_options = {}
    if "launch_buttons" in yaml:
        theme_options["launch_buttons"] = yaml["launch_buttons"]
    repository_config = yaml.get("repository", {})
    for spx_key, yml_key in [
        ("path_to_docs", "path_to_book"),
        ("repository_url", "url"),
        ("repository_branch", "branch"),
    ]:
        if yml_key in repository_config:
            theme_options[spx_key] = repository_config[yml_key]
    # HTML
    html = yaml.get("html")
    if html:
        for spx_key, yml_key in [
            ("html_favicon", "favicon"),
            ("html_baseurl", "baseurl"),
            ("comments_config", "comments"),
            ("use_multitoc_numbering", "use_multitoc_numbering"),
        ]:
            if yml_key in html:
                sphinx_config[spx_key] = html[yml_key]
        for spx_key, yml_key in [
            ("navbar_footer_text", "navbar_footer_text"),
            # Deprecate navbar_footer_text after a release cycle
            ("extra_footer", "extra_footer"),
            ("home_page_in_toc", "home_page_in_navbar"),
            ("announcement", "announcement"),
        ]:
            if yml_key in html:
                theme_options[spx_key] = html[yml_key]
        for spx_key, yml_key in [("google_analytics_id", "google_analytics_id")]:
            if yml_key in html:
                theme_options["analytics"] = {}
                theme_options["analytics"][spx_key] = html[yml_key]
        # Pass through the buttons
        btns = ["use_repository_button", "use_edit_page_button", "use_issues_button"]
        use_buttons = {btn: html.get(btn) for btn in btns if btn in html}
        if any(use_buttons.values()):
            if not repository_config.get("url"):
                raise ValueError(
                    "To use 'repository' buttons, you must specify the repository URL"
                )
        # Update our config
        theme_options.update(use_buttons)
    # Parse and Rendering
    parse = yaml.get("parse")
    if parse:
        # Enable extra extensions
        extensions = sphinx_config.get("myst_enable_extensions", [])
        # TODO: deprecate this in v0.11.0
        if parse.get("myst_extended_syntax") is True:
            extensions.append(
                [
                    "colon_fence",
                    "dollarmath",
                    "amsmath",
                    "deflist",
                    "html_image",
                ]
            )
            _message_box(
                (
                    "myst_extended_syntax is deprecated, instead specify extensions "
                    "you wish to be enabled. See https://myst-parser.readthedocs.io/en/latest/using/syntax-optional.html"  # noqa: E501
                ),
                color="orange",
                print_func=print,
            )
        for ext in parse.get("myst_enable_extensions", []):
            if ext not in extensions:
                extensions.append(ext)
        if extensions:
            sphinx_config["myst_enable_extensions"] = extensions
        # Configuration values we'll just pass-through
        for ikey in ["myst_substitutions", "myst_url_schemes"]:
            if ikey in parse:
                sphinx_config[ikey] = parse.get(ikey)
    # Execution
    execute = yaml.get("execute")
    if execute:
        for spx_key, yml_key in [
            ("nb_execution_allow_errors", "allow_errors"),
            ("nb_execution_in_temp", "run_in_temp"),
            ("nb_output_stderr", "stderr_output"),
            ("nb_execution_timeout", "timeout"),
            ("nb_execution_cache_path", "cache"),
            ("nb_execution_mode", "execute_notebooks"),
            ("nb_execution_excludepatterns", "exclude_patterns"),
        ]:
            if yml_key in execute:
                sphinx_config[spx_key] = execute[yml_key]
        if sphinx_config.get("nb_execution_mode") is False:
            # Special case because YAML treats `off` as "False".
            sphinx_config["nb_execution_mode"] = "off"
    # LaTeX
    latex = yaml.get("latex")
    if latex:
        for spx_key, yml_key in [
            ("latex_engine", "latex_engine"),
            ("use_jupyterbook_latex", "use_jupyterbook_latex"),
        ]:
            if yml_key in latex:
                sphinx_config[spx_key] = latex[yml_key]
    # Overrides applied to latex_documents after the Sphinx app initialises.
    sphinx_config["latex_doc_overrides"] = {}
    if "title" in yaml:
        sphinx_config["latex_doc_overrides"]["title"] = yaml["title"]
    for key, val in yaml.get("latex", {}).get("latex_documents", {}).items():
        sphinx_config["latex_doc_overrides"][key] = val
    # Sphinx Configuration
    extra_extensions = yaml.get("sphinx", {}).get("extra_extensions")
    if extra_extensions:
        sphinx_config["extensions"] = get_default_sphinx_config()["extensions"]
        if not isinstance(extra_extensions, list):
            extra_extensions = [extra_extensions]
        for extension in extra_extensions:
            if extension not in sphinx_config["extensions"]:
                sphinx_config["extensions"].append(extension)
    local_extensions = yaml.get("sphinx", {}).get("local_extensions")
    # add_paths collects additional paths for sys.path
    add_paths = []
    if local_extensions:
        if "extensions" not in sphinx_config:
            sphinx_config["extensions"] = get_default_sphinx_config()["extensions"]
        for extension, path in local_extensions.items():
            if extension not in sphinx_config["extensions"]:
                sphinx_config["extensions"].append(extension)
            if path not in sys.path:
                add_paths.append(path)
    # Overwrite sphinx config or not
    if "recursive_update" in yaml.get("sphinx", {}):
        sphinx_config["recursive_update"] = yaml.get("sphinx", {}).get(
            "recursive_update"
        )
    # Citations
    sphinxcontrib_bibtex_configs = ["bibtex_bibfiles", "bibtex_reference_style"]
    if any(bibtex_config in yaml for bibtex_config in sphinxcontrib_bibtex_configs):
        # Load sphincontrib-bibtex
        if "extensions" not in sphinx_config:
            sphinx_config["extensions"] = get_default_sphinx_config()["extensions"]
        sphinx_config["extensions"].append("sphinxcontrib.bibtex")
        # Report Bug in Specific Docutils Versions
        # TODO: Remove when docutils>=0.20 is pinned in jupyter-book
        # https://github.com/mcmtroffaes/sphinxcontrib-bibtex/issues/322
        # NOTE(review): logger.warn is a deprecated alias of logger.warning.
        if (0, 18) <= docutils.__version_info__ < (0, 20):
            logger.warn(
                "[sphinxcontrib-bibtex] Beware that docutils versions 0.18 and 0.19 "
                "(you are running {}) are known to generate invalid html for citations. "
                "If this issue affects you, please use docutils<0.18 (or >=0.20 once released) "
                "instead. "
                "For more details, see https://sourceforge.net/p/docutils/patches/195/".format(
                    docutils.__version__
                )
            )
        # Pass through configuration
        if yaml.get("bibtex_bibfiles"):
            if isinstance(yaml.get("bibtex_bibfiles"), str):
                yaml["bibtex_bibfiles"] = [yaml["bibtex_bibfiles"]]
            sphinx_config["bibtex_bibfiles"] = yaml["bibtex_bibfiles"]
    # items in sphinx.config will override defaults,
    # rather than recursively updating them
    return sphinx_config, yaml.get("sphinx", {}).get("config") or {}, add_paths
def _recursive_update(config, update, list_extend=False):
"""Update the dict `config` with `update` recursively.
This *updates* nested dicts / lists instead of replacing them.
"""
for key, val in update.items():
if isinstance(config.get(key), dict):
# if a dict value update is set to None,
# then the entire dict will be "wiped",
# otherwise it is recursively updated.
if isinstance(val, dict):
_recursive_update(config[key], val, list_extend)
else:
config[key] = val
elif isinstance(config.get(key), list):
if isinstance(val, list) and list_extend:
config[key].extend(val)
else:
config[key] = val
else:
config[key] = val
| 18,195 | 38.04721 | 135 | py |
jupyter-book | jupyter-book-master/jupyter_book/pdf.py | """Commands to facilitate conversion to PDF."""
import asyncio
import os
from copy import copy
from pathlib import Path
from .utils import _error, _message_box
# LaTeX Documents Tuple Spec
LATEX_DOCUMENTS = (
"startdocname",
"targetname",
"title",
"author",
"theme",
"toctree_only",
)
def html_to_pdf(html_file, pdf_file):
    """
    Convert arbitrary HTML file to PDF using pyppeteer.

    Parameters
    ----------
    html_file : str
        A path to an HTML file to convert to PDF
    pdf_file : str
        A path to an output PDF file that will be created
    """
    # asyncio.get_event_loop() is deprecated (Python 3.10+) when no loop
    # is running; asyncio.run creates a fresh loop and tears it down.
    asyncio.run(_html_to_pdf(html_file, pdf_file))
async def _html_to_pdf(html_file, pdf_file):
    """Drive a headless Chromium (via pyppeteer) to print *html_file* to *pdf_file*."""
    try:
        from pyppeteer import launch
    except ImportError:
        # _error raises; pyppeteer is an optional dependency.
        _error(
            "Generating PDF from book HTML requires the pyppeteer package. "
            "Install it first.",
            ImportError,
        )
    # --no-sandbox is required when running as root (e.g. in CI containers).
    browser = await launch(args=["--no-sandbox"])
    page = await browser.newPage()
    # Absolute path is needed
    html_file = Path(html_file).resolve()
    # Waiting for networkidle0 seems to let mathjax render
    await page.goto(f"file:///{html_file}", {"waitUntil": ["networkidle2"]})
    # Give it *some* margins to make it look a little prettier
    # I just made these up
    page_margins = {"left": "0in", "right": "0in", "top": ".5in", "bottom": ".5in"}
    # Injected CSS keeps notebook input/output cells from being split
    # across page boundaries in the printed PDF.
    await page.addStyleTag(
        {
            "content": """
            div.cell_input {
            -webkit-column-break-inside: avoid;
            page-break-inside: avoid;
            break-inside: avoid;
            }
            div.cell_output {
            -webkit-column-break-inside: avoid;
            page-break-inside: avoid;
            break-inside: avoid;
            }
            """
        }
    )
    await page.pdf({"path": pdf_file, "margin": page_margins})
    await browser.close()
def update_latex_documents(latex_documents, latexoverrides):
    """
    Apply latexoverrides from _config.yml to latex_documents tuple
    """
    # Multi-document configurations are assumed to be hand-crafted;
    # leave them untouched and just inform the user.
    if len(latex_documents) > 1:
        _message_box(
            "Latex documents specified as a multi element list in the _config",
            "This suggests the user has made custom settings to their build",
            "[Skipping] processing of automatic latex overrides",
        )
        return latex_documents

    document = latex_documents[0]
    # Walk the canonical field order; the trailing fields (e.g.
    # toctree_only) are optional, so stop at the tuple's actual length.
    merged = [
        latexoverrides[field] if field in latexoverrides else document[pos]
        for pos, field in enumerate(LATEX_DOCUMENTS)
        if pos < len(document)
    ]
    return [tuple(merged)]
def latex_document_components(latex_documents):
    """Return a dictionary of latex_document components by name"""
    # zip stops at the shorter sequence, so a short tuple simply yields
    # fewer named components (the trailing fields are optional).
    return dict(zip(LATEX_DOCUMENTS, latex_documents))
def latex_document_tuple(components):
    """Return a tuple for latex_documents from named components dictionary"""
    # Emit fields in the canonical LATEX_DOCUMENTS order, skipping any
    # that are absent from the components mapping.
    return tuple(
        components[field] for field in LATEX_DOCUMENTS if field in components
    )
def autobuild_singlepage_latexdocs(app):
    """
    Build list of tuples for each document in the Project
    [((startdocname, targetname, title, author, theme, toctree_only))]
    https://www.sphinx-doc.org/en/3.x/usage/configuration.html#confval-latex_documents

    Expands the single configured latex_documents entry into one entry per
    page in the project, so each page can be built as its own PDF.
    """
    latex_documents = app.config.latex_documents
    # A hand-written multi-document list is left alone.
    if len(latex_documents) > 1:
        _message_box(
            "Latex documents specified as a multi element list in the _config",
            "This suggests the user has made custom settings to their build",
            "[Skipping] --individualpages option",
        )
        return latex_documents
    # Extract latex_documents updated tuple
    latex_documents = latex_documents[0]
    titles = app.env.titles
    master_doc = app.config.master_doc
    sourcedir = os.path.dirname(master_doc)
    # Construct Tuples
    # The single configured tuple supplies the defaults for every page.
    DEFAULT_VALUES = latex_document_components(latex_documents)
    latex_documents = []
    for doc, title in titles.items():
        latex_doc = copy(DEFAULT_VALUES)
        # if doc has a subdir relative to src dir
        docname = None
        parts = Path(doc).parts
        latex_doc["startdocname"] = doc
        # The master document keeps the originally configured target name;
        # every other page derives its .tex name from its path.
        if DEFAULT_VALUES["startdocname"] == doc:
            targetdoc = DEFAULT_VALUES["targetname"]
        else:
            if sourcedir in parts:
                parts = list(parts)
                # assuming we need to remove only the first instance
                parts.remove(sourcedir)
            # Flatten subdirectories into a dash-separated file name.
            docname = "-".join(parts)
            targetdoc = docname + ".tex"
        latex_doc["targetname"] = targetdoc
        latex_doc["title"] = title.astext()
        latex_doc = latex_document_tuple(latex_doc)
        latex_documents.append(latex_doc)
    return latex_documents
| 5,554 | 30.5625 | 86 | py |
ec-darkpattern | ec-darkpattern-master/darkpattern-auto-detection-deeplearning/trainer/trainer.py | from typing import Any, List, Tuple
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
class Trainer:
    """Thin training/evaluation harness around a HF sequence-classification model.

    The wrapped ``net`` is expected to return an object with a ``.logits``
    attribute (e.g. transformers' SequenceClassifierOutput).
    Note: the constructor parameter is spelled ``critetion`` (sic) — it is
    part of the public interface, so the typo is preserved.
    """

    def __init__(
        self,
        net: nn.Module,
        optimizer: optim.Optimizer,
        critetion: nn.Module,
        lr_scheduler: Any,
        device: torch.device,
    ) -> None:
        self.optimizer = optimizer
        self.critetion = critetion
        self.lr_scheduler = lr_scheduler
        self.device = device
        # Move the model onto the target device once, up front.
        self.net = net.to(self.device)

    def loss_fn(self, output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """Apply the configured loss module to (output, target)."""
        return self.critetion(output, target)

    def train_step(
        self, input_train: torch.Tensor, target: torch.Tensor  # TODO: change input to a string
    ) -> Tuple[torch.Tensor, torch.Tensor]:  # [loss, output]
        """One optimization step: forward, loss, backward, optimizer update."""
        self.net.train()
        output = self.net(input_train).logits  # [batch_size,label_size]
        loss = self.loss_fn(output, target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss, output

    def val_step(
        self, input_val: torch.Tensor, target: torch.Tensor  # TODO: change input to a string
    ) -> Tuple[torch.Tensor, torch.Tensor]:  # [loss, output]
        """Forward pass and loss without gradient tracking."""
        self.net.eval()
        loss, output = None, None
        with torch.no_grad():
            output = self.net(input_val).logits  # [batch_size,label_size]
            loss = self.loss_fn(output, target)
        return loss, output

    def train(self, train_loader: DataLoader) -> List[float]:
        """Run one epoch over *train_loader*; returns per-step losses.

        The LR scheduler is stepped once per epoch (after the loop).
        """
        train_losses: List[float] = []
        for i, (input_train, target) in enumerate(train_loader):
            input_train = input_train.to(self.device)
            target = target.to(self.device)
            loss, output = self.train_step(input_train, target)
            # Free the batch tensors eagerly; empty_cache keeps GPU memory
            # pressure down at the cost of some speed.
            del input_train
            del target
            torch.cuda.empty_cache()
            print(f"Train step: {i + 1}/{len(train_loader)} loss: {loss.item()}")
            train_losses.append(loss.item())
        self.lr_scheduler.step()
        return train_losses

    def validate(self, val_loader: DataLoader) -> List[float]:
        """Run one pass over *val_loader*; returns per-step losses."""
        val_losses: List[float] = []
        for i, (input_val, target) in enumerate(val_loader):
            input_val = input_val.to(self.device)
            target = target.to(self.device)
            with torch.no_grad():
                loss, output = self.val_step(input_val, target)
            del input_val
            del target
            torch.cuda.empty_cache()
            print(f"Val step: {i + 1}/{len(val_loader)} loss: {loss.item()}")
            val_losses.append(loss.item())
        return val_losses

    def test(
        self, test_loader: DataLoader
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Collect logits, targets and argmax predictions over *test_loader*.

        Returns (outputs, targets, predictions), each concatenated on CPU.
        """
        test_preds: torch.Tensor = torch.Tensor([])
        test_tgts: torch.Tensor = torch.Tensor([])
        outputs: torch.Tensor = torch.Tensor([])
        for i, (input_test, target) in enumerate(test_loader):
            input_test = input_test.to(self.device)
            target = target.to(self.device)
            _, output = self.val_step(input_test, target)
            # Accumulate on CPU to avoid holding the whole set on the GPU.
            output = output.to("cpu")
            target = target.to("cpu")
            pred = output.argmax(dim=-1)
            outputs = torch.cat((outputs, output), dim=0)
            test_preds = torch.cat((test_preds, pred), dim=0)
            test_tgts = torch.cat((test_tgts, target), dim=0)
        return outputs, test_tgts, test_preds

    def save(self, path: str) -> None:
        """Serialize the model's state_dict to *path*."""
        torch.save(self.net.state_dict(), path)
| 3,588 | 30.482456 | 82 | py |
ec-darkpattern | ec-darkpattern-master/darkpattern-auto-detection-deeplearning/models/nn/bert.py | import torch
from torch import nn
from transformers import BertModel
class DarkpatternClassifierBert(nn.Module):
    """BERT encoder + dropout + linear head for (binary) dark-pattern classification."""

    def __init__(
        self,
        pretrained: str = "bert-base-uncased",
        dropout_rate: float = 0.1,
        output_layer: nn.Linear = None,  # None -> fresh Linear(768 -> 2) per instance
    ):
        """
        Args:
            pretrained: Hugging Face model id for the BERT backbone.
            dropout_rate: Dropout applied to the [CLS] representation.
            output_layer: Optional classification head. Previously this
                defaulted to ``nn.Linear(768, 2)`` evaluated at class
                definition time, so every instance created without an
                explicit head SHARED the same layer (and its weights) —
                the classic mutable-default-argument bug. The default is
                now built per instance.
        """
        super(DarkpatternClassifierBert, self).__init__()
        self.__pretrained = pretrained
        self.bert = BertModel.from_pretrained(self.__pretrained)
        self.dropout = nn.Dropout(dropout_rate)
        if output_layer is None:
            output_layer = nn.Linear(in_features=768, out_features=2)
        self.output_layer = output_layer

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return classification logits of shape [batch_size, out_features]."""
        x = self.bert(x)[0]  # [batch_size, seq_len, 768]
        x = x[:, 0, :]  # [CLS] token representation: [batch_size, 768]
        x = self.dropout(x)
        x = self.output_layer(x)
        return x
| 801 | 31.08 | 77 | py |
ec-darkpattern | ec-darkpattern-master/darkpattern-auto-detection-deeplearning/experiments/train.py | from os.path import join
from typing import List
import hydra
import numpy as np
import pandas as pd
import torch
from const.path import CONFIG_PATH, DATASET_TSV_PATH, NN_MODEL_PICKLES_PATH
from omegaconf import DictConfig
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
from torch import nn
from torch.nn import functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LinearLR
from torch.utils.data import DataLoader, Subset
from trainer.trainer import Trainer
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from utils.dataset import DarkpatternDataset
from utils.random_seed import set_random_seed
from utils.text import text_to_tensor as _text_to_tensor
def cross_validation(
    n_fold: int,
    pretrained: str,
    batch_size: int,
    lr: float,
    start_factor: float,
    max_length: int,
    dropout: float,
    epochs: int,
    save_model: bool,
    device: torch.device = torch.device("cuda" if torch.cuda.is_available() else "cpu"),
    num_labels: int = 2,
) -> None:
    """Run stratified n-fold cross validation for dark-pattern text classification.

    A fresh pretrained classifier is fine-tuned per fold; accuracy / F1 /
    precision / recall / ROC-AUC are collected per fold and their averages
    printed at the end. Weights are optionally saved per fold.

    NOTE(review): ``dropout`` is only reported in the summary text, not
    actually applied to the model — confirm whether that is intended.

    First section: Load & Define dataset.
    """
    df = pd.read_csv(DATASET_TSV_PATH, sep="\t", encoding="utf-8")
    texts = df.text.tolist()
    labels = df.label.tolist()
    tokenizer = AutoTokenizer.from_pretrained(pretrained)
    # Closure binds the tokenizer and max_length for the dataset.
    def text_to_tensor(text: str) -> torch.Tensor:
        return _text_to_tensor(text, tokenizer, max_length)
    ds = DarkpatternDataset(texts, labels, text_to_tensor)
    """
    Execute N (= n_fold) fold cross validation.
    """
    skf = StratifiedKFold(n_splits=n_fold)
    accuracy_scores: List[float] = []
    f1_scores: List[float] = []
    precision_scores: List[float] = []
    recall_scores: List[float] = []
    roc_auc_scores: List[float] = []
    for fold, (train_idx, test_idx) in enumerate(skf.split(texts, labels)):
        """
        Define train & test dataset.
        """
        train_ds = Subset(ds, train_idx)
        test_ds = Subset(ds, test_idx)
        train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
        test_loader = DataLoader(test_ds, batch_size=batch_size, shuffle=True)
        """
        Initialize model, optimizer, loss function, lr_scheduler.
        """
        # A brand-new model per fold so folds do not leak into each other.
        net = AutoModelForSequenceClassification.from_pretrained(
            pretrained, num_labels=num_labels
        ).to(device)
        critation = nn.CrossEntropyLoss()
        optimizer = AdamW(net.parameters(), lr=lr)
        lr_scheduler = LinearLR(
            optimizer, start_factor=start_factor, total_iters=epochs
        )
        """
        Train.
        """
        trainer = Trainer(net, optimizer, critation, lr_scheduler, device)
        for epoch in range(epochs):
            # Best-effort: a failed epoch (e.g. CUDA OOM) is logged and the
            # run continues with the remaining epochs.
            try:
                trainer.train(train_loader)
            except Exception as e:
                print(e)
        """
        Evaluation.
        """
        outputs, tgt, pred = trainer.test(test_loader)
        accuracy_score = metrics.accuracy_score(tgt.numpy(), pred.numpy())
        f1_score = metrics.f1_score(tgt.numpy(), pred.numpy())
        precision_score = metrics.precision_score(tgt.numpy(), pred.numpy())
        recall_score = metrics.recall_score(tgt.numpy(), pred.numpy())
        # Positive-class probability for ROC-AUC.
        prob = F.softmax(outputs, dim=1)[:, 1]  # outputs: [batch_size, num_labels]
        roc_auc = metrics.roc_auc_score(tgt.numpy(), prob.numpy())
        accuracy_scores.append(accuracy_score)
        f1_scores.append(f1_score)
        precision_scores.append(precision_score)
        recall_scores.append(recall_score)
        roc_auc_scores.append(roc_auc)
        """
        Save model.
        """
        if save_model:
            model_path = join(NN_MODEL_PICKLES_PATH, f"{pretrained}_{fold}.pth")
            torch.save(net.state_dict(), model_path)
    """
    Display evaluation result on console.
    """
    roc_auc_score_average = np.mean(roc_auc_scores)
    f1_score_average = np.mean(f1_scores)
    accuracy_score_average = np.mean(accuracy_scores)
    precision_score_average = np.mean(precision_scores)
    recall_score_average = np.mean(recall_scores)
    roc_auc_score_average = np.mean(roc_auc_scores)
    print(
        {
            "accuracy_scores": accuracy_scores,
            "f1_scores": f1_scores,
            "precision_scores": precision_scores,
            "recall_scores": recall_scores,
            "roc_auc_scores": roc_auc_scores,
            "f1_score_average": f1_score_average,
            "accuracy_score_average": accuracy_score_average,
            "precision_score_average": precision_score_average,
            "recall_score_average": recall_score_average,
            "roc_auc_score_average": roc_auc_score_average,
        }
    )
    parameters_and_evaluation_text = f"""
    ```parameters:
        pretrained: {pretrained}
        batch_size: {batch_size}
        lr: {lr}
        max_length: {max_length}
        dropout: {dropout}
        epochs: {epochs}
        device: {device}
        num_labels: {num_labels}
    metrics for test:
        f1_score_average:{f1_score_average}
        accuracy_score_average:{accuracy_score_average}
        precision_score_average:{precision_score_average}
        recall_score_average:{recall_score_average}
        roc_auc_score_average:{roc_auc_score_average}
    ```
    """
    print(parameters_and_evaluation_text)
@hydra.main(config_path=CONFIG_PATH, config_name="config.yaml")
def main(cfg: DictConfig) -> None:
    """Hydra entry point: seed all RNGs, then run K-fold cross validation."""
    set_random_seed(cfg.random.seed)
    cross_validation(
        n_fold=cfg.train.n_fold,
        pretrained=cfg.model.pretrained,
        batch_size=cfg.train.batch_size,
        lr=cfg.train.lr,
        max_length=cfg.preprocess.max_length,
        dropout=cfg.model.dropout,
        epochs=cfg.train.epochs,
        start_factor=cfg.train.start_factor,
        save_model=cfg.train.save_model,
    )
if __name__ == "__main__":
    main()
| 6,125 | 30.415385 | 88 | py |
ec-darkpattern | ec-darkpattern-master/darkpattern-auto-detection-deeplearning/utils/dataset.py | from typing import Callable, List, Tuple
import torch
from torch.utils.data import Dataset
class DarkpatternDataset(Dataset):
    """Text-classification dataset that encodes each text lazily.

    Labels are converted to a single tensor up front; texts are turned
    into tensors on access via the supplied ``text_to_tensor`` callable,
    so tokenization cost is paid per ``__getitem__``.
    """

    def __init__(
        self,
        texts: List[str],
        labels: List[int],
        text_to_tensor: Callable[[str], torch.Tensor],
    ) -> None:
        self.texts: List[str] = texts
        self.labels: torch.Tensor = torch.tensor(labels)
        self.text_to_tensor = text_to_tensor

    def __getitem__(
        self, index: int
    ) -> Tuple[torch.Tensor, torch.Tensor]:  # (encoded text, label)
        # str() guards against non-string entries (e.g. NaN from pandas).
        encoded = self.text_to_tensor(str(self.texts[index]))
        label = self.labels[index]
        return encoded, label

    def __len__(self) -> int:
        return len(self.texts)
| 723 | 26.846154 | 74 | py |
ec-darkpattern | ec-darkpattern-master/darkpattern-auto-detection-deeplearning/utils/random_seed.py | import os
import random
import numpy as np
import torch
def set_random_seed(random_seed: int = 42) -> None:
    """Seed every RNG in use (random, hash, numpy, torch CPU + CUDA).

    Args:
        random_seed: Seed applied to all generators.
    """
    random.seed(random_seed)
    os.environ["PYTHONHASHSEED"] = str(random_seed)
    np.random.seed(random_seed)
    torch.manual_seed(random_seed)
    # Seed all visible GPUs, not only the current one.
    torch.cuda.manual_seed_all(random_seed)
    # The previous setting (cudnn.benchmark = True) lets cuDNN pick
    # non-deterministic kernels, defeating the purpose of seeding;
    # request deterministic behavior instead.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
| 341 | 21.8 | 51 | py |
ec-darkpattern | ec-darkpattern-master/darkpattern-auto-detection-deeplearning/utils/text.py | import torch
from transformers import PreTrainedTokenizer
def tensor_to_text(tensor: torch.Tensor, tokenizer: PreTrainedTokenizer) -> str:
    """Decode a tensor of token ids back into a plain string."""
    decoded = tokenizer.decode(tensor)
    return decoded
def text_to_tensor(
    text: str, tokenizer: PreTrainedTokenizer, max_length: int
) -> torch.Tensor:
    """Tokenize *text* and return its ids as a long tensor padded to *max_length*.

    NOTE(review): ``pad_to_max_length`` is deprecated in newer transformers
    releases in favor of ``padding="max_length"`` — confirm library version.
    """
    token_ids = tokenizer.encode(text, max_length=max_length, pad_to_max_length=True)
    return torch.Tensor(token_ids).to(torch.long)
| 493 | 22.52381 | 80 | py |
dolphin | dolphin-main/video_utils.py | import imageio
import torch
import numpy as np
import decord
import torchvision
from einops import rearrange
from torchvision.transforms import Resize, InterpolationMode
from utils import get_new_video_name
def prepare_video(
    video_path: str,
    resolution: int,
    device,
    dtype=torch.float16,
    normalize=True,
    start_t: float = 0,
    end_t: float = -1,
    output_fps: int = -1,
):
    """Load a video with decord, resample it to *output_fps*, and resize it.

    The larger side is scaled to *resolution* and both sides are rounded to
    multiples of 64 (diffusion models require /64 divisibility). Returns
    ``(video, output_fps)`` where video is an f×c×h×w tensor, optionally
    normalized from [0, 255] to [-1, 1].
    """
    vr = decord.VideoReader(video_path)
    initial_fps = vr.get_avg_fps()
    # -1 sentinels: keep the source fps / take the clip to its end.
    if output_fps == -1:
        output_fps = int(initial_fps)
    if end_t == -1:
        end_t = len(vr) / initial_fps
    else:
        end_t = min(len(vr) / initial_fps, end_t)
    assert 0 <= start_t < end_t
    assert output_fps > 0
    # Evenly spaced source-frame indices covering [start_t, end_t).
    start_f_ind = int(start_t * initial_fps)
    end_f_ind = int(end_t * initial_fps)
    num_f = int((end_t - start_t) * output_fps)
    sample_idx = np.linspace(start_f_ind, end_f_ind, num_f, endpoint=False).astype(int)
    video = vr.get_batch(sample_idx)
    # decord may return either a torch tensor or an NDArray depending on
    # its configured bridge.
    if torch.is_tensor(video):
        video = video.detach().cpu().numpy()
    else:
        video = video.asnumpy()
    _, h, w, _ = video.shape
    video = rearrange(video, "f h w c -> f c h w")
    video = torch.Tensor(video).to(device).to(dtype)

    # Use max if you want the larger side to be equal to resolution (e.g. 512)
    # k = float(resolution) / min(h, w)
    k = float(resolution) / max(h, w)
    h *= k
    w *= k
    # Round each side to the nearest multiple of 64.
    h = int(np.round(h / 64.0)) * 64
    w = int(np.round(w / 64.0)) * 64

    video = Resize((h, w), interpolation=InterpolationMode.BILINEAR, antialias=True)(
        video
    )
    if normalize:
        # [0, 255] -> [-1, 1]
        video = video / 127.5 - 1.0
    return video, output_fps  # video: f c h w
def create_video(frames, fps, path, rescale=False):
    """Write *frames* (iterable of h·w·c arrays) to *path* as a video.

    Each frame is tiled through make_grid, optionally rescaled from
    [-1, 1] to [0, 1], converted to uint8, and handed to imageio.
    Returns *path* so calls can be chained.
    """
    processed = []
    for frame in frames:
        grid = torchvision.utils.make_grid(torch.Tensor(frame), nrow=4)
        if rescale:
            grid = (grid + 1.0) / 2.0  # map [-1, 1] -> [0, 1]
        processed.append((grid * 255).numpy().astype(np.uint8))
    imageio.mimsave(path, processed, fps=fps)
    return path
def preprocess_video(video_path, out_path=None):
    """Resize a video (larger side -> 512) and re-save it, printing the path."""
    if out_path is None:
        out_path = get_new_video_name(video_path, func_name="preprocessed")
    frames, fps = prepare_video(video_path, resolution=512, device="cpu")
    # prepare_video yields f c h w; the writer expects f h w c.
    frames = rearrange(frames, "f c h w -> f h w c")
    create_video(frames, fps, out_path, rescale=True)
    print(f"Preprocessed video saved to {out_path}")
| 2,467 | 28.73494 | 87 | py |
dolphin | dolphin-main/utils.py | import os, sys, uuid
import importlib
import numpy as np
import torch
import random
def instantiate_from_config(config, **kwargs):
    """Instantiate the object named by ``config["target"]``.

    The target is a dotted import path; constructor arguments come from
    ``config["params"]`` (optional) merged with any extra **kwargs.

    Raises:
        KeyError: if the config has no "target" entry.
    """
    if "target" not in config:  # idiomatic membership test
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()), **kwargs)
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like ``"pkg.mod.Class"`` to the named object.

    With ``reload=True`` the containing module is re-imported first,
    which picks up on-disk edits during interactive development.
    """
    module_name, _, attr = string.rpartition(".")
    if reload:
        importlib.reload(importlib.import_module(module_name))
    return getattr(importlib.import_module(module_name, package=None), attr)
def seed_everything(seed):
    """Seed python, numpy and torch (CPU + all GPUs) RNGs; returns the seed."""
    for seeder in (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed_all,
    ):
        seeder(seed)
    return seed
def get_new_video_name(org_vid_name, func_name="update"):
    """Derive a chained output filename from *org_vid_name*.

    Output names follow ``<uuid4-prefix>_<func>_<prev>_<orig>.mp4`` so the
    provenance of a clip stays readable across successive operations.
    """
    directory, filename = os.path.split(org_vid_name)
    stem_parts = filename.split(".")[0].split("_")
    short_uuid = str(uuid.uuid4())[:4]
    if len(stem_parts) == 1:
        # Fresh input: the whole stem is the original name.
        original_stem = stem_parts[0]
    else:
        # Already-chained names must have exactly four components.
        assert len(stem_parts) == 4
        original_stem = stem_parts[3]
    previous_stem = stem_parts[0]
    new_filename = f"{short_uuid}_{func_name}_{previous_stem}_{original_stem}.mp4"
    return os.path.join(directory, new_filename)
def generate_video_name_mp4():
    """Return a fresh video path of the form ``video/<8-hex>.mp4``."""
    short_id = str(uuid.uuid4())[:8]
    return os.path.join("video", short_id + ".mp4")
def generate_audio_name():
    """Return a fresh audio path of the form ``video/<8-hex>.wav``."""
    stem = str(uuid.uuid4())[:8]
    return os.path.join("video", f"{stem}.wav")
def get_new_uuid():
    """Return the first eight hex characters of a random UUID4."""
    return f"{uuid.uuid4()}"[:8]
| 1,588 | 25.932203 | 87 | py |
dolphin | dolphin-main/modules/text2video_zero/utils.py | import os
import cv2
import numpy as np
import torch
import torchvision
from torchvision.transforms import Resize, InterpolationMode
import imageio
from einops import rearrange
from PIL import Image
import decord
def create_gif(frames, fps, rescale=False, path=None):
    """Save *frames* as a GIF; default target is ``temporal/canny_db.gif``.

    Frames are tiled via make_grid, optionally rescaled from [-1, 1] to
    [0, 1], and converted to uint8 before writing. Returns the path.
    """
    if path is None:
        out_dir = "temporal"
        os.makedirs(out_dir, exist_ok=True)
        path = os.path.join(out_dir, "canny_db.gif")
    rendered = []
    for frame in frames:
        grid = torchvision.utils.make_grid(torch.Tensor(frame), nrow=4)
        if rescale:
            grid = (grid + 1.0) / 2.0  # [-1, 1] -> [0, 1]
        rendered.append((grid * 255).numpy().astype(np.uint8))
    imageio.mimsave(path, rendered, fps=fps)
    return path
def post_process_gif(list_of_results, image_resolution):
    """Dump *list_of_results* to a fixed scratch GIF at 4 fps.

    Note: *image_resolution* is accepted for interface parity but unused.
    """
    scratch_path = "/tmp/ddxk.gif"
    imageio.mimsave(scratch_path, list_of_results, fps=4)
    return scratch_path
def HWC3(x):
    """Normalize a uint8 image to 3-channel HWC layout.

    Grayscale input (2-D or single channel) is replicated across three
    channels; RGB passes through unchanged; RGBA is alpha-composited
    over a white background.
    """
    assert x.dtype == np.uint8
    if x.ndim == 2:
        x = x[:, :, None]  # promote H,W -> H,W,1
    assert x.ndim == 3
    channels = x.shape[2]
    assert channels in (1, 3, 4)
    if channels == 3:
        return x
    if channels == 1:
        return np.concatenate([x, x, x], axis=2)
    # channels == 4: blend RGB over white using the alpha channel.
    rgb = x[:, :, 0:3].astype(np.float32)
    alpha = x[:, :, 3:4].astype(np.float32) / 255.0
    blended = rgb * alpha + 255.0 * (1.0 - alpha)
    return blended.clip(0, 255).astype(np.uint8)
def pre_process(input_video):
    """Convert an f×c×h×w video tensor into a float control tensor in [0, 1].

    Each frame is moved to HWC, forced to 3 channels via HWC3, and the
    stack is returned as f×c×h×w floats scaled by 1/255.
    """
    control_imgs = []
    for frame in input_video:
        img = rearrange(frame, "c h w -> h w c").cpu().numpy().astype(np.uint8)
        img = HWC3(img)
        H, W, C = img.shape
        # NOTE(review): resizing to the image's own (W, H) is a no-op as
        # written — presumably a target resolution was intended; confirm.
        img = cv2.resize(img, (W, H), interpolation=cv2.INTER_NEAREST)
        control_imgs.append(img[None])
    control_imgs = np.concatenate(control_imgs)
    control = torch.from_numpy(control_imgs.copy()).float() / 255.0
    return rearrange(control, "f h w c -> f c h w")
class CrossFrameAttnProcessor:
    """Attention processor that makes self-attention attend to frame 0.

    For self-attention, keys/values of every frame are replaced by those
    of the first frame in the chunk, which keeps generated video frames
    temporally consistent. Cross-attention is left untouched.
    """

    def __init__(self, unet_chunk_size=2):
        # unet_chunk_size: number of latent copies in the batch
        # (e.g. 2 for classifier-free guidance: uncond + cond).
        self.unet_chunk_size = unet_chunk_size

    def __call__(
        self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None
    ):
        batch_size, sequence_length, _ = hidden_states.shape
        attention_mask = attn.prepare_attention_mask(
            attention_mask, sequence_length, batch_size
        )
        query = attn.to_q(hidden_states)
        # encoder_hidden_states present => cross-attention (text conditioning).
        is_cross_attention = encoder_hidden_states is not None
        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.cross_attention_norm:
            encoder_hidden_states = attn.norm_cross(encoder_hidden_states)
        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)
        # Sparse Attention
        if not is_cross_attention:
            # Batch is (chunks * frames); recover the frame count.
            video_length = key.size()[0] // self.unet_chunk_size
            # former_frame_index = torch.arange(video_length) - 1
            # former_frame_index[0] = 0
            # Index [0]*f selects frame 0's keys/values for every frame
            # (the commented lines would instead select the previous frame).
            former_frame_index = [0] * video_length
            key = rearrange(key, "(b f) d c -> b f d c", f=video_length)
            key = key[:, former_frame_index]
            key = rearrange(key, "b f d c -> (b f) d c")
            value = rearrange(value, "(b f) d c -> b f d c", f=video_length)
            value = value[:, former_frame_index]
            value = rearrange(value, "b f d c -> (b f) d c")
        query = attn.head_to_batch_dim(query)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)
        attention_probs = attn.get_attention_scores(query, key, attention_mask)
        hidden_states = torch.bmm(attention_probs, value)
        hidden_states = attn.batch_to_head_dim(hidden_states)
        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)
        return hidden_states
| 3,963 | 32.880342 | 87 | py |
dolphin | dolphin-main/modules/text2video_zero/model.py | import os
from enum import Enum
import numpy as np
import tomesd
import torch
from diffusers import (
StableDiffusionInstructPix2PixPipeline,
StableDiffusionControlNetPipeline,
ControlNetModel,
UNet2DConditionModel,
)
from diffusers.schedulers import EulerAncestralDiscreteScheduler, DDIMScheduler
from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import (
CrossFrameAttnProcessor,
)
from .text_to_video_pipeline import TextToVideoPipeline
from .utils import pre_process
from video_utils import prepare_video, create_video
on_huggingspace = os.environ.get("SPACE_AUTHOR_NAME") == "PAIR"
class ModelType(Enum):
    """Supported pipeline flavors for Text2Video-Zero.

    Values were previously written as ``(1,)`` etc., which made each
    member's ``.value`` a one-element tuple because of the trailing comma.
    In the visible code members are only used as dict keys and compared by
    identity, so plain ints are a safe cleanup of the intended encoding.
    """

    Pix2Pix_Video = 1
    Text2Video = 2
    ControlNetCanny = 3
    ControlNetCannyDB = 4
    ControlNetPose = 5
    ControlNetDepth = 6
class Text2Video_Zero_Model:
    """Base wrapper around diffusers pipelines for Text2Video-Zero.

    Holds the chosen pipeline, a seeded generator, and the cross-frame
    attention processors; subclasses pick a concrete ModelType and expose
    task-specific entry points.
    """

    def __init__(self, device, dtype, **kwargs):
        self.device = device
        self.dtype = dtype
        self.generator = torch.Generator(device=device)
        # Maps each supported model type to its diffusers pipeline class.
        self.pipe_dict = {
            ModelType.Pix2Pix_Video: StableDiffusionInstructPix2PixPipeline,
            ModelType.Text2Video: TextToVideoPipeline,
            ModelType.ControlNetCanny: StableDiffusionControlNetPipeline,
            ModelType.ControlNetCannyDB: StableDiffusionControlNetPipeline,
            ModelType.ControlNetPose: StableDiffusionControlNetPipeline,
            ModelType.ControlNetDepth: StableDiffusionControlNetPipeline,
        }
        # batch_size matches the number of latent copies per step
        # (2 = CFG pair, 3 = pix2pix's triple batch).
        self.controlnet_attn_proc = CrossFrameAttnProcessor(batch_size=2)
        self.pix2pix_attn_proc = CrossFrameAttnProcessor(batch_size=3)
        self.text2video_attn_proc = CrossFrameAttnProcessor(batch_size=2)
        self.pipe = None
        self.model_type = None
        self.states = {}
        self.model_name = ""

    def set_model(self, model_type: ModelType, model_id: str, **kwargs):
        """Load the pipeline for *model_type* from *model_id* onto the device."""
        safety_checker = kwargs.pop("safety_checker", None)
        self.pipe = (
            self.pipe_dict[model_type]
            .from_pretrained(
                model_id,
                safety_checker=safety_checker,
                torch_dtype=self.dtype,
                **kwargs,
            )
            .to(self.device)
        )
        self.model_type = model_type
        self.model_name = model_id

    def inference_chunk(self, frame_ids, **kwargs):
        """Run the pipeline on the subset of frames given by *frame_ids*.

        Prompt arrays, latents, and images are sliced down to the chunk;
        returns the pipeline output (or None if no pipe is loaded).
        """
        if not hasattr(self, "pipe") or self.pipe is None:
            return
        prompt = np.array(kwargs.pop("prompt"))
        negative_prompt = np.array(kwargs.pop("negative_prompt", ""))
        latents = None
        if "latents" in kwargs:
            latents = kwargs.pop("latents")[frame_ids]
        if "image" in kwargs:
            kwargs["image"] = kwargs["image"][frame_ids]
        if "video_length" in kwargs:
            kwargs["video_length"] = len(frame_ids)
        if self.model_type == ModelType.Text2Video:
            kwargs["frame_ids"] = frame_ids
        return self.pipe(
            prompt=prompt[frame_ids].tolist(),
            negative_prompt=negative_prompt[frame_ids].tolist(),
            latents=latents,
            generator=self.generator,
            **kwargs,
        )

    def inference(self, split_to_chunks=False, chunk_size=8, **kwargs):
        """Run full-video inference, optionally chunked to bound memory.

        In chunked mode every chunk is prefixed with frame 0 (the
        cross-frame attention anchor), which is then dropped from each
        chunk's output before concatenation.
        """
        if not hasattr(self, "pipe") or self.pipe is None:
            return
        if "merging_ratio" in kwargs:
            merging_ratio = kwargs.pop("merging_ratio")
            # if merging_ratio > 0:
            # Token-merging patch is applied even at ratio 0 (see the
            # commented guard above).
            tomesd.apply_patch(self.pipe, ratio=merging_ratio)
        seed = kwargs.pop("seed", 0)
        if seed < 0:
            # Negative seed means "pick one at random".
            seed = self.generator.seed()
        kwargs.pop("generator", "")

        if "image" in kwargs:
            f = kwargs["image"].shape[0]
        else:
            f = kwargs["video_length"]

        assert "prompt" in kwargs
        prompt = [kwargs.pop("prompt")] * f
        negative_prompt = [kwargs.pop("negative_prompt", "")] * f

        frames_counter = 0

        # Processing chunk-by-chunk
        if split_to_chunks:
            chunk_ids = np.arange(0, f, chunk_size - 1)
            result = []
            for i in range(len(chunk_ids)):
                ch_start = chunk_ids[i]
                ch_end = f if i == len(chunk_ids) - 1 else chunk_ids[i + 1]
                # Frame 0 is prepended as the attention anchor and its
                # output is sliced off below ([1:]).
                frame_ids = [0] + list(range(ch_start, ch_end))
                # Re-seed per chunk so chunks share the same noise stream.
                self.generator.manual_seed(seed)
                print(f"Processing chunk {i + 1} / {len(chunk_ids)}")
                result.append(
                    self.inference_chunk(
                        frame_ids=frame_ids,
                        prompt=prompt,
                        negative_prompt=negative_prompt,
                        **kwargs,
                    ).images[1:]
                )
                # NOTE(review): this adds the chunk COUNT, not the number of
                # frames in this chunk — looks suspicious; confirm intent.
                frames_counter += len(chunk_ids) - 1
                # Frame cap when hosted on the HF Space.
                if on_huggingspace and frames_counter >= 80:
                    break
            result = np.concatenate(result)
            return result
        else:
            self.generator.manual_seed(seed)
            return self.pipe(
                prompt=prompt,
                negative_prompt=negative_prompt,
                generator=self.generator,
                **kwargs,
            ).images
class CannyText2VideoModel(Text2Video_Zero_Model):
    """Canny-edge-conditioned video generation (SD 1.5 + canny ControlNet)."""

    def __init__(self, device, dtype, use_cf_attn=True, **kwargs):
        super().__init__(device, dtype, **kwargs)
        controlnet = ControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", torch_dtype=dtype
        )
        self.set_model(
            ModelType.ControlNetCanny,
            model_id="runwayml/stable-diffusion-v1-5",
            controlnet=controlnet,
        )
        self.pipe.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)
        # Cross-frame attention keeps frames temporally consistent.
        if use_cf_attn:
            self.pipe.unet.set_attn_processor(processor=self.controlnet_attn_proc)
            self.pipe.controlnet.set_attn_processor(processor=self.controlnet_attn_proc)

    def process_controlnet_canny(
        self,
        video_path,
        prompt,
        chunk_size=8,
        merging_ratio=0.0,
        num_inference_steps=20,
        controlnet_conditioning_scale=1.0,
        guidance_scale=9.0,
        seed=42,
        eta=0.0,
        resolution=512,
        save_path=None,
    ):
        """Stylize *video_path* per *prompt*, conditioned on its canny edges.

        Returns the path of the written video.
        """
        added_prompt = "best quality, extremely detailed"
        negative_prompts = "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality"

        # normalize=False: the control tensor must stay in [0, 255] here.
        video, fps = prepare_video(
            video_path,
            resolution=resolution,
            device=self.device,
            dtype=self.dtype,
            normalize=False,
        )
        control = pre_process(video).to(self.device).to(self.dtype)

        # canny_to_save = list(rearrange(control, 'f c w h -> f w h c').cpu().detach().numpy())
        # _ = utils.create_video(canny_to_save, 4, path="ddxk.mp4", watermark=None)

        f, _, h, w = video.shape
        self.generator.manual_seed(seed)

        # One latent noise sample repeated across all frames so every
        # frame starts from identical noise.
        latents = torch.randn(
            (1, 4, h // 8, w // 8),
            dtype=self.dtype,
            device=self.device,
            generator=self.generator,
        )
        latents = latents.repeat(f, 1, 1, 1)
        result = self.inference(
            image=control,
            prompt=prompt + ", " + added_prompt,
            height=h,
            width=w,
            negative_prompt=negative_prompts,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            controlnet_conditioning_scale=controlnet_conditioning_scale,
            eta=eta,
            latents=latents,
            seed=seed,
            output_type="numpy",
            split_to_chunks=True,
            chunk_size=chunk_size,
            merging_ratio=merging_ratio,
        )
        return create_video(result, fps, path=save_path)
class PoseText2VideoModel(Text2Video_Zero_Model):
    """Pose-conditioned video generation (SD 1.5 + openpose ControlNet).

    Expects *video_path* to already contain pose renderings (the frames
    are used directly as the control signal).
    """

    def __init__(self, device, dtype, use_cf_attn=True, **kwargs):
        super().__init__(device, dtype, **kwargs)
        controlnet = ControlNetModel.from_pretrained(
            "fusing/stable-diffusion-v1-5-controlnet-openpose", torch_dtype=dtype
        )
        self.set_model(
            ModelType.ControlNetPose,
            model_id="runwayml/stable-diffusion-v1-5",
            controlnet=controlnet,
        )
        self.pipe.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)
        # Cross-frame attention keeps frames temporally consistent.
        if use_cf_attn:
            self.pipe.unet.set_attn_processor(processor=self.controlnet_attn_proc)
            self.pipe.controlnet.set_attn_processor(processor=self.controlnet_attn_proc)

    def process_controlnet_pose(
        self,
        video_path,
        prompt,
        chunk_size=8,
        merging_ratio=0.0,
        num_inference_steps=20,
        controlnet_conditioning_scale=1.0,
        guidance_scale=9.0,
        seed=42,
        eta=0.0,
        resolution=512,
        save_path=None,
    ):
        """Generate a video following the poses in *video_path*.

        Returns the path of the written video.
        """
        added_prompt = "best quality, extremely detailed, HD, ultra-realistic, 8K, HQ, masterpiece, trending on artstation, art, smooth"
        negative_prompts = "longbody, lowres, bad anatomy, bad hands, missing fingers, extra difits, cropped, worst quality, low quality, deformed body, bloated, ugly, unrealistic"

        video, fps = prepare_video(
            video_path,
            resolution=resolution,
            device=self.device,
            dtype=self.dtype,
            normalize=False,
        )
        control = pre_process(video).to(self.device).to(self.dtype)

        f, _, h, w = video.shape
        self.generator.manual_seed(seed)

        # One latent noise sample repeated across all frames.
        latents = torch.randn(
            (1, 4, h // 8, w // 8),
            dtype=self.dtype,
            device=self.device,
            generator=self.generator,
        )
        latents = latents.repeat(f, 1, 1, 1)
        result = self.inference(
            image=control,
            prompt=prompt + ", " + added_prompt,
            height=h,
            width=w,
            negative_prompt=negative_prompts,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            controlnet_conditioning_scale=controlnet_conditioning_scale,
            eta=eta,
            latents=latents,
            seed=seed,
            output_type="numpy",
            split_to_chunks=True,
            chunk_size=chunk_size,
            merging_ratio=merging_ratio,
        )
        return create_video(result, fps, path=save_path)
class DepthText2VideoModel(Text2Video_Zero_Model):
    """Depth-map-conditioned video generation (SD 1.5 + depth ControlNet).

    Expects *video_path* frames to serve directly as the depth control
    signal (they are only normalized, not re-estimated here).
    """

    def __init__(self, device, dtype, use_cf_attn=True, **kwargs):
        super().__init__(device, dtype, **kwargs)
        controlnet = ControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-depth", torch_dtype=dtype
        )
        self.set_model(
            ModelType.ControlNetDepth,
            model_id="runwayml/stable-diffusion-v1-5",
            controlnet=controlnet,
        )
        self.pipe.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)
        # Cross-frame attention keeps frames temporally consistent.
        if use_cf_attn:
            self.pipe.unet.set_attn_processor(processor=self.controlnet_attn_proc)
            self.pipe.controlnet.set_attn_processor(processor=self.controlnet_attn_proc)

    def process_controlnet_depth(
        self,
        video_path,
        prompt,
        chunk_size=8,
        merging_ratio=0.0,
        num_inference_steps=20,
        controlnet_conditioning_scale=1.0,
        guidance_scale=9.0,
        seed=42,
        eta=0.0,
        resolution=512,
        save_path=None,
    ):
        """Stylize *video_path* per *prompt*, conditioned on its depth maps.

        Returns the path of the written video.
        """
        added_prompt = "best quality, extremely detailed"
        negative_prompts = "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality"

        video, fps = prepare_video(
            video_path,
            resolution=resolution,
            device=self.device,
            dtype=self.dtype,
            normalize=False,
        )
        control = pre_process(video).to(self.device).to(self.dtype)

        # depth_map_to_save = list(rearrange(control, 'f c w h -> f w h c').cpu().detach().numpy())
        # _ = utils.create_video(depth_map_to_save, 4, path="ddxk.mp4", watermark=None)

        f, _, h, w = video.shape
        self.generator.manual_seed(seed)

        # One latent noise sample repeated across all frames.
        latents = torch.randn(
            (1, 4, h // 8, w // 8),
            dtype=self.dtype,
            device=self.device,
            generator=self.generator,
        )
        latents = latents.repeat(f, 1, 1, 1)
        result = self.inference(
            image=control,
            prompt=prompt + ", " + added_prompt,
            height=h,
            width=w,
            negative_prompt=negative_prompts,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            controlnet_conditioning_scale=controlnet_conditioning_scale,
            eta=eta,
            latents=latents,
            seed=seed,
            output_type="numpy",
            split_to_chunks=True,
            chunk_size=chunk_size,
            merging_ratio=merging_ratio,
        )
        return create_video(result, fps, path=save_path)
class VideoPix2PixModel(Text2Video_Zero_Model):
    """Instruct-Pix2Pix video editing with cross-frame attention."""
    def __init__(self, device, dtype, use_cf_attn=True, **kwargs):
        super().__init__(device, dtype, **kwargs)
        self.set_model(ModelType.Pix2Pix_Video, model_id="timbrooks/instruct-pix2pix")
        self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
            self.pipe.scheduler.config
        )
        if use_cf_attn:
            # Cross-frame attention keeps edited frames temporally consistent.
            self.pipe.unet.set_attn_processor(processor=self.pix2pix_attn_proc)
    def process_pix2pix(
        self,
        video,
        prompt,
        resolution=512,
        seed=0,
        image_guidance_scale=1.0,
        start_t=0,
        end_t=-1,
        out_fps=-1,
        chunk_size=8,
        merging_ratio=0.0,
        save_path=None,
    ):
        """Edit `video` according to the instruction `prompt`.

        start_t/end_t trim the clip (seconds, -1 = until end); out_fps of
        -1 keeps the source frame rate. Returns the saved video path.
        """
        video, fps = prepare_video(
            video,
            resolution=resolution,
            device=self.device,
            dtype=self.dtype,
            normalize=True,
            start_t=start_t,
            end_t=end_t,
            output_fps=out_fps,
        )
        self.generator.manual_seed(seed)
        result = self.inference(
            image=video,
            prompt=prompt,
            seed=seed,
            output_type="numpy",
            num_inference_steps=50,
            image_guidance_scale=image_guidance_scale,
            split_to_chunks=True,
            chunk_size=chunk_size,
            merging_ratio=merging_ratio,
        )
        return create_video(result, fps, path=save_path)
class Text2VideoModel(Text2Video_Zero_Model):
    """Plain text-to-video generation (Text2Video-Zero, no control signal)."""
    def __init__(
        self,
        device,
        dtype,
        model_name="dreamlike-art/dreamlike-photoreal-2.0",
        use_cf_attn=True,
        **kwargs,
    ):
        super().__init__(device, dtype, **kwargs)
        unet = UNet2DConditionModel.from_pretrained(
            model_name, subfolder="unet", torch_dtype=dtype
        )
        self.set_model(ModelType.Text2Video, model_id=model_name, unet=unet)
        self.pipe.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)
        if use_cf_attn:
            # Cross-frame attention processor for temporal consistency.
            self.pipe.unet.set_attn_processor(processor=self.text2video_attn_proc)
    def process_text2video(
        self,
        prompt,
        motion_field_strength_x=12,
        motion_field_strength_y=12,
        t0=44,
        t1=47,
        n_prompt="",
        chunk_size=8,
        video_length=8,
        merging_ratio=0.0,
        seed=0,
        resolution=512,
        fps=2,
        use_motion_field=True,
        smooth_bg=False,
        smooth_bg_strength=0.4,
        path=None,
    ):
        """Generate a video from `prompt` and save it.

        A quality-boosting suffix is appended to the prompt; n_prompt (if
        non-empty) is used as the negative prompt. Returns the saved path.
        """
        self.generator.manual_seed(seed)
        added_prompt = "high quality, HD, 8K, trending on artstation, high focus, dramatic lighting"
        # Drop a single trailing "," or "." so the suffix attaches cleanly.
        prompt = prompt.rstrip()
        if len(prompt) > 0 and (prompt[-1] == "," or prompt[-1] == "."):
            prompt = prompt.rstrip()[:-1]
        prompt = prompt.rstrip()
        prompt = prompt + ", " + added_prompt
        # Cleanup: a hard-coded `negative_prompts` constant previously defined
        # here was dead code — only the caller-supplied n_prompt is used.
        negative_prompt = n_prompt if len(n_prompt) > 0 else None
        result = self.inference(
            prompt=prompt,
            video_length=video_length,
            height=resolution,
            width=resolution,
            num_inference_steps=50,
            guidance_scale=7.5,
            guidance_stop_step=1.0,
            t0=t0,
            t1=t1,
            motion_field_strength_x=motion_field_strength_x,
            motion_field_strength_y=motion_field_strength_y,
            use_motion_field=use_motion_field,
            smooth_bg=smooth_bg,
            smooth_bg_strength=smooth_bg_strength,
            seed=seed,
            output_type="numpy",
            negative_prompt=negative_prompt,
            merging_ratio=merging_ratio,
            split_to_chunks=True,
            chunk_size=chunk_size,
        )
        return create_video(result, fps, path=path)
| 17,263 | 33.528 | 193 | py |
dolphin | dolphin-main/modules/text2video_zero/__init__.py | import torch
from .model import (
CannyText2VideoModel,
PoseText2VideoModel,
DepthText2VideoModel,
VideoPix2PixModel,
Text2VideoModel,
)
from utils import generate_video_name_mp4, get_new_video_name
class CannyText2Video:
    """Tool wrapper: edge-conditioned video generation from "path,prompt" input."""
    def __init__(self, device):
        self.device = device
        self.model = CannyText2VideoModel(device, dtype=torch.float16)
    def inference(self, inputs: str, resolution=512):
        # Input is "video_path,prompt"; everything after the first comma
        # belongs to the prompt.
        vid_path, _, prompt = inputs.partition(",")
        out_path = get_new_video_name(vid_path, func_name="canny2video")
        self.model.process_controlnet_canny(
            vid_path, prompt, save_path=out_path, resolution=resolution
        )
        return out_path
class PoseText2Video:
    """Tool wrapper: pose-conditioned video generation from "path,prompt" input."""
    def __init__(self, device):
        self.device = device
        self.model = PoseText2VideoModel(device, dtype=torch.float16)
    def inference(self, inputs: str, resolution=512):
        # Input is "video_path,prompt"; everything after the first comma
        # belongs to the prompt.
        vid_path, _, prompt = inputs.partition(",")
        out_path = get_new_video_name(vid_path, func_name="pose2video")
        self.model.process_controlnet_pose(
            vid_path, prompt, save_path=out_path, resolution=resolution
        )
        return out_path
class DepthText2Video:
    """Tool wrapper: depth-conditioned video generation from "path,prompt" input."""
    def __init__(self, device):
        self.device = device
        self.model = DepthText2VideoModel(device, dtype=torch.float16)
    def inference(self, inputs: str, resolution=512):
        # Input is "video_path,prompt"; everything after the first comma
        # belongs to the prompt.
        vid_path, _, prompt = inputs.partition(",")
        out_path = get_new_video_name(vid_path, func_name="depth2video")
        self.model.process_controlnet_depth(
            vid_path, prompt, save_path=out_path, resolution=resolution
        )
        return out_path
class VideoPix2Pix:
    """Tool wrapper: instruction-based video editing from "path,prompt" input."""
    def __init__(self, device):
        self.device = device
        self.model = VideoPix2PixModel(device, dtype=torch.float16)
    def inference(self, inputs: str):
        # Input is "video_path,prompt"; everything after the first comma
        # belongs to the prompt.
        vid_path, _, prompt = inputs.partition(",")
        out_path = get_new_video_name(vid_path, func_name="pix2pix")
        self.model.process_pix2pix(vid_path, prompt, save_path=out_path)
        return out_path
class Text2Video:
    """Tool wrapper: generate a short video directly from a text prompt."""
    def __init__(self, device):
        self.device = device
        self.model = Text2VideoModel(device, dtype=torch.float16)
    def inference(self, inputs: str, resolution=512):
        out_path = generate_video_name_mp4()
        # Default motion/timestep hyper-parameters of the Text2Video-Zero setup.
        self.model.process_text2video(
            inputs,
            fps=8,
            path=out_path,
            resolution=resolution,
            t0=44,
            t1=47,
            motion_field_strength_x=12,
            motion_field_strength_y=12,
            video_length=16,
        )
        return out_path
| 3,031 | 28.153846 | 80 | py |
dolphin | dolphin-main/modules/text2video_zero/text_to_video_pipeline.py | from diffusers import StableDiffusionPipeline
import torch
from dataclasses import dataclass
from typing import Callable, List, Optional, Union
import numpy as np
from diffusers.utils import deprecate, logging, BaseOutput
from einops import rearrange, repeat
from torch.nn.functional import grid_sample
import torchvision.transforms as T
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
import PIL
from PIL import Image
try:
from kornia.morphology import dilation
except:
print("Kornia error, no using background smooth.")
@dataclass
class TextToVideoPipelineOutput(BaseOutput):
    """Output of TextToVideoPipeline: frames plus per-frame NSFW flags."""
    # videos: Union[torch.Tensor, np.ndarray]
    # code: Union[torch.Tensor, np.ndarray]
    # Generated frames: PIL images or a numpy array, depending on output_type.
    images: Union[List[PIL.Image.Image], np.ndarray]
    # One flag per frame from the safety checker; None when it was skipped.
    nsfw_content_detected: Optional[List[bool]]
def coords_grid(batch, ht, wd, device):
    """Return a (batch, 2, ht, wd) float grid of pixel coordinates.

    Channel 0 holds x (column) indices and channel 1 holds y (row)
    indices, replicated across the batch dimension.
    Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py
    """
    # indexing="ij" reproduces the historical torch.meshgrid default and
    # silences the deprecation warning emitted since torch 1.10.
    coords = torch.meshgrid(
        torch.arange(ht, device=device),
        torch.arange(wd, device=device),
        indexing="ij",
    )
    # Reverse (row, col) -> (x, y) before stacking into the channel dim.
    coords = torch.stack(coords[::-1], dim=0).float()
    return coords[None].repeat(batch, 1, 1, 1)
class TextToVideoPipeline(StableDiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
        requires_safety_checker: bool = True,
    ):
        """Thin constructor: forwards all components to StableDiffusionPipeline."""
        super().__init__(
            vae,
            text_encoder,
            tokenizer,
            unet,
            scheduler,
            safety_checker,
            feature_extractor,
            requires_safety_checker,
        )
    def DDPM_forward(self, x0, t0, tMax, generator, device, shape, text_embeddings):
        """Diffuse x0 forward from timestep t0 to tMax (DDPM q-sampling).

        When x0 is None, returns pure Gaussian noise of `shape` instead.
        Uses text_embeddings only for its dtype.
        """
        # NOTE(review): sampling happens on CPU when the device is MPS,
        # presumably a generator/device compatibility workaround — confirm.
        rand_device = "cpu" if device.type == "mps" else device
        if x0 is None:
            return torch.randn(
                shape,
                generator=generator,
                device=rand_device,
                dtype=text_embeddings.dtype,
            ).to(device)
        else:
            eps = torch.randn(
                x0.shape,
                dtype=text_embeddings.dtype,
                generator=generator,
                device=rand_device,
            )
            # Cumulative alpha over [t0, tMax): x_t = sqrt(a)*x0 + sqrt(1-a)*eps
            alpha_vec = torch.prod(self.scheduler.alphas[t0:tMax])
            xt = torch.sqrt(alpha_vec) * x0 + torch.sqrt(1 - alpha_vec) * eps
            return xt
    def prepare_latents(
        self,
        batch_size,
        num_channels_latents,
        video_length,
        height,
        width,
        dtype,
        device,
        generator,
        latents=None,
    ):
        """Sample (or move/validate) initial latents of shape
        (batch, channels, frames, H // vae_scale, W // vae_scale).

        Mirrors the diffusers StableDiffusionPipeline helper but with the
        extra video_length (frame) dimension.
        """
        shape = (
            batch_size,
            num_channels_latents,
            video_length,
            height // self.vae_scale_factor,
            width // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        if latents is None:
            # NOTE(review): sampling on CPU for MPS devices then moving —
            # presumably a generator compatibility workaround; confirm.
            rand_device = "cpu" if device.type == "mps" else device
            if isinstance(generator, list):
                # One generator per batch element: sample each item separately.
                shape = (1,) + shape[1:]
                latents = [
                    torch.randn(
                        shape, generator=generator[i], device=rand_device, dtype=dtype
                    )
                    for i in range(batch_size)
                ]
                latents = torch.cat(latents, dim=0).to(device)
            else:
                latents = torch.randn(
                    shape, generator=generator, device=rand_device, dtype=dtype
                ).to(device)
        else:
            latents = latents.to(device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents
    def warp_latents_independently(self, latents, reference_flow):
        """Warp each frame's latent by its per-frame translation flow.

        latents: (1, c, f, h, w); reference_flow: (f, 2, H, W) pixel offsets.
        Returns a tensor of the same shape as latents, warped with nearest
        sampling and reflection padding.
        """
        _, _, H, W = reference_flow.size()
        b, _, f, h, w = latents.size()
        assert b == 1
        coords0 = coords_grid(f, H, W, device=latents.device).to(latents.dtype)
        # Shift the sampling grid by the flow, then normalize to [-1, 1]
        # (grid_sample's expected coordinate range) and resize to latent size.
        coords_t0 = coords0 + reference_flow
        coords_t0[:, 0] /= W
        coords_t0[:, 1] /= H
        coords_t0 = coords_t0 * 2.0 - 1.0
        coords_t0 = T.Resize((h, w))(coords_t0)
        coords_t0 = rearrange(coords_t0, "f c h w -> f h w c")
        latents_0 = rearrange(latents[0], "c f h w -> f c h w")
        warped = grid_sample(
            latents_0, coords_t0, mode="nearest", padding_mode="reflection"
        )
        warped = rearrange(warped, "(b f) c h w -> b c f h w", f=f)
        return warped
    def DDIM_backward(
        self,
        num_inference_steps,
        timesteps,
        skip_t,
        t0,
        t1,
        do_classifier_free_guidance,
        null_embs,
        text_embeddings,
        latents_local,
        latents_dtype,
        guidance_scale,
        guidance_stop_step,
        callback,
        callback_steps,
        extra_step_kwargs,
        num_warmup_steps,
    ):
        """Run the DDIM denoising loop, skipping timesteps above skip_t.

        Returns a dict with "x0" (final latents, shape b c f w h) and, when
        the loop passes them, intermediate latents "x_t0_1" / "x_t1_1"
        captured just before timesteps t0 / t1.
        """
        entered = False
        f = latents_local.shape[2]
        # The UNet consumes 4-D inputs, so frames are folded into the batch.
        latents_local = rearrange(latents_local, "b c f w h -> (b f) c w h")
        latents = latents_local.detach().clone()
        x_t0_1 = None
        x_t1_1 = None
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if t > skip_t:
                    continue
                else:
                    if not entered:
                        print(
                            f"Continue DDIM with i = {i}, t = {t}, latent = {latents.shape}, device = {latents.device}, type = {latents.dtype}"
                        )
                        entered = True
                latents = latents.detach()
                # expand the latents if we are doing classifier free guidance
                latent_model_input = (
                    torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                )
                latent_model_input = self.scheduler.scale_model_input(
                    latent_model_input, t
                )
                # predict the noise residual
                with torch.no_grad():
                    if null_embs is not None:
                        # Replace the unconditional embedding with the
                        # per-step null embedding (null-text inversion style).
                        text_embeddings[0] = null_embs[i][0]
                    te = torch.cat(
                        [
                            repeat(text_embeddings[0, :, :], "c k -> f c k", f=f),
                            repeat(text_embeddings[1, :, :], "c k -> f c k", f=f),
                        ]
                    )
                    noise_pred = self.unet(
                        latent_model_input, t, encoder_hidden_states=te
                    ).sample.to(dtype=latents_dtype)
                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (
                        noise_pred_text - noise_pred_uncond
                    )
                # NOTE(review): `alpha` is assigned but never used below —
                # looks like a leftover from a removed gradient term.
                if i >= guidance_stop_step * len(timesteps):
                    alpha = 0
                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(
                    noise_pred, t, latents, **extra_step_kwargs
                ).prev_sample
                # latents = latents - alpha * grads / (torch.norm(grads) + 1e-10)
                # call the callback, if provided
                if i < len(timesteps) - 1 and timesteps[i + 1] == t0:
                    x_t0_1 = latents.detach().clone()
                    print(f"latent t0 found at i = {i}, t = {t}")
                elif i < len(timesteps) - 1 and timesteps[i + 1] == t1:
                    x_t1_1 = latents.detach().clone()
                    print(f"latent t1 found at i={i}, t = {t}")
                if i == len(timesteps) - 1 or (
                    (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
                ):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)
        # Unfold frames back out of the batch dimension.
        latents = rearrange(latents, "(b f) c w h -> b c f w h", f=f)
        res = {"x0": latents.detach().clone()}
        if x_t0_1 is not None:
            x_t0_1 = rearrange(x_t0_1, "(b f) c w h -> b c f w h", f=f)
            res["x_t0_1"] = x_t0_1.detach().clone()
        if x_t1_1 is not None:
            x_t1_1 = rearrange(x_t1_1, "(b f) c w h -> b c f w h", f=f)
            res["x_t1_1"] = x_t1_1.detach().clone()
        return res
    def decode_latents(self, latents):
        """Decode (b, c, f, h, w) latents into a (b, 3, f, H, W) video in [0, 1]."""
        video_length = latents.shape[2]
        latents = 1 / 0.18215 * latents  # undo the SD VAE scaling factor
        latents = rearrange(latents, "b c f h w -> (b f) c h w")
        video = self.vae.decode(latents).sample
        video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length)
        video = (video / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
        video = video.detach().cpu()
        return video
    def create_motion_field(
        self,
        motion_field_strength_x,
        motion_field_strength_y,
        frame_ids,
        video_length,
        latents,
    ):
        """Build a constant per-frame translation field of shape
        (video_length - 1, 2, 512, 512).

        Frame `frame_id` is shifted by strength * frame_id pixels in x
        (channel 0) and y (channel 1). `latents` supplies device/dtype only.
        """
        reference_flow = torch.zeros(
            (video_length - 1, 2, 512, 512), device=latents.device, dtype=latents.dtype
        )
        for fr_idx, frame_id in enumerate(frame_ids):
            reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_id)
            reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_id)
        return reference_flow
    def create_motion_field_and_warp_latents(
        self,
        motion_field_strength_x,
        motion_field_strength_y,
        frame_ids,
        video_length,
        latents,
    ):
        """Create the translation field and warp each latent by it in place.

        Returns (motion_field, warped_latents).
        """
        motion_field = self.create_motion_field(
            motion_field_strength_x=motion_field_strength_x,
            motion_field_strength_y=motion_field_strength_y,
            latents=latents,
            video_length=video_length,
            frame_ids=frame_ids,
        )
        for idx, latent in enumerate(latents):
            latents[idx] = self.warp_latents_independently(latent[None], motion_field)
        return motion_field, latents
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        video_length: Optional[int],
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        guidance_stop_step: float = 0.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_videos_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        xT: Optional[torch.FloatTensor] = None,
        null_embs: Optional[torch.FloatTensor] = None,
        motion_field_strength_x: float = 12,
        motion_field_strength_y: float = 12,
        output_type: Optional[str] = "tensor",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: Optional[int] = 1,
        use_motion_field: bool = True,
        smooth_bg: bool = False,
        smooth_bg_strength: float = 0.4,
        t0: int = 44,
        t1: int = 47,
        **kwargs,
    ):
        """Text2Video-Zero generation loop.

        Denoises a single starting latent to timestep t1, replicates/warps
        it across frames with a global motion field, then finishes denoising
        all frames jointly. Optionally smooths the background via a salient-
        object mask (smooth_bg). t0/t1 are indices into the fixed 50-step
        DDPM schedule below, not raw timesteps.
        """
        frame_ids = kwargs.pop("frame_ids", list(range(video_length)))
        assert t0 < t1
        assert num_videos_per_prompt == 1
        assert isinstance(prompt, list) and len(prompt) > 0
        assert isinstance(negative_prompt, list) or negative_prompt is None
        # All list entries must be identical; collapse each list to one string.
        prompt_types = [prompt, negative_prompt]
        for idx, prompt_type in enumerate(prompt_types):
            prompt_template = None
            for prompt in prompt_type:
                if prompt_template is None:
                    prompt_template = prompt
                else:
                    assert prompt == prompt_template
            if prompt_types[idx] is not None:
                prompt_types[idx] = prompt_types[idx][0]
        prompt = prompt_types[0]
        negative_prompt = prompt_types[1]
        # Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor
        # Check inputs. Raise error if not correct
        self.check_inputs(prompt, height, width, callback_steps)
        # Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        device = self._execution_device
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # Encode input prompt
        text_embeddings = self._encode_prompt(
            prompt,
            device,
            num_videos_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
        )
        # Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        # print(f" Latent shape = {latents.shape}")
        # Prepare latent variables
        num_channels_latents = self.unet.in_channels
        xT = self.prepare_latents(
            batch_size * num_videos_per_prompt,
            num_channels_latents,
            1,
            height,
            width,
            text_embeddings.dtype,
            device,
            generator,
            xT,
        )
        dtype = xT.dtype
        # when motion field is not used, augment with random latent codes
        if use_motion_field:
            xT = xT[:, :, :1]
        else:
            if xT.shape[2] < video_length:
                xT_missing = self.prepare_latents(
                    batch_size * num_videos_per_prompt,
                    num_channels_latents,
                    video_length - xT.shape[2],
                    height,
                    width,
                    text_embeddings.dtype,
                    device,
                    generator,
                    None,
                )
                xT = torch.cat([xT, xT_missing], dim=2)
        # NOTE(review): xInit appears unused in this method.
        xInit = xT.clone()
        # Fixed 50-step DDPM timestep schedule; t0/t1 index into it below.
        timesteps_ddpm = [
            981,
            961,
            941,
            921,
            901,
            881,
            861,
            841,
            821,
            801,
            781,
            761,
            741,
            721,
            701,
            681,
            661,
            641,
            621,
            601,
            581,
            561,
            541,
            521,
            501,
            481,
            461,
            441,
            421,
            401,
            381,
            361,
            341,
            321,
            301,
            281,
            261,
            241,
            221,
            201,
            181,
            161,
            141,
            121,
            101,
            81,
            61,
            41,
            21,
            1,
        ]
        timesteps_ddpm.reverse()
        t0 = timesteps_ddpm[t0]
        t1 = timesteps_ddpm[t1]
        print(f"t0 = {t0} t1 = {t1}")
        x_t1_1 = None
        # Prepare extra step kwargs.
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
        # Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        shape = (
            batch_size,
            num_channels_latents,
            1,
            height // self.vae_scale_factor,
            width // self.vae_scale_factor,
        )
        # First pass: denoise the (single-frame or full) latent down to x0,
        # capturing intermediates at t0 and t1.
        ddim_res = self.DDIM_backward(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
            skip_t=1000,
            t0=t0,
            t1=t1,
            do_classifier_free_guidance=do_classifier_free_guidance,
            null_embs=null_embs,
            text_embeddings=text_embeddings,
            latents_local=xT,
            latents_dtype=dtype,
            guidance_scale=guidance_scale,
            guidance_stop_step=guidance_stop_step,
            callback=callback,
            callback_steps=callback_steps,
            extra_step_kwargs=extra_step_kwargs,
            num_warmup_steps=num_warmup_steps,
        )
        x0 = ddim_res["x0"].detach()
        if "x_t0_1" in ddim_res:
            x_t0_1 = ddim_res["x_t0_1"].detach()
        if "x_t1_1" in ddim_res:
            x_t1_1 = ddim_res["x_t1_1"].detach()
        del ddim_res
        del xT
        if use_motion_field:
            del x0
            # Replicate the t0 latent across frames and warp by the motion
            # field, then diffuse forward from t0 to t1.
            x_t0_k = x_t0_1[:, :, :1, :, :].repeat(1, 1, video_length - 1, 1, 1)
            reference_flow, x_t0_k = self.create_motion_field_and_warp_latents(
                motion_field_strength_x=motion_field_strength_x,
                motion_field_strength_y=motion_field_strength_y,
                latents=x_t0_k,
                video_length=video_length,
                frame_ids=frame_ids[1:],
            )
            # assuming t0=t1=1000, if t0 = 1000
            if t1 > t0:
                x_t1_k = self.DDPM_forward(
                    x0=x_t0_k,
                    t0=t0,
                    tMax=t1,
                    device=device,
                    shape=shape,
                    text_embeddings=text_embeddings,
                    generator=generator,
                )
            else:
                x_t1_k = x_t0_k
            if x_t1_1 is None:
                raise Exception
            x_t1 = torch.cat([x_t1_1, x_t1_k], dim=2).clone().detach()
            # Second pass: finish denoising all frames jointly from t1.
            ddim_res = self.DDIM_backward(
                num_inference_steps=num_inference_steps,
                timesteps=timesteps,
                skip_t=t1,
                t0=-1,
                t1=-1,
                do_classifier_free_guidance=do_classifier_free_guidance,
                null_embs=null_embs,
                text_embeddings=text_embeddings,
                latents_local=x_t1,
                latents_dtype=dtype,
                guidance_scale=guidance_scale,
                guidance_stop_step=guidance_stop_step,
                callback=callback,
                callback_steps=callback_steps,
                extra_step_kwargs=extra_step_kwargs,
                num_warmup_steps=num_warmup_steps,
            )
            x0 = ddim_res["x0"].detach()
            del ddim_res
            del x_t1
            del x_t1_1
            del x_t1_k
        else:
            x_t1 = x_t1_1.clone()
            x_t1_1 = x_t1_1[:, :, :1, :, :].clone()
            x_t1_k = x_t1_1[:, :, 1:, :, :].clone()
            x_t0_k = x_t0_1[:, :, 1:, :, :].clone()
            x_t0_1 = x_t0_1[:, :, :1, :, :].clone()
        # smooth background
        if smooth_bg:
            h, w = x0.shape[3], x0.shape[4]
            M_FG = torch.zeros((batch_size, video_length, h, w), device=x0.device).to(
                x0.dtype
            )
            for batch_idx, x0_b in enumerate(x0):
                z0_b = self.decode_latents(x0_b[None]).detach()
                z0_b = rearrange(z0_b[0], "c f h w -> f h w c")
                for frame_idx, z0_f in enumerate(z0_b):
                    z0_f = torch.round(z0_f * 255).cpu().numpy().astype(np.uint8)
                    # apply SOD detection
                    m_f = torch.tensor(
                        self.sod_model.process_data(z0_f), device=x0.device
                    ).to(x0.dtype)
                    mask = T.Resize(
                        size=(h, w), interpolation=T.InterpolationMode.NEAREST
                    )(m_f[None])
                    kernel = torch.ones(5, 5, device=x0.device, dtype=x0.dtype)
                    mask = dilation(mask[None].to(x0.device), kernel)[0]
                    M_FG[batch_idx, frame_idx, :, :] = mask
            # Background latents of frame 0, moved along the motion field.
            x_t1_1_fg_masked = x_t1_1 * (
                1 - repeat(M_FG[:, 0, :, :], "b w h -> b c 1 w h", c=x_t1_1.shape[1])
            )
            x_t1_1_fg_masked_moved = []
            for batch_idx, x_t1_1_fg_masked_b in enumerate(x_t1_1_fg_masked):
                x_t1_fg_masked_b = x_t1_1_fg_masked_b.clone()
                x_t1_fg_masked_b = x_t1_fg_masked_b.repeat(1, video_length - 1, 1, 1)
                if use_motion_field:
                    x_t1_fg_masked_b = x_t1_fg_masked_b[None]
                    x_t1_fg_masked_b = self.warp_latents_independently(
                        x_t1_fg_masked_b, reference_flow
                    )
                else:
                    x_t1_fg_masked_b = x_t1_fg_masked_b[None]
                x_t1_fg_masked_b = torch.cat(
                    [x_t1_1_fg_masked_b[None], x_t1_fg_masked_b], dim=2
                )
                x_t1_1_fg_masked_moved.append(x_t1_fg_masked_b)
            x_t1_1_fg_masked_moved = torch.cat(x_t1_1_fg_masked_moved, dim=0)
            # Warp frame 0's foreground mask the same way.
            M_FG_1 = M_FG[:, :1, :, :]
            M_FG_warped = []
            for batch_idx, m_fg_1_b in enumerate(M_FG_1):
                m_fg_1_b = m_fg_1_b[None, None]
                m_fg_b = m_fg_1_b.repeat(1, 1, video_length - 1, 1, 1)
                if use_motion_field:
                    m_fg_b = self.warp_latents_independently(
                        m_fg_b.clone(), reference_flow
                    )
                M_FG_warped.append(torch.cat([m_fg_1_b[:1, 0], m_fg_b[:1, 0]], dim=1))
            M_FG_warped = torch.cat(M_FG_warped, dim=0)
            channels = x0.shape[1]
            # Background = neither foreground now nor warped foreground.
            M_BG = (1 - M_FG) * (1 - M_FG_warped)
            M_BG = repeat(M_BG, "b f h w -> b c f h w", c=channels)
            a_convex = smooth_bg_strength
            # Convex blend of per-frame and moved frame-0 background latents.
            latents = (1 - M_BG) * x_t1 + M_BG * (
                a_convex * x_t1 + (1 - a_convex) * x_t1_1_fg_masked_moved
            )
            ddim_res = self.DDIM_backward(
                num_inference_steps=num_inference_steps,
                timesteps=timesteps,
                skip_t=t1,
                t0=-1,
                t1=-1,
                do_classifier_free_guidance=do_classifier_free_guidance,
                null_embs=null_embs,
                text_embeddings=text_embeddings,
                latents_local=latents,
                latents_dtype=dtype,
                guidance_scale=guidance_scale,
                guidance_stop_step=guidance_stop_step,
                callback=callback,
                callback_steps=callback_steps,
                extra_step_kwargs=extra_step_kwargs,
                num_warmup_steps=num_warmup_steps,
            )
            x0 = ddim_res["x0"].detach()
            del ddim_res
            del latents
        latents = x0
        # manually for max memory savings
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.unet.to("cpu")
        torch.cuda.empty_cache()
        if output_type == "latent":
            image = latents
            has_nsfw_concept = None
        else:
            image = self.decode_latents(latents)
            # Run safety checker
            image, has_nsfw_concept = self.run_safety_checker(
                image, device, text_embeddings.dtype
            )
            image = rearrange(image, "b c f h w -> (b f) h w c")
        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (image, has_nsfw_concept)
        return TextToVideoPipelineOutput(
            images=image, nsfw_content_detected=has_nsfw_concept
        )
| 24,513 | 33.72238 | 143 | py |
dolphin | dolphin-main/modules/blip/__init__.py | import torch
import numpy as np
from transformers import AutoProcessor, Blip2ForConditionalGeneration
from PIL import Image
from video_utils import prepare_video
class ImageCaptioning:
    """BLIP-2 (OPT-2.7b) wrapper that captions a video second-by-second."""
    def __init__(self, device):
        print("Initializing BLIP2 for ImageCaptioning")
        self.device = device
        self.processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
        self.model = Blip2ForConditionalGeneration.from_pretrained(
            "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16
        ).to(device)
    def image_captioning(self, image: Image, prompt=None, is_vqa=False):
        """Caption a single PIL image; with is_vqa=True, answer `prompt` instead."""
        if prompt and is_vqa:
            # BLIP-2 VQA uses the "Question: ... Answer:" template.
            prompt = f"Question: {prompt} Answer:"
        inputs = self.processor(image, text=prompt, return_tensors="pt").to(
            self.device, torch.float16
        )
        generated_ids = self.model.generate(**inputs, max_new_tokens=40)
        generated_text = self.processor.batch_decode(
            generated_ids, skip_special_tokens=True
        )[0].strip()
        return generated_text
    def frames_captioning(self, video_path):
        """Caption one frame per second of the video and join the results
        into a single "Second i: caption." string.
        """
        video, fps = prepare_video(video_path, 512, "cpu", normalize=False)
        # pick each frame for each second
        video = video[::fps]
        # (F, C, H, W) tensor -> list of HWC uint8 PIL images.
        video_nd = np.transpose(video.numpy(), (0, 2, 3, 1)).astype(np.uint8)
        pil_images = [Image.fromarray(frame) for frame in video_nd]
        caption_results = []
        for i, image in enumerate(pil_images):
            # image.save(f"temp/{str(i).zfill(5)}.png")
            caption = self.image_captioning(
                image, prompt="This is a video frame describing that"
            )
            caption_results.append(f"Second {i}: {caption}.")
        return " ".join(caption_results)
    def inference(self, inputs):
        """Tool entry point: `inputs` is a video path; returns the caption text."""
        return self.frames_captioning(inputs)
| 1,840 | 37.354167 | 83 | py |
dolphin | dolphin-main/modules/modelscope_t2v/__init__.py | from __future__ import annotations
import random
import tempfile
import imageio
import numpy as np
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from utils import generate_video_name_mp4
def to_video(frames: list[np.ndarray], fps: int, out_file=None) -> str:
    """Encode a list of RGB frames into an MP4 file.

    Args:
        frames: uint8 HWC frames to encode.
        fps: output frame rate.
        out_file: target path; a temporary .mp4 is created when None.

    Returns:
        Path of the written video file.
    """
    if out_file is None:
        out_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    writer = imageio.get_writer(out_file, format="FFMPEG", fps=fps)
    try:
        for frame in frames:
            writer.append_data(frame)
    finally:
        # Fix: close the writer even when append_data raises, so the
        # ffmpeg subprocess/file handle is not leaked.
        writer.close()
    return out_file
class ModelscopeT2V:
    """ModelScope (damo-vilab) text-to-video diffusion wrapper."""
    def __init__(self, device):
        pipe = DiffusionPipeline.from_pretrained(
            "damo-vilab/text-to-video-ms-1.7b",
            torch_dtype=torch.float16,
            variant="fp16",
        ).to(device)
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        # Memory savers: offload submodules to CPU and decode the VAE in slices.
        pipe.enable_model_cpu_offload()
        pipe.enable_vae_slicing()
        self.pipe = pipe
    def generate_video(
        self,
        prompt: str,
        seed: int,
        num_frames: int,
        num_inference_steps: int,
        out_file: str = None,
    ) -> str:
        """Generate a video for `prompt`; seed == -1 picks a random seed.

        Returns the path of the written MP4 (8 fps).
        """
        if seed == -1:
            seed = random.randint(0, 1000000)
        generator = torch.Generator().manual_seed(seed)
        frames = self.pipe(
            prompt,
            num_inference_steps=num_inference_steps,
            num_frames=num_frames,
            generator=generator,
        ).frames
        return to_video(frames, 8, out_file=out_file)
    def inference(self, inputs):
        """Tool entry point: `inputs` is the prompt; returns the video path."""
        video_path = generate_video_name_mp4()
        self.generate_video(
            prompt=inputs,
            seed=-1,
            num_frames=16,
            num_inference_steps=25,
            out_file=video_path,
        )
        return video_path
| 1,838 | 26.863636 | 87 | py |
dolphin | dolphin-main/modules/annotator/__init__.py | import cv2
import torch
import numpy as np
from einops import rearrange
from torchvision.transforms import Resize, InterpolationMode
from .util import HWC3
from .openpose import OpenposeDetector
from .midas import MidasDetector
from utils import get_new_video_name
from video_utils import prepare_video, create_video
class Video2Canny:
    """Convert a video into a per-frame Canny edge-map video."""
    def __init__(self, **kwargs):
        print("Initializing Video2Canny")
    def pre_process_canny(self, input_video, low_threshold=100, high_threshold=200):
        """Run Canny on each (C, H, W) frame; return (F, C, H, W) maps in [0, 1]."""
        detected_maps = []
        for frame in input_video:
            img = rearrange(frame, "c h w -> h w c").cpu().numpy().astype(np.uint8)
            detected_map = cv2.Canny(img, low_threshold, high_threshold)
            detected_map = HWC3(detected_map)  # expand single channel to HWC3
            detected_maps.append(detected_map[None])
        detected_maps = np.concatenate(detected_maps)
        control = torch.from_numpy(detected_maps.copy()).float() / 255.0
        return rearrange(control, "f h w c -> f c h w")
    def inference(self, inputs):
        """`inputs` is a video path; returns the path of the edge-map video."""
        vid_path = inputs
        video, fps = prepare_video(vid_path, resolution=512, device="cpu")
        vid_canny = self.pre_process_canny(video)
        canny_to_save = list(
            rearrange(vid_canny, "f c w h -> f w h c").cpu().detach().numpy()
        )
        out_path = get_new_video_name(vid_path, "edge")
        return create_video(canny_to_save, fps, out_path)
class Video2Pose:
    """Convert a video into a per-frame OpenPose skeleton video."""
    def __init__(self, device, dtype=torch.float16):
        print("Initializing Video2Pose")
        self.device = device
        self.dtype = dtype
        self.detector = OpenposeDetector(device=device)
    def pre_process_pose(self, input_video, apply_pose_detect: bool = True):
        """Run pose detection on each (C, H, W) frame; return (F, C, H, W)
        maps in [0, 1]. With apply_pose_detect=False, frames pass through.
        """
        detected_maps = []
        for frame in input_video:
            img = rearrange(frame, "c h w -> h w c").cpu().numpy().astype(np.uint8)
            img = HWC3(img)
            if apply_pose_detect:
                detected_map, _ = self.detector(img)
            else:
                detected_map = img
            detected_map = HWC3(detected_map)
            H, W, C = img.shape
            # Resize the map back to the source frame size.
            detected_map = cv2.resize(
                detected_map, (W, H), interpolation=cv2.INTER_NEAREST
            )
            detected_maps.append(detected_map[None])
        detected_maps = np.concatenate(detected_maps)
        control = torch.from_numpy(detected_maps.copy()).float() / 255.0
        return rearrange(control, "f h w c -> f c h w")
    def inference(self, inputs, resolution=512):
        """`inputs` is a video path; returns the path of the pose-map video."""
        vid_path = inputs
        video, fps = prepare_video(
            vid_path, resolution=resolution, device=self.device, normalize=False
        )
        vid_pose = self.pre_process_pose(video)
        canny_to_save = list(
            rearrange(vid_pose, "f c w h -> f w h c").cpu().detach().numpy()
        )
        out_path = get_new_video_name(vid_path, "pose")
        return create_video(canny_to_save, fps, out_path)
class Video2Depth:
    """Convert a video into a per-frame MiDaS depth-map video."""
    def __init__(self, device, dtype=torch.float16):
        print("Initializing Video2Depth")
        self.device = device
        self.dtype = dtype
        self.depth_estimator = MidasDetector(device)
    def pre_process_depth(self, input_video, apply_depth_detect: bool = True):
        """Run depth estimation on each (C, H, W) frame; return (F, C, H, W)
        maps in [0, 1]. With apply_depth_detect=False, frames pass through.
        """
        detected_maps = []
        for frame in input_video:
            img = rearrange(frame, "c h w -> h w c").cpu().numpy().astype(np.uint8)
            img = HWC3(img)
            if apply_depth_detect:
                detected_map, _ = self.depth_estimator(img)
            else:
                detected_map = img
            detected_map = HWC3(detected_map)
            H, W, C = img.shape
            # Resize the map back to the source frame size.
            detected_map = cv2.resize(
                detected_map, (W, H), interpolation=cv2.INTER_NEAREST
            )
            detected_maps.append(detected_map[None])
        detected_maps = np.concatenate(detected_maps)
        control = torch.from_numpy(detected_maps.copy()).float() / 255.0
        return rearrange(control, "f h w c -> f c h w")
    def inference(self, inputs, resolution=512):
        """`inputs` is a video path; returns the path of the depth-map video."""
        vid_path = inputs
        video, fps = prepare_video(
            vid_path,
            resolution=resolution,
            device=self.device,
            dtype=self.dtype,
            normalize=False,
        )
        control = self.pre_process_depth(video).to(self.device).to(self.dtype)
        depth_map_to_save = list(
            rearrange(control, "f c w h -> f w h c").cpu().detach().numpy()
        )
        out_path = get_new_video_name(vid_path, "depth")
        return create_video(depth_map_to_save, fps, out_path)
| 4,607 | 36.770492 | 84 | py |
dolphin | dolphin-main/modules/annotator/midas/utils.py | """Utils for monoDepth."""
import sys
import re
import numpy as np
import cv2
import torch
def read_pfm(path):
    """Read pfm file.

    Args:
        path (str): path to file

    Returns:
        tuple: (data, scale)
    """
    with open(path, "rb") as fh:
        magic = fh.readline().rstrip().decode("ascii")
        if magic == "PF":
            is_color = True
        elif magic == "Pf":
            is_color = False
        else:
            raise Exception("Not a PFM file: " + path)

        match = re.match(r"^(\d+)\s(\d+)\s$", fh.readline().decode("ascii"))
        if not match:
            raise Exception("Malformed PFM header.")
        width, height = (int(g) for g in match.groups())

        scale = float(fh.readline().decode("ascii").rstrip())
        if scale < 0:
            # negative scale marks little-endian data
            endian = "<"
            scale = -scale
        else:
            endian = ">"

        raw = np.fromfile(fh, endian + "f")
        shape = (height, width, 3) if is_color else (height, width)
        # PFM stores rows bottom-to-top, so flip vertically.
        return np.flipud(np.reshape(raw, shape)), scale
def write_pfm(path, image, scale=1):
    """Write a PFM (Portable Float Map) file.

    Args:
        path (str): path to file
        image (np.ndarray): float32 data, H x W x 3 (color), H x W x 1 or
            H x W (greyscale)
        scale (int, optional): scale stored in the header. Defaults to 1.

    Raises:
        Exception: if dtype is not float32 or the shape is unsupported.
    """
    with open(path, "wb") as file:
        color = None
        if image.dtype.name != "float32":
            raise Exception("Image dtype must be float32.")
        # PFM stores rows bottom-to-top, so flip vertically before writing.
        image = np.flipud(image)
        if len(image.shape) == 3 and image.shape[2] == 3:  # color image
            color = True
        elif (
            len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
        ):  # greyscale
            color = False
        else:
            raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
        # BUG FIX: original was `"PF\n" if color else "Pf\n".encode()` —
        # .encode() bound only to "Pf\n", so the color branch passed a str
        # to a binary file and raised TypeError.
        file.write(("PF\n" if color else "Pf\n").encode())
        file.write(("%d %d\n" % (image.shape[1], image.shape[0])).encode())
        endian = image.dtype.byteorder
        # A negative scale in the header signals little-endian sample data.
        if endian == "<" or endian == "=" and sys.byteorder == "little":
            scale = -scale
        file.write(("%f\n" % scale).encode())
        image.tofile(file)
def read_image(path):
    """Read image and output RGB image (0-1).
    Args:
        path (str): path to file
    Returns:
        array: RGB image (0-1)
    """
    img = cv2.imread(path)
    # Promote greyscale to 3 channels so the BGR->RGB conversion below works.
    if img.ndim == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    # OpenCV loads BGR; convert to RGB and scale uint8 [0,255] to float [0,1].
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
    return img
def resize_image(img):
    """Resize image and make it fit for network.
    Args:
        img (array): image
    Returns:
        tensor: data ready for network (1 x C x H x W float tensor)
    """
    height_orig = img.shape[0]
    width_orig = img.shape[1]
    # Scale so the longer side becomes 384 pixels.
    if width_orig > height_orig:
        scale = width_orig / 384
    else:
        scale = height_orig / 384
    # Round both sides up to multiples of 32 (network stride requirement).
    height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
    width = (np.ceil(width_orig / scale / 32) * 32).astype(int)
    img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
    # HWC -> CHW, then add a leading batch dimension.
    img_resized = (
        torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float()
    )
    img_resized = img_resized.unsqueeze(0)
    return img_resized
def resize_depth(depth, width, height):
    """Resize depth map and bring to CPU (numpy).
    Args:
        depth (tensor): depth
        width (int): image width
        height (int): image height
    Returns:
        array: processed depth
    """
    # Drop the batch dimension and move to CPU before converting to numpy.
    depth = torch.squeeze(depth[0, :, :, :]).to("cpu")
    depth_resized = cv2.resize(
        depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
    )
    return depth_resized
def write_depth(path, depth, bits=1):
    """Write depth map to pfm and png file.

    Args:
        path (str): filepath without extension
        depth (array): depth
        bits (int, optional): bytes per PNG sample; 1 -> uint8, 2 -> uint16.
    """
    write_pfm(path + ".pfm", depth.astype(np.float32))
    depth_min = depth.min()
    depth_max = depth.max()
    max_val = (2**(8*bits))-1
    if depth_max - depth_min > np.finfo("float").eps:
        # Normalize to the full integer range of the output format.
        out = max_val * (depth - depth_min) / (depth_max - depth_min)
    else:
        # BUG FIX: was `dtype=depth.type` — ndarray has no `.type` attribute,
        # so constant depth maps raised AttributeError. Use `.dtype`.
        out = np.zeros(depth.shape, dtype=depth.dtype)
    if bits == 1:
        cv2.imwrite(path + ".png", out.astype("uint8"))
    elif bits == 2:
        cv2.imwrite(path + ".png", out.astype("uint16"))
    return
| 4,582 | 23.121053 | 88 | py |
dolphin | dolphin-main/modules/annotator/midas/api.py | # based on https://github.com/isl-org/MiDaS
import cv2
import os
import torch
import torch.nn as nn
from torchvision.transforms import Compose
from .midas.dpt_depth import DPTDepthModel
from .midas.midas_net import MidasNet
from .midas.midas_net_custom import MidasNet_small
from .midas.transforms import Resize, NormalizeImage, PrepareForNet
from ..util import annotator_ckpts_path
ISL_PATHS = {
"dpt_large": os.path.join(annotator_ckpts_path, "dpt_large-midas-2f21e586.pt"),
"dpt_hybrid": os.path.join(annotator_ckpts_path, "dpt_hybrid-midas-501f0c75.pt"),
"midas_v21": "",
"midas_v21_small": "",
}
remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt"
def disabled_train(self, mode=True):
    """No-op replacement for nn.Module.train.

    Bound in place of ``model.train`` so that subsequent ``.train()`` /
    ``.eval()`` calls cannot flip the module out of its frozen mode; the
    requested ``mode`` is deliberately ignored.
    """
    return self
def load_midas_transform(model_type):
    """Build only the image preprocessing pipeline for a MiDaS variant.

    Args:
        model_type (str): one of "dpt_large", "dpt_hybrid", "midas_v21",
            "midas_v21_small".

    Returns:
        Compose: Resize -> NormalizeImage -> PrepareForNet transform chain.
    """
    # https://github.com/isl-org/MiDaS/blob/master/run.py
    # load transform only
    if model_type == "dpt_large":  # DPT-Large
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    elif model_type == "dpt_hybrid":  # DPT-Hybrid
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    elif model_type == "midas_v21":
        net_w, net_h = 384, 384
        resize_mode = "upper_bound"
        # ImageNet statistics for the v21 backbones.
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )
    elif model_type == "midas_v21_small":
        net_w, net_h = 256, 256
        resize_mode = "upper_bound"
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )
    else:
        assert (
            False
        ), f"model_type '{model_type}' not implemented, use: --model_type large"
    transform = Compose(
        [
            Resize(
                net_w,
                net_h,
                resize_target=None,
                keep_aspect_ratio=True,
                ensure_multiple_of=32,  # network stride requirement
                resize_method=resize_mode,
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            normalization,
            PrepareForNet(),
        ]
    )
    return transform
def load_model(model_type):
    """Instantiate a MiDaS depth network plus its preprocessing transform.

    Args:
        model_type (str): one of "dpt_large", "dpt_hybrid", "midas_v21",
            "midas_v21_small" (keys of ISL_PATHS).

    Returns:
        tuple: (model in eval mode, Compose transform).
    """
    # https://github.com/isl-org/MiDaS/blob/master/run.py
    # load network
    model_path = ISL_PATHS[model_type]
    if model_type == "dpt_large":  # DPT-Large
        model = DPTDepthModel(
            path=model_path,
            backbone="vitl16_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    elif model_type == "dpt_hybrid":  # DPT-Hybrid
        # Download the checkpoint on first use.
        if not os.path.exists(model_path):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
        model = DPTDepthModel(
            path=model_path,
            backbone="vitb_rn50_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    elif model_type == "midas_v21":
        model = MidasNet(model_path, non_negative=True)
        net_w, net_h = 384, 384
        resize_mode = "upper_bound"
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )
    elif model_type == "midas_v21_small":
        model = MidasNet_small(
            model_path,
            features=64,
            backbone="efficientnet_lite3",
            exportable=True,
            non_negative=True,
            blocks={"expand": True},
        )
        net_w, net_h = 256, 256
        resize_mode = "upper_bound"
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )
    else:
        print(f"model_type '{model_type}' not implemented, use: --model_type large")
        assert False
    # Same preprocessing chain as load_midas_transform, matched to the model.
    transform = Compose(
        [
            Resize(
                net_w,
                net_h,
                resize_target=None,
                keep_aspect_ratio=True,
                ensure_multiple_of=32,
                resize_method=resize_mode,
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            normalization,
            PrepareForNet(),
        ]
    )
    return model.eval(), transform
class MiDaSInference(nn.Module):
    """Inference-only wrapper around a MiDaS depth model.

    The wrapped model's ``train`` method is replaced with a no-op so it can
    never leave eval mode, and ``forward`` runs under ``torch.no_grad()``.
    """
    MODEL_TYPES_TORCH_HUB = ["DPT_Large", "DPT_Hybrid", "MiDaS_small"]
    MODEL_TYPES_ISL = [
        "dpt_large",
        "dpt_hybrid",
        "midas_v21",
        "midas_v21_small",
    ]
    def __init__(self, model_type):
        """Load the requested MiDaS variant (must be an ISL model type)."""
        super().__init__()
        assert model_type in self.MODEL_TYPES_ISL
        model, _ = load_model(model_type)
        self.model = model
        # Freeze train/eval state: .train() calls become no-ops.
        self.model.train = disabled_train
    def forward(self, x):
        """Predict depth for a preprocessed image batch without gradients."""
        with torch.no_grad():
            prediction = self.model(x)
        return prediction
| 5,309 | 28.831461 | 124 | py |
dolphin | dolphin-main/modules/annotator/midas/__init__.py | import cv2
import numpy as np
import torch
from einops import rearrange
from .api import MiDaSInference
class MidasDetector:
    """Annotator that produces a depth map and a normal map from an RGB image."""
    def __init__(self, device=None):
        # Default to CUDA when available; otherwise CPU.
        self.device = device or torch.device(
            "cuda" if torch.cuda.is_available() else "cpu"
        )
        self.model = MiDaSInference(model_type="dpt_hybrid").to(self.device)
    def __call__(self, input_image, a=np.pi * 2.0, bg_th=0.1):
        """Estimate depth and derive a surface-normal image.

        Args:
            input_image: H x W x C uint8-range array (values scaled by 127.5
                below — presumably 0..255; confirm with callers).
            a (float): constant z-component used when building normals.
            bg_th (float): normalized-depth threshold below which gradients
                are zeroed (treated as background).

        Returns:
            tuple: (depth_image uint8 H x W, normal_image uint8 H x W x 3).
        """
        assert input_image.ndim == 3
        image_depth = input_image
        with torch.no_grad():
            image_depth = torch.from_numpy(image_depth).float().to(self.device)
            # Scale to [-1, 1] as the network expects.
            image_depth = image_depth / 127.5 - 1.0
            image_depth = rearrange(image_depth, "h w c -> 1 c h w")
            depth = self.model(image_depth)[0]
            # Min-max normalize a copy for visualization / thresholding.
            depth_pt = depth.clone()
            depth_pt -= torch.min(depth_pt)
            depth_pt /= torch.max(depth_pt)
            depth_pt = depth_pt.cpu().numpy()
            depth_image = (depth_pt * 255.0).clip(0, 255).astype(np.uint8)
            # Normals from raw-depth gradients (Sobel), background masked out.
            depth_np = depth.cpu().numpy()
            x = cv2.Sobel(depth_np, cv2.CV_32F, 1, 0, ksize=3)
            y = cv2.Sobel(depth_np, cv2.CV_32F, 0, 1, ksize=3)
            z = np.ones_like(x) * a
            x[depth_pt < bg_th] = 0
            y[depth_pt < bg_th] = 0
            normal = np.stack([x, y, z], axis=2)
            # Unit-normalize, then map [-1, 1] to [0, 255].
            normal /= np.sum(normal**2.0, axis=2, keepdims=True) ** 0.5
            normal_image = (normal * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
        return depth_image, normal_image
| 1,544 | 35.785714 | 81 | py |
dolphin | dolphin-main/modules/annotator/midas/midas/base_model.py | import torch
class BaseModel(torch.nn.Module):
    """Base class adding CPU-safe checkpoint loading to nn.Module."""
    def load(self, path):
        """Load model from file.
        Args:
            path (str): file path
        """
        # map_location='cpu' so GPU-trained checkpoints load on any device.
        parameters = torch.load(path, map_location=torch.device('cpu'))
        # Full training checkpoints store the weights under "model".
        if "optimizer" in parameters:
            parameters = parameters["model"]
        self.load_state_dict(parameters)
| 367 | 20.647059 | 71 | py |
dolphin | dolphin-main/modules/annotator/midas/midas/midas_net.py | """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
This file contains code that is adapted from
https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
"""
import torch
import torch.nn as nn
from .base_model import BaseModel
from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
class MidasNet(BaseModel):
    """Network for monocular depth estimation.

    ResNeXt-101 WSL encoder with a four-level RefineNet-style fusion decoder.
    """
    def __init__(self, path=None, features=256, non_negative=True):
        """Init.
        Args:
            path (str, optional): Path to saved model. Defaults to None.
            features (int, optional): Number of features. Defaults to 256.
            backbone (str, optional): Backbone network for encoder. Defaults to resnet50
        """
        print("Loading weights: ", path)
        super(MidasNet, self).__init__()
        # Only fetch ImageNet weights when no checkpoint will be loaded.
        use_pretrained = False if path is None else True
        self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained)
        self.scratch.refinenet4 = FeatureFusionBlock(features)
        self.scratch.refinenet3 = FeatureFusionBlock(features)
        self.scratch.refinenet2 = FeatureFusionBlock(features)
        self.scratch.refinenet1 = FeatureFusionBlock(features)
        # Output head: upsample 2x and regress a single depth channel.
        self.scratch.output_conv = nn.Sequential(
            nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear"),
            nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
        )
        if path:
            self.load(path)
    def forward(self, x):
        """Forward pass.
        Args:
            x (tensor): input data (image)
        Returns:
            tensor: depth
        """
        # Encoder: four feature stages at decreasing resolution.
        layer_1 = self.pretrained.layer1(x)
        layer_2 = self.pretrained.layer2(layer_1)
        layer_3 = self.pretrained.layer3(layer_2)
        layer_4 = self.pretrained.layer4(layer_3)
        # Project each stage to the decoder width.
        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)
        # Decoder: fuse top-down, coarsest to finest.
        path_4 = self.scratch.refinenet4(layer_4_rn)
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
        out = self.scratch.output_conv(path_1)
        return torch.squeeze(out, dim=1)
| 2,709 | 34.194805 | 130 | py |
dolphin | dolphin-main/modules/annotator/midas/midas/vit.py | import torch
import torch.nn as nn
import timm
import types
import math
import torch.nn.functional as F
class Slice(nn.Module):
    """Drops the first ``start_index`` tokens (readout tokens) from a ViT
    token sequence of shape (B, T, C)."""
    def __init__(self, start_index=1):
        """Args: start_index (int): number of leading tokens to discard."""
        super().__init__()
        self.start_index = start_index
    def forward(self, x):
        """Return x without its leading ``start_index`` tokens."""
        return x[:, self.start_index :]
class AddReadout(nn.Module):
    """Fuses the ViT readout token(s) into the patch tokens by addition."""
    def __init__(self, start_index=1):
        """Args: start_index (int): 1 for a cls token, 2 for cls + dist tokens."""
        super().__init__()
        self.start_index = start_index
    def forward(self, x):
        """Add the (averaged) readout token onto every patch token."""
        # With two special tokens (DeiT distilled), average them first.
        readout = (x[:, 0] + x[:, 1]) / 2 if self.start_index == 2 else x[:, 0]
        return x[:, self.start_index :] + readout.unsqueeze(1)
class ProjectReadout(nn.Module):
    """Folds the readout token into each patch token via a learned projection.

    Concatenates every patch token with the readout token (channel dim) and
    projects the 2*C result back to C with Linear + GELU.
    """
    def __init__(self, in_features, start_index=1):
        super().__init__()
        self.start_index = start_index
        self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())
    def forward(self, x):
        patches = x[:, self.start_index :]
        readout = x[:, 0].unsqueeze(1).expand_as(patches)
        return self.project(torch.cat((patches, readout), -1))
class Transpose(nn.Module):
    """nn.Module wrapper around Tensor.transpose, usable inside nn.Sequential."""
    def __init__(self, dim0, dim1):
        """Args: dim0, dim1 (int): the two dimensions to swap."""
        super().__init__()
        self.dim0 = dim0
        self.dim1 = dim1
    def forward(self, x):
        """Return x with dims ``dim0`` and ``dim1`` swapped (a view)."""
        return x.transpose(self.dim0, self.dim1)
def forward_vit(pretrained, x):
    """Run the hooked ViT backbone and return four spatial feature maps.

    Args:
        pretrained: module produced by one of the _make_*_backbone helpers;
            forward hooks fill ``pretrained.activations`` during the call.
        x (tensor): image batch of shape (B, C, H, W).

    Returns:
        tuple of four feature tensors, one per hooked depth.
    """
    b, c, h, w = x.shape
    # The forward populates pretrained.activations via the registered hooks;
    # the global output itself is not used further here.
    glob = pretrained.model.forward_flex(x)
    layer_1 = pretrained.activations["1"]
    layer_2 = pretrained.activations["2"]
    layer_3 = pretrained.activations["3"]
    layer_4 = pretrained.activations["4"]
    # First stage of postprocessing (readout handling + transpose).
    layer_1 = pretrained.act_postprocess1[0:2](layer_1)
    layer_2 = pretrained.act_postprocess2[0:2](layer_2)
    layer_3 = pretrained.act_postprocess3[0:2](layer_3)
    layer_4 = pretrained.act_postprocess4[0:2](layer_4)
    # Reshape token sequences (B, C, T) back into a patch grid (B, C, h', w').
    unflatten = nn.Sequential(
        nn.Unflatten(
            2,
            torch.Size(
                [
                    h // pretrained.model.patch_size[1],
                    w // pretrained.model.patch_size[0],
                ]
            ),
        )
    )
    # Hybrid backbones may already return 4D CNN features; only unflatten 3D.
    if layer_1.ndim == 3:
        layer_1 = unflatten(layer_1)
    if layer_2.ndim == 3:
        layer_2 = unflatten(layer_2)
    if layer_3.ndim == 3:
        layer_3 = unflatten(layer_3)
    if layer_4.ndim == 3:
        layer_4 = unflatten(layer_4)
    # Remaining postprocessing (skipping the unflatten slot at index 2).
    layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
    layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
    layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
    layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)
    return layer_1, layer_2, layer_3, layer_4
def _resize_pos_embed(self, posemb, gs_h, gs_w):
posemb_tok, posemb_grid = (
posemb[:, : self.start_index],
posemb[0, self.start_index :],
)
gs_old = int(math.sqrt(len(posemb_grid)))
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
def forward_flex(self, x):
    """ViT forward pass that tolerates arbitrary input resolutions.

    Injected as a method on the timm VisionTransformer; resizes the stored
    positional embedding to match the actual patch grid before the blocks run.
    """
    b, c, h, w = x.shape
    # Resample positional embeddings to the current patch grid.
    pos_embed = self._resize_pos_embed(
        self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
    )
    B = x.shape[0]
    # Hybrid models embed patches through a CNN backbone first.
    if hasattr(self.patch_embed, "backbone"):
        x = self.patch_embed.backbone(x)
        if isinstance(x, (list, tuple)):
            x = x[-1]  # last feature if backbone outputs list/tuple of features
    x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)
    # Prepend the class token (and distillation token for DeiT models).
    if getattr(self, "dist_token", None) is not None:
        cls_tokens = self.cls_token.expand(
            B, -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        dist_token = self.dist_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, dist_token, x), dim=1)
    else:
        cls_tokens = self.cls_token.expand(
            B, -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)
    x = x + pos_embed
    x = self.pos_drop(x)
    for blk in self.blocks:
        x = blk(x)
    x = self.norm(x)
    return x
# Module-level store filled by the forward hooks created below.
activations = {}
def get_activation(name):
    """Return a forward hook that records a module's output under ``name``."""
    def _capture(model, input, output):
        activations[name] = output
    return _capture
def get_readout_oper(vit_features, features, use_readout, start_index=1):
    """Build one readout-token handler per output feature level.

    Args:
        vit_features (int): transformer channel width (for 'project').
        features (sequence): one entry per output level; only its length is used.
        use_readout (str): 'ignore' drops the token, 'add' adds it to every
            patch token, 'project' learns a joint projection.
        start_index (int): number of special tokens at the sequence front.
    """
    if use_readout == "ignore":
        return [Slice(start_index)] * len(features)
    if use_readout == "add":
        return [AddReadout(start_index)] * len(features)
    if use_readout == "project":
        # Projections carry weights, so each level needs its own instance.
        return [ProjectReadout(vit_features, start_index) for _ in features]
    assert (
        False
    ), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
def _make_vit_b16_backbone(
    model,
    features=[96, 192, 384, 768],
    size=[384, 384],
    hooks=[2, 5, 8, 11],
    vit_features=768,
    use_readout="ignore",
    start_index=1,
):
    """Wrap a timm ViT-B/16-style model so it yields four CNN-like feature maps.

    Registers forward hooks at the given block indices and attaches
    ``act_postprocess*`` pipelines that turn hooked token sequences into
    spatial maps at strides 4, 8, 16 and 32 (relative to the 16px patch grid).
    Returns an nn.Module holding the model, hooks and postprocess stages.
    """
    pretrained = nn.Module()
    pretrained.model = model
    pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
    pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
    pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
    pretrained.activations = activations
    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
    # 32, 48, 136, 384
    # Level 1: upsample 4x via transposed conv.
    pretrained.act_postprocess1 = nn.Sequential(
        readout_oper[0],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[0],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.ConvTranspose2d(
            in_channels=features[0],
            out_channels=features[0],
            kernel_size=4,
            stride=4,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )
    # Level 2: upsample 2x.
    pretrained.act_postprocess2 = nn.Sequential(
        readout_oper[1],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[1],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.ConvTranspose2d(
            in_channels=features[1],
            out_channels=features[1],
            kernel_size=2,
            stride=2,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )
    # Level 3: keep the native patch-grid resolution.
    pretrained.act_postprocess3 = nn.Sequential(
        readout_oper[2],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[2],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )
    # Level 4: downsample 2x with a strided conv.
    pretrained.act_postprocess4 = nn.Sequential(
        readout_oper[3],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[3],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.Conv2d(
            in_channels=features[3],
            out_channels=features[3],
            kernel_size=3,
            stride=2,
            padding=1,
        ),
    )
    pretrained.model.start_index = start_index
    pretrained.model.patch_size = [16, 16]
    # We inject this function into the VisionTransformer instances so that
    # we can use it with interpolated position embeddings without modifying the library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model
    )
    return pretrained
def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
    """Create a hooked ViT-L/16 (384px) backbone from timm.

    Args:
        pretrained (bool): load timm's pretrained weights.
        use_readout (str): readout-token handling ('ignore'/'add'/'project').
        hooks (list[int], optional): block indices to tap; sensible defaults
            for the 24-block ViT-L when None.
    """
    model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)
    # Idiom fix: compare to None with `is`, not `==`.
    hooks = [5, 11, 17, 23] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model,
        features=[256, 512, 1024, 1024],
        hooks=hooks,
        vit_features=1024,
        use_readout=use_readout,
    )
def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
    """Create a hooked ViT-B/16 (384px) backbone from timm.

    See _make_pretrained_vitl16_384 for parameter semantics.
    """
    model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)
    # Idiom fix: compare to None with `is`, not `==`.
    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
    )
def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None):
    """Create a hooked DeiT-B/16 (384px) backbone from timm.

    See _make_pretrained_vitl16_384 for parameter semantics.
    """
    model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)
    # Idiom fix: compare to None with `is`, not `==`.
    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
    )
def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None):
    """Create a hooked distilled DeiT-B/16 (384px) backbone from timm.

    Distilled DeiT carries two special tokens (cls + dist), hence
    start_index=2 so both are treated as readout tokens.
    """
    model = timm.create_model(
        "vit_deit_base_distilled_patch16_384", pretrained=pretrained
    )
    # Idiom fix: compare to None with `is`, not `==`.
    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model,
        features=[96, 192, 384, 768],
        hooks=hooks,
        use_readout=use_readout,
        start_index=2,
    )
def _make_vit_b_rn50_backbone(
    model,
    features=[256, 512, 768, 768],
    size=[384, 384],
    hooks=[0, 1, 8, 11],
    vit_features=768,
    use_vit_only=False,
    use_readout="ignore",
    start_index=1,
):
    """Wrap a hybrid ResNet50+ViT-B timm model into a four-level feature extractor.

    With ``use_vit_only=False`` the first two feature levels come straight
    from the ResNet stem stages (already spatial, so their postprocess stages
    are identity); with ``use_vit_only=True`` all four levels are tapped from
    transformer blocks and need token-to-grid postprocessing.
    """
    pretrained = nn.Module()
    pretrained.model = model
    if use_vit_only == True:
        pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
        pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
    else:
        # Tap the CNN stem stages for the two high-resolution levels.
        pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
            get_activation("1")
        )
        pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
            get_activation("2")
        )
    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
    pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
    pretrained.activations = activations
    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
    if use_vit_only == True:
        # Token levels 1/2: unflatten to a grid and upsample 4x / 2x.
        pretrained.act_postprocess1 = nn.Sequential(
            readout_oper[0],
            Transpose(1, 2),
            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
            nn.Conv2d(
                in_channels=vit_features,
                out_channels=features[0],
                kernel_size=1,
                stride=1,
                padding=0,
            ),
            nn.ConvTranspose2d(
                in_channels=features[0],
                out_channels=features[0],
                kernel_size=4,
                stride=4,
                padding=0,
                bias=True,
                dilation=1,
                groups=1,
            ),
        )
        pretrained.act_postprocess2 = nn.Sequential(
            readout_oper[1],
            Transpose(1, 2),
            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
            nn.Conv2d(
                in_channels=vit_features,
                out_channels=features[1],
                kernel_size=1,
                stride=1,
                padding=0,
            ),
            nn.ConvTranspose2d(
                in_channels=features[1],
                out_channels=features[1],
                kernel_size=2,
                stride=2,
                padding=0,
                bias=True,
                dilation=1,
                groups=1,
            ),
        )
    else:
        # CNN levels are already spatial: pass through unchanged (three slots
        # so indexing in forward_vit stays consistent).
        pretrained.act_postprocess1 = nn.Sequential(
            nn.Identity(), nn.Identity(), nn.Identity()
        )
        pretrained.act_postprocess2 = nn.Sequential(
            nn.Identity(), nn.Identity(), nn.Identity()
        )
    pretrained.act_postprocess3 = nn.Sequential(
        readout_oper[2],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[2],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )
    # Level 4: downsample 2x with a strided conv.
    pretrained.act_postprocess4 = nn.Sequential(
        readout_oper[3],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[3],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.Conv2d(
            in_channels=features[3],
            out_channels=features[3],
            kernel_size=3,
            stride=2,
            padding=1,
        ),
    )
    pretrained.model.start_index = start_index
    pretrained.model.patch_size = [16, 16]
    # We inject this function into the VisionTransformer instances so that
    # we can use it with interpolated position embeddings without modifying the library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
    # We inject this function into the VisionTransformer instances so that
    # we can use it with interpolated position embeddings without modifying the library source.
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model
    )
    return pretrained
def _make_pretrained_vitb_rn50_384(
    pretrained, use_readout="ignore", hooks=None, use_vit_only=False
):
    """Create a hooked hybrid ResNet50+ViT-B (384px) backbone from timm.

    Args:
        pretrained (bool): load timm's pretrained weights.
        use_readout (str): readout-token handling ('ignore'/'add'/'project').
        hooks (list[int], optional): tap indices; defaults when None.
        use_vit_only (bool): tap transformer blocks for all four levels
            instead of the CNN stem for the first two.
    """
    model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)
    # Idiom fix: compare to None with `is`, not `==`.
    hooks = [0, 1, 8, 11] if hooks is None else hooks
    return _make_vit_b_rn50_backbone(
        model,
        features=[256, 512, 768, 768],
        size=[384, 384],
        hooks=hooks,
        use_vit_only=use_vit_only,
        use_readout=use_readout,
    )
| 14,625 | 28.727642 | 96 | py |
dolphin | dolphin-main/modules/annotator/midas/midas/dpt_depth.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .base_model import BaseModel
from .blocks import (
FeatureFusionBlock,
FeatureFusionBlock_custom,
Interpolate,
_make_encoder,
forward_vit,
)
def _make_fusion_block(features, use_bn):
    """Build a DPT decoder fusion block with fixed decoder settings.

    Args:
        features (int): channel width of the block.
        use_bn (bool): enable batch norm inside the residual units.
    """
    return FeatureFusionBlock_custom(
        features,
        nn.ReLU(False),
        deconv=False,
        bn=use_bn,
        expand=False,
        align_corners=True,
    )
class DPT(BaseModel):
    """Dense Prediction Transformer: ViT encoder + RefineNet-style decoder.

    Subclasses supply the task-specific output ``head``.
    """
    def __init__(
        self,
        head,
        features=256,
        backbone="vitb_rn50_384",
        readout="project",
        channels_last=False,
        use_bn=False,
    ):
        super(DPT, self).__init__()
        self.channels_last = channels_last
        # Transformer block indices to tap, per supported backbone.
        hooks = {
            "vitb_rn50_384": [0, 1, 8, 11],
            "vitb16_384": [2, 5, 8, 11],
            "vitl16_384": [5, 11, 17, 23],
        }
        # Instantiate backbone and reassemble blocks
        self.pretrained, self.scratch = _make_encoder(
            backbone,
            features,
            False, # Set to true of you want to train from scratch, uses ImageNet weights
            groups=1,
            expand=False,
            exportable=False,
            hooks=hooks[backbone],
            use_readout=readout,
        )
        self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
        self.scratch.output_conv = head
    def forward(self, x):
        """Encode with the ViT backbone, fuse top-down, apply the head."""
        if self.channels_last == True:
            x.contiguous(memory_format=torch.channels_last)
        layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)
        # Project each tapped level to the decoder width.
        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)
        # Decoder: fuse coarsest to finest.
        path_4 = self.scratch.refinenet4(layer_4_rn)
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
        out = self.scratch.output_conv(path_1)
        return out
class DPTDepthModel(DPT):
    """DPT with a monocular-depth regression head (single-channel output)."""
    def __init__(self, path=None, non_negative=True, **kwargs):
        """Build the depth head and optionally load a checkpoint.

        Args:
            path (str, optional): checkpoint to load. Defaults to None.
            non_negative (bool, optional): clamp predictions with ReLU.
            **kwargs: forwarded to DPT (may include ``features``, ``backbone``...).
        """
        # Idiom: dict.get instead of membership test + index. `features` is
        # intentionally left in kwargs so DPT.__init__ receives it too.
        features = kwargs.get("features", 256)
        head = nn.Sequential(
            nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
            nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )
        super().__init__(head, **kwargs)
        if path is not None:
            self.load(path)
    def forward(self, x):
        """Return an (N, H, W) depth map by squeezing the channel dim."""
        return super().forward(x).squeeze(dim=1)
| 3,154 | 27.681818 | 89 | py |
dolphin | dolphin-main/modules/annotator/midas/midas/midas_net_custom.py | """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
This file contains code that is adapted from
https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
"""
import torch
import torch.nn as nn
from .base_model import BaseModel
from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
class MidasNet_small(BaseModel):
    """Network for monocular depth estimation.

    Lightweight MiDaS variant: EfficientNet-Lite3 encoder plus a fusion
    decoder whose per-level widths optionally expand (features * 2^level).
    """
    def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True,
        blocks={'expand': True}):
        """Init.
        Args:
            path (str, optional): Path to saved model. Defaults to None.
            features (int, optional): Number of features. Defaults to 256.
            backbone (str, optional): Backbone network for encoder. Defaults to resnet50
        """
        print("Loading weights: ", path)
        super(MidasNet_small, self).__init__()
        # Only fetch pretrained encoder weights when no checkpoint is given.
        use_pretrained = False if path else True
        self.channels_last = channels_last
        self.blocks = blocks
        self.backbone = backbone
        self.groups = 1
        features1=features
        features2=features
        features3=features
        features4=features
        self.expand = False
        # With 'expand', deeper decoder levels get proportionally more channels.
        if "expand" in self.blocks and self.blocks['expand'] == True:
            self.expand = True
            features1=features
            features2=features*2
            features3=features*4
            features4=features*8
        self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)
        self.scratch.activation = nn.ReLU(False)
        self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)
        # Output head: upsample 2x and regress a single depth channel.
        self.scratch.output_conv = nn.Sequential(
            nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups),
            Interpolate(scale_factor=2, mode="bilinear"),
            nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1),
            self.scratch.activation,
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )
        if path:
            self.load(path)
    def forward(self, x):
        """Forward pass.
        Args:
            x (tensor): input data (image)
        Returns:
            tensor: depth
        """
        if self.channels_last==True:
            print("self.channels_last = ", self.channels_last)
            x.contiguous(memory_format=torch.channels_last)
        # Encoder stages.
        layer_1 = self.pretrained.layer1(x)
        layer_2 = self.pretrained.layer2(layer_1)
        layer_3 = self.pretrained.layer3(layer_2)
        layer_4 = self.pretrained.layer4(layer_3)
        # Project each stage to its decoder width.
        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)
        # Decoder: fuse coarsest to finest.
        path_4 = self.scratch.refinenet4(layer_4_rn)
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
        out = self.scratch.output_conv(path_1)
        return torch.squeeze(out, dim=1)
def fuse_model(m):
    """Fuse Conv+BN(+ReLU) triples in-place for quantization/inference.

    Scans named_modules in order, tracking a two-module lookback window, and
    calls torch.quantization.fuse_modules on each Conv2d->BatchNorm2d(->ReLU)
    run it finds. Relies on named_modules yielding layers in forward order.
    """
    prev_previous_type = nn.Identity()
    prev_previous_name = ''
    previous_type = nn.Identity()
    previous_name = ''
    for name, module in m.named_modules():
        # Prefer the 3-way Conv+BN+ReLU fusion when the current module is ReLU.
        if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:
            # print("FUSED ", prev_previous_name, previous_name, name)
            torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)
        elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:
            # print("FUSED ", prev_previous_name, previous_name)
            torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)
        # elif previous_type == nn.Conv2d and type(module) == nn.ReLU:
        #    print("FUSED ", previous_name, name)
        #    torch.quantization.fuse_modules(m, [previous_name, name], inplace=True)
        prev_previous_type = previous_type
        prev_previous_name = previous_name
        previous_type = type(module)
        previous_name = name
dolphin | dolphin-main/modules/annotator/midas/midas/blocks.py | import torch
import torch.nn as nn
from .vit import (
_make_pretrained_vitb_rn50_384,
_make_pretrained_vitl16_384,
_make_pretrained_vitb16_384,
forward_vit,
)
def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",):
    """Build an encoder backbone plus its matching 'scratch' projection convs.

    Args:
        backbone (str): one of "vitl16_384", "vitb_rn50_384", "vitb16_384",
            "resnext101_wsl", "efficientnet_lite3".
        features (int): decoder channel width passed to _make_scratch.
        use_pretrained (bool): load pretrained backbone weights.

    Returns:
        tuple: (pretrained backbone module, scratch module with layer*_rn convs).
    """
    if backbone == "vitl16_384":
        pretrained = _make_pretrained_vitl16_384(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [256, 512, 1024, 1024], features, groups=groups, expand=expand
        ) # ViT-L/16 - 85.0% Top1 (backbone)
    elif backbone == "vitb_rn50_384":
        pretrained = _make_pretrained_vitb_rn50_384(
            use_pretrained,
            hooks=hooks,
            use_vit_only=use_vit_only,
            use_readout=use_readout,
        )
        scratch = _make_scratch(
            [256, 512, 768, 768], features, groups=groups, expand=expand
        ) # ViT-H/16 - 85.0% Top1 (backbone)
    elif backbone == "vitb16_384":
        pretrained = _make_pretrained_vitb16_384(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [96, 192, 384, 768], features, groups=groups, expand=expand
        ) # ViT-B/16 - 84.6% Top1 (backbone)
    elif backbone == "resnext101_wsl":
        pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
        scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) # efficientnet_lite3
    elif backbone == "efficientnet_lite3":
        pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
        scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3
    else:
        print(f"Backbone '{backbone}' not implemented")
        assert False
    return pretrained, scratch
def _make_scratch(in_shape, out_shape, groups=1, expand=False):
scratch = nn.Module()
out_shape1 = out_shape
out_shape2 = out_shape
out_shape3 = out_shape
out_shape4 = out_shape
if expand==True:
out_shape1 = out_shape
out_shape2 = out_shape*2
out_shape3 = out_shape*4
out_shape4 = out_shape*8
scratch.layer1_rn = nn.Conv2d(
in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
)
scratch.layer2_rn = nn.Conv2d(
in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
)
scratch.layer3_rn = nn.Conv2d(
in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
)
scratch.layer4_rn = nn.Conv2d(
in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
)
return scratch
def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
    """Fetch tf_efficientnet_lite3 from torch.hub and expose it as a 4-stage backbone.

    Args:
        use_pretrained (bool): download pretrained ImageNet weights.
        exportable (bool): request an export-friendly (e.g. ONNX) variant.

    Returns:
        nn.Module with layer1..layer4 feature stages.
    """
    net = torch.hub.load(
        "rwightman/gen-efficientnet-pytorch",
        "tf_efficientnet_lite3",
        pretrained=use_pretrained,
        exportable=exportable,
    )
    return _make_efficientnet_backbone(net)
def _make_efficientnet_backbone(effnet):
pretrained = nn.Module()
pretrained.layer1 = nn.Sequential(
effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]
)
pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3])
pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5])
pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9])
return pretrained
def _make_resnet_backbone(resnet):
pretrained = nn.Module()
pretrained.layer1 = nn.Sequential(
resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
)
pretrained.layer2 = resnet.layer2
pretrained.layer3 = resnet.layer3
pretrained.layer4 = resnet.layer4
return pretrained
def _make_pretrained_resnext101_wsl(use_pretrained):
    """Fetch ResNeXt-101 32x8d (WSL weights) from torch.hub as a backbone.

    Note: `use_pretrained` is accepted for interface symmetry but unused —
    the hub entry point always loads the pretrained WSL weights.
    """
    net = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
    return _make_resnet_backbone(net)
class Interpolate(nn.Module):
    """Module wrapper around nn.functional.interpolate with fixed settings."""
    def __init__(self, scale_factor, mode, align_corners=False):
        """Store the resampling configuration.

        Args:
            scale_factor (float): spatial scaling factor.
            mode (str): interpolation mode (e.g. "bilinear").
            align_corners (bool): corner-alignment flag forwarded as-is.
        """
        super(Interpolate, self).__init__()
        self.interp = nn.functional.interpolate
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners
    def forward(self, x):
        """Resample `x` with the stored configuration.

        Args:
            x (tensor): input feature map.
        Returns:
            tensor: the resampled feature map.
        """
        return self.interp(
            x,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners,
        )
class ResidualConvUnit(nn.Module):
"""Residual convolution module.
"""
def __init__(self, features):
"""Init.
Args:
features (int): number of features
"""
super().__init__()
self.conv1 = nn.Conv2d(
features, features, kernel_size=3, stride=1, padding=1, bias=True
)
self.conv2 = nn.Conv2d(
features, features, kernel_size=3, stride=1, padding=1, bias=True
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
"""Forward pass.
Args:
x (tensor): input
Returns:
tensor: output
"""
out = self.relu(x)
out = self.conv1(out)
out = self.relu(out)
out = self.conv2(out)
return out + x
class FeatureFusionBlock(nn.Module):
    """Fuse decoder features: optionally merge a lateral skip path through a
    residual unit, refine, then bilinearly upsample by a factor of two."""
    def __init__(self, features):
        """Build the block.

        Args:
            features (int): channel count of every input feature map.
        """
        super(FeatureFusionBlock, self).__init__()
        self.resConfUnit1 = ResidualConvUnit(features)
        self.resConfUnit2 = ResidualConvUnit(features)
    def forward(self, *xs):
        """Fuse one or two feature maps and return the result upsampled x2."""
        fused = xs[0]
        if len(xs) == 2:
            # in-place accumulate the refined lateral features (as upstream)
            fused += self.resConfUnit1(xs[1])
        fused = self.resConfUnit2(fused)
        return nn.functional.interpolate(
            fused, scale_factor=2, mode="bilinear", align_corners=True
        )
class ResidualConvUnit_custom(nn.Module):
"""Residual convolution module.
"""
def __init__(self, features, activation, bn):
"""Init.
Args:
features (int): number of features
"""
super().__init__()
self.bn = bn
self.groups=1
self.conv1 = nn.Conv2d(
features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
)
self.conv2 = nn.Conv2d(
features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
)
if self.bn==True:
self.bn1 = nn.BatchNorm2d(features)
self.bn2 = nn.BatchNorm2d(features)
self.activation = activation
self.skip_add = nn.quantized.FloatFunctional()
def forward(self, x):
"""Forward pass.
Args:
x (tensor): input
Returns:
tensor: output
"""
out = self.activation(x)
out = self.conv1(out)
if self.bn==True:
out = self.bn1(out)
out = self.activation(out)
out = self.conv2(out)
if self.bn==True:
out = self.bn2(out)
if self.groups > 1:
out = self.conv_merge(out)
return self.skip_add.add(out, x)
# return out + x
class FeatureFusionBlock_custom(nn.Module):
    """Quantization-friendly feature fusion: residual refinement, x2 bilinear
    upsample, then a 1x1 projection (optionally halving the channels)."""
    def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True):
        """Build the block.

        Args:
            features (int): input channel count.
            activation (nn.Module): activation used inside the residual units.
            deconv (bool): stored for interface compatibility; not used here.
            bn (bool): enable batch norm inside the residual units.
            expand (bool): when True the 1x1 output conv halves the channels.
            align_corners (bool): forwarded to the bilinear upsample.
        """
        super(FeatureFusionBlock_custom, self).__init__()
        self.deconv = deconv
        self.align_corners = align_corners
        self.groups = 1
        self.expand = expand
        out_features = features // 2 if self.expand == True else features
        self.out_conv = nn.Conv2d(
            features, out_features, kernel_size=1, stride=1, padding=0,
            bias=True, groups=1,
        )
        self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
        self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
        self.skip_add = nn.quantized.FloatFunctional()
    def forward(self, *xs):
        """Fuse one or two feature maps, upsample x2, and project."""
        fused = xs[0]
        if len(xs) == 2:
            # refine the lateral input and add it via the quantization-aware op
            fused = self.skip_add.add(fused, self.resConfUnit1(xs[1]))
        fused = self.resConfUnit2(fused)
        fused = nn.functional.interpolate(
            fused, scale_factor=2, mode="bilinear", align_corners=self.align_corners
        )
        return self.out_conv(fused)
| 9,242 | 25.947522 | 150 | py |
dolphin | dolphin-main/modules/annotator/openpose/hand.py | import cv2
import json
import numpy as np
import math
import time
from scipy.ndimage.filters import gaussian_filter
import matplotlib.pyplot as plt
import matplotlib
import torch
from skimage.measure import label
from .model import handpose_model
from . import util
class Hand(object):
    """Hand keypoint estimator (CMU OpenPose hand model, 21 keypoints)."""
    def __init__(self, model_path, device=None):
        """Load the hand-pose network.

        Args:
            model_path: path to the caffe-converted .pth checkpoint.
            device: torch device; defaults to CUDA when available.
        """
        self.device = device or torch.device(
            "cuda" if torch.cuda.is_available() else "cpu"
        )
        self.model = handpose_model().to(self.device)
        # checkpoint keys come from the caffe conversion; remap to this model
        model_dict = util.transfer(self.model, torch.load(model_path))
        self.model.load_state_dict(model_dict)
        self.model.eval()
    def __call__(self, oriImg):
        """Detect 21 hand keypoints in a BGR image crop.

        Runs the network at several scales, averages the heatmaps, and picks
        one peak per keypoint from the strongest connected component.

        Returns:
            (21, 2) array of (x, y) positions; undetected joints are [0, 0].
        """
        scale_search = [0.5, 1.0, 1.5, 2.0]
        # scale_search = [0.5]
        boxsize = 368
        stride = 8
        padValue = 128
        thre = 0.05
        # scale factors that map the image height to multiples of the net's boxsize
        multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
        heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 22))
        # paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
        for m in range(len(multiplier)):
            scale = multiplier[m]
            imageToTest = cv2.resize(
                oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC
            )
            # pad so both dimensions are multiples of the network stride
            imageToTest_padded, pad = util.padRightDownCorner(
                imageToTest, stride, padValue
            )
            # HWC uint8 -> NCHW float normalized to roughly [-0.5, 0.5)
            im = (
                np.transpose(
                    np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)
                )
                / 256
                - 0.5
            )
            im = np.ascontiguousarray(im)
            data = torch.from_numpy(im).float().to(self.device)
            # data = data.permute([2, 0, 1]).unsqueeze(0).float()
            with torch.no_grad():
                output = self.model(data).cpu().numpy()
                # output = self.model(data).numpy()q
            # extract outputs, resize, and remove padding
            heatmap = np.transpose(
                np.squeeze(output), (1, 2, 0)
            )  # output 1 is heatmaps
            heatmap = cv2.resize(
                heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC
            )
            heatmap = heatmap[
                : imageToTest_padded.shape[0] - pad[2],
                : imageToTest_padded.shape[1] - pad[3],
                :,
            ]
            heatmap = cv2.resize(
                heatmap,
                (oriImg.shape[1], oriImg.shape[0]),
                interpolation=cv2.INTER_CUBIC,
            )
            heatmap_avg += heatmap / len(multiplier)
        all_peaks = []
        for part in range(21):
            map_ori = heatmap_avg[:, :, part]
            one_heatmap = gaussian_filter(map_ori, sigma=3)
            binary = np.ascontiguousarray(one_heatmap > thre, dtype=np.uint8)
            # every response is below the threshold: joint not detected
            if np.sum(binary) == 0:
                all_peaks.append([0, 0])
                continue
            # keep only the connected component with the largest total response
            label_img, label_numbers = label(
                binary, return_num=True, connectivity=binary.ndim
            )
            max_index = (
                np.argmax(
                    [
                        np.sum(map_ori[label_img == i])
                        for i in range(1, label_numbers + 1)
                    ]
                )
                + 1
            )
            label_img[label_img != max_index] = 0
            map_ori[label_img == 0] = 0
            # peak location inside the winning component (npmax returns row, col)
            y, x = util.npmax(map_ori)
            all_peaks.append([x, y])
        return np.array(all_peaks)
| 3,588 | 32.542056 | 85 | py |
dolphin | dolphin-main/modules/annotator/openpose/model.py | import torch
from collections import OrderedDict
import torch
import torch.nn as nn
def make_layers(block, no_relu_layers):
    """Build an nn.Sequential from an ordered {name: config} spec.

    Names containing "pool" map to MaxPool2d(kernel, stride, padding); any
    other entry maps to Conv2d(in, out, kernel, stride, padding) followed by
    an in-place ReLU named "relu_<name>", unless the layer name appears in
    `no_relu_layers`.

    Args:
        block (OrderedDict): layer name -> parameter list (order preserved).
        no_relu_layers (list[str]): conv layers that must not get a ReLU.

    Returns:
        nn.Sequential with the layers registered under their given names.
    """
    modules = []
    for name, cfg in block.items():
        if "pool" in name:
            modules.append(
                (name, nn.MaxPool2d(kernel_size=cfg[0], stride=cfg[1], padding=cfg[2]))
            )
        else:
            conv = nn.Conv2d(
                in_channels=cfg[0],
                out_channels=cfg[1],
                kernel_size=cfg[2],
                stride=cfg[3],
                padding=cfg[4],
            )
            modules.append((name, conv))
            if name not in no_relu_layers:
                modules.append(("relu_" + name, nn.ReLU(inplace=True)))
    return nn.Sequential(OrderedDict(modules))
class bodypose_model(nn.Module):
    """CMU OpenPose body network.

    A VGG-19-style trunk (model0) feeds six refinement stages, each with two
    branches: L1 predicts 38 part-affinity-field channels, L2 predicts 19
    keypoint heatmap channels. Each later stage consumes the previous stage's
    two outputs concatenated with the trunk features (38 + 19 + 128 = 185 ch).
    """
    def __init__(self):
        super(bodypose_model, self).__init__()
        # Final prediction layers emit raw maps and must not be followed by a
        # ReLU. BUG FIX: the original list contained "Mconv7_stage6_L1" twice
        # and omitted "Mconv7_stage6_L2", so a spurious ReLU was appended after
        # the final stage-6 heatmap layer (every other stage lists both
        # branches). ReLU has no parameters, so checkpoint loading is
        # unaffected by the fix.
        no_relu_layers = [
            "conv5_5_CPM_L1",
            "conv5_5_CPM_L2",
            "Mconv7_stage2_L1",
            "Mconv7_stage2_L2",
            "Mconv7_stage3_L1",
            "Mconv7_stage3_L2",
            "Mconv7_stage4_L1",
            "Mconv7_stage4_L2",
            "Mconv7_stage5_L1",
            "Mconv7_stage5_L2",
            "Mconv7_stage6_L1",
            "Mconv7_stage6_L2",
        ]
        blocks = {}
        # shared VGG-style trunk; conv4_4_CPM emits the 128-channel features
        block0 = OrderedDict(
            [
                ("conv1_1", [3, 64, 3, 1, 1]),
                ("conv1_2", [64, 64, 3, 1, 1]),
                ("pool1_stage1", [2, 2, 0]),
                ("conv2_1", [64, 128, 3, 1, 1]),
                ("conv2_2", [128, 128, 3, 1, 1]),
                ("pool2_stage1", [2, 2, 0]),
                ("conv3_1", [128, 256, 3, 1, 1]),
                ("conv3_2", [256, 256, 3, 1, 1]),
                ("conv3_3", [256, 256, 3, 1, 1]),
                ("conv3_4", [256, 256, 3, 1, 1]),
                ("pool3_stage1", [2, 2, 0]),
                ("conv4_1", [256, 512, 3, 1, 1]),
                ("conv4_2", [512, 512, 3, 1, 1]),
                ("conv4_3_CPM", [512, 256, 3, 1, 1]),
                ("conv4_4_CPM", [256, 128, 3, 1, 1]),
            ]
        )
        # Stage 1: PAF branch (L1, 38 ch) and heatmap branch (L2, 19 ch)
        block1_1 = OrderedDict(
            [
                ("conv5_1_CPM_L1", [128, 128, 3, 1, 1]),
                ("conv5_2_CPM_L1", [128, 128, 3, 1, 1]),
                ("conv5_3_CPM_L1", [128, 128, 3, 1, 1]),
                ("conv5_4_CPM_L1", [128, 512, 1, 1, 0]),
                ("conv5_5_CPM_L1", [512, 38, 1, 1, 0]),
            ]
        )
        block1_2 = OrderedDict(
            [
                ("conv5_1_CPM_L2", [128, 128, 3, 1, 1]),
                ("conv5_2_CPM_L2", [128, 128, 3, 1, 1]),
                ("conv5_3_CPM_L2", [128, 128, 3, 1, 1]),
                ("conv5_4_CPM_L2", [128, 512, 1, 1, 0]),
                ("conv5_5_CPM_L2", [512, 19, 1, 1, 0]),
            ]
        )
        blocks["block1_1"] = block1_1
        blocks["block1_2"] = block1_2
        self.model0 = make_layers(block0, no_relu_layers)
        # Stages 2 - 6: each refines with 7x7 convs over the 185-ch concat
        for i in range(2, 7):
            blocks["block%d_1" % i] = OrderedDict(
                [
                    ("Mconv1_stage%d_L1" % i, [185, 128, 7, 1, 3]),
                    ("Mconv2_stage%d_L1" % i, [128, 128, 7, 1, 3]),
                    ("Mconv3_stage%d_L1" % i, [128, 128, 7, 1, 3]),
                    ("Mconv4_stage%d_L1" % i, [128, 128, 7, 1, 3]),
                    ("Mconv5_stage%d_L1" % i, [128, 128, 7, 1, 3]),
                    ("Mconv6_stage%d_L1" % i, [128, 128, 1, 1, 0]),
                    ("Mconv7_stage%d_L1" % i, [128, 38, 1, 1, 0]),
                ]
            )
            blocks["block%d_2" % i] = OrderedDict(
                [
                    ("Mconv1_stage%d_L2" % i, [185, 128, 7, 1, 3]),
                    ("Mconv2_stage%d_L2" % i, [128, 128, 7, 1, 3]),
                    ("Mconv3_stage%d_L2" % i, [128, 128, 7, 1, 3]),
                    ("Mconv4_stage%d_L2" % i, [128, 128, 7, 1, 3]),
                    ("Mconv5_stage%d_L2" % i, [128, 128, 7, 1, 3]),
                    ("Mconv6_stage%d_L2" % i, [128, 128, 1, 1, 0]),
                    ("Mconv7_stage%d_L2" % i, [128, 19, 1, 1, 0]),
                ]
            )
        for k in blocks.keys():
            blocks[k] = make_layers(blocks[k], no_relu_layers)
        self.model1_1 = blocks["block1_1"]
        self.model2_1 = blocks["block2_1"]
        self.model3_1 = blocks["block3_1"]
        self.model4_1 = blocks["block4_1"]
        self.model5_1 = blocks["block5_1"]
        self.model6_1 = blocks["block6_1"]
        self.model1_2 = blocks["block1_2"]
        self.model2_2 = blocks["block2_2"]
        self.model3_2 = blocks["block3_2"]
        self.model4_2 = blocks["block4_2"]
        self.model5_2 = blocks["block5_2"]
        self.model6_2 = blocks["block6_2"]
    def forward(self, x):
        """Run all six stages.

        Args:
            x: (N, 3, H, W) input batch.
        Returns:
            (paf, heatmap) tuple from stage 6: (N, 38, H/8, W/8) and
            (N, 19, H/8, W/8).
        """
        out1 = self.model0(x)
        out1_1 = self.model1_1(out1)
        out1_2 = self.model1_2(out1)
        out2 = torch.cat([out1_1, out1_2, out1], 1)
        out2_1 = self.model2_1(out2)
        out2_2 = self.model2_2(out2)
        out3 = torch.cat([out2_1, out2_2, out1], 1)
        out3_1 = self.model3_1(out3)
        out3_2 = self.model3_2(out3)
        out4 = torch.cat([out3_1, out3_2, out1], 1)
        out4_1 = self.model4_1(out4)
        out4_2 = self.model4_2(out4)
        out5 = torch.cat([out4_1, out4_2, out1], 1)
        out5_1 = self.model5_1(out5)
        out5_2 = self.model5_2(out5)
        out6 = torch.cat([out5_1, out5_2, out1], 1)
        out6_1 = self.model6_1(out6)
        out6_2 = self.model6_2(out6)
        return out6_1, out6_2
class handpose_model(nn.Module):
    """CMU OpenPose hand network.

    A VGG-style trunk (model1_0) feeds a first prediction head and five
    refinement stages; every stage emits 22 heatmap channels (21 hand
    keypoints + background). Each later stage consumes the previous stage's
    output concatenated with the trunk features (22 + 128 = 150 channels).
    """
    def __init__(self):
        super(handpose_model, self).__init__()
        # final prediction layer of every stage outputs raw heatmaps (no ReLU)
        no_relu_layers = [
            "conv6_2_CPM",
            "Mconv7_stage2",
            "Mconv7_stage3",
            "Mconv7_stage4",
            "Mconv7_stage5",
            "Mconv7_stage6",
        ]
        # stage 1
        block1_0 = OrderedDict(
            [
                ("conv1_1", [3, 64, 3, 1, 1]),
                ("conv1_2", [64, 64, 3, 1, 1]),
                ("pool1_stage1", [2, 2, 0]),
                ("conv2_1", [64, 128, 3, 1, 1]),
                ("conv2_2", [128, 128, 3, 1, 1]),
                ("pool2_stage1", [2, 2, 0]),
                ("conv3_1", [128, 256, 3, 1, 1]),
                ("conv3_2", [256, 256, 3, 1, 1]),
                ("conv3_3", [256, 256, 3, 1, 1]),
                ("conv3_4", [256, 256, 3, 1, 1]),
                ("pool3_stage1", [2, 2, 0]),
                ("conv4_1", [256, 512, 3, 1, 1]),
                ("conv4_2", [512, 512, 3, 1, 1]),
                ("conv4_3", [512, 512, 3, 1, 1]),
                ("conv4_4", [512, 512, 3, 1, 1]),
                ("conv5_1", [512, 512, 3, 1, 1]),
                ("conv5_2", [512, 512, 3, 1, 1]),
                ("conv5_3_CPM", [512, 128, 3, 1, 1]),
            ]
        )
        block1_1 = OrderedDict(
            [("conv6_1_CPM", [128, 512, 1, 1, 0]), ("conv6_2_CPM", [512, 22, 1, 1, 0])]
        )
        blocks = {}
        blocks["block1_0"] = block1_0
        blocks["block1_1"] = block1_1
        # stage 2-6: refinement over the 150-channel concatenation
        for i in range(2, 7):
            blocks["block%d" % i] = OrderedDict(
                [
                    ("Mconv1_stage%d" % i, [150, 128, 7, 1, 3]),
                    ("Mconv2_stage%d" % i, [128, 128, 7, 1, 3]),
                    ("Mconv3_stage%d" % i, [128, 128, 7, 1, 3]),
                    ("Mconv4_stage%d" % i, [128, 128, 7, 1, 3]),
                    ("Mconv5_stage%d" % i, [128, 128, 7, 1, 3]),
                    ("Mconv6_stage%d" % i, [128, 128, 1, 1, 0]),
                    ("Mconv7_stage%d" % i, [128, 22, 1, 1, 0]),
                ]
            )
        for k in blocks.keys():
            blocks[k] = make_layers(blocks[k], no_relu_layers)
        self.model1_0 = blocks["block1_0"]
        self.model1_1 = blocks["block1_1"]
        self.model2 = blocks["block2"]
        self.model3 = blocks["block3"]
        self.model4 = blocks["block4"]
        self.model5 = blocks["block5"]
        self.model6 = blocks["block6"]
    def forward(self, x):
        """Run all six stages; returns the stage-6 heatmaps (N, 22, H/8, W/8)."""
        out1_0 = self.model1_0(x)
        out1_1 = self.model1_1(out1_0)
        concat_stage2 = torch.cat([out1_1, out1_0], 1)
        out_stage2 = self.model2(concat_stage2)
        concat_stage3 = torch.cat([out_stage2, out1_0], 1)
        out_stage3 = self.model3(concat_stage3)
        concat_stage4 = torch.cat([out_stage3, out1_0], 1)
        out_stage4 = self.model4(concat_stage4)
        concat_stage5 = torch.cat([out_stage4, out1_0], 1)
        out_stage5 = self.model5(concat_stage5)
        concat_stage6 = torch.cat([out_stage5, out1_0], 1)
        out_stage6 = self.model6(concat_stage6)
        return out_stage6
| 8,853 | 34.416 | 87 | py |
dolphin | dolphin-main/modules/annotator/openpose/util.py | import math
import numpy as np
import matplotlib
import cv2
def padRightDownCorner(img, stride, padValue):
    """Pad an image on the bottom/right so H and W become multiples of stride.

    Args:
        img: (H, W, C) array.
        stride: target divisor for both spatial dimensions.
        padValue: constant fill value for the padded border.

    Returns:
        (img_padded, pad): the padded array and pad = [up, left, down, right]
        pixel counts (up and left are always zero).
    """
    h, w = img.shape[0], img.shape[1]
    # amount needed to round each dimension up to a multiple of `stride`
    pad = [0, 0, (stride - h % stride) % stride, (stride - w % stride) % stride]
    img_padded = img
    bottom = np.full(
        (pad[2],) + img_padded.shape[1:], padValue, dtype=img_padded.dtype
    )
    img_padded = np.concatenate((img_padded, bottom), axis=0)
    right = np.full(
        (img_padded.shape[0], pad[3]) + img_padded.shape[2:],
        padValue,
        dtype=img_padded.dtype,
    )
    img_padded = np.concatenate((img_padded, right), axis=1)
    return img_padded, pad
# transfer caffe model to pytorch which will match the layer name
def transfer(model, model_weights):
    """Rename caffe-converted weights to match this model's layer names.

    For every key in the target model's state_dict, look up the source weight
    whose key equals the model key with its first dot-separated component
    stripped (e.g. "model0.conv1_1.weight" -> "conv1_1.weight").

    Args:
        model: target nn.Module.
        model_weights: dict of source tensors keyed without the module prefix.

    Returns:
        dict keyed by the model's own state_dict names.
    """
    return {
        name: model_weights[".".join(name.split(".")[1:])]
        for name in model.state_dict().keys()
    }
# draw the body keypoint and lims
def draw_bodypose(canvas, candidate, subset):
    """Draw OpenPose body skeletons (18 joints, 17 limbs) onto `canvas`.

    Args:
        canvas: HxWx3 image drawn on via cv2 (circles in place, limbs blended).
        candidate: (K, 4) array of [x, y, score, id] keypoint detections.
        subset: (P, 20) array; columns 0-17 index into candidate per person
            (-1 = joint missing).
    Returns:
        the canvas with all skeletons drawn.
    """
    stickwidth = 4
    # limb endpoints as 1-based joint indices (converted to 0-based below)
    limbSeq = [
        [2, 3],
        [2, 6],
        [3, 4],
        [4, 5],
        [6, 7],
        [7, 8],
        [2, 9],
        [9, 10],
        [10, 11],
        [2, 12],
        [12, 13],
        [13, 14],
        [2, 1],
        [1, 15],
        [15, 17],
        [1, 16],
        [16, 18],
        [3, 17],
        [6, 18],
    ]
    # one BGR color per joint/limb index
    colors = [
        [255, 0, 0],
        [255, 85, 0],
        [255, 170, 0],
        [255, 255, 0],
        [170, 255, 0],
        [85, 255, 0],
        [0, 255, 0],
        [0, 255, 85],
        [0, 255, 170],
        [0, 255, 255],
        [0, 170, 255],
        [0, 85, 255],
        [0, 0, 255],
        [85, 0, 255],
        [170, 0, 255],
        [255, 0, 255],
        [255, 0, 170],
        [255, 0, 85],
    ]
    # draw the joints as filled circles
    for i in range(18):
        for n in range(len(subset)):
            index = int(subset[n][i])
            if index == -1:
                continue
            x, y = candidate[index][0:2]
            cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
    # draw the limbs as rotated ellipses, alpha-blended onto the canvas
    for i in range(17):
        for n in range(len(subset)):
            index = subset[n][np.array(limbSeq[i]) - 1]
            if -1 in index:
                continue
            cur_canvas = canvas.copy()
            # NOTE: X holds the row (y) coordinates and Y the column (x)
            # coordinates; cv2.ellipse2Poly below receives (int(mY), int(mX)).
            Y = candidate[index.astype(int), 0]
            X = candidate[index.astype(int), 1]
            mX = np.mean(X)
            mY = np.mean(Y)
            length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
            angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
            polygon = cv2.ellipse2Poly(
                (int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1
            )
            cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
            # 60/40 blend so overlapping limbs stay visible
            canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
    # plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]])
    # plt.imshow(canvas[:, :, [2, 1, 0]])
    return canvas
# image drawed by opencv is not good.
# image drawed by opencv is not good.
def draw_handpose(canvas, all_hand_peaks, show_number=False):
    """Draw 21-keypoint hand skeletons onto `canvas`.

    Args:
        canvas: HxWx3 image drawn on in place via cv2 calls.
        all_hand_peaks: iterable of (21, 2) integer arrays of (x, y) keypoints;
            joints at (0, 0) are treated as undetected.
        show_number: when True, also draw each keypoint's index.
    Returns:
        the canvas with the skeletons drawn.
    """
    # finger bone connectivity over the 21 keypoints (wrist = 0)
    edges = [
        [0, 1],
        [1, 2],
        [2, 3],
        [3, 4],
        [0, 5],
        [5, 6],
        [6, 7],
        [7, 8],
        [0, 9],
        [9, 10],
        [10, 11],
        [11, 12],
        [0, 13],
        [13, 14],
        [14, 15],
        [15, 16],
        [0, 17],
        [17, 18],
        [18, 19],
        [19, 20],
    ]
    for peaks in all_hand_peaks:
        for ie, e in enumerate(edges):
            # draw the bone only if neither endpoint is the (0, 0) sentinel
            if np.sum(np.all(peaks[e], axis=1) == 0) == 0:
                x1, y1 = peaks[e[0]]
                x2, y2 = peaks[e[1]]
                cv2.line(
                    canvas,
                    (x1, y1),
                    (x2, y2),
                    # hue varies along the finger chain for distinct bone colors
                    matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0])
                    * 255,
                    thickness=2,
                )
        for i, keyponit in enumerate(peaks):
            x, y = keyponit
            cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
            if show_number:
                cv2.putText(
                    canvas,
                    str(i),
                    (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.3,
                    (0, 0, 0),
                    lineType=cv2.LINE_AA,
                )
    return canvas
# detect hand according to body pose keypoints
# please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp
def handDetect(candidate, subset, oriImg):
    """Infer square hand bounding boxes from body keypoints.

    Ports the OpenPose heuristic (handDetector.cpp): the box center is
    extrapolated past the wrist along the elbow->wrist direction, and the box
    side is 1.5x the larger of the wrist-elbow and 0.9x elbow-shoulder
    distances.

    Args:
        candidate: (K, 4) array of [x, y, score, id] body keypoints.
        subset: (P, 20) per-person rows indexing into candidate (-1 = missing).
        oriImg: image whose shape bounds the boxes.

    Returns:
        list of [x, y, w, is_left]: integer top-left corner and side length
        (width == height since the hand network takes square crops), plus a
        flag that is True for the left hand. Boxes narrower than 20 px are
        discarded.
    """
    ratioWristElbow = 0.33
    detect_result = []
    image_height, image_width = oriImg.shape[0:2]
    for person in subset.astype(int):
        hands = []
        # (shoulder, elbow, wrist) joint indices for each side:
        # left hand uses joints 5/6/7, right hand uses joints 2/3/4
        for indices, is_left in (((5, 6, 7), True), ((2, 3, 4), False)):
            if np.any(person[list(indices)] == -1):
                continue  # skip a side with any of the three joints missing
            shoulder, elbow, wrist = (candidate[person[i]][:2] for i in indices)
            hands.append((shoulder, elbow, wrist, is_left))
        for (x1, y1), (x2, y2), (x3, y3), is_left in hands:
            # center: extrapolate past the wrist along the elbow->wrist vector
            x = x3 + ratioWristElbow * (x3 - x2)
            y = y3 + ratioWristElbow * (y3 - y2)
            distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2)
            distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
            width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)
            # shift from center to the top-left corner, clamped to the image;
            # then shrink the side so the box never overflows the image
            x = max(x - width / 2, 0)
            y = max(y - width / 2, 0)
            width = min(width, image_width - x, image_height - y)
            # discard boxes smaller than 20 px
            if width >= 20:
                detect_result.append([int(x), int(y), int(width), is_left])
    return detect_result
# get max index of 2d array
def npmax(array):
    """Return the (row, col) index of the maximum entry of a 2-D array.

    Ties resolve to the first occurrence in row-major order, matching the
    per-row argmax formulation of the original implementation.
    """
    i, j = np.unravel_index(array.argmax(), array.shape)
    return i, j
| 8,372 | 32.626506 | 121 | py |
dolphin | dolphin-main/modules/annotator/openpose/__init__.py | import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import torch
import numpy as np
from . import util
from .body import Body
from .hand import Hand
from ..util import annotator_ckpts_path
body_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/body_pose_model.pth"
hand_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/hand_pose_model.pth"
class OpenposeDetector:
    """Pose annotator: body skeleton (and optionally hands) via CMU OpenPose."""
    def __init__(self, device=None):
        """Download checkpoints on first use and build the two estimators.

        Args:
            device: torch device; defaults to CUDA when available.
        """
        body_modelpath = os.path.join(annotator_ckpts_path, "body_pose_model.pth")
        hand_modelpath = os.path.join(annotator_ckpts_path, "hand_pose_model.pth")
        # NOTE(review): only the hand checkpoint's presence is checked; if it
        # exists but the body checkpoint is missing, neither is downloaded.
        if not os.path.exists(hand_modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(body_model_path, model_dir=annotator_ckpts_path)
            load_file_from_url(hand_model_path, model_dir=annotator_ckpts_path)
        device = device or torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.body_estimation = Body(body_modelpath, device)
        self.hand_estimation = Hand(hand_modelpath, device)
    def __call__(self, oriImg, hand=False):
        """Render a pose skeleton for an input image.

        Args:
            oriImg: HxWx3 image; channel order is reversed internally (RGB->BGR).
            hand: when True, additionally run hand keypoint detection on crops
                inferred from the body pose.
        Returns:
            (canvas, pose): the skeleton drawn on a black canvas of the same
            shape, and a dict with the raw body candidate/subset as lists.
        """
        oriImg = oriImg[:, :, ::-1].copy()
        with torch.no_grad():
            candidate, subset = self.body_estimation(oriImg)
            canvas = np.zeros_like(oriImg)
            canvas = util.draw_bodypose(canvas, candidate, subset)
            if hand:
                hands_list = util.handDetect(candidate, subset, oriImg)
                all_hand_peaks = []
                for x, y, w, is_left in hands_list:
                    # run the hand model on the square crop, then shift the
                    # detected peaks back to full-image coordinates (keeping
                    # the [0, 0] "not detected" sentinel unshifted)
                    peaks = self.hand_estimation(oriImg[y : y + w, x : x + w, :])
                    peaks[:, 0] = np.where(
                        peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x
                    )
                    peaks[:, 1] = np.where(
                        peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y
                    )
                    all_hand_peaks.append(peaks)
                canvas = util.draw_handpose(canvas, all_hand_peaks)
            return canvas, dict(candidate=candidate.tolist(), subset=subset.tolist())
| 2,170 | 40.75 | 113 | py |
dolphin | dolphin-main/modules/annotator/openpose/body.py | import cv2
import numpy as np
import math
from scipy.ndimage.filters import gaussian_filter
import torch
from torchvision import transforms
from . import util
from .model import bodypose_model
class Body(object):
    """Body keypoint estimator (CMU OpenPose: 18 joints + PAF limb linking)."""
    def __init__(self, model_path, device=None):
        """Load the body-pose network.

        Args:
            model_path: path to the caffe-converted .pth checkpoint.
            device: torch device; defaults to CUDA when available.
        """
        self.device = device or torch.device(
            "cuda" if torch.cuda.is_available() else "cpu"
        )
        self.model = bodypose_model().to(self.device)
        # checkpoint keys come from the caffe conversion; remap to this model
        model_dict = util.transfer(self.model, torch.load(model_path))
        self.model.load_state_dict(model_dict)
        self.model.eval()
    def __call__(self, oriImg):
        """Detect multi-person body keypoints in a BGR image.

        Returns:
            candidate: (K, 4) array of [x, y, score, id] for every keypoint peak.
            subset: (P, 20) array, one row per person; columns 0-17 index into
                candidate (-1 = joint missing), column 18 is the person's total
                score and column 19 the number of detected parts.
        """
        # scale_search = [0.5, 1.0, 1.5, 2.0]
        scale_search = [0.5]
        boxsize = 368
        stride = 8
        padValue = 128
        thre1 = 0.1
        thre2 = 0.05
        # scale factors that map the image height to multiples of the net's boxsize
        multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
        heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
        paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
        for m in range(len(multiplier)):
            scale = multiplier[m]
            imageToTest = cv2.resize(
                oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC
            )
            imageToTest_padded, pad = util.padRightDownCorner(
                imageToTest, stride, padValue
            )
            # HWC uint8 -> NCHW float normalized to roughly [-0.5, 0.5)
            im = (
                np.transpose(
                    np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)
                )
                / 256
                - 0.5
            )
            im = np.ascontiguousarray(im)
            data = torch.from_numpy(im).float().to(self.device)
            # data = data.permute([2, 0, 1]).unsqueeze(0).float()
            with torch.no_grad():
                Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
            Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
            Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
            # extract outputs, resize, and remove padding
            # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps
            heatmap = np.transpose(
                np.squeeze(Mconv7_stage6_L2), (1, 2, 0)
            )  # output 1 is heatmaps
            heatmap = cv2.resize(
                heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC
            )
            heatmap = heatmap[
                : imageToTest_padded.shape[0] - pad[2],
                : imageToTest_padded.shape[1] - pad[3],
                :,
            ]
            heatmap = cv2.resize(
                heatmap,
                (oriImg.shape[1], oriImg.shape[0]),
                interpolation=cv2.INTER_CUBIC,
            )
            # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs
            paf = np.transpose(
                np.squeeze(Mconv7_stage6_L1), (1, 2, 0)
            )  # output 0 is PAFs
            paf = cv2.resize(
                paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC
            )
            paf = paf[
                : imageToTest_padded.shape[0] - pad[2],
                : imageToTest_padded.shape[1] - pad[3],
                :,
            ]
            paf = cv2.resize(
                paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC
            )
            # BUG FIX: original read `heatmap_avg += heatmap_avg + heatmap / len(multiplier)`,
            # which doubles the running accumulator every scale iteration.
            # Harmless with the single default scale (accumulator starts at
            # zero), but wrong for multi-scale averaging.
            heatmap_avg = heatmap_avg + heatmap / len(multiplier)
            paf_avg += paf / len(multiplier)
        # --- per-joint peak detection via 4-neighbour non-maximum suppression ---
        all_peaks = []
        peak_counter = 0
        for part in range(18):
            map_ori = heatmap_avg[:, :, part]
            one_heatmap = gaussian_filter(map_ori, sigma=3)
            map_left = np.zeros(one_heatmap.shape)
            map_left[1:, :] = one_heatmap[:-1, :]
            map_right = np.zeros(one_heatmap.shape)
            map_right[:-1, :] = one_heatmap[1:, :]
            map_up = np.zeros(one_heatmap.shape)
            map_up[:, 1:] = one_heatmap[:, :-1]
            map_down = np.zeros(one_heatmap.shape)
            map_down[:, :-1] = one_heatmap[:, 1:]
            peaks_binary = np.logical_and.reduce(
                (
                    one_heatmap >= map_left,
                    one_heatmap >= map_right,
                    one_heatmap >= map_up,
                    one_heatmap >= map_down,
                    one_heatmap > thre1,
                )
            )
            peaks = list(
                zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])
            )  # note reverse
            peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
            peak_id = range(peak_counter, peak_counter + len(peaks))
            peaks_with_score_and_id = [
                peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))
            ]
            all_peaks.append(peaks_with_score_and_id)
            peak_counter += len(peaks)
        # find connection in the specified sequence, center 29 is in the position 15
        limbSeq = [
            [2, 3],
            [2, 6],
            [3, 4],
            [4, 5],
            [6, 7],
            [7, 8],
            [2, 9],
            [9, 10],
            [10, 11],
            [2, 12],
            [12, 13],
            [13, 14],
            [2, 1],
            [1, 15],
            [15, 17],
            [1, 16],
            [16, 18],
            [3, 17],
            [6, 18],
        ]
        # the middle joints heatmap correpondence
        mapIdx = [
            [31, 32],
            [39, 40],
            [33, 34],
            [35, 36],
            [41, 42],
            [43, 44],
            [19, 20],
            [21, 22],
            [23, 24],
            [25, 26],
            [27, 28],
            [29, 30],
            [47, 48],
            [49, 50],
            [53, 54],
            [51, 52],
            [55, 56],
            [37, 38],
            [45, 46],
        ]
        # --- limb linking: score candidate joint pairs along the PAFs ---
        connection_all = []
        special_k = []
        mid_num = 10
        for k in range(len(mapIdx)):
            score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
            candA = all_peaks[limbSeq[k][0] - 1]
            candB = all_peaks[limbSeq[k][1] - 1]
            nA = len(candA)
            nB = len(candB)
            indexA, indexB = limbSeq[k]
            if nA != 0 and nB != 0:
                connection_candidate = []
                for i in range(nA):
                    for j in range(nB):
                        vec = np.subtract(candB[j][:2], candA[i][:2])
                        norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                        norm = max(0.001, norm)
                        vec = np.divide(vec, norm)
                        # sample mid_num points along the candidate limb and dot
                        # the PAF vectors with the limb direction
                        startend = list(
                            zip(
                                np.linspace(candA[i][0], candB[j][0], num=mid_num),
                                np.linspace(candA[i][1], candB[j][1], num=mid_num),
                            )
                        )
                        vec_x = np.array(
                            [
                                score_mid[
                                    int(round(startend[I][1])),
                                    int(round(startend[I][0])),
                                    0,
                                ]
                                for I in range(len(startend))
                            ]
                        )
                        vec_y = np.array(
                            [
                                score_mid[
                                    int(round(startend[I][1])),
                                    int(round(startend[I][0])),
                                    1,
                                ]
                                for I in range(len(startend))
                            ]
                        )
                        score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(
                            vec_y, vec[1]
                        )
                        # mean alignment plus a penalty for limbs longer than
                        # half the image height
                        score_with_dist_prior = sum(score_midpts) / len(
                            score_midpts
                        ) + min(0.5 * oriImg.shape[0] / norm - 1, 0)
                        # at least 80% of samples must exceed the PAF threshold
                        criterion1 = len(
                            np.nonzero(score_midpts > thre2)[0]
                        ) > 0.8 * len(score_midpts)
                        criterion2 = score_with_dist_prior > 0
                        if criterion1 and criterion2:
                            connection_candidate.append(
                                [
                                    i,
                                    j,
                                    score_with_dist_prior,
                                    score_with_dist_prior + candA[i][2] + candB[j][2],
                                ]
                            )
                # greedily keep the best-scoring pairings, one per joint
                connection_candidate = sorted(
                    connection_candidate, key=lambda x: x[2], reverse=True
                )
                connection = np.zeros((0, 5))
                for c in range(len(connection_candidate)):
                    i, j, s = connection_candidate[c][0:3]
                    if i not in connection[:, 3] and j not in connection[:, 4]:
                        connection = np.vstack(
                            [connection, [candA[i][3], candB[j][3], s, i, j]]
                        )
                        if len(connection) >= min(nA, nB):
                            break
                connection_all.append(connection)
            else:
                special_k.append(k)
                connection_all.append([])
        # --- assemble per-person subsets from the limb connections ---
        # last number in each row is the total parts number of that person
        # the second last number in each row is the score of the overall configuration
        subset = -1 * np.ones((0, 20))
        candidate = np.array([item for sublist in all_peaks for item in sublist])
        for k in range(len(mapIdx)):
            if k not in special_k:
                partAs = connection_all[k][:, 0]
                partBs = connection_all[k][:, 1]
                indexA, indexB = np.array(limbSeq[k]) - 1
                for i in range(len(connection_all[k])):  # = 1:size(temp,1)
                    found = 0
                    subset_idx = [-1, -1]
                    for j in range(len(subset)):  # 1:size(subset,1):
                        if (
                            subset[j][indexA] == partAs[i]
                            or subset[j][indexB] == partBs[i]
                        ):
                            subset_idx[found] = j
                            found += 1
                    if found == 1:
                        # extend an existing person with the new joint
                        j = subset_idx[0]
                        if subset[j][indexB] != partBs[i]:
                            subset[j][indexB] = partBs[i]
                            subset[j][-1] += 1
                            subset[j][-2] += (
                                candidate[partBs[i].astype(int), 2]
                                + connection_all[k][i][2]
                            )
                    elif found == 2:  # if found 2 and disjoint, merge them
                        j1, j2 = subset_idx
                        membership = (
                            (subset[j1] >= 0).astype(int)
                            + (subset[j2] >= 0).astype(int)
                        )[:-2]
                        if len(np.nonzero(membership == 2)[0]) == 0:  # merge
                            subset[j1][:-2] += subset[j2][:-2] + 1
                            subset[j1][-2:] += subset[j2][-2:]
                            subset[j1][-2] += connection_all[k][i][2]
                            subset = np.delete(subset, j2, 0)
                        else:  # as like found == 1
                            subset[j1][indexB] = partBs[i]
                            subset[j1][-1] += 1
                            subset[j1][-2] += (
                                candidate[partBs[i].astype(int), 2]
                                + connection_all[k][i][2]
                            )
                    # if find no partA in the subset, create a new subset
                    elif not found and k < 17:
                        row = -1 * np.ones(20)
                        row[indexA] = partAs[i]
                        row[indexB] = partBs[i]
                        row[-1] = 2
                        row[-2] = (
                            sum(candidate[connection_all[k][i, :2].astype(int), 2])
                            + connection_all[k][i][2]
                        )
                        subset = np.vstack([subset, row])
        # delete some rows of subset which has few parts occur
        deleteIdx = []
        for i in range(len(subset)):
            if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
                deleteIdx.append(i)
        subset = np.delete(subset, deleteIdx, axis=0)
        # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
        # candidate: x, y, score, id
        return candidate, subset
| 13,127 | 38.18806 | 123 | py |
dolphin | dolphin-main/modules/mplug/get_video_caption.py | import ruamel.yaml as yaml
import numpy as np
import torch
import torch.nn as nn
from .models.model_caption_mplug_vatex import MPLUG
from .models.vit import interpolate_pos_embed, resize_pos_embed
from .models.tokenization_bert import BertTokenizer
from decord import VideoReader
import decord
import os
config_path = os.path.join("model_zoo", "mplug", "videocap_vatex_mplug_large.yaml")
mplug_pth_path = os.path.join("model_zoo", "mplug", "mplug_large.pth")
config = yaml.load(open(config_path, "r"), Loader=yaml.Loader)
def prepare_model(device):
    """Build the mPLUG video-captioning model and its tokenizer.

    Loads the BERT tokenizer, instantiates MPLUG from the module-level config,
    restores the released mplug_large.pth checkpoint (resizing the CLIP visual
    positional embedding to the configured resolution and stripping the
    fusion/bert prefixes from non-decoder keys), and returns the model in
    eval mode.

    Args:
        device: torch device the model and checkpoint are loaded onto.
    Returns:
        (model, tokenizer) tuple.
    """
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = MPLUG(config=config, tokenizer=tokenizer)
    model = model.to(device)
    assert os.path.exists(
        mplug_pth_path
    ), "Please download mplug_large.pth checkpoint from https://alice-open.oss-cn-zhangjiakou.aliyuncs.com/mPLUG/mplug_large.pth and put it in ./model_zoo/mplug/"
    checkpoint = torch.load(mplug_pth_path, map_location=device)
    # Some releases wrap the weights under "model", others under "module".
    # (Was a bare `except:`; narrowed so unrelated errors are not swallowed.)
    try:
        state_dict = checkpoint["model"]
    except KeyError:
        state_dict = checkpoint["module"]
    if config["clip_name"] == "ViT-B-16":
        num_patches = int(config["image_res"] * config["image_res"] / (16 * 16))
    elif config["clip_name"] == "ViT-L-14":
        num_patches = int(config["image_res"] * config["image_res"] / (14 * 14))
    # NOTE(review): any other clip_name leaves num_patches undefined and raises
    # NameError below — verify config values upstream.
    pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768).float())
    # interpolate the checkpoint's positional embedding to the new patch grid
    pos_embed = resize_pos_embed(
        state_dict["visual_encoder.visual.positional_embedding"].unsqueeze(0),
        pos_embed.unsqueeze(0),
    )
    state_dict["visual_encoder.visual.positional_embedding"] = pos_embed
    # rename encoder keys: drop the "fusion."/"bert." prefixes except on decoder keys
    for key in list(state_dict.keys()):
        if ("fusion" in key or "bert" in key) and "decode" not in key:
            encoder_key = key.replace("fusion.", "").replace("bert.", "")
            state_dict[encoder_key] = state_dict[key]
            del state_dict[key]
    model.load_state_dict(state_dict, strict=False)
    model.eval()
    return model, tokenizer
def pipeline(video_path, model, tokenizer, device):
    """Generate a caption for the video at *video_path*.

    Samples frames with decord, optionally prepends the configured text
    prompt, runs the model in inference mode, and decodes the best beam.

    Returns:
        The decoded caption string, terminated with " .".
        NOTE(review): the loop rebinds ``ans`` per batch element and the
        return uses the last value; ``ans`` would be unbound for an empty
        result — presumably there is always exactly one video; confirm.
    """
    video = load_video_from_path_decord(
        video_path, config["image_res"], config["image_res"], config["num_frm_test"]
    ).to(device)
    if config["prompt"] != "":
        # One prompt+EOS string per video in the batch, tokenized together.
        caption = [config["prompt"] + config["eos"]] * video.size(0)
        caption = tokenizer(
            caption,
            padding="longest",
            truncation=True,
            max_length=25,
            return_tensors="pt",
        ).to(device)
    else:
        caption = None
    # train=False -> the model returns generated ids and their probabilities.
    topk_ids, topk_probs = model(video, caption, None, train=False)
    for topk_id, topk_prob in zip(topk_ids, topk_probs):
        # Decode the best (first) hypothesis and strip BERT special tokens.
        ans = (
            tokenizer.decode(topk_id[0])
            .replace("[SEP]", "")
            .replace("[CLS]", "")
            .replace("[PAD]", "")
            .strip()
        )
        ans += " ."
    return ans
def load_video_from_path_decord(
    video_path,
    height=None,
    width=None,
    num_frame=12,
    start_time=None,
    end_time=None,
    fps=-1,
):
    """Decode frames from a video with decord, evenly strided over a span.

    Args:
        video_path: path to the video file.
        height, width: optional output resolution; both must be truthy for
            decord to resize, otherwise the native resolution is used.
        num_frame: controls the sampling stride (``vlen / num_frame``).
        start_time, end_time: optional clip bounds in seconds; converting
            them to frame indices requires ``fps`` > 0.
        fps: frames per second, used only for the start/end conversion.

    Returns:
        Float tensor of shape (1, T, C, H, W) — assumes decord returns
        frames as (T, H, W, C); TODO confirm.
    """
    # Make decord return torch tensors directly.
    decord.bridge.set_bridge("torch")
    if not height or not width:
        vr = VideoReader(video_path)
    else:
        vr = VideoReader(video_path, width=width, height=height)
    vlen = len(vr)
    if start_time or end_time:
        # NOTE(review): a truthy check means start_time=0 is treated as
        # "not set", and supplying only one bound crashes on None
        # arithmetic — confirm callers always pass both bounds together.
        assert fps > 0, "must provide video fps if specifying start and end time."
        start_idx = min(int(start_time * fps), vlen)
        end_idx = min(int(end_time * fps), vlen)
    else:
        start_idx, end_idx = 0, vlen
    # Stride vlen/num_frame across the span; dtype=int casts the fractional
    # indices to integers — TODO confirm numpy arange semantics with a
    # non-integer step are acceptable here (docs advise linspace instead).
    frame_index = np.arange(start_idx, end_idx, vlen / num_frame, dtype=int)
    raw_sample_frms = vr.get_batch(frame_index)
    # (T, H, W, C) -> (1, T, C, H, W), float for the model.
    raw_sample_frms = raw_sample_frms.permute(0, 3, 1, 2).float().unsqueeze(0)
    return raw_sample_frms
| 3,736 | 31.495652 | 162 | py |
dolphin | dolphin-main/modules/mplug/models/model_caption_mplug_vatex.py | from functools import partial
from .vit import VisionTransformer
from .modeling_mplug import BertConfig, BertModel, BertPrefixModel, FusionModel
from .visual_transformers import initialize_clip
from .predictor import TextGenerator
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
class MPLUG(nn.Module):
    """mPLUG video-captioning model.

    Wraps a CLIP visual encoder and a BERT-prefix text decoder; generation
    is delegated to a beam-search ``TextGenerator``. The commented-out text
    encoder / fusion encoder show the fuller mPLUG architecture this
    caption-only variant was derived from.
    """
    def __init__(
        self,
        text_encoder=None,
        text_decoder=None,
        tokenizer=None,
        config=None,
    ):
        # text_encoder / text_decoder parameters are unused here; the
        # decoder is built from config["text_decoder"] below.
        super().__init__()
        self.tokenizer = tokenizer
        self.module_setting(config)
        self.visual_encoder, _ = initialize_clip(config)
        # self.text_encoder = BertModel.from_pretrained(config['text_encoder'], config=self.config_encoder, add_pooling_layer=False)
        # self.fusion_encoder = FusionModel.from_pretrained(config['text_encoder'], config=self.config_fusion, add_pooling_layer=False)
        # self.text_decoder = BertPrefixModel.from_pretrained(config['text_decoder'], config=self.config_decoder)
        model_name = config["text_decoder"]
        self.text_decoder = BertPrefixModel.from_pretrained(
            model_name, config=self.config_decoder
        )
        self.beam_generator = TextGenerator(config, self.text_decoder)
        self.prompt = config["prompt"]
        # Number of prompt tokens (minus the trailing special token); these
        # positions are masked out of the caption loss in forward().
        self.prompt_length = len(self.tokenizer(self.prompt).input_ids) - 1
    def forward(
        self,
        image,
        quesiton,
        answer=None,
        train=True,
        out_size=5,
        scst=False,
        device=None,
    ):
        """Compute the caption loss (train=True) or generate (train=False).

        Args:
            image: video tensor unpacked as (B, N, C, W, H) — batch, frames,
                then per-frame image dims.
            quesiton: [sic] unused on this path; kept for interface parity.
            answer: tokenized target captions (required when train=True).
            train: True -> returns loss; False -> (topk_ids, topk_probs).
        """
        # shiyaya: for video — flatten frames into the batch dimension.
        B, N, C, W, H = image.size()
        image = image.view(-1, C, W, H)
        image = image.to(device, non_blocking=True)
        # end
        # if(scst):
        #     return self.beam_search(image, quesiton, answer, train=True,out_size=out_size)
        image_embeds = self.visual_encoder.visual(image, skip_last_layer=True)
        if self.large:
            # Project CLIP features to the text hidden size (see module_setting).
            image_embeds = self.dropout(
                self.visn_layer_norm(self.visn_fc(image_embeds))
            )
        # # shiyaya: for video, temporal mean
        # C = image_embeds.size(-1)
        # image_embeds = image_embeds.view(B, N, -1, C).mean(dim=1)
        # # end
        # shiyaya: for video, concat — concatenate every frame's tokens into
        # one long sequence per video.
        C = image_embeds.size(-1)
        image_embeds = image_embeds.view(B, N, -1, C).view(B, -1, C)
        # end
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
            image.device
        )
        if train:
            """
            k: number of answers for each question
            weights: weight for each answer
            """
            # Ignore padding (and the prompt prefix) in the LM loss.
            answer_targets = answer.input_ids.masked_fill(
                answer.input_ids == self.tokenizer.pad_token_id, -100
            )
            if self.prompt != "":
                answer_targets[:, : self.prompt_length] = -100
            answer_output = self.text_decoder(
                answer.input_ids,
                attention_mask=answer.attention_mask,
                encoder_hidden_states=image_embeds,
                encoder_attention_mask=image_atts,
                labels=answer_targets,
                return_dict=True,
                reduction="none",
            )
            loss = answer_output.loss
            return loss
        else:
            topk_ids, topk_probs = self.generation(image_embeds, image_atts)
            return topk_ids, topk_probs
    def module_setting(self, config):
        """Build encoder/fusion/decoder BertConfigs and, when the vision
        width differs from the text hidden size, the visn_fc projection."""
        self.config_encoder = BertConfig.from_json_file(config["bert_config"])
        self.config_encoder.num_hidden_layers = self.config_encoder.text_encoder_layers
        self.config_fusion = BertConfig.from_json_file(config["bert_config"])
        self.config_decoder = BertConfig.from_json_file(config["bert_config"])
        self.config_decoder.add_cross_attention = True
        self.config_decoder.num_hidden_layers = self.config_decoder.text_decode_layers
        self.large = False
        if self.config_encoder.hidden_size != config["vision_width"]:
            self.visn_fc = nn.Linear(
                config["vision_width"], self.config_encoder.hidden_size
            )
            self.visn_layer_norm = nn.LayerNorm(
                self.config_encoder.hidden_size, eps=1e-12
            )
            self.dropout = nn.Dropout(self.config_encoder.hidden_dropout_prob)
            self.large = True
        self.use_checkpoint = (
            config["use_checkpoint"] if "use_checkpoint" in config else True
        )
    def beam_search(self, image, quesiton, answer=None, train=True, out_size=5):
        """Legacy generation entry point.

        NOTE(review): the rank_answer branch references names that are never
        defined in this class (question_output, merge_text_attention, k,
        self.open_generation, self.merge_attention, ...), so only the
        generation branch could run — this method looks dead/unfinished.
        """
        image_embeds = self.visual_encoder.visual(image, skip_last_layer=True)
        if self.large:
            image_embeds = self.dropout(
                self.visn_layer_norm(self.visn_fc(image_embeds))
            )
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
            image.device
        )
        if self.open_generation:
            topk_ids, topk_probs = self.generation(
                image_embeds, image_atts, out_size=out_size
            )
        else:
            topk_ids, topk_probs = self.rank_answer(
                question_output,
                quesiton.attention_mask
                if (not self.merge_attention and not self.concat_last_layer)
                else merge_text_attention,
                answer.input_ids,
                answer.attention_mask,
                k,
            )
        return topk_ids, topk_probs
    @torch.no_grad()
    def copy_params(self):
        # Copy weights into the momentum copies and freeze them.
        # NOTE(review): self.model_pairs is never assigned in this class, so
        # calling this raises AttributeError — vestigial mPLUG code; confirm.
        for model_pair in self.model_pairs:
            for param, param_m in zip(
                model_pair[0].parameters(), model_pair[1].parameters()
            ):
                param_m.data.copy_(param.data)  # initialize
                param_m.requires_grad = False  # not update by gradient
    @torch.no_grad()
    def _momentum_update(self):
        # EMA update of the momentum copies. Requires self.model_pairs and
        # self.momentum, neither of which is set here — see copy_params note.
        for model_pair in self.model_pairs:
            for param, param_m in zip(
                model_pair[0].parameters(), model_pair[1].parameters()
            ):
                param_m.data = param_m.data * self.momentum + param.data * (
                    1.0 - self.momentum
                )
    def generation(
        self,
        question_states,
        question_atts,
        answer_ids=None,
        answer_atts=None,
        k=None,
        out_size=1,
    ):
        """Beam-search decode captions conditioned on the visual states.

        When a prompt is configured, it (minus its trailing special token)
        seeds every beam. Returns (topk_ids, topk_probs).
        """
        input_ids = None
        if self.prompt != "":
            prompt = [self.prompt] * question_states.size(0)
            input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(
                question_states.device
            )
            input_ids = input_ids[:, :-1]
        encoder_inputs = [question_states, question_atts, input_ids]
        topk_ids, topk_probs = self.beam_generator.translate_batch_scst(
            encoder_inputs, out_size=out_size
        )
        return topk_ids, topk_probs
    def rank_answer(self, question_states, question_atts, answer_ids, answer_atts, k):
        """Re-rank a closed set of candidate answers.

        Selects the k candidates whose first token is most likely, scores
        each full candidate with the decoder, and re-ranks by total sequence
        log-probability (first-token prob chained with per-token losses).
        """
        num_ques = question_states.size(0)
        start_ids = answer_ids[0, 0].repeat(num_ques, 1)  # bos token
        start_output = self.text_decoder(
            start_ids,
            encoder_hidden_states=question_states,
            encoder_attention_mask=question_atts,
            return_dict=True,
            reduction="none",
        )
        logits = start_output.logits[:, 0, :]  # first token's logit
        # topk_probs: top-k probability
        # topk_ids: [num_question, k]
        answer_first_token = answer_ids[:, 1]
        prob_first_token = F.softmax(logits, dim=1).index_select(
            dim=1, index=answer_first_token
        )
        topk_probs, topk_ids = prob_first_token.topk(k, dim=1)
        # answer input: [num_question*k, answer_len]
        input_ids = []
        input_atts = []
        for b, topk_id in enumerate(topk_ids):
            input_ids.append(answer_ids.index_select(dim=0, index=topk_id))
            input_atts.append(answer_atts.index_select(dim=0, index=topk_id))
        input_ids = torch.cat(input_ids, dim=0)
        input_atts = torch.cat(input_atts, dim=0)
        targets_ids = input_ids.masked_fill(
            input_ids == self.tokenizer.pad_token_id, -100
        )
        # repeat encoder's output for top-k answers
        question_states = tile(question_states, 0, k)
        question_atts = tile(question_atts, 0, k)
        output = self.text_decoder(
            input_ids,
            attention_mask=input_atts,
            encoder_hidden_states=question_states,
            encoder_attention_mask=question_atts,
            labels=targets_ids,
            return_dict=True,
            reduction="none",
        )
        answer_loss = output.loss
        answer_loss = answer_loss.view(input_ids.size(0), -1)
        # topk_prob: first token probability
        topk_probs = topk_probs.view(-1, 1)
        log_probs = torch.cat([topk_probs.log(), -answer_loss], dim=1)
        # re-calculate log probabilities for the answer sequences using chain rule
        log_probs_sum = log_probs.sum(1)
        log_probs_sum = log_probs_sum.view(num_ques, k)
        topk_probs = F.softmax(log_probs_sum, dim=-1)
        # get top-k after re-ranking
        topk_probs, rerank_id = topk_probs.topk(k, dim=1)
        topk_ids = torch.gather(topk_ids, 1, rerank_id)
        return topk_ids, topk_probs
def tile(x, dim, n_tile):
    """Repeat each slice of ``x`` along ``dim`` ``n_tile`` times consecutively.

    For dim=0 and n_tile=2, [a, b] becomes [a, a, b, b]: copies of a slice
    stay adjacent (unlike ``Tensor.repeat``, which would give [a, b, a, b]).
    """
    # torch.repeat_interleave reproduces the repeat + gather-index dance of
    # the original implementation in a single call, on x's own device.
    return torch.repeat_interleave(x, n_tile, dim=dim)
| 9,712 | 35.242537 | 135 | py |
dolphin | dolphin-main/modules/mplug/models/predictor.py | #!/usr/bin/env python
""" Translator Class and builder """
from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
import os
import math
import json
import torch
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0, device='cuda:0'):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), float("-inf"), device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
def build_predictor(args, tokenizer, symbols, model, logger=None):
    """Construct a :class:`TextGenerator` around *model*.

    No global rescoring is used: a GNMT-style length-penalty scorer could be
    plugged in here, but the pipeline currently runs with ``None``.
    """
    return TextGenerator(
        args,
        model,
        tokenizer,
        symbols,
        global_scorer=None,
        logger=logger,
    )
class TextGenerator(object):
    """
    Uses a model to translate a batch of sentences.

    Args:
       model (:obj:`onmt.modules.NMTModel`):
          NMT model to use for translation
       fields (dict of Fields): data fields
       beam_size (int): size of beam to use
       n_best (int): number of translations produced
       max_length (int): maximum length output to produce
       global_scores (:obj:`GlobalScorer`):
         object to rescore final translations
       copy_attn (bool): use copy attention during translation
       cuda (bool): use cuda
       beam_trace (bool): trace beam search for debugging
       logger(logging.Logger): logger.
    """

    def __init__(self,
                 args,
                 model,
                 vocab=None,
                 symbols=None,
                 global_scorer=None,
                 logger=None,
                 dump_beam=""):
        # `args` is a dict-like config supplying beam_size / min_length /
        # max_length (and, for sampling, temperature / top_k / top_p).
        self.alpha = 0.6
        self.logger = logger
        # self.cuda = args.visible_gpus != '-1'
        self.cuda = (torch.cuda.device_count() > 0)
        self.args = args
        self.model = model
        #TODO generator
        #self.generator = self.model.generator
        self.vocab = vocab
        self.symbols = symbols
        self.start_token = 101  # BOS id (presumably BERT [CLS]; original note said [PAD] — verify)
        self.end_token = 102  # EOS id (presumably BERT [SEP] — verify)
        self.global_scorer = global_scorer
        self.beam_size = args['beam_size']
        self.min_length = args['min_length']
        self.max_length = args['max_length']
        self.dump_beam = dump_beam
        # for debugging
        self.beam_trace = self.dump_beam != ""
        self.beam_accum = None
        if self.beam_trace:
            self.beam_accum = {
                "predicted_ids": [],
                "beam_parent_ids": [],
                "scores": [],
                "log_probs": []}

    def _build_target_tokens(self, pred):
        """Convert a predicted id sequence to tokens, stopping at EOS."""
        # vocab = self.fields["tgt"].vocab
        tokens = []
        for tok in pred:
            tok = int(tok)
            tokens.append(tok)
            if tokens[-1] == self.end_token:
                tokens = tokens[:-1]
                break
        tokens = [t for t in tokens if t < len(self.vocab)]
        tokens = self.vocab.DecodeIds(tokens).split(' ')
        return tokens

    def translate_batch(self, encoder_inputs, do_sample=False, out_size=1):
        """
        Translate a batch of sentences.

        Mostly a wrapper around :obj:`Beam`. Gradients are disabled only on
        the non-sampling path (sampling keeps grad, e.g. for SCST training).

        Args:
           batch (:obj:`Batch`): a batch from a dataset object
           data (:obj:`Dataset`): the dataset object
           fast (bool): enables fast beam search (may not support all features)

        Todo:
           Shouldn't need the original dataset.
        """
        if do_sample:
            return self._fast_translate_batch(encoder_inputs, self.max_length, min_length=self.min_length, do_sample=do_sample,out_size=out_size)
        else:
            with torch.no_grad():
                return self._fast_translate_batch(encoder_inputs, self.max_length, min_length=self.min_length, do_sample=do_sample,out_size=out_size)

    def translate_batch_scst(self, encoder_inputs, do_sample=False, out_size=1):
        """Like translate_batch, but never disables grad (SCST variant)."""
        return self._fast_translate_batch(encoder_inputs, self.max_length, min_length=self.min_length, do_sample=do_sample,out_size=out_size)

    def _fast_translate_batch(self,
                              encoder_inputs,
                              max_length,
                              min_length=0,
                              do_sample=False,
                              out_size=1):
        """Batched beam search (or, with do_sample, multinomial sampling).

        encoder_inputs: [src_features, padding_mask] plus an optional third
        element of prompt ``input_ids`` used to seed every beam.

        Returns:
            (pred_ids, scores): per-batch lists of hypotheses and their
            length-penalized scores, each truncated to ``out_size``.
        """
        # TODO: faster code path for beam_size == 1.
        # TODO: support these blacklisted features.
        assert not self.dump_beam
        if do_sample:
            beam_size = 1
        else:
            beam_size = self.beam_size
        if len(encoder_inputs) == 3:
            src_features, padding_mask, input_ids = encoder_inputs
        elif len(encoder_inputs) == 2:
            src_features, padding_mask = encoder_inputs
            input_ids = None
        device = src_features.device
        # Tile encoder states beam_size times so every beam attends to them.
        # dec_states.map_batch_fn(
        #     lambda state, dim: tile(state, beam_size, dim=dim))
        batch_size = src_features.size(0)
        src_features = tile(src_features, beam_size, dim=0)
        attention_mask = tile(padding_mask, beam_size, dim=0)
        #TODO support p_gen ...
        # if self.args.p_gen:
        #     src = tile(batch.src, beam_size, dim=0)
        batch_offset = torch.arange(
            batch_size, dtype=torch.long, device=device)
        beam_offset = torch.arange(
            0,
            batch_size * beam_size,
            step=beam_size,
            dtype=torch.long,
            device=device)
        if input_ids is not None:
            alive_seq = tile(input_ids, beam_size, dim=0)
        else:
            alive_seq = torch.full(
                [batch_size * beam_size, 1],
                self.start_token,
                dtype=torch.long,
                device=device)
        # Give full probability to the first beam on the first step.
        topk_log_probs = (
            torch.tensor([0.0] + [float("-inf")] * (beam_size - 1),
                         device=device).repeat(batch_size))
        # Structure that holds finished hypotheses.
        hypotheses = [[] for _ in range(batch_size)]  # noqa: F812
        results = {}
        results["predictions"] = [[] for _ in range(batch_size)]  # noqa: F812
        results["scores"] = [[] for _ in range(batch_size)]  # noqa: F812
        results["gold_score"] = [0] * batch_size
        results["batch"] = []
        dec_attn_mask = None
        dec_position_ids = None
        for step in range(max_length):
            # Full re-decode of each alive prefix; take the last position's logits.
            dec_feat_seq = self.model(alive_seq,
                                      encoder_hidden_states = src_features,
                                      encoder_attention_mask = attention_mask,
                                      return_dict = True,
                                      reduction = 'none')
            dec_feat_seq = dec_feat_seq.logits[:, -1, :]
            vocab_size = dec_feat_seq.size(-1)
            log_probs = torch.log(torch.softmax(dec_feat_seq.view(-1, vocab_size), dim=-1))
            # Forbid EOS until the minimum length is reached.
            if step < min_length:
                log_probs[:, self.end_token] = -1e20
            alpha = self.alpha  #global_scorer.alpha
            if do_sample:
                length_penalty = 1.0
            else:
                # GNMT-style length penalty.
                length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha
            if do_sample:
                _scores = log_probs / self.args.temperature
                _scores = top_k_top_p_filtering(
                    _scores, top_k=self.args.top_k, top_p=self.args.top_p, min_tokens_to_keep=1
                )  # (batch_size * num_beams, vocab_size)
                # Sample 2 next words for each beam (so we have some spare tokens and match output of greedy beam search)
                topk_ids = torch.multinomial(F.softmax(_scores, dim=-1), num_samples=1)  # (batch_size * num_beams, 2)
                # Compute next scores
                _scores = F.log_softmax(_scores, dim=1)  # (batch_size * num_beams, vocab_size)
                _scores += topk_log_probs.view(-1).unsqueeze(1)
                topk_scores = torch.gather(_scores, -1, topk_ids)  # (batch_size * num_beams, 2)
                #log_probs += # (batch_size * num_beams, 2)
                # Match shape of greedy beam search
                topk_ids = topk_ids.view(-1, beam_size)  # (batch_size, 2 * num_beams)
                topk_scores = topk_scores.view(-1, beam_size)  # (batch_size, 2 * num_beams)
            else:
                log_probs += topk_log_probs.view(-1).unsqueeze(1)
                curr_scores = log_probs / length_penalty
                curr_scores = curr_scores.reshape(-1, beam_size * vocab_size)
                topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1)
            # Undo the penalty so accumulated log-probs stay comparable.
            topk_log_probs = topk_scores * length_penalty
            # Resolve beam origin and true word ids.
            # topk_beam_index = topk_ids.div(vocab_size)
            topk_beam_index = topk_ids // vocab_size
            topk_ids = topk_ids.fmod(vocab_size)
            # Map beam_index to batch_index in the flat representation.
            batch_index = (
                    topk_beam_index
                    + beam_offset[:topk_beam_index.size(0)].unsqueeze(1))
            select_indices = batch_index.view(-1)
            # Append last prediction.
            alive_seq = torch.cat(
                [alive_seq.index_select(0, select_indices),
                 topk_ids.view(-1, 1)], -1)
            is_finished = topk_ids.eq(self.end_token)
            if step + 1 == max_length:
                is_finished.fill_(1)  #self.end_token)
            # End condition is top beam is finished.
            end_condition = is_finished[:, 0].eq(1)  #self.end_token)
            # Save finished hypotheses.
            if is_finished.any():
                predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1))
                for i in range(is_finished.size(0)):
                    b = batch_offset[i]
                    if end_condition[i]:
                        is_finished[i].fill_(1)  #self.end_token)
                    finished_hyp = is_finished[i].nonzero().view(-1)
                    # Store finished hypotheses for this batch.
                    for j in finished_hyp:
                        hypotheses[b].append((
                            topk_scores[i, j],
                            predictions[i, j, 0:]))
                    # If the batch reached the end, save the n_best hypotheses.
                    if end_condition[i]:
                        best_hyp = sorted(
                            hypotheses[b], key=lambda x: x[0], reverse=True)
                        # if self.args.dataset == "qg_ranking_test" or (self.args.dataset == 'paraphrase' and (not self.args.sample_topk)):
                        #     for each in best_hyp[:beam_size]:
                        #         score, pred = each
                        #         results["scores"][b].append(score)
                        #         results["predictions"][b].append(pred)
                        # else:
                        #     score, pred = best_hyp[0]
                        #     results["scores"][b].append(score)
                        #     results["predictions"][b].append(pred)
                        for each in best_hyp[:beam_size]:
                            score, pred = each
                            results["scores"][b].append(score)
                            results["predictions"][b].append(pred)
                non_finished = end_condition.eq(0).nonzero().view(-1)
                # If all sentences are translated, no need to go further.
                if len(non_finished) == 0:
                    break
                # Remove finished batches for the next step.
                topk_log_probs = topk_log_probs.index_select(0, non_finished)
                batch_index = batch_index.index_select(0, non_finished)
                batch_offset = batch_offset.index_select(0, non_finished)
                alive_seq = predictions.index_select(0, non_finished) \
                    .view(-1, alive_seq.size(-1))
            # Reorder states.
            select_indices = batch_index.view(-1)
            src_features = src_features.index_select(0, select_indices)
            attention_mask = attention_mask.index_select(0, select_indices)
        pred_ids = []
        scores = []
        # print (pred_ids, scores)
        for each in results["scores"]:
            scores.append(each[:out_size])
        for each in results["predictions"]:
            pred_ids.append(each[:out_size])
        return pred_ids,scores

    def _generate_no_beam_search(
        self,
        input_ids,
        cur_len,
        max_length,
        do_sample,
        temperature,
        top_k,
        top_p,
        repetition_penalty,
        pad_token_id,
        eos_token_ids,
        batch_size,
    ):
        """Generate sequences for each example without beam search (num_beams == 1).

        All returned sequences are generated independently.

        NOTE(review): references self.prepare_inputs_for_generation,
        self._do_output_past, self.num_keep_best and self.od_labels_len,
        none of which are defined on this class — this looks like vestigial
        code copied from a HF-style model; confirm before calling.
        """
        assert self.num_keep_best == 1, 'cannot generate >1 sentences in greedy search'
        # current position / max lengths / length of generated sentences / unfinished sentences
        unfinished_sents = []
        cur_unfinished = input_ids.new(batch_size).fill_(1)
        # log of scores for each sentence in the batch
        logprobs = []
        past = None
        while cur_len < max_length:
            model_inputs = self.prepare_inputs_for_generation(input_ids, past=past)
            outputs = self(**model_inputs)
            if cur_len == 1:
                token_len = 2 + self.od_labels_len
                next_token_idx = 1
            else:
                assert cur_len > 1
                if not self._do_output_past(outputs):
                    token_len = cur_len + 1 + self.od_labels_len
                    next_token_idx = cur_len
                else:
                    token_len = 2
                    next_token_idx = 1
            assert outputs[0].shape[1] == token_len
            next_token_logits = outputs[0][:, next_token_idx, :]
            # if model has past, then set the past variable to speed up decoding
            if self._do_output_past(outputs):
                past = outputs[1]
            # repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
            if repetition_penalty != 1.0:
                for i in range(batch_size):
                    for previous_token in set(input_ids[i].tolist()):
                        # if score < 0 then repetition penalty has to multiplied to reduce the previous token probability
                        if next_token_logits[i, previous_token] < 0:
                            next_token_logits[i, previous_token] *= repetition_penalty
                        else:
                            next_token_logits[i, previous_token] /= repetition_penalty
            if do_sample:
                # Temperature (higher temperature => more likely to sample low probability tokens)
                if temperature != 1.0:
                    next_token_logits = next_token_logits / temperature
                # Top-p/top-k filtering
                next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
                # Sample
                next_token = torch.multinomial(F.softmax(next_token_logits, dim=-1), num_samples=1).squeeze(1)
            else:
                # Greedy decoding
                next_token = torch.argmax(next_token_logits, dim=-1)
            # Compute scores
            _scores = F.log_softmax(next_token_logits, dim=-1)  # (batch_size, vocab_size)
            _scores = torch.gather(_scores, -1, next_token.unsqueeze(-1))  # (batch_size, 1)
            logprobs.append(_scores)  # (batch_size, 1)
            unfinished_sents.append(cur_unfinished)
            # update generations and finished sentences; finished rows keep
            # emitting pad_token_id.
            tokens_to_add = next_token * cur_unfinished + pad_token_id * (1 - cur_unfinished)
            input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
            #for t in input_ids:
            #    print(self.tokenizer.convert_ids_to_tokens(t.tolist()))
            for eos_token_id in eos_token_ids:
                cur_unfinished = cur_unfinished.mul(tokens_to_add.ne(eos_token_id).long())
            cur_len = cur_len + 1
            # stop when there is a </s> in each sentence, or if we exceed the maximum length
            if cur_unfinished.max() == 0:
                break
        # add eos_token_ids to unfinished sentences
        if cur_len == max_length:
            input_ids[:, -1].masked_fill_(cur_unfinished.to(dtype=torch.bool), eos_token_ids[0])
        logprobs = torch.cat(logprobs, dim=1)
        unfinished_sents = torch.stack(unfinished_sents, dim=1).float()
        sum_logprobs = (logprobs * unfinished_sents).sum(dim=1)
        # return logprobs to keep consistent with beam search output
        logprobs = sum_logprobs / unfinished_sents.sum(dim=1)
        # pad to the same length, otherwise DataParallel will give error
        pad_len = max_length - input_ids.shape[1]
        if pad_len > 0:
            padding_ids = input_ids.new(batch_size, pad_len).fill_(pad_token_id)
            input_ids = torch.cat([input_ids, padding_ids], dim=1)
        # (batch_size, n_best, max_len), (batch_size, n_best)
        return input_ids.unsqueeze(1), logprobs.unsqueeze(1)
def top_k_top_p_filtering(logits, top_k=10, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1):
    """Filter a logits distribution in place with top-k and/or nucleus (top-p).

    Tokens outside the top-k, or beyond cumulative probability ``top_p``,
    get their logit replaced with ``filter_value``; at least
    ``min_tokens_to_keep`` tokens always survive. The mutated ``logits``
    tensor is returned.
    """
    if top_k > 0:
        # Clamp k into [min_tokens_to_keep, vocab_size], then drop every
        # logit strictly below the k-th best value.
        k = min(max(top_k, min_tokens_to_keep), logits.size(-1))
        kth_best = torch.topk(logits, k)[0][..., -1, None]
        logits[logits < kth_best] = filter_value
    if top_p < 1.0:
        desc_logits, desc_indices = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(F.softmax(desc_logits, dim=-1), dim=-1)
        # Mark tokens whose cumulative probability exceeds the nucleus mass.
        drop_sorted = cum_probs > top_p
        if min_tokens_to_keep > 1:
            # Always keep the first min_tokens_to_keep sorted tokens.
            drop_sorted[..., :min_tokens_to_keep] = 0
        # Shift right so the first token crossing the threshold survives too.
        drop_sorted[..., 1:] = drop_sorted[..., :-1].clone()
        drop_sorted[..., 0] = 0
        # Map the sorted drop-mask back onto the original token order.
        drop = drop_sorted.scatter(1, desc_indices, drop_sorted)
        logits[drop] = filter_value
    return logits
class Translation(object):
    """
    Container for a translated sentence.

    Attributes:
        src (`LongTensor`): src word ids
        src_raw ([str]): raw src words
        pred_sents ([[str]]): words from the n-best translations
        pred_scores ([[float]]): log-probs of n-best translations
        attns ([`FloatTensor`]) : attention dist for each translation
        gold_sent ([str]): words from gold translation
        gold_score ([float]): log-prob of gold translation
    """

    def __init__(self, fname, src, src_raw, pred_sents,
                 attn, pred_scores, tgt_sent, gold_score):
        self.fname = fname
        self.src = src
        self.src_raw = src_raw
        self.pred_sents = pred_sents
        self.attns = attn
        self.pred_scores = pred_scores
        self.gold_sent = tgt_sent
        self.gold_score = gold_score

    def log(self, sent_number):
        """
        Build and return a human-readable report for this translation:
        source, best prediction + score, optional gold sentence + score,
        and (when several hypotheses exist) the full n-best list.
        """
        best_pred = self.pred_sents[0]
        best_score = self.pred_scores[0]
        parts = ['\nSENT {}: {}\n'.format(sent_number, self.src_raw)]
        parts.append('PRED {}: {}\n'.format(sent_number, ' '.join(best_pred)))
        parts.append("PRED SCORE: {:.4f}\n".format(best_score))
        if self.gold_sent is not None:
            parts.append('GOLD {}: {}\n'.format(sent_number, ' '.join(self.gold_sent)))
            parts.append("GOLD SCORE: {:.4f}\n".format(self.gold_score))
        if len(self.pred_sents) > 1:
            parts.append('\nBEST HYP:\n')
            for score, sent in zip(self.pred_scores, self.pred_sents):
                parts.append("[{:.4f}] {}\n".format(score, sent))
        return ''.join(parts)
def tile(x, count, dim=0):
    """
    Tiles x on dimension dim count times.

    Each slice along ``dim`` is repeated ``count`` times consecutively
    (e.g. [a, b] -> [a, a, b, b] for count=2), so beam copies of a batch
    element stay grouped together.
    """
    axes = list(range(x.dim()))
    moved = dim != 0
    if moved:
        # Bring the target dimension to the front, tile there, move it back.
        axes[0], axes[dim] = axes[dim], axes[0]
        x = x.permute(axes).contiguous()
    tiled_shape = list(x.size())
    tiled_shape[0] *= count
    # Flatten trailing dims, repeat along the (transposed) batch axis, and
    # reshape; this interleaves copies rather than appending whole blocks.
    flat = x.view(x.size(0), -1).transpose(0, 1)
    tiled = flat.repeat(count, 1).transpose(0, 1).contiguous().view(*tiled_shape)
    if moved:
        tiled = tiled.permute(axes).contiguous()
    return tiled
| 21,697 | 40.726923 | 149 | py |
dolphin | dolphin-main/modules/mplug/models/vit.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import _cfg, PatchEmbed
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, DropPath
class Mlp(nn.Module):
    """ MLP as used in Vision Transformer, MLP-Mixer and related networks.

    Two linear projections with an activation in between; a single shared
    dropout module is applied after the activation and after fc2.
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Falsy (None/0) hidden/output widths fall back to the input width.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # fc1 -> act -> drop -> fc2 -> drop, reusing one dropout module.
        for stage in (self.fc1, self.act, self.drop, self.fc2, self.drop):
            x = stage(x)
        return x
class Attention(nn.Module):
    """Multi-head self-attention with optional attention-map/gradient capture."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        # Populated only when forward() runs with register_hook=True.
        self.attn_gradients = None
        self.attention_map = None

    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def forward(self, x, register_hook=False):
        batch, tokens, channels = x.shape
        per_head = channels // self.num_heads
        # Single projection to q, k, v, split into heads:
        # (B, N, 3*C) -> (3, B, heads, N, head_dim).
        projected = self.qkv(x).reshape(batch, tokens, 3, self.num_heads, per_head)
        q, k, v = projected.permute(2, 0, 3, 1, 4).unbind(0)
        scores = q.matmul(k.transpose(-2, -1)).mul(self.scale)
        weights = self.attn_drop(scores.softmax(dim=-1))
        if register_hook:
            # Keep the attention map (and, on backward, its gradient).
            self.save_attention_map(weights)
            weights.register_hook(self.save_attn_gradients)
        out = weights.matmul(v).transpose(1, 2).reshape(batch, tokens, channels)
        return self.proj_drop(self.proj(out))
class Block(nn.Module):
    """Transformer encoder block: pre-norm attention and pre-norm MLP, each
    wrapped in a residual connection with optional stochastic-depth drop path."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # Stochastic depth; identity when drop_path is disabled.
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)

    def forward(self, x, register_hook=False):
        # register_hook is forwarded so the attention map can be captured.
        attn_out = self.attn(self.norm1(x), register_hook=register_hook)
        x = x + self.drop_path(attn_out)
        mlp_out = self.mlp(self.norm2(x))
        return x + self.drop_path(mlp_out)
class VisionTransformer(nn.Module):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`  -
        https://arxiv.org/abs/2010.11929

    Note: despite the ``num_classes`` / ``representation_size`` arguments,
    this variant builds no classification head — forward() returns the
    normalized token sequence (CLS token first).
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
        """
        super().__init__()
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        # Learned CLS token and positional embedding (+1 row for CLS).
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # timm-style init: trunc-normal Linear weights, zero biases,
        # unit-gamma LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameters excluded from weight decay by the optimizer setup.
        return {'pos_embed', 'cls_token'}
    def forward(self, x, register_blk=-1):
        """Embed patches, prepend CLS, add positions, run all blocks.

        register_blk: index of the block whose attention map should be
        captured (-1 disables capture). Returns (B, 1+num_patches, C).
        """
        B = x.shape[0]
        x = self.patch_embed(x)
        cls_tokens = self.cls_token.expand(B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed[:,:x.size(1),:]
        x = self.pos_drop(x)
        for i,blk in enumerate(self.blocks):
            x = blk(x, register_blk==i)
        x = self.norm(x)
        return x
def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
    """Resize a checkpoint's ViT position embedding to match *visual_encoder*.

    Extra (class/dist) tokens are copied through unchanged; the grid tokens
    are bicubically resampled from the checkpoint grid to the encoder grid.
    When the two grids already match, the checkpoint tensor is returned as-is.
    """
    embed_dim = pos_embed_checkpoint.shape[-1]
    num_patches = visual_encoder.patch_embed.num_patches
    num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
    # Both grids are assumed square (height == width).
    src_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
    dst_size = int(num_patches ** 0.5)
    if src_size == dst_size:
        return pos_embed_checkpoint
    # Keep class/dist tokens untouched; interpolate only the grid tokens.
    extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
    grid = pos_embed_checkpoint[:, num_extra_tokens:]
    grid = grid.reshape(-1, src_size, src_size, embed_dim).permute(0, 3, 1, 2)
    grid = torch.nn.functional.interpolate(
        grid, size=(dst_size, dst_size), mode='bicubic', align_corners=False)
    grid = grid.permute(0, 2, 3, 1).flatten(1, 2)
    resized = torch.cat((extra_tokens, grid), dim=1)
    print('reshape position embedding from %d to %d'%(src_size ** 2,dst_size ** 2))
    return resized
def resize_pos_embed(posemb, posemb_new):
    """Rescale a grid of position embeddings to a new token count.

    Adapted from
    https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224

    Args:
        posemb: checkpoint embedding, shape (1, 1 + gs_old**2, dim); the first
            token is the class token and is passed through unchanged.
        posemb_new: target embedding, shape (1, 1 + gs_new**2, dim); only its
            token count is read.

    Returns:
        Tensor of shape (1 + gs_new**2, dim). Note the leading batch dim is
        squeezed away, unlike the companion helper in visual_transformers.py.

    Fixes: removed the dead ``if True:`` debug scaffold (the ``else`` branch
    was unreachable) and the garbled trailing return line.
    """
    ntok_new = posemb_new.shape[1]
    # Split off the class token; only the grid tokens are interpolated.
    posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
    ntok_new -= 1
    gs_old = int(math.sqrt(len(posemb_grid)))
    gs_new = int(math.sqrt(ntok_new))
    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    posemb_grid = F.interpolate(posemb_grid, size=(gs_new, gs_new), mode='bilinear')
    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1)
    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
    posemb = torch.squeeze(posemb, dim=0)
    return posemb
dolphin | dolphin-main/modules/mplug/models/visual_transformers.py | import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch.nn.functional as F
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
import numpy as np
from .clip import clip
def resize_pos_embed(posemb, posemb_new):
    """Rescale the grid of position embeddings when loading from a state_dict.

    Adapted from
    https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
    The class token is passed through unchanged; grid tokens are bilinearly
    resampled (computed in float32, then cast back to the input dtype).
    Returns shape (1, 1 + gs_new**2, dim).
    """
    grid_tokens_new = posemb_new.shape[1] - 1
    tok_part = posemb[:, :1]  # class token, kept as-is
    grid_part = posemb[0, 1:]
    side_old = int(math.sqrt(len(grid_part)))
    side_new = int(math.sqrt(grid_tokens_new))
    grid_part = grid_part.reshape(1, side_old, side_old, -1).permute(0, 3, 1, 2)
    source_dtype = grid_part.dtype
    # Interpolate in float32 for numerical safety, then restore the dtype.
    resized = F.interpolate(
        grid_part.float(), size=(side_new, side_new), mode="bilinear"
    )
    resized = resized.to(source_dtype)
    resized = resized.permute(0, 2, 3, 1).reshape(1, side_new * side_new, -1)
    return torch.cat([tok_part, resized], dim=1)
def initialize_clip(config, num_patches=240):
    """Load a CLIP visual backbone and attach a position embedding sized for
    ``config["image_res"]``.

    Args:
        config: dict with "clip_name" ("ViT-B-16" or "ViT-L-14") and
            "image_res" (square input resolution, pixels).
        num_patches: legacy default; recomputed from image_res and patch size.

    Returns:
        (clip_model, preprocess) as produced by ``clip.load``.

    Raises:
        ValueError: if config["clip_name"] is not a supported backbone
        (previously this fell through and crashed with a NameError on
        ``clip_model``).
    """
    if config["clip_name"] == "ViT-B-16":
        clip_model, preprocess = clip.load("ViT-B-16.tar", jit=False)
        num_patches = int(config["image_res"] * config["image_res"] / (16 * 16))
        pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768).float())
    elif config["clip_name"] == "ViT-L-14":
        clip_model, preprocess = clip.load(
            os.path.join("model_zoo", "mplug", "ViT-L-14.tar"),
            jit=False,
        )
        num_patches = int(config["image_res"] * config["image_res"] / (14 * 14))
        pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 1024).float())
    else:
        raise ValueError("Unsupported clip_name: %s" % config["clip_name"])
    # NOTE(review): the resized embedding is stored on the Parameter's
    # ``.weight`` attribute while the Parameter itself stays all-zeros;
    # presumably a later checkpoint load overwrites it — confirm intended.
    pos_embed.weight = resize_pos_embed(
        clip_model.visual.positional_embedding.unsqueeze(0), pos_embed.unsqueeze(0)
    )
    clip_model.visual.positional_embedding = pos_embed
    return clip_model, preprocess
# def initialize_vit(VISUAL_CONFIG, model_type="ViT-B_32", pretrained_dir="data/ViT-B_32.npz", img_size=(384, 640),
# num_patches=240):
# from vit.models.modeling import VisionTransformer, CONFIGS
# config = CONFIGS[model_type]
# model = VisionTransformer(config, img_size=224, zero_head=True, num_classes=1)
# model.load_from(np.load(pretrained_dir))
# pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768).float())
# pos_embed.weight = resize_pos_embed(model.transformer.embeddings.position_embeddings, pos_embed.unsqueeze(0))
# model.transformer.embeddings.position_embeddings = pos_embed
# if VISUAL_CONFIG.freeze_clip:
# for parameter in model.parameters():
# parameter.requires_grad = False
# return model
def initialize_optimizer(visual_model, lr, momentum, weight_decay):
    """Build a plain SGD optimizer over all parameters of *visual_model*."""
    return torch.optim.SGD(
        visual_model.parameters(),
        lr,
        momentum=momentum,
        weight_decay=weight_decay,
    )
def adjust_learning_rate(optimizer, epoch, args):
    """Step-decay schedule: starting from ``args.sgd_lr``, multiply by 0.1
    once for every milestone in the comma-separated ``args.schedule`` that
    *epoch* has reached, then write the result into every param group."""
    lr = args.sgd_lr
    for milestone in args.schedule.split(","):
        if epoch >= float(milestone):
            lr *= 0.1
    for group in optimizer.param_groups:
        group["lr"] = lr
from torch.optim import Optimizer
class FusedOptimizer(Optimizer):
    """Present several optimizers as one: ``step`` steps each wrapped
    optimizer in turn, and ``param_groups`` is the concatenation of their
    groups (so LR schedulers can see every group).

    NOTE: ``Optimizer.__init__`` is deliberately bypassed (as in the original
    code), so inherited helpers relying on ``self.state``/``self.defaults``
    are unsupported; use the methods defined here instead.
    """

    def __init__(self, optimizers):
        self.optimizers = optimizers
        param_groups = []
        for optimizer in self.optimizers:
            param_groups += optimizer.param_groups
        # super(FusedOptimizer, self).__init__([], {})
        self.param_groups = param_groups

    def step(self, closure=None):
        """Step every wrapped optimizer.

        Fix: accepts the standard optional *closure* (evaluated once, with
        grads enabled) so the class honors the ``Optimizer.step`` contract
        used by schedulers/training loops; returns the closure's loss, if any.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for optimizer in self.optimizers:
            optimizer.step()
        return loss

    def zero_grad(self, set_to_none=True):
        """Clear gradients on every wrapped optimizer.

        Needed because the inherited ``zero_grad`` depends on state set up by
        the skipped ``Optimizer.__init__``.
        """
        for optimizer in self.optimizers:
            optimizer.zero_grad(set_to_none=set_to_none)
| 3,991 | 35.290909 | 132 | py |
dolphin | dolphin-main/modules/mplug/models/modeling_mplug.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import Tensor, device, dtype, nn
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
import transformers
transformers.logging.set_verbosity_error()  # silence transformers' own warnings module-wide
logger = logging.get_logger(__name__)
# Constants used by transformers-style documentation decorators.
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
# Hub checkpoint names compatible with this architecture (informational list).
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "bert-base-uncased",
    "bert-large-uncased",
    "bert-base-cased",
    "bert-large-cased",
    "bert-base-multilingual-uncased",
    "bert-base-multilingual-cased",
    "bert-base-chinese",
    "bert-base-german-cased",
    "bert-large-uncased-whole-word-masking",
    "bert-large-cased-whole-word-masking",
    "bert-large-uncased-whole-word-masking-finetuned-squad",
    "bert-large-cased-whole-word-masking-finetuned-squad",
    "bert-base-cased-finetuned-mrpc",
    "bert-base-german-dbmdz-cased",
    "bert-base-german-dbmdz-uncased",
    "cl-tohoku/bert-base-japanese",
    "cl-tohoku/bert-base-japanese-whole-word-masking",
    "cl-tohoku/bert-base-japanese-char",
    "cl-tohoku/bert-base-japanese-char-whole-word-masking",
    "TurkuNLP/bert-base-finnish-cased-v1",
    "TurkuNLP/bert-base-finnish-uncased-v1",
    "wietsedv/bert-base-dutch-cased",
    # See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Walks every variable in the TF checkpoint, maps its slash-separated scope
    path onto *model*'s attribute tree (with BERT-specific renames such as
    kernel->weight), and copies the array into the matching parameter.
    Optimizer-state variables are skipped. Returns *model* mutated in place.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info("Skipping {}".format("/".join(name)))
            continue
        # Walk the scope path down the PyTorch module tree.
        pointer = model
        for m_name in name:
            # "layer_3" style components split into (attribute, index).
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # NOTE(review): this `continue` advances the inner scope
                    # loop, not the outer variable loop — matches the upstream
                    # transformers implementation.
                    logger.info("Skipping {}".format("/".join(name)))
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        # m_name here is the last component of the scope path.
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to nn.Linear.
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased so TensorFlow checkpoint variable
        # names still map onto it when loading.
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # (1, max_position_embeddings) position-id buffer, serialized with the model.
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.config = config

    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        """Sum word + token-type (+ absolute position) embeddings, then apply
        LayerNorm and dropout. Either input_ids or inputs_embeds must be given."""
        if input_ids is None:
            shape = inputs_embeds.size()[:-1]
        else:
            shape = input_ids.size()
        seq_len = shape[1]
        if position_ids is None:
            # Offset by the cached prefix length when decoding incrementally.
            position_ids = self.position_ids[:, past_key_values_length:seq_len + past_key_values_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        out = inputs_embeds + self.token_type_embeddings(token_type_ids)
        if self.position_embedding_type == "absolute":
            out = out + self.position_embeddings(position_ids)
        return self.dropout(self.LayerNorm(out))
class BertSelfAttention(nn.Module):
    """Multi-head (self or cross) attention.

    When ``is_cross_attention`` is true, keys/values are projected from
    ``config.encoder_width``-dim encoder states; otherwise from the hidden
    states. Supports absolute or relative position embeddings and returns
    the new (key, value) pair for incremental decoding.
    """
    def __init__(self, config, is_cross_attention):
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        if is_cross_attention:
            # Keys/values read from the (possibly wider) encoder states.
            self.key = nn.Linear(config.encoder_width, self.all_head_size)
            self.value = nn.Linear(config.encoder_width, self.all_head_size)
        else:
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        # When True (and cross-attending), attention maps and their gradients
        # are captured via the save_* hooks below.
        self.save_attention = False
    def save_attn_gradients(self, attn_gradients):
        # Backward hook target: stores grads of the attention probabilities.
        self.attn_gradients = attn_gradients
    def get_attn_gradients(self):
        return self.attn_gradients
    def save_attention_map(self, attention_map):
        self.attention_map = attention_map
    def get_attention_map(self):
        return self.attention_map
    def transpose_for_scores(self, x):
        # (B, L, all_head) -> (B, heads, L, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        """Returns (context, [probs,] (key, value)); the trailing pair is the
        cache for incremental decoding."""
        mixed_query_layer = self.query(hidden_states)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention:
            # NOTE(review): unlike upstream transformers, a cached cross-attn
            # past_key_value is never reused here — keys/values are recomputed
            # from encoder_hidden_states on every call.
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        past_key_value = (key_layer, value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        # clamp_inf is defined elsewhere in this module; presumably clamps
        # overflowed (inf) values, e.g. under fp16 — TODO confirm.
        attention_scores = clamp_inf(attention_scores)
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility
            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        if is_cross_attention and self.save_attention:
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs_dropped = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs_dropped = attention_probs_dropped * head_mask
        context_layer = torch.matmul(attention_probs_dropped, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        outputs = outputs + (past_key_value,)
        return outputs
class BertSelfOutput(nn.Module):
    """Projection + dropout + residual LayerNorm applied after attention."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """Project the attention output and add it (post-dropout) onto the
        residual stream *input_tensor*, then LayerNorm."""
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Attention sublayer: BertSelfAttention plus the residual output block."""

    def __init__(self, config, is_cross_attention=False):
        super().__init__()
        self.self = BertSelfAttention(config, is_cross_attention)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given head indices, shrinking q/k/v and output projections."""
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Shrink the projections along the pruned dimensions.
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Book-keep the new head count and remember what was pruned.
        self.self.num_attention_heads -= len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        """Run attention and fold the result back into the residual stream.

        Returns (attention_output, [attention_probs,] past_key_value)."""
        attn_results = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        merged = self.output(attn_results[0], hidden_states)
        return (merged,) + attn_results[1:]
class BertIntermediate(nn.Module):
    """Feed-forward expansion: Linear to intermediate_size, then activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be an activation name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Feed-forward contraction back to hidden_size, with residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # clamp_inf is defined elsewhere in this module; presumably clamps
        # overflowed (inf) values, e.g. under fp16 — behavior kept as-is.
        projected = clamp_inf(self.dense(hidden_states))
        projected = clamp_inf(self.dropout(projected))
        return self.LayerNorm(projected + input_tensor)
class FusionLayer(nn.Module):
    """Transformer layer that fuses text and image streams.

    For most layers it runs text self-attention followed by cross-attention
    into the image states; every ``stride_layer``-th layer (layer_nums != 0)
    instead self-attends over the concatenated [image; text] token sequence.
    """
    def __init__(self, config, layer_num):
        super().__init__()
        self.config = config
        # Every stride_layer-th call uses the concatenated-attention branch.
        self.stride_layer = getattr(self.config, "stride_layer", 100)
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BertAttention(config)
        self.crossattention = BertAttention(config, is_cross_attention=True)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        layer_nums=None,
        past_key_value=None,
        output_attentions=False,
    ):
        """Returns (layer_output, [attentions...,] present_key, present_value).

        ``layer_nums`` is this layer's position within the fusion stack and
        selects which branch below runs.
        """
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        if layer_nums == 0 or layer_nums % self.stride_layer != 0:
            # Branch 1: text self-attention, then cross-attention into the image states.
            self_attention_outputs = self.attention(
                hidden_states,
                attention_mask,
                head_mask,
                output_attentions=output_attentions,
                past_key_value=self_attn_past_key_value,
            )
            attention_output = self_attention_outputs[0]
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
            assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                output_attentions=output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
        elif layer_nums != 0 and layer_nums % self.stride_layer == 0:
            # Branch 2: joint self-attention over concatenated [image; text]
            # tokens; masks are 4-D broadcast masks, hence the cat on dim 3.
            self_attention_outputs = self.attention(
                torch.cat([encoder_hidden_states, hidden_states], 1),
                torch.cat([encoder_attention_mask, attention_mask], 3),
                head_mask,
                output_attentions=output_attentions,
                past_key_value=self_attn_past_key_value,
            )
            attention_output = self_attention_outputs[0]
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        #outputs = outputs + (present_key_value,)
        # Key/value are appended flat (not as one tuple); BertLayer matches this.
        outputs = outputs + (present_key_value[0], present_key_value[1])
        return outputs
    def feed_forward_chunk(self, attention_output):
        # Feed-forward (optionally chunked along the sequence dim) with residual.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class BertLayer(nn.Module):
    """A single BERT transformer layer: self-attention, optional
    cross-attention (when config.add_cross_attention), and a chunked
    feed-forward block."""
    def __init__(self, config, layer_num):
        super().__init__()
        self.config = config
        # Fix: layer_num was accepted but never stored, so forward()'s
        # multi-encoder branch crashed with AttributeError on self.layer_num.
        self.layer_num = layer_num
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BertAttention(config)
        self.has_cross_attention = getattr(self.config, "add_cross_attention", False)
        if self.has_cross_attention:
            self.crossattention = BertAttention(config, is_cross_attention=True)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        """Returns (layer_output, [attentions...,] present_key, present_value)."""
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:-1]
        present_key_value = self_attention_outputs[-1]
        if self.has_cross_attention:
            assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"
            if type(encoder_hidden_states) == list:
                # Multiple encoder streams: round-robin by layer index,
                # offset by config.fusion_layer.
                cross_attention_outputs = self.crossattention(
                    attention_output,
                    attention_mask,
                    head_mask,
                    encoder_hidden_states[(self.layer_num-self.config.fusion_layer)%len(encoder_hidden_states)],
                    encoder_attention_mask[(self.layer_num-self.config.fusion_layer)%len(encoder_hidden_states)],
                    output_attentions=output_attentions,
                )
                attention_output = cross_attention_outputs[0]
                outputs = outputs + cross_attention_outputs[1:-1]
            else:
                cross_attention_outputs = self.crossattention(
                    attention_output,
                    attention_mask,
                    head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    output_attentions=output_attentions,
                )
                attention_output = cross_attention_outputs[0]
                outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        # Key/value are appended flat (not as one tuple); FusionLayer matches this.
        outputs = outputs + (present_key_value[0], present_key_value[1])
        return outputs
    def feed_forward_chunk(self, attention_output):
        # Feed-forward (optionally chunked along the sequence dim) with residual.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class FusionEncoder(nn.Module):
    """Stack of FusionLayers run over text + image streams.

    Only layers from ``start_layer`` onward execute. After any layer whose
    output is the concatenated [image; text] sequence, the image part is split
    back off and added residually onto the running image states. Returns
    ``[encoder_hidden_states, hidden_states]`` (image, text).
    """
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([FusionLayer(config,i) for i in range(config.num_hidden_layers)])
        # self.layer = [FusionLayer(config,i) for i in range(config.num_hidden_layers)]
        # Only the last config.fusion_layers layers participate in fusion.
        self.start_layer = max(0, config.num_hidden_layers-config.fusion_layers)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True
    ):
        # hidden_states: text tokens; encoder_hidden_states: image tokens.
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if use_cache else None
        self.stride_layer = getattr(self.config, "stride_layer", 100)
        image_length = encoder_hidden_states.shape[1]
        text_length = hidden_states.shape[1]
        for i in range(self.start_layer, len(self.layer)):
            layer_module = self.layer[i]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                if use_cache:
                    logger.warn(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False
                def create_custom_forward(module):
                    # Close over past_key_value/output_attentions so the
                    # checkpointed call only takes tensor args.
                    def custom_forward(*inputs):
                        return tuple(module(*inputs, past_key_value, output_attentions))
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    i-self.start_layer,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    i-self.start_layer,
                    past_key_value,
                    output_attentions,
                )
            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
            if hidden_states.shape[1] == (image_length+text_length):
                # Stride layer produced joint [image; text] tokens: split them
                # apart and residually update the image stream.
                encoder_hidden_states_new, hidden_states = torch.split(hidden_states, (image_length, text_length), 1)
                encoder_hidden_states = encoder_hidden_states + encoder_hidden_states_new
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        return [encoder_hidden_states, hidden_states]
class BertEncoder(nn.Module):
    """Standard stack of BertLayers with optional gradient checkpointing,
    cache collection, and hidden-state/attention outputs."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True
    ):
        """Run every layer in order; returns a
        BaseModelOutputWithPastAndCrossAttentions (or a tuple when
        return_dict is False)."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if use_cache else None
        for i in range(len(self.layer)):
            layer_module = self.layer[i]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                if use_cache:
                    logger.warn(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False
                def create_custom_forward(module):
                    # Close over past_key_value/output_attentions so the
                    # checkpointed call only takes tensor args.
                    def custom_forward(*inputs):
                        return tuple(module(*inputs, past_key_value, output_attentions))
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
class BertPooler(nn.Module):
    """Pool a sequence by passing its first ([CLS]) token through Linear+Tanh."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        """Return tanh(dense(hidden_states[:, 0])), shape (batch, hidden).

        Fixes a NameError in the original: ``first_token_tensor`` was used
        without ever being assigned (its assignment was commented out).
        """
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
class BertPredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied before the MLM decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # hidden_act may be an activation name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class BertLMPredictionHead(nn.Module):
    """Language-modeling head: transform then project to vocabulary logits."""

    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The decoder weight is typically tied to the input embeddings; the bias
        # is a standalone parameter so it is resized correctly by
        # `resize_token_embeddings` (hence the explicit link below).
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
class BertOnlyMLMHead(nn.Module):
    """Masked-LM-only head: a thin wrapper around `BertLMPredictionHead`."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: binary classifier over the pooled output."""

    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Joint pre-training heads: MLM logits plus the NSP binary classifier."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        # MLM scores come from token-level states; NSP scores from the pooled [CLS].
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class BertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BertConfig
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize one submodule: normal init for Linear/Embedding weights,
        unit scale / zero shift for LayerNorm, zero for Linear biases."""
        if isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
        elif isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version, which uses truncated_normal
            # (cf https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
@dataclass
class BertForPreTrainingOutput(ModelOutput):
    """
    Output type of :class:`~transformers.BertForPreTraining`.

    Args:
        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
            Sum of the masked language modeling loss and the next sequence prediction (classification) loss.
        prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Language-modeling head scores for each vocabulary token (before SoftMax).
        seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Next-sequence-prediction head scores for the True/False continuation classes (before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            One :obj:`torch.FloatTensor` per layer (plus one for the embedding output), each of shape
            :obj:`(batch_size, sequence_length, hidden_size)`.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            One :obj:`torch.FloatTensor` per layer of shape :obj:`(batch_size, num_heads, sequence_length,
            sequence_length)`, holding the post-softmax attention weights used for the
            weighted average in the self-attention heads.
    """

    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model is called with an :obj:`is_decoder=True`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
    """
    def __init__(self, config, add_pooling_layer=True):
        # Embedding layer + transformer encoder; the pooler is optional so that
        # heads that only consume token-level states can skip it.
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config) if add_pooling_layer else None
        self.init_weights()
    def get_input_embeddings(self):
        # Word-piece embedding table (used for weight tying / resizing).
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    # NOTE(review): these docstring decorators sit on `get_extended_attention_mask`
    # rather than `forward` — likely left over from a code move; confirm placement.
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        #tokenizer_class=_TOKENIZER_FOR_DOC,
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
        Arguments:
            attention_mask (:obj:`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (:obj:`Tuple[int]`):
                The shape of the input to the model.
            device: (:obj:`torch.device`):
                The device of the input to the model.
            is_decoder (:obj:`bool`):
                If True, a causal (lower-triangular) mask is combined with the padding mask.
        Returns:
            :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`.
        """
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if is_decoder:
                batch_size, seq_length = input_shape
                seq_ids = torch.arange(seq_length, device=device)
                # Lower-triangular [batch, seq, seq] mask: query i may attend to keys j <= i.
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                # in case past_key_values are used we need to add a prefix ones mask to the causal mask
                # causal and attention masks must have same type with pytorch version < 1.3
                causal_mask = causal_mask.to(attention_mask.dtype)
                if causal_mask.shape[1] < attention_mask.shape[1]:
                    # attention_mask also covers cached (past) key positions; prepend
                    # an all-ones block so those stay visible to every query.
                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                    causal_mask = torch.cat(
                        [
                            torch.ones(
                                (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
                            ),
                            causal_mask,
                        ],
                        axis=-1,
                    )
                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
                    input_shape, attention_mask.shape
                )
            )
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=False,
    ):
        r"""
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        # Resolve per-call flags against the config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Caching only makes sense in decoder mode.
        if is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False
        # Exactly one of input_ids / inputs_embeds / encoder_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = inputs_embeds.device
        elif encoder_embeds is not None:
            input_shape = encoder_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = encoder_embeds.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")
        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,
                                                                                 device, is_decoder)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_hidden_states is not None:
            # A list of encoder states (one mask per state) is also supported.
            if type(encoder_hidden_states) == list:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
            else:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if type(encoder_attention_mask) == list:
                encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
            elif encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
            else:
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        # Pre-computed embeddings (encoder_embeds) bypass the embedding layer entirely.
        if encoder_embeds is None:
            embedding_output = self.embeddings(
                input_ids=input_ids,
                position_ids=position_ids,
                token_type_ids=token_type_ids,
                inputs_embeds=inputs_embeds,
                past_key_values_length=past_key_values_length,
            )
        else:
            embedding_output = encoder_embeds
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
class FusionModel(BertPreTrainedModel):
    """BERT-style backbone whose stack is a :class:`FusionEncoder`.

    Structurally mirrors :class:`BertModel` (embeddings + encoder + optional
    pooler) but its forward returns ``[encoder_hidden_states, sequence_output]``
    directly instead of a ``ModelOutput``.
    """
    def __init__(self, config, add_pooling_layer=True):
        # Same layout as BertModel, but with a FusionEncoder stack.
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = FusionEncoder(config)
        self.pooler = BertPooler(config) if add_pooling_layer else None
        self.init_weights()
    def get_input_embeddings(self):
        # Word-piece embedding table (used for weight tying / resizing).
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    # NOTE(review): these docstring decorators sit on `get_extended_attention_mask`
    # rather than `forward` — likely left over from a code move; confirm placement.
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        #tokenizer_class=_TOKENIZER_FOR_DOC,
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
        Arguments:
            attention_mask (:obj:`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (:obj:`Tuple[int]`):
                The shape of the input to the model.
            device: (:obj:`torch.device`):
                The device of the input to the model.
            is_decoder (:obj:`bool`):
                If True, a causal (lower-triangular) mask is combined with the padding mask.
        Returns:
            :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`.
        """
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if is_decoder:
                batch_size, seq_length = input_shape
                seq_ids = torch.arange(seq_length, device=device)
                # Lower-triangular [batch, seq, seq] mask: query i may attend to keys j <= i.
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                # in case past_key_values are used we need to add a prefix ones mask to the causal mask
                # causal and attention masks must have same type with pytorch version < 1.3
                causal_mask = causal_mask.to(attention_mask.dtype)
                if causal_mask.shape[1] < attention_mask.shape[1]:
                    # attention_mask also covers cached (past) key positions; prepend
                    # an all-ones block so those stay visible to every query.
                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                    causal_mask = torch.cat(
                        [
                            torch.ones(
                                (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
                            ),
                            causal_mask,
                        ],
                        axis=-1,
                    )
                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
                    input_shape, attention_mask.shape
                )
            )
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=False
    ):
        r"""
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        # Resolve per-call flags against the config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Caching only makes sense in decoder mode.
        if is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False
        # Exactly one of input_ids / inputs_embeds / encoder_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = inputs_embeds.device
        elif encoder_embeds is not None:
            input_shape = encoder_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = encoder_embeds.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")
        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,
                                                                                 device, is_decoder)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_hidden_states is not None:
            # A list of encoder states (one mask per state) is also supported.
            if type(encoder_hidden_states) == list:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
            else:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if type(encoder_attention_mask) == list:
                encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
            elif encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
            else:
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        # Pre-computed embeddings (encoder_embeds) bypass the embedding layer entirely.
        if encoder_embeds is None:
            embedding_output = self.embeddings(
                input_ids=input_ids,
                position_ids=position_ids,
                token_type_ids=token_type_ids,
                inputs_embeds=inputs_embeds,
                past_key_values_length=past_key_values_length,
            )
        else:
            embedding_output = encoder_embeds
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # NOTE(review): the FusionEncoder output is unpacked as exactly two items and
        # `pooled_output` is computed but never returned; `return_dict` is effectively
        # ignored here. Confirm this matches FusionEncoder's contract and the callers.
        encoder_hidden_states, sequence_output = encoder_outputs
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        return [encoder_hidden_states, sequence_output]
@add_start_docstrings(
    """
    Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
    sentence prediction (classification)` head.
    """,
    BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # BERT backbone plus the combined MLM + NSP heads used during pre-training.
        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config)
        self.init_weights()

    def get_output_embeddings(self):
        # The LM decoder weight doubles as the output embedding matrix.
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertForPreTraining
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.prediction_logits
            >>> seq_relationship_logits = outputs.seq_relationship_logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        bert_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output, pooled_output = bert_outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        # The combined loss is only defined when both MLM and NSP targets are given.
        total_loss = None
        if labels is not None and next_sentence_label is not None:
            cross_entropy = CrossEntropyLoss()
            masked_lm_loss = cross_entropy(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = cross_entropy(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss

        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + bert_outputs[2:]
            return output if total_loss is None else (total_loss,) + output

        return BertForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=bert_outputs.hidden_states,
            attentions=bert_outputs.attentions,
        )
class BertPrefixModel(BertPreTrainedModel):
    """BERT configured as an autoregressive (prefix / causal) decoder with a
    language-modeling head on top.

    Optionally distills from a teacher distribution supplied via
    ``soft_labels``, blended with the hard-label loss by ``alpha``.
    """

    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)
        # No pooling layer: only per-token predictions are needed.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)
        self.init_weights()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    #@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=CausalLMOutputWithCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=True,
        reduction='mean',
        soft_labels=None,
        alpha=0,
        return_logits=False,
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
            >>> config = BertConfig.from_pretrained("bert-base-cased")
            >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Training with labels: caching key/value states is pointless.
            use_cache = False

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            is_decoder=is_decoder,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        if return_logits:
            # Drop the last position: it has no next-token target.
            return prediction_scores[:, :-1, :].contiguous()

        lm_loss = None
        if labels is not None:
            # Next-token prediction: align scores at position t with labels at t+1.
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            # NOTE(review): the ``reduction`` parameter is accepted but not
            # forwarded here, so the loss is always mean-reduced — confirm intent.
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if soft_labels is not None:
            # Distillation: cross-entropy against the teacher distribution.
            # Bug fix: normalize over the vocabulary axis (dim=-1), not the
            # sequence axis (dim=1). This branch reuses the shifted scores and
            # labels computed above, so it requires ``labels`` to be given too.
            loss_distill = -torch.sum(F.log_softmax(shifted_prediction_scores, dim=-1)*soft_labels,dim=-1)
            loss_distill = loss_distill[labels!=-100].mean()
            lm_loss = (1-alpha)*lm_loss + alpha*loss_distill

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )
class BertPrefixModelForGrounding(BertPrefixModel):
    """Prefix language model variant for grounding tasks.

    Identical to :class:`BertPrefixModel` except that an additive log-space
    ``prob_mask`` can be applied to the vocabulary logits (e.g. to restrict
    generation to a task-specific vocabulary), and the ``reduction`` argument
    is honoured by the hard-label loss.
    """

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=True,
        reduction='mean',
        soft_labels=None,
        alpha=0,
        return_logits=False,
        prob_mask=None
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. Mask values selected
            in ``[0, 1]``: 1 for tokens that are **not masked**, 0 for tokens that are **masked**.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]``; tokens with indices set to ``-100`` are ignored (masked).
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`):
            Precomputed key and value hidden states of the attention blocks, used to speed up decoding.
        use_cache (:obj:`bool`, `optional`):
            If :obj:`True`, :obj:`past_key_values` are returned and can be used to speed up decoding.
        prob_mask (:obj:`torch.FloatTensor` of shape :obj:`(vocab_size,)` or broadcastable, `optional`):
            Additive mask applied to the vocabulary logits before computing losses or returning logits.
        Returns:
            :class:`CausalLMOutputWithCrossAttentions` or a plain tuple when ``return_dict=False``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Training with labels: caching key/value states is pointless.
            use_cache = False

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            is_decoder=is_decoder,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        if prob_mask is not None:
            # Broadcast the mask over batch and sequence dimensions and add
            # it to the logits (log-space masking).
            prob_mask = prob_mask.view(1,1,prob_mask.shape[-1])
            prediction_scores = prediction_scores + prob_mask

        if return_logits:
            # Drop the last position: it has no next-token target.
            return prediction_scores[:, :-1, :].contiguous()

        lm_loss = None
        if labels is not None:
            # Next-token prediction: align scores at position t with labels at t+1.
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss(reduction=reduction)
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if soft_labels is not None:
            # Distillation: cross-entropy against the teacher distribution.
            # Bug fix: normalize over the vocabulary axis (dim=-1), not the
            # sequence axis (dim=1). Requires ``labels`` as well, since it
            # reuses the shifted scores computed above.
            loss_distill = -torch.sum(F.log_softmax(shifted_prediction_scores, dim=-1)*soft_labels,dim=-1)
            loss_distill = loss_distill[labels!=-100].mean()
            lm_loss = (1-alpha)*lm_loss + alpha*loss_distill

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )
@add_start_docstrings(
    """Bert Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
    """BERT causal language model used for generation (next-token prediction).

    Supports ``reduction='none'`` to obtain a per-example loss (summed over
    tokens), and optional distillation via ``soft_labels``/``alpha``.
    """

    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)
        # No pooling layer: only per-token predictions are needed.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)
        self.init_weights()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=True,
        reduction='mean',
        soft_labels=None,
        alpha=0,
        return_logits=False,
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
            >>> config = BertConfig.from_pretrained("bert-base-cased")
            >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Training with labels: caching key/value states is pointless.
            use_cache = False

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            is_decoder=is_decoder,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        if return_logits:
            # Drop the last position: it has no next-token target.
            return prediction_scores[:, :-1, :].contiguous()

        lm_loss = None
        if labels is not None:
            # Next-token prediction: align scores at position t with labels at t+1.
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss(reduction=reduction)
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            if reduction == 'none':
                # Bug fix: only reshape the unreduced loss. With the default
                # 'mean' reduction the loss is a 0-dim scalar and
                # ``view(batch, -1)`` would raise. For 'none', restore the
                # batch dimension and sum the token losses per example.
                lm_loss = lm_loss.view(prediction_scores.size(0),-1).sum(1)

        if soft_labels is not None:
            # Distillation: cross-entropy against the teacher distribution.
            # Bug fix: normalize over the vocabulary axis (dim=-1), not the
            # sequence axis (dim=1). Summed per example to match the
            # per-example hard-label loss of the 'none' reduction path.
            loss_distill = -torch.sum(F.log_softmax(shifted_prediction_scores, dim=-1)*soft_labels,dim=-1)
            loss_distill = (loss_distill * (labels!=-100)).sum(1)
            lm_loss = (1-alpha)*lm_loss + alpha*loss_distill

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        """Assemble the decoder inputs for one generation step."""
        input_shape = input_ids.shape
        # If used as a decoder in an encoder-decoder model, the decoder
        # attention mask is created on the fly.
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)
        # With a cache, only the last token needs to be fed to the model.
        if past is not None:
            input_ids = input_ids[:, -1:]
        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "past_key_values": past,
            "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
            "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
            "is_decoder": True,
        }

    def _reorder_cache(self, past, beam_idx):
        """Reorder cached key/value states to follow beam-search reordering."""
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class FusionForMaskedLM(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.bert = FusionModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
# self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
#tokenizer_class=_TOKENIZER_FOR_DOC,
processor_class=_TOKENIZER_FOR_DOC,
checkpoint="bert-base-uncased",
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=False,
soft_labels=None,
alpha=0,
return_logits=False,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_embeds=encoder_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
is_decoder=is_decoder
)
sequence_output = outputs[1]
prediction_scores = self.cls(sequence_output)
if return_logits:
return prediction_scores
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if soft_labels is not None:
loss_distill = -torch.sum(F.log_softmax(prediction_scores, dim=1)*soft_labels,dim=-1)
loss_distill = loss_distill[labels!=-100].mean()
masked_lm_loss = (1-alpha)*masked_lm_loss + alpha*loss_distill
return masked_lm_loss
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """Bert Model with a `next sentence prediction (classification)` head on top. """,
    BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
    """BERT encoder with the binary next-sentence-prediction head."""

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see ``input_ids`` docstring). Indices should be in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertForNextSentencePrediction
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
            >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
            >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
            >>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')
            >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
            >>> logits = outputs.logits
            >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
        """
        # Backward compatibility: accept the deprecated keyword as ``labels``.
        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")

        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Binary classification from the pooled [CLS] representation.
        nsp_logits = self.cls(encoder_outputs[1])

        nsp_loss = None
        if labels is not None:
            nsp_loss = CrossEntropyLoss()(nsp_logits.view(-1, 2), labels.view(-1))

        if not return_dict:
            tail = (nsp_logits,) + encoder_outputs[2:]
            return tail if nsp_loss is None else (nsp_loss,) + tail

        return NextSentencePredictorOutput(
            loss=nsp_loss,
            logits=nsp_logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
    """BERT encoder followed by dropout and a linear head over the pooled output."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        #tokenizer_class=_TOKENIZER_FOR_DOC,
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Classify from the pooled [CLS] representation.
        logits = self.classifier(self.dropout(encoder_outputs[1]))

        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # A single label means regression -> mean-squared error.
                loss = MSELoss()(logits.view(-1), labels.view(-1))
            else:
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            tail = (logits,) + encoder_outputs[2:]
            return tail if loss is None else (loss,) + tail

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
    """BERT encoder with a per-choice scoring head for multiple-choice tasks."""

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One score per choice; choices are compared via a softmax in the loss.
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        #tokenizer_class=_TOKENIZER_FOR_DOC,
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
            :obj:`input_ids` above)
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        # Number of answer choices is the second dimension of the inputs.
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        def _flatten_choices(t):
            # Collapse (batch, num_choices, seq) -> (batch * num_choices, seq).
            return None if t is None else t.view(-1, t.size(-1))

        input_ids = _flatten_choices(input_ids)
        attention_mask = _flatten_choices(attention_mask)
        token_type_ids = _flatten_choices(token_type_ids)
        position_ids = _flatten_choices(position_ids)
        if inputs_embeds is not None:
            # Embeddings keep their trailing hidden dimension.
            inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Score each flattened choice, then regroup per example.
        choice_scores = self.classifier(self.dropout(encoder_outputs[1]))
        reshaped_logits = choice_scores.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss = CrossEntropyLoss()(reshaped_logits, labels)

        if not return_dict:
            tail = (reshaped_logits,) + encoder_outputs[2:]
            return tail if loss is None else (loss,) + tail

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
    """BERT encoder with a per-token linear classification head."""

    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config, add_pooling_layer=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        #tokenizer_class=_TOKENIZER_FOR_DOC,
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Per-token logits from the last hidden states.
        logits = self.classifier(self.dropout(encoder_outputs[0]))

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            flat_logits = logits.view(-1, self.num_labels)
            flat_labels = labels.view(-1)
            if attention_mask is not None:
                # Exclude padded positions: replace their labels with the
                # loss function's ignore_index so they contribute nothing.
                keep = attention_mask.view(-1) == 1
                flat_labels = torch.where(
                    keep, flat_labels, torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
            loss = loss_fct(flat_logits, flat_labels)

        if not return_dict:
            tail = (logits,) + encoder_outputs[2:]
            return tail if loss is None else (loss,) + tail

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
    """BERT encoder with a span-prediction head (start/end logits) for QA."""

    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config, add_pooling_layer=False)
        # Projects each token to two scores: span start and span end.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        #tokenizer_class=_TOKENIZER_FOR_DOC,
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Split the two per-token scores into start and end logits.
        start_logits, end_logits = self.qa_outputs(encoder_outputs[0]).split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # Multi-GPU gathering may add a trailing dimension; strip it.
            if start_positions.dim() > 1:
                start_positions = start_positions.squeeze(-1)
            if end_positions.dim() > 1:
                end_positions = end_positions.squeeze(-1)
            # Positions outside the sequence are clamped to an ignored index.
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            tail = (start_logits, end_logits) + encoder_outputs[2:]
            return tail if total_loss is None else (total_loss,) + tail

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
def clamp_inf(tensor):
if tensor.dtype == torch.float16 and torch.isinf(tensor).any():
clamp_value = torch.finfo(tensor.dtype).max - 1000
tensor = torch.clamp(tensor, min=-clamp_value, max=clamp_value)
return tensor
| 112,740 | 43.526461 | 213 | py |
dolphin | dolphin-main/modules/mplug/models/clip/clip.py | import hashlib
import os
import urllib
import warnings
from typing import Union, List
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
# Public API of this module.
__all__ = ["available_models", "load", "tokenize"]

# Shared BPE tokenizer instance used by `tokenize`.
_tokenizer = _Tokenizer()

# Download URLs for the released CLIP checkpoints.  The second-to-last path
# component of each URL is the SHA256 checksum of the file; `_download`
# verifies it after fetching.
_MODELS = {
    "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
    "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
    "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
    "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def available_models():
    """Return the names of the CLIP checkpoints that can be downloaded."""
    return [*_MODELS]
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
         jit: bool = False, download_root: str = None):
    """Load a CLIP model plus its image preprocessing transform.

    Args:
        name: a model name from `available_models()` or a path to a local
            checkpoint (TorchScript archive or plain state dict).
        device: accepted for API compatibility; note the rebuilt model is
            NOT moved to it here (the `.to(device)` call is deliberately
            commented out) — callers place the model themselves.
        jit: when True, patch device (and, on CPU, dtype) constants baked
            into traced graphs.
        download_root: cache directory for downloads (default ~/.cache/clip).

    Returns:
        (model, transform): the model and a torchvision `Compose` mapping a
        PIL image to the normalized tensor the model expects.
    """
    if name in _MODELS:
        model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(f"Model {name} not found; available models = {available_models()}")

    try:
        # First try to interpret the checkpoint as a TorchScript archive.
        model = torch.jit.load(model_path, map_location="cpu").eval()
        state_dict = None
    except RuntimeError:
        # Not a JIT archive: load it as a plain state dict.
        if jit:
            warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
            jit = False
        state_dict = torch.load(model_path, map_location="cpu")

    # Fix: when the JIT load succeeded, `state_dict` is None and the old
    # `build_model(state_dict)` call crashed on `None`; fall back to the
    # traced model's own parameters (reference-implementation pattern).
    model = build_model(state_dict or model.state_dict())  # .to(device)

    n_px = model.visual.input_resolution
    transform = Compose([
        Resize(n_px, interpolation=Image.BICUBIC),
        CenterCrop(n_px),
        lambda image: image.convert("RGB"),
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
    ])

    if not jit:
        if str(device) == "cpu":
            model.float()
        return model, transform

    # --- jit=True: rewrite device/dtype constants baked into traced graphs.
    # NOTE(review): since `model` was rebuilt from the state dict above, it
    # is not a scripted module and the graph patching below is effectively a
    # no-op on it — confirm whether the jit path is still intended.
    device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
    device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]

    def patch_device(module):
        # Replace every hard-coded "cuda*" device constant with the target
        # device's constant node.
        graphs = [module.graph] if hasattr(module, "graph") else []
        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)
        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
                    node.copyAttributes(device_node)

    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)

    # On CPU, also downgrade hard-coded fp16 casts (`aten::to(..., 5)`) to fp32.
    if str(device) == "cpu":
        float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()

        def patch_float(module):
            graphs = [module.graph] if hasattr(module, "graph") else []
            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)
            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [1, 2]:  # dtype can be the second or third argument to aten::to()
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)

        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)
        model.float()

    return model, transform
def tokenize(texts: Union[str, List[str]], context_length: int = 77):
    """Return BPE token ids of `texts` as a [N, context_length] LongTensor.

    Each sequence is wrapped in start/end-of-text markers and right-padded
    with zeros.  A single string is treated as a batch of one.

    Raises:
        RuntimeError: if any encoded sequence exceeds `context_length`.
    """
    if isinstance(texts, str):
        texts = [texts]

    sot_token = _tokenizer.encoder["<|startoftext|>"]
    eot_token = _tokenizer.encoder["<|endoftext|>"]

    result = torch.zeros(len(texts), context_length, dtype=torch.long)
    for i, text in enumerate(texts):
        tokens = [sot_token] + _tokenizer.encode(text) + [eot_token]
        if len(tokens) > context_length:
            raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
        result[i, :len(tokens)] = torch.tensor(tokens)
    return result
| 6,098 | 37.601266 | 142 | py |
dolphin | dolphin-main/modules/mplug/models/clip/model.py | from collections import OrderedDict
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
class Bottleneck(nn.Module):
    """CLIP's anti-aliased ResNet bottleneck block.

    All convolutions run at stride 1; spatial downsampling (stride > 1) is
    performed by an average pool after the second convolution, and the
    shortcut path mirrors this with an avgpool before its 1x1 projection.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Downsampling happens here rather than via conv stride.
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = None
        self.stride = stride

        needs_projection = stride > 1 or inplanes != planes * Bottleneck.expansion
        if needs_projection:
            # Shortcut: avgpool first, then a stride-1 1x1 conv + BN.
            self.downsample = nn.Sequential(OrderedDict([
                ("-1", nn.AvgPool2d(stride)),
                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                ("1", nn.BatchNorm2d(planes * self.expansion))
            ]))

    def forward(self, x: torch.Tensor):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(self.avgpool(y)))
        return self.relu(y + shortcut)
class AttentionPool2d(nn.Module):
    """Global attention pooling over a 2D feature map (CLIP ResNet head).

    Flattens an NCHW feature map into a token sequence, prepends the
    spatial mean as a query token, runs one multi-head attention pass, and
    returns the attended output for that mean token as the pooled vector.
    """

    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        # One learned positional embedding per spatial location, plus one
        # for the prepended mean token.
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        # Output projection; defaults to embed_dim when output_dim is None.
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1)  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        # NOTE(review): 0.1 attention dropout in training mode looks like a
        # local modification (the reference CLIP pool uses none) — confirm.
        if self.training:
            dropout = 0.1
        else:
            dropout = 0.0
        # Functional MHA with separate q/k/v projection weights; this layer
        # attends every token to every token in one pass.
        x, _ = F.multi_head_attention_forward(
            query=x, key=x, value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=dropout,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )
        # Return only the output for the prepended mean token: [N, C_out].
        return x[0]
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """

    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution

        # 3-convolution stem (replaces torchvision's single 7x7 conv).
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.avgpool = nn.AvgPool2d(2)
        self.relu = nn.ReLU(inplace=True)

        # Residual stages; `_inplanes` tracks the running channel count
        # while the stages are being built.
        self._inplanes = width
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)

        embed_dim = width * 32  # the ResNet feature dimension
        self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)

    def _make_layer(self, planes, blocks, stride=1):
        # Only the first block of a stage downsamples / changes width.
        stage = [Bottleneck(self._inplanes, planes, stride)]
        self._inplanes = planes * Bottleneck.expansion
        stage.extend(Bottleneck(self._inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x, skip_last_layer=False):
        # Match the parameter dtype (fp16 when converted).
        x = x.type(self.conv1.weight.dtype)

        # Stem: three conv-bn-relu stages followed by a 2x2 average pool.
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.bn2(self.conv2(x)))
        x = self.relu(self.bn3(self.conv3(x)))
        x = self.avgpool(x)

        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)

        # Optionally return the spatial feature map instead of pooling.
        if not skip_last_layer:
            x = self.attnpool(x)
        return x
class LayerNorm(nn.LayerNorm):
    """torch.nn.LayerNorm that restores the input's dtype on the output.

    NOTE(review): unlike the reference CLIP implementation this variant does
    not upcast to float32 before normalising, so the cast-back is a no-op
    unless something (e.g. autocast) changes the computation dtype —
    confirm this is intended.
    """

    def forward(self, x: torch.Tensor):
        return super().forward(x).type(x.dtype)
class QuickGELU(nn.Module):
    """Fast sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""

    def forward(self, x: torch.Tensor):
        return torch.sigmoid(1.702 * x) * x
class ResidualAttentionBlock(nn.Module):
    """Pre-norm transformer block: self-attention then MLP, each residual.

    NOTE(review): the 0.1 attention dropout looks like a local modification
    to the reference CLIP block — confirm intended.
    """

    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head, dropout=0.1)
        self.ln_1 = LayerNorm(d_model)
        # 4x expansion MLP; the OrderedDict keys are part of the state-dict
        # layout and must not change.
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor, text_mask=None):
        # Fall back to the block's stored (e.g. causal) mask when no
        # explicit mask is supplied, moving it to the input's device/dtype.
        if text_mask is None and self.attn_mask is not None:
            text_mask = self.attn_mask.to(dtype=x.dtype, device=x.device)
        return self.attn(x, x, x, need_weights=False, attn_mask=text_mask)[0]

    def forward(self, x: torch.Tensor, text_mask = None):
        x = x + self.attention(self.ln_1(x), text_mask=text_mask)
        return x + self.mlp(self.ln_2(x))
class Transformer(nn.Module):
    """Stack of `layers` ResidualAttentionBlocks sharing one attention mask."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])

    def forward(self, x: torch.Tensor, text_mask=None, use_checkpoint=True):
        # Gradient checkpointing trades recomputation for activation memory.
        if use_checkpoint:
            run = lambda blk, inp: torch.utils.checkpoint.checkpoint(blk, inp, text_mask)
        else:
            run = lambda blk, inp: blk(inp, text_mask=text_mask)
        for blk in self.resblocks:
            x = run(blk, x)
        return x
class VisualTransformer(nn.Module):
    """CLIP image encoder: a ViT over non-overlapping image patches.

    Patches are embedded by a strided conv, a learned class token is
    prepended, and the sequence is processed by a pre-norm Transformer.
    """

    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        self.heads = heads
        # Patch embedding: kernel == stride == patch_size, so each output
        # position is one non-overlapping patch.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        # One position per patch plus one for the class token.
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(width, layers, heads)
        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def forward(self, x: torch.Tensor, skip_last_layer=False, text_embedding=None, text_mask=None, use_checkpoint=True):
        # NOTE(review): `text_embedding` and `text_mask` are accepted but
        # never used in this body — confirm whether they are vestigial.
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        # Slicing the positional embedding allows shorter token sequences.
        x = x + self.positional_embedding.to(x.dtype)[:x.size(1),:]
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x, use_checkpoint=use_checkpoint)
        x = x.permute(1, 0, 2)  # LND -> NLD
        # When skip_last_layer is set, return the post-norm token sequence
        # without the output projection; otherwise project all tokens.
        # NOTE(review): the else-branch projects WITHOUT ln_post, which
        # diverges from the reference implementation — confirm intended.
        if skip_last_layer:
            x = self.ln_post(x)
            # x = x @ self.proj
        else:
            x = x @ self.proj
        return x
class CLIP(nn.Module):
    """Contrastive Language-Image Pre-training model.

    Pairs an image encoder (ModifiedResNet when `vision_layers` is a
    tuple/list of stage depths, VisualTransformer when it is an int) with a
    causal text Transformer.  `forward` returns the scaled cosine
    similarity logits between every image and every text in the batch.
    """

    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int],
                 vision_width: int,
                 vision_patch_size: int,
                 # text
                 context_length: int,
                 vocab_size: int,
                 transformer_width: int,
                 transformer_heads: int,
                 transformer_layers: int
                 ):
        super().__init__()

        self.context_length = context_length

        # A tuple/list of layer counts selects the ResNet visual backbone;
        # an int selects a ViT with that many layers.
        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width
            )
        else:
            vision_heads = vision_width // 64
            self.visual = VisualTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim
            )

        # Text tower: causal Transformer over BPE token embeddings.
        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask()
        )

        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
        self.ln_final = LayerNorm(transformer_width)

        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        # Learnable temperature for the contrastive logits.
        self.logit_scale = nn.Parameter(torch.ones([]))

        self.initialize_parameters()

    def initialize_parameters(self):
        """Initialize all learnable parameters with CLIP's scheme."""
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)

        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features ** -0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)

            # Zero-init the last BN gamma of each bottleneck so every
            # residual branch starts as identity.
            for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
                for name, param in resnet_block.named_parameters():
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)

        # Depth-scaled init for the text transformer projections.
        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
        attn_std = self.transformer.width ** -0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)

    def build_attention_mask(self):
        """Return an additive causal mask over `context_length` tokens."""
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    @property
    def dtype(self):
        # Parameter dtype of the visual stem (fp16 after convert_weights).
        return self.visual.conv1.weight.dtype

    def encode_image(self, image):
        """Return the visual embedding of `image` (cast to model dtype)."""
        return self.visual(image.type(self.dtype))

    def encode_text(self, text):
        """Return the text embedding of token-id tensor `text` [N, n_ctx]."""
        x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding.type(self.dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x).type(self.dtype)

        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection

        return x

    def forward(self, image, text):
        """Return (logits_per_image, logits_per_text) for a batch pair."""
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)

        # normalized features
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logit_scale * text_features @ image_features.t()

        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
    """Convert applicable parameters of `model` to fp16, in place.

    Touches conv/linear weights and biases, MultiheadAttention projection
    tensors, and the `text_projection`/`proj` parameters CLIP attaches to
    some modules.  Other parameters (e.g. LayerNorm) stay fp32.
    """

    def _convert_weights_to_fp16(l):
        if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            l.weight.data = l.weight.data.half()
            if l.bias is not None:
                l.bias.data = l.bias.data.half()

        if isinstance(l, nn.MultiheadAttention):
            attrs = ["in_proj_weight", "q_proj_weight", "k_proj_weight",
                     "v_proj_weight", "in_proj_bias", "bias_k", "bias_v"]
            for attr in attrs:
                tensor = getattr(l, attr)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        for name in ["text_projection", "proj"]:
            attr = getattr(l, name, None)
            if attr is not None:
                attr.data = attr.data.half()

    model.apply(_convert_weights_to_fp16)
def build_model(state_dict: dict):
    """Reconstruct a CLIP model from a checkpoint state dict.

    All hyperparameters (widths, layer counts, resolution, vocab size, ...)
    are inferred from tensor shapes in `state_dict`, so no explicit config
    is needed.  Supports both the ViT and ModifiedResNet visual backbones,
    and also accepts checkpoints whose keys carry the wrapper prefix
    "module.bert.encoder.visual_model." (stripped before loading).

    Returns the loaded model in eval mode.  It stays fp32: the usual
    `convert_weights` fp16 conversion is deliberately commented out.
    """
    # A ViT checkpoint carries "visual.proj"; a ResNet one does not.
    vit = "visual.proj" in state_dict or "module.bert.encoder.visual_model.visual.proj" in state_dict
    # if args.use_clip_visual:
    if "module.bert.encoder.visual_model.visual.proj" in state_dict:
        # Strip the wrapper prefix so keys match this module's layout;
        # keys without the prefix are dropped here.
        new_state_dict = {}
        for key, value in state_dict.items():
            if key.startswith("module.bert.encoder.visual_model."):
                new_state_dict[key.replace('module.bert.encoder.visual_model.', '')] = value
        state_dict = new_state_dict
    if vit:
        # ViT: layer count = number of attention blocks; resolution is
        # recovered from the positional-embedding grid.
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_resolution = vision_patch_size * grid_size
    else:
        # ResNet: per-stage block counts from the layerN.* key indices.
        counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
        vision_patch_size = None
        assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
        # The ResNet downsamples by a total factor of 32.
        image_resolution = output_width * 32
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
    model = CLIP(
        embed_dim,
        image_resolution, vision_layers, vision_width, vision_patch_size,
        context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
    )
    # These scalar entries are checkpoint metadata, not parameters; drop
    # them so load_state_dict does not complain about unexpected keys.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]
    #convert_weights(model)
    model.load_state_dict(state_dict)
    return model.eval()
| 18,310 | 39.511062 | 178 | py |
jericho | jericho-master/jericho/game_info.py | # Copyright (C) 2018 Microsoft Corporation
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
nine05 = {
"name": "905",
"rom": "905.z5",
"seed": 0,
"walkthrough": 'answer phone/stand up/s/remove watch/remove clothes/drop all/enter shower/take watch/wear watch/n/get all from table/open dresser/get clothes/wear clothes/e/open front door/s/unlock car/enter car/no/no/yes',
"grammar" : "awake/awaken/wake;awake/awaken/wake up;bathe/shower/wash;carry/hold/take inventory;die/q/quit;disrobe/undress;get out/off/up;hear/listen;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/enter;l/look;leave/go/run/walk;nap/sleep;noscript/unscript;recording;recording off;recording on;replay;restart;restore;save;script/transcrip;script/transcrip off;script/transcrip on;smell/sniff;stand up;stand/exit/out/outside;wait/z;yes;no;answer OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;carry/hold/take off OBJ;carry/hold/take/get OBJ;close/shut OBJ;don/wear OBJ;drive OBJ;drop OBJ;eat OBJ;get in/into/on/onto OBJ;get off OBJ;get out of OBJ;go/run/walk to/into/in/inside/through OBJ;go/run/walk/enter OBJ;hear/listen OBJ;hear/listen to OBJ;knock on OBJ;l/look at OBJ;l/look inside/in/into/through/on OBJ;l/look under OBJ;leave into/in/inside/through OBJ;leave/exit/out/outside OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;lock OBJ;open OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read/examine/x OBJ;remove OBJ;search OBJ;sign OBJ;smell/sniff OBJ;stand on OBJ;taste OBJ;turn OBJ off;turn OBJ on;turn off OBJ;turn on OBJ;unlock OBJ;use OBJ;carry/hold/take/get OBJ from OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;open OBJ with OBJ;put OBJ in/inside/into OBJ;put OBJ on/onto OBJ;sign OBJ with OBJ;",
"max_word_length" : 9
}
acorncourt = {
"name": "acorncourt",
"rom": "acorncourt.z5",
"seed": 4,
"walkthrough": 'point machine at tree/get ball/put ball in machine/turn on machine/turn off machine/fix bucket with acorn/fix bucket with acorn/fix bucket with acorn/fix bucket with acorn/turn crank/tie bucket to rope/turn crank/turn crank/look in bucket/get key/unlock gate/open gate',
"minimal_actions": 'take key/look in bucket/take pail/unlock gate with key/turn crank/aim machine at tree/turn off machine/take balls from pail/take acorns/put balls in machine/turn on machine/open gate/fix bucket with acorn/tie bucket to rope',
"grammar" : "awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;carry/hold/take inventory;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;hear/listen;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;leave/go/run/walk;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;restart;restore;save;score;script;script off;script on;short/superbrie;sing;smell/sniff;sorry;stand up;think;verify;version;wait/z;wave;y/yes;adjust/set OBJ;aim/direct/point OBJ;aim/direct/point OBJ east;aim/direct/point OBJ west;attach/fasten/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/hold/take OBJ;carry/hold/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cross/enter/go/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull/lift/raise OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;get/peel OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;open/uncover/undo/unwrap OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ 
off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;wave OBJ;adjust/set OBJ to OBJ;aim/direct/point OBJ at OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/hold/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;fix/plug/repair OBJ with OBJ;get OBJ from OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/carry/hold/take OBJ from OBJ;tell OBJ about OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
adventureland = {
"name": "adventureland",
"rom": "adventureland.z5",
"seed": 4,
# Walkthrough adapted from http://mirror.ifarchive.org/if-archive/solutions/jgunness.zip
"walkthrough": 'E/N/GET AXE/D/GET OX/BUNYON/SWIM/S/W/W/GET OX/GET AXE/GET FRUIT/E/CLIMB TREE/GET KEYS/D/CHOP TREE/DROP AXE/GO STUMP/DROP FRUIT/DROP OX/GET LAMP/GET BOTTLE/D/GET RUBIES/U/DROP RUBIES/D/D/UNLOCK DOOR with keys/DROP KEYS/open door/D/LIGHT LAMP/D/S/GET BLADDER/N/D/D/GET FLINT/W/N/GET RUG/D/GET NET/U/U/AWAY/turn off LAMP/S/DROP FLINT/DROP BLADDER/take mud/N/E/GET FISH/S/W/GO STUMP/DROP FISH/DROP NET/U/GET FLINT/GET BLADDER/GET GAS/GO STUMP/DROP MUD/D/D/D/LIGHT LAMP/D/S/U/DROP BLADDER/LIGHT GAS/GO LEDGE/JUMP/say boo/GET MIRROR/E/GET CROWN/W/JUMP/W/D/N/U/U/U/U/DROP CROWN/D/D/D/D/S/U/GET BRICKS/D/N/D/D/W/N/D/throw bricks/GET FIRESTONE/U/U/AWAY/turn off lamp/E/W/S/GO STUMP/DROP FIRESTONE/DROP RUG/DROP MIRROR/GET MUD/D/D/D/LIGHT LAMP/D/N/GET HONEY/DRINK WATER/GET BEES/S/U/U/U/U/U/N/e/take water/w/take mud/go stump/d/d/d/d/n/drink water/get bees/s/u/u/u/u/drop bottle/take bottle/n/take mud/n/DROP BEES/GET EGGS/S/get oil/GO STUMP/DROP EGGS/DROP HONEY/turn off lamp/rub lamp/rub lamp',
"minimal_actions": 'Light gas/Swim/Get bees/Drop firestone/Get crown/Get net/Drop fruit/Get mirror/Get lamp/Lamp off/Drop bladder/Get bottle/Get ox/Open door/Go west/Go north/Get rubies/Rub lamp/Drop keys/Go south/Drop net/Drop mirror/Go down/Drop rug/Drop bricks/Get eggs/Get flint/go down/Get bladder/Go hole/Cut tree/Get gas/Go hallway/Yell/Jump/Climb tree/Drop mud/Drink water/Catch fish/Get bricks/Get axe/Drop bees/Drop ox/Get mud/Drop honey/Drop fish/Get firestone/Drop bottle/Go up/Bunyon/get bottle/Light lamp/Get fruit/Get honey/Drop axe/Drop eggs/Drop flint/Get keys/Go east/Go throne/Away/Drop crown/Get rug/Drop rubies',
"grammar" : "awake/awaken/wake;awake/awaken/wake up;away;bother/curses/darn/drat;brief/normal;bunyon;carry/get/hold/take inventory;carry/get/hold/take off;carry/get/hold/take out;close/cover/shut up;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;go/leave/run/walk;hear/listen;help;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;restart;restore;save;score;script;script off;script on;short/superbrie;sing;smell/sniff;sorry;stand/carry/get/hold/take up;think;tsurris;verify;version;wait/z;wave;y/yes;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/get/hold/take OBJ;carry/get/hold/take off OBJ;chop/cut/prune/slice OBJ;chop/cut/prune/slice down OBJ;chop/cut/prune/slice up OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale over OBJ;climb/scale up OBJ;close/cover/shut OBJ;cross/enter/go/leave/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;find OBJ;go/leave/run/walk through OBJ;go/leave/run/walk/carry/get/hold/take into OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look in OBJ;l/look inside OBJ;l/look into OBJ;l/look through OBJ;l/look under OBJ;lie/sit/go/leave/run/walk inside OBJ;lie/sit/go/leave/run/walk/carry/get/hold/take in OBJ;lie/sit/stand/carry/get/hold/take on OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put 
down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;smell/sniff OBJ;squash/squeeze OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;wave OBJ;adjust/set OBJ to OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/get/hold/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;dig OBJ with OBJ;discard/drop/throw OBJ against OBJ;discard/drop/throw OBJ at OBJ;discard/drop/throw OBJ down OBJ;discard/drop/throw/insert/put OBJ in OBJ;discard/drop/throw/insert/put OBJ into OBJ;discard/drop/throw/put OBJ on OBJ;discard/drop/throw/put OBJ onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ into OBJ;empty OBJ on OBJ;empty OBJ onto OBJ;empty OBJ to OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;lock OBJ with OBJ;put OBJ inside OBJ;remove/carry/get/hold/take OBJ from OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
advent = {
"name": "advent",
"rom": "advent.z5",
"seed": 5,
"walkthrough": 'E/GET ALL/W/S/S/S/UNLOCK GRATE WITH KEYS/OPEN GRATE/D/W/GET CAGE/W/LIGHT LAMP/W/W/GET BIRD/W/D/S/GET NUGGET/N/N/DROP BIRD/GET BIRD/N/DROP KEYS/GET SILVER/N/PLUGH/DROP SILVER/DROP NUGGET/DROP CAGE/DROP FOOD/PLUGH/S/S/SW/W/KILL DRAGON/YES/GET RUG/E/D/N/N/W/W/D/POUR WATER ON PLANT/U/E/E/NW/NW/N/W/DROP ALL/E/GET EMERALD/W/GET ALL/NW/S/GET VASE/SE/E/GET PILLOW/W/NE/E/U/E/U/N/PLUGH/DROP PILLOW/DROP RUG/DROP VASE/DROP EMERALD/FILL BOTTLE/GET FOOD/PLUGH/PLOVER/NE/GET PYRAMID/S/PLOVER/PLUGH/DROP PYRAMID/PLUGH/S/GET KEYS/D/W/D/W/W/W/W/D/POUR WATER ON BEANSTALK/U/E/D/FILL BOTTLE/U/W/D/CLIMB BEANSTALK/W/GET EGGS/WAIT/GET AXE/N/OIL DOOR/N/GET TRIDENT/W/D/DROP TRIDENT/SW/U/NE/GIVE EGGS/NE/NE/E/D/S/E/GIVE FOOD TO BEAR/UNLOCK CHAIN WITH KEYS/GET BEAR/GET CHAIN/W/W/N/NE/E/GET SPICES/W/S/W/W/SW/DROP BEAR/SW/SW/D/GET TRIDENT/SE/SE/NE/E/DROP KEYS/E/GET MAGAZINE/E/DROP MAGAZINE/W/U/N/OPEN CLAM WITH TRIDENT/D/D/GET PEARL/U/U/S/U/E/U/N/PLUGH/DROP TRIDENT/DROP CHAIN/DROP SPICES/DROP PEARL/PLUGH/S/D/W/D/W/W/W/W/D/CLIMB BEANSTALK/W/FEE/FIE/FOE/FOO/GET EGGS/S/E/U/E/E/NE/E/U/E/THROW AXE AT DWARF/U/N/PLUGH/DROP EGGS/PLUGH/S/S/S/GET JEWELRY/N/W/GET COINS/W/W/E/E/GET DIAMONDS/N/W/N/E/E/N/N/PLUGH/DROP DIAMONDS/DROP COINS/DROP JEWELRY/PLUGH/S/S/W/W/W/E/S/E/S/N/GET ALL/SE/N/D/E/E/GET ROD/XYZZY/DROP ALL/GET LAMP/PLUGH/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/SW/GET ALL/NE/DROP ALL/SW/BLAST',
"minimal_actions": 'EXAMINE LAMP/TAKE VASE/NW/DROP PILLOW/WATER PLANT/N/TAKE BARS/NE/TAKE DIAMONDS/TAKE CAGE/OIL GATE/DROP CAGE/SAY PLOVER/THROW PENTACLE/DROP ELIXIR/TAKE PENTACLE/THROW BEAR/DOWN/SW/LOCK DOOR/S/OPEN CLAM/OPEN GRATE/DROP SPICES/EXAMINE GRATE/TAKE PILLOW/KILL DRAGON/DROP RUG/ON LAMP/TAKE BOTTLE/TAKE KEYS/TAKE ORB/DROP BARS/IN/TAKE RUG/CROSS BRIDGE/WAVE ROD/FEE/TAKE PEARL/OPEN CHAIN/FREE BIRD/SE/TAKE SCEPTRE/DROP COINS/DROP GAZETTE/FOO/FIE/DROP KEYS/TAKE BIRD/TAKE LAMP/TAKE ROD/DROP PYRAMID/TAKE COINS/DROP EGGS/TAKE DYNAMITE/DROP LAMP/FEED BEAR/SAY PLUGH/EXAMINE SANDWICHES/FILL BOTTLE/DROP CHAIN/YES/FOE/UP/DROP DYNAMITE/TAKE TRIDENT/DROP BOTTLE/SAY XYZZY/TAKE ELIXIR/TAKE SPICES/DROP TRIDENT/TAKE CHAIN/TAKE CROWN/DROP ROD/TAKE NUGGET/E/CLIMB PLANT/TAKE SANDWICHES/TAKE EMERALD/BLAST/DROP EMERALD/DROP NUGGET/TAKE BEAR/OFF LAMP/W/DROP VASE/TAKE PYRAMID/DROP DIAMONDS/TAKE GAZETTE/OUT/TAKE EGGS/DROP JEWELLERY/GIVE EGGS/TAKE JEWELLERY',
"grammar" : "abracadab/foobar/frotz/hocus/open-ses/sesame/shazam;answer/say/shout/speak blast;answer/say/shout/speak fee;answer/say/shout/speak fie;answer/say/shout/speak foe;answer/say/shout/speak foo;answer/say/shout/speak plover;answer/say/shout/speak plugh;answer/say/shout/speak sesame/shazam/hocus/abracadab/foobar/open-ses/frotz;answer/say/shout/speak xyzzy;awake/awaken/wake;awake/awaken/wake up;blast;bother/curses/darn/drat;brief/normal;carry/hold/take inventory;damn/fuck/shit/sod;die/q/quit;dive/swim;fee;fie;foe;foo;full/fullscore;full/fullscore score;get out/off/up;hear/listen;help;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;leave/go/run/walk;long/verbose;nap/sleep;no;noscript/unscript;notify;notify off;notify on;nouns/pronouns;objects;off;on;places;plover;plugh;pray;recording;recording off;recording on;replay;restart;restore;save;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;sing;smell/sniff;sorry;stand up;stand/exit/out/outside;think;use;verify;version;wait/z;wave;xyzzy;y/yes;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;capture/catch OBJ;carry/hold/take off OBJ;carry/hold/take/get OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;count OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;douse/pour oil on OBJ;douse/pour water on OBJ;douse/pour/empty OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;free/release OBJ;get in/into/on/onto OBJ;get off OBJ;go/run/walk/cross/enter 
OBJ;grease/lubricate/oil OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;kick OBJ;l/look at OBJ;l/look inside/in/into/through/on OBJ;l/look under OBJ;leave/exit/out/outside OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;rotate/screw/turn/twist/unscrew OBJ;rotate/screw/turn/twist/unscrew/switch OBJ off;rotate/screw/turn/twist/unscrew/switch OBJ on;rotate/screw/turn/twist/unscrew/switch on OBJ;rotate/screw/turn/twist/unscrew/switch/close/cover/shut off OBJ;search OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;taste OBJ;water OBJ;wave OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;ask that OBJ OBJ;attach/fasten/fix/tie OBJ to OBJ;blast OBJ with OBJ;burn/light OBJ with OBJ;capture/catch OBJ with OBJ;carry/hold/take OBJ off OBJ;carry/hold/take/remove/get OBJ from OBJ;clear/move/press/push/shift OBJ OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;force/jemmy/lever/prise/prize/pry OBJ apart/open with OBJ;force/jemmy/lever/prise/prize/pry apart/open OBJ with OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;put OBJ in/inside/into OBJ;put/discard/drop/throw OBJ on/onto OBJ;read OBJ in OBJ;read about OBJ in OBJ;tell OBJ about OBJ;tell/ask OBJ to OBJ;transfer/clear/move/press/push/shift OBJ to OBJ;unlock/force/jemmy/lever/prise/prize/pry/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
afflicted = {
"name": "afflicted",
"rom": "afflicted.z8",
"seed": 0,
"walkthrough": 'read paper/e/search grate/search window/knock on window/w/n/x tables/note tables/ask angela about affliction/e/search ice/take hand/n/note vomit/note sink/note dryer/s/note counter/note stools/w/n/n/w/open cauldron/take foot/e/n/open dish washer/take right foot/s/s/s/take lamp/turn on lamp/n/n/n/d/open crate/u/s/s/x prep table/x shelf/take tongs/s/s/e/take eyes with tongs/w/n/n/n/x chopper/note it/x blade/open cover/turn screw/reach in chopper/open kit/wear tourniquet/take all/s/s/u/take key/read book/read killing/read culture/d/n/n/n/d/put eyes in body/put entrails in body/u/n/open dumpster/take all/s/d/put all in crate/no/u/s/s/s/s/w/search window/break window/x opener/take opener/e/n/kill sofia with opener',
"grammar" : "awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;carry/hold/take inventory;curse/damn/fuck/shit;disco/dance;exit/out/stand;get out/off/up;go/leave/run/walk;go/leave/run/walk through;go/leave/run/walk to bathroom;hints/hint;hints/hint off;hop/jump/skip;i/inv/inventory;info/about/help;l/look;listen;long/verbose;nap/sleep;no;normal/brief;notify;notify off;notify on;plugh/xyzzy;pronouns/nouns;q/quit;restart;restore;save;score;short/superbrie;sing;smell/sniff;solution/walkthru/walkthrou;sorry;stand up;think;transcrip/script;transcrip/script off;transcrip/script on;urinate/pee;use toilet;verify;version;wait/z;wave;y/yes;accuse OBJ;apply OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;burn/light OBJ;buy/purchase OBJ;carry/hold/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cross/enter/go/leave/run/walk OBJ;damn/fuck/shit OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;document/note OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;flush OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;go/leave/run/walk into/in/inside/through OBJ;hear OBJ;inspect/check/describe/examine/watch/x OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;lick/taste OBJ;listen to OBJ;lock OBJ;pick OBJ up;pick up OBJ;play OBJ;put OBJ down;put OBJ on;put down OBJ;put on OBJ;put on OBJ;reach in OBJ;reach inside OBJ;reach into OBJ;read OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;sit on top of OBJ;sit on/in/inside OBJ;slap/kick/attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on OBJ;stroke/feel/touch OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ 
on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;talk to OBJ;tap/knock OBJ;tap/knock on OBJ;uncover/open/unwrap OBJ;unlock OBJ;wave OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach OBJ to OBJ;carry/hold/take OBJ off OBJ;consult OBJ on/about OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;fasten/fix/tie OBJ to OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;get/carry/hold/take OBJ with OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;open/unlock OBJ with OBJ;put OBJ in/inside/into OBJ;remove/get/carry/hold/take OBJ from OBJ;stab/attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ with OBJ;tell OBJ about OBJ;",
"max_word_length" : 9
}
anchorhead = {
"name": "anchor",
"rom": "anchor.z8",
"seed": 0,
"walkthrough": 'Se/Push can against wall/Climb can/Up/West/Push play/East/Look up Verlac/West/Unlock Door/West/West/West/NW/West/Read book/Show keys to Michael/East/SE/East/South/South/East/South/SW/NW/Unlock door/North/Close door/Lock door/Up/North/Undress/Drop all/West/Bathe/East/Lie in bed/Sleep/Look/wait/wait/Get wallet/Open wallet/Get card/Leave bed/Dress/Get all/Wear coat/South/Down/West/Open cupboard/Pull lining/Read journal/North/Get flashlight/Open Cabinet/Get matches/Turn on flashlight/NW/Get broom/Unlock door/Down/South/Search crates/Get box/Drop cardboardbox/Wipe web/Drop broom/Get key/Put key on keyring/North/Up/SE/East/East/Look at paintings/Look at scene/South/Get album/Look up Wilhelm in album/Look up Eustacia in album/Look up Croseus in album/West/Up/East/North/Get volume/Look at bookshelf/Get Poe/Examine safe/turn dial to 47/turn dial to 11/ turn dial to 3/Look in safe/Get puzzle box and flute/South/South/Open jewelry box/Get silver locket/Push bed/Look in hole/Get pages/Read pages/North/West/Down/North/West/Unlock door/North/NW/Unlock crypt/Down/Examine coffin/Look up William on nameplate/Open William coffin/Get skull/Up/SE/South/Close door/Lock door/East/South/Unlock door/South/SE/NE/North/West/Get newspaper/Read newspaper/South/South/SE/Look up Edward in record/Look up Mordecai in record/Look up Elijah inrecord/Look up Heinrich in record/Look up Wilhelm in record/NW/SW/Look up Edward in record/Look up Mordecai in Record/Look up Elijah inrecord/Look up Heinrich in record/ Look up Wilhelm in record/NE/North/North/North/North/West/NW/West/North/Ring bell/Show card to librarian/Ask librarian for book/Open Historical/Get slip of paper/Read slip of paper/Drop historical/South/East/SE/East/North/Get Lantern/South/Look under table/Get flask/South/NW/Read wall/West/South/Look in displaycase/Ask proprietor about amulet/Give puzzle box to proprietor/Get puzzle box/North/East/SW/South/South/East/East/Give flask to bum/Ask bum about himself/Ask bum about 
brother/Ask bum about Anna/Ask bum about crypt/Tell bum about skull/Show skull to bum/Give amulet to bum/Put copper key on keyring/SE/Get tin/NW/West/South/SW/NW/North/ Close door/Up/East/North/East/Get letter opener/Look at screen/Remove ring/Look at ring/Wear ring/Type 0628 on laptop/Look at screen/West/South/Pull cord/Up/Push newspaper under door/Put letter opener in keyhole/Get newspaper/Get brass key/Put brass key on keyring/Unlock door/North/Search straw/Get gold locket/South/Down/West/North/West/Get towel/East/Undress/Drop all/Lie in bed/Sleep/sleep/sleep/sleep/Leave bed/Dress/Wear coat/Get all/South/East/North/East/Look at fireplace/Turn sphere/SW/West/Look in hole/East/Look in hole/NW/Look in hole/SW/SE/Put lens in telescope/Look in telescope/SE/East/Down/West/Down/North/West/NW/Down/East/Search wine rack/Turn c/Turn w/Turn h/Turn e/Turn m/North/North/North/NE/Down/Say ialdabaoloth/North/South/Up/SW/NW/East/Down/West/Down/South/SE/NE/South/South/Look at woods/West/SW/West/Get drawing paper/Get Hook/South/Lift plywood cover/Down/Search bones/Get Teddy/Up/North/East/South/Down/Hide under bones/Wait/Up/Up/North/East/NE/East/North/East/SE/Break padlock with hook/Down/Look at shape/Search shape/Put steel key on keyring/Look at furnace/Open hatch/West/ Put all in pocket/Jump onto riser/North/Get cloth/East/Read huge tome/East/Up/Up/Get rope/Down/West/West/South/Drop robe in shaft/Tie rope to railing/Down/Drop rope/Light flashlight/Get robe/get lantern/NW/North/North/Open tin/Put oil on hatch/Open hatch/Up/East/West/NW/East/Unlock drawer/Open drawer/Get all from drawer/Read letter/Put bronze key on keyring/West/West/West/North/West/North/Knock on door 11/Give teddy to woman/Look in overalls/Get long steel key/Put long steel key on keyring/West/South/South/Search thicket/Unlock hatch/get lantern/North/Light match/Light lantern/Put towel on valve/Turn wheel/North/North/Open hatch/Wait/Wait/North/Tie chain to me/Look at controls/Pull lever/Untie chain/North/Read 
notes/Get caliper/Get memo/Get Blueprint/Read blueprint/Get mirror 1/Put mirror 1 in caliper/Open tin/Rub oil on mirror 1/South/Down/Jump off equipment/South/South/South/South/NE/East/East/NE/East/Up/Down/wait/wait/Break door/Break door/Get glass/Look at window pane/Put glass in crack/Cut jacket with glass/Open closet/Wear coat/Get all from closet/South/South/Unlock west door/Open west door/West/Look in tear/Get torn square/Read torn square/Get needle/East/South/South/Down/Get lantern/Light match/Light lantern/North/North/Get magazine/Give magazine to madman/Get large key/Put large key on keyring/Unlock gate/North/North/North/Remove coat/Wear robe/East/Wait/wait/wait/wait/wait/Look/Put all in pocket/Get amulet/North/Give gold locket to creature/Hit creature with hook/North/West/North/East/NE/Unlock door/Open door/East/Up/Look at device/Get real mirror/put all in pocket/Wait/Give mirror 1 to Michael/Wait/Wait/Wait/Wait/Wait/Wait/Wait/Wait/Wait/Pick cuffs with needle/Free boy/West/SW/SW/West/West/North/Wait/South/East/South/East/South/South/Touch obelisk/Show ring to Michael/Show amulet to Michael/cover hole 3/cover hole 6/Play flute/wait/wait/Go north/Get test/Look at little window',
"grammar" : "about/author/credit/credits/help/hint/hints/menu;awake/awaken/wake;awake/awaken/wake up;bathe/shower;bother/curses/darn/drat;brief/normal;chant/sing;crawl/go/run/walk to bed;crawl/go/run/walk to sleep;cry/scream/shout/shriek/yell;damn/fuck/shit/sod;describe off;describe on;die/q/quit;dive/swim;doze/nap/sleep;dress;exit/out/outside;fall asleep;fool/mess/play around;full/fullscore;full/fullscore score;get dressed;get out/off/down;get out/off/up;get undressed;hear/listen;hello/hi;hide;hop/jump/leap/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;ialdabaol;in/inside/cross/enter;l/look;l/look around;leave/crawl/go/run/walk;lie/sit;lie/sit down;long/verbose;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;play;pray;remember/think;restart;restore/resume;save;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;smell/sniff;sorry;speak;speak/answer/say ialdabaol;stand;stand/get up;swing;take inventory/inv/i;undress;verify;version;wait/z;wave;y/yes;adjust/set OBJ;ask for OBJ;attack/break/crack/destroy/fight/hit/kick/kill/murder/punch/scratch/smash/strike/swat/thump/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;bend/straighte OBJ;bite/chew/devour/eat OBJ;blow/breathe OBJ;blow/breathe OBJ up;blow/breathe in/into OBJ;blow/breathe out/on OBJ;blow/breathe up OBJ;blow/breathe/lay/place/put/stick OBJ out;bother/curses/darn/drat OBJ;burn/ignite/light OBJ;bury OBJ;buy/purchase OBJ;call/dial/phone/telephone OBJ;check/examine/inspect/watch/x OBJ;chop/cut/prune/rip/slice/tear OBJ;chuck/discard/drop/throw/toss OBJ;clean/dust/polish/rub/scrub/shine/smear/sweep/wipe OBJ;clear/move/press/push/rock/roll/shift/shove/slide OBJ;climb/scale OBJ;climb/scale down/up/over/on/onto OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;crawl/go/run/walk around/to/by/toward/towards/near/behind OBJ;crawl/go/run/walk close/closer to OBJ;crawl/go/run/walk down/up OBJ;crawl/go/run/walk 
into/in/inside/through/under OBJ;crease/fold/unfold OBJ;cross/enter/crawl/go/run/walk OBJ;damn/fuck/shit/sod OBJ;dangle/hang from/off/on OBJ;dangle/hang on to OBJ;dig OBJ;dig in OBJ;disrobe/doff/shed/remove OBJ;dive/swim/climb/scale in/into OBJ;don/wear OBJ;drag/haul/pull/yank OBJ;drag/haul/pull/yank on OBJ;draw/have OBJ;drink/gulp/imbibe/sip/swallow OBJ;dump/pour/spill OBJ;dump/pour/spill OBJ out;dump/pour/spill out OBJ;embrace/hug/kiss/smooch OBJ;empty OBJ out;empty out OBJ;exit/out/outside/leave OBJ;extinguis/snuff OBJ;feel/fondle/grope/touch/taste OBJ;find OBJ;flip/switch OBJ;flip/switch/rotate/screw/spin/turn/twist/unscrew OBJ off;flip/switch/rotate/screw/spin/turn/twist/unscrew OBJ on;flip/switch/rotate/screw/spin/turn/twist/unscrew on OBJ;flip/switch/rotate/screw/spin/turn/twist/unscrew/close/cover/shut off OBJ;fool/mess OBJ;fool/mess/play around with OBJ;fool/mess/play with OBJ;free/loosen/untie OBJ;get down from OBJ;get in/into/inside OBJ;get off OBJ;get off of OBJ;get on/onto OBJ;get under OBJ;get up from OBJ;get/carry/catch/grab/hold/hook/snag/take OBJ;grip/squash/squeeze OBJ;grip/squash/squeeze/leave through OBJ;hear/listen OBJ;hear/listen to OBJ;hello/hi OBJ;hide in/among/under/behind/inside OBJ;hop/jump/leap/skip in/on/onto/to/into OBJ;hop/jump/leap/skip off OBJ;hop/jump/leap/skip over OBJ;hop/jump/leap/skip/get out of OBJ;jiggle/rattle/shake OBJ;jimmy/pick lock with OBJ;knock/rap on/at OBJ;l/look at OBJ;l/look behind OBJ;l/look inside/in/into/through/on/out OBJ;l/look under OBJ;l/look up/for OBJ;latch/lock OBJ;lay/place/put/stick OBJ down;lay/place/put/stick OBJ on;lay/place/put/stick down OBJ;lay/place/put/stick finger/fingers on/over/in OBJ;lay/place/put/stick on OBJ;lay/place/put/stick out OBJ;lean on/against OBJ;let go of OBJ;lie/sit down in/on OBJ;lie/sit down on/at OBJ;lie/sit in/inside OBJ;lie/sit on/at OBJ;lift/raise OBJ;lift/raise/take finger/fingers from/off OBJ;lift/raise/take finger/fingers off of OBJ;lower OBJ;measure 
OBJ;open/uncover/unwrap OBJ;pack OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;play OBJ;play on OBJ;pray to OBJ;read OBJ;read about OBJ;remember/think OBJ;remember/think about OBJ;remove finger/fingers from OBJ;ring OBJ;rotate/screw/spin/turn/twist/unscrew OBJ;search OBJ;search for OBJ;set OBJ down;set down OBJ;set fire to OBJ;smell/sniff OBJ;stand on OBJ;swing OBJ;swing on OBJ;take OBJ off;take off OBJ;trip OBJ;type OBJ;unlock OBJ;unpack/empty OBJ;wave OBJ;wave at/to OBJ;affix/attach/fasten/fix/tie OBJ to/on/onto OBJ;answer/say OBJ to OBJ;ask OBJ for OBJ;attack/break/crack/destroy/fight/hit/kick/kill/murder/punch/scratch/smash/strike/swat/thump/wreck OBJ with OBJ;burn/ignite/light OBJ with OBJ;bury OBJ in/under OBJ;chop/cut/prune/rip/slice/tear OBJ with OBJ;chuck/discard/drop/throw/toss OBJ at/against/on/onto OBJ;chuck/discard/drop/throw/toss OBJ in/into/down OBJ;chuck/discard/drop/throw/toss/lay/place/put/stick OBJ on/onto OBJ;clean/dust/polish/rub/scrub/shine/smear/sweep/wipe OBJ on/onto OBJ;clean/dust/polish/rub/scrub/shine/smear/sweep/wipe OBJ with OBJ;clear/move/press/push/rock/roll/shift/shove/slide OBJ to/under/underneat/beneath/against/by/near OBJ;clear/move/press/push/rock/roll/shift/shove/slide OBJ with OBJ;clear/move/press/push/rock/roll/shift/shove/slide/drag/haul/pull/yank/lay/place/put/stick OBJ close/closer to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;dig in OBJ with OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;drag/haul/pull/yank/lay/place/put/stick OBJ under/underneat/beneath OBJ;dump/pour/spill OBJ into/in/onto/on/over OBJ;feed/give/hand/offer/pay OBJ OBJ;feed/give/hand/offer/pay OBJ to OBJ;feed/give/hand/offer/pay over OBJ to OBJ;fill OBJ with OBJ;get/carry/catch/grab/hold/hook/snag/take OBJ with OBJ;hide OBJ in/under OBJ;insert OBJ in/into OBJ;jimmy/pick OBJ with OBJ;latch/lock OBJ with OBJ;lay/place/put/stick OBJ in/inside/into OBJ;lay/place/put/stick OBJ in/into/inside/between 
OBJ;lay/place/put/stick OBJ on/onto/across/over OBJ;measure OBJ with OBJ;rotate/screw/spin/turn/twist/unscrew/adjust/set OBJ to OBJ;set OBJ on OBJ;set OBJ on/across/over OBJ;set fire to OBJ with OBJ;speak OBJ to OBJ;unlock/open/uncover/unwrap OBJ with OBJ;unpack/remove/get/take OBJ from/off OBJ;wave OBJ at OBJ;",
"max_word_length" : 9
}
# Game definition for "Awaken" (z-machine story file awaken.z5).
# Keys:
#   name            - short identifier for the game
#   rom             - z-machine story file to load
#   seed            - integer seed value consumed by the caller
#                     (semantics not visible in this file -- confirm in caller)
#   walkthrough     - '/'-separated command sequence that plays the game through
#   walkthrough     - (commands are fed verbatim to the game parser)
#   grammar         - ';'-separated action templates; '/' separates synonyms
#                     within a word slot and OBJ marks an object placeholder
#   max_word_length - 9; presumably the number of significant characters per
#                     dictionary word (matches z-machine v4+ width) -- TODO confirm
awaken = {
"name": "awaken",
"rom": "awaken.z5",
"seed": 0,
"walkthrough": 'u/take limb/e/se/n/hit dog/nw/n/e/s/n/push railing/take section/n/n/n/read book/take book/n/search desk/take stopper/take ash/put ash on journal/read journal/move bookshelf/take old book/read old book/take robe/s/s/s/put ladder under trap/climb ladder/climb rope/tie rope to branch/d/tie rope to handle/d/take ladder/s/s/put ladder under tree/climb ladder/n/push bell/s/d/get ladder/n/n/put ladder under trap/climb ladder/n/take lantern/n/take green bottle/open green bottle',
"grammar" : "about/help;awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;carry/hold/take inventory;clue/clues/hint/hints;clue/clues/hint/hints off;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;get out/off/up;hear/listen;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;leave/go/run/walk;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;restart;restore;save;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;sing;smell/sniff;sorry;stand up;think;verify;version;wait/z;wave;xyzzy;y/yes;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kick/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/hold/take off OBJ;check/describe/examine/watch/x OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cross/enter/go/run/walk OBJ;damn/fuck/shit/sod OBJ;detach/untie OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/pet/touch OBJ;fill OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read OBJ;ring OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;smell/sniff OBJ;squash/squeeze 
OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;wave OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/hold/take OBJ off OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ on OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ with OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;detach/untie OBJ from OBJ;dig OBJ with OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;put OBJ in/inside/into OBJ;put OBJ under/below/beneath/against OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/hold/take OBJ from OBJ;tell OBJ about OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
# Game definition for "Balances" (z-machine story file balances.z5).
# Same schema as the other game dicts in this module: name, rom, seed,
# walkthrough ('/'-separated command script), grammar (';'-separated action
# templates with OBJ object slots and '/'-separated synonyms), and
# max_word_length (9 -- presumably the parser's significant-word width;
# matches z-machine v4+ dictionaries, confirm against the consumer).
balances = {
"name": "balances",
"rom": "balances.z5",
"seed": 0,
"walkthrough": 'search furniture/learn rezrov/rezrov box/get grimoire from box/examine grimoire/out/north/search oats/examine shiny scroll/gnusto shiny scroll/learn bozbar/bozbar horse/mount horse/north/learn yomin/yomin tortoise/learn bozbar/bozbar tortoise/get chewed scroll/northwest/take sapphire/examine sapphire/examine book/caskly chewed scroll/gnusto scroll/n/take carpet/s/learn rezrov/rezrov door/w/learn frotz/frotz coin/take cube/write cave on cube/take crumpled/gnusto crumpled/take gold coin from right pan/take bronze coin from left pan/put gold coin on right/put silver coin on left/e/se/s/learn urbzig/urbzig snake/take cube/write chasm on featureless cube/drop carpet/sit on carpet/stand/sit on carpet/stand/take carpet/e/listen/learn lobal/lobal me/listen/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/take feather/w/nw/w/take silver/put bronze on left/put feather on left/e/se/drop carpet/sit on carpet/stand/take carpet/learn yomin/drop carpet/sit on carpet/learn yomin/learn yomin/learn yomin/stand/sit on carpet/stand/give silver to barker/yomin barker/take ticket 2306/yomin barker/take ticket 5802/give ticket 2306 to barker/give ticket 5802 to barker/write prize on featureless/sit on carpet/stand/take carpet/learn urbzig/learn lleps/lleps urbzig/drop toy/urbzig toy/learn urbzig/urbzig mace/take cube/write mace on featureless/e/learn frotz/frotz temple/clean podium/put chasm cube in bottom left socket/put cave cube in bottom right socket/put prize cube in top left socket/put mace cube in top right socket',
"grammar" : "awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;c,cast;carry/get/hold/take inventory;carry/get/hold/take out/off/up;close/cover/shut up;damn/fuck/shit/sod;diagnose/health;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;hear/listen;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;leave/go/run/walk;long/verbose;memory/spells;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;restart;restore;save;score;script;script off;script on;short/superbrie;sing;smell/sniff;sorry;stand up;think;verify;version;wait/z;wave;y/yes;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;c,cast OBJ;carry/get/hold/take OBJ;carry/get/hold/take in/into/on/onto OBJ;carry/get/hold/take off OBJ;cast OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;cross/enter/go/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;flip/toss OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;mount/ride/straddle OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on 
OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;wave OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/get/hold/take OBJ from/off OBJ;cast OBJ at OBJ;cast OBJ on OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove OBJ from OBJ;tell OBJ about OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
# Game definition for "Ballyhoo" (z-machine story file ballyhoo.z3).
# Same schema as the other game dicts in this module: name, rom, seed,
# walkthrough ('/'-separated command script), grammar (';'-separated action
# templates with OBJ object slots and '/'-separated synonyms), and
# max_word_length. Note this is a .z3 game and max_word_length is 6, unlike
# the .z5 games' 9 -- consistent with z-machine v3's shorter dictionary
# words, but the exact use of this field is decided by the caller.
ballyhoo = {
"name": "ballyhoo",
"rom": "ballyhoo.z3",
"seed": 0,
# Walkthrough adapted from http://www.the-spoiler.com/ADVENTURE/Infocom/ballyhoo.1.html
"walkthrough": 's/help midget/w/take mask/s/w/hide behind president/listen to conversation/again/e/e/take pole/n/n/n/drop mask/u/e/e/e/e/e/e/take balloon/w/w/w/w/w/w/d/d/take all/s/s/w/s/untie balloon/inhale helium/guard, hello/s/w/wear clown mask/knock on door/s/take ash tray/close door/search ash/take scrap/search tray/go under wall/e/n/e/n/ne/search garbage/take ticket/punch out blue dot/sw/s/put ticket in slot/e/ask pitchman about dr nostrum/s/se/look into cage/take keys with pole/unlock door/open door/n/take bucket/s/nw/n/w/w/s/w/s/e/unlock compartment/open compartment/take whip/n/e/n/put ticket in slot/e/e/e/n/ne/take stool/nw/s/w/w/w/n/n/unlock cage/open cage/w/whip smooth lion/again/again/lift grate/throw meat in passage/e/w/lower grate/search stand/e/s/s/s/w/give case to harry/n/e/put ticket in slot/e/e/s/show case to jenny/give case to jenny/n/n/give ticket to rimshaw/rimshaw, hypnotise me/wait/wait/wait/wait/buy candy from hawker/give money to hawker/stand/e/u/e/d/e/u/e/d/s/stand in line/wait/wait/get out of long line/stand in short line/wait/wait/wait/get out of long line/yes/stand in long line/bite banana/drop banana/n/ask hawker about candy/u/w/d/w/u/w/d/w/stand/s/w/go under wall/search garbage/take bar/s/e/e/n/ne/show bar to tina/tina, hello/shake hands/nw/take radio/s/w/w/s/se/drop all except radio/n/take headphones/s/u/u/set radio to 1170/turn radio off/rewind tape/wait/play tape/wait/wait/rewind tape/turn radio on/record/wait/wait/wait/wait/d/d/rewind tape/turn radio off/take all/drop stool and bucket/nw/unlock cage/open cage/w/play tape/search straw/open trap door/take ribbon/e/close cage/se/take bucket/nw/n/w/w/s/w/touch wood with pole/take mousetrap/drop trap/e/wait/w/e/w/catch mouse with bucket/take mouse/e/n/e/put ticket in slot/e/s/show mouse to elephant/again/wait/sw/drop all/u/turn crank/look into wagon/knock on door/in/lock door/search desk/take spreadsheet/move desk/u/read spreadsheet/d/take all/w/ask harry about eddie 
smaldone/e/ne/se/slide ticket under front/e/take ticket/open secret panel/y/n/open secret panel/w/nw/n/w/s/ne/se/slide ticket under front/e/look under table/take suitcase/wait/w/drop all/u/u/wait/e/wait/take shaft/pull shaft/d/d/take all except key, whip, stool/nw/n/w/fill bucket with water/s/ne/n/pour water on detective/ask detective about chelsea/drop bucket/take note and card/read note/e/s/u/take all/u/n/w/w/w/s/w/s/e/show ribbon to chuckles/show scrap to chuckles/show note to chuckles/show spreadsheet/eddie,hello/show card to chuckles/search pocket/take veil/wear veil/wear dress/wear jacket/knock on door/e/close door/take crowbar/move moose/open door/w/w/open door with crowbar/s/take thumb/n/e/e/put thumb in hole/wait/take chelsea/w/n/e/ne/n/w/n/n/clap/roustabout, get net/take off veil/take off jacket/take off dress/drop all/w/take stand/e/drop stand/take radio/climb stand/u/turn radio on/drop radio/d/take pole/climb stand/u/u/take radio/e/e/e/w/w/w/w/drop radio and pole/d/s/s/s/e/call WPDL/w/n/n/n/climb stand/u/take all/e/e/e/e/e/wait/wait/wait',
"grammar" : "answer/reply;applau/cheer/clap;balanc;bounce/dive/fall/jump/leap;brief;cheat;crouch/duck/hide;depart/exit/withdr;diagno;did/ok/okay/sure/y/yea/yes;didn'/no/nope/wrong;doze/nap/sleep/snooze;enter;gaze/l/look/squint/stare;go/procee/run/step/walk;hello/hi/howdy;help/hint/hints;hop/skip;howl/scream/shout/yell;invent/i;leave;q/quit;record;rest/sit/squat;restar;restor;rise/stand;save;score;script;smell/sniff/whiff;smile;stay/wait/z;super/superb;thank/thanks;unscri;verbos;versio;wave;what/what'/whats;where/wheres;who/whos;why;you;activa/start OBJ;adjust/tune OBJ;advanc OBJ;answer/reply OBJ;answer/reply to OBJ;applau/cheer OBJ;approa OBJ;arrest OBJ;assaul/attack/fight/hit/kill/murder/slap/strike OBJ;attach/fasten/secure/tie OBJ;awake/rouse/wake OBJ;awake/rouse/wake up OBJ;balanc OBJ;bet/wager OBJ;bite OBJ;board/embark OBJ;boost/lift/raise OBJ;bounce/dive/fall/jump/leap across OBJ;bounce/dive/fall/jump/leap down OBJ;bounce/dive/fall/jump/leap from OBJ;bounce/dive/fall/jump/leap off OBJ;bounce/dive/fall/jump/leap on OBJ;bounce/dive/fall/jump/leap out OBJ;bounce/dive/fall/jump/leap over OBJ;bounce/dive/fall/jump/leap up OBJ;break/damage/demoli/destro/peel/pop/smash/wreck OBJ;break/damage/demoli/destro/peel/pop/smash/wreck down OBJ;breath/inhale/suck OBJ;breath/inhale/suck in OBJ;bribe OBJ;buy/order/purcha OBJ;call OBJ;call out OBJ;call to OBJ;carry/get/grab/hold/take OBJ;carry/get/grab/hold/take down OBJ;carry/get/grab/hold/take off OBJ;carry/get/grab/hold/take out OBJ;chase/follow/pursue OBJ;cheat OBJ;cheat at OBJ;clap OBJ;clean/tidy/wash OBJ;clean/tidy/wash up OBJ;climb/crawl/creep/scale OBJ;climb/crawl/creep/scale down OBJ;climb/crawl/creep/scale off OBJ;climb/crawl/creep/scale out OBJ;climb/crawl/creep/scale over OBJ;climb/crawl/creep/scale up OBJ;climb/crawl/creep/scale/carry/get/grab/hold/take on OBJ;close/shut/unset OBJ;close/shut/unset off OBJ;compar/match OBJ;count OBJ;crack/snap/whip OBJ;cross/enter OBJ;crouch/duck/hide under OBJ;cut/perfor/slice 
OBJ;cut/perfor/slice/climb/crawl/creep/scale/carry/get/grab/hold/take in OBJ;debark/disemb OBJ;depart/exit/withdr OBJ;descen OBJ;descri/examin/inspec/observ/scour/see/study/watch OBJ;descri/examin/inspec/observ/scour/see/study/watch on OBJ;descri/examin/inspec/observ/scour/see/study/watch/gaze/l/look/squint/stare in OBJ;descri/examin/inspec/observ/scour/see/study/watch/gaze/l/look/squint/stare/frisk/rummag/search for OBJ;devour/eat/gobble/ingest OBJ;dial/phone/teleph OBJ;did not OBJ;did so OBJ;dig in OBJ;dig throug OBJ;dig with OBJ;doff/remove/shed OBJ;don/wear OBJ;draw/open/part/pry OBJ;draw/open/part/pry up OBJ;drink/guzzle/imbibe/quaff/sip/swallo/swill OBJ;drink/guzzle/imbibe/quaff/sip/swallo/swill from OBJ;drive OBJ;drop OBJ;eject OBJ;empty OBJ;empty out OBJ;exting OBJ;feed OBJ;feel/pat/pet/poke/rub/tap/tickle/touch OBJ;fill OBJ;find/seek OBJ;flick/flip/rotate/set/toggle/turn OBJ;flick/flip/rotate/set/toggle/turn around OBJ;flick/flip/rotate/set/toggle/turn off OBJ;flick/flip/rotate/set/toggle/turn on OBJ;free/unatta/unfast/unknot/untang/untie OBJ;frisk/rummag/search OBJ;frisk/rummag/search in OBJ;frisk/rummag/search throug OBJ;gaze/l/look/squint/stare OBJ;gaze/l/look/squint/stare around OBJ;gaze/l/look/squint/stare at OBJ;gaze/l/look/squint/stare behind OBJ;gaze/l/look/squint/stare down OBJ;gaze/l/look/squint/stare on OBJ;gaze/l/look/squint/stare out OBJ;gaze/l/look/squint/stare over OBJ;gaze/l/look/squint/stare throug OBJ;gaze/l/look/squint/stare to OBJ;gaze/l/look/squint/stare under OBJ;gaze/l/look/squint/stare up OBJ;go/bounce/dive/fall/jump/leap/procee/run/step/walk in OBJ;go/climb/crawl/creep/scale/bounce/dive/fall/jump/leap/procee/run/step/walk throug OBJ;go/climb/crawl/creep/scale/carry/get/grab/hold/take/procee/run/step/walk under OBJ;go/crouch/duck/hide/rise/stand/procee/run/step/walk behind OBJ;go/move/procee/run/step/walk around OBJ;go/procee/run/step/walk OBJ;go/procee/run/step/walk away OBJ;go/procee/run/step/walk down OBJ;go/procee/run/step/walk 
on OBJ;go/procee/run/step/walk out OBJ;go/procee/run/step/walk over OBJ;go/procee/run/step/walk to OBJ;go/procee/run/step/walk up OBJ;hang/insert/lay/place/put/stuff down OBJ;hang/insert/lay/place/put/stuff on OBJ;hang/insert/lay/place/put/stuff up OBJ;hear OBJ;hello/hi/howdy OBJ;help/hint/hints/save OBJ;howl/scream/shout/yell at OBJ;howl/scream/shout/yell to OBJ;hurl/throw/toss OBJ;hypnot/mesmer OBJ;jimmy/pick OBJ;jimmy/pick up OBJ;kick OBJ;kiss OBJ;knock/rap at OBJ;knock/rap down OBJ;knock/rap on OBJ;lean OBJ;lean to OBJ;leave OBJ;let go OBJ;lick/taste OBJ;lie/reclin down OBJ;lie/reclin in OBJ;lie/reclin on OBJ;light OBJ;listen to OBJ;lock OBJ;lower OBJ;make OBJ;move OBJ;move up OBJ;pass/pay OBJ;play OBJ;pole-/poleva/vault on OBJ;pole-/poleva/vault over OBJ;pour/spill/sprink OBJ;pour/spill/sprink out OBJ;press/pull/push/shove/boost/lift/raise up OBJ;press/pull/push/shove/move on OBJ;punch OBJ;punch out OBJ;reach OBJ;reach in OBJ;read/skim OBJ;record OBJ;record on OBJ;rest/sit/squat down OBJ;rest/sit/squat in OBJ;rest/sit/squat on OBJ;rewind OBJ;rise/stand in OBJ;rise/stand on OBJ;rise/stand/carry/get/grab/hold/take up OBJ;shake OBJ;sidewa/side- OBJ;slide/press/pull/push/shove OBJ;smell/sniff/whiff OBJ;smile at OBJ;speak/talk in OBJ;speak/talk to OBJ;spin/whirl OBJ;stay/wait/z for OBJ;stay/wait/z in OBJ;stay/wait/z on OBJ;stop OBJ;tame OBJ;tell/addres OBJ;thank/thanks OBJ;unlock OBJ;wave OBJ;wave at OBJ;wave to OBJ;what/what'/whats OBJ;where/wheres OBJ;who/whos OBJ;you OBJ;adjust/tune OBJ to OBJ;adjust/tune in OBJ to OBJ;ask/consul/query OBJ about OBJ;ask/consul/query OBJ for OBJ;ask/consul/query OBJ on OBJ;assaul/attack/fight/hit/kill/murder/slap/strike OBJ with OBJ;attach/fasten/secure/tie OBJ to OBJ;attach/fasten/secure/tie/hang/insert/lay/place/put/stuff OBJ around OBJ;balanc OBJ with OBJ;boost/lift/raise OBJ to OBJ;bounce/dive/fall/jump/leap over OBJ with OBJ;break/damage/demoli/destro/peel/pop/smash/wreck OBJ with OBJ;breath/inhale/suck in OBJ from OBJ;bribe 
OBJ with OBJ;buy/order/purcha OBJ from OBJ;captur/catch OBJ in OBJ;captur/catch OBJ with OBJ;carry/get/grab/hold/take OBJ in OBJ;carry/get/grab/hold/take OBJ off OBJ;carry/get/grab/hold/take OBJ out OBJ;carry/get/grab/hold/take OBJ with OBJ;compar/match OBJ to OBJ;compar/match OBJ with OBJ;cover OBJ with OBJ;crack/snap/whip OBJ at OBJ;crack/snap/whip OBJ on OBJ;crack/snap/whip OBJ over OBJ;crack/snap/whip OBJ with OBJ;cut/perfor/slice OBJ with OBJ;cut/perfor/slice throug OBJ with OBJ;descri/examin/inspec/observ/scour/see/study/watch OBJ throug OBJ;dial/phone/teleph/call OBJ on OBJ;dial/phone/teleph/call OBJ with OBJ;doff/remove/shed/carry/get/grab/hold/take OBJ from OBJ;donate/give/hand/offer/sell/send/feed/pass/pay OBJ OBJ;donate/give/hand/offer/sell/send/feed/pass/pay OBJ to OBJ;draw/open/part/pry OBJ with OBJ;drop OBJ down OBJ;drop/hang/insert/lay/place/put/stuff OBJ in OBJ;drop/hang/insert/lay/place/put/stuff OBJ on OBJ;drop/hang/insert/lay/place/put/stuff OBJ out OBJ;empty OBJ from OBJ;empty OBJ out OBJ;feel/pat/pet/poke/rub/tap/tickle/touch OBJ on OBJ;feel/pat/pet/poke/rub/tap/tickle/touch OBJ with OBJ;fill OBJ with OBJ;flick/flip/rotate/set/toggle/turn OBJ to OBJ;flick/flip/rotate/set/toggle/turn OBJ with OBJ;flick/flip/rotate/set/toggle/turn in OBJ to OBJ;free/unatta/unfast/unknot/untang/untie OBJ with OBJ;frisk/rummag/search OBJ for OBJ;gaze/l/look/squint/stare at OBJ throug OBJ;hang/insert/lay/place/put/stuff OBJ across OBJ;hang/insert/lay/place/put/stuff OBJ behind OBJ;hang/insert/lay/place/put/stuff OBJ down OBJ;hang/insert/lay/place/put/stuff OBJ over OBJ;hurl/throw/toss OBJ at OBJ;hurl/throw/toss OBJ in OBJ;hurl/throw/toss OBJ off OBJ;hurl/throw/toss OBJ out OBJ;hurl/throw/toss OBJ over OBJ;hurl/throw/toss OBJ throug OBJ;hurl/throw/toss OBJ to OBJ;hurl/throw/toss OBJ up OBJ;jimmy/pick OBJ with OBJ;lock OBJ with OBJ;move OBJ with OBJ;pass/pay OBJ throug OBJ;pole-/poleva/vault over OBJ with OBJ;pour/spill/sprink OBJ from OBJ;pour/spill/sprink OBJ in 
OBJ;pour/spill/sprink OBJ on OBJ;pour/spill/sprink OBJ over OBJ;pour/spill/sprink out OBJ into OBJ;reach OBJ with OBJ;read/skim OBJ on OBJ;read/skim OBJ throug OBJ;read/skim OBJ with OBJ;shake OBJ with OBJ;show OBJ OBJ;show OBJ to OBJ;slide OBJ down OBJ;slide OBJ in OBJ;slide OBJ on OBJ;slide OBJ throug OBJ;slide/press/pull/push/shove/move/hang/insert/lay/place/put/stuff OBJ under OBJ;tame OBJ with OBJ;tell OBJ OBJ;tell OBJ about OBJ;tell OBJ to OBJ;unlock OBJ with OBJ;",
"max_word_length" : 6
}
# Game definition for "Curses" (z-machine story file curses.z5).
# Same schema as the other game dicts in this module: name, rom, seed,
# walkthrough ('/'-separated command script), grammar (';'-separated action
# templates with OBJ object slots and '/'-separated synonyms), and
# max_word_length (9 -- presumably the parser's significant-word width).
# NOTE(review): the walkthrough contains a double slash ("...s/s//get
# maiden...") that produces an empty command step -- possibly intentional
# (e.g. a blank turn), possibly a typo; verify against the interpreter.
curses = {
"name": "curses",
"rom": "curses.z5",
"seed": 0,
# Walkthrough adapted from https://ifarchive.org/if-archive/solutions/Curses.step and http://mirror.ifarchive.org/if-archive/solutions/CursesR16.sol
"walkthrough": "drop biscuit, paper/south/get parcel from cupboard/unwrap parcel/north/north/west/south/examine sheets/turn wireless on/push wireless north/west/give box of chocolates to jemima/say yellow/wait/wait/wait/wait/wait/wait/wait/get gloves/wear gloves/east/east/east/get wrench/west/south/south/southeast/east/examine rolls/open torch/empty torch/get new battery/put new battery in torch/close torch/south/take sack/north/open door/north/get all/turn handle/south/west/west/get flash/east/south/west/get wrench/tighten joint/get novel/get poetry/east/north/northwest/get all from cupboard/north/close trapdoor/get jewellery box/open trapdoor/north/open demijohn/get all from demijohn/west/west/east/east/east/turn wheel off/turn wheel/get medicine bottle/put medicine bottle in shaft/turn wheel/enter dumb waiter/get all/pull ropes/out/mouse, south/south/mouse, west/hole, w/hole, w/hole, w/hole, n/hole, w/hole, n/hole, s/hole, e/hole, s/hole, e/hole, e/hole, e/hole, e/get key/north/enter dumb waiter/pull ropes/out/wear gas mask/north/unlock door with brass key/open door/north/remove gas mask/read poetry/west/take handkerchief/wave handkerchief/west/say time/take poetry/northwest/west/west/get bottle/get implement/enter roller/turn roller on/e/e/e/w/n/n/w/w/n/n/n/n/w/w/w/get out/get etching/enter roller/e/e/e/s/s/s/s/s/e/e/s/s/get out/southeast/south/south/enter dumb waiter/pull ropes/out/west/south/south/southeast/east/north/up/northwest/take gothic key/throw wishbone at ghost/southeast/down/put all in fireplace/west/down/get all/unlock hatch with brass key/open hatch/down/east/turn wheel off/turn wheel/enter dumb waiter/pull ropes/pull ropes/out/west/south/south/southeast/south/south/turn projector on/get ace/put ace in projector/south/examine cups/pull anchor/put ship in mounted bottle/examine ship/fore/get branch/aft/up/take flag/port/get flag/get spar/aft/turn wheel/down/get ace/north/north/east/south/push south wall/south/put flag on bed/break 
windows/south/look under sill/take gold key/north/lie on bed/sleep/east/east/east/turn wheel/pinch me/get up/north/north/west/northwest/north/north/east/enter dumb waiter/pull ropes/pull ropes/out/north/east/east/break frame/unlock jewellery box with gold key/open jewellery box/take sketch/take spar/take clover/wave spar/wave clover/open coffin/put rods in coffin/close lid/open lid/close lid/open lid/get rods from coffin/southeast/get sceptre/up/up/look up 1420 in map/turn door/northeast/push ball southwest/push ball south/get tarot box/east/break cabinet/get all from cabinet/get rod of returning/strike it/wait/wait/eat tablet/get crook/point rod of returning at me/s/se/s/s/get etching/put it in projector/south/e/e/s/s/s/s/e/e/e/e/e/squeeze weed killer bottle/w/w/w/w/w/n/n/n/n/w/w/take bean pole/strike returning/point it at me/read poetry/north/open tarot box/get eight/get star/get maiden/up/put eight on pack/put maiden on pack/put star on pack/push bell/say even/north/up/get all cards/down/south/west/west/say time/south/east/east/get stick/get crook/get staff/wave stick/wave crook/wave staff/put rods in coffin/close lid/open lid/close lid/open lid/close lid/open lid/get all from coffin/west/west/north/northwest/enter roller/n/n/e/e/e/get out/get rose/down/strike rod of bronze/point it at mural/down/get wrought iron key/down/smell/smell/down/turn switch off/up/west/strike rod of bronze/point it at metal/north/slide i/slide k/slide c/slide a/slide n/slide e/slide l/slide o/slide s/slide t/south/slide k/slide k/get rod of returning/strike it/point it at me/se/s/s/get etching/get maiden/put maiden in projector/south/up/east/get fire/get husbandry/get luck/strike rod of husbandry/point it at goats/south/strike rod of fire/point it at thorns/south/strike rod of luck/point it at me/south/get coin/southeast/get stone/southwest/wake homer/say agamemnon/say ptolemy/say yellow/north/northwest/push dionysus southeast/push dionysus southwest/push poseidon northeast/again/push 
demeter southwest/push demeter northwest/southeast/put stone in opening/down/get amber/up/south/northeast/northwest/north/north/north/east/south/give coin to bartender/get ekmek/north/get fig/west/west/southwest/play syrinx/put fig in urn/oracle, hello/northeast/down/give ekmek to andromeda/wave hairband/get rod of returning/strike it/point it at me/n/get book/s/east/down/down/northeast/east/east/get implement/walk 4 paces west/walk 4 paces south/dig/get box/unlock it with gothic key/open it/west/west/southwest/up/up/w/s/se/s/s//get maiden/get sketch/put sketch in projector/jump/south/north/take stone/put round stone into grating/wait/wait/wait/wait/wait/wait/south/southeast/southwest/blow whistle/take amber/put amber in socket/get cloak/wear it/south/north/northeast/northeast/southeast/south/anoint me/north/southeast/get spindle/northwest/wait/get alpha/get kappa/get epic poem/get short poem/put epic poem in kappa/put short poem in alpha/east/give kappa to callimachus/south/give alpha to apollonius/get sash/north/west/northwest/north/west/get rusty key/east/south/southwest/southwest/down/lie on couch/sleep/lie on couch/sleep/lie on couch/sleep/lie on couch/sleep/z/z/z/z/z/z/z/z/w/get up/w/get up/w/get up/w/get up/twist nose/west/west/north/east/west/take oak/take sceptre/put sceptre in first socket/turn sceptre/turn sceptre/turn sceptre/turn sceptre/turn sceptre/turn sceptre/turn sceptre/turn sceptre/turn sceptre/turn sceptre/turn sceptre/turn sceptre/turn sceptre/turn sceptre/turn sceptre/turn sceptre/turn sceptre/turn sceptre/get sceptre/put sceptre in second socket/[ Repeatedly \"turn sceptre\" until the coffin opens ]/turn sceptre/turn sceptre/turn sceptre/turn sceptre/east/east/up/northeast/southeast/remove cloak/turn it/wear it/open door/up/put oak on table/northwest/northwest/north/unlock grating with rusty key/open grating/down/get heart from boat/up/south/southeast/southwest/down/west/west/enter coffin/close lid/again/up/west/west/up/up/enter 
roller/w/w/w/s/s/get out/east/get orb/west/southeast/south/east/east/get featureless rod/get spindle/put featureless rod in coffin/wave spindle/put it in coffin/get eight of wands/wave it/put it in coffin/get bean pole/wave bean pole/put it in coffin/close lid/open lid/close lid/open lid/close lid/open lid/close lid/open lid/get all from coffin/west/west/north/northwest/east/east/mosaic, lagach/mural, lagach/still life, lagach/north/remove cloak/turn it/wear cloak/northeast/northeast/north/north/remove cloak/wear sash/north/northeast/east/south/anoppe/take astrolabe/put astrolabe in mounting/look through astrolabe/get hand/down/down/west/put skull on statue/turn it/put hand on statue/turn it/put heart in statue/get rod of fire/get rod of ice/strike rod of fire/point it at knight/point east/east/point east/east/give rose to knight/show keepsake to knight/point west/west/knight, open flagstone/point down/strike rod of ice/point it at rod of life/get rod of life/again/south/south/down/northwest/west/west/southeast/south/south/enter dumb waiter/pull ropes/again/out/west/unlock door with wrought iron key/open door/northwest/polish orb/get rod of sacrifice/strike it/point sacrifice at board/get rods from sack/put rods in socket/get rods from sack/put rods in socket/get rods from sack/put rods in socket/get orb/put it in opening/strike rod of infinity/point it at lemniscus/west/down/swing rope/get daisy/wear it/up/east/get torch/northeast/east/south/south/east/wear sandals/west/south/north/west/open cover/put torch in well/down/east/z/z/z/z/z/z/z/z/west/up/z/z/z/z/get pole/east/get stone/wave pole/strike it/point it at me/read poetry/east/down/wave stone/give note to man/say carte/up/west/west/west/say time/get torch/southeast/east/enter dumb waiter/pull ropes/again/out/west/south/down",
"grammar" : "anoppe/eppona;answer/say/shout/speak;awake/awaken/wake;awake/awaken/wake up;beep/bleep/chirp;bet/gamble/wager;bother/curses/darn/drat;brief/normal;carry/get/hold/take inventory;carry/get/hold/take off;carry/get/hold/take out;close/cover/shut up;damn/fuck/shit/sod;diagnose;die/q/quit;dig;display/present/show off;dive/swim;drink/sip/swallow;exhibit/reveal/sing;exit/out/outside/stand;float/sail;float/sail away;full/fullscore;full/fullscore score;go/leave/run/walk;hear/listen;help;hint/hints;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;lagach;lie/sit;lie/sit down;long/verbose;make/wish;make/wish a wish;make/wish wish;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;pace;places;plain;play croquet;plover/plugh/xyzzy;pray;pretty;restart;restore;rip/tear;save;score;script;script off;script on;shantih;short/superbrie;smell/sniff;sorry;stand/carry/get/hold/take up;think;time;verify;version;wait/z;wave;y/yes;achetez/buy/purchase OBJ;add/join/mend/repair/attach/fasten/fix/tie OBJ;adjust/set OBJ;anoint/oil OBJ;answer/say/shout/speak lagach to OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;beckon/direct/gesture/point OBJ;beckon/direct/gesture/point at OBJ;bet/gamble/wager with OBJ;board/embark/cross/enter/go/leave/run/walk OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;carry/get/hold/take OBJ;carry/get/hold/take off OBJ;change/flip/switch OBJ;check/describe/examine/watch/x reflectio of OBJ;chop/cut/prune/slice OBJ;climb/scale OBJ;climb/scale over OBJ;climb/scale up OBJ;close/cover/shut OBJ;damn/fuck/shit/sod OBJ;dance/tango/waltz with OBJ;dig OBJ;dig with OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;etch/inscribe/scribe/write OBJ;fill OBJ;frisk OBJ;go/leave/run/walk through OBJ;go/leave/run/walk/carry/get/hold/take into 
OBJ;hear/listen OBJ;hear/listen to OBJ;help OBJ;hop/jump/skip over OBJ;hypnotise/hypnotize OBJ;kick/attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;knock at OBJ;knock on OBJ;l/look at OBJ;l/look in OBJ;l/look inside OBJ;l/look into OBJ;l/look through OBJ;l/look under OBJ;lie/sit down on OBJ;lie/sit/go/leave/run/walk inside OBJ;lie/sit/go/leave/run/walk/carry/get/hold/take in OBJ;lie/sit/stand/carry/get/hold/take on OBJ;make/wish for OBJ;milk OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pet/stroke/tickle/feel/fondle/grope/touch OBJ;pick OBJ up;pick up OBJ;pinch/slap OBJ;play with OBJ;play/blow OBJ;put OBJ down;put down OBJ;put on OBJ;ram OBJ;read/check/describe/examine/watch/x OBJ;reverse/revolve/tighten/tweak/unwind/wind/wrench/rotate/screw/turn/twist/unscrew OBJ;ring/clear/move/press/push/shift OBJ;rip/tear OBJ;rip/tear down OBJ;roll/shoot/toss OBJ;rotate/screw/turn/twist/unscrew OBJ around;rotate/screw/turn/twist/unscrew OBJ inside out;rotate/screw/turn/twist/unscrew round OBJ;scratch/clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;search OBJ;shantih OBJ;smell/sniff OBJ;squash/squeeze OBJ;stack OBJ;start/stop OBJ;strike OBJ;swing OBJ;swing on OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;wash mouth out with OBJ;wash mouth with OBJ;wash my mouth out with OBJ;wash my mouth with OBJ;wave OBJ;add/join/mend/repair/attach/fasten/fix/tie OBJ to OBJ;adjust/set OBJ to OBJ;ask OBJ for OBJ;beckon/direct/gesture/point OBJ at OBJ;burn/light OBJ with OBJ;carry/get/hold/take OBJ off OBJ;check/describe/examine/watch/x OBJ in OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;close/cover/shut OBJ with OBJ;discard/drop/throw OBJ against OBJ;discard/drop/throw OBJ at OBJ;discard/drop/throw OBJ down 
OBJ;discard/drop/throw/insert/put OBJ in OBJ;discard/drop/throw/insert/put OBJ into OBJ;discard/drop/throw/put OBJ on OBJ;discard/drop/throw/put OBJ onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ into OBJ;empty OBJ on OBJ;empty OBJ onto OBJ;empty OBJ to OBJ;etch/inscribe/scribe/write OBJ on OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;hang/mount OBJ on OBJ;hang/mount OBJ onto OBJ;hang/mount OBJ up on OBJ;hang/mount OBJ up onto OBJ;hang/mount up OBJ on OBJ;hang/mount up OBJ onto OBJ;hypnotise/hypnotize OBJ with OBJ;lay OBJ on OBJ;lean/prop/rest/support OBJ against OBJ;lean/prop/rest/support OBJ on OBJ;lean/prop/rest/support OBJ with OBJ;lean/prop/rest/support up OBJ with OBJ;lock OBJ with OBJ;put OBJ inside OBJ;remove/carry/get/hold/take OBJ from OBJ;rotate/screw/turn/twist/unscrew OBJ with OBJ;shantih OBJ OBJ;stack OBJ in OBJ;stack OBJ on OBJ;stack OBJ onto OBJ;stack OBJ with OBJ;strike/attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ with OBJ;tighten/tweak/unwind/wind/wrench OBJ with OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
# Game configuration for Infocom's "Cutthroats".
# Fields:
#   name            - short identifier for this game entry
#   rom             - story file to load (".z3" suggests a version-3 Z-machine
#                     file; loader not visible here)
#   seed            - integer seed (presumably for deterministic RNG in the
#                     interpreter -- verify against the consumer of this dict)
#   walkthrough     - '/'-separated command script that plays through the game
#   grammar         - ';'-separated action templates; '/' separates synonymous
#                     words and OBJ marks a noun-phrase slot
#   max_word_length - NOTE(review): vocabulary words in the grammar appear
#                     truncated to this many characters (e.g. "superb",
#                     "invent") -- confirm it is the parser's dictionary limit
cutthroat = {
"name": "cutthroat",
"rom": "cutthroat.z3",
# Distinct from the other entries, which use seed 0.
"seed": 10,
# walkthrough adapted from https://ifarchive.org/if-archive/solutions/Sols3.zip and http://www.eristic.net/games/infocom/cutthroats.html
"walkthrough": 'look out window/wind watch/get out of bed/read note/open dresser/get passbook and room key/look in closet/i/open door/n/close door/lock door/wait/n/n/e/e/e/e/s/sit/order food/order drink/eat food/drink drink/yes/get out of chair/n/w/w/s/s/w/w/e/e/e/e/ne/s/s/sw/w/w/e/e/wait/n/withdraw $603/s/ne/n/nw/w/w/w/open window/look through window/w/drop passbook/e/e/n/n/w/w/sw/sw/nw/wait/examine plate/se/ne/ne/e/s/n/e/e/s/n/e/s/buy drink/drink drink/n/se/s/s/se/i/wait for johnny/YES/YES/YES/YES/YES/YES/yes/show $500/no/nw/n/n/nw/w/s/wait/wait/look/give $56/buy flashlight and repellent/buy c battery and putty/buy electromagnet/rent compressor/i/n/e/s/sit/order food/order drink/eat food/drink drink/get out of chair/n/se/s/s/sw/w/w/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/e/e/ne/n/n/nw/w/w/s/s/w/wait/look through window/w/e/wait/look through window/open window/enter window/get envelope/n/w/s/s/unlock door/open door/s/read envelope/get all from closet/i/n/close door/lock door/n/n/e/e/s/s/w/w/drop room key/e/e/e/e/ne/nw/s/sit/order drink/drink drink/wait/wait/get off chair/n/w/s/wait/n/w/n/w/s/d/n/n/n/get drill/open panel/get c battery/put c battery in drill/close panel/s/hide envelope under bunk/s/s/s/wait/wait/wait/latitude is 25/longitude is 25/n/u/n/s/drop wet suit/drop flippers/drop mask/drop drill/i/w/n/d/n/get all/examine compressor/examine air tank/fill tank with air/get air tank/s/s/drink water/s/d/u/s/n/n/n/look under bunk/n/s/u/w/s/examine drill/n/wait/get out of bunk/get all/get envelope/s/s/s/show envelope to johnny/n/u/d/n/eat stew/s/u/drop all/get watch/wear watch/get wet suit/wear wet suit/get air tank/wear air tank/get flippers/wear flippers/get mask/wear mask/i/look/get tube/get flashlight/get canister/get drill/get electromagnet/i/look/dive in ocean/open shark repellent/d/turn on flashlight/d/d/d/s/open door/s/put magnet on mine/turn on magnet/drop magnet/u/remove tank/s/s/turn drill on/drill safe with drill/turn drill off/grab 
case/n/n/wear air tank/d/n/u/examine glass case/examine stamps/turn drill on/drill case with drill/open tube/put glob on case/d/n/u/u/u/u/u',
"grammar" : "affirm/ok/okay/sure/uh-hu/y/yeah/yes/yup;again/g;back;barf/chomp/lose;bathe/swim/wade;breath;brief;bye/goodby;chase/come/follow/pursue;chat/say/speak/talk;curse/cuss/damn/fuck/hell/shit/swear;diagno;disemb/exit;dive;dunno/maybe;enter;gaze/l/look/peer/stare;hello/hi;help;hop/skip;i/invent;jump/leap;leave/withdr;lie/nap/rest/sleep;mumble/sigh;negati/no/nope/uh-uh;pray;progre/rating/score;q/quit;restar;restor;save;scream/shout/yell;script;stand;stay;super/superb;t/time;unscri;verbos;versio;wait/z;what/what'/whats/who/who's/whos;win/winnag;ask/inquir/questi OBJ;ask/inquir/questi about OBJ;ask/inquir/questi for OBJ;awake/startl/surpri/wake OBJ;awake/startl/surpri/wake up OBJ;bathe/swim/wade OBJ;bathe/swim/wade in OBJ;bite/chew/consum/eat/munch/nibble OBJ;blow out OBJ;board OBJ;brandi/wave OBJ;brandi/wave at OBJ;brandi/wave to OBJ;break/chip/chop/damage/destro/hit/smash in OBJ;breath OBJ;buy/order/purcha OBJ;bye/goodby OBJ;call OBJ;call to OBJ;carry/get/grab/hold/remove/take OBJ;carry/get/grab/hold/remove/take off OBJ;carry/get/grab/hold/remove/take out OBJ;cast/chuck/hurl/throw/toss OBJ;chase/come/follow/pursue OBJ;chat/say/speak/talk to OBJ;chat/say/speak/talk with OBJ;check OBJ;check/gaze/l/look/peer/stare/search for OBJ;clean/polish/scrub OBJ;climb OBJ;climb down OBJ;climb out OBJ;climb throug OBJ;climb up OBJ;climb/carry/get/grab/hold/remove/take in OBJ;climb/carry/get/grab/hold/remove/take on OBJ;close/shut OBJ;count OBJ;cross/ford OBJ;curse/cuss/damn/fuck/hell/shit/swear OBJ;deflat OBJ;deposi OBJ;descri/examin OBJ;detach/discon/free/unfast/unhook/untie OBJ;disemb OBJ;dive in OBJ;douse/exting OBJ;drink/guzzle/imbibe/sip/swallo OBJ;drop/releas OBJ;empty OBJ;enter OBJ;exit OBJ;feel/pat/pet/rub/touch OBJ;fill OBJ;find/see/seek/where OBJ;flip/set/turn OBJ;flip/set/turn off OBJ;flip/set/turn on OBJ;flip/set/turn over OBJ;fold OBJ;fold up OBJ;gaze/l/look/peer/stare OBJ;gaze/l/look/peer/stare around OBJ;gaze/l/look/peer/stare at OBJ;gaze/l/look/peer/stare 
behind OBJ;gaze/l/look/peer/stare in OBJ;gaze/l/look/peer/stare on OBJ;gaze/l/look/peer/stare out OBJ;gaze/l/look/peer/stare throug OBJ;gaze/l/look/peer/stare under OBJ;gaze/l/look/peer/stare with OBJ;go/procee/run/step/walk OBJ;go/procee/run/step/walk around OBJ;go/procee/run/step/walk down OBJ;go/procee/run/step/walk in OBJ;go/procee/run/step/walk on OBJ;go/procee/run/step/walk throug OBJ;go/procee/run/step/walk to OBJ;go/procee/run/step/walk up OBJ;hello/hi OBJ;help OBJ;hide behind OBJ;hide in OBJ;hide under OBJ;insert/place/put/stuff/wedge down OBJ;insert/place/put/stuff/wedge on OBJ;jump/leap across OBJ;jump/leap from OBJ;jump/leap in OBJ;jump/leap off OBJ;jump/leap/go/procee/run/step/walk over OBJ;kick OBJ;kiss OBJ;knock/rap at OBJ;knock/rap down OBJ;knock/rap on OBJ;knock/rap over OBJ;latitu OBJ;launch OBJ;lean on OBJ;lease/rent OBJ;leave OBJ;lie/nap/rest/sleep down OBJ;lie/nap/rest/sleep in OBJ;lie/nap/rest/sleep on OBJ;lift/raise OBJ;light/start/strike OBJ;listen for OBJ;listen to OBJ;longit OBJ;lower OBJ;make OBJ;molest/rape OBJ;move/roll/pull/tug/yank OBJ;open OBJ;open up OBJ;pick OBJ;pick up OBJ;play OBJ;pour/spill OBJ;pray for OBJ;press/push/shove OBJ;press/push/shove on OBJ;press/push/shove/lift/raise up OBJ;pull/tug/yank on OBJ;pump up OBJ;reach in OBJ;read/skim OBJ;read/skim in OBJ;rob OBJ;roll up OBJ;scream/shout/yell at OBJ;search OBJ;search in OBJ;send OBJ;send for OBJ;shake OBJ;sit down OBJ;sit in OBJ;sit on OBJ;sit with OBJ;slide OBJ;smell/sniff OBJ;smoke OBJ;spin OBJ;squeez OBJ;stand on OBJ;stand/carry/get/grab/hold/remove/take up OBJ;swing/thrust OBJ;taste OBJ;tell OBJ;unfold OBJ;unlock OBJ;wait/z OBJ;wait/z for OBJ;wear OBJ;weigh OBJ;what/what'/whats/who/who's/whos OBJ;wind OBJ;wind up OBJ;withdr OBJ;aim/point OBJ at OBJ;apply OBJ to OBJ;ask/inquir/questi OBJ about OBJ;ask/inquir/questi OBJ for OBJ;attach/connec/fasten/secure/tie OBJ around OBJ;attach/connec/fasten/secure/tie OBJ to OBJ;attach/connec/fasten/secure/tie up OBJ with 
OBJ;attack/fight/hurt/injure OBJ with OBJ;blind/jab/poke OBJ with OBJ;blow up OBJ with OBJ;brace/suppor OBJ with OBJ;brandi/wave OBJ at OBJ;break/chip/chop/damage/destro/hit/smash OBJ with OBJ;break/chip/chop/damage/destro/hit/smash down OBJ with OBJ;break/chip/chop/damage/destro/hit/smash in OBJ with OBJ;burn/ignite/incine OBJ with OBJ;burn/ignite/incine down OBJ with OBJ;buy/order/purcha OBJ from OBJ;carry/get/grab/hold/remove/take OBJ from OBJ;carry/get/grab/hold/remove/take OBJ off OBJ;carry/get/grab/hold/remove/take OBJ out OBJ;cast/chuck/hurl/throw/toss OBJ at OBJ;cast/chuck/hurl/throw/toss OBJ down OBJ;cast/chuck/hurl/throw/toss OBJ in OBJ;cast/chuck/hurl/throw/toss OBJ off OBJ;cast/chuck/hurl/throw/toss OBJ on OBJ;cast/chuck/hurl/throw/toss OBJ over OBJ;cast/chuck/hurl/throw/toss OBJ with OBJ;cut/pierce/scrape/slice OBJ with OBJ;deposi OBJ in OBJ;deposi/insert/place/put/stuff/wedge OBJ on OBJ;detach/discon/free/unfast/unhook/untie OBJ from OBJ;dig OBJ in OBJ;dig OBJ with OBJ;dig in OBJ with OBJ;dispat/kill/murder/slay/stab OBJ with OBJ;donate/feed/give/hand/offer/pay OBJ OBJ;donate/feed/give/hand/offer/pay OBJ to OBJ;drill OBJ in OBJ;drill OBJ with OBJ;drop/releas OBJ down OBJ;drop/releas OBJ in OBJ;drop/releas OBJ on OBJ;feel/pat/pet/rub/touch OBJ to OBJ;feel/pat/pet/rub/touch OBJ with OBJ;fill OBJ with OBJ;fix/glue/patch/plug/repair OBJ with OBJ;flash/show OBJ OBJ;flash/show OBJ to OBJ;flip/set/turn OBJ for OBJ;flip/set/turn OBJ to OBJ;flip/set/turn OBJ with OBJ;gaze/l/look/peer/stare at OBJ with OBJ;grease/lubric OBJ with OBJ;hide OBJ behind OBJ;hide OBJ in OBJ;hide OBJ under OBJ;inflat OBJ with OBJ;insert/place/put/stuff/wedge OBJ across OBJ;insert/place/put/stuff/wedge OBJ agains OBJ;insert/place/put/stuff/wedge OBJ at OBJ;insert/place/put/stuff/wedge OBJ behind OBJ;insert/place/put/stuff/wedge OBJ betwee OBJ;insert/place/put/stuff/wedge OBJ by OBJ;insert/place/put/stuff/wedge OBJ in OBJ;insert/place/put/stuff/wedge OBJ over 
OBJ;insert/place/put/stuff/wedge OBJ under OBJ;lease/rent OBJ from OBJ;lift/raise OBJ with OBJ;light/start OBJ with OBJ;liquif/melt OBJ with OBJ;lock OBJ with OBJ;lower OBJ down OBJ;move OBJ with OBJ;move/roll/pull/tug/yank/press/push/shove/slide OBJ OBJ;open OBJ with OBJ;pick OBJ with OBJ;pour/spill OBJ from OBJ;pour/spill OBJ in OBJ;pour/spill OBJ on OBJ;press/push/shove OBJ off OBJ;press/push/shove OBJ throug OBJ;press/push/shove OBJ under OBJ;pump up OBJ with OBJ;read/skim OBJ with OBJ;roll/press/push/shove/slide OBJ to OBJ;slide OBJ under OBJ;spray OBJ on OBJ;spray OBJ with OBJ;squeez OBJ on OBJ;strike OBJ with OBJ;swing/thrust OBJ at OBJ;tell OBJ about OBJ;unlock OBJ with OBJ;what/what'/whats/who/who's/whos OBJ OBJ;",
"max_word_length" : 6
}
# Game configuration for "Deephome".
# Same schema as the sibling entries: "rom" is the story file (".z5" suggests
# a version-5 Z-machine file), "seed" an integer RNG seed (presumably consumed
# by the interpreter -- verify against caller), "walkthrough" a '/'-separated
# command script, "grammar" the ';'-separated action templates ('/' separates
# synonyms, OBJ is a noun-phrase slot), and "max_word_length" the apparent
# dictionary word-truncation length (NOTE(review): confirm).
deephome = {
"name": "deephome",
"rom": "deephome.z5",
"seed": 0,
# walkthrough adapted from http://www.ifarchive.org/if-archive/solutions/deephome.sol
"walkthrough": "read note/x door/push mountain/sw/open cabinet/l in cabinet/take shield/take sword/search table/read letter/sw/read warning note/get torch/wait/wait/s/s/s/s/s/s/get pickaxe/n/e/dig rock with axe/take coal/w/n/e/open furnace/put coal in furnace/close furnace/x generator/pull lever/w/n/n/n/n/w/enter car/push yellow/out/nw/sw/get gear/ne/w/e/nw/se/n/s/ne/sw/e/w/se/enter car/push green button/out/w/sw/sw/x terrock/ne/ne/w/e/s/n/e/enter car/push yellow button/out/e/s/s/e/l up terrock in leshosh/w/s/w/w/get moss/e/e/n/n/n/w/enter car/push green button/out/w/sw/sw/put moss in nest/x pipe/push rod/ne/ne/e/enter car/push yellow button/out/e/s/s/s/s/sw/turn wheel/ne/n/e/se/x panel/open hatch/l in hatch/put gear on post/push lever/push button/s/s/s/s/w/e/s/w/ask man about hammer/ask man about eranti/e/s/wear shield/wield sword/kill eranti/kill eranti/kill eranti/kill eranti/n/w/get hammer/e/n/n/n/n/w/w/read sign/w/sw/w/s/e/n/w/e/x clover/get clover/read sign/s/e/e/e/n/nw/w/n/n/n/w/enter car/push yellow button/out/nw/sw/get scraps/open forge/light coal/put scraps in forge/wait/wait/get scraps/put scraps on anvil/hammer scraps/get sharp pick/ne/se/enter car/push red button/out/sw/n/x box/pick lock with sharp pick/open box/l in box/take key/s/unlock door with key/open door/w/ask spirit for name/search valuables/get coin/e/ne/enter car/push yellow button/out/e/s/s/w/ask spirit for name/e/s/w/s/n/w/n/s/w/w/ask spirit for name/e/e/e/e/n/e/look up Kebarn in Leshosh/look up Partaim in Fresto/look up Indanaz in Leshosh/look up Ternalim in Fresto/look up Cholok in Leshosh/look up Yetzuiz in Fresto/look up squirrel in Leshosh/w/s/e/se/s/s/s/s/s/e/x chest/pick lock with sharp pick/open chest/drop sword/drop shield/get net/w/n/n/dig ground with axe/put net on hole/get acorns/put acorns in net/climb tree/climb down/get net/n/n/n/nw/w/w/w/w/w/open net/e/e/e/e/n/n/w/open desk/get paper/e/e/drop hammer/drop pickaxe/get bottle/fill bottle/w/s/w/burn paper/drop coin/pour water 
on coin/put ashes on coin/put clover on coin/pray to kraxis/manaz/take coin/e/n/e/fill bottle/w/ne/sw/n/u/d/w/enter car/push red button/out/sw/w/drop net/open net/put coin in net/pour water on net/get coin/pray to kraxis/manaz",
"grammar" : "answer/say/shout/speak manaz;awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;carry/hold/take inventory;climb/scale down;credits;damn/fuck/shit/sod;diag/diagnose/health;die/q/quit;dive/swim;exit/out/outside/stand;exits;flip;full/fullscore;full/fullscore score;get out/off/up;hear/listen;help;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;leave/go/run/walk;long/verbose;manaz;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;plugh;pray;restart;restore;save;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;sing;smell/sniff;sorry;stand up;think;verify;version;wait/z;wave;xyzzy;y/yes;adjust/set OBJ;answer/say/shout/speak OBJ;answer/say/shout/speak manaz to OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;beat/hammer OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/hold/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cross/enter/go/run/walk OBJ;damn/fuck/shit/sod OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;flip OBJ;get in/into/on/onto OBJ;get off OBJ;get out of OBJ;get/carry/hold/take OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit at OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ;pick OBJ up;pick up OBJ;pour OBJ;pray to OBJ;put OBJ 
down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;smell/sniff OBJ;spread OBJ;squash/squeeze OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;use OBJ;wave OBJ;wield OBJ;adjust/set OBJ to OBJ;attach/fasten/fix/tie OBJ to OBJ;beat/hammer OBJ with OBJ;burn/light OBJ with OBJ;carry/hold/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;dig hole in OBJ with OBJ;dig hole with OBJ in OBJ;dig in OBJ with OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;pick OBJ with OBJ;pour OBJ on OBJ;put OBJ in/inside/into OBJ;put OBJ over OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/hold/take OBJ from OBJ;spread OBJ on OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
# Game configuration for "Detective".
# Same schema as the sibling entries: "rom" is the story file (".z5" suggests
# a version-5 Z-machine file), "seed" an integer RNG seed (presumably consumed
# by the interpreter -- verify against caller), "walkthrough" a '/'-separated
# command script (command case is mixed here; presumably the game's parser is
# case-insensitive -- confirm), "grammar" the ';'-separated action templates
# ('/' separates synonyms, OBJ is a noun-phrase slot), and "max_word_length"
# the apparent dictionary word-truncation length (NOTE(review): confirm).
detective = {
"name": "detective",
"rom": "detective.z5",
"seed": 0,
# Walkthrough adapted from https://solutionarchive.com/file/id,1554/
"walkthrough": "TAKE PAPER/READ PAPER/DROP PAPER/INVENTORY/W/TAKE GUN/E/N/W/E/E/TAKE WOOD/W/W/TAKE NOTE/READ NOTE/E/N/N/W/N/N/W/E/N/E/E/E/S/TAKE HAMBURGER/N/N/E/N/N/N/N/E/W/W/E/N/W/N/W/N/N/w/n/n/up",
"grammar" : "about/help/info/informati;actions;actions off;actions on;awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;carry/hold/take inventory;changes;changes off;changes on;daemons/timers;daemons/timers off;daemons/timers on;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;footnote/note;full/fullscore;full/fullscore score;get out/off/up;hear/listen;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;leave/go/run/walk;long/verbose;manual;manual pronouns;manual pronouns off;manual pronouns on;melenkuri/noside/plover/plugh/samoht/xyzzy/zorton;messages/routines;messages/routines off;messages/routines on;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;nouns/pronouns off;nouns/pronouns on;objects;places;pray;random;read footnote;read footnotes;recording;recording off;recording on;replay;restart;restore;save;scope;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;showobj;sing;smell/sniff;sorry;stand up;think;trace;trace off;trace on;tree;verify;version;wait/z;wave;y/yes;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/hold/take off OBJ;check/describe/examine/watch/x OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cross/enter/go/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;footnote/note OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;hear/listen 
OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;purloin OBJ;put OBJ down;put down OBJ;put on OBJ;read OBJ;read footnote OBJ;read note OBJ;rotate/screw/turn/twist/unscrew OBJ;scope OBJ;search OBJ;showobj OBJ;showverb OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;trace OBJ;tree OBJ;wave OBJ;abstract OBJ to OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/hold/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/hold/take OBJ from OBJ;shoot OBJ at OBJ;shoot OBJ with OBJ;tell OBJ about OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
# Game configuration for "Dragon".
# Same schema as the sibling entries: "rom" is the story file (".z5" suggests
# a version-5 Z-machine file), "seed" an integer RNG seed (presumably consumed
# by the interpreter -- verify against caller), "walkthrough" a '/'-separated
# command script, "grammar" the ';'-separated action templates ('/' separates
# synonyms, OBJ is a noun-phrase slot), and "max_word_length" the apparent
# dictionary word-truncation length (NOTE(review): confirm).
dragon = {
"name": "dragon",
"rom": "dragon.z5",
"seed": 0,
# Walkthrough adapted from http://mirror.ifarchive.org/if-archive/games/glulx/dragon.zip
"walkthrough": "s/e/d/get coin/examine barrels/get mug/u/w/w/buy box/get box/w/s/w/n/get lamp/s/e/n/e/s/s/e/get key/examine table/get bananas/rub lamp/s/w/n/w/s/s/tip boot/get screwdriver/n/n/n/n/n/get bell/unlock door with key/drop key/n/u/give bananas/build glider/drop screw/get sword/get booklet/get glider/jump/e/ring bell/n/hello/give bell/get horn/s/s/examine reeds/get sack/open sack/get water/s/e/fill bath/get candle/w/examine stump/get matches/light candle/drop matches/e/n/n/read booklet/drop booklet/get flute/e/s/s/play flute/examine bones/get bottle/examine bottle/break bottle/examine parchment/n/n/e/u/examine chair/examine carvings/examine goblin/d/w/s/s/blow horn/e",
"grammar" : "awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;candle off;candle on;carry/hold/take inventory;damn/fuck/shit/sod;die/q/quit;dive/swim;escape/run/leave/yes;exit/out/outside/stand;extinguis candle;extinguis light;find dragon;find gold;find treasure;fix/sing;fly glider;fly/hop/jump/skip;full/fullscore;full/fullscore score;games/instructi;get out/off/up;go/walk;greetings/hello/hi/hiya/hullo;hear/listen;help/hint;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;info;l/look;long/verbose;nap/sleep;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;rescue;restart;restore;save;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;smell/sniff;sorry;stand up;strike match;think;verify;version;wait/z;wave;y;adjust/set OBJ;attach/fasten/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;build/construct/fit/make OBJ;buy/purchase OBJ;carry/hold/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ out;empty out OBJ;exam/read/check/describe/examine/watch/x OBJ;feel/fondle/grope/touch OBJ;fill OBJ;find OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;greetings/hello/hi/hiya/hullo/greet OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;ignite/burn/light OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave/yes OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;no/open/uncover/undo/unwrap OBJ;off OBJ;on OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;play 
OBJ;pour OBJ;put OBJ down;put down OBJ;put on OBJ;ride OBJ;ring OBJ;rotate/screw/turn/twist/unscrew OBJ;run/go/walk/cross/enter OBJ;run/go/walk/leave/yes into/in/inside/through OBJ;search OBJ;shatter OBJ;smell/sniff OBJ;spend OBJ;squash/squeeze OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;throw OBJ;throw OBJ down;throw down OBJ;tip/empty OBJ;unlock/lock OBJ;wave OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/hold/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;discard/drop OBJ in/into/down OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/hold/take OBJ from OBJ;tell OBJ about OBJ;throw/discard/drop OBJ at/against/on/onto OBJ;throw/discard/drop/put OBJ on/onto OBJ;throw/insert OBJ in/into OBJ;unlock/no/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
enchanter = {
"name" : "enchanter",
"rom" : "enchanter.z3",
"seed" : 0,
# Walkthrough adapted from http://mirror.ifarchive.org/if-archive/solutions/jgunness.zip
"walkthrough": "NE/enter shack/OPEN OVEN/GET BREAD/take JUG/take LANTERN/exit shack/NE/SE/NE/FILL JUG/SW/SE/SW/SW/S/READ SCROLL/GNUSTO REZROV SPELL/NE/NE/E/E/LEARN REZROV/REZROV GATE/E/N/FROTZ LANTERN/N/U/GET EGG/EXAMINE IT/TURN HANDLE/PRESS KNOB/PULL SLIDE/MOVE CRANK/PUSH BUTTON/GET SHREDDED SCROLL/READ IT/D/E/E/E/E/E/LEARN REZROV/REZROV GATE/N/GET CRUMPLED SCROLL/READ IT/GNUSTO KREBF SPELL/LEARN KREBF/KREBF SHREDDED SCROLL/READ FADED SCROLL/GNUSTO ZIFMIA SPELL/E/LEARN NITFOL/NITFOL FROGS/LOOK UNDER LILY PAD/READ DAMP SCROLL/GNUSTO CLEESH SPELL/W/drink water/S/S/EXAMINE TRACKS/REACH INTO HOLE/Take book/EAT BREAD/N/EXAMINE dusty book/READ OF UNSEEN TERROR/DROP DUSTY BOOK/READ FRAYED SCROLL/GNUSTO GONDAR SPELL/W/W/W/W/W/S/S/S/S/U/wait/LIE DOWN/STAND UP/EXAMINE POST/PRESS BUTTON/GET GOLD LEAF SCROLL/READ IT/GNUSTO VAXUM SPELL/D/E/S/OPEN DOOR/N/READ WRITINGS/MOVE BLOCK/E/GET STAINED SCROLL/READ IT/GNUSTO EXEX SPELL/W/S/U/E/E/S/SE/LEARN NITFOL/NITFOL TURTLE/TURTLE, FOLLOW ME/NW/N/E/U/LEARN EXEX/EXEX TURTLE/drink water/TURTLE, SE AND GET SCROLL THEN NW/READ BRITTLE SCROLL/GET IT/D/W/W/TURN OFF LANTERN/LOOK/MOVE LIGHTED PORTRAIT/GET BLACK SCROLL/READ IT/GNUSTO OZMOO SPELL/GET CANDLE/LEARN FROTZ/FROTZ CANDLE/LEARN FROTZ/LEARN OZMOO/W/N/FROTZ LANTERN/DROP ALL BUT CANDLE/eat bread/S/E/E/N/N/N/WAIT/OZMOO MYSELF/WAIT/D/W/W/S/CUT ROPE WITH DAGGER/DROP DAGGER/GET ALL BUT DAGGER/S/W/U/LIE DOWN/STAND UP/D/N/N/N/N/LEARN ZIFMIA/LEARN VAXUM/E/E/E/E/ZIFMIA ADVENTURER/LEARN BLORB/VAXUM ADVENTURER/SHOW EGG TO ADVENTURER THEN EAST/E/ADVENTURER, OPEN DOOR/WAIT/BLORB ADVENTURER/U/DROP EGG/drink water/GET PENCIL/GET MAP/D/W/W/W/W/W/W/S/S/S/S/EAT BREAD/E/S/D/READ MAP/DRAW LINE FROM F TO P/WAIT/ERASE LINE BETWEEN B AND R/ERASE LINE BETWEEN M AND V/DRAW LINE FROM B TO J/E/SE/SE/SW/DROP MAP/DROP PENCIL/GET SCROLL/READ IT/NE/NW/NW/W/U/U/N/OPEN BOX/GET VELLUM SCROLL/READ IT/GNUSTO MELBOR SPELL/N/W/W/W/drink water/W/NW/NE/FILL JUG/SW/SE/E/E/E/S/S/LEARN MELBOR/SLEEP/LEARN VAXUM/LEARN GONDAR/LEARN 
MELBOR/E/E/MELBOR MYSELF/EAT BREAD/E/N/N/N/E/E/KULCAD STAIRS/READ ORNATE SCROLL/IZYUK MYSELF/E/GONDAR DRAGON/VAXUM BEING/GUNCHO KRILL",
"grammar" : "again/g;answer/reply;back;bathe/swim/wade;brief;call/say/talk;chase/come/follow/pursue;concea/hide;curse/damn;diagno;dive/jump/leap;enter;filfre;gaze/l/look/stare;hello/hi;hop/skip;i/invent;krebf;leave;mumble/sigh;nap/sleep/snooze;plugh/xyzzy;pray;q/quit;repent;restar;restor;save;score;scream/shout/yell;script;spells;stand;stay;super/superb;t/time;thank/thanks;unscri;verbos;versio;wait/z;zork;answer/reply OBJ;attack/fight/hit/hurt/injure OBJ;avoid OBJ;awake/startl/surpri/wake OBJ;awake/startl/surpri/wake up OBJ;banish/begone/drive/exorci OBJ;banish/begone/drive/exorci away OBJ;banish/begone/drive/exorci out OBJ;bathe/swim/wade in OBJ;beckon/brandi/motion/wave OBJ;beckon/brandi/motion/wave to OBJ;beckon/brandi/motion/wave/scream/shout/yell at OBJ;bite/kick OBJ;blorb OBJ;blow out OBJ;board OBJ;break/crack/damage/destro/hatch/smash OBJ;call/say/talk to OBJ;carry/get/grab/hold/remove/take OBJ;carry/get/grab/hold/remove/take off OBJ;carry/get/grab/hold/remove/take out OBJ;cast/incant OBJ;chase/come/follow/pursue OBJ;cleesh OBJ;climb/scale/sit OBJ;climb/scale/sit/carry/get/grab/hold/remove/take in OBJ;climb/scale/sit/carry/get/grab/hold/remove/take on OBJ;climb/scale/sit/go/procee/run/step/walk down OBJ;climb/scale/sit/go/procee/run/step/walk up OBJ;close OBJ;concea/hide OBJ;connec OBJ;consum/eat/taste OBJ;count OBJ;cross/ford OBJ;curse/damn OBJ;deflat OBJ;descri/examin/inspec/what/whats OBJ;descri/examin/inspec/what/whats on OBJ;descri/examin/inspec/what/whats/gaze/l/look/stare in OBJ;dig in OBJ;dig with OBJ;discon/erase/rub OBJ;discon/erase/rub betwee OBJ;discon/erase/rub from OBJ;discon/erase/rub out OBJ;disemb OBJ;dispat/kill/murder/slay/stab OBJ;dive/jump/leap across OBJ;dive/jump/leap from OBJ;dive/jump/leap in OBJ;dive/jump/leap off OBJ;dive/jump/leap/go/procee/run/step/walk over OBJ;douse/exting OBJ;draw/make betwee OBJ;draw/make from OBJ;draw/make on OBJ;drink/sip/swallo OBJ;drink/sip/swallo from OBJ;drop/exit/releas OBJ;enter OBJ;escape OBJ;escape 
from OBJ;exex OBJ;feel/pat/pet/touch OBJ;filfre OBJ;fill OBJ;find/see/seek/where OBJ;flip/set/shut/turn OBJ;flip/set/shut/turn off OBJ;flip/set/shut/turn on OBJ;fly/go/procee/run/step/walk OBJ;forget/unlear/unmemo OBJ;free/unatta/unfast/unhook/untie OBJ;frotz OBJ;gaze/l/look/stare OBJ;gaze/l/look/stare around OBJ;gaze/l/look/stare at OBJ;gaze/l/look/stare behind OBJ;gaze/l/look/stare out OBJ;gaze/l/look/stare throug OBJ;gaze/l/look/stare under OBJ;gaze/l/look/stare with OBJ;gaze/l/look/stare/search for OBJ;gestur/point at OBJ;gestur/point to OBJ;gnusto OBJ;go/procee/run/step/walk around OBJ;go/procee/run/step/walk behind OBJ;go/procee/run/step/walk in OBJ;go/procee/run/step/walk on OBJ;go/procee/run/step/walk to OBJ;go/procee/run/step/walk under OBJ;go/procee/run/step/walk with OBJ;gondar OBJ;gross out OBJ;guncho OBJ;hello/hi OBJ;insert/lay/place/put/stuff down OBJ;insert/lay/place/put/stuff on OBJ;izyuk OBJ;kiss OBJ;knock/rap at OBJ;knock/rap down OBJ;knock/rap on OBJ;krebf OBJ;kulcad OBJ;launch OBJ;lean on OBJ;learn OBJ;leave OBJ;lie down OBJ;lift/raise OBJ;lift/raise up OBJ;light OBJ;listen for OBJ;listen to OBJ;lower OBJ;melbor OBJ;memori OBJ;molest/rape OBJ;move/pull/tug OBJ;nap/sleep/snooze in OBJ;nap/sleep/snooze on OBJ;nitfol OBJ;open OBJ;open up OBJ;ozmoo OBJ;pick OBJ;pick up OBJ;play OBJ;pour/spill OBJ;press/push OBJ;press/push on OBJ;pull/tug on OBJ;pump up OBJ;reach in OBJ;read/skim OBJ;rezrov OBJ;roll up OBJ;search OBJ;search in OBJ;send for OBJ;shake OBJ;slide OBJ;smell/sniff OBJ;spin OBJ;squeez OBJ;stand/carry/get/grab/hold/remove/take up OBJ;stay OBJ;strike OBJ;swing/thrust OBJ;tell OBJ;thank/thanks OBJ;vaxum OBJ;wait/z for OBJ;wear OBJ;who OBJ;wind OBJ;wind up OBJ;write on OBJ;write with OBJ;zifmia OBJ;apply OBJ to OBJ;ask OBJ for OBJ;attach/fasten/secure/tie OBJ to OBJ;attach/fasten/secure/tie up OBJ with OBJ;attack/fight/hit/hurt/injure OBJ with OBJ;beckon/brandi/motion/wave OBJ at OBJ;blind/jab/poke OBJ with 
OBJ;break/crack/damage/destro/hatch/smash OBJ with OBJ;break/crack/damage/destro/hatch/smash down OBJ with OBJ;burn/ignite OBJ with OBJ;burn/ignite down OBJ with OBJ;carry/get/grab/hold/remove/take OBJ from OBJ;carry/get/grab/hold/remove/take OBJ off OBJ;carry/get/grab/hold/remove/take OBJ out OBJ;carry/get/grab/hold/remove/take/bring OBJ OBJ;cast/incant OBJ at OBJ;cast/incant OBJ on OBJ;chuck/hurl/throw/toss OBJ at OBJ;chuck/hurl/throw/toss OBJ in OBJ;chuck/hurl/throw/toss OBJ off OBJ;chuck/hurl/throw/toss OBJ on OBJ;chuck/hurl/throw/toss OBJ over OBJ;chuck/hurl/throw/toss OBJ with OBJ;concea/hide OBJ from OBJ;connec OBJ to OBJ;connec OBJ with OBJ;cut/pierce/slice OBJ with OBJ;cut/pierce/slice with OBJ with OBJ;dig OBJ with OBJ;dig in OBJ with OBJ;discon/erase/rub betwee OBJ to OBJ;discon/erase/rub betwee OBJ with OBJ;discon/erase/rub from OBJ to OBJ;discon/erase/rub from OBJ with OBJ;discon/erase/rub out OBJ with OBJ;dispat/kill/murder/slay/stab OBJ with OBJ;donate/feed/give/hand/offer OBJ OBJ;donate/feed/give/hand/offer OBJ to OBJ;draw/make betwee OBJ to OBJ;draw/make betwee OBJ with OBJ;draw/make from OBJ to OBJ;draw/make from OBJ with OBJ;draw/make on OBJ with OBJ;drop/exit/releas OBJ down OBJ;drop/exit/releas OBJ on OBJ;drop/exit/releas/insert/lay/place/put/stuff OBJ in OBJ;feel/pat/pet/touch OBJ with OBJ;fill OBJ at OBJ;fill OBJ with OBJ;fix/glue/patch/plug/repair OBJ with OBJ;flip/set/shut/turn OBJ for OBJ;flip/set/shut/turn OBJ to OBJ;flip/set/shut/turn OBJ with OBJ;free/unatta/unfast/unhook/untie OBJ from OBJ;gag OBJ with OBJ;gaze/l/look/stare at OBJ with OBJ;grease/lubric/oil OBJ with OBJ;hone/sharpe OBJ with OBJ;inflat OBJ with OBJ;insert/lay/place/put/stuff OBJ behind OBJ;insert/lay/place/put/stuff OBJ on OBJ;insert/lay/place/put/stuff OBJ under OBJ;light OBJ with OBJ;liquif/melt OBJ with OBJ;lock OBJ with OBJ;open OBJ with OBJ;pick OBJ with OBJ;pour/spill OBJ from OBJ;pour/spill OBJ in OBJ;pour/spill OBJ on OBJ;pump up OBJ with OBJ;read/skim OBJ with 
OBJ;show OBJ OBJ;show OBJ to OBJ;slide/press/push OBJ OBJ;slide/press/push OBJ to OBJ;slide/press/push OBJ under OBJ;spray OBJ on OBJ;spray OBJ with OBJ;squeez OBJ on OBJ;strike OBJ with OBJ;swing/thrust OBJ at OBJ;unlock OBJ with OBJ;write on OBJ with OBJ;",
"max_word_length" : 6
}
enter = {
"name": "enter",
"rom": "enter.z5",
"seed" : 0,
# Walkthrough obtained by issuing the command WALKTHRU.
"walkthrough": "open door/talk to alltext/3/g/3/g/1/1/talk to queenie/2/g/3/g/4/look at mural/look at queenie/4/n/take list/take envelope/read list/s/close door/w/n/e/give gram to meghan/w/s/s/e/w/s/talk to Stephanie/1/g/2/look at Stephanie/put gram in basket/3/open door/w/give gram to Danielle/say -120 to garrulous/e/n/n/n/n/wear badge/open door/knock on door/talk to empirious/1/w/take jar/give gram to ed/e/nw/talk to woman/1/w/w/e/n/talk to reunite/1/g/3/s/w/w/s/give gram to Alicia/give gram to andrea/n/e/n/say rat to Picasso/give gram to silas/s/e/n/give pass to reunite/s/se/s/s/w/1/give gram to jim/say firefly to jim/look at list/talk to alltext/2/e/give gram to queenie/talk to queenie/4/e/open door/n",
"grammar" : "about;awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;carry/hold/take inventory;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;get out/off/up;hear/listen;hint;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;intro;l/look;leave/go/run/walk;long/verbose;menu;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;restart;restore;save;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;sing;smell/sniff;smile;sorry;stand up;think;verify;version;wait/z;walkthru;wave;wink;y/yes;0;1;2;3;4;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/hold/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cross/enter/go/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;knock on OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look through OBJ;l/look under OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;lock OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;smell/sniff OBJ;smile at OBJ;squash/squeeze 
OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;talk to OBJ;taste OBJ;wave OBJ;wink at OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/hold/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/hold/take OBJ from OBJ;tell OBJ about OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
gold = {
"name": "gold",
"rom": "gold.z5",
"seed": 0,
# Walkthrough adapted from http://mirror.ifarchive.org/if-archive/games/adrift/Goldilocks.zip and http://www.allthingsjacq.com/intfic_clubfloyd_20100301.html
"walkthrough": "examine pedlar/ask pedlar about suitcase/ask pedlar about beans/south/west/north/north/get milkbottle/south/west/examine vegetable plot/pull leaves/inventory/north/feed turnip to daisy/south/east/south/east/north/inventory/examine packet/open packet/get magic bean/south/west/north/west/in/get secateurs/get paintbrush/examine plant pots/get pots/get trowel/open trunk/get dynamite/out/plant bean in vegetable plot/fill milkbottle with pondwater/water bean/look/get secateurs/get dynamite/get paintbrush/climb beanstalk/jump to roof/climb down chimney/north/examine mousetrap/get cheese/open freezer/get pork chops/south/east/open door/up/ask wolf about pigs/give pork to wolf/down/north/drop secateurs/south/west/north/get matchbox/south/east/north/sit in tiny little chair/look/get firewood/south/west/north/south/examine fireplace/examine ash/put firewood on hearth/light firewood with match/cook pork chops/get pork chops/east/up/give pork chops to wolf/down/wedge door with cheese/out/south/west/north/ask wolf to blow turbine/get oil can/south/east/north/in/inventory/drop matchbox/get rug/open trapdoor/west/north/get porridge oats/examine porridge oats/open carton/get pendant/drop carton/wear pendant/south/east/down/examine wardrobe/unlock wardrobe with key/drop key/open wardrobe/get dumbells/up/west/examine table/get spoon/examine dresser/open dresser/get gigantic bowl/pour hot porridge in gigantic bowl/pour cold porridge in gigantic bowl/eat tiny bowl of porridge/eat gigantic quantity of just-right porridge/drop gigantic bowl/east/down/get dumbells/up/north/sit in medium-sized chair/get spectacles/drop dumbells/out/south/down/move wardrobe/west/north/examine skeleton/south/east/examine washing machine/open washing machine/open washing machine with poker/examine washing machine/search sludge/examine tiny metal/get brass key/examine fusebox/examine volt meter/examine switch A/examine switch b/press switch b/up/north/examine large chair/get 
antimacassar/examine electrodes/drop brass key/drop antimacassar/drop spoon/get secateurs/cut cable/oil secateurs/cut cable/south/down/press switch b/up/north/sit in large chair/press button/get remote control/examine television/plug in television/watch television/sit in large chair/press button/stand up/south/out/south/west/climb beanstalk/jump to roof/adjust aerial/jump to beanstalk/down/east/north/in/north/sit in large chair/press button/watch television/examine remote control/press channel/press channel/press buy/drop remote control/open fridge/get beer/drink beer/drop beer can/sit in large chair/press button/stand up/south/out/look/get package/open package/get leaflet/drop secateurs/get toaster/drop package/inventory/in/west/look/examine dining table/examine dynamite/examine toaster/put dynamite in toaster/east/north/south/down/press switch a/up/north/sit in large chair/press button/unplug tv/plug in toaster/press lever/examine toaster/inventory/wear spectacles/read leaflet/sit on large chair/press button/stand up/south/down/press switch a/up/inventory/drop spectacles/drop leaflet/north/sit in large chair/press button/west/open crate with poker/look/get magic porridge pot/unlock filing cabinet with key/open filing cabinet/read instructions/examine bench/get wrench/get paint/east/sit in large chair/press button/stand up/south/up/open window with wrench/examine huge bed/get blanket/drop blanket/examine snooker table/examine pockets/get cue ball/down/west/look/paint cue ball/east/out/south/west/throw ball in pond/get frog/east/north/in/up/look/kiss frog/look/ask fairy godmother about cinderella/down/out/south/west/examine vegetable plot/get pumpkin/east/north/in/up/give pumpkin to fairy godmother/ask fairy godmother about horse/down/west/north/get mousetrap/examine mousetrap/get dead mouse/drop mousetrap/south/east/up/give mouse to fairy godmother/ask fairy godmother about dress/look/get wand/down/north/sit in large chair/press button/west/wave wand at magic 
porridge pot/wave wand at underwear/east/sit in large chair/press button/stand up/south/down/look/say alakazam/up/get bearskin rug/cover trapdoor with bearskin rug/look/up/lie in tiny little bed",
"grammar" : "about/credits;alakazam;alakazoo;awake/awaken/wake;awake/awaken/wake up;bend;bother/curses/darn/drat;brief/normal;carry/hold/take inventory;damn/fuck/shit/sod;die/q/quit;dive/swim;full/fullscore;full/fullscore score;get out/off/up;hear/listen;help/hint;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;leave/go/run/walk;long/verbose;nap/sleep;no;noscript/unscript;notify;notify off;notify on;nouns/pronouns;objects;places;pray;recording;recording off;recording on;replay;restart;restore;save;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;sing;smell/sniff;sorry;stand up;stand/exit/out/outside;think;verify;version;wait/z;wave;y/yes;zoom;zoom away/absent;zoom in;zoom out;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;bury/plant OBJ;buy/purchase OBJ;carry/hold/take off OBJ;carry/hold/take/get OBJ;check/describe/examine/watch/x OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;climb/scale OBJ;climb/scale down OBJ;climb/scale up OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cook OBJ;damn/fuck/shit/sod OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull/unplug OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty/pour OBJ;empty/pour OBJ out;empty/pour out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;flip OBJ;get in/into/on/onto OBJ;get off OBJ;go/run/walk/cross/enter OBJ;have sex with OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;hop/jump/skip to OBJ;knock OBJ;knock on OBJ;l/look at OBJ;l/look behind OBJ;l/look inside/in/into/through/on OBJ;l/look under OBJ;leave/exit/out/outside OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside 
OBJ;milk OBJ;mix OBJ;oil OBJ;open/uncover/undo/unwrap OBJ;paint OBJ;peel OBJ;peel off OBJ;pet OBJ;pick OBJ up;pick up OBJ;plug OBJ;plug OBJ in;plug in OBJ;put OBJ down;put down OBJ;put on OBJ;read OBJ;ring/clear/move/press/push/shift OBJ;rotate/turn/twist/unscrew OBJ;screw OBJ;screw/rotate/turn/twist/unscrew/switch OBJ off;screw/rotate/turn/twist/unscrew/switch OBJ on;screw/rotate/turn/twist/unscrew/switch on OBJ;screw/rotate/turn/twist/unscrew/switch/close/cover/shut off OBJ;search OBJ;sell OBJ;separate OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;taste OBJ;use OBJ;water OBJ;wave OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;ask that OBJ OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;bury/plant OBJ in OBJ;carry/hold/take OBJ off OBJ;carry/hold/take/remove/get OBJ from OBJ;chop/cut/prune/slice OBJ with OBJ;clear/move/press/push/shift OBJ OBJ;close/cover/shut OBJ with OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty/pour OBJ in OBJ;empty/pour OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;fill OBJ with OBJ;force/jemmy/lever/prise/prize/pry OBJ apart/open with OBJ;force/jemmy/lever/prise/prize/pry apart/open OBJ with OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;oil OBJ with OBJ;plug OBJ into OBJ;put OBJ in/inside/into OBJ;put OBJ under OBJ;put/discard/drop/throw OBJ on/onto OBJ;read OBJ in OBJ;read about OBJ in OBJ;tell OBJ about OBJ;tell/ask OBJ to OBJ;transfer/clear/move/press/push/shift OBJ to OBJ;unlock/force/jemmy/lever/prise/prize/pry/open/uncover/undo/unwrap OBJ with OBJ;wave OBJ at OBJ;wedge OBJ with OBJ;",
"max_word_length" : 9
}
hhgg = {
"name": "hhgg",
"rom": "hhgg.z3",
"seed" : 4,
# Walkthrough adapted from http://www.eristic.net/games/infocom/hhg.html
'walkthrough': 'Get Up/Turn on Light/Get Gown/Wear Gown/Look in pocket/take analgesic/Get all/South/Get Mail/South/Lie Down In Front Of Bulldozer/Wait/Wait/Wait/Wait/Wait/Wait/Wait/Follow Ford/Enter Pub/Look Shelf/Buy Sandwich/Drink Beer/Drink Beer/Drink Beer/Exit/Feed dog sandwich/Get towel/Wait/wait/wait/wait/wait/wait/wait/wait/get device/push green button/n/s/n/s/smell/examine shadow/Eat Peanuts/Take off gown/Put Gown on hook/put towel over drain/wait/get satchel/put satchel in front of panel/put mail on satchel/push dispenser button/get gown/wear gown/get all/push switch/z/z/z/z/z/z/z/enjoy poetry/z/z/z/z/z/type \"bleem\"/get plotter/z/z/z/examine thumb/press green button/n/s/n/s/listen/Aft/read brochure/z/z/inventory/take pincer/put all into thing/d/port/touch pad/get cup/starboard/aft/aft/yes/yes/aft/no/look/look/drop thing/get rasp/get pliers/put rasp into thing/put pliers into thing/get improbability drive/fore/fore/up/drop drive/drop cup/drop plotter/z/drop plotter/z/drop plotter/put small plug in plotter/put bit in cup/turn on drive/n/s/n/s/smell/look at shadow/say my name/e/examine memorial/get sharp stone/put towel on your head/carve my name on the memorial/remove towel/drop stone/w/sw/get interface/z/z/z/z/z/hear the dark/aft/aft/up/d/port/open panel/take circuit/insert interface in nutrimat/close panel/touch pad/starboard/up/put large plug in large receptacle/z/turn on drive/d/port/get tea/starboard/up/drop tea/z/z/z/z/z/z/z/z/z/z/remove bit/put bit in tea/turn on drive/z/touch/touch/drink liquid/examine arthur/drop wine/get fluff/open bag/put fluff in bag/get wine/z/z/z/z/z/z/hear/aft/aft/up/open handbag/get tweezers/get fluff/put all in thing/turn on drive/z/touch/touch/touch/touch/drink liquid/get flowerpot/put it in thing/examine thumb/press red button/give thumb to robot/z/show warranty/press green button/z/z/z/z/hear/aft/aft/up/turn on drive/see/see/see/see/examine light/look under seat/unlock box with key/get glass/get wrench/push autopilot 
button/steer towards rocky spire/z/z/z/stand up/out/wave at crowd/z/z/z/guards, drop rifles/Trillian, shoot rifles/enter/z/z/z/z/hear/aft/aft/aft/down/get fluff/get tools/up/fore/up/turn on drive/see/examine light/n/open satchel/get fluff/get towel/give towel to Arthur/idiot/walk around bulldozer/Prosser, lie in the mud/s/w/buy beer/buy peanuts/drink beer/drink beer/e/n/give fluff to Arthur/z/z/z/z/z/z/z/hear/aft/aft/up/i/turn on drive/hear/hear/hear/z/hear/hear/hear/hear/aft/get awl/z/z/z/z/n/n/n/get particule/z/z/z/z/hear/aft/aft/up/remove bit/take tea/i/take no tea/i/plant fluff in flowerpot/seat/plant fluff in flowerpot/satchel/plant fluff in flowerpot/jacket/plant fluff in flowerpot/z/z/z/port/examine plant/get fruit/d/aft/port/open door/drink tea/port/get chisel/Marvin, open the hatch/starboard/d/eat fruit/drop all but pincer/take pincer/drop thing/i/starboard/z/z/give pincer to Marvin/port/d',
"grammar" : "again/g;answer/reply;applau/cheer/clap;argue/protes;blast/fire/shoot;bleem/frippi/gashee/lyshus/misera/morpho/thou/venchi/wimbgu;brief;crawl/kneel/peek;depart/exit/withdr;diagno;disrob;dive/jump/leap;doze/nap/snooze;enter;escape/flee;footno;gaze/l/look/stare;go/procee/run/step/walk;hello/hi;help/hint/hints;hide;hitch/hitchh;hop/skip;howl/scream/shout/yell;idiot;invent/i/i'm/im;leave;no;ok/okay/sure/y/yes;panic;q/quit;relax;restar;restor;rise/stand;save;say/speak/talk;score;script;sleep;smile;stay/wait/z;super/superb;type;unscri;verbos;versio;wave;why;activa/start OBJ;addres/tell OBJ;answer/reply OBJ;answer/reply to OBJ;apprec OBJ;approa OBJ;assaul/attack/fight/hit/kill/murder/punch/slap/strike OBJ;attach/fasten/secure/tie OBJ;attach/fasten/secure/tie togeth OBJ;awake/rouse/wake OBJ;awake/rouse/wake up OBJ;bite OBJ;blast/fire/shoot OBJ;block/stop OBJ;board/embark OBJ;break/crack/damage/demoli/destro/smash/wreck OBJ;break/crack/damage/demoli/destro/smash/wreck down OBJ;brush OBJ;buy/order/purcha OBJ;call/phone OBJ;carry/catch/get/grab/hold/take OBJ;carry/catch/get/grab/hold/take dresse OBJ;carry/catch/get/grab/hold/take drunk OBJ;carry/catch/get/grab/hold/take off OBJ;carry/catch/get/grab/hold/take out OBJ;carry/catch/get/grab/hold/take undres OBJ;chase/follow/pursue OBJ;clean/tidy/wash OBJ;clean/tidy/wash up OBJ;climb/scale OBJ;climb/scale down OBJ;climb/scale over OBJ;climb/scale up OBJ;climb/scale/dive/jump/leap/go/procee/run/step/walk throug OBJ;climb/scale/rest/sit/squat/carry/catch/get/grab/hold/take in OBJ;climb/scale/rest/sit/squat/carry/catch/get/grab/hold/take on OBJ;close/shut OBJ;close/shut off OBJ;count OBJ;debark/disemb OBJ;depart/exit/withdr OBJ;descen OBJ;descri/examin/inspec/observ/scour/see/study OBJ;descri/examin/inspec/observ/scour/see/study on OBJ;descri/examin/inspec/observ/scour/see/study/gaze/l/look/stare in OBJ;descri/examin/inspec/observ/scour/see/study/gaze/l/look/stare/frisk/rummag/search for OBJ;devour/eat/gobble/ingest 
OBJ;dig in OBJ;dig throug OBJ;dig with OBJ;discon/unplug OBJ;dive/jump/leap across OBJ;dive/jump/leap from OBJ;dive/jump/leap off OBJ;dive/jump/leap/go/procee/run/step/walk in OBJ;dive/jump/leap/go/procee/run/step/walk out OBJ;dive/jump/leap/go/procee/run/step/walk over OBJ;doff/remove/shed OBJ;don/wear OBJ;donate/give/hand/offer/sell up OBJ;draw/open/part OBJ;draw/open/part up OBJ;drink/guzzle/imbibe/quaff/sip/swallo/swill OBJ;drink/guzzle/imbibe/quaff/sip/swallo/swill from OBJ;drop OBJ;enjoy OBJ;enter OBJ;escape/flee OBJ;escape/flee from OBJ;exting OBJ;feed OBJ;feel/pat/pet/rub/touch OBJ;fill OBJ;find/seek OBJ;fix/repair/unjam OBJ;flick/flip/switch/toggle/turn OBJ;flick/flip/switch/toggle/turn around OBJ;flick/flip/switch/toggle/turn off OBJ;flick/flip/switch/toggle/turn on OBJ;footno OBJ;free/unatta/unfast/unknot/untie OBJ;frisk/rummag/search OBJ;frisk/rummag/search in OBJ;gaze/l/look/stare OBJ;gaze/l/look/stare around OBJ;gaze/l/look/stare at OBJ;gaze/l/look/stare behind OBJ;gaze/l/look/stare down OBJ;gaze/l/look/stare on OBJ;gaze/l/look/stare out OBJ;gaze/l/look/stare throug OBJ;gaze/l/look/stare under OBJ;gaze/l/look/stare up OBJ;go/procee/run/step/walk OBJ;go/procee/run/step/walk around OBJ;go/procee/run/step/walk away OBJ;go/procee/run/step/walk behind OBJ;go/procee/run/step/walk down OBJ;go/procee/run/step/walk on OBJ;go/procee/run/step/walk to OBJ;go/procee/run/step/walk up OBJ;hear OBJ;hello/hi OBJ;hide behind OBJ;hide under OBJ;howl/scream/shout/yell at OBJ;howl/scream/shout/yell to OBJ;hurl/throw/toss OBJ;hurl/throw/toss in OBJ;i/i'm/im OBJ;insert/lay/place/put/stuff down OBJ;insert/lay/place/put/stuff on OBJ;kick OBJ;kiss OBJ;knock/rap at OBJ;knock/rap down OBJ;knock/rap on OBJ;leave OBJ;lick/taste OBJ;lie/reclin before OBJ;lie/reclin down OBJ;lie/reclin in OBJ;lie/reclin on OBJ;lift/raise OBJ;lift/raise up OBJ;light OBJ;listen to OBJ;lock OBJ;lower OBJ;make OBJ;move/pull OBJ;move/pull togeth OBJ;pay for OBJ;pick OBJ;pick up OBJ;point/steer at 
OBJ;point/steer to OBJ;pour/spill/sprink OBJ;press/push on OBJ;press/push/slide OBJ;rape OBJ;read/skim OBJ;refuse OBJ;replac OBJ;rest/sit/squat down OBJ;rise/stand before OBJ;rise/stand in OBJ;rise/stand on OBJ;rise/stand/carry/catch/get/grab/hold/take up OBJ;rotate/spin/whirl OBJ;save/help/hint/hints OBJ;say/speak/talk OBJ;say/speak/talk to OBJ;shake OBJ;sleep in OBJ;sleep on OBJ;smell/sniff/whiff OBJ;smile at OBJ;stay/wait/z for OBJ;thank/thanks OBJ;type on OBJ;unlock OBJ;wave OBJ;wave at OBJ;wave to OBJ;what/what'/whats OBJ;what/what'/whats about OBJ;where/wheres OBJ;who/whos OBJ;ask/consul/query OBJ about OBJ;ask/consul/query OBJ for OBJ;ask/consul/query OBJ on OBJ;assaul/attack/fight/hit/kill/murder/punch/slap/strike OBJ with OBJ;blast/fire/shoot OBJ at OBJ;blast/fire/shoot OBJ with OBJ;block/stop OBJ with OBJ;break/crack/damage/demoli/destro/smash/wreck OBJ with OBJ;brush OBJ with OBJ;bury/plant OBJ in OBJ;call/phone OBJ on OBJ;call/phone OBJ with OBJ;carry/catch/get/grab/hold/take OBJ in OBJ;carry/catch/get/grab/hold/take OBJ off OBJ;carry/catch/get/grab/hold/take OBJ out OBJ;carve/inscri/scratc/write OBJ in OBJ;carve/inscri/scratc/write OBJ on OBJ;carve/inscri/scratc/write OBJ with OBJ;connec/plug/attach/fasten/secure/tie OBJ to OBJ;cover OBJ with OBJ;cut/slice OBJ with OBJ;cut/slice throug OBJ with OBJ;dangle/drop/hang/insert/lay/place/put/stuff OBJ in OBJ;descri/examin/inspec/observ/scour/see/study OBJ throug OBJ;doff/remove/shed/carry/catch/get/grab/hold/take OBJ from OBJ;drape/wrap OBJ in OBJ;draw/open/part OBJ with OBJ;drop OBJ down OBJ;drop/insert/lay/place/put/stuff OBJ before OBJ;drop/insert/lay/place/put/stuff/drape/wrap OBJ on OBJ;feed/donate/give/hand/offer/sell OBJ OBJ;feed/donate/give/hand/offer/sell OBJ to OBJ;feel/pat/pet/rub/touch OBJ with OBJ;flick/flip/switch/toggle/turn OBJ to OBJ;flick/flip/switch/toggle/turn OBJ with OBJ;flick/flip/switch/toggle/turn off OBJ OBJ;flick/flip/switch/toggle/turn on OBJ OBJ;gaze/l/look/stare at OBJ throug 
OBJ;hang OBJ from OBJ;hang OBJ on OBJ;hurl/throw/toss OBJ at OBJ;hurl/throw/toss OBJ in OBJ;hurl/throw/toss OBJ off OBJ;hurl/throw/toss OBJ over OBJ;hurl/throw/toss OBJ throug OBJ;hurl/throw/toss OBJ to OBJ;hurl/throw/toss OBJ up OBJ;insert/lay/place/put/stuff OBJ across OBJ;insert/lay/place/put/stuff OBJ at OBJ;insert/lay/place/put/stuff OBJ behind OBJ;insert/lay/place/put/stuff OBJ down OBJ;insert/lay/place/put/stuff/attach/fasten/secure/tie/drape/wrap OBJ around OBJ;insert/lay/place/put/stuff/drape/wrap OBJ over OBJ;lock OBJ with OBJ;move/pull/press/push/flick/flip/switch/toggle/turn/hurl/throw/toss OBJ OBJ;my OBJ OBJ;pick OBJ with OBJ;plug OBJ in OBJ;plug in OBJ in OBJ;plug in OBJ to OBJ;point/steer OBJ at OBJ;point/steer OBJ to OBJ;pour/spill/sprink OBJ in OBJ;pour/spill/sprink OBJ on OBJ;pour/spill/sprink OBJ over OBJ;press/push/insert/lay/place/put/stuff/slide OBJ under OBJ;read/skim OBJ throug OBJ;read/skim OBJ with OBJ;shake OBJ with OBJ;show OBJ OBJ;show OBJ to OBJ;tell OBJ OBJ;tell OBJ about OBJ;tell OBJ to OBJ;unlock OBJ with OBJ;water OBJ with OBJ;what/what'/whats OBJ OBJ;",
"max_word_length" : 6
}
hollywood = {
"name": "hollywood",
"rom": "hollywood.z3",
"seed" : 0,
# walkthrough adapted from http://mirror.ifarchive.org/if-archive/solutions/jgunness.zip
"walkthrough": "N/OPEN MAILBOX/GET PIECE OF PAPER AND BUSINESS CARD/OPEN DOOR/N/TURN ON FLASHLIGHT/N/EXAMINE MODEL/PRESS GREEN/PRESS GREEN/PRESS GREEN/PRESS BLACK/PRESS BLACK/PRESS WHITE/PRESS WHITE/PRESS GREEN/PRESS GREEN/PRESS GREEN/PRESS BLACK/PRESS BLUE/PRESS GREEN/PRESS GREEN/PRESS GREEN/PRESS RED/PRESS RED/PRESS RED/GET RING/E/E/S/GET FILM AND SLIDE/EXAMINE FILM PROJECTOR/REMOVE CAP/DROP IT/TURN ON SLIDE PROJECTOR/INSERT SLIDE IN IT/FOCUS SLIDE LENS/INSERT FILM IN FILM PROJECTOR/TURN ON FILM PROJECTOR/EXAMINE SCREEN/N/GET YELLOW CARD/W/W/S/W/EXAMINE RED STATUETTE/EXAMINE WHITE STATUETTE/EXAMINE BLUE STATUETTE/E/E/LOOK BEHIND PAINTING/GET GREEN CARD/EXAMINE SAFE/TURN DIAL RIGHT 3/TURN IT LEFT 7/TURN IT RIGHT 5/OPEN SAFE/GET GRATER/E/PLAY people/PUSH PIANO NORTH/D/S/GET PILLAR/DROP IT/N/U/PUSH PIANO SOUTH/AGAIN/D/N/GET METER/S/U/OPEN PIANO/GET VIOLET CARD/W/W/DROP RING/DROP GRATER/DROP METER/N/UNLOCK DOOR/OPEN IT/N/GET ORANGE CARD/S/S/OPEN CLOSET/IN/PULL THIRD PEG/OPEN DOOR/OUT/TURN NEWEL/W/S/LOOK UNDER MAT/GET RED CARD/N/E/E/GET SACK/OPEN WINDOW/OPEN SACK/S/GET BLUE CARD/N/W/D/DROP SACK/W/IN/REMOVE BRICK/DROP IT/GET INDIGO CARD/drop all but flashlight/U/U/U/E/D/GET PENGUIN/U/W/D/D/D/take all/OUT/N/W/D/READ BUSINESS CARD/DROP IT/EXAMINE COMPUTER/TURN IT ON/INSERT RED CARD IN SLOT/INSERT ORANGE CARD IN SLOT/INSERT YELLOW CARD IN SLOT/INSERT GREEN CARD IN SLOT/INSERT BLUE CARD IN SLOT/INSERT INDIGO CARD IN SLOT/INSERT VIOLET CARD IN SLOT/EXAMINE LIGHTS/U/GET MATCHBOX/E/GET PAPER/PUT IT ON YELLOWED PAPER/S/GET RED STATUETTE/DIAL 576-3190/E/N/W/W/D/TAKE TOUPEE/U/E/E/S/U/IN/GET SKIS/PULL SECOND PEG/OPEN DOOR/OUT/N/W/W/D/U/E/E/S/DROP TOUPEE/DROP PHOTO/DROP LETTER/GET FINCH/DROP FINCH/N/N/N/NW/GET SHOVEL/NE/N/N/W/N/W/N/W/S/W/W/N/W/S/E/S/E/N/E/S/W/N/W/S/W/N/W/S/W/N/E/N/E/N/E/E/N/E/S/E/E/S/E/N/E/N/E/S/S/S/W/W/S/E/N/W/S/DIG IN GROUND/DROP SHOVEL/GET 
STAMP/N/E/S/W/N/E/E/N/N/N/W/S/W/S/W/N/W/W/N/W/S/W/W/S/W/S/W/S/E/N/E/S/E/N/E/S/E/N/W/S/W/N/W/N/E/S/E/E/N/E/S/E/S/E/S/S/N/E/N/GET BALL/PUT IT IN CANNON/OPEN MATCHBOX/GET MATCH/LIGHT IT/LIGHT FUSE WITH MATCH/OPEN COMPARTMENT/GET MASK/E/E/DROP FLASHLIGHT/WEAR SKIS/D/REMOVE SKIS/DROP THEM/GET MATCH/LIGHT CANDLE WITH FIRE/PUT MATCH IN WAX/S/W/DIVE/D/D/W/U/U/N/LIGHT MATCH/LIGHT CANDLE WITH MATCH/N/U/PULL CHAIN/BURN ROPE WITH CANDLE/ENTER RIGHT END OF PLANK/WAIT/DROP ALL BUT CANDLE/GET LADDER/PUT IT IN HOLE/D/GET LADDER/HANG IT ON HOOKS/EXAMINE SAFE/READ PLAQUE/TURN DIAL LEFT 4/TURN IT RIGHT 5/TURN IT LEFT 7/OPEN SAFE/GET FILM/U/GET ALL/U/E/E/GET FLASHLIGHT/W/W/S/GET BUCKET/FILL IT WITH WATER/SE/SW/S/S/S/IN/HANG BUCKET ON THIRD PEG/OUT/U/OPEN DOOR/IN/WAIT/WAIT/OPEN DOOR/OUT/OPEN PANEL/GET HYDRANT/GET PEG/READ NOTE/D/OPEN DOOR/IN/PUT PEG IN HOLE/take gun/throw gun at herman/take stick/throw stick at herman/take club/throw club at herman/stop blade/throw club at belt/turn off saw",
"grammar" : "aftern/bye/farewe/goodby/greet/greeti/hello/hi/salute/affirm/aye/naw/nay/negati/no/nope/ok/okay/positi/sure/y/yes/yup;back/ski/advanc/crawl/hike/hop/procee/run/skip/step/tramp/trudge/walk;bathe/swim;brief;diagno;fly;gaze/l/look/peek/peer/stare;hint/hints/aid/help/pray;i/invent;loiter/wait/z;nap/rest/sleep/snooze;play;q/quit;restar;restor;rise/stand;save;score;script;super/superb;t/time;tten;unscri;verbos;versio;advanc/crawl/hike/hop/procee/run/skip/step/tramp/trudge/walk off OBJ;affirm/aye/naw/nay/negati/no/nope/ok/okay/positi/sure/y/yes/yup OBJ;aftern/bye/farewe/goodby/greet/greeti/hello/hi/salute OBJ;aid/help/save OBJ;answer/reply/respon OBJ;answer/reply/respon to OBJ;ascend OBJ;ask/interr/query/questi/quiz OBJ;assaul/attack/fight/hit/hurt/injure/kill/murder/punch/slap/slay/stab/strike/whack/wound OBJ;awake/awaken/rouse/startl/surpri/wake OBJ;awake/awaken/rouse/startl/surpri/wake up OBJ;bathe/swim OBJ;bathe/swim in OBJ;bathe/swim to OBJ;bite OBJ;blow OBJ;blow in OBJ;blow out OBJ;blow throug OBJ;blow up OBJ;board/embark OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck in OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck throug OBJ;brush/clean/sweep OBJ;brush/clean/sweep off OBJ;buy OBJ;call/dial/phone OBJ;call/dial/phone up OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take off OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take out OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take up OBJ;chase/follow/pursue OBJ;check/descri/examin/inspec/observ/see/study/survey/watch OBJ;check/descri/examin/inspec/observ/see/study/survey/watch/gaze/l/look/peek/peer/stare in OBJ;check/descri/examin/inspec/observ/see/study/survey/watch/gaze/l/look/peek/peer/stare on OBJ;check/descri/examin/inspec/observ/see/study/survey/watch/gaze/l/look/peek/peer/stare/frisk/ransac/rummag/search for 
OBJ;chuck/fling/hurl/pitch/throw/toss OBJ;climb/scale OBJ;climb/scale over OBJ;climb/scale throug OBJ;climb/scale under OBJ;climb/scale/carry/catch/confis/get/grab/hold/seize/snatch/steal/take in OBJ;climb/scale/carry/catch/confis/get/grab/hold/seize/snatch/steal/take on OBJ;climb/scale/go/advanc/crawl/hike/hop/procee/run/skip/step/tramp/trudge/walk down OBJ;climb/scale/go/advanc/crawl/hike/hop/procee/run/skip/step/tramp/trudge/walk up OBJ;climb/scale/go/dive/jump/leap/advanc/crawl/hike/hop/procee/run/skip/step/tramp/trudge/walk out OBJ;close/shut/slam OBJ;concea/hide OBJ;concea/hide behind OBJ;concea/hide under OBJ;consum/devour/eat/gobble/nibble/swallo OBJ;count/tally OBJ;cross/traver OBJ;crush/squash/squeez OBJ;debark/disemb OBJ;defile/molest/rape OBJ;depart/exit/scram/withdr OBJ;descen OBJ;detach/free/unatta/undo/unfast/unhook/untie OBJ;dig/excava at OBJ;dig/excava in OBJ;dig/excava throug OBJ;dig/excava up OBJ;dig/excava with OBJ;distur/feel/pat/pet/rub/touch OBJ;dive/jump/leap OBJ;dive/jump/leap from OBJ;dive/jump/leap off OBJ;dive/jump/leap over OBJ;doff/remove/shed OBJ;don/wear OBJ;douse/exting/quench/snuff OBJ;drag/pull/shove/tug/yank OBJ;drag/pull/shove/tug/yank down OBJ;drag/pull/shove/tug/yank on OBJ;drag/pull/shove/tug/yank up OBJ;drink/guzzle/sip OBJ;drink/guzzle/sip from OBJ;drop/dump OBJ;elevat/hoist/lift/raise OBJ;elevat/hoist/lift/raise up OBJ;employ/exploi/use OBJ;empty OBJ;empty out OBJ;enter OBJ;face/flip/rotate/set/spin/turn/twist/whirl OBJ;face/flip/rotate/set/spin/turn/twist/whirl off OBJ;face/flip/rotate/set/spin/turn/twist/whirl on OBJ;fill OBJ;find/seek OBJ;fire/shoot OBJ;flush OBJ;fly OBJ;fly on OBJ;fly with OBJ;focus OBJ;force/insert/lay/load/place/put/stash/stuff/thread/wedge down OBJ;force/insert/lay/load/place/put/stash/stuff/thread/wedge on OBJ;force/insert/lay/load/place/put/stash/stuff/thread/wedge out OBJ;frisk/ransac/rummag/search OBJ;frisk/ransac/rummag/search in OBJ;gaze/l/look/peek/peer/stare OBJ;gaze/l/look/peek/peer/stare 
around OBJ;gaze/l/look/peek/peer/stare at OBJ;gaze/l/look/peek/peer/stare behind OBJ;gaze/l/look/peek/peer/stare down OBJ;gaze/l/look/peek/peer/stare out OBJ;gaze/l/look/peek/peer/stare throug OBJ;gaze/l/look/peek/peer/stare up OBJ;gaze/l/look/peek/peer/stare/frisk/ransac/rummag/search under OBJ;go/advanc/crawl/hike/hop/procee/run/skip/step/tramp/trudge/walk OBJ;go/advanc/crawl/hike/hop/procee/run/skip/step/tramp/trudge/walk around OBJ;go/advanc/crawl/hike/hop/procee/run/skip/step/tramp/trudge/walk behind OBJ;go/advanc/crawl/hike/hop/procee/run/skip/step/tramp/trudge/walk over OBJ;go/advanc/crawl/hike/hop/procee/run/skip/step/tramp/trudge/walk throug OBJ;go/advanc/crawl/hike/hop/procee/run/skip/step/tramp/trudge/walk to OBJ;go/advanc/crawl/hike/hop/procee/run/skip/step/tramp/trudge/walk under OBJ;go/dive/jump/leap/advanc/crawl/hike/hop/procee/run/skip/step/tramp/trudge/walk in OBJ;grin/laugh/nod/smile/sneer/wave at OBJ;grin/laugh/nod/smile/sneer/wave to OBJ;hang OBJ;hang up OBJ;hear OBJ;howl/scream/shout/yell OBJ;howl/scream/shout/yell at OBJ;howl/scream/shout/yell to OBJ;ignite/light OBJ;jostle/rattle/shake OBJ;kick OBJ;kick down OBJ;kick in OBJ;kiss/smooch OBJ;knock/pound/rap at OBJ;knock/pound/rap down OBJ;knock/pound/rap on OBJ;leave OBJ;let go OBJ;lie/reclin down OBJ;lie/reclin in OBJ;lie/reclin on OBJ;listen OBJ;listen for OBJ;listen in OBJ;listen to OBJ;lock OBJ;loiter/wait/z for OBJ;lower OBJ;move/roll/shift OBJ;move/roll/shift up OBJ;nap/rest/sleep/snooze in OBJ;nap/rest/sleep/snooze on OBJ;nudge/press/push/ring/thrust OBJ;nudge/press/push/ring/thrust down OBJ;nudge/press/push/ring/thrust on OBJ;nudge/press/push/ring/thrust up OBJ;open/pry/unseal OBJ;open/pry/unseal up OBJ;pick OBJ;pick up OBJ;play OBJ;reach in OBJ;read/skim OBJ;releas OBJ;replac OBJ;ride OBJ;ride in OBJ;ride on OBJ;rise/stand under OBJ;rise/stand up OBJ;rise/stand/dive/jump/leap/advanc/crawl/hike/hop/procee/run/skip/step/tramp/trudge/walk on OBJ;say/speak/talk/utter OBJ;scrape off 
OBJ;sit/squat OBJ;sit/squat at OBJ;sit/squat down OBJ;sit/squat in OBJ;sit/squat on OBJ;ski OBJ;ski down OBJ;smell/sniff OBJ;splice OBJ;swing OBJ;swing on OBJ;taste OBJ;tell OBJ;unlock OBJ;unroll OBJ;ask/interr/query/questi/quiz OBJ about OBJ;ask/interr/query/questi/quiz OBJ for OBJ;assaul/attack/fight/hit/hurt/injure/kill/murder/punch/slap/slay/stab/strike/whack/wound OBJ with OBJ;attach/fasten/hook/secure/tie OBJ to OBJ;attach/fasten/hook/secure/tie up OBJ with OBJ;bestow/delive/donate/give/hand/offer/presen OBJ OBJ;bestow/delive/donate/give/hand/offer/presen OBJ to OBJ;blind/jab/poke OBJ with OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck OBJ off OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck OBJ with OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck down OBJ with OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck in OBJ with OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck throug OBJ with OBJ;burn OBJ with OBJ;burn down OBJ with OBJ;buy OBJ with OBJ;call/dial/phone OBJ on OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take OBJ from OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take OBJ in OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take OBJ off OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take OBJ on OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take OBJ out OBJ;check/descri/examin/inspec/observ/see/study/survey/watch OBJ throug OBJ;check/descri/examin/inspec/observ/see/study/survey/watch OBJ with OBJ;chuck/fling/hurl/pitch/throw/toss OBJ at OBJ;chuck/fling/hurl/pitch/throw/toss OBJ down OBJ;chuck/fling/hurl/pitch/throw/toss OBJ in OBJ;chuck/fling/hurl/pitch/throw/toss OBJ off OBJ;chuck/fling/hurl/pitch/throw/toss OBJ on OBJ;chuck/fling/hurl/pitch/throw/toss OBJ out OBJ;chuck/fling/hurl/pitch/throw/toss OBJ over OBJ;chuck/fling/hurl/pitch/throw/toss OBJ throug OBJ;chuck/fling/hurl/pitch/throw/toss OBJ to OBJ;clip/cut/slash OBJ with OBJ;clip/cut/slash 
throug OBJ with OBJ;compar OBJ to OBJ;concea/hide OBJ behind OBJ;concea/hide OBJ from OBJ;concea/hide OBJ under OBJ;cover OBJ with OBJ;crush/squash/squeez OBJ on OBJ;detach/free/unatta/undo/unfast/unhook/untie OBJ from OBJ;dig/excava OBJ in OBJ;dig/excava OBJ with OBJ;dig/excava in OBJ with OBJ;distur/feel/pat/pet/rub/touch OBJ with OBJ;doff/remove/shed OBJ from OBJ;drag/pull/shove/tug/yank OBJ out OBJ;drip/pour/spill/sprink OBJ from OBJ;drip/pour/spill/sprink OBJ in OBJ;drip/pour/spill/sprink OBJ on OBJ;drip/pour/spill/sprink out OBJ in OBJ;drop/dump OBJ down OBJ;drop/dump OBJ in OBJ;drop/dump OBJ on OBJ;empty OBJ from OBJ;empty OBJ out OBJ;face/flip/rotate/set/spin/turn/twist/whirl OBJ OBJ;face/flip/rotate/set/spin/turn/twist/whirl OBJ left OBJ;face/flip/rotate/set/spin/turn/twist/whirl OBJ right OBJ;face/flip/rotate/set/spin/turn/twist/whirl OBJ to OBJ;face/flip/rotate/set/spin/turn/twist/whirl OBJ with OBJ;feed OBJ OBJ;feed OBJ to OBJ;feed OBJ with OBJ;fill OBJ with OBJ;fire/shoot OBJ at OBJ;fire/shoot OBJ with OBJ;force/insert/lay/load/place/put/stash/stuff/thread/wedge OBJ behind OBJ;force/insert/lay/load/place/put/stash/stuff/thread/wedge OBJ down OBJ;force/insert/lay/load/place/put/stash/stuff/thread/wedge OBJ in OBJ;force/insert/lay/load/place/put/stash/stuff/thread/wedge OBJ on OBJ;force/insert/lay/load/place/put/stash/stuff/thread/wedge OBJ over OBJ;force/insert/lay/load/place/put/stash/stuff/thread/wedge OBJ under OBJ;gaze/l/look/peek/peer/stare at OBJ throug OBJ;hang OBJ from OBJ;hang OBJ on OBJ;hang up OBJ from OBJ;hang up OBJ on OBJ;ignite/light OBJ with OBJ;leave OBJ in OBJ;leave OBJ on OBJ;lock OBJ with OBJ;move/roll/shift OBJ down OBJ;nudge/press/push/ring/thrust OBJ down OBJ;nudge/press/push/ring/thrust OBJ under OBJ;nudge/press/push/ring/thrust OBJ up OBJ;nudge/press/push/ring/thrust/move/roll/shift OBJ OBJ;nudge/press/push/ring/thrust/move/roll/shift OBJ to OBJ;open/pry/unseal OBJ with OBJ;pick OBJ with OBJ;play OBJ on OBJ;read/skim OBJ throug 
OBJ;read/skim OBJ to OBJ;scrape OBJ off OBJ;show OBJ OBJ;show OBJ to OBJ;splice OBJ with OBJ;swing OBJ at OBJ;tell OBJ about OBJ;unlock OBJ with OBJ;",
"max_word_length" : 6
}
# Metadata binding for the game "Hunter, in Darkness" (huntdark).
huntdark = {
# Short identifier used to refer to this game.
"name": "huntdark",
# Z-machine story file for this game.
"rom": "huntdark.z5",
# RNG seed so scripted playthroughs are reproducible.
"seed" : 0,
# Walkthrough adapted from http://mirror.ifarchive.org/if-archive/solutions/huntdark.sol
# "/"-separated sequence of commands that completes the game.
"walkthrough": "cock crossbow/shoot prey/left/x shape/take rags/bandage arm with rags/down/ahead/ahead/ahead/ahead/ahead/ahead/ahead/ahead/ahead/ahead/ahead/ahead/wait/wait/wait/wait/wait/wait/x bats/up/wait/wait/wait/wait/wait/x bats/ahead/wait/wait/wait/wait/wait/x bats/left/wait/wait/wait/x bats/up/wait/wait/x bats/right/wait/wait/x bats/up/x pool/remove bandage/wait/wait/wait/wait/wait/ahead/wait/wait/wait/ahead/up",
# Action templates accepted by the parser, ";"-separated; "/" separates verb
# synonyms and OBJ marks an object slot to be filled in.
"grammar" : "about/info;awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;breathe/inhale/smell/sniff;brief/normal;carry/hold/take inventory;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;get out/off/up;hear/listen;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;leave/go/run/walk;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;pray;rest/wait/z;restart;restore;rotate/screw/turn/twist/unscrew around;save;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;sing;sorry;stand up;swing;think;verify;version;wave;y/yes;adjust/set OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;breathe/inhale/smell/sniff OBJ;burn OBJ;buy/purchase OBJ;carry/hold/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;climb/scale OBJ;climb/scale in/into OBJ;climb/scale up/over/on/onto OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cock OBJ;cross/enter OBJ;damn/fuck/shit/sod OBJ;detach/unfasten/untie OBJ;dig OBJ;discard/drop/throw OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;fill OBJ;fix/repair OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip OBJ;hop/jump/skip in/into OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave/go/run/walk into/in/inside/through OBJ;let OBJ go;let go OBJ;let go of OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;light/switch OBJ;load/reload OBJ;move/clear/press/push/shift OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x 
OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;shoot OBJ;shoot/fire at OBJ;squash/squeeze OBJ;stand on OBJ;swing OBJ;swing on/from OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;tap/feel/fondle/grope/touch OBJ;taste OBJ;uncock OBJ;unstrap/disrobe/doff/shed/remove OBJ;wave OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;bandage OBJ with OBJ;carry/hold/take OBJ off OBJ;consult OBJ about OBJ;consult OBJ on OBJ;detach/unfasten/untie OBJ from OBJ;dig OBJ with OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;fix/attach/fasten/tie OBJ to/onto/around OBJ;insert OBJ in/into OBJ;load/reload OBJ in/into OBJ;load/reload OBJ with OBJ;lock OBJ with OBJ;move/clear/press/push/shift OBJ OBJ;move/clear/press/push/shift/transfer OBJ to OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/hold/take OBJ from OBJ;shoot OBJ with OBJ;shoot/fire OBJ at OBJ;strap OBJ to/on/onto/around OBJ;tell OBJ about OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
# Parser's maximum significant word length; grammar verbs above are
# truncated to at most this many characters (e.g. "transcrip", "superbrie").
"max_word_length" : 9
}
# Metadata binding for the Infocom game "Infidel".
infidel = {
# Short identifier used to refer to this game.
"name": "infidel",
# Z-machine story file for this game (version-3 format).
"rom": "infidel.z3",
# RNG seed so scripted playthroughs are reproducible.
"seed" : 0,
# Walkthrough adapted from http://mirror.ifarchive.org/if-archive/solutions/jgunness.zip
# "/"-separated sequence of commands that completes the game.
"walkthrough": "get out of cot/s/sw/w/get pickaxe/get shovel/e/se/s/read note/get knapsack/n/n/get matchbook/n/n/smash padlock with pickaxe/remove padlock/drop padlock/open trunk/get beef/get map/eat beef/s/w/w/drink water/drop knapsack/get canteen/open canteen/fill canteen/close canteen/put canteen in knapsack/get knapsack/e/e/e/se/se/dig in sand/dig in sand/dig in sand/dig in sand/dig in sand/open map/insert cube in opening/d/drop map/open jar/get torch/dip torch in liquid/close jar/drop knapsack/put jar in knapsack/open matchbook/get match/close matchbook/strike match/light torch/s/s/ne/nw/w/n/e/n/n/n/n/n/w/s/get golden chalice/n/e/e/s/get silver chalice/n/w/s/s/s/s/s/w/s/e/n/w/get scroll/e/e/d/w/examine slot/put torch in golden chalice/remove shim/drop all/raise beam/get all/drop shovel/drop shim/e/u/w/get beam/s/se/sw/n/n/put golden chalice in knapsack/put silver chalice in knapsack/get rope/get torch/tie rope to altar/throw rope north/get knapsack/n/push statue/get head/roll statue sw/drop knapsack/ne/ne/ne/get ruby cluster/read hieroglyphs/sw/sw/get golden cluster/sw/put golden cluster in knapsack/put ruby cluster in knapsack/get all/roll statue ne/roll statue ne/drop all/get torch/sw/sw/sw/get emerald cluster/read hieroglyphs/ne/ne/ne/put emerald cluster in knapsack/get all/roll statue sw/roll statue nw/drop all/get torch/se/se/se/get opal cluster/read hieroglyphs/nw/nw/nw/put opal cluster in knapsack/get all/roll statue se/roll statue se/drop all/get torch/nw/nw/nw/get diamond cluster/read hieroglyphs/se/se/se/get all/drop head/nw/u/w/s/e/read scroll/remove first brick/drop first brick/remove third brick/drop third brick/remove fifth brick/drop fifth brick/drop scroll/e/n/d/hit plaster with pickaxe/w/w/w/insert beam in niches/stand on beam/hit plaster with pickaxe/open door/w/get beam/s/put beam across doorway/open door/w/put diamond cluster in first hole/drop knapsack/get jar/open jar/pour liquid on torch/put jar in knapsack/get opal/get ruby/get 
emerald/insert ruby cluster in second hole/insert emerald cluster in third hole/insert opal cluster in fourth hole/open slab/get book/get knapsack/e/get beam/n/n/put beam under lintel/break seal with pickaxe/open door/n/put book in large recess/e/drop knapsack/get canteen/get silver chalice/get golden chalice/put golden chalice on left disc/put silver chalice on right disc/open canteen/pour water in silver chalice/get scarab/w/put scarab in small recess/turn neith/turn selkis/turn isis/turn nephthys/open quartz cover",
# Action templates accepted by the parser, ";"-separated; "/" separates verb
# synonyms and OBJ marks an object slot to be filled in.
"grammar" : "again/g;answer/reply;back;barf/chomp/lose;bathe/swim/wade;blast;brief;bug;chant/incant;chase/come/follow/pursue;curse/damn/fuck/hell/shit;dive/jump/leap;enter;exit;gaze/l/look/stare;hello/hi;help;hop/skip;i/invent;leave;lie/rest/sleep;mumble/sigh;plugh/xyzzy;pray;q/quit;repent;restar;restor;save;say/talk;score;scream/shout/yell;script;stand;stay;super/superb;time;unscri;verbos;versio;wait/z;win/winnag;zork;answer/reply OBJ;ask/tell OBJ;awake/startl/surpri/wake OBJ;awake/startl/surpri/wake up OBJ;banish/begone/cast/drive/exorci OBJ;banish/begone/cast/drive/exorci away OBJ;banish/begone/cast/drive/exorci out OBJ;bathe/swim/wade in OBJ;bite/chew/consum/eat OBJ;blow out OBJ;blow up OBJ;board OBJ;brandi/signal/wave OBJ;brandi/signal/wave at OBJ;brandi/signal/wave to OBJ;carry/get/grab/hold/remove/take OBJ;carry/get/grab/hold/remove/take off OBJ;carry/get/grab/hold/remove/take out OBJ;chase/come/follow/pursue OBJ;chuck/hurl/throw/toss OBJ;clean/polish/scrub OBJ;climb OBJ;climb/carry/get/grab/hold/remove/take in OBJ;climb/carry/get/grab/hold/remove/take on OBJ;climb/go/procee/run/step/walk down OBJ;climb/go/procee/run/step/walk up OBJ;close OBJ;compar OBJ;count OBJ;cross/ford OBJ;curse/damn/fuck/hell/shit OBJ;deflat OBJ;descri/examin/what/whats OBJ;descri/examin/what/whats/gaze/l/look/stare in OBJ;descri/examin/what/whats/gaze/l/look/stare on OBJ;dig in OBJ;dig with OBJ;disemb OBJ;dive/jump/leap OBJ;dive/jump/leap across OBJ;dive/jump/leap from OBJ;dive/jump/leap in OBJ;dive/jump/leap off OBJ;dive/jump/leap/go/procee/run/step/walk over OBJ;douse/exting OBJ;drink/imbibe/sip/swallo OBJ;drink/imbibe/sip/swallo from OBJ;drop/releas OBJ;empty/shake OBJ;enter OBJ;exit OBJ;feel/pat/pet/rub/touch OBJ;fill OBJ;fill in OBJ;fill out OBJ;find/see/seek/where OBJ;flip/set/shut/turn OBJ;flip/set/shut/turn off OBJ;flip/set/shut/turn on OBJ;flip/set/shut/turn over OBJ;fold OBJ;fold up OBJ;free/unatta/unfast/unhook/untie OBJ;gaze/l/look/stare OBJ;gaze/l/look/stare around 
OBJ;gaze/l/look/stare at OBJ;gaze/l/look/stare behind OBJ;gaze/l/look/stare down OBJ;gaze/l/look/stare throug OBJ;gaze/l/look/stare under OBJ;gaze/l/look/stare up OBJ;gaze/l/look/stare with OBJ;gaze/l/look/stare/search for OBJ;go/procee/run/step/walk OBJ;go/procee/run/step/walk around OBJ;go/procee/run/step/walk in OBJ;go/procee/run/step/walk on OBJ;go/procee/run/step/walk to OBJ;go/procee/run/step/walk with OBJ;hello/hi OBJ;help OBJ;hide/insert/place/put/stuff/wedge down OBJ;hide/insert/place/put/stuff/wedge on OBJ;kick OBJ;kiss OBJ;knock/rap at OBJ;knock/rap down OBJ;knock/rap on OBJ;knock/rap over OBJ;launch OBJ;lean on OBJ;leave OBJ;lie/rest/sleep down OBJ;lie/rest/sleep in OBJ;lie/rest/sleep on OBJ;lift/raise OBJ;light OBJ;listen for OBJ;listen to OBJ;lower OBJ;make OBJ;molest/rape OBJ;move/pull/tug/yank OBJ;open OBJ;open up OBJ;pick OBJ;pick up OBJ;play OBJ;pour/spill OBJ;pray for OBJ;press/push OBJ;press/push on OBJ;press/push/lift/raise up OBJ;pull/tug/yank on OBJ;pull/tug/yank up OBJ;pump up OBJ;reach in OBJ;read/skim OBJ;read/skim in OBJ;roll OBJ;roll up OBJ;say/talk to OBJ;search OBJ;search in OBJ;send OBJ;send for OBJ;sit on OBJ;slide OBJ;smell/sniff OBJ;smoke OBJ;spin OBJ;squeez OBJ;stand on OBJ;stand/carry/get/grab/hold/remove/take under OBJ;stand/carry/get/grab/hold/remove/take up OBJ;strike OBJ;swing/thrust OBJ;taste OBJ;unfold OBJ;wear OBJ;weigh OBJ;wind OBJ;wind up OBJ;apply OBJ to OBJ;attach/fasten/secure/tie OBJ around OBJ;attach/fasten/secure/tie OBJ to OBJ;attach/fasten/secure/tie up OBJ with OBJ;attack/fight/hurt/injure OBJ with OBJ;blind/jab/poke OBJ with OBJ;blow up OBJ with OBJ;brandi/signal/wave OBJ at OBJ;break/chip/chop/damage/destro/hit/smash OBJ with OBJ;break/chip/chop/damage/destro/hit/smash down OBJ with OBJ;break/chip/chop/damage/destro/hit/smash in OBJ with OBJ;burn/ignite/incine OBJ with OBJ;burn/ignite/incine down OBJ with OBJ;carry/get/grab/hold/remove/take OBJ from OBJ;carry/get/grab/hold/remove/take OBJ of 
OBJ;carry/get/grab/hold/remove/take OBJ off OBJ;carry/get/grab/hold/remove/take OBJ out OBJ;carry/get/grab/hold/remove/take OBJ with OBJ;chuck/hurl/throw/toss OBJ OBJ;chuck/hurl/throw/toss OBJ at OBJ;chuck/hurl/throw/toss OBJ down OBJ;chuck/hurl/throw/toss OBJ in OBJ;chuck/hurl/throw/toss OBJ off OBJ;chuck/hurl/throw/toss OBJ on OBJ;chuck/hurl/throw/toss OBJ over OBJ;chuck/hurl/throw/toss OBJ to OBJ;chuck/hurl/throw/toss OBJ with OBJ;clear OBJ from OBJ;compar OBJ to OBJ;compar OBJ with OBJ;cut/pierce/slice OBJ with OBJ;dig OBJ in OBJ;dig OBJ with OBJ;dig in OBJ with OBJ;dip/dunk/immers/submer OBJ in OBJ;dispat/kill/murder/slay/stab OBJ with OBJ;donate/feed/give/hand/offer OBJ OBJ;donate/feed/give/hand/offer OBJ to OBJ;drink/imbibe/sip/swallo OBJ from OBJ;drink/imbibe/sip/swallo OBJ in OBJ;drop/releas OBJ down OBJ;drop/releas OBJ in OBJ;drop/releas OBJ on OBJ;feel/pat/pet/rub/touch OBJ with OBJ;fill OBJ with OBJ;fix/glue/patch/plug/repair OBJ with OBJ;flip/set/shut/turn OBJ for OBJ;flip/set/shut/turn OBJ to OBJ;flip/set/shut/turn OBJ with OBJ;free/unatta/unfast/unhook/untie OBJ from OBJ;gaze/l/look/stare at OBJ with OBJ;grease/lubric OBJ with OBJ;hide/insert/place/put/stuff/wedge OBJ across OBJ;hide/insert/place/put/stuff/wedge OBJ agains OBJ;hide/insert/place/put/stuff/wedge OBJ at OBJ;hide/insert/place/put/stuff/wedge OBJ behind OBJ;hide/insert/place/put/stuff/wedge OBJ betwee OBJ;hide/insert/place/put/stuff/wedge OBJ by OBJ;hide/insert/place/put/stuff/wedge OBJ in OBJ;hide/insert/place/put/stuff/wedge OBJ on OBJ;hide/insert/place/put/stuff/wedge OBJ over OBJ;hide/insert/place/put/stuff/wedge OBJ under OBJ;inflat OBJ with OBJ;is OBJ in OBJ;is OBJ on OBJ;light OBJ with OBJ;liquif/melt OBJ with OBJ;lock OBJ with OBJ;lower OBJ down OBJ;move/roll/pull/tug/yank/press/push/slide OBJ OBJ;open OBJ with OBJ;pick OBJ with OBJ;pour/spill OBJ from OBJ;pour/spill OBJ in OBJ;pour/spill OBJ on OBJ;press/push OBJ throug OBJ;press/push OBJ under OBJ;pull/tug/yank OBJ throug 
OBJ;pump up OBJ with OBJ;read/skim OBJ with OBJ;roll/press/push/slide OBJ to OBJ;slide OBJ under OBJ;spray OBJ on OBJ;spray OBJ with OBJ;squeez OBJ on OBJ;strike OBJ with OBJ;swing/thrust OBJ at OBJ;unlock OBJ with OBJ;wet OBJ with OBJ;",
# Parser's maximum significant word length; grammar verbs above are
# truncated to at most this many characters (e.g. "restar", "verbos").
"max_word_length" : 6
}
# Metadata binding for the game "Inhumane".
inhumane = {
# Short identifier used to refer to this game.
"name": "inhumane",
# Z-machine story file for this game.
"rom": "inhumane.z5",
# RNG seed so scripted playthroughs are reproducible.
"seed" : 0,
# "/"-separated sequence of commands that completes the game.
"walkthrough" : "get amulet/s/w/get torch/get shovel/e/e/e/e/e/n/dig/put amulet in slot/x note/s/w/w/w/w/s/open toilet/d/push button/w/s/s/take pebble/put pebble on second disk/take cube/north/north/e/e/e/e/open door/e/e/e/take necklace/n/n/u/n/n/n/n/n/n/n/n/e/n/open door/n/n/push button/w/w/z/z/z/w/z/z/z/w/n/get key/n/get figure/s/s/s/get sphere/n/z/z/z/z/z/z/e/z/z/s/w/turn crank/e/get diamond/w/turn crank/e/e/n/n/take ruby/s/s/open door/s/close door/take needle/open chest with needle/take dorkmid/open door/e/s/jump/get key/n/e/e/unlock door with iron key/open door/e/s/push coffin n/push coffin w/push coffin w/push coffin w/push coffin s",
# Action templates accepted by the parser, ";"-separated; "/" separates verb
# synonyms and OBJ marks an object slot to be filled in.
"grammar" : "about/rating/help;awake/awaken/wake;awake/awaken/wake up;brief/normal;carry/get/hold/take inventory;carry/get/hold/take off;carry/get/hold/take out;close/cover/shut up;die/q/quit;dig;dive/swim;exit/out/outside/stand;full/fullscore;go/leave/run/walk;hear/listen;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;pray;restart;restore;save;score;script;script off;script on;short/superbrie;sing;smell/sniff;stand/carry/get/hold/take up;think;verify;version;wait/z;wave;y/yes;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake up OBJ;blow OBJ;burn/light OBJ;buy/purchase OBJ;carry/get/hold/take OBJ;carry/get/hold/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale over OBJ;climb/scale up OBJ;close/cover/shut OBJ;cross/enter OBJ;dig with OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;go/leave/run/walk OBJ;go/leave/run/walk into OBJ;go/leave/run/walk through OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look in OBJ;l/look inside OBJ;l/look through OBJ;l/look under OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;sit/go/leave/run/walk inside OBJ;sit/go/leave/run/walk/carry/get/hold/take in OBJ;sit/stand/carry/get/hold/take on OBJ;smell/sniff OBJ;squash/squeeze OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ 
on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;wave OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/get/hold/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;discard/drop/throw OBJ against OBJ;discard/drop/throw OBJ at OBJ;discard/drop/throw OBJ down OBJ;discard/drop/throw/insert/put OBJ in OBJ;discard/drop/throw/insert/put OBJ into OBJ;discard/drop/throw/put OBJ on OBJ;discard/drop/throw/put OBJ onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ into OBJ;empty OBJ on OBJ;empty OBJ onto OBJ;empty OBJ to OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;lock OBJ with OBJ;put OBJ inside OBJ;remove/carry/get/hold/take OBJ from OBJ;tell OBJ about OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
# Parser's maximum significant word length; grammar verbs above are
# truncated to at most this many characters (e.g. "superbrie").
"max_word_length" : 9
}
# Metadata binding for the game "The Jewel of Knowledge" (jewel).
jewel = {
# Short identifier used to refer to this game.
"name": "jewel",
# Z-machine story file for this game.
"rom": "jewel.z5",
# RNG seed so scripted playthroughs are reproducible.
"seed" : 0,
# "/"-separated sequence of commands that completes the game.
"walkthrough" : "i/drop all/w/nw/x minerals/pull quartz/u/u/get book/x gaping/x outcrop/x insect/throw book at outcrop/d/d/se/w/w/n/get moss/x moss/smell/smell dirty floor/clean dirty floor with moss/open trapdoor/d/get lye/u/s/s/x skeleton/put lye on moss/clean skeleton with moss/get bladder/get crampons/n/e/e/nw/w/w/x porous wall/wear crampons/climb porous wall/u/u/e/x ariana/x boots/search boots/get lockpick/get crossbow/x crossbow/w/d/d/d/put bladder in geyser/z/z/z/look/drop crampons/get bladder/x bladder/read label/w/search refuse/get axe/e/e/e/se/w/w/w/x block/chop ice with axe/chop ice with axe/chop ice with axe/chop ice with axe/chop ice with axe/drop axe/s/se/x dragon/look in mouth/open claws/get coat/wear coat/s/get treasure/n/nw/n/e/n/n/x fungus/x mushroom/x crossbow/shoot mushroom with crossbow/get arrow/put arrow in crossbow/shoot mushroom with crossbow/put arrow in crossbow/shoot mushroom with crossbow/put arrow in crossbow/s/s/e/e/e/get all/x body/get sack/open sack/wear sack/get salt/x salt/rub bug/light salt with bug/smell smoke/i/read book/ask allarah about white/ask allarah about black/ask allarah about red/ask allarah about jewel/look/touch mist/put bug in sack/put bladder in sack/ne/e/touch flower/ne/rub flower/s/squeeze flower/w/pull flower/n/cut flower/e/x door/unlock door with key/unlock lock with lockpick/drop key/drop lockpick/e/ask dragon about trinket/ask dragon about white/ask dragon about black/ask dragon about red/ask dragon about jewel/read book/x lye/x coat/put lye in coat/z/z/z/z/z/attack dragon with sword/se/push boulders/nw/show crossbow to dragon/dragon, give ring to me/i/wear ring/dragon, open door/ask dragon about jewel/sit on dragon/z/z/z/z/z/z/z/z/z/z/w/z/z/z/z/z/z/z/z/w/remove ring/put ring, book in sack/get bladder/x chandelier/shoot rope with crossbow/put crossbow in sack/look/get embers/put embers in coat/n/z/z/z/z/z/z/n/x murals/n/x murals/n/x murals/n/x mirror/x pedestal/look under cushion/touch mirror",
# Action templates accepted by the parser, ";"-separated; "/" separates verb
# synonyms and OBJ marks an object slot to be filled in.
"grammar" : "about/help/info;awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;bypass;carry/hold/take inventory;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;get out/off/up;hear/listen;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;inhale/smell/smoke/sniff;l/look;leave/go/run/walk;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;restart;restore;save;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;sing;sorry;stand up;think;verify;version;wait/z;wave;xyzzy;y/yes;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kick/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;blow OBJ;blow up OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/hold/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale on OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cross/enter/go/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;inhale/smell/smoke/sniff OBJ;knock on OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;pour/put OBJ down;pour/put down OBJ;pour/put on OBJ;read/check/describe/examine/watch/x 
OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;squash/squeeze OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;wave OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;attack/break/crack/destroy/fight/hit/kick/kill/murder/punch/smash/thump/torture/wreck OBJ with OBJ;burn/light OBJ with OBJ;carry/hold/take OBJ off OBJ;chop/cut/prune/slice OBJ with OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ with OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;throw OBJ at OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/pour/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;insert/load OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;pour/put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/hold/take OBJ from OBJ;shoot OBJ at OBJ;shoot OBJ with OBJ;tell OBJ about OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
# Parser's maximum significant word length; grammar verbs above are
# truncated to at most this many characters (e.g. "transcrip", "superbrie").
"max_word_length" : 9
}
# Metadata binding for the game "Return to Karn" (karn).
karn = {
# Short identifier used to refer to this game.
"name": "karn",
# Z-machine story file for this game.
"rom": "karn.z5",
# RNG seed so scripted playthroughs are reproducible.
"seed" : 0,
# "/"-separated sequence of commands that completes the game.
"walkthrough" : "i/remove jacket/drop jacket/drop yo-yo/drop key/e/open door/s/get toolkit/open toolkit/get screwdriver/get homing/x homing/turn on homing/open cupboard/get pouch/open pouch/put pouch in toolkit/n/w/x scanner/pull lever/w/nw/ne/e/u/search ruins/get scalpel/put scalpel in toolkit/d/ne/nw/w/x creature/x globe/e/se/sw/w/sw/se/e/se/e/n/read sign/take off scarf/tie scarf to signpost/climb scarf/open hatch with screwdriver/get battery/put homing in hatch/close hatch/d/put screwdriver in toolkit/get battery/nw/x tree/climb tree/get branch/w/w/sw/w/sw/se/e/se/e/fill battery/n/get scarf/s/w/nw/w/enter tardis/drop scarf/e/e/get match/put match in toolkit/n/x k9/put battery in panel/close panel/s/w/w/x console/open drawer/get link/ask k9 about k9/k9, follow me/w/k9, shoot boulder/get stone/w/get firework/put firework in toolkit/s/open tapestry/w/put link in chalice/open shield/put stone in fire/close shield/get link/close link/e/close tapestry/n/e/nw/w/s/e/push boulder/put branch under boulder/pull branch/get branch/w/n/e/se/k9, follow me/enter tardis/put link in drawer/close drawer/x console/x slot/get brown/drop brown/get gold/put gold in slot/z/z/z/z/z/z/z/z/z/z/z/z/z/pull lever/push button/x scanner/pull lever/get gold/put gold in toolkit/k9, follow me/w/open hatch/get torch/get mine/put all in toolkit/n/w/ask k9 about door/x bubble/drop mine/drop branch/e/s/d/turn on torch/nw/x plate/open catch/ne/n/open hatch/turn off torch/u/e/s/open door/e/w/close door/turn wheel/x bubble/pull lever/pull lever/push button/turn wheel/open door/e/open hatch/d/turn on torch/n/w/x plate/nw/open hatch/u/open door/turn off torch/x left screen/push left button/x right screen/push right button/e/s/s/s/e/w/w/w/get screwdriver/get scalpel/unlock box with screwdriver/open box/look up/cut green with scalpel/look up/turn on torch/close plate/se/turn off torch/put all in toolkit/u/k9, follow me/n/w/ask k9 about door/k9, shoot door/w/x control/push switch/ask k9 about reactor/ask k9 
about control/ask k9 about sequence/d/turn on torch/e/get branch/jam branch under plate/open catch/x mine/push button/w/turn off torch/u/push green/push blue/push yellow/push blue/ask k9 about reactor/ask k9 about sequence/push red/push yellow/push blue/push yellow/k9, follow me/e/get white/put white in toolkit/k9, follow me/turn on torch/ne/n/turn off torch/u/get torch/drop torch/get rope/d/tie rope to k9/u/put rope in unit/k9, follow me/e/s/s/w/s/push table n/push table e/push table n/push table n/push table n/push table e/x door/push table e/x door/get white/put white in slot/push table s/climb on table/x device/push button/exit/get match/get firework/light match/light firework/drop match/drop firework/n/w/w/z/z/e/e/get white/put white in unit/pull pipe/pull pipe/get gold/get pouch/put pouch under pipe/ask k9 about cybermen/put gold in unit/get pouch/w/n/get black/s/w/s/s/s/e/s/x door/put black in slot/w/x apparatus/x respirator/x vent/put dust in vent/drop pouch/get screwdriver/get scalpel/unlock box with screwdriver/open box/cut green with scalpel/open tank/get brain/e/n/w/w/s/enter tardis/put silver in slot/drop all but brain/pull lever/push button/x scanner/pull lever/w/give brain to timelord",
# Action templates accepted by the parser, ";"-separated; "/" separates verb
# synonyms and OBJ marks an object slot to be filled in.
"grammar" : "about;awake/awaken/wake;awake/awaken/wake up;backgroun;bother/curses/darn/drat;brief/normal;carry/get/hold/take inventory;carry/get/hold/take off;carry/get/hold/take out;close/cover/shut up;damn/fuck/shit/sod;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;go/leave/run/walk;hear/listen;help/instructi;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look/l/look;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;play;pray;restart;restore;save;score;script;script off;script on;short/superbrie;sing;smell/sniff;sorry;spin;stand/carry/get/hold/take up;stay;stay here;stay there;think;verify;version;wait/z;wave;y/yes;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blast/shoot OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/get/hold/take OBJ;carry/get/hold/take off OBJ;check/describe/examine/watch/x/l/look OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale down OBJ;climb/scale over OBJ;climb/scale/climb/scale up OBJ;climb/scale/lie/sit/stand/carry/get/hold/take on OBJ;close/cover/shut OBJ;cross/enter/go/leave/run/walk OBJ;damn/fuck/shit/sod OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;draw/slide OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;feel/fondle/grope/touch OBJ;fill OBJ;flip OBJ;flip/switch/rotate/screw/turn/twist/unscrew OBJ off;flip/switch/rotate/screw/turn/twist/unscrew OBJ on;flip/switch/rotate/screw/turn/twist/unscrew on OBJ;flip/switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;follow OBJ;go/leave/run/walk through 
OBJ;go/leave/run/walk/carry/get/hold/take into OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;knock on OBJ;l/look at OBJ;l/look in OBJ;l/look inside OBJ;l/look into OBJ;l/look on OBJ;l/look through OBJ;l/look under OBJ;lie/sit/go/leave/run/walk inside OBJ;lie/sit/go/leave/run/walk/carry/get/hold/take in OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;play with OBJ;put OBJ down;put down OBJ;put on OBJ;read OBJ;release OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;smell/sniff OBJ;spin OBJ;squash/squeeze OBJ;stay OBJ;strike OBJ;swing OBJ;swing on OBJ;switch OBJ;taste OBJ;untie OBJ;wave OBJ;adjust/set OBJ to OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/get/hold/take OBJ off OBJ;chop/cut/prune/slice OBJ with OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;dig OBJ with OBJ;discard/drop/throw OBJ against OBJ;discard/drop/throw OBJ at OBJ;discard/drop/throw OBJ down OBJ;discard/drop/throw/insert/put OBJ in OBJ;discard/drop/throw/insert/put OBJ into OBJ;discard/drop/throw/put OBJ on OBJ;discard/drop/throw/put OBJ onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ in OBJ;empty OBJ into OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;fill OBJ with OBJ;jam/wedge/prop OBJ against OBJ;jam/wedge/prop/put OBJ under OBJ;jam/wedge/prop/put OBJ underneat OBJ;lock OBJ with OBJ;pour OBJ in OBJ;pour OBJ into OBJ;prop OBJ upth OBJ;prop up OBJ with OBJ;put OBJ inside OBJ;remove/carry/get/hold/take OBJ from OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
# Parser's maximum significant word length; grammar verbs above are
# truncated to at most this many characters (e.g. "superbrie", "instructi").
"max_word_length" : 9
}
# Game entry for "Library": z-machine ROM filename, RNG seed, a scripted
# walkthrough (slash-separated commands), and the game's parser grammar
# (semicolon-separated action patterns; OBJ marks an object slot).
# Presumably consumed by a game/agent loader elsewhere in this file -- TODO confirm.
library = {
"name": "library",
"rom": "library.z5",
"seed" : 0,
"walkthrough" : "X security gates/X attendant/Read tag/Ask Alan about novel/Ask him about Nelson/Ask him about librarian/W/X librarian/Read tag/Ask Marion about Nelson/Ask her about rare books/Ask her about key/E/Ask Alan about key/I/Give card to Alan/W/N/X stairs/Smell/Look under stairs/U/X painting/Give fish to grue/S/Search shelves/Unlock door with key/Open door/S/Get book/N/Close door/Lock door with key/E/X technician/Ask technician about security gates/X printouts/W/N/D/S/Ask Marion about encyclopedia/Give encyclopedia to Marion/X shelves/X magazines/Xyzzy/E/Give key to Alan/Give novel to Alan/X technician/Ask technician about security gates/E",
"grammar" : "awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;carry/get/hold/take inventory;carry/get/hold/take off;carry/get/hold/take out;close/cover/shut up;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;go/leave/run/walk;hear/listen;help;help author;help changes;help comments;help credits;help ftp;help ftp.gmd;help gmd;help inform;help intro;help introduct;help licence;help license;help quotes;help xxx;hint/hints;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;info;l/look;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;quotes;quotes off;quotes on;restart;restore;save;score;script;script off;script on;short/superbrie;sing;smell/sniff;sorry;stand/carry/get/hold/take up;think;verify;version;wait/z;wave;xyzzy;y/yes;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/get/hold/take OBJ;carry/get/hold/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale over OBJ;climb/scale up OBJ;close/cover/shut OBJ;cross/enter/go/leave/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;disable OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;go/leave/run/walk through OBJ;go/leave/run/walk/carry/get/hold/take into OBJ;hear/listen OBJ;hear/listen to OBJ;help OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look in OBJ;l/look inside OBJ;l/look into OBJ;l/look through OBJ;l/look under OBJ;lie/sit/go/leave/run/walk inside 
OBJ;lie/sit/go/leave/run/walk/carry/get/hold/take in OBJ;lie/sit/stand/carry/get/hold/take on OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;repair OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;smell/sniff OBJ;squash/squeeze OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;wave OBJ;adjust/set OBJ to OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/get/hold/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;dig OBJ with OBJ;discard/drop/throw OBJ against OBJ;discard/drop/throw OBJ at OBJ;discard/drop/throw OBJ down OBJ;discard/drop/throw/insert/put OBJ in OBJ;discard/drop/throw/insert/put OBJ into OBJ;discard/drop/throw/put OBJ on OBJ;discard/drop/throw/put OBJ onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ into OBJ;empty OBJ on OBJ;empty OBJ onto OBJ;empty OBJ to OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;lock OBJ with OBJ;put OBJ inside OBJ;remove/carry/get/hold/take OBJ from OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
# Game entry for "Loose": z-machine ROM filename, RNG seed, a scripted
# walkthrough (slash-separated commands), and the game's parser grammar
# (semicolon-separated action patterns; OBJ marks an object slot).
# Structure matches the sibling game entries in this file.
loose = {
"name": "loose",
"rom": "loose.z5",
"seed" : 0,
"walkthrough" : "n/n/take pot/open pot/empty pot/drop pot/take hairpin/sw/w/w/knock knock/boo/give hairpin to mary/ask mary for ladder/sw/take ladder/yes/take ladder/sw/put ladder in well/here kitty kitty/take ladder/ne/ne/e/e/ne/s/s/lean ladder against fence/take ladder/n/n/sw/w/w/sw/drop ladder/sw/nw/s/s/take kitten/take key/unlock door with key/s/x figure/n/z/z",
"grammar" : "about/commands/help/hint/info;anita/annie/apple/arthur/banana/bob/boo/canoe/carl/doris/dorris/gladys/harry/irish/isabelle/ivan/joke/justin/lettuce/minnie/olive/orange/owl/police/radio/venice/wanda/wayne/wendy/yodelx;apologise/apologize/sorry;awake/awaken/wake;awake/awaken/wake up;bend/straighte/unbend spoon;bother/curses/darn/drat;brief/normal;carry/catch/grab/hold/lift/snatch/steal/take inventory;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside;exit/out/outside/leave/stand;full/fullscore;full/fullscore score;get down/off/up;get out/off/up;go/run/walk;go/run/walk in/into/inside;go/run/walk on;go/run/walk out/outside;hear/listen;hop/jump/skip;hop/jump/skip/climb/scale down/off;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter/cross/enter;l/look/l/look;long/verbose;maybe/possibly/no/deal/ok/okay/y/yes;nap/sleep;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;restart;restore;save;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;sing;smell/sniff;stand up;think;verify;version;wait/z;wave;xyzzy;activate OBJ;adjust/set OBJ;answer/call/say/speak/talk OBJ;answer/call/say/speak/talk how/what/when/where/who/why OBJ;answer/call/say/speak/talk to OBJ;apologise/apologize/sorry to OBJ;attach/fasten/fix/tie OBJ;attack/beat/break/crack/destroy/fight/hit/kick/kill/murder/punch/rip/smash/tear/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;bang/knock/pound/tap OBJ;bang/knock/pound/tap OBJ down/over;bang/knock/pound/tap down/over OBJ;bang/knock/pound/tap on/at OBJ;blow OBJ;boil/cook/eat/fry/poach/scramble OBJ;boil/cook/eat/fry/poach/scramble OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/catch/grab/hold/lift/snatch/steal/take off OBJ;chase/follow/pursue/track/trail OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift/shove/slide 
OBJ;clear/move/press/push/shift/shove/slide OBJ in/out;clear/move/press/push/shift/shove/slide OBJ over;climb/scale OBJ;climb/scale down OBJ;climb/scale down/off from/off OBJ;climb/scale in/into/through OBJ;climb/scale out/through OBJ;climb/scale up/over OBJ;climb/scale/go/run/walk on OBJ;close/cover/shut/slam OBJ;close/cover/shut/slam up OBJ;cross/enter OBJ;cross/enter/go/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;disrobe/doff/shed/remove/untie/unwind OBJ;don/wear OBJ;drag/pull/tug/yank OBJ;drag/pull/tug/yank OBJ down/off/over;drag/pull/tug/yank on OBJ;draw OBJ;draw OBJ closed;draw OBJ open;drink/sip/swallow OBJ;dump/empty/pour OBJ;dump/empty/pour OBJ out;dump/empty/pour out OBJ;embrace/hug/kiss OBJ;exit/out/outside OBJ;feed OBJ;feel/fondle/grope/pat/pet/shake/stroke/touch OBJ;fill OBJ;flick/flip/switch OBJ;flick/flip/switch OBJ off/down;flick/flip/switch OBJ on/up;flick/flip/switch off/down OBJ;flick/flip/switch on/up OBJ;get down/off/up from/of/off OBJ;get in/into/on/onto OBJ;get off OBJ;give/offer/pay OBJ back;give/offer/pay back OBJ;go/run/walk in/into/inside OBJ;go/run/walk inside of OBJ;go/run/walk out/outside OBJ;go/run/walk out/outside of OBJ;go/run/walk through OBJ;hear/listen OBJ;hear/listen at OBJ;hear/listen to OBJ;hop/jump/skip down/off from/of/off OBJ;hop/jump/skip from/off OBJ;hop/jump/skip in/into/on/onto OBJ;hop/jump/skip over OBJ;introduce me/myself/self to OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look out OBJ;l/look over/around/underneat OBJ;l/look under OBJ;lean/put/stick/wind OBJ down;lean/put/stick/wind down OBJ;lean/put/stick/wind head in/into OBJ;lean/put/stick/wind on OBJ;leave OBJ;leave into/in/inside/through OBJ;lick/taste OBJ;lie/sit at OBJ;lie/sit down on/in OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;open/uncover/undo/unwrap OBJ;peel off OBJ;peel/pick OBJ;pick OBJ up;pick up OBJ;pry OBJ;read/check/describe/examine/watch/x OBJ;remove/untie/unwind/get/carry/catch/grab/hold/lift/snatch/steal/take 
OBJ;rotate/screw/turn/twist/unscrew OBJ;rotate/screw/turn/twist/unscrew OBJ off;rotate/screw/turn/twist/unscrew OBJ on;rotate/screw/turn/twist/unscrew on OBJ;rotate/screw/turn/twist/unscrew/close/cover/shut/slam off OBJ;search OBJ;shout/yell at/to OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on OBJ;stir OBJ;stir OBJ with spoon;stop/deactivat OBJ;swing OBJ;swing on OBJ;throw/discard/drop/toss OBJ;use OBJ;wave OBJ;adjust/set OBJ to OBJ;answer/call/say/speak/talk OBJ to OBJ;answer/call/say/speak/talk OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;ask/tell OBJ how/what/when/where/who/why OBJ;ask/tell OBJ to OBJ;attach/fasten/fix/tie OBJ on/onto/with OBJ;attach/fasten/fix/tie/attach/fasten/fix/tie OBJ to OBJ;attack/beat/break/crack/destroy/fight/hit/kick/kill/murder/punch/rip/smash/tear/thump/torture/wreck OBJ with OBJ;bang/knock/pound/tap OBJ on/at OBJ;boil/cook/eat/fry/poach/scramble OBJ with OBJ;burn/light OBJ with OBJ;carry/catch/grab/hold/lift/snatch/steal/take OBJ from OBJ;carry/catch/grab/hold/lift/snatch/steal/take OBJ off OBJ;clear/move/press/push/shift/shove/slide OBJ OBJ;clear/move/press/push/shift/shove/slide OBJ down/off from/of/off OBJ;clear/move/press/push/shift/shove/slide OBJ from/off/of OBJ;clear/move/press/push/shift/shove/slide OBJ in/out OBJ;clear/move/press/push/shift/shove/slide OBJ to OBJ;clear/move/press/push/shift/shove/slide/drag/pull/tug/yank OBJ through OBJ;clear/move/press/push/shift/shove/slide/lean/put/stick/wind OBJ under OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;display/present/show OBJ OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;drag/pull/tug/yank OBJ down/off from/of/off OBJ;drag/pull/tug/yank OBJ from/of/off OBJ;dump/empty/pour OBJ by/in/into/on/onto OBJ;dump/empty/pour OBJ next to OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;feel/fondle/grope/pat/pet/shake/stroke/touch OBJ with OBJ;give/offer/pay OBJ OBJ back;give/offer/pay OBJ back 
OBJ;give/offer/pay OBJ back to OBJ;give/offer/pay back OBJ to OBJ;insert/lower OBJ in/into OBJ;insert/lower OBJ in/into/inside OBJ;l/look up OBJ in OBJ;lean/put/stick/wind OBJ against/around OBJ;lean/put/stick/wind OBJ back in/inside/into OBJ;lean/put/stick/wind OBJ back on/onto OBJ;lean/put/stick/wind OBJ in/inside/into OBJ;lean/put/stick/wind OBJ through OBJ;lock OBJ with OBJ;pick OBJ with OBJ;point out OBJ to OBJ;pry OBJ from/off/with OBJ;pry OBJ off with OBJ;remove/untie/unwind OBJ from OBJ;remove/untie/unwind/get/carry/catch/grab/hold/lift/snatch/steal/take OBJ from OBJ;rotate/screw/turn/twist/unscrew OBJ in OBJ;shout/yell OBJ at/to OBJ;stuff OBJ in/into/inside OBJ;tell OBJ OBJ;tell OBJ about OBJ;throw OBJ OBJ;throw OBJ at/to OBJ;throw/discard/drop/toss OBJ at/against/on/onto OBJ;throw/discard/drop/toss OBJ in/into/down OBJ;throw/discard/drop/toss/lean/put/stick/wind OBJ on/onto OBJ;transfer OBJ to OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;use OBJ in OBJ;use OBJ on/with OBJ;",
"max_word_length" : 9
}
# Game entry for "Lost Pig" (note: .z8 ROM, unlike the .z5 siblings):
# z-machine ROM filename, RNG seed, a scripted walkthrough (slash-separated
# commands), and the game's parser grammar (semicolon-separated action
# patterns; OBJ marks an object slot).
lostpig = {
"name": "lostpig",
"rom": "lostpig.z8",
"seed" : 0,
"walkthrough" : "X ME/INVENTORY/X FARM/X FOREST/LOOK FOR PIG/LISTEN/NORTHEAST/X STAIRS/X METAL THING/TAKE TUBE AND TORCH/LOOK INSIDE TUBE/BLOW IN TUBE/X CRACK/EAST/X PIG/FOLLOW PIG/CATCH IT/X FOUNTAIN/X BOWL/X COIN/X CURTAIN/X MAN/NORTH/X WEST MURAL/X EAST MURAL/X STATUE/X HAT/TAKE IT/WEAR IT/SOUTH/SOUTHWEST/X BOX/PUT COIN IN SLOT/PULL LEVER/X BRICK/TAKE IT/SMELL IT/TASTE IT/EAT IT/X DENT/HIT BOX/TAKE COIN/PUT COIN IN SLOT/PULL LEVER/HIT BOX/TAKE ALL FROM BASKET/PUT COIN IN SLOT/TAKE ALL FROM BASKET/X CHAIR/TAKE IT/EAST/X SHADOW/LISTEN/SHOUT/GREET GNOME/TELL GNOME ABOUT GRUNK/ASK GNOME ABOUT STATUE/ASK WHAT GNOME LOOKING FOR/LOOK UNDER BED/TALK TO GNOME ABOUT MOGGLEV/LOOK/LOOK UNDER BED/OPEN TRUNK/X BALL/TAKE BALL/SHOW TORCH TO GNOME/ASK GNOME ABOUT FIRE/SHOW BRICK TO GNOME/ASK GNOME ABOUT MOTHER/EAST/X SHELF/X TOP SHELF/DROP CHAIR/STAND ON CHAIR/X TOP SHELF/TAKE BOOK/X IT/GET DOWN/OPEN CHEST/TAKE POLE/X IT/WEST/SHOW POLE TO GNOME/ASK GNOME ABOUT COLOR MAGNET/SHOW BOOK TO GNOME/GIVE BOOK TO GNOME/EAST/ASK GNOME ABOUT PAGE/EAST/NORTHWEST/EAST/X RIVER/X THING/TAKE THING/CROSS RIVER/TOUCH THING WITH POLE/X KEY/TAKE WATER/FILL HAT WITH WATER/WEST/SOUTHEAST/UNLOCK CHEST/OPEN IT/POUR WATER ON POWDER/LIGHT TORCH WITH FIRE/NORTHWEST/WEST/X CRACK/TAKE PAPER/TAKE PAPER WITH POLE/BURN POLE WITH TORCH/TAKE PAPER WITH POLE/EAST/SOUTHWEST/EAST/GIVE PAPER TO GNOME/WAIT/GO TO PIG/SHOW BRICK TO PIG/DROP ALL BRICKS/Z/Z/Z/Z/TAKE PIG/GO TO STATUE/X HAND/PUT TORCH IN HAND/NORTH/X WINDY TUNNEL/NORTH/SOUTH/TAKE TORCH/GO TO GNOME/ASK GNOME FOR BALL/GIVE TORCH TO GNOME/THANK GNOME/GO TO WINDY CAVE/NORTH/EAST/DROP POLE/NORTHWEST/PLAY WHISTLE/SOUTHEAST/ENTER HOLE/FOLLOW GNOME",
"grammar" : "a/t/talk piglish;about/author/clue/clues/credit/credits/help/hint/hints/info/menu/walkthrou/walkthru;arr/arrr/arrrr/growl/grr/grrr/grrrr/rar/rarr/rarrr/roar/rrr/rrrr/scream/yell/shout;awake/awaken/wake;awake/awaken/wake up;back/step/stand;back/step/stand back;back/step/stand up;bah/grumble/sigh;bathe/dive/swim;burp/fart;bye/farewell/good-bye/goodbye;consider/contempla/think;cough;crap/pee/piss/poop/wee/wizz;cry/frown/grimace/scowl/sniffle/sob;curse/dummy/idiot/stupid/damn/fuck/shit/sod;dance;declaim/deliver;declaim/deliver at wall;declaim/deliver from table top;declaim/deliver from table/tabletop;declaim/deliver monologue/monologue;declaim/deliver monologue/monologue at wall;declaim/deliver monologue/monologue from table/tabletop/top;declaim/deliver monologue/monologue to wall;declaim/deliver to wall;die/q/quit;dig;drink/sip;drool/spit;dross/bother/curses/darn/drat;dunno/shrug;exit/out/outside;exits;fly;fly away;fly up;frotz/plover/plugh/rezrov/wazzum/xyzzy/yoho/zarf/zork;full/fullscore;full/fullscore score;get down;get out/off/up;good bye;greet/hello/hi/a/t/talk/wave;grin/smile/smirk;groan/ugh/ugh!/uh/um;ha/ha!/haw/hee/laugh;ha/ha!/haw/hee/laugh ha/haw/ha!/hee;ha/ha!/haw/hee/laugh ha/haw/hee ha/haw/ha!/hee;ha/ha!/haw/hee/laugh ha/haw/hee ha/haw/hee ha/haw/ha!/hee;ha/ha!/haw/hee/laugh ha/haw/hee ha/haw/hee ha/haw/hee ha/haw/ha!/hee;have/take a bath;have/take bath;hear/listen;heat/warm up;hiccough/hiccup;hide;hooray/hurrah/hurray/woo/woohoo/wooo/woot/yay;hop/jump/skip;i dont/don't know/know!;i dunno/dunno!;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/enter;l/look;la/lalala la;la/lalala la la;la/lalala/sing;leave/go/run/walk;nah/never/nope/no;nod/ok/sure/yeah/yup/y/yes;noscript/unscript;notify;notify off;notify on;nouns/pronouns;objects;places;play;pray;recording;recording off;recording on;relax/rest/wait/z;replay;restart;restore;save;score;script/transcrip;script/transcrip off;script/transcrip 
on;short/superbrie/long/verbose/brief/normal;smell/sniff;sneeze;snooze/nap/sleep;snort/grunt/knio/oink/squeal;sorry;stand by;stick out tongue;stick tongue out;stretch;stretch out;take inventory;thank/thanks;thank/thanks you;topic/topics;topic/topics off;topic/topics on;verify;version;whistle;win;win game;win lost pig;win story;yawn;zobleb;ztorf;a/t/talk piglish at/to OBJ;a/t/talk/answer/say/speak to OBJ;adjust/set OBJ;adjust/set/burn/light OBJ on fire;answer OBJ;apologise/apologize to OBJ;arr/arrr/arrrr/growl/grr/grrr/grrrr/rar/rarr/rarrr/roar/rrr/rrrr/scream/yell at OBJ;attach/fasten/tie OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;back/step/stand away from OBJ;bah/grumble/sigh at/to OBJ;beat/kick/poke/prod/slap/spank/attack/break/crack/destroy/fight/hit/murder/punch/smash/thump/wreck OBJ;bite/chew/lick/taste/eat OBJ;blow OBJ;blow on/at/in OBJ;bother/curses/darn/drat OBJ;buy/purchase OBJ;call OBJ;call to/for OBJ;carry/hold OBJ;chase OBJ;chase after OBJ;check/describe/examine/watch/x OBJ;chop/cut/prune/slice OBJ;climb/scale OBJ;climb/scale into/in/inside OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;complain to/at OBJ;consider/contempla OBJ;consider/contempla/think about OBJ;cook/burn/light/burn/light OBJ;count OBJ;crap/pee/piss/poop/wee/wizz on/in OBJ;cry/frown/grimace/scowl/sniffle/sob at/to OBJ;curse/damn/fuck/shit/sod OBJ;dance with OBJ;discard/drop OBJ;disrobe/doff/shed/remove OBJ;dive/swim OBJ;dive/swim in OBJ;don/wear OBJ;douse/extinguis/smother OBJ;drag/pull OBJ;draw OBJ;draw OBJ;drink/sip OBJ;drink/sip from OBJ;drool/spit at/in/on OBJ;drown/splash OBJ;dry OBJ;dry OBJ off;dry off OBJ;dummy/idiot/stupid OBJ;dust/polish/rub/scrub/shine/sweep/wipe OBJ;earn OBJ;empty OBJ out;equip/wield OBJ;feel/fondle/grope/touch in/inside OBJ;fill OBJ;flip OBJ;frotz/plover/plugh/rezrov/wazzum/xyzzy/yoho/zarf/zork OBJ;get in/into/on/onto OBJ;get off OBJ;get/catch/grab/pin/steal/swipe/trap/take OBJ;go/run/walk back to 
OBJ;go/run/walk to OBJ;go/run/walk/enter OBJ;goto/find/follow/seek/go/run/walk OBJ;greet/hello/hi OBJ;grin/smile/smirk at/to OBJ;groan/ugh/ugh!/uh/um OBJ;grunt/knio/oink/squeal at OBJ;ha/ha!/haw/hee/laugh OBJ;ha/ha!/haw/hee/laugh at OBJ;ha/ha!/haw/hee/laugh ha/haw/hee OBJ;ha/ha!/haw/hee/laugh ha/haw/hee ha/haw/ha!/hee OBJ;ha/ha!/haw/hee/laugh ha/haw/hee ha/haw/hee ha/haw/ha!/hee OBJ;ha/ha!/haw/hee/laugh ha/haw/hee ha/haw/hee ha/haw/hee ha/haw/hee OBJ;hear/listen OBJ;hear/listen for OBJ;hear/listen to OBJ;heat/warm OBJ;heat/warm OBJ up;heat/warm up OBJ;heat/warm up by OBJ;heat/warm up over by OBJ;heat/warm up with OBJ;hide/get/go/run/walk behind/under/in OBJ;hooray/hurrah/hurray/woo/woohoo/wooo/woot/yay OBJ;hooray/hurrah/hurray/woo/woohoo/wooo/woot/yay for OBJ;hop/jump/skip on/at OBJ;huff/puff and puff at OBJ;huff/puff at OBJ;kill OBJ;l/look at OBJ;l/look behind OBJ;l/look inside/in/into/through/on OBJ;l/look under OBJ;leave/exit/out/outside OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit at OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;lock OBJ;molest/embrace/hug/kiss OBJ;nod/ok/sure/yeah/yup at/to OBJ;nudge/straighte/clear/move/press/push/shift OBJ;open/uncover/undo/unwrap OBJ;paw OBJ;paw at OBJ;peel off OBJ;pet/scratch/feel/fondle/grope/touch OBJ;pick OBJ up;pick up OBJ;play OBJ;play with/on/in OBJ;pour/empty OBJ;pour/empty out OBJ;put OBJ down;put down OBJ;put on OBJ;put/blow OBJ out;put/blow out OBJ;reach into/in OBJ;read OBJ;remember OBJ;repair/fix OBJ;rip/tear OBJ;rock/shake OBJ;rotate/screw/turn/twist/unscrew OBJ;rotate/screw/turn/twist/unscrew/switch OBJ off;rotate/screw/turn/twist/unscrew/switch OBJ on;rotate/screw/turn/twist/unscrew/switch on OBJ;rotate/screw/turn/twist/unscrew/switch/close/cover/shut off OBJ;scare/threaten OBJ;scoop up OBJ;scoop/peel OBJ;search OBJ;search/l/look for OBJ;shout at/to OBJ;shout for OBJ;sing at/to OBJ;smoke OBJ;snort/smell/sniff OBJ;stand by OBJ;stand in OBJ;stand on OBJ;step in/into OBJ;step on/onto 
OBJ;step/stand near OBJ;step/stand next to OBJ;stick out tongue at OBJ;stick tongue out at OBJ;swallow OBJ;switch OBJ;take OBJ off;take off OBJ;thank/thanks OBJ;thank/thanks you OBJ;topic/topics OBJ;torture OBJ;toss/throw OBJ;unlock OBJ;vault/cross/hop/jump/skip OBJ;vault/hop/jump/skip over OBJ;wash/clean OBJ;wave at/to OBJ;wave/swing OBJ;waylay/wrestle OBJ;wrestle with OBJ;wring/squash/squeeze OBJ out;wring/squash/squeeze out OBJ;wring/squidge/squish/squash/squeeze OBJ;adjust/set OBJ to OBJ;adjust/set/burn/light OBJ on fire with/using OBJ;answer/say/speak OBJ to OBJ;attach/fasten/tie OBJ to OBJ;beat/kick/poke/prod/slap/spank OBJ at OBJ;beat/kick/poke/prod/slap/spank/attack/break/crack/destroy/fight/hit/murder/punch/smash/thump/wreck OBJ with OBJ;burn/light OBJ with/using OBJ;catch/grab/pin/steal/swipe/trap/take OBJ off OBJ;chase OBJ OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift OBJ in/into/to/toward OBJ;consult OBJ about OBJ;consult OBJ on OBJ;cook/burn/light OBJ with OBJ;dip/dunk OBJ in/inside/into OBJ;discard/drop OBJ at/against/on/onto OBJ;discard/drop OBJ in/into/down OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;display/present/show/display/present/show OBJ OBJ;display/present/show/display/present/show OBJ to OBJ;douse/extinguis/smother OBJ with/in OBJ;draw OBJ with OBJ;drown/splash OBJ on/at OBJ;drown/splash OBJ with/in OBJ;dry OBJ off on OBJ;dry OBJ off on OBJ;dry OBJ off with OBJ;dry OBJ off with OBJ;dry OBJ on OBJ;dry OBJ on OBJ;dry OBJ with OBJ;dry OBJ with OBJ;dry off OBJ with OBJ;dry off OBJ with OBJ;dust/polish/rub/scrub/shine/sweep/wipe OBJ on/over/across OBJ;empty OBJ on/onto OBJ;empty OBJ to/in/into OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feel/fondle/grope/touch OBJ to OBJ;feel/fondle/grope/touch in/inside OBJ with OBJ;fill OBJ from/with OBJ;force/jemmy/lever/prise/prize/pry OBJ apart/open with OBJ;force/jemmy/lever/prise/prize/pry apart/open OBJ with 
OBJ;get/catch/grab/pin/steal/swipe/trap/take/remove OBJ from OBJ;get/catch/grab/pin/steal/swipe/trap/take/remove/peel OBJ with OBJ;get/take OBJ in OBJ;give/offer/pay OBJ and OBJ;give/offer/pay over OBJ to OBJ;heat/warm OBJ up with OBJ;heat/warm OBJ up with OBJ;heat/warm OBJ with OBJ;heat/warm OBJ with OBJ;heat/warm up OBJ with OBJ;heat/warm up OBJ with OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;open/uncover/undo/unwrap OBJ with OBJ;pick OBJ up in OBJ;pick OBJ up with OBJ;pick up OBJ in OBJ;pick up OBJ with OBJ;point OBJ at OBJ;point at OBJ with OBJ;pour OBJ into/in OBJ;pour OBJ onto/on OBJ;pour out OBJ into/in OBJ;pour out OBJ onto/on OBJ;put OBJ out with/in OBJ;put out OBJ with/in OBJ;reach OBJ in/into OBJ;reach into/in OBJ with OBJ;reach/feel/fondle/grope/touch/dust/polish/rub/scrub/shine/sweep/wipe/clear/move/press/push/shift OBJ with OBJ;rip/tear/chop/cut/prune/slice OBJ with OBJ;scoop OBJ up with/into OBJ;scoop OBJ with/into OBJ;scoop up OBJ with/into OBJ;shout OBJ at/to OBJ;stick/put OBJ over/across OBJ;stick/put/discard/drop OBJ on/onto OBJ;take OBJ off of OBJ;torture/kill OBJ with OBJ;toss/throw OBJ at/against/on/onto OBJ;toss/throw/stick/put OBJ in/inside/into OBJ;transfer OBJ to OBJ;unlock/force/jemmy/lever/prise/prize/pry OBJ with OBJ;vault OBJ with OBJ;vault over OBJ with OBJ;wash/clean OBJ with/in OBJ;wring/squash/squeeze OBJ out over/into OBJ;wring/squidge/squish/squash/squeeze OBJ over/into OBJ;",
"max_word_length" : 9
}
# Game entry for "Ludicorp": z-machine ROM filename, RNG seed, a scripted
# walkthrough (slash-separated commands), and the game's parser grammar
# (semicolon-separated action patterns; OBJ marks an object slot).
# Structure matches the sibling game entries in this file.
ludicorp = {
"name": "ludicorp",
"rom": "ludicorp.z5",
"seed" : 0,
"walkthrough" : "x building/s/se/x car/open car/enter car/x car/smell car/x gun/take gun/out/nw/n/e/e/s/e/x ladder/x pots/x wire/x statue/take all/w/s/x well/x footprints/x plants/d/x water/x fuse/take fuse/u/n/n/w/x bushes/s/s/d/ne/x label/x generator/put fuse in generator/sw/u/n/n/w/n/x door/open door/s/w/w/e/e/s/w/x gate/x panel/x button/x grill/push button/w/n/x boxes/x clingfilm/n/n/n/x keypad/e/n/x cupboard/x counter/e/x cooler/s/x trees/x flowers/x paper/u/d/e/s/x key/take key/w/x fountain/x water/n/n/e/x copier/x tray/open tray/take paper/x paper/e/x shelves/u/x pen/take pen/d/s/x desks/x chairs/x computers/s/w/s/e/s/x cupboard/unlock cupboard with key/open cupboard/x knife/take knife/n/w/w/s/x desk/x cup/take cup/x coffee/n/w/w/s/cut clingfilm with knife/x patch/x knife/n/e/n/u/s/x window/x carpark/shoot window/e/x whiteboard/x marker/take marker/write on whiteboard with marker/s/x toner/take toner/n/e/e/n/n/x screen/n/x machine/w/x bar/take bar/eat bar/w/w/s/w/x cubicles/s/drop all/s/x dispenser/push button/x blank card/n/take all/n/e/n/e/e/e/s/s/w/x printer/x slot/open slot/put toner in slot/close slot/x tray/open tray/put sheet in tray/close tray/x light/x button/push button/x sheet/e/s/w/w/w/n/d/s/w/n/n/x keypad/push button 1/push button 7/push button 3/push button 9/push button enter/n/u/w/u/x door/x papers/d/e/d/s/s/s/e/n/u/u/s/w/x city/n/n/s/s/e/e/x arcade/play arcade/x pool/play pool/s/n/e/e/n/x duct/x pipe/put patch on pipe/n/x console/n/x servers/w/w/x door/open door/w/x slot/e/e/s/s/s/w/w/w/n/u/w/ne/se/x drain/pour coffee in drain/x pass/sw/nw/e/d/s/e/e/e/n/n/n/w/w/close outer door/put pass in slot/w/x robot/x plaque/e/put pass in slot/open outer door/e/e/s/s/s/w/w/w/n/d/s/e/e/e/n/n/n/x machine/punch cat_robot->desist(); on blank card/s/s/s/w/w/w/n/u/s/e/e/e/n/n/n/w/w/close outer/put pass in slot/w/put card in slot/s/x keys/take keys/n/e/put pass in slot/open outer/e/e/s/s/s/w/w/w/n/d/d/s/w/n/n/n/u/w/u/unlock door with keys",
"grammar" : "awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;carry/hold/take inventory;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;get out/off/up;hear/listen;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;leave/go/run/walk;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;restart;restore;save;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;sing;smell/sniff;sorry;stand up;think;verify;version;wait/z;wave;y/yes;adjust/set OBJ;attach/fasten/fix/tie OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/hold/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;clear/move/press/push/shift OBJ 1;clear/move/press/push/shift OBJ 2;clear/move/press/push/shift OBJ 3;clear/move/press/push/shift OBJ 4;clear/move/press/push/shift OBJ 5;clear/move/press/push/shift OBJ 6;clear/move/press/push/shift OBJ 7;clear/move/press/push/shift OBJ 8;clear/move/press/push/shift OBJ 9;clear/move/press/push/shift OBJ enter;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cross/enter/go/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;fire OBJ;fire/shoot OBJ;fire/shoot at OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;open/uncover/undo/unwrap OBJ;peel 
OBJ;peel off OBJ;pick OBJ;pick OBJ up;pick up OBJ;play OBJ;play with OBJ;pour/tip OBJ;pour/tip OBJ away;pour/tip OBJ out;pour/tip away OBJ;pour/tip out OBJ;punch/attack/break/crack/destroy/fight/hit/kill/murder/smash/thump/torture/wreck OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;rip/tear OBJ;rotate/screw/turn/twist/unscrew OBJ;scribble/scribe/write on OBJ;search OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on OBJ;start OBJ;stop OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;uncoil OBJ;use OBJ;wave OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/hold/take OBJ off OBJ;chop/cut/prune/slice OBJ using OBJ;chop/cut/prune/slice OBJ with OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;fire OBJ at OBJ;fire/shoot OBJ with OBJ;fire/shoot at OBJ with OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;pour/tip OBJ in OBJ;pour/tip OBJ into OBJ;pour/tip OBJ on OBJ;pour/tip OBJ onto OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/hold/take OBJ from OBJ;scribble/scribe/write on OBJ with OBJ;tell OBJ about OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;use OBJ on OBJ;use OBJ with OBJ;",
"max_word_length" : 9
}
lurking = {
"name": "lurking",
"rom": "lurking.z3",
"seed" : 0,
# Walkthrough adapted from http://mirror.ifarchive.org/if-archive/solutions/jgunness.zip and http://www.eristic.net/games/infocom/lurking.html
"walkthrough": "STAND/S/W/OPEN FRIDGE/GET COKE AND CARTON/OPEN CARTON/OPEN OVEN/PUT CARTON IN OVEN/CLOSE OVEN/PRESS 4/PRESS 0/PRESS 5/PRESS MED/PRESS START/Z/Z/Z/Z/OPEN OVEN/GET CARTON/E/N/GIVE CARTON TO HACKER/ASK HACKER ABOUT KEYS/ASK HACKER FOR MASTER KEY/SIT DOWN/CLICK ON EDIT/CLICK ON PAPER/READ PAGE/CLICK ON MORE/AGAIN/AGAIN/AGAIN/D/Z/GET STONE/Z/Z/STAND UP/Z/Z/Z/S/PRESS DOWN BUTTON/Z/Z/S/OPEN PANEL/GET FLASHLIGHT/OPEN DOOR/N/D/D/E/GET GLOVES AND CROWBAR/WEAR GLOVES/U/LIGHT FLASHLIGHT/GET FLASK/D/TURN OFF FLASHLIGHT/W/W/W/U/S/GET CONTAINER/E/Z/Z/Z/E/U/CLIMB ROPE/LOWER LADDER/OPEN DOOR/OUT/U/REMOVE PLUG/DROP PLUG/GET PAPER/D/IN/D/D/E/SMASH CABINET/GET AXE/W/THROW AXE AT CORD/OPEN CONTAINER/POUR WAX ON FLOOR/DROP CONTAINER AND ASSIGNMENT/E/E/N/D/SE/GET BOOTS/WEAR THEM/U/U/UNLOCK DOOR WITH KEY/OPEN DOOR/OUT/U/D/THROW STONE AT CREATURE/U/EXAMINE TREE/DIG IN TUB/GET HAND/D/IN/D/S/GET STONE/N/D/NW/U/S/W/W/W/W/N/D/E/GET IN FORKLIFT/START IT/E/E/E/LIGHT FLASHLIGHT/REMOVE JUNK WITH FORKLIFT/AGAIN/AGAIN/AGAIN/E/GET OUT OF FORKLIFT/OPEN MANHOLE WITH CROWBAR/D/N/D/GET KNIFE/U/S/U/W/W/TURN OFF FLASHLIGHT/W/W/W/U/S/E/E/E/E/S/KNOCK ON DOOR/Z/GIVE PAPER TO PROFESSOR/S/Z/Z/CUT LINE WITH KNIFE/GET OUT OF PENTAGRAM/MOVE BENCH/OPEN TRAPDOOR/D/OPEN TRAPDOOR/LIGHT FLASHLIGHT/U/PUT HAND IN LIQUID/Z/Z/GET HAND/GET HYRAX/PUT IT ON HAND/DRINK COKE/N/OPEN DOOR/N/TURN OFF FLASHLIGHT/N/W/W/W/W/N/S/E/E/E/E/W/W/W/W/N/D/SHOW HAND TO URCHIN/GET CUTTER/D/DROP CUTTER, FLASK AND AXE/NW/UNLOCK PADLOCK WITH KEY/GET PADLOCK/OPEN HATCH/LIGHT FLASHLIGHT/D/E/TURN VALVE WITH CROWBAR/Z/Z/Z/TURN VALVE/CLOSE VALVE/E/E/REMOVE BRICK WITH CROWBAR/REMOVE NEW BRICK WITH CROWBAR/W/W/W/U/TURN OFF FLASHLIGHT/DROP KNIFE AND CROWBAR/SE/GET ALL/U/E/E/U/PRESS DOWN/D/Z/Z/WEDGE DOORS WITH AXE/D/GET CHAIN/TIE CHAIN TO ROD/LOCK IT WITH PADLOCK/U/PUT CHAIN ON HOOK/GET AXE/U/U/PRESS UP BUTTON/D/D/Z/Z/Z/WEDGE DOORS WITH AXE/D/GET AXE/LIGHT FLASHLIGHT/N/W/W/W/W/W/D/D/CUT GROWTH WITH CUTTER/D/N/D/S/S/D/OPEN FLASK/LOOK IN 
IT/POUR LIQUID ON SLIME/UNLOCK DOOR WITH KEY/OPEN DOOR/S/REACH INTO POOL/PULL LINE/CUT LINE WITH AXE/AGAIN/AGAIN/GET LINE/OPEN METAL COVER/UNPLUG COAXIAL CABLE/PUT LINE IN SOCKET/Z/THROW STONE AT THING/GET STONE",
"grammar" : "answer/reply/respon;brief;call/say/talk;chase/follow;clear/move/shift/drive/go/procee/run/steer/step/walk;concea/hide;damn/fuck/shit;depart/exit/withdr;diagno;die;disemb;dive/jump/leap;enter;gaze/l/look/stare;greeti/hello/hi;hack;help/hint;hop/skip;i/invent;leave;listen;nap/sleep;no/nope;okay/y/yes;pray;q/quit;restar;restor;rise/stand;save;score;scream/shout/yell;script;smell/sniff;super/superb;swim/wade;t/time;thank/thanks;unscri;verbos;versio;wait/z;where;who;yawn;admire/compli OBJ;answer/reply/respon OBJ;approa OBJ;ask/quiz about OBJ;ask/quiz for OBJ;attack/fight/hit/strike OBJ;awake/wake OBJ;awake/wake up OBJ;bargai with OBJ;beckon/wave OBJ;beckon/wave to OBJ;beckon/wave/scream/shout/yell at OBJ;bite OBJ;blow out OBJ;board/ride OBJ;break/crack/destro/scrape/scratc/smash/wreck OBJ;bury OBJ;call/say/talk OBJ;call/say/talk to OBJ;carry/catch/get/grab/hold/snatch/take OBJ;carry/catch/get/grab/hold/snatch/take off OBJ;carry/catch/get/grab/hold/snatch/take out OBJ;carry/catch/get/grab/hold/snatch/take/drag/pull/tug down OBJ;chase/follow OBJ;check/descri/examin/watch/x OBJ;check/descri/examin/watch/x/gaze/l/look/stare in OBJ;check/descri/examin/watch/x/gaze/l/look/stare on OBJ;check/descri/examin/watch/x/gaze/l/look/stare/rummag/search for OBJ;choose/click/select OBJ;choose/click/select on OBJ;clear/move/shift/drag/pull/tug OBJ;climb/scale OBJ;climb/scale down OBJ;climb/scale off OBJ;climb/scale out OBJ;climb/scale over OBJ;climb/scale up OBJ;climb/scale/carry/catch/get/grab/hold/snatch/take on OBJ;climb/scale/rest/sit/carry/catch/get/grab/hold/snatch/take in OBJ;close/shut OBJ;compar OBJ;concea/hide OBJ;concea/hide behind OBJ;concea/hide from OBJ;concea/hide under OBJ;concea/hide/rise/stand in OBJ;consum/eat/gobble OBJ;cook/heat/warm OBJ;count OBJ;cross OBJ;curdle/scare/startl/surpri OBJ;depart/exit/withdr OBJ;descen OBJ;detach/discon/free/unatta/unfast/unhook/untie/unwrap OBJ;dig in OBJ;dig throug OBJ;dig with OBJ;disemb OBJ;dive/jump/leap across 
OBJ;dive/jump/leap down OBJ;dive/jump/leap from OBJ;dive/jump/leap in OBJ;dive/jump/leap off OBJ;dive/jump/leap on OBJ;dive/jump/leap to OBJ;dive/jump/leap/drive/go/procee/run/steer/step/walk over OBJ;drag/pull/tug on OBJ;drink/sip/swallo OBJ;drink/sip/swallo from OBJ;drive/go/procee/run/steer/step/walk OBJ;drive/go/procee/run/steer/step/walk around OBJ;drive/go/procee/run/steer/step/walk away OBJ;drive/go/procee/run/steer/step/walk down OBJ;drive/go/procee/run/steer/step/walk in OBJ;drive/go/procee/run/steer/step/walk on OBJ;drive/go/procee/run/steer/step/walk throug OBJ;drive/go/procee/run/steer/step/walk to OBJ;drive/go/procee/run/steer/step/walk under OBJ;drive/go/procee/run/steer/step/walk up OBJ;drop/dump OBJ;drop/dump from OBJ;edit OBJ;empty/pour/spill OBJ;enter OBJ;erase OBJ;erect/lift/raise OBJ;erect/lift/raise/insert/lay/place/put up OBJ;exting OBJ;feel/pat/pet/rub/squeez/touch OBJ;fill OBJ;find OBJ;fix/patch/repair OBJ;flip/power/set/turn OBJ;flip/power/set/turn around OBJ;flip/power/set/turn off OBJ;flip/power/set/turn on OBJ;flip/power/set/turn over OBJ;gaze/l/look/stare OBJ;gaze/l/look/stare around OBJ;gaze/l/look/stare at OBJ;gaze/l/look/stare behind OBJ;gaze/l/look/stare down OBJ;gaze/l/look/stare throug OBJ;gaze/l/look/stare under OBJ;gaze/l/look/stare up OBJ;gestur/point at OBJ;gestur/point to OBJ;greeti/hello/hi OBJ;hack OBJ;hang from OBJ;help/hint OBJ;hurl/pitch/throw/toss OBJ;hurl/pitch/throw/toss away OBJ;input/type OBJ;input/type login OBJ;input/type passwo OBJ;insert/lay/place/put down OBJ;insert/lay/place/put on OBJ;kick OBJ;kill/murder/slay/stab OBJ;kiss OBJ;knock/rap at OBJ;knock/rap down OBJ;knock/rap on OBJ;lean on OBJ;leave OBJ;let go OBJ;lie down OBJ;lie on OBJ;light OBJ;light up OBJ;listen for OBJ;listen to OBJ;login OBJ;login as OBJ;login on OBJ;lower OBJ;nap/sleep in OBJ;nap/sleep on OBJ;open OBJ;open up OBJ;passwo OBJ;pick OBJ;pick up OBJ;plug/screw in OBJ;press/push/shove OBJ;press/push/shove down OBJ;press/push/shove on 
OBJ;press/push/shove up OBJ;rattle/shake OBJ;reach in OBJ;reach under OBJ;read OBJ;releas OBJ;remove/shed OBJ;rest/sit at OBJ;rest/sit down OBJ;rest/sit on OBJ;rise/stand on OBJ;rise/stand/carry/catch/get/grab/hold/snatch/take up OBJ;roll up OBJ;rummag/search OBJ;rummag/search in OBJ;sample/taste OBJ;shoot OBJ;slide OBJ;slide on OBJ;smell/sniff OBJ;start OBJ;start/flip/power/set/turn up OBJ;stop OBJ;swim/wade in OBJ;swing/thrust OBJ;tell OBJ;tell about OBJ;thank/thanks OBJ;tortur OBJ;trade OBJ;unplug/unscre OBJ;wait/z OBJ;wait/z for OBJ;wear OBJ;what OBJ;what/read about OBJ;where OBJ;who OBJ;apply OBJ to OBJ;ask/quiz OBJ about OBJ;ask/quiz OBJ for OBJ;attach/connec/fasten/hook/loop/tie/wrap OBJ around OBJ;attach/connec/fasten/hook/loop/tie/wrap OBJ on OBJ;attach/connec/fasten/hook/loop/tie/wrap OBJ to OBJ;attach/connec/fasten/hook/loop/tie/wrap up OBJ with OBJ;attack/fight/hit/strike OBJ with OBJ;beckon/wave OBJ at OBJ;break/crack/destro/scrape/scratc/smash/wreck OBJ with OBJ;break/crack/destro/scrape/scratc/smash/wreck down OBJ with OBJ;burn/ignite OBJ with OBJ;burn/ignite down OBJ with OBJ;bury OBJ in OBJ;buy/purcha OBJ from OBJ;buy/purcha OBJ with OBJ;carry/catch/get/grab/hold/snatch/take OBJ from OBJ;carry/catch/get/grab/hold/snatch/take OBJ in OBJ;carry/catch/get/grab/hold/snatch/take OBJ off OBJ;carry/catch/get/grab/hold/snatch/take OBJ out OBJ;carry/catch/get/grab/hold/snatch/take OBJ with OBJ;choose/click/select OBJ with OBJ;choose/click/select on OBJ with OBJ;chop/cut/prune/slash/slice/split OBJ with OBJ;chop/cut/prune/slash/slice/split throug OBJ with OBJ;clear/move/shift OBJ with OBJ;close/shut OBJ with OBJ;compar OBJ to OBJ;compar OBJ with OBJ;concea/hide OBJ from OBJ;count OBJ in OBJ;cover OBJ with OBJ;curdle/scare/startl/surpri OBJ with OBJ;detach/discon/free/unatta/unfast/unhook/untie/unwrap OBJ from OBJ;dig OBJ with OBJ;dig in OBJ with OBJ;drive/go/procee/run/steer/step/walk OBJ OBJ;drive/go/procee/run/steer/step/walk OBJ on 
OBJ;drive/go/procee/run/steer/step/walk OBJ over OBJ;drop/dump OBJ on OBJ;drop/dump/insert/lay/place/put OBJ down OBJ;drop/dump/insert/lay/place/put OBJ in OBJ;empty/pour/spill OBJ from OBJ;empty/pour/spill OBJ in OBJ;empty/pour/spill OBJ on OBJ;empty/pour/spill OBJ over OBJ;empty/pour/spill OBJ throug OBJ;erect/lift/raise up OBJ with OBJ;feed/give/offer OBJ OBJ;feed/give/offer OBJ to OBJ;feed/give/offer OBJ with OBJ;feel/pat/pet/rub/squeez/touch OBJ to OBJ;feel/pat/pet/rub/squeez/touch OBJ with OBJ;fill OBJ at OBJ;fill OBJ with OBJ;fix/patch/repair OBJ with OBJ;flip/power/set/turn OBJ to OBJ;flip/power/set/turn OBJ with OBJ;gaze/l/look/stare at OBJ throug OBJ;gaze/l/look/stare up OBJ in OBJ;hang OBJ from OBJ;hang OBJ on OBJ;hone/sharpe OBJ on OBJ;hone/sharpe OBJ with OBJ;hurl/pitch/throw/toss OBJ at OBJ;hurl/pitch/throw/toss OBJ away OBJ;hurl/pitch/throw/toss OBJ down OBJ;hurl/pitch/throw/toss OBJ in OBJ;hurl/pitch/throw/toss OBJ off OBJ;hurl/pitch/throw/toss OBJ on OBJ;hurl/pitch/throw/toss OBJ over OBJ;hurl/pitch/throw/toss OBJ throug OBJ;hurl/pitch/throw/toss OBJ to OBJ;insert/lay/place/put OBJ across OBJ;insert/lay/place/put OBJ around OBJ;insert/lay/place/put OBJ behind OBJ;insert/lay/place/put OBJ betwee OBJ;insert/lay/place/put OBJ on OBJ;insert/lay/place/put OBJ over OBJ;insert/lay/place/put OBJ under OBJ;kill/murder/slay/stab OBJ with OBJ;lean OBJ on OBJ;lean/rise/stand OBJ agains OBJ;lever/pry out OBJ with OBJ;lock OBJ to OBJ;lock OBJ with OBJ;lower OBJ down OBJ;lower OBJ in OBJ;melt/thaw OBJ with OBJ;open OBJ with OBJ;pick OBJ with OBJ;plug/screw OBJ in OBJ;plug/screw OBJ with OBJ;press/push/shove OBJ OBJ;press/push/shove/clear/move/shift OBJ on OBJ;press/push/shove/clear/move/shift OBJ to OBJ;prop/wedge OBJ betwee OBJ;prop/wedge OBJ in OBJ;prop/wedge OBJ with OBJ;rattle/shake OBJ at OBJ;reach in OBJ with OBJ;read OBJ to OBJ;remove/shed OBJ from OBJ;remove/shed/unplug/unscre/lever/pry/erect/lift/raise OBJ with OBJ;sell OBJ OBJ;sell OBJ to OBJ;show OBJ 
OBJ;show OBJ to OBJ;slide/press/push/shove OBJ under OBJ;swing/thrust OBJ at OBJ;tell OBJ OBJ;tell OBJ about OBJ;trade OBJ with OBJ;trade/feed/give/offer OBJ for OBJ;unlock OBJ with OBJ;unplug/unscre OBJ from OBJ;",
"max_word_length" : 6
}
# Game-configuration record for "moonlit".
moonlit = {
# Short identifier for the game.
"name": "moonlit",
# Z-machine story file this record refers to.
"rom": "moonlit.z5",
# NOTE(review): presumably the RNG seed handed to the interpreter for
# deterministic replay -- confirm against the consumer of this record.
"seed" : 0,
# '/'-separated list of commands that plays through the game.
"walkthrough" : "take mask/x mask/x self/d/x light/x leaf/x kite/x comb/x shadow/take dagger/n/take comb/x comb/hit comb/x compass/x feather/x hawk/x swallow/x crane/set compass to hawk/look/x trees/x leaf/take leaf/unfold leaf/i/x crane/set compass to crane/get fan/x fan/cut blue tassel/i/attach paper to teeth/i/attach string to kite/set compass to swallow/fly kite/s/u/look up/x horse/x flower/d/n/w/x bones/get bones with chopsticks/d/x arrangement/set constellations to horse/x arrangement/d/x hawks/s/wear mask/l/u/x storm/u",
# ';'-separated action templates accepted by the game's parser; '/'
# separates synonymous words and OBJ marks an object slot.
"grammar" : "about/help;awake/awaken/wake;awake/awaken/wake up;bathe;bother/curses/darn/drat;breathe/smell/sniff;brief;carry/hold/take inventory;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;fly;full/fullscore/score;get out/off/up;hear/listen;hint;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;l/look up;leave/go/run/walk;nap/sleep;no;normal/long/verbose;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;restart;restore;save;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;sing;sorry;stand up;think;verify;version;wait/z;wave;xyzzy;y/yes;arrange/clear/move/press/push/shift OBJ;attach/fasten/fix/tie OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;blow into OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/hold/take apart OBJ;carry/hold/take off OBJ;change/spin/rotate/screw/turn/twist/unscrew/adjust/set OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;comb OBJ;cross/enter/go/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;fly OBJ;fold OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;play OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;recite OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on OBJ;swing 
OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;tear/bash/strike/attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;unfold OBJ;unknot/untie OBJ;wave OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;arrange OBJ as OBJ;arrange OBJ to OBJ;arrange/get/carry/hold/take OBJ with OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/hold/take OBJ off OBJ;change/spin/rotate/screw/turn/twist/unscrew/rotate/screw/turn/twist/unscrew/adjust/set OBJ to OBJ;chop/cut/prune/slice OBJ with OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/hold/take OBJ from OBJ;tell OBJ about OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
# NOTE(review): appears to be the number of leading characters the parser
# treats as significant in a word -- TODO confirm with the consumer.
"max_word_length" : 9
}
# Game-configuration record for "murdac".
murdac = {
# Short identifier for the game.
"name": "murdac",
# Z-machine story file this record refers to.
"rom": "murdac.z5",
# NOTE(review): presumably the RNG seed handed to the interpreter for
# deterministic replay -- confirm against the consumer of this record.
"seed" : 0,
# '/'-separated list of commands that plays through the game.
"walkthrough" : "s/n/s/n/n/n/take shawm/s/blow shawm/s/s/s/take plank/take rod/e/take key/n/e/w/n/dig/take lamp/s/read grave/e/e/howl arac/take sword/w/unlock door/open door/n/close door/lock door/drop key/d/w/w/throw plank/w/w/take plank/throw rod/throw plank/n/s/w/w/take bowl/s/take toad/no/take stone/n/e/e/e/e/e/take pillow/e/e/e/e/w/e/w/e/n/s/n/e/drop pillow/take peridots/n/e/n/blow shawm/n/take perfume/s/s/e/s/drop perfume/drop peridots/drop bowl/drop shawm/drop sword/s/se/take beads/e/n/take erbium/eat toadstone/s/w/d/drop lamp/look/s/gnoeval/take all/n/take lamp/u/nw/n/e/ne/wave beads/take thorn/sw/w/drop all/take lamp/ne/ne/d/s/take bread/oozelumny/n/u/ne/sw/e/s/nw/w/d/se/take myrrh/d/nw/nw/e/e/n/ne/ne/n/d/sw/sw/drop myrrh/ne/feed pigeon/take pigeon/sw/n/w/take dummy/s/drop pigeon/look/agganrw/take statuette/n/w/w/n/take dodo/s/e/e/e/n/d/wait/s/s/drop dodo/drop statuette/take bowl/s/sw/fill bowl/ne/n/w/w/w/w/w/w/w/s/look/n/e/e/e/e/e/e/e/drop bowl/take beads/take tiepin/take sword/e/d/d/prick dummy/drop dummy/u/u/w/drop pin/ne/get wig/u/u/take staff/d/d/sw/e/d/s/u/w/ne/u/u/d/d/sw/w/w/w/w/w/w/w/s/wave scroll/w/w/w/wave beads/w/ne/e/se/sw/dig/take hoard/w/e/ne/exodus/e/e/n/e/e/e/e/e/e/e/drop bible/drop sword/drop hoard/e/drop wig/d/s/u/w/ne/u/u/take amethysts/d/d/sw/take all/n/n/n/drop all/take lamp/s/s/s/take all/n/n/n/drop all/get lamp/s/s/s/take all/n/n/n/sword/throw sword into lake",
# ';'-separated action templates accepted by the game's parser; '/'
# separates synonymous words and OBJ marks an object slot.
"grammar": "allhints;allpuzzle/puzzles;awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;carry/get/hold/take inventory;carry/get/hold/take off;carry/get/hold/take out;close/cover/shut up;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;go/leave/run/walk;hear/listen;help;hint/hints;hints off;hints on;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;off;on;places;pray;restart;restore;review;save;score;script;script off;script on;short/superbrie;sing;smell/sniff;sorry;stand/carry/get/hold/take up;think;verify;version;wait/z;wave;xyzzy;y/yes;dig;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/get/hold/take OBJ;carry/get/hold/take off OBJ;chase/follow/pursue/trail OBJ;chase/follow/pursue/trail after OBJ;check/describe/examine/watch/x OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;click OBJ;click on OBJ;climb/scale OBJ;climb/scale over OBJ;climb/scale up OBJ;close/cover/shut OBJ;cross/enter/go/leave/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;drop OBJ;throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;drink/sip/swallow at OBJ;drink/sip/swallow from OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;go/leave/run/walk through OBJ;go/leave/run/walk/carry/get/hold/take into OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;howl OBJ;l/look behind OBJ;l/look in OBJ;l/look inside OBJ;l/look into OBJ;l/look through OBJ;l/look under OBJ;lie/sit/go/leave/run/walk 
inside OBJ;lie/sit/go/leave/run/walk/carry/get/hold/take in OBJ;lie/sit/stand/carry/get/hold/take on OBJ;open/uncover/undo/unwrap OBJ;unlock OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;smell/sniff OBJ;squash/squeeze OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;unfold OBJ;wave OBJ;adjust/set OBJ to OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/get/hold/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;click OBJ on OBJ;dig OBJ with OBJ;discard/drop/throw OBJ against OBJ;discard/drop/throw OBJ at OBJ;discard/drop/throw OBJ down OBJ;discard/drop/throw/insert/put OBJ in OBJ;discard/drop/throw/insert/put OBJ into OBJ;discard/drop/throw/put OBJ on OBJ;discard/drop/throw/put OBJ onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ into OBJ;empty OBJ on OBJ;empty OBJ onto OBJ;empty OBJ to OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;lock OBJ with OBJ;put OBJ inside OBJ;remove/carry/get/hold/take OBJ from OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
# NOTE(review): appears to be the number of leading characters the parser
# treats as significant in a word -- TODO confirm with the consumer.
"max_word_length" : 9
}
# Game-configuration record for "night".
night = {
# Short identifier for the game.
"name": "night",
# Z-machine story file this record refers to.
"rom": "night.z5",
# NOTE(review): presumably the RNG seed handed to the interpreter for
# deterministic replay -- confirm against the consumer of this record.
"seed" : 0,
# '/'-separated list of commands that plays through the game.
"walkthrough" : "ne/s/e/open box/pull hose/take light/w/n/w/d/d/e/s/e/open door/s/listen/n/w/n/w/u/e/s/push fountain/open panel/turn on light/go panel/go panel/e/z/z/follow mouse/follow mouse/follow mouse/follow mouse/follow mouse/get printer/ask gnome about prinout/w/x sign/w/w/w/e/e/w/u/n/w/u/e/sw/click ftp/click if-archive/click designers_manual/click lpr/take manual/ne/w/d/e/s/d/z/z/z/z/z/follow mouse/follow mouse/follow mouse/follow mouse/follow mouse/follow mouse/give manual/take printer/w/w/w/e/e/w/u/n/w/u/e/sw/put printer on table",
# ';'-separated action templates accepted by the game's parser; '/'
# separates synonymous words and OBJ marks an object slot.
"grammar" : "allhints;allpuzzle/puzzles;awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;carry/get/hold/take inventory;carry/get/hold/take off;carry/get/hold/take out;close/cover/shut up;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;go/leave/run/walk;hear/listen;help;hint/hints;hints off;hints on;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;off;on;places;pray;restart;restore;review;save;score;script;script off;script on;short/superbrie;sing;smell/sniff;sorry;stand/carry/get/hold/take up;think;verify;version;wait/z;wave;xyzzy;y/yes;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/get/hold/take OBJ;carry/get/hold/take off OBJ;chase/follow/pursue/trail OBJ;chase/follow/pursue/trail after OBJ;check/describe/examine/watch/x OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;click OBJ;click on OBJ;climb/scale OBJ;climb/scale over OBJ;climb/scale up OBJ;close/cover/shut OBJ;cross/enter/go/leave/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;drink/sip/swallow at OBJ;drink/sip/swallow from OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;go/leave/run/walk through OBJ;go/leave/run/walk/carry/get/hold/take into OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look behind OBJ;l/look in OBJ;l/look inside OBJ;l/look into OBJ;l/look through OBJ;l/look under OBJ;lie/sit/go/leave/run/walk inside 
OBJ;lie/sit/go/leave/run/walk/carry/get/hold/take in OBJ;lie/sit/stand/carry/get/hold/take on OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;smell/sniff OBJ;squash/squeeze OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;unfold OBJ;wave OBJ;adjust/set OBJ to OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/get/hold/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;click OBJ on OBJ;dig OBJ with OBJ;discard/drop/throw OBJ against OBJ;discard/drop/throw OBJ at OBJ;discard/drop/throw OBJ down OBJ;discard/drop/throw/insert/put OBJ in OBJ;discard/drop/throw/insert/put OBJ into OBJ;discard/drop/throw/put OBJ on OBJ;discard/drop/throw/put OBJ onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ into OBJ;empty OBJ on OBJ;empty OBJ onto OBJ;empty OBJ to OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;lock OBJ with OBJ;put OBJ inside OBJ;remove/carry/get/hold/take OBJ from OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
# NOTE(review): appears to be the number of leading characters the parser
# treats as significant in a word -- TODO confirm with the consumer.
"max_word_length" : 9
}
# Game-configuration record for "omniquest".
omniquest = {
# Short identifier for the game.
"name": "omniquest",
# Z-machine story file this record refers to.
"rom": "omniquest.z5",
# NOTE(review): presumably the RNG seed handed to the interpreter for
# deterministic replay -- confirm against the consumer of this record.
"seed" : 0,
# '/'-separated list of commands that plays through the game.
"walkthrough" : "x tree/e/s/sw/take match/ne/se/take raincoat/open raincoat/put match in raincoat/close raincoat/nw/n/e/e/n/take shovel/s/w/nw/wear snorkel/n/n/n/dig earth/s/push rock/pull torch/turn on lamp/se/ne/se/take sword/n/take parka/e/dig ground/w/s/se/se/open raincoat/give match to man/read scroll/sw/se/put on parka/se/fish/fish/x boot/nw/sw/take rice/make sushi/n/n/nw/w/give sushi to samurai/w/x cage/put crystal in space/take wings/e/e/ne/nw/n/ne/n/play organ/s/sw/w/w/wear wings/fly",
# ';'-separated action templates accepted by the game's parser; '/'
# separates synonymous words and OBJ marks an object slot.
"grammar" : "actions;actions off;actions on;awake/awaken/wake;awake/awaken/wake up;bait/fish;bother/curses/darn/drat;brief/normal;carry/hold/take inventory;changes;changes off;changes on;daemons/timers;daemons/timers off;daemons/timers on;damn/fuck/shit/sod;diag/diagnose;die/q/quit;dive/swim;exit/out/outside/stand;fly;footnote;full/fullscore;full/fullscore score;geomancer;geomancer is cool/great/awesome;geomancer is great/awesome in bed;get out/off/up;hear/listen;hint/help;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;leave/go/run/walk;long/verbose;make sushi;messages/routines;messages/routines off;messages/routines on;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;play/hop/jump/skip;popcpr/pop/cprpop/cpr;pray;random;recording;recording off;recording on;replay;restart;restore;save;scope;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;showobj;sing;smell/sniff;sorry;stand up;think;trace;trace off;trace on;tree;use rod;use shovel;verify;version;wait/z;wave;what is a grue;who am i;who/what are you;y/yes;yell/scream;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;bait OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn OBJ;buy/purchase OBJ;carry/hold/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cross/enter/go/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;ex/read/check/describe/examine/watch/x OBJ;feel/fondle/grope/touch OBJ;fill OBJ;footnote OBJ;geomancer OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;hear/listen 
OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;lift/open/uncover/undo/unwrap OBJ;light OBJ;make OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;play OBJ;play with OBJ;pull on OBJ;pull/drag OBJ;purloin OBJ;put OBJ down;put down OBJ;put on OBJ;read OBJ;rotate/screw/turn/twist/unscrew OBJ;scope OBJ;search OBJ;showobj OBJ;showverb OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;talk/speak to OBJ;taste OBJ;trace OBJ;tree OBJ;use OBJ;wave OBJ;what OBJ;you OBJ;abstract OBJ to OBJ;adjust/set OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;carry/hold/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;light/burn OBJ with OBJ;lock OBJ with OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/hold/take OBJ from OBJ;speak/answer/say/shout OBJ to OBJ;tell OBJ about OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
# NOTE(review): appears to be the number of leading characters the parser
# treats as significant in a word -- TODO confirm with the consumer.
"max_word_length" : 9
}
# Game-configuration record for "partyfoul".
partyfoul = {
# Short identifier for the game.
"name": "partyfoul",
# Z-machine story file this record refers to (z8 format here, unlike the
# z5 files used by most other entries).
"rom": "partyfoul.z8",
# NOTE(review): presumably the RNG seed handed to the interpreter for
# deterministic replay -- confirm against the consumer of this record.
"seed" : 0,
# '/'-separated list of commands that plays through the game.
"walkthrough" : "l/give drink to frank/look at frank/look at ron/w/z/take purse/e/n/take jar/put jar in purse/w/l/take knife/put knife in purse/e/l/take peanut butter/s/w/take knife/spread peanut butter on frank/e/n/open door/l/open closet/take hair dryer/plug hair dryer into wall/turn on hair dryer/turn on space heater/drop hair dryer/e/nw/plug toaster into wall/turn on toaster/e/z/z/l/take napkin/s/order drink/take celery/wipe celery with napkin/put celery in purse/n/take peanut butter/w/take knife/put peanut butter on celery/put raisins on celery/give celery to barb/z/z/z",
# ';'-separated action templates accepted by the game's parser; '/'
# separates synonymous words and OBJ marks an object slot.
"grammar" : "awake/awaken/wake;awake/awaken/wake up;blow my nose;blow nose;blow your nose;bother/curses/darn/drat;carry/hold/take inventory;chuckle/laugh;cringe/swear/pout/sigh;damn/fuck/shit;exit/leave/out/stand;get out/off/up;go/run/walk;hocus pocus;hop/jump/skip;i/inv/inventory;info/about/hints/hint/help;l/look;listen;long/verbose;magic word;magic words;magic/pocus/hocus/abracadab/plough/frotz/plover/sneeze/xyzzy;nap/sleep;no;normal/brief;notify;notify off;notify on;pray;pronouns/nouns;q/quit;remember;restart;restore;save;score;short/superbrie;sing;smell/sniff;sorry;stand up;stomp/yell/scream/answer/say/shout/speak;think;think harder;transcrip/script;transcrip/script off;transcrip/script on;tutorial mode;tutorial mode off;tutorial mode on;tutorial off;tutorial on;verify;version;wait/z;wave;y/yes;yawn;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;burn/light OBJ;buy/purchase OBJ;carry/hold/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;converse with OBJ;create/make OBJ;create/make a OBJ;create/make the OBJ;cross/enter/go/run/walk OBJ;discard/drop/throw OBJ;disconnec/unplug OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;dry OBJ;eat OBJ;embrace/hug/kiss OBJ;feel/touch OBJ;flirt with OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;go/run/walk into/in/inside/through OBJ;hear OBJ;kick/throttle/smother/attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;knock over OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;listen to OBJ;mess up OBJ;mess/tilt/disturb OBJ;open/uncover/unwrap OBJ;order/buy/purchase OBJ;order/buy/purchase a OBJ;pick OBJ up;pick up OBJ;play OBJ;pour/spill OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;rotate/screw/turn/twist/unscrew OBJ;search 
OBJ;shoving/clear/move/press/push/shift OBJ;sit on OBJ;sit on top of OBJ;sit on/in/inside OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;talk OBJ;talk to OBJ;taste OBJ;un plug OBJ;wave OBJ;wink at OBJ;adjust/set OBJ to OBJ;aim/point OBJ at OBJ;aim/point OBJ towards OBJ;answer/say/shout/speak OBJ to OBJ;apply OBJ to OBJ;apply/paint/spread/smear OBJ onto OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;carry/hold/take OBJ off OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ with OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe down OBJ with OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe up OBJ with OBJ;connect OBJ to OBJ;connect/plug OBJ into OBJ;consult OBJ on/about OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;draw on OBJ with OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;paint/spread/smear OBJ on OBJ;plug in OBJ OBJ;plug in OBJ into OBJ;plug in OBJ to OBJ;plug/plug OBJ in OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/hold/take OBJ from OBJ;tell OBJ about OBJ;unlock/open/uncover/unwrap OBJ with OBJ;",
# NOTE(review): appears to be the number of leading characters the parser
# treats as significant in a word -- TODO confirm with the consumer.
"max_word_length" : 9
}
# Game-configuration record for "pentari".
pentari = {
# Short identifier for the game.
"name": "pentari",
# Z-machine story file this record refers to.
"rom": "pentari.z5",
# NOTE(review): presumably the RNG seed handed to the interpreter for
# deterministic replay -- confirm against the consumer of this record.
"seed" : 0,
# '/'-separated list of commands that plays through the game.
"walkthrough" : "north/north/in/city/east/covert/south/smash seal/north/north/north/south/east/get in floor/up/north/open towel/north/south/south/west/west/put sword down/take dagger/north/take chest/south/east/put all down/south/south/north/north/take chest/put all down/north/enter/take scroll/get up/south/fwoosh/take emerald/put dagger in chest/west/take sword/hit elf/take emerald/put all on box/l",
# ';'-separated action templates accepted by the game's parser; '/'
# separates synonymous words and OBJ marks an object slot.
"grammar" : "awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;carry/hold/take inventory;city;covert;damn/fuck/shit/sod;defiant;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;fwoosh;get out/off/up;hear/listen;help;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;info;l/look;leave/go/run/walk;long/verbose;luminus;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;restart;restore;save;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;sing;smell/sniff;sorry;stand up;think;verify;version;wait/z;wave;y/yes;adjust/set OBJ;attach/fasten/fix/tie OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/hold/take off OBJ;chase/follow/pursue/trail OBJ;chase/follow/pursue/trail after OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cross/enter/go/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;fold OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;shatter/attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck 
OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;unfold OBJ;wave OBJ;adjust/set OBJ to OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/hold/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/hold/take OBJ from OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
# NOTE(review): appears to be the number of leading characters the parser
# treats as significant in a word -- TODO confirm with the consumer.
"max_word_length" : 9
}
# Game-metadata record "planetfall" (z-machine story file planetfall.z3).
# Field schema:
#   name            -- short identifier for this game entry
#   rom             -- z-machine story file to load
#   seed            -- RNG seed (0; presumably fixed so the walkthrough
#                      replays deterministically -- confirm against consumer)
#   walkthrough     -- '/'-separated command sequence that plays the game through
#   grammar         -- ';'-separated action templates; '/' joins verb synonyms
#                      and "OBJ" marks a noun-phrase slot
#   max_word_length -- significant word length for the game's parser
#                      (presumably 6 because v3 z-machine dictionaries truncate
#                      words to 6 characters -- TODO confirm)
planetfall = {
"name": "planetfall",
"rom": "planetfall.z3",
"seed" : 0,
# Walkthrough adapted from http://mirror.ifarchive.org/if-archive/solutions/jgunness.zip
"walkthrough": "WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/W/GET IN WEB/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/GET OUT OF WEB/GET KIT/OPEN DOOR/OUT/U/U/U/U/N/NE/E/E/E/S/S/S/SW/GET BAR/NE/N/N/N/N/HOLD BAR OVER CREVICE/DROP BAR/S/W/W/UNLOCK PADLOCK WITH KEY/REMOVE PADLOCK/DROP IT AND KIT/OPEN DOOR/N/DROP KEY AND BRUSH/GET LADDER/S/E/E/N/N/DROP LADDER/EXTEND LADDER/PUT LADDER ACROSS RIFT/N/W/OPEN DESK/GET KITCHEN CARD AND UPPER CARD/W/OPEN DESK/GET SHUTTLE CARD/E/E/S/S/S/W/W/GET KIT/OPEN IT/EAT RED GOO/DROP KIT/S/GET CANTEEN/SLIDE KITCHEN CARD THROUGH SLOT/S/EXAMINE MACHINE/OPEN CANTEEN/PUT IT IN NICHE/PRESS BUTTON/GET CANTEEN/CLOSE IT/N/DROP KITCHEN CARD/N/E/E/E/PRESS BLUE BUTTON/WAIT/WAIT/N/SLIDE UPPER CARD THROUGH SLOT/PRESS UP BUTTON/WAIT/WAIT/S/NE/SW/N/PRESS DOWN BUTTON/WAIT/WAIT/S/W/S/S/S/SW/GET FLASK/E/PUT FLASK UNDER SPOUT/PRESS GRAY BUTTON/GET FLASK/E/OPEN FLOYD/TURN ON FLOYD/NW/N/N/N/E/N/SLIDE UPPER CARD THROUGH SLOT/PRESS UP BUTTON/WAIT/WAIT/S/NE/POUR FLUID IN HOLE/SW/N/PRESS DOWN BUTTON/WAIT/WAIT/S/W/W/S/LIE DOWN/WAIT/STAND UP/GET ALL/N/W/GET KIT/EAT GREEN GOO/E/E/DROP KIT/S/S/S/S/PUT FLASK UNDER SPOUT/PRESS BLUE BUTTON/GET FLASK/N/N/N/N/E/N/SLIDE UPPER CARD THROUGH SLOT/PRESS UP BUTTON/WAIT/WAIT/S/NE/POUR FLUID IN HOLE/SW/N/PRESS DOWN BUTTON/WAIT/WAIT/S/W/S/S/S/S/PUT FLASK UNDER SPOUT/PRESS brown button/GET FLASK/N/N/N/N/E/N/SLIDE UPPER CARD THROUGH SLOT/PRESS UP BUTTON/WAIT/WAIT/S/NE/POUR FLUID IN HOLE/SW/N/PRESS DOWN BUTTON/WAIT/WAIT/S/W/DROP FLASK/GET KIT/EAT BROWN GOO/DROP KIT/S/E/DROP UPPER CARD/GET BOX/W/S/S/SW/GET PLIERS/NE/N/N/N/E/PRESS RED BUTTON/WAIT/WAIT/WAIT/S/SLIDE LOWER CARD THROUGH SLOT/PRESS DOWN BUTTON/WAIT/WAIT/WAIT/N/E/S/E/SLIDE SHUTTLE CARD THROUGH SLOT/OPEN CANTEEN/DRINK LIQUID/PRESS LEVER/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/PULL LEVER/PULL LEVER/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/W/N/E/E/NE/N/TELL FLOYD TO GO NORTH/TELL FLOYD TO GET SHINY BOARD/S/E/N/OPEN PANEL/REMOVE SECOND 
BOARD/DROP IT/INSERT SHINY BOARD IN PANEL/S/DROP CANTEEN AND SHUTTLE CARD/W/NW/GET BOTTLE/OPEN IT/DRINK MEDICINE/DROP BOTTLE/SE/E/E/N/OPEN CUBE/REMOVE FUSED BEDISTOR WITH PLIERS/DROP IT AND PLIERS/INSERT GOOD BEDISTOR IN CUBE/S/S/S/E/S/OPEN LAB UNIFORM/GET TELEPORTATION CARD AND BATTERY/N/W/N/E/SLIDE TELEPORTATION CARD THROUGH SLOT/PRESS 2/W/W/S/S/S/SW/REMOVE OLD BATTERY/DROP IT/INSERT NEW BATTERY IN LASER/GET LASER/NE/N/N/N/E/E/SLIDE TELEPORTATION CARD THROUGH SLOT/PRESS 3/W/S/S/N/E/OPEN BIO-LOCK DOOR/SE/E/LOOK THROUGH WINDOW/OPEN DOOR/CLOSE DOOR/WAIT/OPEN DOOR/CLOSE DOOR/DROP BOX/GET MINIATURIZATION CARD/W/OPEN DOOR/W/SW/EXAMINE OUTPUT/S/SLIDE MINIATURIZATION CARD THROUGH SLOT/TYPE 384/E/N/N/TURN DIAL TO 1/LOOK INTO RELAY/BURN SPECK/BURN SPECK/BURN SPECK/BURN SPECK/BURN SPECK/BURN SPECK/BURN SPECK/FIRE LASER/FIRE LASER/S/THROW LASER INTO VOID/S/W/N/OPEN DESK/GET MASK/WEAR IT/PRESS EMERGENCY BUTTON/OPEN DOOR/W/OPEN LAB DOOR/W/W/OPEN DOOR/W/W/W/S/S/PRESS BUTTON/WAIT/WAIT/WAIT/N",
"grammar" : "affirm/y/yes;again/g;answer/reply;bathe/swim/wade;brief;climb;curse/damn/fuck/krip/megakr/shit/trot/trotti;diagno;dunno/maybe/possib;enter;escape;exit;fly;gaze/l/look/stare;grin/smile;hello/hi;help/hint/hints;hop/skip;i/invent;jump/leap;lay/lie/reclin/sit;leave;negati/no;q/quit;restar;restor;save;say/talk;score;scream/shout/yell;script;sleep;sneeze/zork;stand;super/superb;t/time;unscri;verbos;versio;wait/z;activa OBJ;affirm/y/yes OBJ;aim/gestur/point at OBJ;aim/gestur/point to OBJ;answer/reply OBJ;ask/tell OBJ;attack/dispat/fight/hit/hurt/injure/kill/murder/slay/strike OBJ;awake/startl/surpri/wake OBJ;awake/startl/surpri/wake up OBJ;bathe/swim/wade OBJ;bathe/swim/wade down OBJ;bathe/swim/wade in OBJ;bathe/swim/wade to OBJ;bathe/swim/wade up OBJ;blast/burn/fire/shoot/zap OBJ;blast/burn/fire/shoot/zap at OBJ;board OBJ;brandi/wave OBJ;break/damage/destro/smash OBJ;break/damage/destro/smash down OBJ;brush/clean/mop/polish/scrub/swab OBJ;brush/clean/mop/polish/scrub/swab up OBJ;carry/get/hold/take OBJ;carry/get/hold/take off OBJ;carry/get/hold/take out OBJ;chase/follow/pursue OBJ;chuck/hurl/throw/toss OBJ;climb OBJ;climb in OBJ;climb on OBJ;climb to OBJ;climb up OBJ;climb/go/procee/run/walk down OBJ;climb/go/procee/run/walk with OBJ;close/collap/fold/shorte OBJ;descri/examin/what/whats OBJ;descri/examin/what/whats on OBJ;descri/examin/what/whats/gaze/l/look/stare in OBJ;disemb OBJ;drink/eat/swallo OBJ;drop/releas OBJ;empty OBJ;enter OBJ;exit OBJ;extend/length/open/unfold OBJ;extend/length/open/unfold up OBJ;feel/pat/pet/rub/touch OBJ;find/see/seek/where OBJ;fix/repair OBJ;flush OBJ;fly OBJ;gaze/l/look/stare OBJ;gaze/l/look/stare around OBJ;gaze/l/look/stare at OBJ;gaze/l/look/stare behind OBJ;gaze/l/look/stare down OBJ;gaze/l/look/stare on OBJ;gaze/l/look/stare out OBJ;gaze/l/look/stare under OBJ;gaze/l/look/stare with OBJ;gaze/l/look/stare/rummag/search/sift/sort for OBJ;go/procee/run/walk OBJ;go/procee/run/walk around OBJ;go/procee/run/walk in 
OBJ;go/procee/run/walk out OBJ;go/procee/run/walk to OBJ;go/procee/run/walk up OBJ;grin/smile at OBJ;hello/hi OBJ;insert/place/put/stuff OBJ;insert/place/put/stuff down OBJ;insert/place/put/stuff on OBJ;jump/leap across OBJ;jump/leap from OBJ;jump/leap in OBJ;jump/leap off OBJ;jump/leap over OBJ;kick OBJ;kiss OBJ;knock/rap at OBJ;knock/rap down OBJ;knock/rap on OBJ;lay/lie/reclin/sit down OBJ;lay/lie/reclin/sit/carry/get/hold/take in OBJ;lay/lie/reclin/sit/carry/get/hold/take on OBJ;leave OBJ;listen to OBJ;lubric/oil OBJ;molest/rape OBJ;move OBJ;move/pull/tug/press/push down OBJ;move/pull/tug/press/push up OBJ;pick up OBJ;play OBJ;play with OBJ;pour/spill OBJ;press/push OBJ;press/push agains OBJ;press/push off OBJ;press/push on OBJ;pull/tug OBJ;pull/tug on OBJ;reach for OBJ;reach in OBJ;read/skim OBJ;remove OBJ;rummag/search/sift/sort OBJ;rummag/search/sift/sort in OBJ;rummag/search/sift/sort with OBJ;salute OBJ;say/talk to OBJ;scold OBJ;scream/shout/yell at OBJ;set/turn OBJ;set/turn off OBJ;set/turn on OBJ;shake OBJ;smell/sniff OBJ;stand on OBJ;stand/carry/get/hold/take up OBJ;step in OBJ;step on OBJ;taste OBJ;type OBJ;type in OBJ;wear OBJ;aim/gestur/point OBJ at OBJ;apply OBJ to OBJ;ask/tell OBJ for OBJ;attack/dispat/fight/hit/hurt/injure/kill/murder/slay OBJ with OBJ;blast/burn/fire/shoot/zap OBJ at OBJ;blast/burn/fire/shoot/zap OBJ with OBJ;brandi/wave OBJ at OBJ;break/damage/destro/smash OBJ with OBJ;brush/clean/mop/polish/scrub/swab OBJ with OBJ;carry/get/hold/take OBJ from OBJ;carry/get/hold/take OBJ off OBJ;carry/get/hold/take OBJ out OBJ;chuck/hurl/throw/toss OBJ at OBJ;chuck/hurl/throw/toss OBJ in OBJ;chuck/hurl/throw/toss OBJ off OBJ;chuck/hurl/throw/toss OBJ on OBJ;chuck/hurl/throw/toss OBJ over OBJ;chuck/hurl/throw/toss OBJ with OBJ;donate/feed/give/hand/offer OBJ OBJ;donate/feed/give/hand/offer OBJ to OBJ;drop/releas OBJ down OBJ;drop/releas OBJ in OBJ;drop/releas OBJ on OBJ;empty OBJ in OBJ;extend/length/open/unfold OBJ with 
OBJ;feel/pat/pet/rub/touch OBJ to OBJ;insert/place/put/stuff OBJ on OBJ;insert/place/put/stuff/slide/swing/go/procee/run/walk OBJ with OBJ;lay/lie/reclin/sit/insert/place/put/stuff OBJ in OBJ;lay/lie/reclin/sit/insert/place/put/stuff OBJ under OBJ;lay/lie/reclin/sit/insert/place/put/stuff/carry/get/hold/take/brandi/wave OBJ near OBJ;lay/lie/reclin/sit/insert/place/put/stuff/carry/get/hold/take/brandi/wave OBJ over OBJ;lay/lie/reclin/sit/insert/place/put/stuff/slide/swing OBJ across OBJ;lock OBJ with OBJ;lubric/oil OBJ with OBJ;pick up OBJ with OBJ;play OBJ with OBJ;pour/spill OBJ in OBJ;pour/spill OBJ on OBJ;pour/spill OBJ out OBJ;pour/spill OBJ over OBJ;press/push/slide/swing OBJ under OBJ;pull/tug/feel/pat/pet/rub/touch/carry/get/hold/take/remove/attrac OBJ with OBJ;remove OBJ from OBJ;set/turn OBJ to OBJ;set/turn OBJ with OBJ;shake OBJ with OBJ;show OBJ to OBJ;strike OBJ with OBJ;unlock OBJ with OBJ;",
"max_word_length" : 6
}
# Game-metadata record "plundered" (z-machine story file plundered.z3).
# 'walkthrough' is a '/'-separated command script that completes the game;
# 'grammar' is a ';'-separated list of action templates ('/' joins verb
# synonyms, "OBJ" marks a noun-phrase slot); 'seed' is the RNG seed
# (presumably fixed so the walkthrough replays deterministically -- confirm
# against consumer); 'max_word_length' is the parser's significant word
# length (presumably 6 for v3 z-machine dictionaries -- TODO confirm).
plundered = {
"name": "plundered",
"rom": "plundered.z3",
"seed" : 0,
# Walkthrough adapted from http://mirror.ifarchive.org/if-archive/solutions/jgunness.zip
"walkthrough": "Z/STAND UP/Z/Z/GET COFFER/Z/HIT CRULLEY WITH COFFER/Z/Z/READ MISSIVE/YES/Z/Z/STAND UP/Z/Z/Z/N/OPEN CUPBOARD/IN/GET CLOTHES/REMOVE FROCK/TEAR IT/WEAR BREECHES/WEAR SHIRT/OUT/Z/D/N/N/GET BOTTLE/S/S/OPEN COFFER/GET INVITATION/U/S/SMASH WINDOW WITH COFFER/S/CLIMB ROPE/U/U/U/U/N/N/EXAMINE BARRELS/DIP RAG IN WATER/OPEN HATCH/D/THROW RAG AT FIRE/U/N/EXAMINE WINCH/RAISE LEVER/IN/GET DAGGER/OUT/S/S/SIT IN CASK/CUT LINE WITH DAGGER/CUT LINE WITH DAGGER/GET PORK/Z/Z/GET OUT OF CASK/W/N/W/N/E/Z/GET GARTER/W/S/NE/U/REMOVE CLOTHES/GET GOWN/WEAR IT/N/E/D/SHOW INVITATION TO BUTLER/S/Z/Z/Z/Z/E/N/N/EXAMINE BOOKCASE/GET TREATISE/GET HAT/PRESS ISLAND/N/D/E/E/GET KEY/GET HORN/W/W/S/EXAMINE BOTTLE/READ LABEL/PUT LAUDANUM ON PORK/GIVE PORK TO CROCODILE/Z/Z/S/W/UNLOCK DOOR WITH KEY/OPEN IT/N/GIVE GARTER TO PAPA/S/E/N/N/U/S/S/W/S/Z/Z/Z/N/Z/Z/Z/N/U/E/OPEN DOOR/N/DRINK WINE/POUR WINE IN GREEN GOBLET/POUR WINE IN BLUE GOBLET/PUT LAUDANUM IN BLUE GOBLET/GET SPICES/BLOW SPICES AT LAFOND/GET TRAY/WAVE IT IN MOONLIGHT/S/GIVE BLUE GOBLET TO BUTLER/Z/Z/Z/Z/S/W/D/E/N/Open portrait/N/D/S/S/GET RAPIER/ATTACK CRULLEY/AGAIN/WAKE FALCON WITH SALTS/UNLOCK CHAIN WITH BROOCH/N/N/U/S/S/W/U/E/S/UNTIE ROPE/CLIMB DOWN ROPE/GET ALL/S/S/S/Z/Z/YES/GET PISTOL/LOAD IT/SHOOT CRULLEY",
"grammar" : "aaieee/scream/shout/shriek/yell;applau/clap;brief;cry/gasp/sob/weep;debark/disemb;depart/exit/withdr;disrob/strip/undres;dive/jump/leap/vault;doze/nap/sleep/snooze;dress;duck/go/wade;enter;greet/hello/hi;help/hint/warn;hide;i/invent;l/look/watch;laugh/smile;leave;listen;moan/sigh;nay/never/no/nope;nod/ok/okay/sure/y/yes/yup;procee/run/sidle/step/walk;q/quit;rescue/save;restar;restor;rise/stand;score;script;smell/sniff/whiff;super/superb;swim;thank/thanks;unscri;verbos;versio;wait/z;aaieee/scream/shout/shriek/yell at OBJ;aaieee/scream/shout/shriek/yell to OBJ;aim/point/shine/signal OBJ;applau/clap OBJ;approa OBJ;ask for OBJ;attack/bash/fight/hit/kill/murder/punch/slap/strike/whack OBJ;awake/revive/rouse/wake OBJ;awake/revive/rouse/wake up OBJ;bind/fetter/hobble/manacl/shackl OBJ;bite OBJ;blow OBJ;blow/dip/hang/insert/lay/place/put/sprink/stick out OBJ;board/mount/ride OBJ;boost/lift/raise OBJ;bounce/shake OBJ;bow/curtse/curtsy to OBJ;break/crush/damage/demoli/destro/smash/trampl/wreck OBJ;break/crush/damage/demoli/destro/smash/trampl/wreck down OBJ;break/crush/damage/demoli/destro/smash/trampl/wreck out OBJ;bribe OBJ;browse/read/skim OBJ;browse/read/skim throug OBJ;burn/melt OBJ;burn/melt up OBJ;carry/get/grab/hold/take OBJ;carry/get/grab/hold/take down OBJ;carry/get/grab/hold/take dresse OBJ;carry/get/grab/hold/take drunk OBJ;carry/get/grab/hold/take off OBJ;carry/get/grab/hold/take on OBJ;carry/get/grab/hold/take out OBJ;carry/get/grab/hold/take undres OBJ;chase/follow/pursue OBJ;circle OBJ;clean/wash/wipe OBJ;clean/wash/wipe off OBJ;clean/wash/wipe up OBJ;climb/crawl/scale OBJ;climb/crawl/scale off OBJ;climb/crawl/scale on OBJ;climb/crawl/scale out OBJ;climb/crawl/scale over OBJ;climb/crawl/scale up OBJ;climb/crawl/scale/carry/get/grab/hold/take in OBJ;climb/crawl/scale/duck/go/wade under OBJ;climb/crawl/scale/duck/go/wade/dive/jump/leap/vault/procee/run/sidle/step/walk throug OBJ;climb/crawl/scale/slide down OBJ;close/shut OBJ;close/shut off 
OBJ;close/shut up OBJ;cut/slice/stab OBJ;dance/piroue/twirl OBJ;dance/piroue/twirl with OBJ;deacti/douse/exting OBJ;debark/disemb OBJ;depart/exit/withdr OBJ;descen OBJ;descri/examin/inspec/observ/study/x OBJ;devour/eat/nibble/taste OBJ;dig in OBJ;dig throug OBJ;dip/hang/insert/lay/place/put/sprink/stick down OBJ;dip/hang/insert/lay/place/put/sprink/stick on OBJ;disrob/strip/undres OBJ;dive/jump/leap/vault down OBJ;dive/jump/leap/vault off OBJ;dive/jump/leap/vault out OBJ;dive/jump/leap/vault over OBJ;dive/jump/leap/vault overbo OBJ;dive/jump/leap/vault to OBJ;dive/jump/leap/vault up OBJ;dive/jump/leap/vault/dive/jump/leap/vault across OBJ;don/wear OBJ;doze/nap/sleep/snooze in OBJ;doze/nap/sleep/snooze on OBJ;dress OBJ;drink/quaff/sip/swallo OBJ;drink/quaff/sip/swallo from OBJ;drop/dump OBJ;dry/squeez/wring OBJ;duck/go/wade OBJ;duck/go/wade/come with OBJ;duck/go/wade/dive/jump/leap/vault/procee/run/sidle/step/walk in OBJ;duck/go/wade/hide/rise/stand/procee/run/sidle/step/walk behind OBJ;duck/go/wade/move/pull/procee/run/sidle/step/walk around OBJ;duck/go/wade/procee/run/sidle/step/walk away OBJ;duck/go/wade/procee/run/sidle/step/walk down OBJ;duck/go/wade/procee/run/sidle/step/walk to OBJ;duck/go/wade/procee/run/sidle/step/walk up OBJ;embrac/hug OBJ;empty OBJ;empty out OBJ;enter OBJ;faint/swoon OBJ;feed OBJ;feel/grip/rub/smooth/touch throug OBJ;fiddle/joggle/wiggle with OBJ;fill/load OBJ;find/locate/seek OBJ;fire/shoot/sling OBJ;fire/shoot/sling at OBJ;fix/repair/sharpe/whet OBJ;flick/flip/rotate/set/spin/switch/turn OBJ;flick/flip/rotate/set/spin/switch/turn around OBJ;flick/flip/rotate/set/spin/switch/turn off OBJ;flick/flip/rotate/set/spin/switch/turn on OBJ;free/loosen/unatta/unknot/untie/unweav OBJ;greet/hello/hi OBJ;hear OBJ;help/hint/warn OBJ;hide under OBJ;hurl/throw/toss OBJ;hurl/throw/toss away OBJ;hurl/throw/toss overbo OBJ;i/invent love OBJ;kick OBJ;kiss OBJ;knock/pound/rap at OBJ;knock/pound/rap down OBJ;knock/pound/rap on OBJ;knock/pound/rap over 
OBJ;l/look/watch OBJ;l/look/watch around OBJ;l/look/watch at OBJ;l/look/watch behind OBJ;l/look/watch down OBJ;l/look/watch in OBJ;l/look/watch on OBJ;l/look/watch out OBJ;l/look/watch over OBJ;l/look/watch throug OBJ;l/look/watch to OBJ;l/look/watch up OBJ;l/look/watch/rummag/search for OBJ;l/look/watch/rummag/search under OBJ;laugh/smile at OBJ;launch OBJ;lean agains OBJ;leave OBJ;let go OBJ;lie down OBJ;lie in OBJ;lie on OBJ;light OBJ;listen to OBJ;lock OBJ;lower OBJ;make love OBJ;make out OBJ;marry/wed OBJ;move/pull OBJ;move/pull down OBJ;move/pull in OBJ;move/pull up OBJ;oar/row OBJ;oar/row to OBJ;open OBJ;open up OBJ;pick OBJ;pick up OBJ;play with OBJ;pour/spill OBJ;press/push OBJ;press/push down OBJ;press/push on OBJ;press/push/boost/lift/raise up OBJ;procee/run/sidle/step/walk OBJ;procee/run/sidle/step/walk across OBJ;procee/run/sidle/step/walk out OBJ;procee/run/sidle/step/walk over OBJ;rape OBJ;reach in OBJ;remove OBJ;rescue/save OBJ;return OBJ;rip/tear OBJ;rip/tear up OBJ;rise/stand in OBJ;rise/stand on OBJ;rise/stand/carry/get/grab/hold/take up OBJ;roll/tip OBJ;rummag/search OBJ;rummag/search in OBJ;rummag/search throug OBJ;sink OBJ;sit down OBJ;sit in OBJ;sit on OBJ;smell/sniff/whiff OBJ;soak/wet OBJ;speak/talk to OBJ;stop OBJ;swim in OBJ;swing OBJ;swing down OBJ;swing from OBJ;swing on OBJ;tap on OBJ;tap/feel/grip/rub/smooth/touch OBJ;tell OBJ;thank/thanks OBJ;unbar/unlock OBJ;unroll OBJ;use OBJ;wave OBJ;wave at OBJ;what/what'/whats/who/whos OBJ;where/wheres/whithe OBJ;zzmgck OBJ;aim/point/shine/signal OBJ at OBJ;aim/point/shine/signal OBJ from OBJ;aim/point/shine/signal OBJ in OBJ;aim/point/shine/signal OBJ on OBJ;aim/point/shine/signal OBJ out OBJ;aim/point/shine/signal OBJ with OBJ;ask OBJ about OBJ;ask OBJ for OBJ;ask OBJ to OBJ;attach/fasten/moor/secure/tie/weave OBJ around OBJ;attach/fasten/moor/secure/tie/weave OBJ to OBJ;attach/fasten/moor/secure/tie/weave OBJ with OBJ;attach/fasten/moor/secure/tie/weave up OBJ with 
OBJ;attack/bash/fight/hit/kill/murder/punch/slap/strike/whack OBJ with OBJ;awake/revive/rouse/wake OBJ with OBJ;blow OBJ at OBJ;blow OBJ on OBJ;bounce/shake OBJ with OBJ;break/crush/damage/demoli/destro/smash/trampl/wreck OBJ with OBJ;bribe OBJ with OBJ;browse/read/skim OBJ throug OBJ;browse/read/skim OBJ with OBJ;burn/melt OBJ in OBJ;carry/get/grab/hold/take OBJ from OBJ;carry/get/grab/hold/take OBJ in OBJ;carry/get/grab/hold/take OBJ off OBJ;carry/get/grab/hold/take OBJ on OBJ;carry/get/grab/hold/take OBJ out OBJ;carry/get/grab/hold/take OBJ with OBJ;close/shut OBJ on OBJ;cover OBJ with OBJ;cut/slice/stab OBJ with OBJ;cut/slice/stab throug OBJ with OBJ;dip/hang/insert/lay/place/put/sprink/stick OBJ around OBJ;dip/hang/insert/lay/place/put/sprink/stick OBJ behind OBJ;dip/hang/insert/lay/place/put/sprink/stick OBJ over OBJ;dip/hang/insert/lay/place/put/sprink/stick OBJ throug OBJ;drink/quaff/sip/swallo OBJ from OBJ;drop/dump OBJ down OBJ;drop/dump OBJ in OBJ;drop/dump OBJ throug OBJ;drop/dump/dip/hang/insert/lay/place/put/sprink/stick OBJ out OBJ;dry/squeez/wring OBJ from OBJ;dry/squeez/wring OBJ in OBJ;dry/squeez/wring OBJ on OBJ;dry/squeez/wring OBJ out OBJ;empty OBJ from OBJ;empty OBJ in OBJ;empty OBJ on OBJ;feed OBJ OBJ;feed OBJ with OBJ;feel/grip/rub/smooth/touch OBJ on OBJ;feel/grip/rub/smooth/touch OBJ with OBJ;fill/load OBJ in OBJ;fill/load OBJ with OBJ;find/locate/seek OBJ on OBJ;fire/shoot/sling OBJ at OBJ;fire/shoot/sling OBJ in OBJ;fire/shoot/sling OBJ with OBJ;flick/flip/rotate/set/spin/switch/turn OBJ to OBJ;give/hand/offer/presen OBJ OBJ;give/hand/offer/presen OBJ with OBJ;give/hand/offer/presen/pay/feed/pass OBJ to OBJ;hurl/throw/toss OBJ OBJ;hurl/throw/toss OBJ at OBJ;hurl/throw/toss OBJ down OBJ;hurl/throw/toss OBJ in OBJ;hurl/throw/toss OBJ on OBJ;hurl/throw/toss OBJ out OBJ;hurl/throw/toss OBJ over OBJ;hurl/throw/toss OBJ throug OBJ;hurl/throw/toss OBJ to OBJ;hurl/throw/toss OBJ with OBJ;leave/dip/hang/insert/lay/place/put/sprink/stick OBJ in 
OBJ;leave/drop/dump/dip/hang/insert/lay/place/put/sprink/stick OBJ on OBJ;light OBJ in OBJ;light OBJ on OBJ;light OBJ with OBJ;lock OBJ with OBJ;lower OBJ out OBJ;lower OBJ throug OBJ;move/pull OBJ to OBJ;muzzle OBJ with OBJ;oar/row OBJ OBJ;oar/row OBJ to OBJ;open OBJ with OBJ;pick OBJ with OBJ;pour/spill OBJ from OBJ;pour/spill OBJ in OBJ;pour/spill OBJ on OBJ;pour/spill OBJ out OBJ;press/push OBJ in OBJ;press/push OBJ on OBJ;press/push/dip/hang/insert/lay/place/put/sprink/stick OBJ under OBJ;press/push/move/pull OBJ OBJ;press/push/move/pull OBJ down OBJ;press/push/move/pull OBJ up OBJ;reflec OBJ in OBJ;remove OBJ from OBJ;return OBJ to OBJ;rip/tear OBJ in OBJ;rip/tear OBJ with OBJ;roll/tip/press/push OBJ to OBJ;show OBJ OBJ;show OBJ to OBJ;soak/wet OBJ in OBJ;soak/wet OBJ with OBJ;swing OBJ at OBJ;tell OBJ OBJ;tell OBJ about OBJ;unbar/unlock OBJ with OBJ;use OBJ on OBJ;wave OBJ at OBJ;wave OBJ in OBJ;wrap OBJ around OBJ;wrap OBJ in OBJ;",
"max_word_length" : 6
}
# Game-metadata record "reverb" (z-machine story file reverb.z5).
# 'walkthrough' is a '/'-separated command script that completes the game;
# 'grammar' is a ';'-separated list of action templates ('/' joins verb
# synonyms, "OBJ" marks a noun-phrase slot); 'seed' is the RNG seed
# (presumably fixed so the walkthrough replays deterministically -- confirm
# against consumer); 'max_word_length' is the parser's significant word
# length (presumably 9 for v5 z-machine dictionaries -- TODO confirm).
reverb = {
"name": "reverb",
"rom": "reverb.z5",
"seed" : 0,
"walkthrough" : "x counter/read note/get pizza box/sw/x note/s/w/s/x district attorney/z/open pizza box/search pizza/show file to district attorney/n/w/w/n/e/w/s/e/open window/unlock cabinet with key/open cabinet/get file/jump out window/kick second window/z/z/jill, hold me/jump/w/n/e/e/e/n/get pizza box/open pizza box/get bomb/s/put bomb in sewer/e/s/se/get spray/spray thug/kick thug/nw/n/z/z/s/sw/take hammer/take rope/ne/n/w/w/w/close pizza box/show box to guard/w/n/s/n/tie rope to rod/hold rope/jump/break glass with hammer/take axe/z/hit floor with axe",
"grammar" : "awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;breast/penis;brief/normal;bro/bud/dude/duder;carry/catch/hold/take inventory;die/q/quit;dive/swim;exit/out/outside/stand;fuck/damn/shit/sod;full/fullscore;full/fullscore score;get out/off/up;hear/listen;help;hint;hop/jump/skip;hop/jump/skip off;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;lame/suck;leave/go/run/walk;let go;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;plugh/xyzzy;pray;restart;restore;rule;save;score;script;script off;script on;short/superbrie;sing;smell/sniff;sorry;stand up;swing;think;verify;version;wait/z;wave;y/yes;adjust/set OBJ;attach/fasten/fix/tie OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/catch/hold/take off OBJ;cast/discard/drop/fling/heave/hurl/launch/lob/pitch/propel/toss OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cross/enter/go/run/walk OBJ;damn/shit/sod OBJ;detach/disconnec/untie OBJ;dig OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;fuck OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/catch/hold/take OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip in OBJ;hop/jump/skip into OBJ;hop/jump/skip off OBJ;hop/jump/skip off of OBJ;hop/jump/skip out OBJ;hop/jump/skip out of OBJ;hop/jump/skip over OBJ;hop/jump/skip through OBJ;kick/attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;knock OBJ;knock on OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;let go of OBJ;lie/sit on top of OBJ;lie/sit 
on/in/inside OBJ;open/uncover/undo/unwrap OBJ;pay for OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;release OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;shake OBJ;smell/sniff OBJ;spray OBJ;squash/squeeze OBJ;stand on OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;throw OBJ;wave OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ with OBJ;burn/light OBJ with OBJ;carry/catch/hold/take OBJ off OBJ;cast/discard/drop/fling/heave/hurl/launch/lob/pitch/propel/toss OBJ at/against/on/onto OBJ;cast/discard/drop/fling/heave/hurl/launch/lob/pitch/propel/toss OBJ in/into/down OBJ;cast/discard/drop/fling/heave/hurl/launch/lob/pitch/propel/toss/put OBJ on/onto OBJ;chop/cut/prune/slice OBJ with OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer OBJ OBJ;feed/give/offer OBJ to OBJ;feed/give/offer over OBJ to OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;pay OBJ OBJ;pay OBJ to OBJ;pay over OBJ to OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/catch/hold/take OBJ from OBJ;spray OBJ at OBJ;spray OBJ with OBJ;tell OBJ about OBJ;throw OBJ OBJ;throw OBJ against OBJ;throw OBJ at OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
# Game-metadata record "seastalker" (z-machine story file seastalker.z3).
# 'walkthrough' is a '/'-separated command script that completes the game;
# 'grammar' is a ';'-separated list of action templates ('/' joins verb
# synonyms, "OBJ" marks a noun-phrase slot); 'seed' is the RNG seed
# (presumably fixed so the walkthrough replays deterministically -- confirm
# against consumer); 'max_word_length' is the parser's significant word
# length (presumably 6 for v3 z-machine dictionaries -- TODO confirm).
seastalker = {
"name": "seastalker",
"rom": "seastalker.z3",
"seed" : 0,
"walkthrough" : "turn on videophone/adjust videophone/get microphone/turn on microphone/ask bly about problem/ask bly about monster/z/no/drop microphone/n/turn on computestor/ask computestor about videophone/e/open door/e/x electric panel/close circuit breaker/e/ask kemp about circuit breaker/w/w/s/s/w/take capsule/n/s/push test button/read sign/ask tip for tool/open access panel/enter access panel/fix voltage regulator/exit/close access panel/close hatch/insert capsule into reactor/close reactor/turn reactor on/fill tank/open gate/start engines/open throttle/push joystick east/x sonar/d/e/e/e/e/x sonar/e/e/e/e/x sonar/ne/ne/ne/ne/x sonar/ne/ne/ne/ne/x sonar/n/n/n/n/x sonar/ne/ne/ne/ne/x sonar/ne/ne/ne/ne/x sonar/nw/nw/n/n/turn on autopilot/set throttle to fast/z/yes/z/yes/dock/z/open hatch/exit scimitar/n/n/n/open air supply system with tool/get relay/screw relay into space/se/s/get bazooka/n/e/z/yes/ask about evidence/open box with tool/look in box/w/get wrench/open office door/z/yes/yes/yes/yes/yes/yes/yes/give magazine to doc/yes/yes/z/no/put dart on claw/s/put bazooka on claw/yes/no/ask tip about grid/yes/yes/yes/yes/z/no/e/n/n/n/n/n/n/n/se/s/s/s/s/z/no/d/take syringe/exit/n/n/w/n/w/ask doc about syringe/z/no/e/se/s/s/s/arrest bill/turn off electricity/turn on electricity/w/w/s/s/amy, leave/close roof/s/close hatch/fill tank/turn on engine/open throttle/s/e/se/z/z/yes/no/yes/no/no/yes/stop/nw/open throttle/e/e/z/aim bazooka at pod/shoot pod with bazooka/yes",
"grammar" : "10-/affirm/aye/ok/okay/okeh/okey/roger/sure/wilco/y/yeah/yes/yup;abando/debark/disemb/evacua/exit/leave;answer/reply/respon;bathe/swim/wade;brief;bye/good-/goodby;curse/damn/fuck/fudge/pee/piss/poo/shit/snot;diagno;dive/submer;editio/releas/revisi/versio;enter;explai/say/speak/state/talk;fast;gaze/l/look/peek/see/stare;greet/greeti/hello/hi;help;i/invent;lean/prop/stand;medium;nay/negati/no/nope;pause/save/suspen;q/quit;restar;restor/resume;rise;score;scream/shout/yell;script;slow;super/superb;surfac;thank/thanks;unscri;verbos;abando/debark/disemb/evacua/exit/leave OBJ;abando/debark/disemb/evacua/exit/leave from OBJ;activa/engage/start OBJ;activa/engage/start over OBJ;adjust/focus/tune OBJ;adjust/focus/tune in OBJ;analys/analyz/check/test/troubl OBJ;analys/analyz/check/test/troubl out OBJ;answer/reply/respon OBJ;arm OBJ;arrest/captur OBJ;ask/consul/inquir/questi OBJ;ask/consul/inquir/questi about OBJ;ask/consul/inquir/questi for OBJ;assaul/attack/fight/molest/rape/repel OBJ;assaul/attack/fight/molest/rape/repel off OBJ;awake/startl/surpri/wake OBJ;awake/startl/surpri/wake up OBJ;bathe/swim/wade in OBJ;bite/hit/hurt/injure/kick/punch/slap OBJ;blast/fire/shoot OBJ;blow/drain/empty OBJ;board/climb OBJ;board/climb throug OBJ;board/climb/carry/get/grab/hold/lead/remove/seize/steal/take in OBJ;board/climb/carry/get/grab/hold/lead/remove/seize/steal/take on OBJ;board/climb/drive/go/head/procee/run/steer/walk down OBJ;board/climb/drive/go/head/procee/run/steer/walk up OBJ;break/crush/damage/destro/smash OBJ;bring/fetch OBJ;brush/clean/wipe OBJ;bye/good-/goodby OBJ;call/phone/signal/summon OBJ;call/phone/signal/summon for OBJ;call/phone/signal/summon to OBJ;call/phone/signal/summon up OBJ;carry/get/grab/hold/lead/remove/seize/steal/take OBJ;carry/get/grab/hold/lead/remove/seize/steal/take off OBJ;carry/get/grab/hold/lead/remove/seize/steal/take out OBJ;carry/get/grab/hold/lead/remove/seize/steal/take rid OBJ;chase/follow/pursue/trace OBJ;chuck/hurl/throw/toss 
away OBJ;close/replac/reset OBJ;come OBJ;come to OBJ;come/drive/go/head/procee/run/steer/walk with OBJ;compar/match OBJ;consum/eat OBJ;crouch/hide on OBJ;crouch/hide/drive/go/head/procee/run/steer/walk/crawl in OBJ;curse/damn/fuck/fudge/pee/piss/poo/shit/snot OBJ;curse/damn/fuck/fudge/pee/piss/poo/shit/snot on OBJ;cut/interr OBJ;descri/examin/inspec/scruti/study/x OBJ;detect/find/locate OBJ;detect/find/locate out OBJ;diagno OBJ;dig/search OBJ;dig/search in OBJ;dig/search up OBJ;discus OBJ;dive/submer OBJ;dive/submer by OBJ;dive/submer under OBJ;dive/submer/rise to OBJ;dock OBJ;douse/exting OBJ;drink/imbibe/swallo OBJ;drive/go/head/procee/run/steer/walk/crawl OBJ;drive/go/head/procee/run/steer/walk/crawl around OBJ;drive/go/head/procee/run/steer/walk/crawl out OBJ;drive/go/head/procee/run/steer/walk/crawl throug OBJ;drive/go/head/procee/run/steer/walk/crawl to OBJ;drive/go/head/procee/run/steer/walk/crawl under OBJ;drop/lower OBJ;editio/releas/revisi/versio OBJ;enter OBJ;explai/say/speak/state/talk OBJ;explai/say/speak/state/talk with OBJ;explai/say/speak/state/talk/scream/shout/yell in OBJ;explai/say/speak/state/talk/scream/shout/yell to OBJ;explai/say/speak/state/talk/tell about OBJ;feel/pat/pet/rub/shade/touch OBJ;fill/flood OBJ;fix/repair OBJ;flip/shut/swing/switch/turn OBJ;flip/shut/swing/switch/turn down OBJ;flip/shut/swing/switch/turn in OBJ;flip/shut/swing/switch/turn off OBJ;flip/shut/swing/switch/turn on OBJ;flip/shut/swing/switch/turn to OBJ;flip/shut/swing/switch/turn up OBJ;free/unatta/uncuff/unfast/unhook/untie OBJ;gaze/l/look/peek/see/stare around OBJ;gaze/l/look/peek/see/stare at OBJ;gaze/l/look/peek/see/stare behind OBJ;gaze/l/look/peek/see/stare down OBJ;gaze/l/look/peek/see/stare in OBJ;gaze/l/look/peek/see/stare on OBJ;gaze/l/look/peek/see/stare out OBJ;gaze/l/look/peek/see/stare over OBJ;gaze/l/look/peek/see/stare throug OBJ;gaze/l/look/peek/see/stare under OBJ;gaze/l/look/peek/see/stare up OBJ;gaze/l/look/peek/see/stare/dig/search for 
OBJ;greet/greeti/hello/hi OBJ;hang up OBJ;help/pause/save/suspen OBJ;identi/name OBJ;insert/instal/load/mount/place/put/stuff down OBJ;insert/instal/load/mount/place/put/stuff on OBJ;kill/strang OBJ;kiss OBJ;knock/rap at OBJ;knock/rap down OBJ;knock/rap on OBJ;launch OBJ;lean/prop/stand/carry/get/grab/hold/lead/remove/seize/steal/take up OBJ;level off OBJ;level out OBJ;lift/raise OBJ;lift/raise up OBJ;light OBJ;listen at OBJ;listen to OBJ;lock OBJ;medita/relax/rest/stay/wait/z OBJ;medita/relax/rest/stay/wait/z for OBJ;medita/relax/rest/stay/wait/z till OBJ;medita/relax/rest/stay/wait/z until OBJ;move/pull/tug OBJ;nod at OBJ;nod to OBJ;open OBJ;open up OBJ;peal/ring OBJ;peruse/read/skim OBJ;pick OBJ;pick up OBJ;play OBJ;play with OBJ;press/push OBJ;press/push on OBJ;pull/tug on OBJ;pump out OBJ;revive OBJ;rise OBJ;rise by OBJ;scream/shout/yell for OBJ;screw in OBJ;send OBJ;send out OBJ;sit down OBJ;sit in OBJ;sit on OBJ;smell/sniff OBJ;smoke OBJ;stop OBJ;strike OBJ;surfac OBJ;tell OBJ;thank/thanks OBJ;unlock OBJ;use OBJ;aim/point OBJ OBJ;aim/point OBJ at OBJ;aim/point OBJ to OBJ;aim/point at OBJ with OBJ;analys/analyz/check/test/troubl OBJ for OBJ;analys/analyz/check/test/troubl OBJ in OBJ;analys/analyz/check/test/troubl OBJ on OBJ;apply/fit OBJ to OBJ;arrest/captur OBJ for OBJ;ask/consul/inquir/questi OBJ about OBJ;ask/consul/inquir/questi OBJ for OBJ;assaul/attack/fight/molest/rape/repel OBJ with OBJ;attach/fasten/secure/tie OBJ to OBJ;attach/fasten/secure/tie OBJ with OBJ;attach/fasten/secure/tie up OBJ to OBJ;attach/fasten/secure/tie up OBJ with OBJ;bite/hit/hurt/injure/kick/punch/slap OBJ with OBJ;blast/fire/shoot OBJ at OBJ;blast/fire/shoot OBJ with OBJ;break/crush/damage/destro/smash OBJ with OBJ;break/crush/damage/destro/smash down OBJ with OBJ;bring/fetch OBJ OBJ;bring/fetch OBJ to OBJ;brush/clean/wipe OBJ with OBJ;build/make/prepar OBJ OBJ;build/make/prepar OBJ from OBJ;call/phone/signal/summon OBJ by OBJ;call/phone/signal/summon OBJ on 
OBJ;carry/get/grab/hold/lead/remove/seize/steal/take OBJ from OBJ;carry/get/grab/hold/lead/remove/seize/steal/take OBJ out OBJ;carry/get/grab/hold/lead/remove/seize/steal/take OBJ to OBJ;carry/get/grab/hold/lead/remove/seize/steal/take OBJ with OBJ;chase/follow/pursue/trace OBJ OBJ;chase/follow/pursue/trace OBJ to OBJ;chuck/hurl/throw/toss OBJ at OBJ;chuck/hurl/throw/toss OBJ on OBJ;chuck/hurl/throw/toss OBJ over OBJ;chuck/hurl/throw/toss OBJ throug OBJ;close/replac/reset OBJ with OBJ;come with OBJ to OBJ;compar/match OBJ to OBJ;compar/match OBJ with OBJ;confro OBJ with OBJ;crouch/hide/chuck/hurl/throw/toss OBJ in OBJ;crouch/hide/insert/instal/load/mount/place/put/stuff OBJ under OBJ;detect/find/locate OBJ with OBJ;dig/search OBJ for OBJ;discus OBJ with OBJ;donate/feed/give/hand/offer OBJ OBJ;donate/feed/give/hand/offer OBJ to OBJ;drive/go/head/procee/run/steer/walk OBJ over OBJ;drop/lower OBJ down OBJ;drop/lower OBJ in OBJ;drop/lower/fit/hang/insert/instal/load/mount/place/put/stuff OBJ on OBJ;explai/say/speak/state/talk to OBJ about OBJ;feel/pat/pet/rub/shade/touch OBJ on OBJ;feel/pat/pet/rub/shade/touch OBJ over OBJ;feel/pat/pet/rub/shade/touch OBJ with OBJ;fix/repair OBJ with OBJ;flip/shut/swing/switch/turn OBJ on OBJ;flip/shut/swing/switch/turn OBJ to OBJ;flip/shut/swing/switch/turn off OBJ with OBJ;flip/shut/swing/switch/turn on OBJ with OBJ;free/unatta/uncuff/unfast/unhook/untie OBJ from OBJ;gaze/l/look/peek/see/stare at OBJ throug OBJ;gaze/l/look/peek/see/stare up OBJ in OBJ;insert/instal/load/mount/place/put/stuff OBJ agains OBJ;insert/instal/load/mount/place/put/stuff OBJ in OBJ;kill/strang OBJ with OBJ;lean/prop/stand OBJ agains OBJ;lean/prop/stand OBJ on OBJ;lean/prop/stand OBJ up OBJ;move/open/set OBJ to OBJ;move/pull/tug OBJ OBJ;open OBJ with OBJ;peal/ring OBJ with OBJ;peruse/read/skim OBJ OBJ;peruse/read/skim OBJ throug OBJ;peruse/read/skim about OBJ in OBJ;pick OBJ with OBJ;press/push OBJ OBJ;press/push OBJ on OBJ;press/push OBJ to OBJ;press/push 
OBJ under OBJ;screw OBJ in OBJ;send OBJ OBJ;send OBJ to OBJ;set OBJ OBJ;set OBJ at OBJ;set OBJ for OBJ;set OBJ on OBJ;show OBJ OBJ;show OBJ to OBJ;strike OBJ with OBJ;tell OBJ OBJ;tell OBJ about OBJ;unlock OBJ with OBJ;use OBJ agains OBJ;",
"max_word_length" : 6
}
# Game-configuration entry for the "Sherbet" Z-machine ROM.
# Keys (mirrors the sibling entries in this file):
#   "name"            — identifier for this game entry
#   "rom"             — Z-machine story file to load ("sherbet.z5")
#   "seed"            — RNG seed passed to the interpreter
#   "grammar"         — ';'-separated action templates accepted by the parser;
#                       '/' separates synonymous verbs, 'OBJ' marks an object slot
#   "max_word_length" — presumably the parser's per-word significance limit — TODO confirm
# NOTE(review): the "grammar" value is a single long string literal; do not
# insert or re-wrap lines inside it.
sherbet = {
"name": "sherbet",
"rom": "sherbet.z5",
"seed" : 0,
"grammar" : "answer/say/shout/speak;awake/awaken/wake;awake/awaken/wake up;bark/howl/woof;bother/curses/darn/drat;brief/normal;c,cast;carry/get/hold/take inventory;carry/get/hold/take out/off/up;chirrup;close/cover/shut up;coo;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;hear/listen;help/hint/hints;hop/jump/skip;hop/jump/skip across;hop/jump/skip over;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;leave/go/run/walk;long/verbose;memory/spells;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;restart;restore;save;score;script;script off;script on;shake/wave;short/superbrie;sing;smell/sniff;sorry;stand up;think;verify;version;wait/z;y/yes;adjust/set OBJ;answer/say/shout/speak OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/drum/fight/hit/kick/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;bark/howl/woof OBJ;bite/chew OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;c,cast OBJ;carry/get/hold/take OBJ;carry/get/hold/take in/into/on/onto OBJ;carry/get/hold/take off OBJ;cast OBJ;chop/cut/prune/slice/tear OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;count/enumerate OBJ;cross/enter/go/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;draw/open/uncover/undo/unwrap OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;fire/shoot an arrow at OBJ;fire/shoot arrow at OBJ;fire/shoot at OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip across to OBJ;hop/jump/skip onto OBJ;hop/jump/skip over OBJ;hop/jump/skip over to OBJ;hop/jump/skip to OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under 
OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;poison OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;rotate/screw/swivel/turn/twist/unscrew OBJ;search OBJ;shake/wave OBJ;shoot OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/swivel/turn/twist/unscrew OBJ off;switch/rotate/screw/swivel/turn/twist/unscrew OBJ on;switch/rotate/screw/swivel/turn/twist/unscrew on OBJ;switch/rotate/screw/swivel/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;untie OBJ;adjust/set OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;bind OBJ in OBJ;bind OBJ into OBJ;burn/light OBJ with OBJ;carry/get/hold/take OBJ from/off OBJ;cast OBJ at OBJ;cast OBJ on OBJ;check/describe/examine/watch/x OBJ in OBJ;check/describe/examine/watch/x OBJ through OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;dip/insert OBJ in/into OBJ;dip/insert/put OBJ up OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw OBJ to OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;fire/shoot OBJ at OBJ;grind OBJ with OBJ;hang/lean/prop/suspend OBJ on/from/against OBJ;hang/lean/prop/suspend OBJ up on/from/against OBJ;hang/lean/prop/suspend up OBJ on/from/against OBJ;l/look at OBJ in OBJ;l/look at OBJ through OBJ;l/look at OBJ using OBJ;l/look at OBJ with OBJ;l/look through OBJ at OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;pour/tip OBJ on OBJ;pour/tip OBJ onto OBJ;pour/tip/discard/drop/throw OBJ over OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove OBJ 
from OBJ;tell OBJ about OBJ;unlock/draw/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
# Game definition for Infocom's "Sherlock: The Riddle of the Crown Jewels".
# Fields: "rom" is the z-machine story file to load, "seed" pins the
# interpreter RNG for reproducible runs, "walkthrough" is a "/"-separated
# command script that completes the game, and "grammar" enumerates the
# parser's action templates (";"-separated actions, "/"-separated verb
# synonyms, OBJ marking a noun slot).
sherlock = {
    "name": "sherlock",
    "rom": "sherlock.z5",
    "seed" : 0,
    # Walkthrough adapted from http://mirror.ifarchive.org/if-archive/solutions/jgunness.zip
    "walkthrough": "KNOCK ON DOOR/U/N/GET NEWSPAPER/SHOW IT TO HOLMES/EXAMINE SLIPPER/GET TOBACCO,KNIFE,PIPE/WAIT/No/READ PAPER/WAIT/No/W/GET LAMP, GLASS AND AMPOULE/E/S/D/N/GET MATCHBOOK/S/OPEN DOOR/E/LIGHT LAMP/N/E/TURN OFF LAMP/DROP IT/PUT TOBACCO IN PIPE/OPEN MATCHBOOK/GET MATCH/STRIKE IT/LIGHT PIPE WITH MATCH/DROP MATCH/N/ASK HOLMES ABOUT ASH/W/EXAMINE STATUES/EXAMINE FAWKES/GET TORCH/LIGHT NEWSPAPER WITH PIPE/LIGHT TORCH WITH NEWSPAPER/EXAMINE CHARLES/GET HEAD/MELT HEAD WITH TORCH/GET GEM/E/S/GET LAMP/LIGHT IT/E/Z/BLOW WHISTLE/BLOW WHISTLE/ENTER CAB/DRIVE TO WHITEHALL/GET OUT/E/D/LOOK IN ROWBOAT/GET OAR/U/W/S/W/HAGGLE WITH SALESMAN/HAGGLE WITH SALESMAN/BUY TELESCOPE/E/Z/SE/U/OPEN BAG/OPEN BLUE BOTTLE/GET BALLS/WEAR BALLS/WAIT/WAIT/GET SAPPHIRE/GET SAPPHIRE/GET SAPPHIRE/EXAMINE SAPPHIRE/READ SAPPHIRE SCRATCH WITH GLASS/READ EMERALD SCRATCH WITH GLASS/D/NW/TURN OFF LAMP/BLOW WHISTLE/BLOW WHISTLE/ENTER CAB/DRIVE TO COVENT GARDEN/GET OUT/REMOVE BALLS/DROP THEM/REMOVE HAT/DROP MATCHBOOK/GET STETHOSCOPE/WEAR IT/LISTEN TO GIRL/OPEN BROWN BOTTLE/TAKE YELLOW PILL/GIVE IT TO GIRL/REMOVE STETHOSCOPE/PUT IT IN HAT/WEAR HAT/N/E/S/W/ASK FOR PIGEON/E/BLOW WHISTLE/BLOW WHISTLE/ENTER CAB/DRIVE TO TRAFALGAR SQUARE/GET OUT/LOOK AT STATUE THROUGH TELESCOPE/SHOW RUBY TO PIGEON/TELL PIGEON TO GET RUBY/THROW IT/DROP TELESCOPE/BLOW WHISTLE/BLOW WHISTLE/ENTER CAB/DRIVE TO PINCHIN LANE/GET OUT/W/ASK FOR PIGEON/READ RUBY SCRATCH WITH GLASS/E/BLOW WHISTLE/BLOW WHISTLE/ENTER CAB/DRIVE TO THE EMBANKMENT/GET/OUT/ENTER BOAT/INSERT OAR IN OARLOCK/WEIGH ANCHOR/S/ROW EAST/ROW EAST/DROP/ANCHOR/EXAMINE BRIDGE/EXAMINE MOSS/GET MOSS/READ OPAL SCRATCH WITH GLASS/WEIGH ANCHOR/ROW WEST/ROW WEST/N/GET OUT/N/W/N/Z/N/W/NE/N/E/OPEN BOOK/SHUT UP/OPEN BOOK/READ BOOK/W/S/SW/S/S/S/SW/E/S/SE/DROP PIPE/GET PACQUET, PAPER AND CRAYON/NW/OPEN DOOR/S/W/READ SIGN/E/N/N/EXAMINE TOMB/OPEN PACQUET OF PAPER/GET BROWN PAPER/PUT IT ON TOMB/RUB IT WITH CRAYON/GET IT/E/N/N/LOOK/HEAT BROWN PAPER OVER/CANDLES/READ BACK OF BROWN 
PAPER/PUT IT IN BAG/S/E/EXAMINE TOMB/GET YELLOW/PAPER/PUT IT ON TOMB/RUB IT WITH CRAYON/GET IT/S/W/EXAMINE TOMBS/GET BLUE PAPER/PUT IT ON HENRY'S TOMB/RUB IT WITH CRAYON/GET IT/DROP CRAYON AND PACQUET/E/N/W/N/HEAT YELLOW PAPER OVER CANDLES/READ BACK OF YELLOW PAPER/HEAT BLUE PAPER OVER CANDLES/READ BACK OF BLUE PAPER/DROP BLUE PAPER AND YELLOW PAPER/S/S/W/W/BLOW WHISTLE/BLOW WHISTLE/ENTER CAB/DRIVE TO THE MONUMENT/z/GET OUT/READ PLAQUE/NW/NW/EXAMINE URCHIN/GIVE SHILLING TO WIGGINS/ASK WIGGINS TO STEAL KEYS/N/GIVE OPAL, RUBY, SAPPHIRE AND EMERALD TO GUARD/N/EXAMINE DOOR/REMOVE HAT/GET STETHOSCOPE/WEAR IT/LISTEN TO DIAL/TURN DIAL RIGHT/TURN DIAL RIGHT/TURN DIAL LEFT/TURN DIAL RIGHT/TURN DIAL RIGHT/W/UNLOCK BOX 600 WITH KEY/GET TOPAZ/READ TOPAZ SCRATCH WITH GLASS/E/S/W/W/W/S/W/ASK FOR MYCROFT HOLMES/GIVE RING TO BUTLER/E/BLOW WHISTLE/BLOW WHISTLE/ENTER CAB/DRIVE TO TOWER OF LONDON/GET OUT/E/E/Aragon/N/N/SE/U/GET MACE/D/NW/NE/EXAMINE KEG/HIT BUNG WITH MACE/LOOK IN KEG/ASK WIGGINS TO GET GARNET/READ GARNET SCRATCH WITH GLASS/SW/E/D/WEAR ARMOUR/U/W/S/S/S/GET PADDLE/PULL CHAIN/S/ENTER BOAT/WEIGH ANCHOR/S/PADDLE WEST/PADDLE WEST/PADDLE WEST/N/REMOVE ARMOUR/E/E/D/W/REMOVE STETHOSCOPE/DROP IT/GET AMPOULE/PUT IT IN HAT/WEAR HAT/WAIT FOR 36 HOURS/WAIT FOR 35 MINUTES/ASK FOR AKBAR/swordfish/GIVE GARNET TO AKBAR/REMOVE HAT/GET AMPOULE/HOLD BREATH/BREAK AMPOULE/GET KNIFE/CUT ROPE WITH KNIFE/TIE MORIARTY AND AKBAR WITH ROPE/GET KEY, JEWELS AND WHISTLE/UNLOCK DOOR WITH KEY/OPEN IT/OUT/BLOW WHISTLE/BLOW WHISTLE/ENTER CAB/DRIVE TO BUCKINGHAM PALACE/GET OUT/GIVE JEWELS TO GUARD",
    # Full set of action templates the game's parser accepts; used to
    # enumerate the space of valid commands.
    "grammar" : "aragon;boleyn;brief/super/superbrie/verbose;chico/echo/groucho/gummo/harpo/hum/marx/punt/sing/whistle/zeppo;cleves;clue/clues/help/hint/hints/invisiclu;fly;gaze/l/look/peek/peer/stare;gin;go/back/retreat/hike/proceed/step/trudge/walk;howard;hush/quiet/shh/shhh/shhhh/shush/sshh/sshhh;i/inventory;maybe;nap/rest/sleep/snooze;no;notify;parr;pray;q/quit;restart;restore;rise/stand;save;score;script/unscript;seymour;swordfish;thank/thanks;version;y/yes;aid/assist/preserve/rescue OBJ;answer OBJ;approach OBJ;arrest OBJ;ask/interroga/query/question OBJ;ask/interroga/query/question about OBJ;ask/interroga/query/question for OBJ;awake/awaken/revive/rouse/wake OBJ;awake/awaken/revive/rouse/wake up OBJ;bargain/deal/dicker/haggle/negotiate with OBJ;bathe/swim/wade OBJ;bathe/swim/wade in OBJ;bathe/swim/wade over OBJ;bathe/swim/wade through OBJ;bathe/swim/wade to OBJ;bathe/swim/wade under OBJ;bathe/swim/wade/dive down OBJ;bite/eat/swallow OBJ;blow OBJ;blow in OBJ;blow on OBJ;blow out OBJ;blow through OBJ;blow up OBJ;board/mount OBJ;bound/hurdle/jump/leap/vault OBJ;bound/hurdle/jump/leap/vault from OBJ;bound/hurdle/jump/leap/vault in OBJ;bound/hurdle/jump/leap/vault off OBJ;bound/hurdle/jump/leap/vault over OBJ;bound/hurdle/jump/leap/vault through OBJ;bound/hurdle/jump/leap/vault to OBJ;bound/hurdle/jump/leap/vault up OBJ;bow/genuflect/kneel before OBJ;bow/genuflect/kneel to OBJ;breathe in OBJ;breathe out OBJ;breathe/hypervent/inhale OBJ;browse/leaf/read/skim OBJ;browse/leaf/read/skim through OBJ;brush/clean/polish/smear/sweep/wipe OBJ;brush/clean/polish/smear/sweep/wipe off OBJ;burn/ignite/kindle OBJ;buy OBJ;bye/farewell/goodbye OBJ;call/hail OBJ;carry/catch/grab/keep/retrieve/seize/snatch/take down OBJ;carry/catch/grab/keep/retrieve/seize/snatch/take off OBJ;carry/catch/grab/keep/retrieve/seize/snatch/take up OBJ;cast off OBJ;chase/follow/pursue OBJ;check/describe/examine/inspect/see/study/survey/trace/x OBJ;check/describe/examine/inspect/see/study/survey/trace/x on 
OBJ;check/describe/examine/inspect/see/study/survey/trace/x/gaze/l/look/peek/peer/stare in OBJ;check/describe/examine/inspect/see/study/survey/trace/x/gaze/l/look/peek/peer/stare/frisk/ransack/rummage/search/sift for OBJ;chuck/fling/hurl/pitch/throw/toss OBJ;chuck/fling/hurl/pitch/throw/toss away OBJ;clear/empty OBJ;clear/empty off OBJ;clear/empty/shake out OBJ;climb through OBJ;climb under OBJ;climb/get in OBJ;climb/get on OBJ;climb/go/bound/hurdle/jump/leap/vault/hike/proceed/step/trudge/walk out OBJ;climb/go/scale/hike/proceed/step/trudge/walk down OBJ;climb/go/scale/hike/proceed/step/trudge/walk up OBJ;climb/scale over OBJ;climb/scale/ascend OBJ;close/slam/rotate/toggle/turn/twist/shut off OBJ;close/slam/shut OBJ;clue/clues/help/hint/hints/invisiclu off OBJ;count/tally OBJ;crouch/settle/sit/squat OBJ;crouch/settle/sit/squat at OBJ;crouch/settle/sit/squat down OBJ;crouch/settle/sit/squat in OBJ;crouch/settle/sit/squat on OBJ;cry/howl/scream/shout/yell OBJ;cry/howl/scream/shout/yell at OBJ;cry/howl/scream/shout/yell to OBJ;depart/exit/scram/withdraw/leave/disembark OBJ;descend OBJ;detonate/explode OBJ;discover/find/seek OBJ;disembark from OBJ;disembark out OBJ;dislocate/move/roll/shift OBJ;disrobe/strip/undress OBJ;dive OBJ;dive in OBJ;dive over OBJ;dive under OBJ;don/wear OBJ;douse/extinguis/quench/snuff OBJ;drag/pull/tug/yank OBJ;drag/pull/tug/yank on OBJ;drag/pull/tug/yank out OBJ;dress OBJ;drink/guzzle/imbibe/quaff/sip/swill OBJ;drink/guzzle/imbibe/quaff/sip/swill from OBJ;drive OBJ;drive to OBJ;drop/dump OBJ;elevate/hoist/lift/raise OBJ;elevate/hoist/lift/raise up OBJ;embark on OBJ;embark/enter OBJ;employ/exploit/operate/use OBJ;escape/flee OBJ;escape/flee from OBJ;exhale OBJ;extend/unflatten/unfold OBJ;fasten/secure/tie OBJ;feel/grope/reach in OBJ;fiddle/play/toy OBJ;fiddle/play/toy with OBJ;fire/shoot OBJ;flip OBJ;fly OBJ;fly on OBJ;fly over OBJ;fly with OBJ;focus on OBJ;focus/adjust OBJ;fold/wrap OBJ;fold/wrap out OBJ;fold/wrap up OBJ;foo 
OBJ;frisk/ransack/rummage/search/sift OBJ;frisk/ransack/rummage/search/sift in OBJ;frisk/ransack/rummage/search/sift through OBJ;gaze/l/look/peek/peer/stare OBJ;gaze/l/look/peek/peer/stare around OBJ;gaze/l/look/peek/peer/stare at OBJ;gaze/l/look/peek/peer/stare behind OBJ;gaze/l/look/peek/peer/stare down OBJ;gaze/l/look/peek/peer/stare on OBJ;gaze/l/look/peek/peer/stare out OBJ;gaze/l/look/peek/peer/stare over OBJ;gaze/l/look/peek/peer/stare through OBJ;gaze/l/look/peek/peer/stare to OBJ;gaze/l/look/peek/peer/stare up OBJ;gaze/l/look/peek/peer/stare/frisk/ransack/rummage/search/sift under OBJ;get down OBJ;get off OBJ;get out OBJ;get under OBJ;get up OBJ;get/carry/catch/grab/keep/retrieve/seize/snatch/take OBJ;go/get/back/retreat/escape/flee/hike/proceed/step/trudge/walk away OBJ;go/hike/proceed/step/trudge/walk OBJ;go/hike/proceed/step/trudge/walk around OBJ;go/hike/proceed/step/trudge/walk behind OBJ;go/hike/proceed/step/trudge/walk in OBJ;go/hike/proceed/step/trudge/walk over OBJ;go/hike/proceed/step/trudge/walk through OBJ;go/hike/proceed/step/trudge/walk to OBJ;go/hike/proceed/step/trudge/walk under OBJ;go/retreat/hike/proceed/step/trudge/walk from OBJ;go/rise/stand/bound/hurdle/jump/leap/vault/hike/proceed/step/trudge/walk on OBJ;greet/greetings/hello/hi/salute OBJ;grope/reach through OBJ;guess OBJ;haul OBJ;haul in OBJ;haul up OBJ;hear OBJ;hide OBJ;hide behind OBJ;hide in OBJ;hide/rise/stand under OBJ;hold OBJ;hold down OBJ;hold on OBJ;hold up OBJ;hold/carry/catch/grab/keep/retrieve/seize/snatch/take/drag/pull/tug/yank apart OBJ;insert/lay/place/put/stuff down OBJ;insert/lay/place/put/stuff on OBJ;insert/lay/place/put/stuff out OBJ;kick OBJ;kick around OBJ;kick down OBJ;kick in OBJ;kiss/smooch OBJ;knock/pound/rap/tap at OBJ;knock/pound/rap/tap on OBJ;land OBJ;launch OBJ;lean on OBJ;let go OBJ;lick/taste OBJ;lie down OBJ;lie in OBJ;lie on OBJ;light OBJ;listen OBJ;listen for OBJ;listen in OBJ;listen to OBJ;loiter/stay/wait/z OBJ;loiter/stay/wait/z for 
OBJ;loiter/stay/wait/z until OBJ;lower OBJ;make OBJ;make up OBJ;nap/rest/sleep/snooze in OBJ;nap/rest/sleep/snooze on OBJ;nudge/press/push/shove/stick/thrust OBJ;nudge/press/push/shove/stick/thrust in OBJ;nudge/press/push/shove/stick/thrust off OBJ;nudge/press/push/shove/stick/thrust on OBJ;nudge/press/push/shove/stick/thrust/drag/pull/tug/yank down OBJ;nudge/press/push/shove/stick/thrust/drag/pull/tug/yank up OBJ;observe/watch OBJ;open OBJ;open up OBJ;paddle OBJ;paddle with OBJ;password OBJ;pet/pat/feel/disturb/scratch/touch OBJ;pick OBJ;pick up OBJ;pinch/steal OBJ;pocket OBJ;point OBJ;point at OBJ;point to OBJ;pour/spill/sprinkle OBJ;pour/spill/sprinkle out OBJ;proclaim/say/speak/talk/utter OBJ;proclaim/say/speak/talk/utter to OBJ;refuse OBJ;remove OBJ;rent OBJ;replace OBJ;reply/respond to OBJ;retract OBJ;ride OBJ;ride in OBJ;ride on OBJ;ring OBJ;rip/tear off OBJ;rise/stand in OBJ;rise/stand up OBJ;rob OBJ;rotate/toggle/turn/twist around OBJ;rotate/toggle/turn/twist down OBJ;rotate/toggle/turn/twist on OBJ;rotate/toggle/turn/twist over OBJ;rotate/toggle/turn/twist through OBJ;rotate/toggle/turn/twist to OBJ;rotate/toggle/turn/twist up OBJ;save/clue/clues/help/hint/hints/invisiclu OBJ;set off OBJ;shut up OBJ;slide OBJ;slide/bound/hurdle/jump/leap/vault down OBJ;smell/sniff/whiff OBJ;smoke OBJ;spin/whirl OBJ;start OBJ;stop OBJ;swing OBJ;swing on OBJ;tell OBJ;tell about OBJ;thank/thanks OBJ;tip OBJ;translate OBJ;undo/unfasten/unhook/untie OBJ;wave OBJ;wave/grin/laugh/motion/nod/smile/sneer at OBJ;wave/grin/laugh/motion/nod/smile/sneer to OBJ;weigh OBJ;what/what's/whats OBJ;what/what's/whats about OBJ;where/where's/wheres OBJ;who/who's/whos OBJ;wind OBJ;wind up OBJ;ask/interroga/query/question OBJ about OBJ;ask/interroga/query/question OBJ for OBJ;blind/jab/poke OBJ with OBJ;block/cover/shield OBJ with OBJ;block/cover/shield over OBJ with OBJ;block/cover/shield up OBJ with OBJ;break/damage/destroy/erase/smash/trash/wreck OBJ off 
OBJ;break/damage/destroy/erase/smash/trash/wreck OBJ with OBJ;break/damage/destroy/erase/smash/trash/wreck down OBJ with OBJ;break/damage/destroy/erase/smash/trash/wreck in OBJ with OBJ;break/damage/destroy/erase/smash/trash/wreck through OBJ with OBJ;bribe/entice/pay OBJ to OBJ;bribe/entice/pay OBJ with OBJ;browse/leaf/read/skim OBJ OBJ;browse/leaf/read/skim OBJ through OBJ;browse/leaf/read/skim OBJ to OBJ;browse/leaf/read/skim OBJ with OBJ;brush/clean/polish/smear/sweep/wipe OBJ off OBJ;brush/clean/polish/smear/sweep/wipe OBJ on OBJ;brush/clean/polish/smear/sweep/wipe OBJ over OBJ;brush/clean/polish/smear/sweep/wipe off OBJ on OBJ;brush/clean/polish/smear/sweep/wipe off OBJ over OBJ;burn/ignite/kindle OBJ with OBJ;burn/ignite/kindle down OBJ with OBJ;burn/ignite/kindle up OBJ with OBJ;buy OBJ from OBJ;buy OBJ with OBJ;call/hail OBJ with OBJ;carry/catch/grab/keep/retrieve/seize/snatch/take OBJ to OBJ;check/describe/examine/inspect/see/study/survey/trace/x OBJ through OBJ;check/describe/examine/inspect/see/study/survey/trace/x OBJ with OBJ;chop/cut/slash down OBJ with OBJ;chop/cut/slash off OBJ with OBJ;chop/cut/slash through OBJ with OBJ;chop/cut/slash up OBJ with OBJ;chuck/fling/hurl/pitch/throw/toss OBJ OBJ;chuck/fling/hurl/pitch/throw/toss OBJ at OBJ;chuck/fling/hurl/pitch/throw/toss OBJ down OBJ;chuck/fling/hurl/pitch/throw/toss OBJ in OBJ;chuck/fling/hurl/pitch/throw/toss OBJ off OBJ;chuck/fling/hurl/pitch/throw/toss OBJ on OBJ;chuck/fling/hurl/pitch/throw/toss OBJ over OBJ;chuck/fling/hurl/pitch/throw/toss OBJ through OBJ;chuck/fling/hurl/pitch/throw/toss OBJ to OBJ;clear/empty OBJ from OBJ;clear/empty OBJ out OBJ;clear/empty out OBJ from OBJ;clear/empty/pour/spill/sprinkle OBJ in OBJ;clear/empty/pour/spill/sprinkle OBJ on OBJ;clear/empty/pour/spill/sprinkle out OBJ on OBJ;clear/empty/shake/pour/spill/sprinkle out OBJ in OBJ;conceal OBJ in OBJ;conceal/hide OBJ behind OBJ;conceal/hide OBJ under OBJ;deliver/give/hand/lend/loan/offer OBJ to 
OBJ;deliver/give/hand/lend/loan/offer/bribe/entice/pay OBJ OBJ;diagnose OBJ with OBJ;display/show OBJ OBJ;display/show OBJ to OBJ;disturb/scratch/touch OBJ with OBJ;drag/pull/tug/yank OBJ from OBJ;drag/pull/tug/yank OBJ out OBJ;drag/pull/tug/yank OBJ with OBJ;drag/pull/tug/yank down OBJ with OBJ;drag/pull/tug/yank on OBJ with OBJ;drag/pull/tug/yank up OBJ with OBJ;drop/dump OBJ down OBJ;drop/dump OBJ in OBJ;drop/dump OBJ on OBJ;employ/exploit/operate/use OBJ on OBJ;fasten/secure/tie OBJ to OBJ;fasten/secure/tie OBJ with OBJ;fasten/secure/tie up OBJ with OBJ;feed OBJ OBJ;feed OBJ to OBJ;feed OBJ with OBJ;fell/chop/cut/slash OBJ with OBJ;fill OBJ at OBJ;fill OBJ with OBJ;fire/shoot OBJ at OBJ;fire/shoot OBJ with OBJ;fix up OBJ with OBJ;fix/repair/service OBJ with OBJ;focus OBJ at OBJ;focus OBJ on OBJ;fold/wrap OBJ in OBJ;fold/wrap/wind OBJ around OBJ;fold/wrap/wind up OBJ in OBJ;force/wedge OBJ in OBJ;force/wedge/elevate/hoist/lift/raise up OBJ with OBJ;force/wedge/nudge/press/push/shove/stick/thrust/dislocate/move/roll/shift/elevate/hoist/lift/raise OBJ with OBJ;free/release/unjam OBJ from OBJ;free/release/unjam OBJ with OBJ;gaze/l/look/peek/peer/stare at OBJ through OBJ;gaze/l/look/peek/peer/stare at OBJ with OBJ;gaze/l/look/peek/peer/stare in OBJ through OBJ;gaze/l/look/peek/peer/stare in OBJ with OBJ;gaze/l/look/peek/peer/stare through OBJ at OBJ;get/bring OBJ OBJ;get/bring OBJ for OBJ;get/bring OBJ to OBJ;get/carry/catch/grab/keep/retrieve/seize/snatch/take OBJ from OBJ;get/carry/catch/grab/keep/retrieve/seize/snatch/take OBJ in OBJ;get/carry/catch/grab/keep/retrieve/seize/snatch/take OBJ off OBJ;get/carry/catch/grab/keep/retrieve/seize/snatch/take OBJ on OBJ;get/carry/catch/grab/keep/retrieve/seize/snatch/take OBJ out OBJ;get/carry/catch/grab/keep/retrieve/seize/snatch/take OBJ with OBJ;grope/reach OBJ with OBJ;grope/reach for OBJ with OBJ;grope/reach out OBJ with OBJ;grope/reach to OBJ with OBJ;heat/warm OBJ over OBJ;heat/warm OBJ with OBJ;hit/slap/swat/whack 
at OBJ with OBJ;hit/slap/swat/whack/knock/pound/rap/tap OBJ with OBJ;hold OBJ in OBJ;hold OBJ on OBJ;hold OBJ over OBJ;hold OBJ up OBJ;hold/disturb/scratch/touch OBJ to OBJ;hold/insert/lay/place/put/stuff OBJ against OBJ;hook/jiggle/loosen/wiggle/wobble OBJ with OBJ;illuminat OBJ with OBJ;insert/lay/place/put/stuff OBJ behind OBJ;insert/lay/place/put/stuff OBJ down OBJ;insert/lay/place/put/stuff OBJ in OBJ;insert/lay/place/put/stuff OBJ on OBJ;insert/lay/place/put/stuff OBJ over OBJ;insert/lay/place/put/stuff OBJ through OBJ;insert/lay/place/put/stuff OBJ under OBJ;kill/murder/punch/stab/wound/attack/fight/hurt OBJ with OBJ;knock/pound/rap/tap down OBJ with OBJ;knock/pound/rap/tap out OBJ with OBJ;leave OBJ in OBJ;leave OBJ on OBJ;let OBJ go OBJ;light OBJ from OBJ;light OBJ with OBJ;light up OBJ from OBJ;light up OBJ with OBJ;listen to OBJ through OBJ;listen to OBJ with OBJ;lock OBJ with OBJ;make OBJ OBJ;melt OBJ with OBJ;nudge/press/push/shove/stick/thrust OBJ at OBJ;nudge/press/push/shove/stick/thrust OBJ in OBJ;nudge/press/push/shove/stick/thrust OBJ on OBJ;nudge/press/push/shove/stick/thrust OBJ over OBJ;nudge/press/push/shove/stick/thrust OBJ under OBJ;nudge/press/push/shove/stick/thrust down OBJ with OBJ;nudge/press/push/shove/stick/thrust on OBJ with OBJ;nudge/press/push/shove/stick/thrust/dislocate/move/roll/shift/drag/pull/tug/yank OBJ OBJ;nudge/press/push/shove/stick/thrust/dislocate/move/roll/shift/drag/pull/tug/yank OBJ to OBJ;observe/watch OBJ through OBJ;observe/watch OBJ with OBJ;open OBJ with OBJ;open up OBJ with OBJ;paddle OBJ with OBJ;pick OBJ with OBJ;pinch/steal OBJ from OBJ;pinch/steal OBJ out OBJ;point at OBJ for OBJ;point out OBJ to OBJ;point to OBJ for OBJ;point/aim OBJ at OBJ;point/aim OBJ to OBJ;point/aim at OBJ with OBJ;pour/spill/sprinkle OBJ from OBJ;pour/spill/sprinkle OBJ out OBJ;proclaim/say/speak/talk/utter OBJ to OBJ;remove OBJ from OBJ;remove OBJ in OBJ;remove OBJ on OBJ;remove OBJ with OBJ;rip/tear through OBJ with OBJ;rip/tear 
up OBJ with OBJ;rob OBJ from OBJ;rotate/toggle/turn/twist OBJ with OBJ;rotate/toggle/turn/twist/spin/whirl OBJ OBJ;rotate/toggle/turn/twist/spin/whirl/set/dial OBJ to OBJ;row OBJ OBJ;rub OBJ with OBJ;sell OBJ OBJ;sell OBJ to OBJ;set OBJ at OBJ;shake OBJ with OBJ;shake/pour/spill/sprinkle out OBJ from OBJ;shine OBJ at OBJ;shine OBJ in OBJ;shine OBJ on OBJ;shine OBJ over OBJ;shine in OBJ with OBJ;shine on OBJ with OBJ;shine over OBJ with OBJ;slide OBJ down OBJ;slide OBJ in OBJ;slide OBJ to OBJ;slide OBJ under OBJ;start OBJ with OBJ;strike OBJ with OBJ;swing OBJ at OBJ;tell OBJ about OBJ;unlock OBJ with OBJ;unscrew OBJ from OBJ;unscrew OBJ out OBJ;wind OBJ in OBJ;work on OBJ with OBJ;",
    # NOTE(review): presumably the number of significant characters the
    # game's dictionary keys on (words truncated beyond it) — confirm.
    "max_word_length" : 9
}
# Game definition for "Snack Time!" (z8 story file). Same schema as the
# surrounding entries: ROM filename, fixed RNG seed, a "/"-separated
# walkthrough script, and the parser grammar (";"-separated action
# templates, "/"-separated synonyms, OBJ = noun slot).
snacktime = {
    "name": "snacktime",
    "rom": "snacktime.z8",
    "seed" : 0,
    # Scripted sequence of commands that completes the game.
    "walkthrough" : "w/pull cloth/close cold box/e/push pet/bark/lick pet/x box/x couch/x soft things/take cushion/w/pull cloth/get can/close cold box/e/show can to pet/get on couch/l/take wand/d/push wand/push wand/push wand/z/z/z/n/take dino/s/w/drop dino/z/z",
    # Full set of action templates the game's parser accepts.
    "grammar" : "awake/awaken/wake;awake/awaken/wake up;beg;bother/curses/darn/drat;carry/hold/take inventory;change/switch channel;channel surf;chase/get tail;chase/spin;damn/fuck/shit;get out/off/up;go/leave/run/walk;go/leave/run/walk potty;growl/bark at dino;growl/bark at dinosaur;hop/jump/skip;hop/jump/skip on him;hop/jump/skip on man;hop/jump/skip on master;hop/jump/skip on owner;hop/jump/skip on pet;hop/jump/skip up;hop/jump/skip/get down;howl/sing;i/inv/inventory;l/look;lay/lie down;listen;long/verbose;love/kiss on dino;love/kiss on dinosaur;love/kiss/embrace/hug dino;love/kiss/embrace/hug dinosaur;make sandwich;make snack;nap/sleep;no;nod/shake head;normal/brief;notify;notify off;notify on;off/exit/out/stand;paw/shake;play dead;pronouns/nouns;q/quit;restart;restore;roll over;rollover;save;score;scratch;scratch him;scratch man;scratch master;scratch me;scratch myself;scratch owner;scratch pet;scratch player;scratch self;shake hand;shake hands;shake him;shake man;shake master;shake me;shake owner;shake paw;shake pet;shake self;shake yourself;short/superbrie;sit;sit down;smell/sniff;snack;sorry;spin/rotate/screw/turn/twist/unscrew around;stand up;switch show;talk to dino;talk to dinosaur;talk to friend;think;transcrip/script;transcrip/script off;transcrip/script on;urinate/poop/poo/pee;use toilet;verify;version;wag my tail;wag/shake tail;wait/z;walkthru/walkthrou/info/about/hints/hint/help;wave;whimper/whine/growl/talk/answer/say/shout/speak;y/yes;yap/yip/bark;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;bark at OBJ;bark to OBJ;bite/chew OBJ;bite/chew on OBJ;burn/light OBJ;buy/purchase OBJ;carry/hold/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale on OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cross/enter/go/leave/run/walk OBJ;dig 
in OBJ;dig/search OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;feel/touch OBJ;fetch/get/carry/hold/take OBJ;get in/into/on/onto OBJ;get off OBJ;go/leave/run/walk into/in/inside/through OBJ;growl at OBJ;hear OBJ;hop/jump/skip on OBJ;hop/jump/skip onto OBJ;hop/jump/skip up on OBJ;hop/jump/skip up onto OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;listen to OBJ;love/kiss on OBJ;love/kiss/embrace/hug OBJ;open/uncover/unwrap OBJ;pick OBJ up;pick up OBJ;play with OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;rotate/screw/turn/twist/unscrew OBJ;scratch OBJ;shake OBJ;sit on top of OBJ;sit on/in/inside OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;talk to OBJ;taste OBJ;urinate/poop/poo/pee in OBJ;urinate/poop/poo/pee on OBJ;use OBJ;wave OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;carry/hold/take OBJ off OBJ;consult OBJ on/about OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/hold/take OBJ from OBJ;tell OBJ about OBJ;unlock/open/uncover/unwrap OBJ with OBJ;use OBJ on OBJ;",
    # NOTE(review): presumably the number of significant characters the
    # game's dictionary keys on (words truncated beyond it) — confirm.
    "max_word_length" : 9
}
# Game definition for Infocom's "Sorcerer" (z3 story file). Same schema as
# the surrounding entries: ROM filename, fixed RNG seed, a "/"-separated
# walkthrough script, and the parser grammar (";"-separated action
# templates, "/"-separated synonyms, OBJ = noun slot). Note the truncated
# verb forms in the grammar (e.g. "restar", "versio"), consistent with the
# 6-character max_word_length below.
sorcerer = {
    "name": "sorcerer",
    "rom": "sorcerer.z3",
    "seed" : 0,
    # Walkthrough adapted from http://mirror.ifarchive.org/if-archive/solutions/jgunness.zip
    "walkthrough": "WAIT/FROTZ BOOK/GET UP/W/READ NOTE/DROP IT/W/OPEN DESK/GET BOX AND JOURNAL/LOOK BEHIND TAPESTRY/GET KEY/OPEN JOURNAL/READ IT/DROP JOURNAL AND KEY/E/S/S/W/GET VIAL, CALENDAR AND MATCHBOOK/E/OPEN RECEPTACLE/READ MATCHBOOK/PUT IT IN RECEPTACLE/E/GET SCROLL/READ IT/GNUSTO MEEF SPELL/W/D/PRESS white, gray, black, red, black/GET SCROLL/READ IT/OPEN VIAL/DRINK OCHRE POTION/DROP VIAL/U/OPEN RECEPTACLE/GET ORANGE VIAL/N/W/GET SCROLL/READ IT/GNUSTO GASPAR SPELL/AIMFIZ BELBOZ/NE/E/NE/LEARN PULVER/PULVER RIVER/D/NE/GET GUANO, SCROLL AND AMBER VIAL/READ SCROLL/GNUSTO FWEEP SPELL/D/S/GET INDIGO VIAL/W/D/W/SW/SW/W/LEARN IZYUK SPELL/AGAIN/IZYUK ME/W/W/N/GET COIN/S/E/IZYUK ME/E/E/NE/NE/E/E/WAKE GNOME/GIVE COIN TO GNOME/SEARCH GNOME/E/E/N/N/SLEEP/LEARN FWEEP/LEARN FWEEP/LEARN FWEEP/LEARN FWEEP/LEARN FWEEP/DROP ALL/E/FWEEP ME/N/E/S/S/W/D/E/E/N/N/U/U/S/E/GET SCROLL/READ IT/PUT IT IN HOLE/FWEEP ME/W/W/S/E/D/D/W/W/U/U/N/N/D/E/FWEEP ME/S/E/N/D/W/S/W/U/W/WAIT/WAIT/GET ALL/S/S/E/OPEN BOX/DROP BOX/GET AMULET AND SCROLL/GNUSTO SWANZO SPELL/LEARN IZYUK/W/W/W/W/U/U/W/W/IZYUK ME/NE/SE/E/LOWER FLAG/EXAMINE IT/DROP CALENDAR/GET AQUA VIAL/E/LOOK IN CANNON/PUT GUANO IN CANNON/GET SCROLL/READ IT/W/W/LEARN IZYUK/IZYUK ME/NW/SW/W/D/D/S/S/SW/W/GIVE COIN TO GNOME/W/W/S/SLEEP/OPEN AQUA VIAL/DRINK POTION/DROP AQUA VIAL/GET BALL/THROW IT AT RABBIT/READ GLITTERING SCROLL/GNUSTO MALYON SPELL/N/E/E/NE/S/YONK MALYON SPELL/LEARN MALYON/MALYON DRAGON/S/OPEN ORANGE VIAL/FROTZ ME/E/DRINK POTION/THROW ALL BUT BOOK INTO LOWER CHUTE/GIVE BOOK TO TWIN/E/TURN DIAL TO 639/OPEN DOOR/E/GET ROPE/U/NW/GET TIMBER/NW/W/TIE ROPE TO TIMBER/PUT TIMBER ACROSS CHUTE/THROW ROPE INTO CHUTE/D/GET SHIMMERING SCROLL/READ IT/GOLMAC ME/OPEN LAMP/GET SMELLY SCROLL/D/WAIT/WAIT/SAY TO TWIN \"THE COMBINATION IS 639\"/D/WAIT/SLEEP/LEARN MEEF, MEEF AND SWANZO/DROP ALL/E/D/MEEF WEEDS/OPEN CRATE/GET SUIT/U/W/GET ALL/WEAR SUIT/NE/N/MEEF VINES/W/W/OPEN WHITE DOOR/VARDIK ME/SWANZO BELBOZ",
    # Full set of action templates the game's parser accepts; verb synonyms
    # are already clipped to six significant characters.
    "grammar" : "advanc/go/hike/procee/run/step/tramp/trudge/walk;answer/reply/respon;bathe/swim/wade;bound/dive/hurdle/jump/leap/vault;brief;call/procla/say/talk/utter;cavort/gambol/hop/skip;chase/follow/pursue;concea/enscon/hide/secret/stash;damn/fuck/shit;depart/exit/withdr;diagno;enter;fly;fweep;gaspar;gaze/l/look/stare;greeti/hello/hi/saluta;help/hint/hints;howl/scream/shout/yell;i/invent;land;leave;nap/snooze/sleep;q/quit;restar;restor;rise/stand;save;score;script;spells;super/superb;t/time;thank/thanks;unscri;verbos;versio;vezza;wait/z;advanc/go/hike/procee/run/step/tramp/trudge/walk OBJ;advanc/go/hike/procee/run/step/tramp/trudge/walk around OBJ;advanc/go/hike/procee/run/step/tramp/trudge/walk down OBJ;advanc/go/hike/procee/run/step/tramp/trudge/walk in OBJ;advanc/go/hike/procee/run/step/tramp/trudge/walk on OBJ;advanc/go/hike/procee/run/step/tramp/trudge/walk throug OBJ;advanc/go/hike/procee/run/step/tramp/trudge/walk to OBJ;advanc/go/hike/procee/run/step/tramp/trudge/walk up OBJ;aimfiz OBJ;answer/reply/respon OBJ;assaul/attack/fight/hit/hurt/injure OBJ;awake/rouse/startl/surpri/wake OBJ;awake/rouse/startl/surpri/wake up OBJ;banish/drive/exorci OBJ;banish/drive/exorci away OBJ;banish/drive/exorci out OBJ;bathe/swim/wade in OBJ;beckon/brandi/motion/wave OBJ;beckon/brandi/motion/wave to OBJ;beckon/brandi/motion/wave/howl/scream/shout/yell at OBJ;bite OBJ;blow out OBJ;blow up OBJ;board/embark/ride OBJ;bound/dive/hurdle/jump/leap/vault across OBJ;bound/dive/hurdle/jump/leap/vault from OBJ;bound/dive/hurdle/jump/leap/vault in OBJ;bound/dive/hurdle/jump/leap/vault off OBJ;bound/dive/hurdle/jump/leap/vault/advanc/go/hike/procee/run/step/tramp/trudge/walk over OBJ;break/crack/damage/demoli/destro/smash/wreck OBJ;call/procla/say/talk/utter to OBJ;carry/catch/confis/get/grab/hold/seize/snatch/take OBJ;carry/catch/confis/get/grab/hold/seize/snatch/take off OBJ;carry/catch/confis/get/grab/hold/seize/snatch/take out OBJ;cast/incant/invoke OBJ;chase/follow/pursue 
OBJ;check/descri/examin/inspec/observ/study/survey/watch OBJ;check/descri/examin/inspec/observ/study/survey/watch on OBJ;check/descri/examin/inspec/observ/study/survey/watch/gaze/l/look/stare in OBJ;check/descri/examin/inspec/observ/study/survey/watch/gaze/l/look/stare/frisk/ransac/rummag/search for OBJ;climb/scale OBJ;climb/scale down OBJ;climb/scale over OBJ;climb/scale up OBJ;climb/scale/rest/sit/squat/carry/catch/confis/get/grab/hold/seize/snatch/take in OBJ;climb/scale/rest/sit/squat/carry/catch/confis/get/grab/hold/seize/snatch/take on OBJ;close/shut OBJ;combin/combo OBJ;concea/enscon/hide/secret/stash OBJ;concea/enscon/hide/secret/stash behind OBJ;concea/enscon/hide/secret/stash under OBJ;consum/devour/eat/gobble/ingest/nibble/taste OBJ;count/tally OBJ;cross/ford/traver OBJ;debark/disemb OBJ;defile/molest/rape/ravish OBJ;deflat OBJ;depart/exit/withdr OBJ;descen OBJ;dig/excava in OBJ;dig/excava throug OBJ;dig/excava with OBJ;discha/fire/shoot OBJ;disloc/displa/move/shift/drag/pull/shove/tug/yank OBJ;dispat/kill/murder/slay/stab/vanqui OBJ;doff/remove/shed OBJ;don/wear OBJ;douse/exting/quench OBJ;drag/pull/shove/tug/yank down OBJ;drag/pull/shove/tug/yank on OBJ;drink/guzzle/imbibe/quaff/sip/swallo/swill OBJ;drink/guzzle/imbibe/quaff/sip/swallo/swill from OBJ;drop/dump/releas OBJ;elevat/hoist/lift/raise OBJ;elevat/hoist/lift/raise up OBJ;enter OBJ;feel/pat/pet/rub/touch OBJ;fill OBJ;find/see/seek OBJ;flip/set/turn OBJ;flip/set/turn off OBJ;flip/set/turn on OBJ;fly OBJ;forget/unlear/unmemo OBJ;free/unatta/unfast/unhook/untie OBJ;frisk/ransac/rummag/search OBJ;frisk/ransac/rummag/search in OBJ;frotz OBJ;fweep OBJ;gaspar OBJ;gaze/l/look/stare OBJ;gaze/l/look/stare around OBJ;gaze/l/look/stare at OBJ;gaze/l/look/stare behind OBJ;gaze/l/look/stare down OBJ;gaze/l/look/stare on OBJ;gaze/l/look/stare throug OBJ;gaze/l/look/stare under OBJ;gaze/l/look/stare up OBJ;gestur/point at OBJ;gestur/point to OBJ;gnusto OBJ;golmac OBJ;greeti/hello/hi/saluta 
OBJ;gyrate/rotate/spin/whirl OBJ;harken/listen for OBJ;harken/listen to OBJ;inflat OBJ;insert/lay/place/put/stuff down OBJ;insert/lay/place/put/stuff on OBJ;izyuk OBJ;jostle/rattle/shake OBJ;kick OBJ;kiss/smooch OBJ;knock/rap at OBJ;knock/rap down OBJ;knock/rap on OBJ;know/learn/memori OBJ;launch OBJ;lean on OBJ;leave OBJ;lie/reclin/repose down OBJ;lie/reclin/repose on OBJ;light OBJ;lower OBJ;malyon OBJ;meef OBJ;nap/snooze/sleep in OBJ;nap/snooze/sleep on OBJ;nudge/press/push/thrust OBJ;nudge/press/push/thrust on OBJ;open OBJ;open up OBJ;pay OBJ;pick OBJ;pick up OBJ;play OBJ;polish/shine/wax OBJ;pour/spill/sprink OBJ;pulver OBJ;pump up OBJ;reach in OBJ;read/skim OBJ;read/skim about OBJ;rest/sit/squat at OBJ;rest/sit/squat down OBJ;rezrov OBJ;rise/stand on OBJ;rise/stand/carry/catch/confis/get/grab/hold/seize/snatch/take up OBJ;roll up OBJ;send for OBJ;slide OBJ;smell/sniff/whiff OBJ;spray OBJ;squeez OBJ;strike OBJ;swanzo OBJ;swing OBJ;tell OBJ;thank/thanks OBJ;tortur OBJ;vardik OBJ;vezza OBJ;wait/z for OBJ;what/whats OBJ;where/wheres OBJ;who/whos OBJ;yomin OBJ;yonk OBJ;aimfiz OBJ to OBJ;apply OBJ to OBJ;ask/interr/query/quiz OBJ about OBJ;ask/interr/query/quiz OBJ for OBJ;assaul/attack/fight/hit/hurt/injure OBJ with OBJ;attach/fasten/secure/tie OBJ to OBJ;attach/fasten/secure/tie up OBJ with OBJ;beckon/brandi/motion/wave OBJ at OBJ;bestow/donate/feed/give/hand/offer/presen OBJ OBJ;bestow/donate/feed/give/hand/offer/presen OBJ to OBJ;blind/jab/poke OBJ with OBJ;break/crack/damage/demoli/destro/smash/wreck OBJ with OBJ;break/crack/damage/demoli/destro/smash/wreck down OBJ with OBJ;burn/combus/ignite/kindle OBJ with OBJ;burn/combus/ignite/kindle down OBJ with OBJ;carry/catch/confis/get/grab/hold/seize/snatch/take OBJ from OBJ;carry/catch/confis/get/grab/hold/seize/snatch/take OBJ in OBJ;carry/catch/confis/get/grab/hold/seize/snatch/take OBJ off OBJ;carry/catch/confis/get/grab/hold/seize/snatch/take OBJ out OBJ;cast/incant/invoke OBJ at OBJ;cast/incant/invoke OBJ on 
OBJ;chuck/fling/hurl/pitch/throw/toss OBJ at OBJ;chuck/fling/hurl/pitch/throw/toss OBJ off OBJ;chuck/fling/hurl/pitch/throw/toss OBJ over OBJ;chuck/fling/hurl/pitch/throw/toss OBJ throug OBJ;cleave/cut/gash/lacera/sever/slash/slice/split OBJ with OBJ;cleave/cut/gash/lacera/sever/slash/slice/split throug OBJ with OBJ;clog/fix/glue/patch/plug/repair OBJ with OBJ;compar OBJ to OBJ;concea/enscon/hide/secret/stash OBJ from OBJ;dig/excava OBJ with OBJ;dig/excava in OBJ with OBJ;dispat/kill/murder/slay/stab/vanqui OBJ with OBJ;dissol/liquif/melt/thaw OBJ with OBJ;drop/dump/releas/chuck/fling/hurl/pitch/throw/toss/insert/lay/place/put/stuff OBJ down OBJ;drop/dump/releas/chuck/fling/hurl/pitch/throw/toss/insert/lay/place/put/stuff OBJ in OBJ;feel/pat/pet/rub/touch OBJ with OBJ;fill OBJ at OBJ;fill OBJ with OBJ;flip/set/turn OBJ for OBJ;flip/set/turn OBJ to OBJ;flip/set/turn OBJ with OBJ;free/unatta/unfast/unhook/untie OBJ from OBJ;gaze/l/look/stare at OBJ throug OBJ;gaze/l/look/stare up OBJ in OBJ;hone/sharpe OBJ with OBJ;insert/lay/place/put/stuff OBJ across OBJ;insert/lay/place/put/stuff OBJ behind OBJ;insert/lay/place/put/stuff OBJ on OBJ;insert/lay/place/put/stuff OBJ over OBJ;insert/lay/place/put/stuff OBJ under OBJ;light OBJ with OBJ;lock OBJ with OBJ;lower OBJ down OBJ;lower OBJ in OBJ;open OBJ with OBJ;pay OBJ OBJ;pay OBJ to OBJ;pay OBJ with OBJ;pick OBJ with OBJ;polish/shine/wax OBJ with OBJ;pour/spill/sprink OBJ from OBJ;pour/spill/sprink OBJ in OBJ;pour/spill/sprink OBJ on OBJ;pump up OBJ with OBJ;read/skim OBJ throug OBJ;read/skim about OBJ in OBJ;show OBJ OBJ;show OBJ to OBJ;slide/nudge/press/push/thrust OBJ OBJ;slide/nudge/press/push/thrust OBJ to OBJ;slide/nudge/press/push/thrust OBJ under OBJ;spray OBJ on OBJ;spray OBJ with OBJ;squeez/drop/dump/releas/chuck/fling/hurl/pitch/throw/toss OBJ on OBJ;strike OBJ with OBJ;swing OBJ at OBJ;unlock OBJ with OBJ;",
    # NOTE(review): presumably the number of significant characters the
    # game's dictionary keys on (words truncated beyond it) — confirm;
    # matches the 6-character verb forms seen in the grammar above.
    "max_word_length" : 6
}
# Configuration record for the Infocom game "Spellbreaker" (.z3 Z-machine ROM).
# NOTE(review): presumably consumed by an interactive-fiction harness (e.g. a
# Jericho-style loader) that replays the walkthrough and uses the grammar to
# enumerate valid actions -- confirm against the code that reads these dicts.
spellbrkr = {
"name": "spellbrkr",
# Z-machine story file to load for this game.
"rom": "spellbrkr.z3",
# RNG seed handed to the interpreter so the walkthrough replays deterministically.
"seed" : 0,
# Complete winning command sequence; individual commands are '/'-separated and
# are fed to the game parser one step at a time.
"walkthrough" : "z/z/z/z/s/take bread/s/learn lesoch/cast lesoch/take cube/write 1 on cube/learn blorple/blorple 1/frotz burin/d/d/z/z/z/z/z/z/get stained scroll/read stained scroll/gnusto caskly/learn blorple/blorple 1/e/s/take zipper/open zipper/reach in zipper/look in zipper/take flimsy scroll/read flimsy scroll/learn blorple/blorple 1/s/take dirty scroll/read dirty scroll/gnusto throck/u/z/z/z/cast girgol/u/u/u/u/take coin/examine coin/w/learn caskly/caskly hut/take cube/e/put coin in zipper/put bread in zipper/put knife in zipper/write 2 on cube/learn blorple/blorple 2/s/pull weed/pull weed/learn blorple/blorple 1/w/n/learn yomin/yomin ogre/plant weed/learn throck/throck weed/d/take dusty scroll/take gold box/u/s/read dusty scroll/gnusto espnis/open box/take cube/put box in zipper/write 3 on cube/learn blorple/blorple 3/take bread from zipper/learn blorple/drop all except bread/s/drop bread/take cube/take bottle/blorple 3/open bottle/look in bottle/take damp scroll/read damp scroll/take all/gnusto liskon/take 1/n/sleep/enter outflow pipe/learn liskon/liskon self/enter inflow pipe/z/z/z/learn liskon/liskon me/z/z/learn liskon/liskon me/frotz bottle/learn blorple/drop all except bottle/take 3/enter outflow pipe/w/take cube/w/climb out of pipe/blorple 3/n/take all/put bottle in zipper/write 4 on cube/learn blorple/blorple 1/e/n/learn liskon/liskon snake/n/n/take cube/learn malyon/malyon idol/learn espnis/learn malyon/malyon idol/z/espnis idol/z/climb idol/look in mouth/take cube/d/write 5 on cube/learn blorple/blorple 5/n/take white scroll/learn blorple/blorple 5/w/read white scroll/gnusto tinsot/e/examine blue carpet/take coin from zipper/point at blue carpet/buy blue carpet/offer 300/offer 400/offer 500/take blue carpet/exit/learn blorple/blorple 3/learn tinsot/learn tinsot/learn tinsot/learn tinsot/put all in zipper/take burin/close zipper/n/rezrov door/tinsot channel/tinsot channel/tinsot channel/z/z/z/tinsot water/enter/u/take cube/open zipper/get book/write 
6 on cube/e/n/rezrov cabinet/take moldy book/learn caskly/caskly moldy book/read moldy book/gnusto snavig/s/w/u/drop carpet/sit on carpet/u/w/w/w/w/d/get off carpet/pick up cube/sit on carpet/u/e/e/e/e/d/get off carpet/take carpet/write 7 on cube/d/learn blorple/blorple 3/sleep/learn snavig/learn blorple/put spell book in zipper/close zipper/s/get 3/snavig grouper/d/z/z/z/z/get all/u/blorple 3/sleep/write 8 on cube/open zipper/take spell book from zipper/n/learn blorple/blorple 8/w/learn tinsot/tinsot fragment/take fragment/put all in zipper except book/learn blorple/take 4/blorple 4/n/take compass rose/learn blorple/blorple 4/w/put compass rose in carving/take compass rose/n/touch nw rune with rose/nw/touch w rune with rose/w/touch ne rune with rose/ne/rezrov alabaster/w/take cube/take burin from zipper/write 9 on cube/learn blorple/blorple 9/s/take fragment from zipper/give fragment to green rock/sit on green rock/l/rock, n/rock, sw/rock, e/rock, s/rock, e/rock, e/jump to brown rock/get cube/write 10 on cube/learn blorple/blorple 10/d/learn snavig/drop all except 10, book/d/snavig grue/d/climb pillar/take cube/z/z/z/learn blorple/blorple 10/d/take all/write 11 on cube/learn blorple/blorple 11/n/take box/examine box/put 10 in box/take 10/throw box at outcropping/learn blorple/blorple 10/u/take box/take cube/write 12 on cube/put all in zipper/take book/take 7/learn blorple/blorple 7/s/ask belboz about me/berknip/ask belboz about cube/ask belboz about figure/take 9/learn blorple/blorple 9/e/rezrov door/put all except book in zipper/n/learn jindak/learn jindak/learn jindak/learn blorple/take x1,x2,x7,x8/jindak/put x1, x2, x7 on first pile/get x3, x4, x5, x6/put x8 on second pile/get x12/put x12 on first pile/l/jindak/drop all cubes/take x1, x2, x7, x12/drop all cubes/take x8, x9, x10, x11/drop all cubes/take x1, x10/put x1 in first/put x10 in second/jindak/take x10/blorple x10/d/take key from zipper/unlock cabinet with key/open cabinet/take vellum scroll/read vellum 
scroll/learn rezrov/learn blorple/learn blorple/learn girgol/put book in cabinet/close cabinet/lock cabinet with key/rezrov door/blorple x10/u/open sack/take flimsy scroll/take burin/copy flimsy scroll to vellum scroll/take sack/empty zipper into sack/put flimsy scroll in zipper/close zipper/drop zipper/take 12/blorple 12/e/z/z/z/take knife/z/z/z/z/z/z/cast girgol/take 12/put sack in tesseract/z",
# Action grammar accepted by the game parser: ';'-separated action templates,
# '/' separates synonymous verb words (truncated to max_word_length), and OBJ
# marks a slot to be filled with an in-game object.
"grammar" : "answer/reply/respon;bathe/swim/wade;brief;call/say/talk;cavort/gambol/hop/skip;chase/follow/pursue;concea/hide;damn/fuck/shit;debark/disemb;depart/exit/withdr;diagno;dive/jump/leap;enter;fly;gaze/l/look/stare;girgol;hello/hi;help/hint;i/invent;jindak;land;leave;lesoch;listen;lurk/drool/gurgle/saliva/slaver;move/shift/go/procee/run/step/walk;nap/snooze/sleep;no/nope;okay/y/yes;q/quit;restar;restor;rise/stand;save;score;scream/shout/yell;script;smell/sniff;snavig;spells;super/superb;t/time;thank/thanks;unscri;verbos;versio;wait/z;where;who;yawn;admire/compli OBJ;answer/reply/respon OBJ;ask/query/quiz about OBJ;ask/query/quiz for OBJ;assaul/attack/fight/hit/strike OBJ;awake/rouse/startl/surpri/wake OBJ;awake/rouse/startl/surpri/wake up OBJ;bargai/haggle with OBJ;bathe/swim/wade in OBJ;beckon/wave OBJ;beckon/wave to OBJ;beckon/wave/scream/shout/yell at OBJ;bite OBJ;blorpl OBJ;blow out OBJ;blow up OBJ;board/embark/ride OBJ;break/crack/destro/scratc/smash/wreck OBJ;bury/plant OBJ;call/say/talk to OBJ;carry/catch/get/grab/hold/snatch/take OBJ;carry/catch/get/grab/hold/snatch/take off OBJ;carry/catch/get/grab/hold/snatch/take out OBJ;caskly OBJ;cast/incant/invoke OBJ;chase/follow/pursue OBJ;check/descri/examin/inspec/observ/watch OBJ;check/descri/examin/inspec/observ/watch/gaze/l/look/stare in OBJ;check/descri/examin/inspec/observ/watch/gaze/l/look/stare on OBJ;check/descri/examin/inspec/observ/watch/gaze/l/look/stare/rummag/search for OBJ;climb/scale OBJ;climb/scale down OBJ;climb/scale off OBJ;climb/scale out OBJ;climb/scale over OBJ;climb/scale up OBJ;climb/scale/carry/catch/get/grab/hold/snatch/take on OBJ;climb/scale/medita/rest/sit/carry/catch/get/grab/hold/snatch/take in OBJ;close/shut/zip OBJ;concea/hide OBJ;concea/hide behind OBJ;concea/hide from OBJ;concea/hide in OBJ;concea/hide under OBJ;consum/eat/gobble/taste OBJ;copy OBJ;count OBJ;cross/ford/traver OBJ;debark/disemb OBJ;depart/exit/withdr OBJ;descen OBJ;dig/excava in OBJ;dig/excava throug 
OBJ;dig/excava with OBJ;dive/jump/leap across OBJ;dive/jump/leap down OBJ;dive/jump/leap from OBJ;dive/jump/leap in OBJ;dive/jump/leap off OBJ;dive/jump/leap on OBJ;dive/jump/leap to OBJ;dive/jump/leap/go/procee/run/step/walk over OBJ;drag/pull/tug down OBJ;drag/pull/tug on OBJ;drink/sip/swallo OBJ;drink/sip/swallo from OBJ;drop/dump/releas OBJ;empty/pour/spill OBJ;enter OBJ;erase OBJ;espnis OBJ;exting OBJ;feel/pat/pet/rub/touch OBJ;fill OBJ;find OBJ;fire/shoot OBJ;fit/insert/lay/place/put/stuff/wedge down OBJ;fit/insert/lay/place/put/stuff/wedge on OBJ;fix/patch/repair OBJ;flip/set/turn OBJ;flip/set/turn off OBJ;flip/set/turn on OBJ;flip/set/turn over OBJ;fly OBJ;forget/unlear OBJ;free/unatta/unfast/untie OBJ;frotz OBJ;gaze/l/look/stare OBJ;gaze/l/look/stare around OBJ;gaze/l/look/stare at OBJ;gaze/l/look/stare behind OBJ;gaze/l/look/stare down OBJ;gaze/l/look/stare throug OBJ;gaze/l/look/stare under OBJ;gaze/l/look/stare up OBJ;gestur/point at OBJ;gestur/point to OBJ;girgol OBJ;gnusto OBJ;go/procee/run/step/walk OBJ;go/procee/run/step/walk around OBJ;go/procee/run/step/walk down OBJ;go/procee/run/step/walk in OBJ;go/procee/run/step/walk on OBJ;go/procee/run/step/walk throug OBJ;go/procee/run/step/walk to OBJ;go/procee/run/step/walk up OBJ;hello/hi OBJ;help/hint OBJ;inflat OBJ;inscri/print/scribe/write OBJ;inscri/print/scribe/write in OBJ;inscri/print/scribe/write on OBJ;jindak OBJ;kick OBJ;kill/murder/slay/stab OBJ;kiss OBJ;knock/rap at OBJ;knock/rap down OBJ;knock/rap on OBJ;label OBJ;lean on OBJ;learn/memori OBJ;leave OBJ;lesoch OBJ;lie down OBJ;lie on OBJ;lift/raise OBJ;lift/raise up OBJ;liskon OBJ;listen for OBJ;listen to OBJ;lower OBJ;lurk behind OBJ;lurk in OBJ;malyon OBJ;medita/rest/sit at OBJ;medita/rest/sit down OBJ;medita/rest/sit on OBJ;move/shift/drag/pull/tug OBJ;nap/snooze/sleep in OBJ;nap/snooze/sleep on OBJ;open/unzip OBJ;open/unzip up OBJ;pay OBJ;pick OBJ;pick up OBJ;play OBJ;press/push/shove OBJ;press/push/shove on OBJ;pump up OBJ;rattle/shake 
OBJ;reach in OBJ;read OBJ;read about OBJ;remove/shed OBJ;rezrov OBJ;rise/stand on OBJ;rise/stand/carry/catch/get/grab/hold/snatch/take up OBJ;roll up OBJ;rotate/spin/whirl OBJ;rummag/search OBJ;rummag/search in OBJ;send for OBJ;smell/sniff OBJ;snavig OBJ;squeez OBJ;swing/thrust OBJ;tell OBJ;tell about OBJ;thank/thanks OBJ;throck OBJ;tinsot OBJ;tortur OBJ;trade OBJ;wait/z for OBJ;wear OBJ;what OBJ;where OBJ;who OBJ;yomin OBJ;apply OBJ to OBJ;ask/query/quiz OBJ about OBJ;ask/query/quiz OBJ for OBJ;assaul/attack/fight/hit/strike OBJ with OBJ;attach/fasten/tie OBJ to OBJ;attach/fasten/tie up OBJ with OBJ;beckon/wave OBJ at OBJ;blind/jab/poke OBJ with OBJ;block/clog/plug OBJ with OBJ;break/crack/destro/scratc/smash/wreck OBJ with OBJ;break/crack/destro/scratc/smash/wreck down OBJ with OBJ;burn/ignite OBJ with OBJ;burn/ignite down OBJ with OBJ;bury/plant OBJ in OBJ;buy/purcha OBJ from OBJ;buy/purcha OBJ with OBJ;carry/catch/get/grab/hold/snatch/take OBJ from OBJ;carry/catch/get/grab/hold/snatch/take OBJ in OBJ;carry/catch/get/grab/hold/snatch/take OBJ off OBJ;carry/catch/get/grab/hold/snatch/take OBJ out OBJ;cast/incant/invoke OBJ at OBJ;cast/incant/invoke OBJ on OBJ;compar OBJ to OBJ;compar OBJ with OBJ;concea/hide OBJ from OBJ;copy OBJ on OBJ;copy OBJ to OBJ;copy OBJ with OBJ;count OBJ in OBJ;cut/divide/prune/sever/slash/slice/split OBJ with OBJ;cut/divide/prune/sever/slash/slice/split throug OBJ with OBJ;dig/excava OBJ with OBJ;dig/excava in OBJ with OBJ;drop/dump/releas/fit/insert/lay/place/put/stuff/wedge OBJ down OBJ;drop/dump/releas/fit/insert/lay/place/put/stuff/wedge OBJ in OBJ;empty/pour/spill OBJ from OBJ;empty/pour/spill OBJ in OBJ;empty/pour/spill OBJ on OBJ;feed/give/hand/offer OBJ OBJ;feed/give/hand/offer OBJ for OBJ;feed/give/hand/offer OBJ to OBJ;feed/give/hand/offer OBJ with OBJ;feel/pat/pet/rub/touch OBJ to OBJ;feel/pat/pet/rub/touch OBJ with OBJ;fill OBJ at OBJ;fill OBJ with OBJ;fit/insert/lay/place/put/stuff/wedge OBJ across 
OBJ;fit/insert/lay/place/put/stuff/wedge OBJ behind OBJ;fit/insert/lay/place/put/stuff/wedge OBJ on OBJ;fit/insert/lay/place/put/stuff/wedge OBJ over OBJ;fit/insert/lay/place/put/stuff/wedge OBJ under OBJ;fix/patch/repair OBJ with OBJ;flip/set/turn OBJ to OBJ;flip/set/turn OBJ with OBJ;free/unatta/unfast/untie OBJ from OBJ;gaze/l/look/stare at OBJ throug OBJ;gaze/l/look/stare up OBJ in OBJ;hurl/pitch/throw/toss OBJ at OBJ;hurl/pitch/throw/toss OBJ down OBJ;hurl/pitch/throw/toss OBJ in OBJ;hurl/pitch/throw/toss OBJ off OBJ;hurl/pitch/throw/toss OBJ on OBJ;hurl/pitch/throw/toss OBJ over OBJ;hurl/pitch/throw/toss OBJ throug OBJ;hurl/pitch/throw/toss OBJ to OBJ;inscri/print/scribe/write OBJ in OBJ;inscri/print/scribe/write OBJ on OBJ;inscri/print/scribe/write in OBJ with OBJ;inscri/print/scribe/write on OBJ with OBJ;kill/murder/slay/stab OBJ with OBJ;label OBJ with OBJ;lock OBJ with OBJ;lower OBJ down OBJ;lower OBJ in OBJ;melt/thaw OBJ with OBJ;open/unzip OBJ with OBJ;pay OBJ OBJ;pay OBJ to OBJ;pay OBJ with OBJ;pick OBJ with OBJ;press/push/shove OBJ OBJ;press/push/shove OBJ to OBJ;pry OBJ with OBJ;pry out OBJ with OBJ;pump up OBJ with OBJ;rattle/shake OBJ at OBJ;read OBJ throug OBJ;read about OBJ in OBJ;sell OBJ OBJ;sell OBJ to OBJ;sharpe OBJ with OBJ;show OBJ OBJ;show OBJ to OBJ;slide/press/push/shove OBJ under OBJ;squeez/drop/dump/releas OBJ on OBJ;swing/thrust OBJ at OBJ;tell OBJ OBJ;tell OBJ about OBJ;trade OBJ for OBJ;trade OBJ with OBJ;unlock OBJ with OBJ;",
# Number of significant characters per dictionary word in this game's parser
# (verbs in the grammar above are truncated to this length).
"max_word_length" : 6
}
# Configuration record for the Infocom game "Spirit Wars" / "The Spirit of
# Stonehenge"-style title "spirit" (.z5 Z-machine ROM).  Same schema as the
# other game dicts in this file: name, ROM filename, deterministic seed, a
# '/'-separated winning walkthrough, the parser grammar, and the parser's
# word-length limit.
spirit = {
"name": "spirit",
# Z-machine story file to load for this game.
"rom": "spirit.z5",
# RNG seed handed to the interpreter so the walkthrough replays deterministically.
"seed" : 0,
# Complete winning command sequence, '/'-separated.
"walkthrough" : "z/z/z/z/z/read note/take pallet/n/w/w/take sand/w/n/n/wear amulet/read prayer/learn frotz/cast frotz on amulet/learn frotz/cast frotz on amulet/s/s/e/e/e/e/e/s/take scroll/read scroll/gnusto espnis/read diary/n/w/s/read page/n/n/read journal/push west wall/w/take scroll/read scroll/gnusto foblub/read faded/e/s/w/w/u/e/ne/open window/sw/e/read scriptures/consult scriptures about planes/w/w/d/d/e/n/read paper/s/e/take flour/move barrel/open trapdoor/w/w/u/w/w/s/s/drop all/drop amulet/e/u/w/yell/take scroll/read scroll/enter window/sw/w/d/w/w/s/s/take all/wear amulet/gnusto swanko/s/s/w/d/put pallet on boulder/u/climb tree/shake branch/d/d/take egg/u/e/s/s/throw sand/s/s/s/s/se/push dusty s/n/push moldy s/n/push filthy s/push dusty s/n/push moldy s/n/push filthy s/n/n/take key/x floorboards/move floorboards/s/s/push dusty n/s/push moldy n/s/push filthy n/push filthy n/s/push dusty s/n/push moldy s/n/n/unlock icebox with square key/open icebox/open blue box/take scroll/read scroll/gnusto zemdor/learn zemdor/zemdor blue box/open royal-blue box/take wrapper/open wrapper/read notice/drop notice/open navy-blue box/take whistle/take crunchy cereal/take packet/take butter/hit board/u/s/s/push moldy n/s/push dusty n/n/nw/sw/search couch/take coin/x clock/turn knob/turn knob/turn knob/turn knob/turn knob/turn knob/take clock/take ledger/read ledger/read subway/open door/s/ask governor about key/take key/n/ne/s/d/put coin in slot/se/z/e/learn espnis/spells/espnis thug/search seat/take newspaper/read newspaper/read news in newspaper/read sports in newspaper/read features in newspaper/drop newspaper/w/d/z/z/s/search seat/z/n/n/u/e/e/e/se/e/touch waterfall/e/put egg in pot/put flour in pot/put butter in pot/put packet in pot/put whistle in pot/put shiny in pot/put cereal in pot/put volcano in pot/put clock in pot/put ledger in pot/take pot/take small key/w/n/take glasses/search fountain/s/w/s/unlock toolshed with small key/drop small key/drop glasses/open 
toolshed/take all from toolshed/wear gloves/wear cap/read cracked/drop cracked/n/n/clip monster/drop cap/drop clipper/s/nw/w/s/look in telescope/open telescope/take scroll/read scroll/gnusto fiznav/n/w/s/ask delbin about ale/ask morgan about dragon/offer coin to delbin/learn zemdor/zemdor ale/take rag/take headdress/put rag in pot/put headdress in pot/n/w/d/put coin in slot/s/z/z/s/z/z/n/u/w/learn foblub/foblub yupple/search seat/z/z/z/z/z/z/z/e/ne/u/n/take scroll/read scroll/gnusto gloth/w/n/n/w/take flat key/e/e/read history book/look up index in history book/look up trophy in history book/w/n/w/search mess/take all/put cube in pot/put decaf in pot/e/n/w/unlock northern door with flat key/drop flat key/open northern door/n/open book/read book/take card/s/e/n/n/turn on switch/open notebook/take term paper/read term paper/s/s/s/e/take scroll/read scroll/w/show card to librarian/drop card/w/gnusto taclor/n/e/e/e/read journal/w/read research paper/w/w/s/s/s/s/e/s/d/put coin in slot/sw/z/z/z/z/z/z/w/z/z/z/z/z/z/e/ne/u/ne/ne/n/z/show term paper to frobar/read scroll/gnusto feeyuk/e/read journal/read twisted/w/s/sw/sw/w/take toy/put toy in pot/s/take coin/read sign/take clock/pull knob/turn knob/push knob/turn knob/turn knob/read scroll/gnusto wigro/drop clock/n/w/sw/sw/take scroll/read muddy scroll/gnusto throck/s/s/x natives/x shaman/take headdress/give headdress to shaman/n/read map/search grass/drop map/n/ne/ne/e/e/d/put coin in slot/sw/z/z/z/w/z/z/z/e/ne/u/take umbrella/se/s/z/l/take coin/s/e/e/d/take scroll/read gray scroll/gnusto tossio/give gloves to zombie/take wire/u/w/w/n/n/nw/d/put coin in slot/put wire in slot/drop wire/sw/d/s/z/z/z/z/z/z/n/u/e/z/z/z/z/z/z/z/z/z/z/w/nw/u/n/w/take rag/ask skier about scroll/give rag to skier/take scroll/read waxy scroll/gnusto egdelp/e/s/d/put coin in slot/se/e/z/z/z/z/z/z/w/nw/u/sw/sw/w/take scroll/read spotted scroll/gnusto bekdab/learn bekdab/gnusto bekdab/x hermit/take cereal/give cereal to hermit/ask hermit about 
exit/ask hermit about entrance/s/read parchment/drop parchment/ne/ask farmer about oyster/e/e/se/nw/w/take sack/x boat/enter boat/take volcano/s/drop volcano in water/n/exit/n/s/x boat/learn fiznav/fiznav boat/enter boat/s/ne/exit/learn swanko/e/swanko spirit/take gray rod/put gray rod in pot/w/enter boat/sw/n/exit/w/put silver sphere in pot/ne/n/read memo/open fur sack/put coins in fur sack/take all from basis/put coins in fur sack/take coin from basis/put coin in interest/take coin from basis/put coin in loans/take seven coins from basis/put six coins in overhead/s/d/put coin in slot/se/e/z/z/w/d/z/z/z/z/z/z/s/z/z/n/n/u/e/e/e/se/n/learn throck/throck shrub/take dornberries/put dornberries in pot/s/nw/w/nw/read sign/n/n/learn foblub/foblub man/n/take scroll/read pale scroll/gnusto shazok/n/learn egdelp/egdelp golem/learn tossio/tossio golem/learn bekdab/bekdab golem/learn taclor/taclor me/s/x coach/ask coach for trophy/s/s/s/s/sw/s/ask delbin about minirva/n/w/d/take coin/put coin in slot/s/z/s/z/z/z/z/z/z/z/z/z/z/n/u/ne/u/se/ne/x treant/gnusto shazok/learn shazok/shazok storm/put red sphere in pot/sw/s/s/e/e/e/n/take pan/s/take sticker/read sticker/drop sticker/take flour from pot/put flour in pan/take butter from pot/put butter in pan/take packet from pot/put packet in pan/take sugar from pot/put sugar in pan/take egg from pot/put egg in pan/take berries from pot/put berries in pan/mix pan/learn gloth/gloth dough/open oven/put pan in oven/close oven/turn dial/turn dial/turn dial/turn dial/turn dial/turn dial/turn dial/turn dial/push button/open oven/take pan/w/w/w/n/n/nw/d/take coin/take wire/put coin in slot/put wire in slot/drop wire/sw/d/z/z/z/z/s/z/z/z/z/z/z/n/u/nw/u/s/read sign/give pan to guard/s/w/read sign/d/learn feeyuk/feeyuk me/learn feeyuk/feeyuk me/take ebony/w/w/s/s/take ebony/w/e/take balsa beam from northern niche/put ebony in northern niche/n/n/take ebony/w/e/take balsa beam/put ebony in southern niche/s/s/take ebony/take beam from southern 
niche/put ebony in southern niche/s/s/take beam from eastern niche/put ebony in eastern niche/learn swanko/e/e/swanko spirit/take smoke rod/put smoke rod in pot/enter hole/u/e/s/e/take key/unlock cabinet with shiny key/drop shiny key/open cabinet/learn egdelp/egdelp cabinet/open cabinet/look in cabinet/take crumpled scroll/read crumpled scroll/gnusto ledak/read warped/d/s/take ledger/show ledger to magistrate/x sydney/take toy/give toy to sydney/ask sydney for hat/put brown sphere in pot/drop red hat/n/n/e/s/take vase/n/n/n/x painting/take whistle/blow whistle/drop whistle/take door/s/w/n/n/d/take coin/put coin in slot/se/d/z/z/z/z/z/s/z/z/z/z/z/z/n/u/w/z/z/e/ne/u/n/e/learn ledak/drop door/open door/e/u/n/w/read journal/w/ledak painting/take painting/e/e/s/d/w/search painting/read white scroll/gnusto huncho/drop painting/w/n/n/nw/n/take coin/n/n/drop balsa wood beam/take red flag/take red flag/put red flag on speckled/put red flag on spotted/x cage/pull lever/yes/n/nw/learn taclor/taclor kobold/take green sphere/put green sphere in pot/se/ne/n/n/e/e/e/read torn/w/n/n/u/e/put trophy in depression/take trophy/w/w/put trophy in depression/take trophy/e/s/put trophy in depression/take trophy/n/n/put trophy in depression/learn swanko/s/swanko spirit/take white rod/put white rod in pot/d/s/s/w/w/s/s/sw/s/s/s/s/se/s/s/w/n/n/n/n/e/e/x dials/set left dial to 7/set center dial to 3/set right dial to 4/push button/s/x contraption/stand on disc/sw/sw/s/take briefcase/s/open briefcase/take red scroll/read red scroll/gnusto luncho/drop briefcase/w/z/z/z/z/z/z/z/z/z/z/e/ne/u/w/w/sw/sw/s/w/n/e/push statue w/push statue n/push statue n/n/e/put vase in opening/w/u/e/open umbrella/put umbrella on pool/w/learn swanko/u/swanko spirit/drop all/drop amulet/e/take black rod/put black rod in hole/w/take all/wear amulet/d/d/e/learn huncho/huncho vase/w/s/s/s/e/n/ne/ne/e/e/d/put coin in slot/sw/z/z/w/z/z/z/z/z/z/e/ne/u/n/w/n/n/n/n/e/e/s/stand on disc/se/se/read ripped/sw/sw/take black 
rod/take smoke rod/take white rod/take gray rod/learn huncho/l/huncho me/take decaf from pot/e/take can/drop decaf/w/listen/listen/enter curtain/z/z/z/z/exit/listen/open door/w/x machine/yes/drop can/x implementor/wake implementor/ne/n/l/enter square/take scrawled/w/n/n/n/n/n/n/n/read scrawled/drop scrawled/s/e/e/d/e/e/d/e/se/read blackened/take silver rod/push purple/jump/z/z/put silver rod in top/take green rod/push purple/say minirva/drop green rod/take red rod/push purple/put red rod in hole/z/take brown rod/push purple/take dusty brick/e/take thick brick/drop dusty brick/put brown rod in hole/x dusty brick/x hole/break wall/take all but parchment/wear amulet/nw/w/u/w/w/u/w/w/s/s/s/s/search snow/search top/take silver rod/s/s/s/s/s/s/s/d/take coin/put coin in slot/se/z/z/z/e/z/z/z/z/z/z/w/nw/u/s/enter boat/s/se/exit/s/search wreck/take green rod/n/enter boat/nw/n/exit/n/d/take coin/put coin in slot/se/z/e/z/z/z/w/d/s/z/z/n/n/u/e/ne/push statue/l/take red rod/sw/w/d/sw/w/d/take coin/put coin in slot/s/s/z/z/z/z/z/z/z/z/z/z/n/u/ne/u/se/s/s/e/e/d/search foundation/l/take brown rod/join red rod to silver rod/join golden rod to green rod/join golden rod to brown rod/take all from pot/throw red at smoke/throw silver at white/throw green at gray/throw brown at black",
# Action grammar: ';'-separated templates, '/'-separated verb synonyms, OBJ
# marks an object slot.
"grammar" : "awake/awaken/wake;awake/awaken/wake up;beg;bellow/scream/yell;bother/curses/darn/drat;brief/normal;c,cast;carry/get/hold/take inventory;carry/get/hold/take off;carry/get/hold/take out;chants/memory/spells;chuckle/laugh;close/cover/shut up;cough/sneeze;damn/fuck/shit/sod;diagnose/health/status;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;go/leave/run/walk;hear/listen;help/hint;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;long/verbose;meditate;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;plover/plugh/treasure/xyzzy/yoho;pray;restart;restore;save;score;script;script off;script on;short/superbrie;sing;smell/sniff;sorry;stand/carry/get/hold/take up;think;verify;version;wait/z;wave;y/yes;yawn;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;c,cast OBJ;carry/get/hold/take OBJ;carry/get/hold/take off OBJ;cast/chant OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale over OBJ;climb/scale up OBJ;clip/trim/chop/cut/prune/slice OBJ;close/cover/shut OBJ;cross/enter/go/leave/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;fold OBJ;go/leave/run/walk through OBJ;go/leave/run/walk/carry/get/hold/take into OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look in OBJ;l/look inside OBJ;l/look into OBJ;l/look through OBJ;l/look under OBJ;lie/sit/go/leave/run/walk inside OBJ;lie/sit/go/leave/run/walk/carry/get/hold/take in 
OBJ;lie/sit/stand/carry/get/hold/take on OBJ;mix/stir OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;play/blow OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;shake/yank OBJ;smell/sniff OBJ;squash/squeeze OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;wave OBJ;adjust/set OBJ to OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/get/hold/take OBJ off OBJ;cast/chant OBJ at OBJ;cast/chant OBJ on OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;connect/join OBJ to OBJ;connect/join OBJ with OBJ;dig OBJ with OBJ;discard/drop/throw OBJ against OBJ;discard/drop/throw OBJ at OBJ;discard/drop/throw OBJ down OBJ;discard/drop/throw/insert/put OBJ in OBJ;discard/drop/throw/insert/put OBJ into OBJ;discard/drop/throw/put OBJ on OBJ;discard/drop/throw/put OBJ onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ into OBJ;empty OBJ on OBJ;empty OBJ onto OBJ;empty OBJ to OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;lock OBJ with OBJ;put OBJ inside OBJ;remove/carry/get/hold/take OBJ from OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
# Parser dictionary word-length limit for this game (9 significant characters).
"max_word_length" : 9
}
# Configuration record for the game "temple" (.z5 Z-machine ROM).  Same schema
# as the other game dicts in this file: name, ROM filename, deterministic seed,
# '/'-separated winning walkthrough, parser grammar, and the parser's
# word-length limit.
temple = {
"name": "temple",
# Z-machine story file to load for this game.
"rom": "temple.z5",
# RNG seed handed to the interpreter so the walkthrough replays deterministically.
"seed" : 0,
# Complete winning command sequence, '/'-separated.
"walkthrough" : "x me/x shadow/x shape/pull railing/x statue/d/x paper/take paper/x table/x writing/x man/touch man/x books/x dust/x desk/d/x door/u/s/take vial/take vial/take vial/n/d/stand on charles/unlock door with key/open door/s/x tower/sw/x sandstone/x temple/ask charles about temple/w/x crack/n/x carvings/n/x carvings/x skeleton/take note/x note/show note to charles/x skeleton/x torn clothes/take book/x book/x vial/x mukhtar/s/s/w/n/s/s/x cat/x slabs/search slabs/move slab/d/d/x carvings/x blocks/n/n/x trapdoor/e/e/x stove/x kettle/x rack/x chest/w/w/s/s/s/ask charles about chest/u/n/e/e/s/x mural/listen/s/x huge statue/n/n/ne/n/u/put mukhtar on table/read writing/x paper/x old man/z/z/z/x mukhtar/x writing/x horriblis/x statue/d/s/sw/w/w/s/d/d/n/n/e/e/take yellow powder/put yellow powder in kettle/take red powder/put red powder in kettle/take kettle/put kettle on stove/turn on stove/draw symbol on me/take bottle/w/w/x thing/throw bottle at thing/w/yell/s/z/z/e/e/look under stove/unlock chest with brass key/open chest/take all from chest/x crowbar/x photograph/w/w/move stone/s/show photograph to charles/s/s/u/throw iron key at cat/take iron key/n/e/e/listen to wall/x wall/x door/open door/e/x device/x column/x bulbs/x lever/pull lever/z/z/z/z/enter cloud/pull lever/w/s/s/z/z/z/z/z/z/z/z",
# Action grammar: ';'-separated templates, '/'-separated verb synonyms, OBJ
# marks an object slot.
"grammar" : "about/clue/help/hint;awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;carry/get/hold/take inventory;carry/get/hold/take off;carry/get/hold/take out;chant/recite/sing;close/cover/shut up;damn/fuck/shit/sod;die/q/quit;dive/swim;draw/paint/sketch epsilon;draw/paint/sketch sign;draw/paint/sketch symbol;exit/out/outside/stand;frotz/gnusto/plugh/rezrov/xyzzy;full/fullscore;full/fullscore score;go/leave/run/walk;hear/listen;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;restart;restore;save;score;scream/yell/shout;script;script off;script on;short/superbrie;smell/sniff;sorry;stand/carry/get/hold/take up;think;verify;version;wait/z;walkthrou/walkthru;wave;y/yes;adjust/set OBJ;ask OBJ for help;attach/fasten/fix/tie OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;bang/knock OBJ;bang/knock OBJ down;bang/knock OBJ out;bang/knock OBJ over;bang/knock at OBJ;bang/knock down OBJ;bang/knock on OBJ;bang/knock out OBJ;bang/knock over OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/get/hold/take OBJ;carry/get/hold/take off OBJ;chant/recite/sing OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale over OBJ;climb/scale up OBJ;close/cover/shut OBJ;cross/enter/go/leave/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;draw/paint/sketch OBJ;draw/paint/sketch epsilon on OBJ;draw/paint/sketch sign on OBJ;draw/paint/sketch symbol on OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;follow OBJ;go/leave/run/walk through OBJ;go/leave/run/walk/carry/get/hold/take into OBJ;hear/listen OBJ;hear/listen to 
OBJ;hop/jump/skip over OBJ;kick/attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;l/look at OBJ;l/look in OBJ;l/look inside OBJ;l/look into OBJ;l/look through OBJ;l/look under OBJ;lie/sit/go/leave/run/walk inside OBJ;lie/sit/go/leave/run/walk/carry/get/hold/take in OBJ;lie/sit/stand/carry/get/hold/take on OBJ;lift OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;smell/sniff OBJ;squash/squeeze OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;think about OBJ;wave OBJ;adjust/set OBJ to OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/get/hold/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;dig OBJ with OBJ;discard/drop/throw OBJ against OBJ;discard/drop/throw OBJ at OBJ;discard/drop/throw OBJ down OBJ;discard/drop/throw/insert/put OBJ in OBJ;discard/drop/throw/insert/put OBJ into OBJ;discard/drop/throw/put OBJ on OBJ;discard/drop/throw/put OBJ onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;draw/paint/sketch OBJ on OBJ;empty OBJ into OBJ;empty OBJ on OBJ;empty OBJ onto OBJ;empty OBJ to OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;lock OBJ with OBJ;put OBJ inside OBJ;remove/carry/get/hold/take OBJ from OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
# Parser dictionary word-length limit for this game (9 significant characters).
"max_word_length" : 9
}
theatre = {
"name": "theatre",
"rom": "theatre.z5",
"seed" : 0,
"walkthrough" : "nw/d/take all/examine panel/turn on switch/x barrels/get popcorn/get popcorn/get popcorn/get popcorn/u/se/s/n/x thug/z/z/z/z/n/drop all but pager/nw/drop pager/n/get all/w/w/s/w/push piano east/push piano south/climb up piano/open trapdoor/u/get all/n/turn handle/n/get lens/s/s/d/n/n/e/e/n/move carpet/get all/examine body/read book/n/get all/n/open bin/get page/get ticket/sw/s/s/s/se/drop all but lens/u/open door/e/s/open curtains/ne/w/d/put lens in spotlight/get lens/get ticket/u/e/n/push chandelier/drop ticket/e/e/u/go chandelier/nw/get all/s/w/n/get page/get page/s/s/n/unhook chain/push bookcase/s/x nest/get blue/w/u/get crow/d/climb down chimney/open coffin/d/open chest/get bottle/get lump/clean lump/get key/u/u/e/s/w/get mask/break bottle/get mask/e/s/s/climb piano/u/n/wear mask/n/put lens in telescope/drop mask/look in telescope/s/s/d/n/n/n/n/open cabinet/get page/get tablets/get stethoscope/get vial/read letter/s/s/e/e/s/wear stethoscope/turn dial left/turn dial right/turn dial left/turn dial right/drop stethoscope/open desk/get page/get plans/examine plans/drop plans/kick south wall/s/get page/examine skeleton/x rags/x watch/pull dial/turn dial/turn dial/turn dial/turn dial/n/se/drop all/u/get all/n/get all/s/e/n/drop ticket/n/n/drop ticket/get page/examine rope/climb rope/w/open locker/push locker east/d/read newspaper/wear amulet/s/s/e/e/take picture/drop camera/e/nw/examine items/get green/get page/se/w/w/n/se/s/w/d/get all/ne/examine hooks/pull clean hook/d/get page/d/unlock door with key/open door/drop key/e/get page/pour liquid on body/throw body in pit/drop vial/d/e/get page/sw/d/show violet to trent/show blue to trent/show red to trent/show green to trent/u/ne/se/drop popcorn/e/drop popcorn/e/drop popcorn/se/drop popcorn/nw/w/n/examine lever/wait/pull lever/s/e/se/se/get page/put control in top socket/put mana in bottom socket/put strength in right socket/put wisdom in left socket/get dagger/nw/drop amulet/nw/w/w/nw/sw/d/examine 
dagger/get earth crystal/show earth crystal to trent/show star crystal to trent/put star crystal in dagger/ask trent about elizabeth/drop all/get dagger/get tablets/u/ne/ne/ne/y/n/n/give tablets to trent/get star/d/throw star at young woman//nw/d/u/se/s/enter car",
"grammar" : "abra cadabra;agaliarep/echo/plover/plugh/xyzzy/yoho;answer/say/shout/speak/whisper;awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;carry/get/take inventory;carry/get/take off;carry/get/take out;close/cover/shut up;cls/dir/ls;credits/version;damn/fuck/shit/sod;die/q/quit;dig;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;go/leave/run/walk;hear/listen;hello/hi/howdy;help/hint/hints/instructi;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;l/look around;l/look down;l/look up;lie/sit down on;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;open/uncover/undo/unwrap sesame;places;pray;prompt/prompting;restart;restore;save;score;script;script off;script on;short/superbrie;sing;smell/sniff;sorry;stand in;stand/carry/get/take up;think;time;verify;wait/z;wave;y/yes;adjust/set OBJ;answer/say/shout/speak/whisper OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;board/mount/ride OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/get/take OBJ;carry/get/take in to OBJ;carry/get/take off OBJ;check/describe/examine/watch/x OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale out OBJ;climb/scale out of OBJ;climb/scale over OBJ;close/cover/shut OBJ;connect/hook/attach/fasten/fix/tie OBJ;cross/enter/go/leave/run/walk OBJ;damn/fuck/shit/sod OBJ;damn/fuck/shit/sod OBJ;descend OBJ;dig OBJ;discard/drop OBJ;discharge/drain/pour/spill OBJ;disconnec/unattach/unhook/untie OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;flick/flip OBJ;go/leave/run/walk through OBJ;go/leave/run/walk/carry/get/take/carry/get/take into OBJ;grab hold of OBJ;grab/peel/remove OBJ;hear/listen 
OBJ;hear/listen to OBJ;help/hint/hints/instructi OBJ;hold on OBJ;hold on to OBJ;hold onto OBJ;hop/jump/skip over OBJ;kick in OBJ;kick/tear/attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;knock OBJ;knock on OBJ;l/look at OBJ;l/look behind OBJ;l/look down OBJ;l/look in OBJ;l/look inside OBJ;l/look through OBJ;l/look under OBJ;l/look/l/look into OBJ;lie/sit/go/leave/run/walk/stand inside OBJ;lie/sit/go/leave/run/walk/stand/carry/get/take in OBJ;lie/sit/stand/carry/get/take on OBJ;open/uncover/undo/unwrap OBJ;peel off OBJ;photograp OBJ;pick OBJ up;pick up OBJ;play/practice OBJ;put OBJ down;put down OBJ;put on OBJ;read OBJ;rotate/screw/turn/twist/unscrew OBJ;rotate/screw/turn/twist/unscrew OBJ left;rotate/screw/turn/twist/unscrew OBJ right;rotate/screw/turn/twist/unscrew OBJ to left;rotate/screw/turn/twist/unscrew OBJ to right;rotate/screw/turn/twist/unscrew OBJ to the left;rotate/screw/turn/twist/unscrew OBJ to the right;search OBJ;shinny/slide/climb/scale/go/leave/run/walk down OBJ;shinny/slide/climb/scale/go/leave/run/walk up OBJ;smell/sniff OBJ;squash/squeeze OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;talk to OBJ;taste OBJ;wave OBJ;wind OBJ;ask OBJ for OBJ;biff/chuck/throw OBJ against OBJ;biff/chuck/throw OBJ at OBJ;biff/chuck/throw OBJ in OBJ;biff/chuck/throw OBJ into OBJ;burn/light OBJ with OBJ;carry/get/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;connect/hook/attach/fasten/fix/tie OBJ onto OBJ;connect/hook/attach/fasten/fix/tie OBJ to OBJ;dig OBJ with OBJ;discard/drop OBJ down OBJ;discard/drop/insert/put OBJ in OBJ;discard/drop/insert/put OBJ into OBJ;discard/drop/put OBJ on OBJ;discard/drop/put OBJ onto OBJ;discharge/drain/pour/spill OBJ in OBJ;discharge/drain/pour/spill OBJ 
into OBJ;discharge/drain/pour/spill OBJ on OBJ;discharge/drain/pour/spill OBJ on to OBJ;discharge/drain/pour/spill OBJ onto OBJ;discharge/drain/pour/spill OBJ over OBJ;disconnec/unattach/unhook/untie OBJ from OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ into OBJ;empty OBJ on OBJ;empty OBJ onto OBJ;empty OBJ to OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;l/look at OBJ through OBJ;lock OBJ with OBJ;photograp OBJ with OBJ;put OBJ inside OBJ;remove/carry/get/take OBJ from OBJ;rotate/screw/turn/twist/unscrew/adjust/set OBJ to OBJ;search/check/describe/examine/watch/x OBJ through OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
trinity = {
"name": "trinity",
"rom": "trinity.z4",
"seed" : 0,
"walkthrough" : "i/examine watch/take coin/examine coin/e/examine memorial/examine blossoms/take ball/nw/examine sundial/examine gnomon/unscrew gnomon/take gnomon/nw/examine woman/examine pigeons/buy bag/take bag/take coin/feed birds/take ruby/ask woman about roadrunner/ask woman about ruby/e/examine boats/take bird/examine bird/open bird/read paper/nw/examine pram/open pram/look in pram/push pram east/examine boy/push pram east/examine woman/throw ball at umbrella/take umbrella/push pram south/examine notice/e/enter pram/open umbrella/take all but pram/examine statue/examine missile/e/e/i/fold paper/examine meteor/put paper in pocket/put coin in pocket/examine watch/take card/read card/put card in pocket/n/examine toadstools/ne/examine log/take log/examine splinter/take splinter/e/drop bag/drop umbrella/u/u/examine shadow/examine sundial/examine symbols/examine rose/examine hole/examine ring/put gnomon in hole/d/d/s/sw/e/examine sculpture/reach in sculpture/n/u/take axe/examine axe/s/d/w/ne/n/u/u/put gnomon in hole/push lever/turn ring/d/d/s/sw/e/n/u/s/d/w/ne/n/nw/examine flytrap/e/examine tree/examine chasm/examine mesa/examine toadstool/chop tree/push tree north/sw/e/drop axe/take umbrella/w/w/examine waterfall/w/enter waterfall/search waterfall/w/examine icicles/hit icicle with umbrella/take icicle/e/e/e/u/u/d/d/ne/e/e/put icicle on lump/take lump/examine lump/w/w/examine hive/sw/drop lump/drop umbrella/w/w/n/examine crypt/read crypt/open crypt/examine statues/examine barrow/n/examine wight/examine hole/examine door/n/examine bones/search bones/take key/examine key/s/put key in hole/turn key/d/e/e/n/n/examine boy/examine bubble/examine wand/examine dish/examine headphones/talk to boy/se/ne/examine cottage/knock on door/open door/e/examine map/examine cauldron/look in cauldron/smell/examine book/read book/examine pedestal/turn page/read book/examine magpie/examine cage/wait/wait/wait/wait/wait/wait/wait/wait/open cage/take cage/open back door/e/examine 
herbs/examine fence/examine thyme/take thyme/examine refuse/search refuse/take garlic/examine garlic/examine toadstool/examine white door/w/w/se/w/put hand in hive/w/w/examine flytrap/e/e/put hand in hive/e/nw/e/put honey in cauldron/put hand in cauldron/w/se/sw/w/drop garlic/drop birdcage/drop splinter/take umbrella/u/u/turn ring to sixth symbol/d/d/e/e/enter door/open umbrella/take all/examine teachers/examine children/e/examine spade/take spade/w/wait/give umbrella to girl/e/give paper to girl/w/ride bird/enter door/w/w/w/w/n/open lid with spade/look in crypt/examine corpse/examine bandage/take bandage/examine mouth/take silver coin/examine silver coin/put silver coin in pocket/examine boots/take boots/wear boots/examine shroud/take shroud/examine corpse/kiss corpse/s/e/e/drop shroud/drop bandage/drop spade/take splinter/u/u/turn ring to third symbol/d/d/w/w/n/n/n/enter door/examine cylinder/take lantern/w/turn on lantern/drop lantern/w/put splinter into crevice/take skink/put skink in pocket/e/take all/e/e/s/turn key/d/e/e/e/turn off lantern/drop lantern/examine walkie/drop walkie/take axe/take lump/u/u/turn ring to second symbol/d/d/nw/n/enter dish/z/z/s/sw/enter door/take skink/kill skink/examine satellite/examine stars/examine door/break bubble with axe/e/e/drop skink/drop axe/u/u/turn ring to fourth symbol/d/d/nw/e/n/enter door/d/open box/examine switch/push switch/push button/s/nw/examine islet/examine tree/w/examine crabs/examine fin/look/w/examine coconuts/point at coconut/examine tide/examine tide/examine tide/point to coconut/take coconut/se/n/u/enter door/s/w/se/take axe/take skink/take garlic/ne/e/nw/e/drop coconut/cut coconut with axe/take coconut/pour milk in cauldron/put skink in cauldron/put garlic in cauldron/w/z/e/look in cauldron/examine emerald/take emerald/put emerald in green boot/w/se/w/sw/drop coconut/drop axe/take cage/u/u/turn ring to fifth symbol/d/d/ne/e/nw/e/e/enter white door/d/ne/ne/look in fissure/take lemming/put lemming in 
cage/close cage/sw/sw/u/enter door/examine lemming/w/w/se/w/sw/take shroud/wear shroud/take walkie/take lantern/take bag/u/u/turn ring to seventh symbol/d/d/se/z/z/enter dory/give silver coin to oarsman/s/drop shroud/enter door/open book/take slip/examine slip/examine diagram/drop slip/w/d/d/take ruby/put ruby in red boot/nw/nw/nw/open jeep/in/examine radio/examine dial/set slider to 31/pull antenna/turn on walkie/out/se/se/se/se/se/open gate/se/s/open door/e/take knife/e/n/n/close door/open cage/open door/drop cage/s/search workbench/take screwdriver/open front door/e/e/se/u/drop walkie/drop bag/d/ne/u/turn on lantern/take binoculars/d/take all but lantern/u/u/take all/s/w/w/sw/sw/s/look through binoculars at shelter/wait/point to key/take key/examine thin man/n/n/n/n/unlock box with key/examine panel/pull breaker/close breaker/sw/sw/sw/sw/examine dog/examine searchlight/drop bag/ne/ne/ne/ne/u/u/e/examine enclosure/examine panel/unscrew panel/look in panel/turn on bulb/look in panel/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/cut blue wire/i/e/take ball/nw/examine sundial/turn gnomon/take gnomon/nw/ask woman about roadrunner/ask woman about ruby/give card to woman/buy bag/take bag/take coin/feed birds/take ruby/e/examine boats/take paper bird/open bird/read paper/examine watch/nw/open pram/push pram e/take dish/push pram e/examine woman/",
"grammar" : "brief;diagnose;duck;fly;gaze/l/look/peek/peer/stare;go/back/retreat/advance/crawl/hike/hop/jog/proceed/run/saunter/skip/step/stroll/tramp/trudge/walk;hint/hints/aid/help/pray;i/inventory;loiter/wait/z;nap/rest/sleep/snooze;notify;q/quit;restart;restore;rise/stand;save;score;script;super/superbrie;surface;t/time;thank/thanks;unscript;verbose;version;aid/help/save OBJ;answer OBJ;approach OBJ;arouse/frighten/scare/startle/surprise OBJ;ask/interroga/query/question/quiz OBJ;ask/interroga/query/question/quiz about OBJ;ask/interroga/query/question/quiz for OBJ;awake/awaken/rouse/wake OBJ;awake/awaken/rouse/wake up OBJ;bandage OBJ;bathe/swim/wade OBJ;bathe/swim/wade in OBJ;bathe/swim/wade over OBJ;bathe/swim/wade through OBJ;bathe/swim/wade to OBJ;bathe/swim/wade under OBJ;bathe/swim/wade up OBJ;bathe/swim/wade/dive down OBJ;bite OBJ;blow in OBJ;blow on OBJ;blow out OBJ;blow through OBJ;blow up OBJ;board/mount OBJ;bounce/dribble OBJ;bounce/dribble around OBJ;bound/hurdle/jump/leap/vault OBJ;bound/hurdle/jump/leap/vault from OBJ;bound/hurdle/jump/leap/vault in OBJ;bound/hurdle/jump/leap/vault off OBJ;bound/hurdle/jump/leap/vault over OBJ;bound/hurdle/jump/leap/vault through OBJ;bound/hurdle/jump/leap/vault to OBJ;bound/hurdle/jump/leap/vault up OBJ;bow/genuflect/grovel/kneel before OBJ;bow/genuflect/grovel/kneel to OBJ;breathe in OBJ;breathe out OBJ;breathe/hypervent/inhale OBJ;browse/leaf/read/skim OBJ;browse/leaf/read/skim through OBJ;brush/clean/polish/smear/sweep/wipe OBJ;brush/clean/polish/smear/sweep/wipe off OBJ;buy OBJ;bye/farewell/goodbye OBJ;call/maybe/proclaim/say/speak/talk/utter OBJ;call/maybe/proclaim/say/speak/talk/utter to OBJ;carry/catch/confiscat/grab/keep/seize/snatch/steal/take down OBJ;carry/catch/confiscat/grab/keep/seize/snatch/steal/take off OBJ;carry/catch/confiscat/grab/keep/seize/snatch/steal/take up OBJ;cavort/fiddle/play/toy OBJ;cavort/fiddle/play/toy with OBJ;chase/follow/pursue 
OBJ;check/describe/examine/inspect/see/study/survey/trace OBJ;check/describe/examine/inspect/see/study/survey/trace on OBJ;check/describe/examine/inspect/see/study/survey/trace/gaze/l/look/peek/peer/stare in OBJ;check/describe/examine/inspect/see/study/survey/trace/gaze/l/look/peek/peer/stare/frisk/ransack/rummage/search/sift for OBJ;chuck/fling/hurl/pitch/throw/toss away OBJ;clear/empty OBJ;clear/empty off OBJ;clear/empty/shake out OBJ;climb through OBJ;climb under OBJ;climb/get in OBJ;climb/get on OBJ;climb/go/bound/hurdle/jump/leap/vault/advance/crawl/hike/hop/jog/proceed/run/saunter/skip/step/stroll/tramp/trudge/walk out OBJ;climb/go/scale/advance/crawl/hike/hop/jog/proceed/run/saunter/skip/step/stroll/tramp/trudge/walk down OBJ;climb/go/scale/advance/crawl/hike/hop/jog/proceed/run/saunter/skip/step/stroll/tramp/trudge/walk up OBJ;climb/scale over OBJ;climb/scale/ascend OBJ;close/shut/slam OBJ;close/shut/slam/flip/rotate/toggle/turn/twist off OBJ;consume/devour/eat/gobble/ingest/nibble/swallow OBJ;count/tabulate/tally OBJ;cross/traverse OBJ;crouch/settle/sit/squat OBJ;crouch/settle/sit/squat at OBJ;crouch/settle/sit/squat down OBJ;crouch/settle/sit/squat in OBJ;crouch/settle/sit/squat on OBJ;crumple/crush/squash/squeeze/squish OBJ;defile/hump/molest/rape/ravish OBJ;deflate OBJ;depart/exit/scram/withdraw/disembark OBJ;descend OBJ;detach/disconnec/disengage/unattach/unplug OBJ;detonate/explode/fire OBJ;disassemb/open/unseal OBJ;disassemb/open/unseal up OBJ;discover/find/seek OBJ;disembark from OBJ;disembark out OBJ;dislocate/move/roll/shift OBJ;dispatch/kill/murder/punch/slay/stab/vanquish/wound OBJ;disrobe/strip/undress OBJ;dive OBJ;dive in OBJ;dive over OBJ;dive under OBJ;don/wear OBJ;douse/extinguis/quench/snuff OBJ;drag/pull/tug/yank OBJ;drag/pull/tug/yank on OBJ;drag/pull/tug/yank out OBJ;dress OBJ;drink/guzzle/imbibe/quaff/sip/swill OBJ;drink/guzzle/imbibe/quaff/sip/swill from OBJ;drive OBJ;drop/dump OBJ;duck/hide/rise/stand under 
OBJ;elevate/hoist/lift/raise OBJ;elevate/hoist/lift/raise up OBJ;embark on OBJ;embark/enter OBJ;employ/exploit/operate/use OBJ;escape/flee OBJ;escape/flee from OBJ;exhale OBJ;extend/unflatten/unfold OBJ;fasten/secure/tie OBJ;feel/grope/reach in OBJ;flatten out OBJ;flip/rotate/toggle/turn/twist around OBJ;flip/rotate/toggle/turn/twist down OBJ;flip/rotate/toggle/turn/twist on OBJ;flip/rotate/toggle/turn/twist over OBJ;flip/rotate/toggle/turn/twist through OBJ;flip/rotate/toggle/turn/twist to OBJ;flush OBJ;fly OBJ;fly on OBJ;fly over OBJ;fly with OBJ;focus on OBJ;focus/adjust OBJ;fold/wrap out OBJ;fold/wrap up OBJ;fold/wrap/flatten OBJ;frisk/ransack/rummage/search/sift OBJ;frisk/ransack/rummage/search/sift in OBJ;frisk/ransack/rummage/search/sift through OBJ;gaze/l/look/peek/peer/stare OBJ;gaze/l/look/peek/peer/stare around OBJ;gaze/l/look/peek/peer/stare at OBJ;gaze/l/look/peek/peer/stare behind OBJ;gaze/l/look/peek/peer/stare down OBJ;gaze/l/look/peek/peer/stare on OBJ;gaze/l/look/peek/peer/stare out OBJ;gaze/l/look/peek/peer/stare over OBJ;gaze/l/look/peek/peer/stare through OBJ;gaze/l/look/peek/peer/stare to OBJ;gaze/l/look/peek/peer/stare up OBJ;gaze/l/look/peek/peer/stare/frisk/ransack/rummage/search/sift under OBJ;get down OBJ;get off OBJ;get out OBJ;get under OBJ;get up OBJ;get/hold/carry/catch/confiscat/grab/keep/seize/snatch/steal/take OBJ;go/advance/crawl/hike/hop/jog/proceed/run/saunter/skip/step/stroll/tramp/trudge/walk OBJ;go/advance/crawl/hike/hop/jog/proceed/run/saunter/skip/step/stroll/tramp/trudge/walk around OBJ;go/advance/crawl/hike/hop/jog/proceed/run/saunter/skip/step/stroll/tramp/trudge/walk behind OBJ;go/advance/crawl/hike/hop/jog/proceed/run/saunter/skip/step/stroll/tramp/trudge/walk in OBJ;go/advance/crawl/hike/hop/jog/proceed/run/saunter/skip/step/stroll/tramp/trudge/walk through OBJ;go/advance/crawl/hike/hop/jog/proceed/run/saunter/skip/step/stroll/tramp/trudge/walk to 
OBJ;go/advance/crawl/hike/hop/jog/proceed/run/saunter/skip/step/stroll/tramp/trudge/walk under OBJ;go/cross/traverse/advance/crawl/hike/hop/jog/proceed/run/saunter/skip/step/stroll/tramp/trudge/walk over OBJ;go/get/back/retreat/escape/flee/advance/crawl/hike/hop/jog/proceed/run/saunter/skip/step/stroll/tramp/trudge/walk away OBJ;go/retreat/advance/crawl/hike/hop/jog/proceed/run/saunter/skip/step/stroll/tramp/trudge/walk from OBJ;go/rise/stand/bound/hurdle/jump/leap/vault/advance/crawl/hike/hop/jog/proceed/run/saunter/skip/step/stroll/tramp/trudge/walk on OBJ;greet/greetings/hello/hi/salute/affirmati/aye/naw/nay/negative/no/nope/ok/okay/positive/positivel/sure/y/yes/yup OBJ;grope/reach through OBJ;hear OBJ;heel OBJ;hide OBJ;hide behind OBJ;hide in OBJ;hold down OBJ;hold on OBJ;hold up OBJ;hold/carry/catch/confiscat/grab/keep/seize/snatch/steal/take/drag/pull/tug/yank apart OBJ;howl/scream/shout/yell OBJ;howl/scream/shout/yell at OBJ;howl/scream/shout/yell to OBJ;inflate/blow OBJ;insert/lay/place/put/stash/stuff down OBJ;insert/lay/place/put/stash/stuff on OBJ;insert/lay/place/put/stash/stuff out OBJ;kick OBJ;kick around OBJ;kick in OBJ;kick/stamp/trample down OBJ;kiss/smooch OBJ;knock/pound/rap at OBJ;knock/pound/rap on OBJ;lean on OBJ;leave OBJ;let go OBJ;lick/taste OBJ;lie/recline/repose down OBJ;lie/recline/repose in OBJ;lie/recline/repose on OBJ;listen OBJ;listen for OBJ;listen in OBJ;listen to OBJ;loiter/wait/z for OBJ;lower OBJ;make OBJ;make up OBJ;melt OBJ;nap/rest/sleep/snooze in OBJ;nap/rest/sleep/snooze on OBJ;nudge/press/push/shove/stick/thrust OBJ;nudge/press/push/shove/stick/thrust in OBJ;nudge/press/push/shove/stick/thrust on OBJ;nudge/press/push/shove/stick/thrust/drag/pull/tug/yank down OBJ;nudge/press/push/shove/stick/thrust/drag/pull/tug/yank up OBJ;observe/watch OBJ;pee/urinate/piss OBJ;pee/urinate/piss on OBJ;pet/pat/feel/disturb/rub/touch OBJ;pick up OBJ;piss off OBJ;plug in OBJ;pocket OBJ;point OBJ;point at OBJ;point to OBJ;pour/spill/sprinkle 
OBJ;pour/spill/sprinkle out OBJ;preserve/rescue OBJ;refuse OBJ;remove OBJ;replace OBJ;reply/respond/retort to OBJ;retract OBJ;ride OBJ;ride in OBJ;ride on OBJ;rip/tear off OBJ;rise/stand in OBJ;rise/stand up OBJ;set off OBJ;shoot OBJ;shoot off OBJ;slide OBJ;slide/bound/hurdle/jump/leap/vault down OBJ;smell/sniff/whiff OBJ;spin/whirl OBJ;stamp/trample on OBJ;stamp/trample over OBJ;start OBJ;stop OBJ;suck OBJ;suck in OBJ;suck on OBJ;swing OBJ;swing on OBJ;tell OBJ;thank/thanks OBJ;undo/unfasten/unhook/untie OBJ;untangle OBJ;wave OBJ;wave/beckon/grin/laugh/motion/nod/smile/sneer at OBJ;wave/beckon/grin/laugh/motion/nod/smile/sneer to OBJ;what/what's/whats OBJ;what/what's/whats about OBJ;where/where's/wheres OBJ;who/who's/whos OBJ;wind OBJ;wind up OBJ;wish for OBJ;ask/interroga/query/question/quiz OBJ about OBJ;ask/interroga/query/question/quiz OBJ for OBJ;assault/attack/fight/hurt/injure OBJ with OBJ;attach/connect OBJ to OBJ;bestow/deliver/donate/give/hand/offer/present OBJ OBJ;bestow/deliver/donate/give/hand/offer/present OBJ to OBJ;blind/jab/poke OBJ with OBJ;block/cover/shield OBJ with OBJ;block/cover/shield over OBJ with OBJ;block/cover/shield up OBJ with OBJ;break/crack/crumble/damage/demolish/destroy/erase/smash/trash/wreck OBJ off OBJ;break/crack/crumble/damage/demolish/destroy/erase/smash/trash/wreck OBJ with OBJ;break/crack/crumble/damage/demolish/destroy/erase/smash/trash/wreck down OBJ with OBJ;break/crack/crumble/damage/demolish/destroy/erase/smash/trash/wreck in OBJ with OBJ;break/crack/crumble/damage/demolish/destroy/erase/smash/trash/wreck through OBJ with OBJ;bribe/entice/pay/renumerat OBJ with OBJ;browse/leaf/read/skim OBJ through OBJ;browse/leaf/read/skim OBJ to OBJ;brush/clean/polish/smear/sweep/wipe OBJ off OBJ;brush/clean/polish/smear/sweep/wipe OBJ on OBJ;brush/clean/polish/smear/sweep/wipe OBJ over OBJ;brush/clean/polish/smear/sweep/wipe off OBJ on OBJ;brush/clean/polish/smear/sweep/wipe off OBJ over 
OBJ;burn/char/combust/cremate/ignite/incinerat/kindle/scorch OBJ with OBJ;burn/char/combust/cremate/ignite/incinerat/kindle/scorch down OBJ with OBJ;burn/char/combust/cremate/ignite/incinerat/kindle/scorch up OBJ with OBJ;burst/pop OBJ with OBJ;buy OBJ from OBJ;buy OBJ with OBJ;charge OBJ with OBJ;check/describe/examine/inspect/see/study/survey/trace OBJ through OBJ;check/describe/examine/inspect/see/study/survey/trace OBJ with OBJ;chop/cleave/cut/gash/lacerate/sever/slash/slice/split down OBJ with OBJ;chop/cleave/cut/gash/lacerate/sever/slash/slice/split through OBJ with OBJ;chop/cleave/cut/gash/lacerate/sever/slash/slice/split up OBJ with OBJ;chuck/fling/hurl/pitch/throw/toss OBJ OBJ;chuck/fling/hurl/pitch/throw/toss OBJ at OBJ;chuck/fling/hurl/pitch/throw/toss OBJ down OBJ;chuck/fling/hurl/pitch/throw/toss OBJ in OBJ;chuck/fling/hurl/pitch/throw/toss OBJ off OBJ;chuck/fling/hurl/pitch/throw/toss OBJ on OBJ;chuck/fling/hurl/pitch/throw/toss OBJ over OBJ;chuck/fling/hurl/pitch/throw/toss OBJ through OBJ;chuck/fling/hurl/pitch/throw/toss OBJ to OBJ;clear/empty OBJ from OBJ;clear/empty OBJ out OBJ;clear/empty out OBJ from OBJ;clear/empty/pour/spill/sprinkle OBJ in OBJ;clear/empty/pour/spill/sprinkle OBJ on OBJ;clear/empty/pour/spill/sprinkle out OBJ on OBJ;clear/empty/shake/pour/spill/sprinkle out OBJ in OBJ;conceal OBJ in OBJ;conceal/hide OBJ behind OBJ;conceal/hide OBJ under OBJ;crumple/crush/squash/squeeze/squish OBJ on OBJ;detach/disconnec/disengage/unattach/unplug OBJ from OBJ;dig/excavate OBJ with OBJ;dig/excavate at OBJ with OBJ;dig/excavate through OBJ with OBJ;dig/excavate up OBJ with OBJ;dig/excavate with OBJ in OBJ;dig/excavate/dig/excavate in OBJ with OBJ;disassemb/open/unseal OBJ with OBJ;disassemb/open/unseal up OBJ with OBJ;dispatch/kill/murder/punch/slay/stab/vanquish/wound OBJ with OBJ;display/show OBJ OBJ;display/show OBJ to OBJ;disturb/rub/touch OBJ to OBJ;disturb/rub/touch OBJ with OBJ;drag/pull/tug/yank OBJ out OBJ;drag/pull/tug/yank OBJ with 
OBJ;drag/pull/tug/yank down OBJ with OBJ;drag/pull/tug/yank on OBJ with OBJ;drag/pull/tug/yank up OBJ with OBJ;drop/dump OBJ down OBJ;drop/dump OBJ in OBJ;drop/dump OBJ on OBJ;fasten/secure/tie OBJ to OBJ;fasten/secure/tie up OBJ with OBJ;feed OBJ OBJ;feed OBJ to OBJ;feed OBJ with OBJ;fell/chop/cleave/cut/gash/lacerate/sever/slash/slice/split OBJ with OBJ;fill OBJ at OBJ;fill OBJ with OBJ;fix up OBJ with OBJ;fix/repair/service OBJ with OBJ;flash/shine OBJ at OBJ;flash/shine OBJ in OBJ;flash/shine OBJ on OBJ;flash/shine OBJ over OBJ;flip/rotate/toggle/turn/twist OBJ OBJ;flip/rotate/toggle/turn/twist OBJ with OBJ;flip/rotate/toggle/turn/twist/dial/tune/change/set OBJ to OBJ;focus OBJ at OBJ;focus OBJ on OBJ;fold/wrap OBJ in OBJ;fold/wrap/wind OBJ around OBJ;fold/wrap/wind up OBJ in OBJ;force/wedge OBJ in OBJ;force/wedge/elevate/hoist/lift/raise up OBJ with OBJ;force/wedge/nudge/press/push/shove/stick/thrust/dislocate/move/roll/shift/elevate/hoist/lift/raise OBJ with OBJ;free/release OBJ from OBJ;free/release OBJ with OBJ;gaze/l/look/peek/peer/stare at OBJ through OBJ;gaze/l/look/peek/peer/stare at OBJ with OBJ;gaze/l/look/peek/peer/stare in OBJ through OBJ;gaze/l/look/peek/peer/stare in OBJ with OBJ;gaze/l/look/peek/peer/stare through OBJ at OBJ;get/bring OBJ OBJ;get/bring OBJ for OBJ;get/bring OBJ to OBJ;get/carry/catch/confiscat/grab/keep/seize/snatch/steal/take OBJ from OBJ;get/carry/catch/confiscat/grab/keep/seize/snatch/steal/take OBJ in OBJ;get/carry/catch/confiscat/grab/keep/seize/snatch/steal/take OBJ off OBJ;get/carry/catch/confiscat/grab/keep/seize/snatch/steal/take OBJ on OBJ;get/carry/catch/confiscat/grab/keep/seize/snatch/steal/take OBJ out OBJ;get/carry/catch/confiscat/grab/keep/seize/snatch/steal/take OBJ with OBJ;grope/reach OBJ with OBJ;grope/reach for OBJ with OBJ;grope/reach out OBJ with OBJ;grope/reach to OBJ with OBJ;hit/slap/strike/swat/whack at OBJ with OBJ;hit/slap/strike/swat/whack/knock/pound/rap OBJ with OBJ;hold OBJ against OBJ;hold OBJ in 
OBJ;hold OBJ on OBJ;hold OBJ over OBJ;hook/jiggle/loosen/wiggle/wobble/pry OBJ with OBJ;illuminat OBJ with OBJ;insert/lay/place/put/stash/stuff OBJ against OBJ;insert/lay/place/put/stash/stuff OBJ behind OBJ;insert/lay/place/put/stash/stuff OBJ down OBJ;insert/lay/place/put/stash/stuff OBJ in OBJ;insert/lay/place/put/stash/stuff OBJ on OBJ;insert/lay/place/put/stash/stuff OBJ over OBJ;insert/lay/place/put/stash/stuff OBJ through OBJ;insert/lay/place/put/stash/stuff OBJ under OBJ;jostle/rattle/shake OBJ with OBJ;knock/pound/rap down OBJ with OBJ;leave OBJ in OBJ;leave OBJ on OBJ;let OBJ go OBJ;light OBJ with OBJ;light up OBJ with OBJ;lock OBJ with OBJ;nudge/press/push/shove/stick/thrust OBJ at OBJ;nudge/press/push/shove/stick/thrust OBJ in OBJ;nudge/press/push/shove/stick/thrust OBJ on OBJ;nudge/press/push/shove/stick/thrust OBJ over OBJ;nudge/press/push/shove/stick/thrust OBJ under OBJ;nudge/press/push/shove/stick/thrust down OBJ with OBJ;nudge/press/push/shove/stick/thrust on OBJ with OBJ;nudge/press/push/shove/stick/thrust/dislocate/move/roll/shift/drag/pull/tug/yank OBJ OBJ;nudge/press/push/shove/stick/thrust/dislocate/move/roll/shift/drag/pull/tug/yank OBJ to OBJ;observe/watch OBJ through OBJ;observe/watch OBJ with OBJ;pick OBJ with OBJ;plug OBJ in OBJ;point at OBJ for OBJ;point out OBJ to OBJ;point to OBJ for OBJ;point/aim OBJ at OBJ;point/aim OBJ to OBJ;point/aim at OBJ with OBJ;pour/spill/sprinkle OBJ from OBJ;pour/spill/sprinkle OBJ out OBJ;pry out OBJ with OBJ;pry up OBJ with OBJ;remove OBJ from OBJ;remove OBJ in OBJ;remove OBJ on OBJ;remove OBJ with OBJ;rip/tear OBJ with OBJ;rip/tear down OBJ with OBJ;rip/tear through OBJ with OBJ;rip/tear up OBJ with OBJ;screw/tighten OBJ in OBJ;screw/tighten OBJ on OBJ;screw/tighten OBJ with OBJ;screw/tighten down OBJ in OBJ;screw/tighten down OBJ on OBJ;screw/tighten down OBJ with OBJ;screw/tighten in OBJ on OBJ;screw/tighten in OBJ with OBJ;sell OBJ OBJ;sell OBJ to OBJ;set OBJ at OBJ;shake/pour/spill/sprinkle out OBJ 
from OBJ;shine in OBJ with OBJ;shine on OBJ with OBJ;shine over OBJ with OBJ;slide OBJ down OBJ;slide OBJ in OBJ;slide OBJ to OBJ;slide OBJ under OBJ;start OBJ with OBJ;suck OBJ from OBJ;suck OBJ out OBJ;suck out OBJ from OBJ;swing OBJ at OBJ;tell OBJ about OBJ;unlock OBJ with OBJ;unscrew OBJ from OBJ;unscrew OBJ out OBJ;unscrew OBJ with OBJ;wind OBJ in OBJ;work on OBJ with OBJ;",
"max_word_length" : 9
}
# Game config for "A Tryst of Fate" (release 205). Keys:
#   name / rom        — game identifier and Z-machine story file (.z5)
#   seed              — RNG seed used when replaying the walkthrough
#   walkthrough       — "/"-separated command script that completes the game
#   grammar           — ";"-separated action templates accepted by the parser,
#                       with "/" separating synonymous verbs and OBJ marking
#                       object slots
#   max_word_length   — number of significant characters the parser reads per word
tryst = {
"name": "tryst205",
"rom": "tryst205.z5",
"seed" : 0,
"walkthrough" : "stand/stand/get gum/n/x gnome/ask george about bag/ask george about beetlebaum/ask george about frank/ask george about gumball/w/open closet/take bag/x bag/open bag/e/x dresser/open drawer/take gloves/wear gloves/s/e/open curtain/get soap/put soap on zipper/open bag/drop soap/remove gloves/drop gloves/w/ne/get bench/s/drop bench/stand on bench/get key/exit/unlock door with key/drop key/open door/s/search clothes/get pack/open closet/get safe/n/ne/x pack/read label/shake pack/put pack on gum/drop pack/get gum/s/chew gum/x paper/get paper/wait/wait/wait/blow dust/blow dust/blow dust/x paper/dehlila/wait/wait/take bag/take safe/ne/x safe/shake safe/open safe/give safe to mark/get key/s/n/w/dehlila/open door/s/get box/unlock box with key/open box/open trapdoor/drop all/d/get gum/chew gum//w/s/w/ne/n/n/n/read sign/w/w/n/look under floorboard/get match/get gum/open box then get cube/x door/tear cloth/w/x stalk/get corn/e/s/s/x jail/w/u/x nest/look in nest/get note/read note/d/e/e/n/get pan/x pan/look in hole/n/x machine/look under table/read green/drop green/s/s/s/x table/x writing/clean table with cloth/x machine/read crumpled/drop crumpled/set first wheel to 9/set second wheel to 2/set third wheel to 8/set fourth wheel to 9/set fifth wheel to 3/x wheels/n/e/s/s/s/sw/sw/x magnetite/put pan in water/shake pan/get gold/drop pan/put cloth in water/sw/s/get sprig/n/ne/ne/ne/n/n/n/w/n/n/x machine/put gold in plate/pull lever/get coin/x coin/s/s/s/x slot/put coin in slot/pull lever/get bottle/open bottle/drop cork/clean table with cloth/drop cloth/read writing/n/e/n/n/open door/x door/x paint splotches/touch red/touch orange/touch yellow/touch blue/touch orange/touch blue/open door/n/enter stall/get shovel/exit/u/get fork/d/e/x grass/search grass/get tin/w/s/s/w/e/s/e/dig plot with shovel/dig plot with shovel/dig plot with shovel/dig plot with shovel/drop shovel/get boots/search boots/read note/give corn to bird/wear boots/w/n/w/w/w/w/get spoon/nw/ne/n/get 
lantern/s/w/read sign/read book/put cube in cauldron/put gum in cauldron/put sprig in cauldron/mix cauldron with spoon/get gumball/drop spoon/x sketch/ask george about dehlila/ask george about gumball/ask george about town/e/sw/se/e/e/e/drop all/take bottle/take gumball/s/chew gum/drink whiskey/x barkeep/ask barkeep about thad/n/x thad/shoot thad/get boots/wear boots/e/get knife/x knife/w/n/get tin/e/s/s/s/sw/e/open tin/get balm/wear balm/s/x rock/sharpen knife on rock/get berries/eat berries/n/drop tin/w/sw/sw/s/se/cut branch with knife/get stick/nw/n/ne/ne/ne/n/n/n/w/w/n/get gum/s/e/n/chew gum/put gum on stick/put stick in hole/drop stick/get locket/wear locket/s/get all/w/w/w/sw/s/e/u/say wheel/u/say candle/u/say oil/u/get can/d/get candle/d/get wheel/d/d/w/cut rope with knife/drop knife/w/light match/light candle with match/drop match/drop lantern/w/w/ne/look in crack/get piece/open locket/put piece in locket/close locket/ne/w/w/get accordion/e/e/sw/sw/e/e/drop candle/e/e/u/u/u/u/play accordion/d/d/d/d/get rock/x rock/read lettering/w/n/ne/drop boots/nw/ne/w/rub locket/get key/e/sw/se/e/e/e/e/read sign/cuss/s/get bar/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/wait/open door/n/e/e/s/s/w/move rail with bar/drop bar/e/e/get weeds with fork/drop fork/oil handcar with can/drop can/enter handcar/x handle/move handle/move handle/move handle/move handle/exit/read sign/x door/x wheel/put wheel on peg/read plaque/turn wheel left/turn wheel right/turn wheel right/turn wheel left/turn wheel left/turn wheel left/turn wheel right/open door/s/unlock box with key/drop key/open box/get stick/n/enter car/x lever/pull lever/move handle/move handle/move handle/exit/s/sw/sw/sw/s/sw/w/x pile/put stick in pile/throw rock at stick/rub locket/w/n/n/get blanket/open door/rub locket/w/x cutout/open cover/cover opening with blanket/x frank/x stove/x grille/x flue/x lever/wait/wait/look/e/rub locket/w/pull lever/wait/get watch/get apple/e/untie horse/feed apple to 
horse/get on horse/x pair/blizzard/x thad/beetlebaum/wait/wait//stand up",
"grammar" : "awake/awaken/wake;awake/awaken/wake up;beetlebau;blizzard;bother/curses/darn/drat;brief/normal;carry/hold/take inventory;curse/cuss/swear;damn/fuck/shit/sod;dehlila;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;get out/off/up;hear/listen;help;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;leave/go/run/walk;lift;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;play;pray;restart;restore;save;score;script;script off;script on;shake;short/superbrie;sing;smell/sniff;sneeze;sorry;stand up;think;verify;version;wait/z;wave;xyzzy;y/yes;adjust/set OBJ;answer/say/shout/speak OBJ;attach/fasten/tie OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;blow out OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/hold/take off OBJ;check/describe/examine/watch/x OBJ;chew OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;climb/scale OBJ;climb/scale up/over OBJ;cover/close/shut OBJ;cover/close/shut up OBJ;cover/switch/close/shut off OBJ;cross/enter/go/run/walk OBJ;curse/cuss/swear at OBJ;damn/fuck/shit/sod OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;free/loosen/untie OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;jimmy/pry OBJ;kick/attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;lift OBJ;lift up OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pet OBJ;pick OBJ up;pick up OBJ;pitch/discard/drop/throw OBJ;plant OBJ;play OBJ;pump/move/press/push/shift OBJ;put OBJ down;put 
down OBJ;put on OBJ;read OBJ;ride OBJ;rotate/screw/turn/twist/unscrew OBJ;rotate/screw/turn/twist/unscrew OBJ left;rotate/screw/turn/twist/unscrew OBJ right;scrape OBJ;search OBJ;shake OBJ;shoot OBJ;smell/sniff OBJ;smoke OBJ;squash/squeeze OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;switch OBJ off;switch OBJ on;switch on OBJ;taste OBJ;tear OBJ;tear up OBJ;wave OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/hold/take OBJ off OBJ;chop/cut/prune/slice OBJ with OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ with OBJ;consult OBJ about OBJ;consult OBJ on OBJ;cover OBJ with OBJ;cover up OBJ with OBJ;dig OBJ with OBJ;dig in OBJ with OBJ;dig up OBJ with OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;hone/sharpen OBJ on OBJ;hone/sharpen OBJ with OBJ;insert OBJ in/into OBJ;jimmy/pry OBJ with OBJ;l/look up OBJ in OBJ;lift OBJ with OBJ;lock OBJ with OBJ;mix/stir OBJ with OBJ;move/press/push/shift OBJ OBJ;move/press/push/shift OBJ with OBJ;move/press/push/shift/transfer OBJ to OBJ;oil OBJ with OBJ;pitch/clear OBJ with OBJ;pitch/discard/drop/throw OBJ at/against/on/onto OBJ;plant OBJ in OBJ;poke OBJ with OBJ;put OBJ in/inside/into OBJ;put OBJ over OBJ;remove/get/carry/hold/take OBJ from OBJ;remove/get/carry/hold/take OBJ with OBJ;repair/fix OBJ with OBJ;rotate/screw/turn/twist/unscrew/adjust/set OBJ to OBJ;scrape OBJ with OBJ;scrape off OBJ with OBJ;shoot OBJ with OBJ;tell OBJ about OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
# Game config for "The Weapon". Same schema as the other entries in this file:
# name/rom identify the Z-machine story file, seed fixes the RNG for replay,
# walkthrough is a "/"-separated winning command script, grammar lists the
# ";"-separated action templates (OBJ = object slot), and max_word_length is
# the parser's significant-character limit per word.
weapon = {
"name": "weapon",
"rom": "weapon.z5",
"seed" : 0,
"walkthrough" : "touch notch/x studs/x information stud/x powerup stud/touch powerup stud/x project status stud/x generator access/x sequence stud/x forcefield stud/touch information/touch generator/reply/x white pillar/reply/touch panel/reply/x bay/x contact points/touch tool stud/wait/take probe/x probe/ask cheryl for hologram/wear goggles/x generator/x bay/probe contacts/press bottom/wait/touch calibration/take probe/shut panel/press powerup/press project status stud/remove goggles/x status display/x indicator/x studs/press status stud/press bottom/give goggles to cheryl/press sequence stud/x viewscreen/press sequence stud/x studs/x transmit stud/put probe in slab/touch bottom/touch tool stud/touch transmit stud/touch control stud/x rods/cheryl, pull rods/take key/unlock handcuffs with key/remove handcuffs/pull rods/put handcuffs on rods/press information/press forcefield/press ignition/reply/reply/reply/reply/reply/reply/reply/reply/reply/reply/press ignition",
"grammar" : "about;awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;cackle/chuckle/giggle/laugh/snicker;carry/hold/take inventory;clue/clues/help/hint/hints;clue/clues/help/hint/hints off;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;frown/glare;full/fullscore;full/fullscore score;get out/off/up;grin/smile/smirk;hear/listen;hello;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;leave/go/run/walk;long/verbose;lose/win;nap/sleep;no;nod;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;r/reply/respond/answer/say/shout/speak;restart;restore;save;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;sing;smell/sniff;sorry;stand up;t/talk/answer/say/shout/speak;think;verify;version;wait/z;wave;wink;y/yes;activate/trigger OBJ;adjust/set OBJ;approach OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;cackle/chuckle/giggle/laugh/snicker at OBJ;carry/hold/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;clear/move/press/push/shift/drag/pull OBJ together;clear/move/press/push/shift/stand by OBJ;clear/move/press/push/shift/stand close to OBJ;clear/move/press/push/shift/stand closer to OBJ;clear/move/press/push/shift/stand near OBJ;clear/move/press/push/shift/stand next to OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cross/enter/go/run/walk OBJ;cuff/handcuff OBJ;damn/fuck/shit/sod OBJ;deactivat OBJ;dig OBJ;discard/drop OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;fire/shoot 
OBJ;frown/glare at OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;grin/smile/smirk at OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;nod at OBJ;nod to OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;point at OBJ;probe OBJ;put OBJ down;put down OBJ;put on OBJ;r/answer/say/shout/speak OBJ;r/reply/respond to OBJ;read/check/describe/examine/study/watch/x OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on OBJ;stand with OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;t OBJ;t/talk/answer/say/shout/speak to OBJ;taste OBJ;throw OBJ;wave OBJ;wink at OBJ;adjust/set OBJ to OBJ;aim/fire/shoot OBJ at OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;attach/fasten/fix/tie OBJ with OBJ;burn/light OBJ with OBJ;carry/hold/take OBJ in OBJ;carry/hold/take OBJ near OBJ;carry/hold/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;feel/fondle/grope/touch OBJ to OBJ;feel/fondle/grope/touch OBJ with OBJ;fire/shoot OBJ with OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;link/drag/pull OBJ with OBJ;lock OBJ with OBJ;point OBJ at OBJ;probe OBJ with OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/hold/take OBJ from OBJ;tell OBJ about 
OBJ;throw/discard/drop OBJ at/against/on/onto OBJ;throw/discard/drop OBJ in/into/down OBJ;throw/discard/drop/put OBJ on/onto OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
# Game config for "Wishbringer" (Infocom, .z3 format). Same schema as the other
# entries: name/rom, RNG seed, "/"-separated winning walkthrough, ";"-separated
# grammar templates with OBJ slots, and the parser's per-word significant-character
# limit. Note max_word_length is 6 here (Z-machine v3), not 9 like the v5/v8 games.
wishbringer = {
"name": "wishbringer",
"rom": "wishbringer.z3",
"seed" : 0,
# Walkthrough adapted from http://mirror.ifarchive.org/if-archive/solutions/jgunness.zip
"walkthrough": "S/WAIT/GET ENVELOPE/N/W/W/YES/N/W/E/D/GET BONE/U/S/E/E/E/GIVE BONE TO POODLE/N/WAIT/GET NOTE/N/LOOK IN FOUNTAIN/GET COIN/N/N/N/N/E/U/W/N/U/E/S/U/OPEN DOOR/IN/WAIT/WAIT/GIVE ENVELOPE TO WOMAN/OPEN ENVELOPE/READ LETTER TO WOMAN/WAIT/GET CAN/WAIT/WAIT/D/N/W/D/S/E/D/BREAK BRANCH/GET IT/W/OPEN CAN/GET CAN/SQUEEZE IT/DROP CAN/GET WISHBRINGER/OPEN GATE/S/S/W/S/LOOK IN PIT/PUT BRANCH IN PIT/GET BRANCH/DROP BRANCH/DIG IN SAND/GET WHISTLE/BLOW IT/W/WAIT/GET HAT/WAIT/BLOW WHISTLE/S/YES/E/D/N/E/PUSH BUNK/DROP ALL/U/GET BLANKET/D/GET ALL/N/E/PUT BLANKET ON GRUE/OPEN FRIDGE/GET WORM/W/W/OPEN STUMP/U/N/E/E/S/OPEN MAILBOX/WAIT/WAIT/WAIT/N/E/GIVE HAT TO PELICAN/W/W/S/Z/Z/S/LOOK IN FOUNTAIN/PUT WORM IN FOUNTAIN/GET TOKEN/E/BUY TICKET/IN/SHOW TICKET TO GRAVEDIGGER/N/EXAMINE FLOOR/GET GLASSES/OUT/OUT/YES/E/WAIT/WAIT/S/PUT TOKEN IN SLOT/PUSH JOYSTICK WEST/AGAIN/PUSH IT SOUTH/AGAIN/PRESS BUTTON/YES/YES/SAY KALUZE/S/WAIT/GIVE NOTE TO MR CRISP/GET COAT/LOOK IN COAT/GET KEY/UNLOCK CHAIN WITH KEY/PULL LEVER/GET NOTE/READ IT/OPEN HATCH/U/U/WEAR GLASSES/GET BROOM/EXAMINE PANEL/PUSH SECOND SWITCH/D/REMOVE GLASSES/PUSH PAINTING/TURN CRANK/N/N/E/ALEXIS, HEEL/OPEN DOOR/IN/GET STEEL KEY/OUT/WAIT/N/UNLOCK DOOR WITH STEEL KEY/IN/WAIT/S/SMASH CASE WITH BROOM/EXAMINE SCULPTURE/INSERT WISHBRINGER IN SCULPTURE/YES/YES/KNOCK ON DOOR",
"grammar" : "aftern/bye/farewe/goodby/greet/greeti/hello/hi/salute/affirm/aye/naw/nay/negati/no/nope/ok/okay/positi/sure/y/yes/yup;again/g;aid/help/hint/hints/pray;back/advanc/crawl/go/hike/hop/procee/run/skip/step/tramp/trudge/walk;brief;fly;gaze/l/look/peek/peer/stare;heel;i/invent;loiter/wait/z;nap/rest/sleep/snooze;ne/northe/northw/nw/se/southe/southw/sw;q/quit;restar;restor;rise/stand;save;score;script;super/superb;t/time;thank/thanks;unscri;verbos;versio;advanc/crawl/go/hike/hop/procee/run/skip/step/tramp/trudge/walk OBJ;advanc/crawl/go/hike/hop/procee/run/skip/step/tramp/trudge/walk around OBJ;advanc/crawl/go/hike/hop/procee/run/skip/step/tramp/trudge/walk behind OBJ;advanc/crawl/go/hike/hop/procee/run/skip/step/tramp/trudge/walk over OBJ;advanc/crawl/go/hike/hop/procee/run/skip/step/tramp/trudge/walk throug OBJ;advanc/crawl/go/hike/hop/procee/run/skip/step/tramp/trudge/walk to OBJ;advanc/crawl/go/hike/hop/procee/run/skip/step/tramp/trudge/walk under OBJ;affirm/aye/naw/nay/negati/no/nope/ok/okay/positi/sure/y/yes/yup OBJ;aftern/bye/farewe/goodby/greet/greeti/hello/hi/salute OBJ;answer/reply/respon OBJ;answer/reply/respon to OBJ;ask/interr/query/questi/quiz OBJ;assaul/attack/fight/hurt/injure/hit/kill/murder/punch/slap/slay/stab/strike/whack/wound OBJ;awake/awaken/rouse/startl/surpri/wake OBJ;awake/awaken/rouse/startl/surpri/wake up OBJ;bathe/swim/wade OBJ;bathe/swim/wade in OBJ;bathe/swim/wade to OBJ;bite OBJ;blow OBJ;blow in OBJ;blow out OBJ;blow throug OBJ;blow up OBJ;bow/grovel/kneel before OBJ;bow/grovel/kneel to OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck into OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck throug OBJ;brush/clean/sweep OBJ;brush/clean/sweep off OBJ;buy OBJ;call/say/speak/talk/utter OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take off 
OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take out OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take up OBJ;cavort/fiddle/play/toy OBJ;cavort/fiddle/play/toy with OBJ;chase/follow/pursue OBJ;check/descri/examin/inspec/observ/see/study/survey/watch OBJ;check/descri/examin/inspec/observ/see/study/survey/watch/gaze/l/look/peek/peer/stare in OBJ;check/descri/examin/inspec/observ/see/study/survey/watch/gaze/l/look/peek/peer/stare on OBJ;check/descri/examin/inspec/observ/see/study/survey/watch/gaze/l/look/peek/peer/stare/frisk/ransac/rummag/search for OBJ;climb/scale in OBJ;climb/scale over OBJ;climb/scale under OBJ;climb/scale/advanc/crawl/go/hike/hop/procee/run/skip/step/tramp/trudge/walk down OBJ;climb/scale/advanc/crawl/go/hike/hop/procee/run/skip/step/tramp/trudge/walk up OBJ;climb/scale/ascend OBJ;climb/scale/carry/catch/confis/get/grab/hold/seize/snatch/steal/take on OBJ;climb/scale/dive/jump/leap/advanc/crawl/go/hike/hop/procee/run/skip/step/tramp/trudge/walk out OBJ;close/shut/slam OBJ;concea/hide OBJ;concea/hide behind OBJ;concea/hide under OBJ;consum/devour/eat/gobble/nibble/swallo OBJ;count/tally OBJ;cross/traver OBJ;crush/squash/squeez OBJ;defile/hump/molest/rape/ravish OBJ;depart/exit/scram/withdr OBJ;descen OBJ;detach/free/releas/undo/unfast/unhook/untie OBJ;dig/excava at OBJ;dig/excava in OBJ;dig/excava throug OBJ;dig/excava up OBJ;dig/excava with OBJ;distur/feel/pat/pet/rub/touch OBJ;dive/jump/leap OBJ;dive/jump/leap from OBJ;dive/jump/leap off OBJ;dive/jump/leap over OBJ;dive/jump/leap/advanc/crawl/go/hike/hop/procee/run/skip/step/tramp/trudge/walk in OBJ;dive/jump/leap/rise/stand/advanc/crawl/go/hike/hop/procee/run/skip/step/tramp/trudge/walk on OBJ;don/wear OBJ;douse/exting/quench/snuff OBJ;drag/pull/shove/tug/yank OBJ;drag/pull/shove/tug/yank down OBJ;drag/pull/shove/tug/yank on OBJ;drag/pull/shove/tug/yank up OBJ;drink/guzzle/sip OBJ;drink/guzzle/sip from OBJ;drop/dump OBJ;elevat/hoist/lift/raise OBJ;elevat/hoist/lift/raise 
up OBJ;employ/exploi/use OBJ;empty OBJ;empty out OBJ;enter OBJ;fill OBJ;find/seek OBJ;flip/set/turn OBJ;flip/set/turn off OBJ;flip/set/turn on OBJ;fly OBJ;fly on OBJ;fly with OBJ;force/insert/lay/place/put/stash/stuff/wedge down OBJ;force/insert/lay/place/put/stash/stuff/wedge on OBJ;force/insert/lay/place/put/stash/stuff/wedge out OBJ;frisk/ransac/rummag/search OBJ;frisk/ransac/rummag/search in OBJ;gaze/l/look/peek/peer/stare OBJ;gaze/l/look/peek/peer/stare around OBJ;gaze/l/look/peek/peer/stare at OBJ;gaze/l/look/peek/peer/stare behind OBJ;gaze/l/look/peek/peer/stare down OBJ;gaze/l/look/peek/peer/stare out OBJ;gaze/l/look/peek/peer/stare throug OBJ;gaze/l/look/peek/peer/stare up OBJ;gaze/l/look/peek/peer/stare/frisk/ransac/rummag/search under OBJ;gestur/point at OBJ;gestur/point to OBJ;grin/laugh/nod/smile/sneer/wave at OBJ;grin/laugh/nod/smile/sneer/wave to OBJ;hear OBJ;howl/scream/shout/yell OBJ;howl/scream/shout/yell at OBJ;howl/scream/shout/yell to OBJ;jostle/rattle/shake OBJ;kick OBJ;kick down OBJ;kick in OBJ;kiss/smooch OBJ;knock/pound/rap at OBJ;knock/pound/rap down OBJ;knock/pound/rap on OBJ;leave OBJ;lie/reclin down OBJ;lie/reclin in OBJ;lie/reclin on OBJ;light OBJ;listen OBJ;listen for OBJ;listen in OBJ;listen to OBJ;loiter/wait/z for OBJ;lower OBJ;move/roll/shift OBJ;nap/rest/sleep/snooze in OBJ;nap/rest/sleep/snooze on OBJ;nudge/press/push/thrust OBJ;nudge/press/push/thrust down OBJ;nudge/press/push/thrust on OBJ;nudge/press/push/thrust up OBJ;open/unseal OBJ;open/unseal up OBJ;pick OBJ;pick up OBJ;pour/spill/sprink OBJ;pour/spill/sprink out OBJ;pray/dream/hope/wish/yearn for OBJ;preser/rescue OBJ;reach in OBJ;read/skim OBJ;remove OBJ;replac OBJ;ride OBJ;ride in OBJ;ride on OBJ;rise/stand under OBJ;rise/stand up OBJ;rotate/spin/whirl OBJ;save/aid/help OBJ;shoot OBJ;sit/squat OBJ;sit/squat at OBJ;sit/squat down OBJ;sit/squat on OBJ;sit/squat/carry/catch/confis/get/grab/hold/seize/snatch/steal/take in OBJ;smell/sniff OBJ;swing OBJ;swing on OBJ;taste 
OBJ;tell OBJ;thank/thanks OBJ;ask/interr/query/questi/quiz OBJ about OBJ;ask/interr/query/questi/quiz OBJ for OBJ;assaul/attack/fight/hurt/injure OBJ with OBJ;attach/fasten/secure/tie OBJ to OBJ;attach/fasten/secure/tie up OBJ with OBJ;bestow/delive/donate/give/hand/offer/presen/show OBJ OBJ;bestow/delive/donate/give/hand/offer/presen/show OBJ to OBJ;blind/jab/poke OBJ with OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck OBJ off OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck OBJ with OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck down OBJ with OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck into OBJ with OBJ;break/crack/damage/demoli/destro/erase/smash/trash/wreck throug OBJ with OBJ;bribe/entice/pay OBJ with OBJ;burn/ignite OBJ with OBJ;burn/ignite down OBJ with OBJ;buy OBJ with OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take OBJ from OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take OBJ in OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take OBJ off OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take OBJ on OBJ;carry/catch/confis/get/grab/hold/seize/snatch/steal/take OBJ out OBJ;check/descri/examin/inspec/observ/see/study/survey/watch OBJ throug OBJ;check/descri/examin/inspec/observ/see/study/survey/watch OBJ with OBJ;chuck/fling/hurl/pitch/throw/toss OBJ at OBJ;chuck/fling/hurl/pitch/throw/toss OBJ down OBJ;chuck/fling/hurl/pitch/throw/toss OBJ in OBJ;chuck/fling/hurl/pitch/throw/toss OBJ off OBJ;chuck/fling/hurl/pitch/throw/toss OBJ on OBJ;chuck/fling/hurl/pitch/throw/toss OBJ over OBJ;chuck/fling/hurl/pitch/throw/toss OBJ throug OBJ;chuck/fling/hurl/pitch/throw/toss OBJ to OBJ;concea/hide OBJ behind OBJ;concea/hide OBJ from OBJ;concea/hide OBJ under OBJ;cover OBJ with OBJ;crush/squash/squeez OBJ on OBJ;cut/rip/slash/tear OBJ with OBJ;cut/rip/slash/tear throug OBJ with OBJ;detach/free/releas/undo/unfast/unhook/untie OBJ from 
OBJ;detach/free/releas/undo/unfast/unhook/untie OBJ with OBJ;dig/excava OBJ with OBJ;dig/excava in OBJ with OBJ;distur/feel/pat/pet/rub/touch OBJ with OBJ;drag/pull/shove/tug/yank OBJ out OBJ;dream/hope/wish/yearn for OBJ with OBJ;drop/dump OBJ down OBJ;drop/dump OBJ in OBJ;drop/dump OBJ on OBJ;empty OBJ from OBJ;empty OBJ out OBJ;feed OBJ OBJ;feed OBJ to OBJ;feed OBJ with OBJ;fill OBJ at OBJ;fill OBJ with OBJ;flip/set/turn OBJ for OBJ;flip/set/turn OBJ to OBJ;flip/set/turn OBJ with OBJ;force/insert/lay/place/put/stash/stuff/wedge OBJ behind OBJ;force/insert/lay/place/put/stash/stuff/wedge OBJ down OBJ;force/insert/lay/place/put/stash/stuff/wedge OBJ over OBJ;force/insert/lay/place/put/stash/stuff/wedge OBJ under OBJ;gaze/l/look/peek/peer/stare at OBJ throug OBJ;hit/kill/murder/punch/slap/slay/stab/strike/whack/wound OBJ with OBJ;leave/force/insert/lay/place/put/stash/stuff/wedge OBJ in OBJ;leave/force/insert/lay/place/put/stash/stuff/wedge OBJ on OBJ;light OBJ with OBJ;lock OBJ with OBJ;move/roll/shift/nudge/press/push/thrust OBJ OBJ;move/roll/shift/nudge/press/push/thrust OBJ to OBJ;nudge/press/push/thrust OBJ in OBJ;nudge/press/push/thrust OBJ under OBJ;open/unseal OBJ with OBJ;pick OBJ with OBJ;pour/spill/sprink OBJ from OBJ;pour/spill/sprink OBJ in OBJ;pour/spill/sprink OBJ on OBJ;pour/spill/sprink out OBJ into OBJ;read/skim OBJ throug OBJ;read/skim OBJ to OBJ;swing OBJ at OBJ;tell OBJ about OBJ;unlock OBJ with OBJ;",
"max_word_length" : 6
}
# Game config for "Yo Momma" (.z8 format). Same schema as the other entries:
# name/rom, RNG seed, "/"-separated winning walkthrough, ";"-separated grammar
# templates with OBJ slots, and the parser's per-word significant-character limit.
yomomma = {
"name": "yomomma",
"rom": "yomomma.z8",
"seed" : 0,
"walkthrough" : "d/se/x stranger/sit on chair/insult joe/yes/stand/w/w/insult vincent/norbert/search machines/e/set thermostat to warm/n/ne/x posse/wait/take sweater/remove jacket/wear sweater/wear jacket/sw/w/talk to sleaze/talk to sleaze/talk to sleaze/talk to sleaze/talk to sleaze/point at cola/e/w/take cola/n/give cola to guard/look under sofa/take pass/s/give pass to sleaze/n/search guard/tase sleaze/s/point at cola/point at vodka/pour vodka in cola/n/give cola to britney/talk to britney/se/ne/look under loudspeaker/s/look under jukebox/x knob/turn knob to high/put coin in jukebox/press techno/n/take lens/sw/look under table/n/climb on stage///se/put sweater on table/look at tag through lens/nw/n/climb on stage///norbert/give lens to norbert/sw/insult vincent/sleaze/s/insult vincent/sleaze/n/take card/x card/e/scrape gum with card/e/put gum on ass/put coin in jukebox/push ass/open satchel/x books/x wallet/open wallet/stage//",
"grammar" : "bother/curses/darn/drat;bust a/some/few move/moves;bust moves/move;carry/hold/take inventory;chat/t/talk;chat/t/talk to;climb/scale down;credits;damn/fuck/shit;eula/terms/copyright/lisence/lisense/licence/license;exit/leave/out/stand;full score;full/score;get out/off/up;go/run/walk;help/info/about;hint/think;hop/jump;i/inv/inventory;jam/dance;l/look;listen;long/verbose;map;no;normal/brief;notify;notify off;notify on;point;point at/to;pronouns/nouns;q/quit;restart;restore;save;short/superbrie;sing;smell/sniff;sorry;stand up;transcrip/script;transcrip/script off;transcrip/script on;verify;version;wait/z;y/yes;abuse/insult/mock/offend/ridicule/slander/taunt OBJ;attack/break/crack/destroy/fight/hit/punch/smash/wreck OBJ;bust a/some/few move/moves with OBJ;bust moves/move with OBJ;buy/order/purchase OBJ;carry/hold/take off OBJ;challenge OBJ;change/switch OBJ;change/switch/rotate/turn/twist OBJ off;change/switch/rotate/turn/twist OBJ on;change/switch/rotate/turn/twist on OBJ;change/switch/rotate/turn/twist/close/shut off OBJ;chat/t/talk OBJ;chat/t/talk to/with OBJ;chew/eat/lick/taste OBJ;chew/eat/lick/taste on OBJ;climb/scale OBJ;climb/scale down/off OBJ;climb/scale down/off from/of OBJ;climb/scale on/up/to OBJ;climb/scale up/over OBJ;close/shut OBJ;close/shut up OBJ;discard/drop/throw OBJ;disrobe/doff/shed/strip/remove OBJ;don/wear OBJ;drink/sip/swallow OBJ;embrace/hug/kiss OBJ;enter/go/run/walk OBJ;feel/touch OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;go/run/walk into/in/inside/through OBJ;hear OBJ;jam/dance with OBJ;l/look at OBJ;l/look behind OBJ;l/look inside/in/into/through OBJ;l/look thru OBJ;l/look under OBJ;listen to OBJ;move/drag/pull OBJ;open/uncover/unwrap OBJ;pick OBJ up;pick up OBJ;place/put OBJ down;place/put down OBJ;place/put on OBJ;play OBJ;play with OBJ;point OBJ;point at/to OBJ;press/push/shift OBJ;read/check/describe/examine/watch/x OBJ;rotate/turn/twist OBJ;search OBJ;sit on top of OBJ;sit on/in/inside 
OBJ;smell/sniff OBJ;stand on OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;ask OBJ for OBJ;carry/hold/take OBJ off OBJ;change/switch/rotate/turn/twist/adjust/set OBJ to OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/place/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;give/offer/pay OBJ OBJ;give/offer/pay OBJ to OBJ;insert OBJ in/into OBJ;jam OBJ with OBJ;l/look at OBJ thru/with/using OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;mix/pour OBJ into/with/in/to/on/onto OBJ;place/put OBJ in/inside/into OBJ;place/put OBJ under OBJ;read OBJ in OBJ;read about OBJ in OBJ;read/check/describe/examine/watch/x OBJ thru/with/using OBJ;remove/get/carry/hold/take OBJ from OBJ;scrape OBJ off OBJ;scrape/get/carry/hold/take OBJ off with OBJ;scrape/get/carry/hold/take OBJ with OBJ;stun/tase/taze/zap/attack/break/crack/destroy/fight/hit/punch/smash/wreck OBJ with OBJ;tell OBJ about OBJ;unlock/open/uncover/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
# Game config for "Escape from the Starship Zenon". Same schema as the other
# entries: name/rom, RNG seed, "/"-separated winning walkthrough, ";"-separated
# grammar templates with OBJ slots, and the parser's per-word character limit.
zenon = {
"name": "zenon",
"rom": "zenon.z5",
"seed" : 0,
"walkthrough" : "x bed/look under bed/x key/x gauge/x switch/open guage/push button/push switch/wait/wait/wait/x guard/unlock door with key/open door/s/e/s/x box/open box/take grenade/x grenade/n/w/w/w/s/x panels/x cupboard/open cupboard/pull pin/put grenade in cupboard/wait/wait/x electronics/n/n/x camera/n/d/n/n/x sign/x male/x female/n/ask male about female/ask male about work/ask male about officer/ask male about invitation/ask male about daddy/ask female about male/ask female about bar/e/x sign/x barman/x juke/ask barman about job/x form/w/give form to female/e/give form to barman/w/w/x copier/x table/x officer/ask officer about convention/x invite/open copier/put invite in copier/push button/x copy/e/give copy to male/n/n/e/x button/push button/w/open door/n",
"grammar" : "awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;carry/hold/take inventory;damn/fuck/shit/sod;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;get out/off/up;hear/listen;help;hop/jump/skip;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;leave/go/run/walk;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;restart;restore;save;score;script/transcrip;script/transcrip off;script/transcrip on;short/superbrie;sing;smell/sniff;sorry;stand up;think;verify;version;wait/z;wave;y/yes;adjust/set OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;blow OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/hold/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale up/over OBJ;close/cover/shut OBJ;close/cover/shut up OBJ;cross/enter/go/run/walk OBJ;damn/fuck/shit/sod OBJ;dig OBJ;discard/drop/throw OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;drag/pull OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;feel/fondle/grope/touch OBJ;fill OBJ;get in/into/on/onto OBJ;get off OBJ;get/carry/hold/take OBJ;hear/listen OBJ;hear/listen to OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look inside/in/into/through OBJ;l/look under OBJ;leave OBJ;leave/go/run/walk into/in/inside/through OBJ;lie/sit on top of OBJ;lie/sit on/in/inside OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;rotate/screw/turn/twist/unscrew OBJ;search OBJ;smell/sniff OBJ;squash/squeeze OBJ;stand on OBJ;swing OBJ;swing on OBJ;switch OBJ;switch/rotate/screw/turn/twist/unscrew OBJ 
off;switch/rotate/screw/turn/twist/unscrew OBJ on;switch/rotate/screw/turn/twist/unscrew on OBJ;switch/rotate/screw/turn/twist/unscrew/close/cover/shut off OBJ;taste OBJ;wave OBJ;adjust/set OBJ to OBJ;answer/say/shout/speak OBJ to OBJ;ask OBJ about OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;burn/light OBJ with OBJ;carry/hold/take OBJ off OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;consult OBJ about OBJ;consult OBJ on OBJ;dig OBJ with OBJ;discard/drop/throw OBJ at/against/on/onto OBJ;discard/drop/throw OBJ in/into/down OBJ;discard/drop/throw/put OBJ on/onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ to/into/on/onto OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;insert OBJ in/into OBJ;l/look up OBJ in OBJ;lock OBJ with OBJ;put OBJ in/inside/into OBJ;read OBJ in OBJ;read about OBJ in OBJ;remove/get/carry/hold/take OBJ from OBJ;tell OBJ about OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
"max_word_length" : 9
}
# Zork I game definition: story file, RNG seed, a full winning walkthrough,
# the minimal action vocabulary needed to reproduce it, and the parser grammar.
zork1 = {
    "name": "zork1",  # identifier for this game entry
    "rom": "zork1.z5",  # Z-machine story file
    "seed": 12,  # RNG seed — presumably makes the walkthrough deterministic; confirm against the loader
    # '/'-separated command sequence that plays the game through.
    "walkthrough": "N/N/U/Get egg/D/S/E/Open window/W/Open sack/Get garlic/W/Get lamp/E/U/Light lamp/Get rope/Get knife/D/Douse lamp/W/Get sword/Move rug/Open trapdoor/D/Light lamp/N/Kill troll with sword/drop egg/E/E/Se/E/Tie rope to railing/D/Douse lamp/Get torch/D/S/Drop sword/Get candles/Douse candles/Get book/N/Get bell/E/Open coffin/Get sceptre/W/S/Pray/E/S/E/W/W/Read book/Drop all/get torch/get lamp/Open trapdoor/D/N/E/E/Se/E/D/D/D/Get coffin/U/S/Pray/E/S/E/W/W/Open case/Put coffin in case/Get book/Get bell/Get candles/D/S/E/Get painting/W/N/N/E/E/E/Echo/Get bar/U/E/N/Drop painting/Get matchbook/S/S/D/W/S/S/E/D/Ring bell/Get candles/Light match/Light candles with match/Read prayer/Drop matchbook/Drop candles/Drop book/S/get skull/N/U/N/N/N/W/W/S/U/Put skull in case/Put bar in case/D/N/W/W/W/U/Get knife/get bag/Sw/E/S/Se/Odysseus/E/E/Put bag in case/Drop rusty knife/D/N/E/E/N/Ne/E/D/Get pile/U/N/N/Get screwdriver/Get wrench/Press red button/Press yellow button/S/S/Turn bolt with wrench/Drop wrench/W/W/E/Sw/S/S/W/W/S/U/Drop pile/Drop screwdriver/D/N/E/E/S/S/Touch mirror/E/D/Get trident/S/Get pump/S/S/Sw/S/S/W/W/S/U/Put trident in case/Get sceptre/Get pile/D/N/E/E/E/E/E/Drop pile/Inflate pile/Drop pump/Get label/Read label/Drop label/Throw sceptre in boat/Enter boat/Launch/Get sceptre/Wait/Get buoy/Wait/Land/Get out of boat/N/Open buoy/Get emerald/Drop buoy/Get shovel/Ne/Dig sand/Dig sand/Dig sand/Dig sand/Drop shovel/Get scarab/Sw/S/S/Wave sceptre/W/W/Get pot/Sw/U/U/Nw/W/W/W/Put sceptre in case/Put pot in case/Put emerald in case/Put scarab in case/Get rusty knife/Get nasty knife/W/W/U/Kill thief with nasty knife/kill thief with nasty knife/kill thief with nasty knife/attack thief with nasty knife/Get painting/Get egg/Drop rusty knife/Drop nasty knife/Get chalice/D/E/E/Put painting in case/Get canary/Put chalice in case/Put egg in case/E/E/N/N/E/Wind canary/Get bauble/W/S/E/W/W/Put canary in case/Put bauble in case/Get screwdriver/Get garlic/D/N/E/E/S/S/Touch mirror/N/W/N/W/N/E/Put torch in basket/Put screwdriver in basket/Light lamp/N/D/E/Ne/Se/Sw/D/D/S/Get coal/N/U/U/N/E/S/N/U/S/Put coal in basket/Lower basket/N/D/E/Ne/Se/Sw/D/D/W/Drop all/W/Get all from basket/S/Open lid/Put coal in machine/Close lid/Turn switch with screwdriver/Open lid/Get diamond/N/Put diamond in basket/Put torch in basket/Put screwdriver in basket/E/Get lamp/Get garlic/E/U/U/N/E/S/N/Get bracelet/U/S/Raise basket/Get all from basket/W/Get figurine/S/E/S/D/Drop garlic/U/Put diamond in case/Put torch in case/Put bracelet in case/Put figurine in case/W/W/U/take trunk/D/E/E/PUT trunk in case/Look/Get map/Examine map/E/E/N/W/Sw/W",
    # '/'-separated set of distinct actions sufficient to complete the game.
    "minimal_actions": "Ulysses/wait/pray/inventory/go down/go east/go north/go south/go west/go southeast/go southwest/go northeast/go northwest/launch/leave boat/cross rainbow/drop all/take all/enter house/go up/go up chimney/climb down/climb tree/close lid/dig in sand/drop book/drop buoy/drop coffin/drop garlic/drop knife/drop leaflet/drop pump/drop screwdriver/drop shovel/drop stiletto/drop sword/drop torch/drop wrench/echo/give egg to thief/go in boat/inflate plastic with pump/kill thief with knife/kill troll with sword/light candles with match/light match/look in basket/lower basket/move rug/open bag/open buoy/open case/open coffin/open grate/open lid/open mailbox/open trap door/open window/push yellow button/put bar in case/put bauble in case/put bracelet in case/put canary in case/put chalice in case/put coal in basket/put coal in machine/put coffin in case/put coins in case/put diamond in basket/put diamond in case/put egg in case/put emerald in case/put gold in case/put jade in case/put jewels in case/put painting in case/put scarab in case/put sceptre in case/put screwdriver in basket/put skull in case/put torch in basket/put trident in case/raise basket/read book/read leaflet/ring bell/rub mirror/sand/take bar/take bauble/take bell/take book/take bracelet/take buoy/take canary from egg/take candles/take chalice/take coal/take coffin/take coins/take diamond/take egg/take emerald/take garlic/take gold/take jade/take key/take knife/take lamp/take matches/take painting/take pump/take rope/take scarab/take sceptre/take screwdriver/take shovel/take skull/take sword/take torch/take trident/take trunk/take wrench/tie rope to railing/turn bolt with wrench/turn off lamp/turn on lamp/turn switch with screwdriver/unlock grate/wave sceptre/wind up canary",
    # ';'-separated action templates; '/' joins synonymous verbs, OBJ marks an object slot.
    "grammar" : "again/g;answer/reply;back;barf/chomp/lose;bathe/swim/wade;blast;brief;bug;chant/incant;chase/come/follow/pursue;climb/sit;climb/sit;curse/damn/fuck/shit;diagno;dive/jump/leap;echo;enter;exit;froboz;gaze/l/look/stare;hello/hi;hop/skip;i/invent;leave;mumble/sigh;odysse/ulysse;plugh/xyzzy;pray;q/quit;repent;restar;restor;save;say;score;scream/shout/yell;script;stand;stay;super/superb;temple/treasu;unscri;verbos;versio;wait/z;win/winnag;wish;zork;activa OBJ;answer/reply OBJ;ask/tell OBJ;awake/startl/surpri/wake OBJ;awake/startl/surpri/wake up OBJ;banish/begone/cast/drive/exorci OBJ;banish/begone/cast/drive/exorci away OBJ;banish/begone/cast/drive/exorci out OBJ;bathe/swim/wade in OBJ;bite/consum/eat/taste OBJ;block/break/damage/destro/smash in OBJ;blow in OBJ;blow out OBJ;blow up OBJ;board OBJ;brandi/wave OBJ;brush/clean OBJ;carry/catch/get/grab/hold/remove/take OBJ;carry/catch/get/grab/hold/remove/take out OBJ;chase/come/follow/pursue OBJ;climb/sit OBJ;climb/sit/carry/catch/get/grab/hold/remove/take in OBJ;climb/sit/carry/catch/get/grab/hold/remove/take on OBJ;climb/sit/go/procee/run/step/walk down OBJ;climb/sit/go/procee/run/step/walk up OBJ;climb/sit/go/procee/run/step/walk with OBJ;close OBJ;comman OBJ;count OBJ;cross/ford OBJ;curse/damn/fuck/shit OBJ;deflat OBJ;descri/examin/what/whats OBJ;descri/examin/what/whats on OBJ;descri/examin/what/whats/gaze/l/look/stare in OBJ;disemb OBJ;disenc OBJ;dive/jump/leap across OBJ;dive/jump/leap from OBJ;dive/jump/leap in OBJ;dive/jump/leap off OBJ;dive/jump/leap/go/procee/run/step/walk over OBJ;douse/exting OBJ;drink/imbibe/swallo OBJ;drink/imbibe/swallo from OBJ;drop/releas OBJ;enchan OBJ;enter OBJ;exit OBJ;feel/pat/pet/rub/touch OBJ;fill OBJ;find/see/seek/where OBJ;flip/set/shut/turn off OBJ;flip/set/shut/turn on OBJ;free/unatta/unfast/unhook/untie OBJ;gaze/l/look/stare around OBJ;gaze/l/look/stare at OBJ;gaze/l/look/stare behind OBJ;gaze/l/look/stare down OBJ;gaze/l/look/stare on OBJ;gaze/l/look/stare under OBJ;gaze/l/look/stare up OBJ;gaze/l/look/stare with OBJ;gaze/l/look/stare/search for OBJ;go/procee/run/step/walk OBJ;go/procee/run/step/walk around OBJ;go/procee/run/step/walk away OBJ;go/procee/run/step/walk in OBJ;go/procee/run/step/walk on OBJ;go/procee/run/step/walk to OBJ;hatch OBJ;hello/hi OBJ;hide/insert/place/put/stuff down OBJ;hide/insert/place/put/stuff on OBJ;hide/insert/place/put/stuff out OBJ;kick/taunt OBJ;kiss OBJ;knock/rap at OBJ;knock/rap down OBJ;knock/rap on OBJ;launch OBJ;lean on OBJ;leave OBJ;lift/raise OBJ;lift/raise up OBJ;light OBJ;listen for OBJ;listen to OBJ;lower OBJ;make OBJ;molest/rape OBJ;move/roll/pull/tug/yank OBJ;open OBJ;open up OBJ;peal/ring OBJ;pick OBJ;pick up OBJ;play OBJ;pour/spill OBJ;press/push OBJ;press/push on OBJ;pull/tug/yank on OBJ;pump up OBJ;read/skim OBJ;read/skim from OBJ;roll/pull/tug/yank up OBJ;search OBJ;search in OBJ;send for OBJ;shake OBJ;smell/sniff OBJ;spin OBJ;squeez OBJ;stab OBJ;stand/carry/catch/get/grab/hold/remove/take up OBJ;strike OBJ;swing/thrust OBJ;talk to OBJ;wear OBJ;wind OBJ;wind up OBJ;apply OBJ to OBJ;attach/fasten/secure/tie OBJ to OBJ;attach/fasten/secure/tie up OBJ with OBJ;attack/fight/hit/hurt/injure/dispat/kill/murder/slay/stab OBJ with OBJ;block/break/damage/destro/smash OBJ with OBJ;block/break/damage/destro/smash down OBJ with OBJ;blow up OBJ with OBJ;brandi/wave OBJ at OBJ;brush/clean OBJ with OBJ;burn/ignite/incine OBJ with OBJ;burn/ignite/incine down OBJ with OBJ;carry/catch/get/grab/hold/remove/take OBJ from OBJ;carry/catch/get/grab/hold/remove/take OBJ off OBJ;carry/catch/get/grab/hold/remove/take OBJ out OBJ;chuck/hurl/throw/toss OBJ OBJ;chuck/hurl/throw/toss OBJ at OBJ;chuck/hurl/throw/toss OBJ in OBJ;chuck/hurl/throw/toss OBJ off OBJ;chuck/hurl/throw/toss OBJ on OBJ;chuck/hurl/throw/toss OBJ over OBJ;chuck/hurl/throw/toss OBJ with OBJ;cut/pierce/slice OBJ with OBJ;dig OBJ with OBJ;dig in OBJ with OBJ;donate/feed/give/offer/hand OBJ OBJ;donate/feed/give/offer/hand OBJ to OBJ;drop/releas OBJ down OBJ;drop/releas/hide/insert/place/put/stuff OBJ on OBJ;drop/releas/move/hide/insert/place/put/stuff OBJ in OBJ;feel/pat/pet/rub/touch OBJ with OBJ;fill OBJ with OBJ;fix/glue/patch/plug/repair OBJ with OBJ;flip/set/shut/turn OBJ for OBJ;flip/set/shut/turn OBJ to OBJ;flip/set/shut/turn OBJ with OBJ;flip/set/shut/turn on OBJ with OBJ;free/unatta/unfast/unhook/untie OBJ from OBJ;gaze/l/look/stare at OBJ with OBJ;grease/lubric/oil OBJ with OBJ;hide/insert/place/put/stuff OBJ behind OBJ;hide/insert/place/put/stuff OBJ under OBJ;inflat OBJ with OBJ;is OBJ in OBJ;is OBJ on OBJ;light OBJ with OBJ;liquif/melt OBJ with OBJ;lock OBJ with OBJ;move/press/push OBJ with OBJ;open OBJ with OBJ;peal/ring OBJ with OBJ;pick OBJ with OBJ;poke/punctu OBJ with OBJ;pour/spill OBJ from OBJ;pour/spill OBJ in OBJ;pour/spill OBJ on OBJ;press/push/slide OBJ OBJ;press/push/slide OBJ to OBJ;press/push/slide OBJ under OBJ;pump up OBJ with OBJ;read/skim OBJ OBJ;read/skim OBJ with OBJ;spray OBJ on OBJ;spray OBJ with OBJ;squeez OBJ on OBJ;strike OBJ with OBJ;swing/thrust OBJ at OBJ;unlock OBJ with OBJ;",
    "max_word_length" : 6  # NOTE(review): likely the parser's significant word length (Z-machine truncation) — confirm
}
# Zork II game definition: story file, RNG seed, walkthrough, and parser grammar.
zork2 = {
    "name" : "zork2",  # identifier for this game entry
    "rom": "zork2.z5",  # Z-machine story file
    "seed" : 0,  # RNG seed — presumably makes the walkthrough deterministic; confirm against the loader
    # Walkthrough adapted from http://mirror.ifarchive.org/if-archive/solutions/jgunness.zip
    # '/'-separated command sequence that plays the game through.
    "walkthrough": "GET LAMP AND SWORD/S/S/S/S/LIGHT LAMP/SE/ENTER GAZEBO/GET TEAPOT, MAT, LETTER OPENER, MATCHBOOK AND PAPER/OUT/S/W/SW/N/W/N/W/N/W/N/N/FILL POT WITH WATER/N/U/OPEN LID/PUT MAT UNDER DOOR/PUT OPENER IN KEYHOLE/REMOVE OPENER AND DROP IT/PULL MAT/GET KEY/UNLOCK DOOR WITH KEY/OPEN DOOR/DROP KEY/N/GET BLUE SPHERE/S/D/W/N/HIT DRAGON WITH SWORD/S/HIT DRAGON WITH SWORD/S/HIT DRAGON WITH SWORD/W/DROP SWORD/E/N/N/N/OPEN CHEST/OPEN CHEST/GET DRAGON/S/E/E/D/S/WAIT/WAIT/E/E/N/IN/WAIT/WAIT/OUT/S/S/W/SE/S/SE/N/SE/N/SE/SE/SE/ANSWER \"A WELL\"/E/E/DROP DRAGON AND PAPER/GET IN BUCKET/DROP WATER/GET OUT OF BUCKET/E/DROP TEAPOT/GET RED, BLUE AND GREEN CAKE/EAT GREEN/E/PUT RED CAKE IN POOL/GET CANDY/W/EAT BLUE CAKE/NW/ROBOT, EAST AND SOUTH/E/S/GET RED SPHERE/ROBOT, LIFT CAGE/GET RED SPHERE/ROBOT, NORTH AND PRESS TRIANGULAR BUTTON/N/W/SE/W/SE/W/SE/GET TEAPOT/W/GET IN BUCKET/FILL TEAPOT/GET OUT OF BUCKET/W/W/NW/DROP TEAPOT, ROSE, CANDY AND GOLD KEY/SE/E/E/GET DRAGON AND PAPER/W/GET NECKLACE/W/NW/OPEN BOX/DROP NECKLACE AND DRAGON/N/GET BRICK/S/SW/GET STRING/NE/NW/W/W/GET RUBY/S/OPEN RECEPTACLE/PUT PAPER IN RECEPTACLE/GET IN BASKET/STRIKE MATCH/BURN PAPER/WAIT/WAIT/W/TIE WIRE TO HOOK/GET OUT OF BASKET/S/READ WHITE BOOK/DROP IT/OPEN PURPLE BOOK/GET STAMP/N/PUT STRING IN BRICK/GET ZORKMID/GET IN BASKET/UNTIE WIRE/WAIT/WAIT/WAIT/WAIT/DROP ALL/W/TIE WIRE TO HOOK/WAIT/WAIT/WAIT/WAIT/TAKE ALL/GET OUT OF BASKET/S/PUT BRICK IN HOLE/LIGHT MATCH/BURN STRING/N/S/GET CROWN/EXAMINE CARD/N/GET IN BASKET/CLOSE RECEPTACLE/UNTIE WIRE/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/GET OUT OF BASKET/N/E/E/SE/DROP CROWN, STAMP, ZORKMID AND RUBY/NW/N/N/W/W/NW/W/S/GET PORTRAIT/N/GO THROUGH CURTAIN/GO THROUGH SOUTH WALL/GO THROUGH CURTAIN/GET BILLS/GO THROUGH NORTH WALL/DROP PORTRAIT AND BILLS/W/W/GET PORTRAIT AND BILLS/GO THROUGH CURTAIN/S/E/E/S/S/SE/S/S/D/SW/GET CLUB/SE/NE/NW/SW/S/U/N/N/DROP MATCHBOOK/GET CANDY AND GOLD KEY/WAIT/WAIT/WAIT/SW/SW/GIVE CANDY TO LIZARD/UNLOCK DOOR WITH KEY/OPEN DOOR/S/W/PUT BLUE SPHERE IN SAPPHIRE STAND/PUT RED SPHERE IN RUBY STAND/W/TURN OFF LAMP/THROW CLUB AT AQUARIUM/GET CLEAR SPHERE/E/PUT CLEAR SPHERE IN DIAMOND STAND/GET SPHERE/S/PUT SPHERE ON PENTAGRAM/GIVE GOLD KEY, PORTRAIT AND BILLS TO DEMON/N/E/TURN ON LAMP/N/N/NE/GET VIOLIN, NECKLACE, RUBY, ZORKMID, STAMP, CROWN AND DRAGON/SW/SW/S/W/S/GIVE VIOLIN, NECKLACE, RUBY, STAMP, ZORKMID, CROWN AND DRAGON TO DEMON/DEMON, GIVE ME WAND/GET WAND/N/E/N/N/NE/S/POINT WAND AT MENHIR/SAY \"FLOAT\"/SW/GET COLLAR/NE/S/D/D/PUT COLLAR ON DOG/E/OPEN DOOR/S/TURN OFF LAMP/OPEN SECRET DOOR/S",
    # ';'-separated action templates; '/' joins synonymous verbs, OBJ marks an object slot.
    "grammar" : "again/g;answer/reply;back;barf/chomp/lose;bathe/swim/wade;blast;brief;bug;chant/incant;chase/come/follow/pursue;climb/sit;curse/damn/fuck/shit;debug;diagno;dive/jump/leap;echo;enter;exit;froboz;gaze/l/look/stare;hello/hi;hop/skip;i/invent;leave;mumble/sigh;odysse/ulysse;plugh/xyzzy;pray;q/quit;repent;restar;restor;save;say;score;scream/shout/yell;script;stand;stay;super/superb;temple/treasu;unscri;verbos;versio;wait/z;win/winnag;wish;zork;activa OBJ;aim/brandi/point/wave OBJ;aim/brandi/point/wave at OBJ;answer/reply OBJ;ask/tell OBJ;attack/fight/hit/hurt/injure/dispat/kill/murder/slay OBJ;awake/startl/surpri/wake OBJ;awake/startl/surpri/wake up OBJ;banish/begone/cast/drive/exorci OBJ;banish/begone/cast/drive/exorci away OBJ;banish/begone/cast/drive/exorci out OBJ;bathe/swim/wade in OBJ;bite/consum/eat/taste OBJ;block/break/damage/destro/smash in OBJ;blow in OBJ;blow out OBJ;blow up OBJ;board OBJ;brush/clean OBJ;carry/catch/get/grab/hold/remove/take OBJ;carry/catch/get/grab/hold/remove/take out OBJ;chase/come/follow/pursue OBJ;climb/sit OBJ;climb/sit/carry/catch/get/grab/hold/remove/take in OBJ;climb/sit/carry/catch/get/grab/hold/remove/take on OBJ;climb/sit/go/procee/run/step/walk down OBJ;climb/sit/go/procee/run/step/walk up OBJ;climb/sit/go/procee/run/step/walk with OBJ;close OBJ;comman OBJ;count OBJ;cross/ford OBJ;curse/damn/fuck/shit OBJ;deflat OBJ;descri/examin/what/whats OBJ;descri/examin/what/whats on OBJ;descri/examin/what/whats/gaze/l/look/stare in OBJ;dig in OBJ;disemb OBJ;disenc OBJ;dive/jump/leap across OBJ;dive/jump/leap from OBJ;dive/jump/leap in OBJ;dive/jump/leap off OBJ;dive/jump/leap/go/procee/run/step/walk over OBJ;douse/exting OBJ;drink/imbibe/swallo OBJ;drink/imbibe/swallo from OBJ;drop/releas OBJ;enchan OBJ;enter OBJ;exit OBJ;feel/pat/pet/rub/touch OBJ;fill OBJ;find/see/seek/where OBJ;flip/set/shut/turn off OBJ;flip/set/shut/turn on OBJ;free/unatta/unfast/unhook/untie OBJ;gaze/l/look/stare around OBJ;gaze/l/look/stare at OBJ;gaze/l/look/stare behind OBJ;gaze/l/look/stare down OBJ;gaze/l/look/stare on OBJ;gaze/l/look/stare under OBJ;gaze/l/look/stare up OBJ;gaze/l/look/stare with OBJ;gaze/l/look/stare/search for OBJ;go/procee/run/step/walk OBJ;go/procee/run/step/walk around OBJ;go/procee/run/step/walk away OBJ;go/procee/run/step/walk in OBJ;go/procee/run/step/walk on OBJ;go/procee/run/step/walk to OBJ;hatch OBJ;hello/hi OBJ;hide/insert/place/put/stuff down OBJ;hide/insert/place/put/stuff on OBJ;hide/insert/place/put/stuff out OBJ;kick/taunt OBJ;kiss OBJ;knock/rap at OBJ;knock/rap down OBJ;knock/rap on OBJ;launch OBJ;lean on OBJ;leave OBJ;lift/raise OBJ;lift/raise up OBJ;light OBJ;listen for OBJ;listen to OBJ;lower OBJ;make OBJ;molest/rape OBJ;move/pull/tug/yank OBJ;open OBJ;open up OBJ;peal/ring OBJ;pick OBJ;pick up OBJ;play OBJ;pour/spill OBJ;press/push OBJ;press/push on OBJ;pull/tug/yank on OBJ;pump up OBJ;read/skim OBJ;read/skim from OBJ;roll/pull/tug/yank up OBJ;search OBJ;search in OBJ;send for OBJ;shake OBJ;smell/sniff OBJ;spin OBJ;squeez OBJ;stab OBJ;stand/carry/catch/get/grab/hold/remove/take up OBJ;strike OBJ;swing/thrust OBJ;talk to OBJ;wear OBJ;wind OBJ;wind up OBJ;aim/brandi/point/wave OBJ at OBJ;apply OBJ to OBJ;ask/tell OBJ about OBJ;attach/fasten/secure/tie OBJ to OBJ;attach/fasten/secure/tie up OBJ with OBJ;attack/fight/hit/hurt/injure/dispat/kill/murder/slay/stab OBJ with OBJ;block/break/damage/destro/smash OBJ with OBJ;block/break/damage/destro/smash down OBJ with OBJ;blow up OBJ with OBJ;brush/clean OBJ with OBJ;burn/ignite/incine OBJ with OBJ;burn/ignite/incine down OBJ with OBJ;carry/catch/get/grab/hold/remove/take OBJ from OBJ;carry/catch/get/grab/hold/remove/take OBJ off OBJ;carry/catch/get/grab/hold/remove/take OBJ out OBJ;chuck/hurl/throw/toss OBJ OBJ;chuck/hurl/throw/toss OBJ at OBJ;chuck/hurl/throw/toss OBJ in OBJ;chuck/hurl/throw/toss OBJ off OBJ;chuck/hurl/throw/toss OBJ on OBJ;chuck/hurl/throw/toss OBJ over OBJ;chuck/hurl/throw/toss OBJ with OBJ;cut/pierce/slice OBJ with OBJ;dig OBJ with OBJ;dig in OBJ with OBJ;donate/feed/give/offer/hand OBJ OBJ;donate/feed/give/offer/hand OBJ to OBJ;drop/releas OBJ down OBJ;drop/releas/hide/insert/place/put/stuff OBJ on OBJ;drop/releas/move/hide/insert/place/put/stuff OBJ in OBJ;feel/pat/pet/rub/touch OBJ with OBJ;fill OBJ with OBJ;fix/glue/patch/plug/repair OBJ with OBJ;flip/set/shut/turn OBJ for OBJ;flip/set/shut/turn OBJ to OBJ;flip/set/shut/turn OBJ with OBJ;flip/set/shut/turn on OBJ with OBJ;free/unatta/unfast/unhook/untie OBJ from OBJ;gaze/l/look/stare at OBJ with OBJ;grease/lubric/oil OBJ with OBJ;hide/insert/place/put/stuff OBJ behind OBJ;hide/insert/place/put/stuff OBJ under OBJ;inflat OBJ with OBJ;is OBJ in OBJ;is OBJ on OBJ;light OBJ with OBJ;liquif/melt OBJ with OBJ;lock OBJ with OBJ;move/press/push OBJ with OBJ;open OBJ with OBJ;peal/ring OBJ with OBJ;pick OBJ with OBJ;poke/punctu OBJ with OBJ;pour/spill OBJ from OBJ;pour/spill OBJ in OBJ;pour/spill OBJ on OBJ;press/push/slide OBJ OBJ;press/push/slide OBJ to OBJ;press/push/slide OBJ under OBJ;pump up OBJ with OBJ;read/skim OBJ OBJ;read/skim OBJ with OBJ;spray OBJ on OBJ;spray OBJ with OBJ;squeez OBJ on OBJ;strike OBJ with OBJ;swing/thrust OBJ at OBJ;unlock OBJ with OBJ;",
    "max_word_length" : 6  # NOTE(review): likely the parser's significant word length (Z-machine truncation) — confirm
}
# Zork III game definition: story file, RNG seed, walkthrough, and parser grammar.
zork3 = {
    "name" : "zork3",  # identifier for this game entry
    "rom": "zork3.z5",  # Z-machine story file
    "seed" : 0,  # RNG seed — presumably makes the walkthrough deterministic; confirm against the loader
    # Walkthrough adapted from http://mirror.ifarchive.org/if-archive/solutions/jgunness.zip
    # '/'-separated command sequence that plays the game through.
    "walkthrough": "GET LAMP/S/LIGHT LAMP/W/W/GET BREAD/E/E/E/NE/SE/W/NE/WAKE UP OLD MAN/GIVE BREAD TO OLD MAN/SW/W/S/S/S/TURN OFF LAMP/DROP LAMP/JUMP LAKE/D/GET AMULET/GET AMULET/W/S/GET TORCH/WAIT/WAIT/WAIT/TOUCH TABLE/GET CAN/WAIT/WAIT/TOUCH TABLE/DROP TORCH/WAIT/N/JUMP LAKE/D/GET CAN/GET CAN/S/S/SPRAY REPELLANT ON MYSELF/S/E/GET KEY/MOVE COVER/D/N/N/N/GET TORCH/W/W/W/D/WAIT/WAIT/TIE CHEST TO ROPE/WAIT/WAIT/WAIT/WAIT/GRAB ROPE/GET CHEST/D/D/S/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/HELLO SAILOR/GET VIAL/E/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/WAIT/KILL FIGURE WITH SWORD/KILL FIGURE WITH SWORD/KILL FIGURE WITH SWORD/KILL FIGURE WITH SWORD/KILL FIGURE WITH SWORD/KILL FIGURE WITH SWORD/REMOVE HOOD/DROP SWORD/GET CLOAK/NE/E/E/N/E/NE/OPEN DOOR/N/N/DROP CHEST/S/S/SW/W/S/E/E/S/S/E/N/PUSH GOLDEN MACHINE SOUTH/OPEN STONE DOOR/PUSH GOLDEN MACHINE EAST/EXAMINE MACHINE/READ PLAQUE/GET IN MACHINE/SET DIAL TO 776/PRESS BUTTON/WAIT/WAIT/WAIT/WAIT/WAIT/GET RING/OPEN DOOR/W/OPEN WOODEN DOOR/N/LIFT SEAT/HIDE RING UNDER SEAT/GET IN GOLDEN MACHINE/SET DIAL TO 948/PRESS BUTTON/GET OUT OF GOLDEN MACHINE/LIFT SEAT/OPEN WOODEN DOOR/S/OPEN STONE DOOR/E/GET ALL/W/S/D/PRESS SOUTH WALL/E/S/E/E/PRESS SOUTH WALL/GET BOOK/PRESS SOUTH WALL/PRESS WEST WALL/AGAIN/E/E/N/N/N/N/PRESS EAST WALL/W/S/S/S/S/E/E/N/N/N/PRESS WEST WALL/N/W/PRESS SOUTH WALL/E/E/S/S/S/W/W/N/PRESS EAST WALL/W/W/W/N/N/W/N/PRESS EAST WALL/AGAIN/AGAIN/S/PRESS SOUTH WALL/N/E/E/S/PRESS SOUTH WALL/W/PRESS WEST WALL/AGAIN/S/W/PRESS NORTH WALL/AGAIN/AGAIN/W/N/U/N/W/N/N/W/W/N/E/NE/N/PRESS BUTTON/N/N/N/RAISE SHORT POLE/PRESS WHITE PANEL/AGAIN/LOWER SHORT POLE/PUSH PINE PANEL/N/OPEN VIAL/DRINK LIQUID/N/N/N/KNOCK ON DOOR/N/E/N/N/READ BOOK/TURN DIAL TO 4/PRESS BUTTON/DUNGEON MASTER, WAIT/S/OPEN CELL DOOR/S/DUNGEON MASTER, TURN DIAL TO 8 AND PRESS BUTTON/UNLOCK BRONZE DOOR WITH KEY/OPEN IT/S",
    # ';'-separated action templates; '/' joins synonymous verbs, OBJ marks an object slot.
    "grammar" : "again/g;answer/reply;back;barf/chomp/lose;bathe/swim/wade;blast;brief;bug;chant/incant;chase/come/follow/pursue;curse/damn;diagno;dive/jump/leap;enter;exit;gaze/l/look/stare;hello/hi;hop/skip;i/invent;leave;mumble/sigh;plugh/xyzzy;pray;q/quit;repent;restar;restor;save;say/talk;score;scream/shout/yell;script;stand;stay;super/superb;unscri;verbos;versio;wait/z;win/winnag;zork;answer/reply OBJ;ask/tell OBJ;awake/startl/surpri/wake OBJ;awake/startl/surpri/wake up OBJ;banish/begone/cast/drive/exorci OBJ;banish/begone/cast/drive/exorci away OBJ;banish/begone/cast/drive/exorci out OBJ;bathe/swim/wade in OBJ;bite/kick/taunt OBJ;blow out OBJ;blow up OBJ;board OBJ;brandi/wave OBJ;carry/get/grab/hold/remove/take OBJ;carry/get/grab/hold/remove/take out OBJ;chase/come/follow/pursue OBJ;climb/sit OBJ;climb/sit/carry/get/grab/hold/remove/take in OBJ;climb/sit/carry/get/grab/hold/remove/take on OBJ;climb/sit/go/procee/run/step/walk down OBJ;climb/sit/go/procee/run/step/walk up OBJ;close OBJ;consum/eat/taste OBJ;count OBJ;cross/ford OBJ;curse/damn OBJ;deflat OBJ;descri/examin/what/whats OBJ;descri/examin/what/whats on OBJ;descri/examin/what/whats/gaze/l/look/stare in OBJ;dig in OBJ;disemb OBJ;dive/jump/leap across OBJ;dive/jump/leap from OBJ;dive/jump/leap in OBJ;dive/jump/leap off OBJ;dive/jump/leap/go/procee/run/step/walk over OBJ;douse/exting OBJ;drink/imbibe/swallo OBJ;drink/imbibe/swallo from OBJ;drop/releas OBJ;enter OBJ;exit OBJ;feel/pat/pet/rub/touch OBJ;fill OBJ;find/see/seek/where OBJ;flip/set/shut/turn OBJ;flip/set/shut/turn off OBJ;flip/set/shut/turn on OBJ;free/unatta/unfast/unhook/untie OBJ;gaze/l/look/stare around OBJ;gaze/l/look/stare at OBJ;gaze/l/look/stare behind OBJ;gaze/l/look/stare throug OBJ;gaze/l/look/stare under OBJ;gaze/l/look/stare with OBJ;gaze/l/look/stare/search for OBJ;go/procee/run/step/walk OBJ;go/procee/run/step/walk around OBJ;go/procee/run/step/walk in OBJ;go/procee/run/step/walk on OBJ;go/procee/run/step/walk to OBJ;go/procee/run/step/walk with OBJ;hello/hi OBJ;hide/insert/place/put/stuff down OBJ;hide/insert/place/put/stuff on OBJ;kiss OBJ;knock/rap at OBJ;knock/rap down OBJ;knock/rap on OBJ;launch OBJ;lean on OBJ;leave OBJ;lift/raise OBJ;lift/raise up OBJ;light OBJ;listen for OBJ;listen to OBJ;lower OBJ;make OBJ;molest/rape OBJ;move/pull/tug OBJ;open OBJ;open up OBJ;pick OBJ;pick up OBJ;play OBJ;pour/spill OBJ;press/push OBJ;press/push on OBJ;pull/tug on OBJ;pump up OBJ;read/skim OBJ;roll up OBJ;say/talk to OBJ;search OBJ;search in OBJ;send for OBJ;shake OBJ;slide OBJ;smell/sniff OBJ;spin OBJ;squeez OBJ;stand/carry/get/grab/hold/remove/take up OBJ;strike OBJ;swing/thrust OBJ;wear OBJ;wind OBJ;wind up OBJ;apply OBJ to OBJ;attach/fasten/secure/tie OBJ to OBJ;attach/fasten/secure/tie up OBJ with OBJ;attack/fight/hit/hurt/injure OBJ with OBJ;blind/jab/poke OBJ with OBJ;block/break/damage/destro/smash OBJ with OBJ;block/break/damage/destro/smash down OBJ with OBJ;blow up OBJ with OBJ;brandi/wave OBJ at OBJ;burn/ignite/incine OBJ with OBJ;burn/ignite/incine down OBJ with OBJ;carry/get/grab/hold/remove/take OBJ from OBJ;carry/get/grab/hold/remove/take OBJ off OBJ;carry/get/grab/hold/remove/take OBJ out OBJ;chuck/hurl/throw/toss OBJ at OBJ;chuck/hurl/throw/toss OBJ in OBJ;chuck/hurl/throw/toss OBJ off OBJ;chuck/hurl/throw/toss OBJ on OBJ;chuck/hurl/throw/toss OBJ over OBJ;chuck/hurl/throw/toss OBJ with OBJ;cut/pierce/slice OBJ with OBJ;dig OBJ with OBJ;dig in OBJ with OBJ;dispat/kill/murder/slay/stab OBJ with OBJ;donate/feed/give/hand/offer OBJ OBJ;donate/feed/give/hand/offer OBJ to OBJ;drop/releas OBJ down OBJ;drop/releas OBJ on OBJ;drop/releas/hide/insert/place/put/stuff OBJ in OBJ;feel/pat/pet/rub/touch OBJ with OBJ;fill OBJ with OBJ;fix/glue/patch/plug/repair OBJ with OBJ;flip/set/shut/turn OBJ for OBJ;flip/set/shut/turn OBJ to OBJ;flip/set/shut/turn OBJ with OBJ;free/unatta/unfast/unhook/untie OBJ from OBJ;gaze/l/look/stare at OBJ with OBJ;grease/lubric/oil OBJ with OBJ;hide/insert/place/put/stuff OBJ behind OBJ;hide/insert/place/put/stuff OBJ on OBJ;hide/insert/place/put/stuff OBJ under OBJ;inflat OBJ with OBJ;is OBJ in OBJ;is OBJ on OBJ;light OBJ with OBJ;liquif/melt OBJ with OBJ;lock OBJ with OBJ;open OBJ with OBJ;pick OBJ with OBJ;pour/spill OBJ from OBJ;pour/spill OBJ in OBJ;pour/spill OBJ on OBJ;press/push/slide OBJ OBJ;press/push/slide OBJ to OBJ;press/push/slide OBJ under OBJ;pump up OBJ with OBJ;read/skim OBJ with OBJ;spray OBJ on OBJ;spray OBJ with OBJ;squeez OBJ on OBJ;strike OBJ with OBJ;swing/thrust OBJ at OBJ;unlock OBJ with OBJ;",
    "max_word_length" : 6  # NOTE(review): likely the parser's significant word length (Z-machine truncation) — confirm
}
# "Zork: The Undiscovered Underground" game definition: story file, walkthrough,
# RNG seed, and parser grammar.
ztuu = {
    "name" : "ztuu",  # identifier for this game entry
    "rom": "ztuu.z5",  # Z-machine story file
    # '/'-separated command sequence that plays the game through.
    "walkthrough": 'ne/examine boulders/ne/n/nw/talk to man/take mask/n/wear fish/wear fur/wear razor/s/take rune/take gores/take implementeers/take zm$100000/se/drop lamp/n/take glasses/s/remove mask/remove suit/drop mask/drop suit/take lamp/s/e/e/take blue/take red/hit lamp/put blue in glasses/put red in glasses/east/open trunk/take coin/w/w/w/s/n/drop rune/s/drop gores/s/examine tree/examine ball/take tinsel/examine coin/tie tinsel to coin/n/e/put coin in slot/pull tinsel/put coin in slot/w/w/pry sixth rib with sword/sw/take can/ne/e/s/shake lamp/wear glasses/drop all/open lid/push button/examine symbols/take all/put flathead on circle/put tree on square/put house on triangle/put balloon on pentagon/drop zm$100000/se/drop multi/nw/n/n/sw/wait/sw',
    "seed" : 0,  # RNG seed — presumably makes the walkthrough deterministic; confirm against the loader
    # ';'-separated action templates; '/' joins synonymous verbs, OBJ marks an object slot.
    "grammar" : "awake/awaken/wake;awake/awaken/wake up;bother/curses/darn/drat;brief/normal;carry/get/take inventory;carry/get/take off;carry/get/take out;close/cover/shut up;damn/fuck/shit/sod;diagnose;die/q/quit;dive/swim;exit/out/outside/stand;full/fullscore;full/fullscore score;go/leave/run/walk;hear/listen;hello/hi;hello/hi sailor;help;hop/jump/skip;hum/sing;i/inv/inventory;i/inv/inventory tall;i/inv/inventory wide;in/inside/cross/enter;l/look;long/verbose;nap/sleep;no;noscript/unscript;notify off;notify on;nouns/pronouns;objects;places;pray;restart;restore;save;score;script;script off;script on;short/superbrie;smell/sniff;sorry;stand/carry/get/take up;think;time;verify;version;wait/z;wave;xyzzy;y/yes;zork;adjust/set OBJ;arrange/rearrange/sort/vandalize OBJ;attach/fasten/fix/tie OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck/kick/shake/stab OBJ;awake/awaken/wake OBJ;awake/awaken/wake OBJ up;awake/awaken/wake up OBJ;bend OBJ;blow out OBJ;bother/curses/darn/drat OBJ;burn/light OBJ;buy/purchase OBJ;carry/get/take OBJ;carry/get/take off OBJ;chop/cut/prune/slice OBJ;clean/dust/polish/rub/scrub/shine/sweep/wipe OBJ;clear/move/press/push/shift OBJ;climb/scale OBJ;climb/scale over OBJ;climb/scale up OBJ;climb/scale up OBJ;climb/scale/go/leave/run/walk/carry/get/take into OBJ;climb/scale/lie/sit/go/leave/run/walk/carry/get/take in OBJ;climb/scale/lie/sit/stand/carry/get/take on OBJ;close/cover/shut OBJ;consult OBJ;consult with OBJ;cross/enter/go/leave/run/walk OBJ;damn/fuck/shit/sod OBJ;detach/untie OBJ;dig OBJ;discard/drop OBJ;disrobe/doff/shed/remove OBJ;don/wear OBJ;douse/extinguis OBJ;drink/sip/swallow OBJ;eat OBJ;embrace/hug/kiss OBJ;empty OBJ;empty OBJ out;empty out OBJ;exit/out/outside OBJ;feel/fondle/grope/touch OBJ;fill OBJ;find/locate OBJ;go/leave/run/walk down OBJ;go/leave/run/walk through OBJ;hear/listen OBJ;hear/listen to OBJ;hold OBJ;hop/jump/skip over OBJ;l/look at OBJ;l/look in OBJ;l/look inside OBJ;l/look into OBJ;l/look through OBJ;l/look under OBJ;let OBJ go;let go OBJ;let go of OBJ;lick/taste OBJ;lie/sit/go/leave/run/walk inside OBJ;lift/raise OBJ;lower OBJ;open/uncover/undo/unwrap OBJ;peel OBJ;peel off OBJ;pick OBJ up;pick up OBJ;put OBJ down;put down OBJ;put on OBJ;read/check/describe/examine/watch/x OBJ;release OBJ;rotate/screw/turn/twist/unscrew OBJ;rotate/screw/turn/twist/unscrew OBJ off;rotate/screw/turn/twist/unscrew off OBJ;rotate/screw/turn/twist/unscrew/switch OBJ on;rotate/screw/turn/twist/unscrew/switch on OBJ;search OBJ;smell/sniff OBJ;speak/talk to OBJ;spray OBJ;squash/squeeze OBJ;swing OBJ;swing on OBJ;switch OBJ;switch OBJ off;switch/close/cover/shut off OBJ;throw OBJ;use OBJ;wave OBJ;yank/drag/pull OBJ;adjust/set OBJ to OBJ;ask OBJ for OBJ;attach/fasten/fix/tie OBJ to OBJ;attack/break/crack/destroy/fight/hit/kill/murder/punch/smash/thump/torture/wreck OBJ with OBJ;burn/light OBJ with OBJ;clear/move/press/push/shift OBJ OBJ;clear/move/press/push/shift/transfer OBJ to OBJ;detach/untie OBJ from OBJ;dig OBJ with OBJ;discard/drop OBJ down OBJ;discard/drop/insert/put OBJ in OBJ;discard/drop/insert/put OBJ into OBJ;discard/drop/put OBJ on OBJ;discard/drop/put OBJ onto OBJ;display/present/show OBJ OBJ;display/present/show OBJ to OBJ;empty OBJ into OBJ;empty OBJ on OBJ;empty OBJ onto OBJ;empty OBJ to OBJ;feed/give/offer/pay OBJ OBJ;feed/give/offer/pay OBJ to OBJ;feed/give/offer/pay over OBJ to OBJ;lock OBJ with OBJ;pry OBJ with OBJ;put OBJ inside OBJ;spray OBJ at OBJ;spray OBJ on OBJ;throw OBJ on OBJ;throw OBJ onto OBJ;throw/discard/drop OBJ against OBJ;throw/discard/drop OBJ at OBJ;unlock/open/uncover/undo/unwrap OBJ with OBJ;",
    "max_word_length" : 9  # NOTE(review): likely the parser's significant word length — confirm
}
| 417,373 | 752.382671 | 16,184 | py |
BBA_measures_classification | BBA_measures_classification-main/grad_opt.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: olympio
"""
import matplotlib.pyplot as plt
import numpy as np
import data_gen as dg
from aux_functions import *
import train_classifier as tc
from numpy.random import default_rng
rng = default_rng()
import tensorflow as tf
# Experiment sizes: points per sampled shape and train/test split sizes.
N1=500 #Number of points in class 1
N2=500 #Number of points in class 2
size_train = 100  # number of labelled diagrams used for training
size_test = 100  # number of labelled diagrams held out for testing
size_tot = size_train + size_test  # total number of diagrams generated below
# Build the labelled dataset of persistence diagrams: the first half of the
# examples are sampled spheres (label 0), the second half tori (label 1).
data = []
Y = []
half = size_tot // 2
for _ in range(half):
    cloud = dg.gen_sphere(N1, 0, [6, 6])
    diagram = rot_diag(dg.gen_dgm(cloud, [1])[0])
    data.append(diagram)
    Y.append(0)
for _ in range(half):
    cloud = dg.gen_torus(N2, 0, [2, 2], [4, 4])
    diagram = rot_diag(dg.gen_dgm(cloud, [1])[0])
    data.append(diagram)
    Y.append(1)
print('data generated')
data = np.array(data)
Y = np.array(Y)
# Split into train and test subsets.
data_train, data_test, Y_train, Y_test = dg.split_train_test(size_train, size_test, data, Y)
#%% Learning, distance to ball
# Fit, for each candidate center, a logistic classifier whose feature is the
# (normalised) Laplacian mass of a diagram inside a learned ball.
N_centers = 1  # number of candidate centers returned by k-means
C=tc.get_centers_all_dataset(data_train, N_centers)
scores_test = np.zeros(N_centers)
scores_train = np.zeros(N_centers)
#betainit = np.array([0.2, 3.5, 0.4, 1, 0.1, 0.1])
for c in range(N_centers):
    mean_mass=0 #initialize parameters
    center = C[c]
    std = np.random.rand(1)  # random initial bandwidth
    radius = np.random.rand(1)  # random initial ball radius
    # Average ball mass over the training set; used below to normalise the score.
    for d in data_train:
        mean_mass+=lap_dist_to_ball(d, center, std, radius)
    mean_mass/=size_train #Normalize threshold parameter
    thresh = np.random.rand(1)
    # beta packs the learnable parameters as a column vector:
    # beta[0:2] = center, beta[2] = std, beta[3] = threshold, beta[4] = radius.
    betainit = np.concatenate([center, std, thresh, radius])
    beta = tf.Variable(initial_value=np.array(betainit[:,np.newaxis], dtype=np.float64), trainable=True)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.02)
    #Gradient descent
    losses, betas = [], []
    N_epochs = 40
    for epoch in range(N_epochs): #number of epochs of SGD
        with tf.GradientTape() as tape:
            loss = 0
            for k, d in enumerate(data_train):
                # p = sigmoid(normalised mass - threshold); note beta[2+2] is the
                # radius and beta[2+1] the threshold (see betainit layout above).
                p = sigmo (lap_dist_to_ball(d, tf.transpose(beta[:2]), beta[2], beta[2+2])/mean_mass - beta[2+1])
                loss-=(Y_train[k]*tf.math.log(p)+(1-Y_train[k])*tf.math.log(1-p)) #cross-entropy loss
        # Compute and apply gradients
        gradients = tape.gradient(loss, [beta])
        optimizer.apply_gradients(zip(gradients, [beta]))
        losses.append(loss.numpy())
        betas.append(beta.numpy())
    beta_final=betas[-1]
    # Evaluate the fitted parameters — presumably classification accuracy;
    # confirm against train_classifier.validation_dist_ball.
    score_test = tc.validation_dist_ball(beta_final, mean_mass, data_test, Y_test, 2)
    score_train = tc.validation_dist_ball(beta_final, mean_mass, data_train, Y_train, 2)
    scores_test[c] = score_test
    scores_train[c] = score_train
print(scores_train)
print(scores_test)
#%%Visualization
# Overlay one diagram from each class, the k-means centers, and the learned ball center.
sphere_dgm = data[0]
torus_dgm = data[-1]
plt.scatter(sphere_dgm[:, 0], sphere_dgm[:, 1], label='Persistence diagram of a sphere')
plt.scatter(torus_dgm[:, 0], torus_dgm[:, 1], label='Persistence diagram of a torus')
plt.scatter(C[:, 0], C[:, 1], label='K-means centers')
plt.scatter(beta_final[0], beta_final[1], color='r', label='center of ball')
plt.legend()
plt.legend() | 3,182 | 32.505263 | 113 | py |
scicite | scicite-master/scicite/training/multitask_trainer_two_tasks.py | """
This module is an extended trainer, based on allennlp's default trainer, that handles multitask training
with two auxiliary tasks.
A :class:`~allennlp.training.trainer.Trainer` is responsible for training a
:class:`~allennlp.models.model.Model`.
Typically you might create a configuration file specifying the model and
training parameters and then use :mod:`~allennlp.commands.train`
rather than instantiating a ``Trainer`` yourself.
"""
# pylint: disable=too-many-lines
import logging
import os
import shutil
import time
import re
import datetime
import traceback
from typing import Dict, Optional, List, Tuple, Union, Iterable, Any, Set
import torch
import torch.optim.lr_scheduler
from torch.nn.parallel import replicate, parallel_apply
from torch.nn.parallel.scatter_gather import scatter_kwargs, gather
from tensorboardX import SummaryWriter
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import peak_memory_mb, gpu_memory_mb, dump_metrics
from allennlp.common.tqdm import Tqdm
from allennlp.data.instance import Instance
from allennlp.data.iterators.data_iterator import DataIterator
from allennlp.models.model import Model
from allennlp.nn import util
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.optimizers import Optimizer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def is_sparse(tensor):
    """Return True if ``tensor`` uses a sparse storage layout."""
    sparse_flag = tensor.is_sparse
    return sparse_flag
def sparse_clip_norm(parameters, max_norm, norm_type=2) -> float:
    """Clips gradient norm of an iterable of parameters.

    The norm is computed over all gradients together, as if they were
    concatenated into a single vector, and gradients are modified in-place.
    Unlike ``torch.nn.utils.clip_grad_norm_`` this also supports sparse
    gradients.

    Parameters
    ----------
    parameters : ``(Iterable[torch.Tensor])``
        An iterable of Tensors that will have gradients normalized.
    max_norm : ``float``
        The max norm of the gradients.
    norm_type : ``float``
        The type of the used p-norm. Can be ``'inf'`` for infinity norm.

    Returns
    -------
    Total norm of the parameters (viewed as a single vector).
    """
    # pylint: disable=invalid-name,protected-access
    params_with_grad = [p for p in parameters if p.grad is not None]
    max_norm = float(max_norm)
    norm_type = float(norm_type)
    if norm_type == float('inf'):
        total_norm = max(p.grad.data.abs().max() for p in params_with_grad)
    else:
        total_norm = 0
        for p in params_with_grad:
            if p.grad.is_sparse:
                # Coalesce repeated indices before taking the norm of the values.
                param_norm = p.grad.data.coalesce()._values().norm(norm_type)
            else:
                param_norm = p.grad.data.norm(norm_type)
            total_norm += param_norm ** norm_type
        total_norm = total_norm ** (1. / norm_type)
    clip_coef = max_norm / (total_norm + 1e-6)
    if clip_coef < 1:
        # Scale every gradient in-place by the same factor.
        for p in params_with_grad:
            if p.grad.is_sparse:
                p.grad.data._values().mul_(clip_coef)
            else:
                p.grad.data.mul_(clip_coef)
    return total_norm
def move_optimizer_to_cuda(optimizer):
    """
    Move the optimizer state to GPU, if necessary.
    After calling, any parameter-specific state in the optimizer
    will be located on the same device as the parameter itself.
    """
    for group in optimizer.param_groups:
        for param in group['params']:
            if not param.is_cuda:
                # CPU parameter: its state stays where it is.
                continue
            state = optimizer.state[param]
            for key in state.keys():
                if isinstance(state[key], torch.Tensor):
                    state[key] = state[key].cuda(device=param.get_device())
class TensorboardWriter:
    """
    Wraps a pair of ``SummaryWriter`` instances but is a no-op if they're ``None``.
    Allows Tensorboard logging without always checking for Nones first.
    """
    def __init__(self, train_log: SummaryWriter = None, validation_log: SummaryWriter = None) -> None:
        self._train_log = train_log
        self._validation_log = validation_log

    @staticmethod
    def _item(value: Any):
        # Unwrap zero-dimensional tensors; plain numbers pass through untouched.
        return value.item() if hasattr(value, 'item') else value

    def add_train_scalar(self, name: str, value: float, global_step: int) -> None:
        """Log a scalar to the training log, if one was configured."""
        if self._train_log is None:
            return
        self._train_log.add_scalar(name, self._item(value), global_step)

    def add_train_histogram(self, name: str, values: torch.Tensor, global_step: int) -> None:
        """Log a histogram of ``values`` to the training log, if one was configured."""
        if self._train_log is None:
            return
        if isinstance(values, torch.Tensor):
            flattened = values.cpu().data.numpy().flatten()
            self._train_log.add_histogram(name, flattened, global_step)

    def add_validation_scalar(self, name: str, value: float, global_step: int) -> None:
        """Log a scalar to the validation log, if one was configured."""
        if self._validation_log is None:
            return
        self._validation_log.add_scalar(name, self._item(value), global_step)
def time_to_str(timestamp: int) -> str:
"""
Convert seconds past Epoch to human readable string.
"""
datetimestamp = datetime.datetime.fromtimestamp(timestamp)
return '{:04d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}'.format(
datetimestamp.year, datetimestamp.month, datetimestamp.day,
datetimestamp.hour, datetimestamp.minute, datetimestamp.second
)
def str_to_time(time_str: str) -> datetime.datetime:
    """
    Convert a ``time_to_str``-formatted string back to datetime.datetime.
    """
    year, month, day, hour, minute, second = (int(part) for part in time_str.split('-'))
    return datetime.datetime(year, month, day, hour, minute, second)
class MultiTaskTrainer2:
    def __init__(self,
                 model: Model,
                 optimizer: torch.optim.Optimizer,
                 iterator: DataIterator,
                 train_dataset: Iterable[Instance],
                 train_dataset_aux: Iterable[Instance],
                 train_dataset_aux2: Optional[Iterable[Instance]],
                 mixing_ratio: float = 0.17,
                 mixing_ratio2: float = 0.17,
                 cutoff_epoch: int = -1,
                 validation_dataset: Optional[Iterable[Instance]] = None,
                 validation_dataset_aux: Optional[Iterable] = None,
                 validation_dataset_aux2: Optional[Iterable[Instance]] = None,
                 patience: Optional[int] = None,
                 validation_metric: str = "-loss",
                 validation_iterator: DataIterator = None,
                 shuffle: bool = True,
                 num_epochs: int = 20,
                 serialization_dir: Optional[str] = None,
                 num_serialized_models_to_keep: int = 20,
                 keep_serialized_model_every_num_seconds: int = None,
                 model_save_interval: float = None,
                 cuda_device: Union[int, List] = -1,
                 grad_norm: Optional[float] = None,
                 grad_clipping: Optional[float] = None,
                 learning_rate_scheduler: Optional[LearningRateScheduler] = None,
                 summary_interval: int = 100,
                 histogram_interval: int = None,
                 should_log_parameter_statistics: bool = True,
                 should_log_learning_rate: bool = False,
                 iterator_aux: Optional[DataIterator] = None,
                 iterator_aux2: Optional[DataIterator] = None) -> None:
        """
        Parameters
        ----------
        model : ``Model``, required.
            An AllenNLP model to be optimized. Pytorch Modules can also be optimized if
            their ``forward`` method returns a dictionary with a "loss" key, containing a
            scalar tensor representing the loss function to be optimized.
        optimizer : ``torch.nn.Optimizer``, required.
            An instance of a Pytorch Optimizer, instantiated with the parameters of the
            model to be optimized.
        iterator : ``DataIterator``, required.
            A method for iterating over a ``Dataset``, yielding padded indexed batches.
        train_dataset : ``Dataset``, required.
            A ``Dataset`` to train on. The dataset should have already been indexed.
        train_dataset_aux : ``Dataset``, required.
            A ``Dataset`` for auxiliary task 1 to train on.
        train_dataset_aux2 : ``Dataset``, required.
            A ``Dataset`` for second auxiliary task to train on. The dataset should have already been indexed.
        mixing_ratio: a float specifying the influence of the first auxiliary task on the final loss
        mixing_ratio2: a float specifying the influence of the second auxiliary task on the final loss
        cutoff_epoch: multitask training starts from the epoch after the epoch specified by cutoff_epoch
        validation_dataset : ``Dataset``, optional, (default = None).
            A ``Dataset`` to evaluate on. The dataset should have already been indexed.
        validation_dataset_aux : a validation dataset for the first auxiliary task
        validation_dataset_aux2 : a validation dataset for the second auxiliary task
        patience : Optional[int] > 0, optional (default=None)
            Number of epochs to be patient before early stopping: the training is stopped
            after ``patience`` epochs with no improvement. If given, it must be ``> 0``.
            If None, early stopping is disabled.
        validation_metric : str, optional (default="loss")
            Validation metric to measure for whether to stop training using patience
            and whether to serialize an ``is_best`` model each epoch. The metric name
            must be prepended with either "+" or "-", which specifies whether the metric
            is an increasing or decreasing function.
        validation_iterator : ``DataIterator``, optional (default=None)
            An iterator to use for the validation set. If ``None``, then
            use the training `iterator`.
        shuffle: ``bool``, optional (default=True)
            Whether to shuffle the instances in the iterator or not.
        num_epochs : int, optional (default = 20)
            Number of training epochs.
        serialization_dir : str, optional (default=None)
            Path to directory for saving and loading model files. Models will not be saved if
            this parameter is not passed.
        num_serialized_models_to_keep : ``int``, optional (default=20)
            Number of previous model checkpoints to retain. Default is to keep 20 checkpoints.
            A value of None or -1 means all checkpoints will be kept.
        keep_serialized_model_every_num_seconds : ``int``, optional (default=None)
            If num_serialized_models_to_keep is not None, then occasionally it's useful to
            save models at a given interval in addition to the last num_serialized_models_to_keep.
            To do so, specify keep_serialized_model_every_num_seconds as the number of seconds
            between permanently saved checkpoints. Note that this option is only used if
            num_serialized_models_to_keep is not None, otherwise all checkpoints are kept.
        model_save_interval : ``float``, optional (default=None)
            If provided, then serialize models every ``model_save_interval``
            seconds within single epochs. In all cases, models are also saved
            at the end of every epoch if ``serialization_dir`` is provided.
        cuda_device : ``int``, optional (default = -1)
            An integer specifying the CUDA device to use. If -1, the CPU is used.
        grad_norm : ``float``, optional, (default = None).
            If provided, gradient norms will be rescaled to have a maximum of this value.
        grad_clipping : ``float``, optional (default = ``None``).
            If provided, gradients will be clipped `during the backward pass` to have an (absolute)
            maximum of this value. If you are getting ``NaNs`` in your gradients during training
            that are not solved by using ``grad_norm``, you may need this.
        learning_rate_scheduler : ``PytorchLRScheduler``, optional, (default = None)
            A Pytorch learning rate scheduler. The learning rate will be decayed with respect to
            this schedule at the end of each epoch. If you use
            :class:`torch.optim.lr_scheduler.ReduceLROnPlateau`, this will use the ``validation_metric``
            provided to determine if learning has plateaued. To support updating the learning
            rate on every batch, this can optionally implement ``step_batch(batch_num_total)`` which
            updates the learning rate given the batch number.
        summary_interval: ``int``, optional, (default = 100)
            Number of batches between logging scalars to tensorboard
        histogram_interval : ``int``, optional, (default = ``None``)
            If not None, then log histograms to tensorboard every ``histogram_interval`` batches.
            When this parameter is specified, the following additional logging is enabled:
                * Histograms of model parameters
                * The ratio of parameter update norm to parameter norm
                * Histogram of layer activations
            We log histograms of the parameters returned by
            ``model.get_parameters_for_histogram_tensorboard_logging``.
            The layer activations are logged for any modules in the ``Model`` that have
            the attribute ``should_log_activations`` set to ``True``. Logging
            histograms requires a number of GPU-CPU copies during training and is typically
            slow, so we recommend logging histograms relatively infrequently.
            Note: only Modules that return tensors, tuples of tensors or dicts
            with tensors as values currently support activation logging.
        should_log_parameter_statistics : ``bool``, optional, (default = True)
            Whether to send parameter statistics (mean and standard deviation
            of parameters and gradients) to tensorboard.
        should_log_learning_rate : ``bool``, optional, (default = False)
            Whether to send parameter specific learning rate to tensorboard.
        iterator_aux : ``DataIterator``, required.
            A method for iterating over a ``Dataset`` for the first auxiliary task, yielding padded indexed batches.
        iterator_aux2 : ``DataIterator``, required.
            A method for iterating over a ``Dataset`` for the second auxiliary task, yielding padded indexed batches.
        """
        self._model = model
        self._iterator = iterator
        self._validation_iterator = validation_iterator
        self._shuffle = shuffle
        self._optimizer = optimizer
        self._train_data = train_dataset
        self._validation_data = validation_dataset
        # Auxiliary-task datasets, iterators and their mixing weights in the joint loss.
        self._train_dataset_aux = train_dataset_aux
        self._train_dataset_aux2 = train_dataset_aux2
        self._validation_data_aux = validation_dataset_aux
        self._validation_data_aux2 = validation_dataset_aux2
        self._cutoff_epoch = cutoff_epoch
        self._mixing_ratio = mixing_ratio
        self._mixing_ratio2 = mixing_ratio2
        self._iterator_aux = iterator_aux
        self._iterator_aux2 = iterator_aux2
        if patience is None:  # no early stopping
            if validation_dataset:
                logger.warning('You provided a validation dataset but patience was set to None, '
                               'meaning that early stopping is disabled')
        elif (not isinstance(patience, int)) or patience <= 0:
            raise ConfigurationError('{} is an invalid value for "patience": it must be a positive integer '
                                     'or None (if you want to disable early stopping)'.format(patience))
        self._patience = patience
        self._num_epochs = num_epochs
        # Checkpoint bookkeeping.
        self._serialization_dir = serialization_dir
        self._num_serialized_models_to_keep = num_serialized_models_to_keep
        self._keep_serialized_model_every_num_seconds = keep_serialized_model_every_num_seconds
        self._serialized_paths: List[Any] = []
        self._last_permanent_saved_checkpoint_time = time.time()
        self._model_save_interval = model_save_interval
        self._grad_norm = grad_norm
        self._grad_clipping = grad_clipping
        self._learning_rate_scheduler = learning_rate_scheduler
        # The validation metric is prefixed with "+" (maximize) or "-" (minimize).
        increase_or_decrease = validation_metric[0]
        if increase_or_decrease not in ["+", "-"]:
            raise ConfigurationError("Validation metrics must specify whether they should increase "
                                     "or decrease by pre-pending the metric name with a +/-.")
        self._validation_metric = validation_metric[1:]
        self._validation_metric_decreases = increase_or_decrease == "-"
        if not isinstance(cuda_device, int) and not isinstance(cuda_device, list):
            raise ConfigurationError("Expected an int or list for cuda_device, got {}".format(cuda_device))
        if isinstance(cuda_device, list):
            logger.warning(f"Multiple GPU support is experimental not recommended for use. "
                           "In some cases it may lead to incorrect results or undefined behavior.")
            self._multiple_gpu = True
            self._cuda_devices = cuda_device
        else:
            self._multiple_gpu = False
            self._cuda_devices = [cuda_device]
        # Move the model to the first device; -1 means stay on CPU.
        if self._cuda_devices[0] != -1:
            self._model = self._model.cuda(self._cuda_devices[0])
        self._cuda_device = self._cuda_devices[0]
        self._log_interval = 10  # seconds
        self._summary_interval = summary_interval
        self._histogram_interval = histogram_interval
        self._log_histograms_this_batch = False
        self._should_log_parameter_statistics = should_log_parameter_statistics
        self._should_log_learning_rate = should_log_learning_rate
        # We keep the total batch number as a class variable because it
        # is used inside a closure for the hook which logs activations in
        # ``_enable_activation_logging``.
        self._batch_num_total = 0
        self._last_log = 0.0  # time of last logging
        if serialization_dir is not None:
            train_log = SummaryWriter(os.path.join(serialization_dir, "log", "train"))
            validation_log = SummaryWriter(os.path.join(serialization_dir, "log", "validation"))
            self._tensorboard = TensorboardWriter(train_log, validation_log)
        else:
            # No serialization dir: the no-op writer swallows all logging calls.
            self._tensorboard = TensorboardWriter()
        self._warned_tqdm_ignores_underscores = False
def _enable_gradient_clipping(self) -> None:
if self._grad_clipping is not None:
# Pylint is unable to tell that we're in the case that _grad_clipping is not None...
# pylint: disable=invalid-unary-operand-type
clip_function = lambda grad: grad.clamp(-self._grad_clipping, self._grad_clipping)
for parameter in self._model.parameters():
if parameter.requires_grad:
parameter.register_hook(clip_function)
    def _enable_activation_logging(self) -> None:
        """
        Log activations to tensorboard

        Registers a forward hook on every sub-module whose
        ``should_log_activations`` attribute is True; the hook sends the
        module's outputs to tensorboard as histograms whenever
        ``self._log_histograms_this_batch`` is set by the training loop.
        """
        if self._histogram_interval is not None:
            # To log activation histograms to the forward pass, we register
            # a hook on forward to capture the output tensors.
            # This uses a closure on self._log_histograms_this_batch to
            # determine whether to send the activations to tensorboard,
            # since we don't want them on every call.
            for _, module in self._model.named_modules():
                if not getattr(module, 'should_log_activations', False):
                    # skip it
                    continue
                def hook(module_, inputs, outputs):
                    # pylint: disable=unused-argument,cell-var-from-loop
                    # The hook reads self._log_histograms_this_batch at call
                    # time, so toggling that flag controls logging per batch.
                    log_prefix = 'activation_histogram/{0}'.format(module_.__class__)
                    if self._log_histograms_this_batch:
                        if isinstance(outputs, torch.Tensor):
                            log_name = log_prefix
                            self._tensorboard.add_train_histogram(log_name,
                                                                  outputs,
                                                                  self._batch_num_total)
                        elif isinstance(outputs, (list, tuple)):
                            for i, output in enumerate(outputs):
                                log_name = "{0}_{1}".format(log_prefix, i)
                                self._tensorboard.add_train_histogram(log_name,
                                                                      output,
                                                                      self._batch_num_total)
                        elif isinstance(outputs, dict):
                            for k, tensor in outputs.items():
                                log_name = "{0}_{1}".format(log_prefix, k)
                                self._tensorboard.add_train_histogram(log_name,
                                                                      tensor,
                                                                      self._batch_num_total)
                        else:
                            # skip it: only tensors, tuples/lists of tensors and
                            # dicts with tensor values are supported.
                            pass
                module.register_forward_hook(hook)
def _rescale_gradients(self) -> Optional[float]:
"""
Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled.
"""
if self._grad_norm:
parameters_to_clip = [p for p in self._model.parameters()
if p.grad is not None]
return sparse_clip_norm(parameters_to_clip, self._grad_norm)
return None
def _data_parallel(self, batch):
"""
Do the forward pass using multiple GPUs. This is a simplification
of torch.nn.parallel.data_parallel to support the allennlp model
interface.
"""
inputs, module_kwargs = scatter_kwargs((), batch, self._cuda_devices, 0)
used_device_ids = self._cuda_devices[:len(inputs)]
replicas = replicate(self._model, used_device_ids)
outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
# Only the 'loss' is needed.
# a (num_gpu, ) tensor with loss on each GPU
losses = gather([output['loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
return {'loss': losses.mean()}
    def _batch_loss(self, batch: torch.Tensor,
                    for_training: bool,
                    batch_aux: torch.Tensor=None,
                    batch_aux2: torch.Tensor=None) -> torch.Tensor:
        """
        Does a forward pass on the given batch and auxiliary data batches and returns the ``loss`` value in the result.
        If ``for_training`` is `True` also applies regularization penalty.

        The two auxiliary batches are only used when BOTH are provided; the
        final loss is then ``loss + mixing_ratio * loss_aux + mixing_ratio2 * loss_aux2``.
        Returns None when not training and the model produced no "loss" key.
        """
        if self._multiple_gpu:
            output_dict = self._data_parallel(batch)
            if batch_aux is not None:
                # Auxiliary batches cannot be scattered across devices here.
                raise ConfigurationError('multi-gpu not supported for multi-task training.')
        else:
            batch = util.move_to_device(batch, self._cuda_devices[0])
            output_dict = self._model(**batch)
        try:
            loss = output_dict["loss"]
            if for_training:
                loss += self._model.get_regularization_penalty()
        except KeyError:
            if for_training:
                raise RuntimeError("The model you are trying to optimize does not contain a"
                                   " 'loss' key in the output of model.forward(inputs).")
            # Validation is allowed to skip the loss entirely.
            loss = None
        if batch_aux is not None and batch_aux2 is not None:
            batch_aux = util.move_to_device(batch_aux, self._cuda_devices[0])
            batch_aux2 = util.move_to_device(batch_aux2, self._cuda_devices[0])
            output_dict_aux = self._model(**batch_aux)
            output_dict_aux2 = self._model(**batch_aux2)
            try:
                loss_aux = output_dict_aux["loss"]
                loss_aux2 = output_dict_aux2["loss"]
                if for_training:
                    loss_aux += self._model.get_regularization_penalty()
                    loss_aux2 += self._model.get_regularization_penalty()
            except KeyError:
                raise ConfigurationError("The auxiliary model you are trying to optimize does not contain a"
                                         " 'loss' key in the output of model.forward(inputs).")
            # multi-task loss
            loss = loss + self._mixing_ratio * loss_aux + self._mixing_ratio2 * loss_aux2
        return loss
def _get_metrics(self, total_loss: float, num_batches: int, reset: bool = False) -> Dict[str, float]:
"""
Gets the metrics but sets ``"loss"`` to
the total loss divided by the ``num_batches`` so that
the ``"loss"`` metric is "average loss per batch".
"""
metrics = self._model.get_metrics(reset=reset)
metrics["loss"] = float(total_loss / num_batches) if num_batches > 0 else 0.0
return metrics
    def _train_epoch(self, epoch: int) -> Dict[str, float]:
        """
        Trains one epoch and returns metrics.

        After ``self._cutoff_epoch`` the loop trains jointly on the main task
        plus both auxiliary tasks; before that, only the main task is used.
        Note that iteration zips the three generators together, so the epoch
        ends when the shortest of the three is exhausted.
        """
        logger.info("Epoch %d/%d", epoch, self._num_epochs - 1)
        logger.info(f"Peak CPU memory usage MB: {peak_memory_mb()}")
        for gpu, memory in gpu_memory_mb().items():
            logger.info(f"GPU {gpu} memory usage MB: {memory}")
        train_loss = 0.0
        # Set the model to "train" mode.
        self._model.train()
        # Get tqdm for the training batches
        train_generator = self._iterator(self._train_data,
                                         num_epochs=1,
                                         shuffle=self._shuffle)
        train_generator_aux = self._iterator_aux(self._train_dataset_aux,
                                                 num_epochs=1,
                                                 shuffle=self._shuffle)
        train_generator_aux2 = self._iterator_aux2(self._train_dataset_aux2,
                                                   num_epochs=1,
                                                   shuffle=self._shuffle)
        # Multitask training kicks in strictly after the cutoff epoch.
        multitask_training = False
        if epoch > self._cutoff_epoch:
            multitask_training = True
            logger.info("Multitask Training")
        else:
            logger.info("Training")
        num_training_batches = self._iterator.get_num_batches(self._train_data)
        num_training_batches_aux = self._iterator_aux.get_num_batches(self._train_dataset_aux)
        num_training_batches_aux2 = self._iterator_aux2.get_num_batches(self._train_dataset_aux2)
        self._last_log = time.time()
        last_save_time = time.time()
        batches_this_epoch = 0
        # Defensive: __init__ sets this to 0, but guard against None anyway.
        if self._batch_num_total is None:
            self._batch_num_total = 0
        if self._histogram_interval is not None:
            histogram_parameters = set(self._model.get_parameters_for_histogram_tensorboard_logging())
        logger.info("Training")
        train_generator_tqdm = Tqdm.tqdm(train_generator,
                                         total=num_training_batches)
        # train_aux_generator_tqdm = Tqdm.tqdm(train_generator_aux,
        #                                      total=num_training_batches_aux)
        for batch, batch_aux, batch_aux2 in zip(train_generator_tqdm, train_generator_aux, train_generator_aux2):
            batches_this_epoch += 1
            self._batch_num_total += 1
            batch_num_total = self._batch_num_total
            self._log_histograms_this_batch = self._histogram_interval is not None and (
                batch_num_total % self._histogram_interval == 0)
            self._optimizer.zero_grad()
            if multitask_training:
                loss = self._batch_loss(batch,
                                        for_training=True,
                                        batch_aux=batch_aux,
                                        batch_aux2=batch_aux2)
            else:
                loss = self._batch_loss(batch, for_training=True)
            loss.backward()
            train_loss += loss.item()
            batch_grad_norm = self._rescale_gradients()
            # This does nothing if batch_num_total is None or you are using an
            # LRScheduler which doesn't update per batch.
            if self._learning_rate_scheduler:
                self._learning_rate_scheduler.step_batch(batch_num_total)
            if self._log_histograms_this_batch:
                # get the magnitude of parameter updates for logging
                # We need a copy of current parameters to compute magnitude of updates,
                # and copy them to CPU so large models won't go OOM on the GPU.
                param_updates = {name: param.detach().cpu().clone()
                                 for name, param in self._model.named_parameters()}
                self._optimizer.step()
                for name, param in self._model.named_parameters():
                    param_updates[name].sub_(param.detach().cpu())
                    update_norm = torch.norm(param_updates[name].view(-1, ))
                    param_norm = torch.norm(param.view(-1, )).cpu()
                    self._tensorboard.add_train_scalar("gradient_update/" + name,
                                                       update_norm / (param_norm + 1e-7),
                                                       batch_num_total)
            else:
                self._optimizer.step()
            # Update the description with the latest metrics
            metrics = self._get_metrics(train_loss, batches_this_epoch)
            description = self._description_from_metrics(metrics)
            train_generator_tqdm.set_description(description, refresh=False)
            # Log parameter values to Tensorboard
            if batch_num_total % self._summary_interval == 0:
                if self._should_log_parameter_statistics:
                    self._parameter_and_gradient_statistics_to_tensorboard(batch_num_total, batch_grad_norm)
                if self._should_log_learning_rate:
                    self._learning_rates_to_tensorboard(batch_num_total)
                self._tensorboard.add_train_scalar("loss/loss_train", metrics["loss"], batch_num_total)
                self._metrics_to_tensorboard(batch_num_total,
                                             {"epoch_metrics/" + k: v for k, v in metrics.items()})
            if self._log_histograms_this_batch:
                self._histograms_to_tensorboard(batch_num_total, histogram_parameters)
            # Save model if needed.
            if self._model_save_interval is not None and (
                    time.time() - last_save_time > self._model_save_interval
            ):
                last_save_time = time.time()
                self._save_checkpoint(
                    '{0}.{1}'.format(epoch, time_to_str(int(last_save_time))), [], is_best=False
                )
        return self._get_metrics(train_loss, batches_this_epoch, reset=True)
def _should_stop_early(self, metric_history: List[float]) -> bool:
"""
uses patience and the validation metric to determine if training should stop early
"""
if self._patience and self._patience < len(metric_history):
# Pylint can't figure out that in this branch `self._patience` is an int.
# pylint: disable=invalid-unary-operand-type
# Is the best score in the past N epochs worse than or equal the best score overall?
if self._validation_metric_decreases:
return min(metric_history[-self._patience:]) >= min(metric_history[:-self._patience])
else:
return max(metric_history[-self._patience:]) <= max(metric_history[:-self._patience])
return False
    def _parameter_and_gradient_statistics_to_tensorboard(self, # pylint: disable=invalid-name
                                                          epoch: int,
                                                          batch_grad_norm: float) -> None:
        """
        Send the mean and std of all parameters and gradients to tensorboard, as well
        as logging the average gradient norm.

        NOTE(review): despite the parameter name, ``_train_epoch`` passes
        ``batch_num_total`` here, so the x-axis of these plots is batches.
        """
        # Log parameter values to Tensorboard
        for name, param in self._model.named_parameters():
            self._tensorboard.add_train_scalar("parameter_mean/" + name,
                                               param.data.mean(),
                                               epoch)
            self._tensorboard.add_train_scalar("parameter_std/" + name, param.data.std(), epoch)
            if param.grad is not None:
                if is_sparse(param.grad):
                    # pylint: disable=protected-access
                    grad_data = param.grad.data._values()
                else:
                    grad_data = param.grad.data
                # skip empty gradients
                if torch.prod(torch.tensor(grad_data.shape)).item() > 0: # pylint: disable=not-callable
                    self._tensorboard.add_train_scalar("gradient_mean/" + name,
                                                       grad_data.mean(),
                                                       epoch)
                    self._tensorboard.add_train_scalar("gradient_std/" + name,
                                                       grad_data.std(),
                                                       epoch)
                else:
                    # no gradient for a parameter with sparse gradients
                    logger.info("No gradient for %s, skipping tensorboard logging.", name)
        # norm of gradients
        if batch_grad_norm is not None:
            self._tensorboard.add_train_scalar("gradient_norm",
                                               batch_grad_norm,
                                               epoch)
def _learning_rates_to_tensorboard(self, batch_num_total: int):
"""
Send current parameter specific learning rates to tensorboard
"""
# optimizer stores lr info keyed by parameter tensor
# we want to log with parameter name
names = {param: name for name, param in self._model.named_parameters()}
for group in self._optimizer.param_groups:
if 'lr' not in group:
continue
rate = group['lr']
for param in group['params']:
# check whether params has requires grad or not
effective_rate = rate * float(param.requires_grad)
self._tensorboard.add_train_scalar(
"learning_rate/" + names[param],
effective_rate,
batch_num_total
)
def _histograms_to_tensorboard(self, epoch: int, histogram_parameters: Set[str]) -> None:
"""
Send histograms of parameters to tensorboard.
"""
for name, param in self._model.named_parameters():
if name in histogram_parameters:
self._tensorboard.add_train_histogram("parameter_histogram/" + name,
param,
epoch)
def _metrics_to_tensorboard(self,
epoch: int,
train_metrics: dict,
val_metrics: dict = None) -> None:
"""
Sends all of the train metrics (and validation metrics, if provided) to tensorboard.
"""
metric_names = set(train_metrics.keys())
if val_metrics is not None:
metric_names.update(val_metrics.keys())
val_metrics = val_metrics or {}
for name in metric_names:
train_metric = train_metrics.get(name)
if train_metric is not None:
self._tensorboard.add_train_scalar(name, train_metric, epoch)
val_metric = val_metrics.get(name)
if val_metric is not None:
self._tensorboard.add_validation_scalar(name, val_metric, epoch)
def _metrics_to_console(self, # pylint: disable=no-self-use
train_metrics: dict,
val_metrics: dict = None) -> None:
"""
Logs all of the train metrics (and validation metrics, if provided) to the console.
"""
val_metrics = val_metrics or {}
dual_message_template = "%s | %8.3f | %8.3f"
no_val_message_template = "%s | %8.3f | %8s"
no_train_message_template = "%s | %8s | %8.3f"
header_template = "%s | %-10s"
metric_names = set(train_metrics.keys())
if val_metrics:
metric_names.update(val_metrics.keys())
name_length = max([len(x) for x in metric_names])
logger.info(header_template, "Training".rjust(name_length + 13), "Validation")
for name in metric_names:
train_metric = train_metrics.get(name)
val_metric = val_metrics.get(name)
if val_metric is not None and train_metric is not None:
logger.info(dual_message_template, name.ljust(name_length), train_metric, val_metric)
elif val_metric is not None:
logger.info(no_train_message_template, name.ljust(name_length), "N/A", val_metric)
elif train_metric is not None:
logger.info(no_val_message_template, name.ljust(name_length), train_metric, "N/A")
def _validation_loss(self) -> Tuple[float, int]:
"""
Computes the validation loss. Returns it and the number of batches.
"""
logger.info("Validating")
self._model.eval()
if self._validation_iterator is not None:
val_iterator = self._validation_iterator
else:
val_iterator = self._iterator
val_generator = val_iterator(self._validation_data,
num_epochs=1,
shuffle=False)
num_validation_batches = val_iterator.get_num_batches(self._validation_data)
val_generator_tqdm = Tqdm.tqdm(val_generator,
total=num_validation_batches)
batches_this_epoch = 0
val_loss = 0
for batch in val_generator_tqdm:
loss = self._batch_loss(batch, for_training=False)
if loss is not None:
# You shouldn't necessarily have to compute a loss for validation, so we allow for
# `loss` to be None. We need to be careful, though - `batches_this_epoch` is
# currently only used as the divisor for the loss function, so we can safely only
# count those batches for which we actually have a loss. If this variable ever
# gets used for something else, we might need to change things around a bit.
batches_this_epoch += 1
val_loss += loss.detach().cpu().numpy()
# Update the description with the latest metrics
val_metrics = self._get_metrics(val_loss, batches_this_epoch)
description = self._description_from_metrics(val_metrics)
val_generator_tqdm.set_description(description, refresh=False)
return val_loss, batches_this_epoch
    def train(self) -> Dict[str, Any]:
        """
        Trains the supplied model with the supplied parameters.

        Runs up to ``self._num_epochs`` epochs (resuming from a checkpoint if
        one exists), validates after each epoch when a validation set was
        given, tracks the best validation metric for checkpointing, and
        returns a dict of overall training/validation metrics.
        """
        try:
            # NOTE(review): _restore_checkpoint is defined elsewhere in this
            # class (outside this view); it returns the epoch to resume from
            # and the validation-metric history.
            epoch_counter, validation_metric_per_epoch = self._restore_checkpoint()
        except RuntimeError:
            traceback.print_exc()
            raise ConfigurationError("Could not recover training from the checkpoint. Did you mean to output to "
                                     "a different serialization directory or delete the existing serialization "
                                     "directory?")
        self._enable_gradient_clipping()
        self._enable_activation_logging()
        logger.info("Beginning training.")
        train_metrics: Dict[str, float] = {}
        val_metrics: Dict[str, float] = {}
        metrics: Dict[str, Any] = {}
        epochs_trained = 0
        training_start_time = time.time()
        for epoch in range(epoch_counter, self._num_epochs):
            epoch_start_time = time.time()
            train_metrics = self._train_epoch(epoch)
            if self._validation_data is not None:
                with torch.no_grad():
                    # We have a validation set, so compute all the metrics on it.
                    val_loss, num_batches = self._validation_loss()
                    val_metrics = self._get_metrics(val_loss, num_batches, reset=True)
                    # Check validation metric for early stopping
                    this_epoch_val_metric = val_metrics[self._validation_metric]
                    # Check validation metric to see if it's the best so far
                    is_best_so_far = self._is_best_so_far(this_epoch_val_metric, validation_metric_per_epoch)
                    validation_metric_per_epoch.append(this_epoch_val_metric)
                    if self._should_stop_early(validation_metric_per_epoch):
                        logger.info("Ran out of patience. Stopping training.")
                        break
            else:
                # No validation set, so just assume it's the best so far.
                is_best_so_far = True
                val_metrics = {}
                this_epoch_val_metric = None
            self._metrics_to_tensorboard(epoch, train_metrics, val_metrics=val_metrics)
            self._metrics_to_console(train_metrics, val_metrics)
            # Create overall metrics dict
            training_elapsed_time = time.time() - training_start_time
            metrics["training_duration"] = time.strftime("%H:%M:%S", time.gmtime(training_elapsed_time))
            metrics["training_start_epoch"] = epoch_counter
            metrics["training_epochs"] = epochs_trained
            metrics["epoch"] = epoch
            for key, value in train_metrics.items():
                metrics["training_" + key] = value
            for key, value in val_metrics.items():
                metrics["validation_" + key] = value
            if is_best_so_far:
                # Update all the best_ metrics.
                # (Otherwise they just stay the same as they were.)
                metrics['best_epoch'] = epoch
                for key, value in val_metrics.items():
                    metrics["best_validation_" + key] = value
            if self._serialization_dir:
                dump_metrics(os.path.join(self._serialization_dir, f'metrics_epoch_{epoch}.json'), metrics)
            if self._learning_rate_scheduler:
                # The LRScheduler API is agnostic to whether your schedule requires a validation metric -
                # if it doesn't, the validation metric passed here is ignored.
                self._learning_rate_scheduler.step(this_epoch_val_metric, epoch)
            self._save_checkpoint(epoch, validation_metric_per_epoch, is_best=is_best_so_far)
            epoch_elapsed_time = time.time() - epoch_start_time
            logger.info("Epoch duration: %s", time.strftime("%H:%M:%S", time.gmtime(epoch_elapsed_time)))
            if epoch < self._num_epochs - 1:
                # Extrapolate the remaining wall-clock time from the average
                # time per completed epoch.
                training_elapsed_time = time.time() - training_start_time
                estimated_time_remaining = training_elapsed_time * \
                    ((self._num_epochs - epoch_counter) / float(epoch - epoch_counter + 1) - 1)
                formatted_time = str(datetime.timedelta(seconds=int(estimated_time_remaining)))
                logger.info("Estimated training time remaining: %s", formatted_time)
            epochs_trained += 1
        return metrics
def _is_best_so_far(self,
this_epoch_val_metric: float,
validation_metric_per_epoch: List[float]):
if not validation_metric_per_epoch:
return True
elif self._validation_metric_decreases:
return this_epoch_val_metric < min(validation_metric_per_epoch)
else:
return this_epoch_val_metric > max(validation_metric_per_epoch)
def _description_from_metrics(self, metrics: Dict[str, float]) -> str:
if (not self._warned_tqdm_ignores_underscores and
any(metric_name.startswith("_") for metric_name in metrics)):
logger.warning("Metrics with names beginning with \"_\" will "
"not be logged to the tqdm progress bar.")
self._warned_tqdm_ignores_underscores = True
return ', '.join(["%s: %.4f" % (name, value) for name, value in
metrics.items() if not name.startswith("_")]) + " ||"
    def _save_checkpoint(self,
                         epoch: Union[int, str],
                         val_metric_per_epoch: List[float],
                         is_best: Optional[bool] = None) -> None:
        """
        Saves a checkpoint of the model to self._serialization_dir.
        Is a no-op if self._serialization_dir is None.
        Parameters
        ----------
        epoch : Union[int, str], required.
            The epoch of training. If the checkpoint is saved in the middle
            of an epoch, the parameter is a string with the epoch and timestamp.
        val_metric_per_epoch : List[float], required.
            History of per-epoch validation metrics; serialized with the
            training state so early-stopping bookkeeping survives a restart.
        is_best: bool, optional (default = None)
            A flag which causes the model weights at the given epoch to
            be copied to a "best.th" file. The value of this flag should
            be based on some validation metric computed by your model.
        """
        if self._serialization_dir is not None:
            model_path = os.path.join(self._serialization_dir, "model_state_epoch_{}.th".format(epoch))
            model_state = self._model.state_dict()
            torch.save(model_state, model_path)
            # Optimizer/bookkeeping state goes in a separate file so the model
            # weights alone can be loaded for inference.
            training_state = {'epoch': epoch,
                              'val_metric_per_epoch': val_metric_per_epoch,
                              'optimizer': self._optimizer.state_dict(),
                              'batch_num_total': self._batch_num_total}
            if self._learning_rate_scheduler is not None:
                training_state["learning_rate_scheduler"] = \
                    self._learning_rate_scheduler.lr_scheduler.state_dict()
            training_path = os.path.join(self._serialization_dir,
                                         "training_state_epoch_{}.th".format(epoch))
            torch.save(training_state, training_path)
            if is_best:
                logger.info("Best validation performance so far. "
                            "Copying weights to '%s/best.th'.", self._serialization_dir)
                shutil.copyfile(model_path, os.path.join(self._serialization_dir, "best.th"))
            # Retention policy: keep only the newest N checkpoints, except that a
            # checkpoint may be kept permanently if enough wall-clock time has
            # passed since the last permanently kept one.
            if self._num_serialized_models_to_keep and self._num_serialized_models_to_keep >= 0:
                self._serialized_paths.append([time.time(), model_path, training_path])
                if len(self._serialized_paths) > self._num_serialized_models_to_keep:
                    paths_to_remove = self._serialized_paths.pop(0)
                    # Check to see if we should keep this checkpoint, if it has been longer
                    # then self._keep_serialized_model_every_num_seconds since the last
                    # kept checkpoint.
                    remove_path = True
                    if self._keep_serialized_model_every_num_seconds is not None:
                        save_time = paths_to_remove[0]
                        time_since_checkpoint_kept = save_time - self._last_permanent_saved_checkpoint_time
                        if time_since_checkpoint_kept > self._keep_serialized_model_every_num_seconds:
                            # We want to keep this checkpoint.
                            remove_path = False
                            self._last_permanent_saved_checkpoint_time = save_time
                    if remove_path:
                        for fname in paths_to_remove[1:]:
                            os.remove(fname)
def find_latest_checkpoint(self) -> Tuple[str, str]:
"""
Return the location of the latest model and training state files.
If there isn't a valid checkpoint then return None.
"""
have_checkpoint = (self._serialization_dir is not None and
any("model_state_epoch_" in x for x in os.listdir(self._serialization_dir)))
if not have_checkpoint:
return None
serialization_files = os.listdir(self._serialization_dir)
model_checkpoints = [x for x in serialization_files if "model_state_epoch" in x]
# Get the last checkpoint file. Epochs are specified as either an
# int (for end of epoch files) or with epoch and timestamp for
# within epoch checkpoints, e.g. 5.2018-02-02-15-33-42
found_epochs = [
# pylint: disable=anomalous-backslash-in-string
re.search("model_state_epoch_([0-9\.\-]+)\.th", x).group(1)
for x in model_checkpoints
]
int_epochs: Any = []
for epoch in found_epochs:
pieces = epoch.split('.')
if len(pieces) == 1:
# Just a single epoch without timestamp
int_epochs.append([int(pieces[0]), 0])
else:
# has a timestamp
int_epochs.append([int(pieces[0]), pieces[1]])
last_epoch = sorted(int_epochs, reverse=True)[0]
if last_epoch[1] == 0:
epoch_to_load = str(last_epoch[0])
else:
epoch_to_load = '{0}.{1}'.format(last_epoch[0], last_epoch[1])
model_path = os.path.join(self._serialization_dir,
"model_state_epoch_{}.th".format(epoch_to_load))
training_state_path = os.path.join(self._serialization_dir,
"training_state_epoch_{}.th".format(epoch_to_load))
return (model_path, training_state_path)
    def _restore_checkpoint(self) -> Tuple[int, List[float]]:
        """
        Restores a model from a serialization_dir to the last saved checkpoint.
        This includes an epoch count and optimizer state, which is serialized separately
        from model parameters. This function should only be used to continue training -
        if you wish to load a model for inference/load parts of a model into a new
        computation graph, you should use the native Pytorch functions:
        `` model.load_state_dict(torch.load("/path/to/model/weights.th"))``
        If ``self._serialization_dir`` does not exist or does not contain any checkpointed weights,
        this function will do nothing and return 0.
        Returns
        -------
        epoch: int
            The epoch at which to resume training, which should be one after the epoch
            in the saved training state.
        val_metric_per_epoch: List[float]
            The validation-metric history stored in the checkpoint, or an empty
            list for older checkpoints that did not record it.
        """
        latest_checkpoint = self.find_latest_checkpoint()
        if latest_checkpoint is None:
            # No checkpoint to restore, start at 0
            return 0, []
        model_path, training_state_path = latest_checkpoint
        # Load the parameters onto CPU, then transfer to GPU.
        # This avoids potential OOM on GPU for large models that
        # load parameters onto GPU then make a new GPU copy into the parameter
        # buffer. The GPU transfer happens implicitly in load_state_dict.
        model_state = torch.load(model_path, map_location=util.device_mapping(-1))
        training_state = torch.load(training_state_path, map_location=util.device_mapping(-1))
        self._model.load_state_dict(model_state)
        self._optimizer.load_state_dict(training_state["optimizer"])
        if self._learning_rate_scheduler is not None and "learning_rate_scheduler" in training_state:
            self._learning_rate_scheduler.lr_scheduler.load_state_dict(
                    training_state["learning_rate_scheduler"])
        # Optimizer state was loaded on CPU; move it next to the (possibly GPU) parameters.
        move_optimizer_to_cuda(self._optimizer)
        # We didn't used to save `validation_metric_per_epoch`, so we can't assume
        # that it's part of the trainer state. If it's not there, an empty list is all
        # we can do.
        if "val_metric_per_epoch" not in training_state:
            logger.warning("trainer state `val_metric_per_epoch` not found, using empty list")
            val_metric_per_epoch: List[float] = []
        else:
            val_metric_per_epoch = training_state["val_metric_per_epoch"]
        # Mid-epoch checkpoints store the epoch as "<epoch>.<timestamp>"; either
        # way we resume from the integer epoch + 1.
        if isinstance(training_state["epoch"], int):
            epoch_to_return = training_state["epoch"] + 1
        else:
            epoch_to_return = int(training_state["epoch"].split('.')[0]) + 1
        # For older checkpoints with batch_num_total missing, default to old behavior where
        # it is unchanged.
        batch_num_total = training_state.get('batch_num_total')
        if batch_num_total is not None:
            self._batch_num_total = batch_num_total
        return epoch_to_return, val_metric_per_epoch
    # Requires custom from_params.
    @classmethod
    def from_params(cls,
                    model: Model,
                    serialization_dir: str,
                    iterator: DataIterator,
                    iterator_aux: DataIterator,
                    iterator_aux2: DataIterator,
                    train_data: Iterable[Instance],
                    train_data_aux: Iterable[Instance],
                    train_data_aux2: Iterable[Instance],
                    mixing_ratio: float,
                    mixing_ratio2: float,
                    cutoff_epoch: int,
                    validation_data: Optional[Iterable[Instance]],
                    validation_data_aux: Optional[Iterable[Instance]],
                    validation_data_aux2: Optional[Iterable[Instance]],
                    params: Params,
                    validation_iterator: DataIterator = None) -> 'MultiTaskTrainer2':
        """
        Build a ``MultiTaskTrainer2`` from a ``Params`` object plus already-constructed
        model, iterators and (main + two auxiliary) datasets.
        Each ``params.pop*`` call below destructively consumes one key from the
        trainer config section; ``assert_empty`` then guarantees no unrecognized
        keys remain.
        """
        patience = params.pop_int("patience", None)
        validation_metric = params.pop("validation_metric", "-loss")
        shuffle = params.pop_bool("shuffle", True)
        num_epochs = params.pop_int("num_epochs", 20)
        cuda_device = params.pop_int("cuda_device", -1)
        grad_norm = params.pop_float("grad_norm", None)
        grad_clipping = params.pop_float("grad_clipping", None)
        lr_scheduler_params = params.pop("learning_rate_scheduler", None)
        # The model is moved to the GPU *before* the optimizer is built so the
        # optimizer is constructed over the CUDA parameters.
        if cuda_device >= 0:
            model = model.cuda(cuda_device)
        # Only parameters requiring gradients are handed to the optimizer.
        parameters = [[n, p] for n, p in model.named_parameters() if p.requires_grad]
        optimizer = Optimizer.from_params(parameters, params.pop("optimizer"))
        if lr_scheduler_params:
            scheduler = LearningRateScheduler.from_params(optimizer, lr_scheduler_params)
        else:
            scheduler = None
        num_serialized_models_to_keep = params.pop_int("num_serialized_models_to_keep", 20)
        keep_serialized_model_every_num_seconds = params.pop_int(
                "keep_serialized_model_every_num_seconds", None)
        model_save_interval = params.pop_float("model_save_interval", None)
        summary_interval = params.pop_int("summary_interval", 100)
        histogram_interval = params.pop_int("histogram_interval", None)
        should_log_parameter_statistics = params.pop_bool("should_log_parameter_statistics", True)
        should_log_learning_rate = params.pop_bool("should_log_learning_rate", False)
        params.assert_empty(cls.__name__)
        return MultiTaskTrainer2(model, optimizer, iterator,
                                 train_data,
                                 train_data_aux,
                                 train_data_aux2,
                                 mixing_ratio,
                                 mixing_ratio2,
                                 cutoff_epoch,
                                 validation_data,
                                 validation_data_aux,
                                 validation_data_aux2,
                                 patience=patience,
                                 validation_metric=validation_metric,
                                 validation_iterator=validation_iterator,
                                 shuffle=shuffle,
                                 num_epochs=num_epochs,
                                 serialization_dir=serialization_dir,
                                 cuda_device=cuda_device,
                                 grad_norm=grad_norm,
                                 grad_clipping=grad_clipping,
                                 learning_rate_scheduler=scheduler,
                                 num_serialized_models_to_keep=num_serialized_models_to_keep,
                                 keep_serialized_model_every_num_seconds=keep_serialized_model_every_num_seconds,
                                 model_save_interval=model_save_interval,
                                 summary_interval=summary_interval,
                                 histogram_interval=histogram_interval,
                                 should_log_parameter_statistics=should_log_parameter_statistics,
                                 should_log_learning_rate=should_log_learning_rate,
                                 iterator_aux=iterator_aux,
                                 iterator_aux2=iterator_aux2)
| 58,023 | 48.977606 | 119 | py |
scicite | scicite-master/scicite/training/train_multitask_two_tasks.py | """
The `train_multitask` subcommand that can be used to train the model in the multitask fashion
It requires a configuration file and a directory in
which to write the results.
.. code-block:: bash
$ allennlp train --help
usage: allennlp train [-h] -s SERIALIZATION_DIR [-r] [-o OVERRIDES]
[--file-friendly-logging]
[--include-package INCLUDE_PACKAGE]
param_path
Train the specified model on the specified dataset.
positional arguments:
param_path path to parameter file describing the model to be
trained
optional arguments:
-h, --help show this help message and exit
-s SERIALIZATION_DIR, --serialization-dir SERIALIZATION_DIR
directory in which to save the model and its logs
-r, --recover recover training from the state in serialization_dir
-o OVERRIDES, --overrides OVERRIDES
a JSON structure used to override the experiment
configuration
--include-package INCLUDE_PACKAGE
additional packages to include
--file-friendly-logging
outputs tqdm status on separate lines and slows tqdm
refresh rate
"""
import random
from typing import Dict, Iterable, Tuple
import argparse
import logging
import os
import re
import torch
from allennlp.commands.evaluate import evaluate
from allennlp.commands.subcommand import Subcommand
from allennlp.common.checks import ConfigurationError, check_for_gpu
from allennlp.common import Params
from allennlp.common.util import prepare_environment, prepare_global_logging, \
get_frozen_and_tunable_parameter_names, dump_metrics
from allennlp.data import Vocabulary
from allennlp.data.instance import Instance
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.iterators.data_iterator import DataIterator
from allennlp.models.archival import archive_model, CONFIG_NAME
from allennlp.models.model import Model, _DEFAULT_WEIGHTS
from allennlp.training.trainer import Trainer
from scicite.training.multitask_trainer_two_tasks import MultiTaskTrainer2
from scicite.training.vocabulary_multitask import VocabularyMultitask
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class TrainMultiTask2(Subcommand):
    """ Class for training the model with two scaffold tasks """
    def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
        # pylint: disable=protected-access
        subparser = parser.add_parser(
                name,
                description='''Train the specified model on the specified dataset.''',
                help='Train a model')
        # Required positional: the experiment configuration file.
        subparser.add_argument('param_path',
                               type=str,
                               help='path to parameter file describing the model to be trained')
        subparser.add_argument('-s', '--serialization-dir',
                               required=True,
                               type=str,
                               help='directory in which to save the model and its logs')
        subparser.add_argument('-r', '--recover',
                               action='store_true',
                               default=False,
                               help='recover training from the state in serialization_dir')
        subparser.add_argument('-o', '--overrides',
                               type=str,
                               default="",
                               help='a JSON structure used to override the experiment configuration')
        subparser.add_argument('--file-friendly-logging',
                               action='store_true',
                               default=False,
                               help='outputs tqdm status on separate lines and slows tqdm refresh rate')
        # Dispatch to the actual training entry point.
        subparser.set_defaults(func=train_model_from_args)
        return subparser
def train_model_from_args(args: argparse.Namespace):
    """
    Just converts from an ``argparse.Namespace`` object to string paths.
    """
    train_model_from_file(parameter_filename=args.param_path,
                          serialization_dir=args.serialization_dir,
                          overrides=args.overrides,
                          file_friendly_logging=args.file_friendly_logging,
                          recover=args.recover)
def train_model_from_file(parameter_filename: str,
                          serialization_dir: str,
                          overrides: str = "",
                          file_friendly_logging: bool = False,
                          recover: bool = False) -> Model:
    """
    A wrapper around :func:`train_model` which loads the params from a file.
    Parameters
    ----------
    parameter_filename : ``str``
        A json parameter file specifying an AllenNLP experiment.
    serialization_dir : ``str``
        The directory in which to save results and logs. We just pass this along to
        :func:`train_model`.
    overrides : ``str``
        A JSON string that we will use to override values in the input parameter file.
    file_friendly_logging : ``bool``, optional (default=False)
        If ``True``, we make our output more friendly to saved model files. We just pass this
        along to :func:`train_model`.
    recover : ``bool``, optional (default=False)
        If ``True``, we will try to recover a training run from an existing serialization
        directory. This is only intended for use when something actually crashed during the middle
        of a run. For continuing training a model on new data, see the ``fine-tune`` command.
    """
    # Load the experiment config from a file and pass it to ``train_model``.
    params = Params.from_file(parameter_filename, overrides)
    return train_model(params, serialization_dir, file_friendly_logging, recover)
def datasets_from_params(params: Params) -> Tuple[Dict[str, Iterable[Instance]], Dict[str, Iterable[Instance]], Dict[str, Iterable[Instance]]]:
    """
    Load all the datasets specified by the config.
    This includes the main dataset and the two scaffold auxiliary datasets.
    Returns three dicts (main, aux, aux2); the main dict may hold "train",
    "validation" and "test", while each auxiliary dict uses the keys
    "train_aux", "validation_aux" and "test_aux".
    """
    dataset_reader = DatasetReader.from_params(params.pop('dataset_reader'))
    validation_dataset_reader_params = params.pop("validation_dataset_reader", None)
    validation_and_test_dataset_reader: DatasetReader = dataset_reader
    if validation_dataset_reader_params is not None:
        logger.info("Using a separate dataset reader to load validation and test data.")
        validation_and_test_dataset_reader = DatasetReader.from_params(validation_dataset_reader_params)
    train_data_path = params.pop('train_data_path')
    logger.info("Reading training data from %s", train_data_path)
    train_data = dataset_reader.read(train_data_path)
    datasets: Dict[str, Iterable[Instance]] = {"train": train_data}
    # 2. Auxiliary training data.
    dataset_reader_aux = DatasetReader.from_params(params.pop('dataset_reader_aux'))
    train_data_path_aux = params.pop('train_data_path_aux')
    logger.info("Reading auxiliary training data from %s", train_data_path_aux)
    train_data_aux = dataset_reader_aux.read(train_data_path_aux)
    dataset_reader_aux2 = DatasetReader.from_params(params.pop('dataset_reader_aux2'))
    train_data_path_aux2 = params.pop('train_data_path_aux2')
    logger.info("Reading second auxiliary training data for from %s", train_data_path_aux2)
    train_data_aux2 = dataset_reader_aux2.read(train_data_path_aux2)
    # If only using a fraction of the auxiliary data.
    aux_sample_fraction = params.pop("aux_sample_fraction", 1.0)
    if aux_sample_fraction < 1.0:
        sample_size = int(aux_sample_fraction * len(train_data_aux))
        # NOTE(review): sample_size is derived from train_data_aux but is also
        # applied to train_data_aux2; random.sample raises ValueError if it
        # exceeds len(train_data_aux2) -- confirm the two auxiliary datasets
        # are comparable in size before relying on this path.
        train_data_aux = random.sample(train_data_aux, sample_size)
        train_data_aux2 = random.sample(train_data_aux2, sample_size)
    # Balance the datasets by inflating the size of the smaller dataset to the size of the larger dataset.
    train_size = len(train_data)
    aux_train_size = len(train_data_aux)
    aux2_train_size = len(train_data_aux2)
    # Make second auxiliary dataset the same size of the first auxiliary dataset
    # (downsample whichever of the two is larger).
    if aux2_train_size > aux_train_size:
        train_data_aux2 = random.sample(train_data_aux2, aux_train_size)
    else:
        train_data_aux = random.sample(train_data_aux, aux2_train_size)
    # inflate training size to be as large as auxiliary training data
    # (by sampling duplicates with replacement from the smaller side)
    if train_size > aux_train_size:
        difference = train_size - aux_train_size
        aux_sample = [random.choice(train_data_aux) for _ in range(difference)]
        train_data_aux = train_data_aux + aux_sample
        logger.info("Inflating auxiliary train data from {} to {} samples".format(
            aux_train_size, len(train_data_aux)))
    else:
        difference = aux_train_size - train_size
        train_sample = [random.choice(train_data) for _ in range(difference)]
        train_data = train_data + train_sample
        logger.info("Inflating train data from {} to {} samples".format(
            train_size, len(train_data)))
    datasets["train"] = train_data
    # Both auxiliary dicts deliberately use the same key names ("train_aux", etc.).
    datasets_aux = {"train_aux": train_data_aux}
    datasets_aux2 = {"train_aux": train_data_aux2}
    validation_data_path = params.pop('validation_data_path', None)
    if validation_data_path is not None:
        logger.info("Reading validation data from %s", validation_data_path)
        validation_data = validation_and_test_dataset_reader.read(validation_data_path)
        datasets["validation"] = validation_data
    # Auxiliary validation data.
    validation_data_path_aux = params.pop('validation_data_path_aux', None)
    if validation_data_path_aux is not None:
        logger.info(f"Reading auxilliary validation data from {validation_data_path_aux}")
        validation_data_aux = dataset_reader_aux.read(validation_data_path_aux)
        datasets_aux["validation_aux"] = validation_data_aux
    else:
        validation_data_aux = None
    validation_data_path_aux2 = params.pop('validation_data_path_aux2', None)
    if validation_data_path_aux2 is not None:
        logger.info(f"Reading auxilliary validation data from {validation_data_path_aux2}")
        validation_data_aux2 = dataset_reader_aux2.read(validation_data_path_aux2)
        datasets_aux2["validation_aux"] = validation_data_aux2
    else:
        validation_data_aux2 = None
    test_data_path = params.pop("test_data_path", None)
    if test_data_path is not None:
        logger.info("Reading test data from %s", test_data_path)
        test_data = validation_and_test_dataset_reader.read(test_data_path)
        datasets["test"] = test_data
    # Auxiliary test data
    test_data_path_aux = params.pop("test_data_path_aux", None)
    if test_data_path_aux is not None:
        logger.info(f"Reading auxiliary test data from {test_data_path_aux}")
        test_data_aux = dataset_reader_aux.read(test_data_path_aux)
        datasets_aux["test_aux"] = test_data_aux
    else:
        test_data_aux = None
    test_data_path_aux2 = params.pop("test_data_path_aux2", None)
    if test_data_path_aux2 is not None:
        logger.info(f"Reading auxillary test data from {test_data_path_aux2}")
        test_data_aux2 = dataset_reader_aux2.read(test_data_path_aux2)
        datasets_aux2["test_aux"] = test_data_aux2
    else:
        test_data_aux2 = None
    return datasets, datasets_aux, datasets_aux2
def create_serialization_dir(params: Params, serialization_dir: str, recover: bool) -> None:
    """
    Create ``serialization_dir`` if needed, or validate a recovery from it.
    An existing non-empty directory is only accepted with ``recover=True`` and a
    configuration identical to the one saved there; otherwise a
    ``ConfigurationError`` is raised.
    Parameters
    ----------
    params: ``Params``
        A parameter object specifying an AllenNLP Experiment.
    serialization_dir: ``str``
        The directory in which to save results and logs.
    recover: ``bool``
        If ``True``, we will try to recover from an existing serialization directory, and crash if
        the directory doesn't exist, or doesn't match the configuration we're given.
    """
    dir_has_content = os.path.exists(serialization_dir) and os.listdir(serialization_dir)
    if not dir_has_content:
        if recover:
            raise ConfigurationError(f"--recover specified but serialization_dir ({serialization_dir}) "
                                     "does not exist. There is nothing to recover from.")
        os.makedirs(serialization_dir, exist_ok=True)
        return
    if not recover:
        raise ConfigurationError(f"Serialization directory ({serialization_dir}) already exists and is "
                                 f"not empty. Specify --recover to recover training from existing output.")
    logger.info(f"Recovering from prior training at {serialization_dir}.")
    recovered_config_file = os.path.join(serialization_dir, CONFIG_NAME)
    if not os.path.exists(recovered_config_file):
        raise ConfigurationError("The serialization directory already exists but doesn't "
                                 "contain a config.json. You probably gave the wrong directory.")
    loaded_params = Params.from_file(recovered_config_file)
    # Compare the flattened configs key by key; any asymmetric key or differing
    # value makes the recovery unsafe, so we log every mismatch before failing.
    mismatch = False
    flat_params = params.as_flat_dict()
    flat_loaded = loaded_params.as_flat_dict()
    for key in flat_params.keys() - flat_loaded.keys():
        logger.error(f"Key '{key}' found in training configuration but not in the serialization "
                     f"directory we're recovering from.")
        mismatch = True
    for key in flat_loaded.keys() - flat_params.keys():
        logger.error(f"Key '{key}' found in the serialization directory we're recovering from "
                     f"but not in the training config.")
        mismatch = True
    for key in flat_params.keys():
        if flat_params.get(key, None) != flat_loaded.get(key, None):
            logger.error(f"Value for '{key}' in training configuration does not match that the value in "
                         f"the serialization directory we're recovering from: "
                         f"{flat_params[key]} != {flat_loaded[key]}")
            mismatch = True
    if mismatch:
        raise ConfigurationError("Training configuration does not match the configuration we're "
                                 "recovering from.")
def train_model(params: Params,
                serialization_dir: str,
                file_friendly_logging: bool = False,
                recover: bool = False) -> Model:
    """
    Trains the model specified in the given :class:`Params` object, using the data and training
    parameters also specified in that object, and saves the results in ``serialization_dir``.
    Parameters
    ----------
    params : ``Params``
        A parameter object specifying an AllenNLP Experiment.
    serialization_dir : ``str``
        The directory in which to save results and logs.
    file_friendly_logging : ``bool``, optional (default=False)
        If ``True``, we add newlines to tqdm output, even on an interactive terminal, and we slow
        down tqdm's output to only once every 10 seconds.
    recover : ``bool``, optional (default=False)
        If ``True``, we will try to recover a training run from an existing serialization
        directory. This is only intended for use when something actually crashed during the middle
        of a run. For continuing training a model on new data, see the ``fine-tune`` command.
    Returns
    -------
    best_model: ``Model``
        The model with the best epoch weights.
    """
    prepare_environment(params)
    create_serialization_dir(params, serialization_dir, recover)
    prepare_global_logging(serialization_dir, file_friendly_logging)
    check_for_gpu(params.get('trainer').get('cuda_device', -1))
    # Persist the config so a later --recover run can verify it matches.
    params.to_file(os.path.join(serialization_dir, CONFIG_NAME))
    all_datasets, all_datasets_aux, all_datasets_aux2 = datasets_from_params(params)
    datasets_for_vocab_creation = set(params.pop("datasets_for_vocab_creation", all_datasets))
    datasets_for_vocab_creation_aux = set(params.pop("auxiliary_datasets_for_vocab_creation", all_datasets_aux))
    datasets_for_vocab_creation_aux2 = set(params.pop("auxiliary_datasets_for_vocab_creation_2", all_datasets_aux2))
    mixing_ratio = params.pop_float("mixing_ratio")
    mixing_ratio2 = params.pop_float("mixing_ratio2")
    cutoff_epoch = params.pop("cutoff_epoch", -1)
    for dataset in datasets_for_vocab_creation:
        if dataset not in all_datasets:
            raise ConfigurationError(f"invalid 'dataset_for_vocab_creation' {dataset}")
    logger.info("From dataset instances, %s will be considered for vocabulary creation.",
                ", ".join(datasets_for_vocab_creation))
    # Instances from both scaffold (auxiliary) datasets contribute to the vocabulary.
    vocab_instances_aux = [
        instance for key, dataset in all_datasets_aux.items()
        for instance in dataset
        if key in datasets_for_vocab_creation_aux
    ]
    vocab_instances_aux.extend([
        instance for key, dataset in all_datasets_aux2.items()
        for instance in dataset
        if key in datasets_for_vocab_creation_aux2
    ])
    vocab = VocabularyMultitask.from_params(
            params.pop("vocabulary", {}),
            (instance for key, dataset in all_datasets.items()
             for instance in dataset
             if key in datasets_for_vocab_creation),
            instances_aux=vocab_instances_aux
    )
    model = Model.from_params(vocab=vocab, params=params.pop('model'))
    # Initializing the model can have side effect of expanding the vocabulary
    vocab.save_to_files(os.path.join(serialization_dir, "vocabulary"))
    # One iterator per task, all indexing with the shared vocabulary.
    iterator = DataIterator.from_params(params.pop("iterator"))
    iterator.index_with(vocab)
    iterator_aux = DataIterator.from_params(params.pop("iterator_aux"))
    iterator_aux.index_with(vocab)
    iterator_aux2 = DataIterator.from_params(params.pop("iterator_aux2"))
    iterator_aux2.index_with(vocab)
    validation_iterator_params = params.pop("validation_iterator", None)
    if validation_iterator_params:
        validation_iterator = DataIterator.from_params(validation_iterator_params)
        validation_iterator.index_with(vocab)
    else:
        validation_iterator = None
    # TODO: if validation in multi-task need to add validation iterator as above
    train_data = all_datasets.get('train')
    validation_data = all_datasets.get('validation')
    test_data = all_datasets.get('test')
    train_data_aux = all_datasets_aux.get('train_aux')
    validation_data_aux = all_datasets_aux.get('validation_aux')
    test_data_aux = all_datasets_aux.get('test_aux')
    train_data_aux2 = all_datasets_aux2.get('train_aux')
    validation_data_aux2 = all_datasets_aux2.get('validation_aux')
    test_data_aux2 = all_datasets_aux2.get('test_aux')
    trainer_params = params.pop("trainer")
    # Freeze any parameters matching the "no_grad" regexes before building the trainer.
    no_grad_regexes = trainer_params.pop("no_grad", ())
    for name, parameter in model.named_parameters():
        if any(re.search(regex, name) for regex in no_grad_regexes):
            parameter.requires_grad_(False)
    frozen_parameter_names, tunable_parameter_names = \
                   get_frozen_and_tunable_parameter_names(model)
    logger.info("Following parameters are Frozen (without gradient):")
    for name in frozen_parameter_names:
        logger.info(name)
    logger.info("Following parameters are Tunable (with gradient):")
    for name in tunable_parameter_names:
        logger.info(name)
    trainer = MultiTaskTrainer2.from_params(model=model,
                                            serialization_dir=serialization_dir,
                                            iterator=iterator,
                                            iterator_aux=iterator_aux,
                                            iterator_aux2=iterator_aux2,
                                            train_data=train_data,
                                            train_data_aux=train_data_aux,
                                            train_data_aux2=train_data_aux2,
                                            mixing_ratio=mixing_ratio,
                                            mixing_ratio2=mixing_ratio2,
                                            cutoff_epoch=cutoff_epoch,
                                            validation_data_aux=validation_data_aux,
                                            validation_data_aux2=validation_data_aux2,
                                            validation_data=validation_data,
                                            params=trainer_params,
                                            validation_iterator=validation_iterator)
    evaluate_on_test = params.pop_bool("evaluate_on_test", False)
    evaluate_aux_on_test = params.pop_bool("evaluate_aux_on_test", False)
    params.assert_empty('base train command')
    try:
        metrics = trainer.train()
    except KeyboardInterrupt:
        # if we have completed an epoch, try to create a model archive.
        if os.path.exists(os.path.join(serialization_dir, _DEFAULT_WEIGHTS)):
            logging.info("Training interrupted by the user. Attempting to create "
                         "a model archive using the current best epoch weights.")
            archive_model(serialization_dir, files_to_archive=params.files_to_archive)
        raise
    # Now tar up results
    archive_model(serialization_dir, files_to_archive=params.files_to_archive)
    logger.info("Loading the best epoch weights.")
    best_model_state_path = os.path.join(serialization_dir, 'best.th')
    best_model_state = torch.load(best_model_state_path)
    best_model = model
    best_model.load_state_dict(best_model_state)
    if test_data and evaluate_on_test:
        logger.info("The model will be evaluated using the best epoch weights.")
        test_metrics = evaluate(
            best_model, test_data, validation_iterator or iterator,
            cuda_device=trainer._cuda_devices[0]  # pylint: disable=protected-access
        )
        for key, value in test_metrics.items():
            metrics["test_" + key] = value
    elif test_data:
        logger.info("To evaluate on the test set after training, pass the "
                    "'evaluate_on_test' flag, or use the 'allennlp evaluate' command.")
    if test_data_aux and evaluate_aux_on_test:
        # for instance in test_data_aux:
        #     instance.index_fields(vocab)
        # for instance in test_data_aux2:
        #     instance.index_fields(vocab)
        test_metrics_aux = evaluate(best_model, test_data_aux, iterator_aux,
                                    cuda_device=trainer._cuda_devices[0])  # pylint: disable=protected-access
        test_metrics_aux2 = evaluate(best_model, test_data_aux2, iterator_aux2,
                                     cuda_device=trainer._cuda_devices[0])  # pylint: disable=protected-access
        for key, value in test_metrics_aux.items():
            metrics["test_aux_" + key] = value
        for key, value in test_metrics_aux2.items():
            metrics["test_aux2_" + key] = value
    elif test_data_aux:
        logger.info("To evaluate on the auxiliary test set after training, pass the "
                    "'evaluate_on_test' flag, or use the 'allennlp evaluate' command.")
    dump_metrics(os.path.join(serialization_dir, "metrics.json"), metrics, log=True)
    return best_model
| 23,845 | 46.692 | 143 | py |
scicite | scicite-master/scicite/training/multitask_trainer.py | """
This module is an extended trainer based on the allennlp's default trainer to handle multitask training
A :class:`~allennlp.training.trainer.Trainer` is responsible for training a
:class:`~allennlp.models.model.Model`.
Typically you might create a configuration file specifying the model and
training parameters and then use :mod:`~allennlp.commands.train`
rather than instantiating a ``Trainer`` yourself.
"""
# pylint: disable=too-many-lines
import logging
import os
import shutil
import time
import re
import datetime
import traceback
from typing import Dict, Optional, List, Tuple, Union, Iterable, Any, Set
import torch
import torch.optim.lr_scheduler
from torch.nn.parallel import replicate, parallel_apply
from torch.nn.parallel.scatter_gather import scatter_kwargs, gather
from tensorboardX import SummaryWriter
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import peak_memory_mb, gpu_memory_mb, dump_metrics
from allennlp.common.tqdm import Tqdm
from allennlp.data.instance import Instance
from allennlp.data.iterators.data_iterator import DataIterator
from allennlp.models.model import Model
from allennlp.nn import util
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.optimizers import Optimizer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def is_sparse(tensor):
return tensor.is_sparse
def sparse_clip_norm(parameters, max_norm, norm_type=2) -> float:
    """Clips gradient norm of an iterable of parameters.

    The norm is computed over all gradients together, as if they were
    concatenated into a single vector. Gradients are modified in-place.
    Supports sparse gradients.

    Parameters
    ----------
    parameters : ``(Iterable[torch.Tensor])``
        An iterable of Tensors that will have gradients normalized.
    max_norm : ``float``
        The max norm of the gradients.
    norm_type : ``float``
        The type of the used p-norm. Can be ``'inf'`` for infinity norm.

    Returns
    -------
    Total norm of the parameters (viewed as a single vector).
    """
    # pylint: disable=invalid-name,protected-access
    grads_present = [p for p in parameters if p.grad is not None]
    max_norm = float(max_norm)
    norm_type = float(norm_type)
    if norm_type == float('inf'):
        # Infinity norm: the single largest absolute gradient entry anywhere.
        total_norm = max(p.grad.data.abs().max() for p in grads_present)
    else:
        accumulated = 0
        for p in grads_present:
            if p.grad.is_sparse:
                # Coalesce repeated indices before taking the norm.
                param_norm = p.grad.data.coalesce()._values().norm(norm_type)
            else:
                param_norm = p.grad.data.norm(norm_type)
            accumulated = accumulated + param_norm ** norm_type
        total_norm = accumulated ** (1. / norm_type)
    scale = max_norm / (total_norm + 1e-6)
    if scale < 1:
        # Rescale every gradient in place so the combined norm equals max_norm.
        for p in grads_present:
            if p.grad.is_sparse:
                p.grad.data._values().mul_(scale)
            else:
                p.grad.data.mul_(scale)
    return total_norm
def move_optimizer_to_cuda(optimizer):
    """
    Move the optimizer state to GPU, if necessary.
    After calling, any parameter specific state in the optimizer
    will be located on the same device as the parameter.
    """
    for group in optimizer.param_groups:
        for parameter in group['params']:
            if not parameter.is_cuda:
                continue
            state = optimizer.state[parameter]
            for key, value in state.items():
                if isinstance(value, torch.Tensor):
                    state[key] = value.cuda(device=parameter.get_device())
class TensorboardWriter:
    """
    Wraps a pair of ``SummaryWriter`` instances but is a no-op if they're ``None``.
    Allows Tensorboard logging without always checking for Nones first.
    """
    def __init__(self, train_log: SummaryWriter = None, validation_log: SummaryWriter = None) -> None:
        self._train_log = train_log
        self._validation_log = validation_log

    @staticmethod
    def _item(value: Any):
        # Unwrap anything exposing .item() (e.g. zero-dim tensors) to a plain number.
        return value.item() if hasattr(value, 'item') else value

    def add_train_scalar(self, name: str, value: float, global_step: int) -> None:
        log = self._train_log
        if log is not None:
            log.add_scalar(name, self._item(value), global_step)

    def add_train_histogram(self, name: str, values: torch.Tensor, global_step: int) -> None:
        log = self._train_log
        # Only tensors are logged; anything else is silently ignored.
        if log is not None and isinstance(values, torch.Tensor):
            log.add_histogram(name, values.cpu().data.numpy().flatten(), global_step)

    def add_validation_scalar(self, name: str, value: float, global_step: int) -> None:
        log = self._validation_log
        if log is not None:
            log.add_scalar(name, self._item(value), global_step)
def time_to_str(timestamp: int) -> str:
    """
    Convert seconds past Epoch to human readable string.
    """
    # Local-time conversion; fields are zero-padded and joined by dashes.
    dt = datetime.datetime.fromtimestamp(timestamp)
    fields = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
    return '{:04d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}'.format(*fields)
def str_to_time(time_str: str) -> datetime.datetime:
    """
    Convert human readable string to datetime.datetime.
    """
    # Inverse of ``time_to_str``: dash-separated integer fields.
    fields = [int(chunk) for chunk in time_str.split('-')]
    return datetime.datetime(*fields)
class MultiTaskTrainer:
    def __init__(self,
                 model: Model,
                 optimizer: torch.optim.Optimizer,
                 iterator: DataIterator,
                 train_dataset: Iterable[Instance],
                 train_dataset_aux: Iterable[Instance],
                 mixing_ratio: float = 0.17,
                 cutoff_epoch: int = -1,
                 validation_dataset: Optional[Iterable[Instance]] = None,
                 validation_dataset_aux: Optional[Iterable] = None,
                 patience: Optional[int] = None,
                 validation_metric: str = "-loss",
                 validation_iterator: DataIterator = None,
                 shuffle: bool = True,
                 num_epochs: int = 20,
                 serialization_dir: Optional[str] = None,
                 num_serialized_models_to_keep: int = 20,
                 keep_serialized_model_every_num_seconds: int = None,
                 model_save_interval: float = None,
                 cuda_device: Union[int, List] = -1,
                 grad_norm: Optional[float] = None,
                 grad_clipping: Optional[float] = None,
                 learning_rate_scheduler: Optional[LearningRateScheduler] = None,
                 summary_interval: int = 100,
                 histogram_interval: int = None,
                 should_log_parameter_statistics: bool = True,
                 should_log_learning_rate: bool = False,
                 iterator_aux: Optional[DataIterator] = None) -> None:
        """
        Parameters
        ----------
        model : ``Model``, required.
            An AllenNLP model to be optimized. Pytorch Modules can also be optimized if
            their ``forward`` method returns a dictionary with a "loss" key, containing a
            scalar tensor representing the loss function to be optimized.
        optimizer : ``torch.nn.Optimizer``, required.
            An instance of a Pytorch Optimizer, instantiated with the parameters of the
            model to be optimized.
        iterator : ``DataIterator``, required.
            A method for iterating over a ``Dataset``, yielding padded indexed batches.
        train_dataset : ``Dataset``, required.
            A ``Dataset`` to train on. The dataset should have already been indexed.
        train_dataset_aux : ``Dataset``, required.
            A ``Dataset`` for auxilliary task. The dataset should have already been indexed.
        mixing_ratio: ratio for mixing the losses for the main and auxilliary task
        cutoff_epoch: start multitask training after the cutoff_epoch
        validation_dataset : ``Dataset``, optional, (default = None).
            A ``Dataset`` to evaluate on. The dataset should have already been indexed.
        validation_dataset_aux: ``Dataset``, optional, (default = None).
            A ``Dataset`` to evaluate on for the auxilliary task
        patience : Optional[int] > 0, optional (default=None)
            Number of epochs to be patient before early stopping: the training is stopped
            after ``patience`` epochs with no improvement. If given, it must be ``> 0``.
            If None, early stopping is disabled.
        validation_metric : str, optional (default="loss")
            Validation metric to measure for whether to stop training using patience
            and whether to serialize an ``is_best`` model each epoch. The metric name
            must be prepended with either "+" or "-", which specifies whether the metric
            is an increasing or decreasing function.
        validation_iterator : ``DataIterator``, optional (default=None)
            An iterator to use for the validation set. If ``None``, then
            use the training `iterator`.
        shuffle: ``bool``, optional (default=True)
            Whether to shuffle the instances in the iterator or not.
        num_epochs : int, optional (default = 20)
            Number of training epochs.
        serialization_dir : str, optional (default=None)
            Path to directory for saving and loading model files. Models will not be saved if
            this parameter is not passed.
        num_serialized_models_to_keep : ``int``, optional (default=20)
            Number of previous model checkpoints to retain. Default is to keep 20 checkpoints.
            A value of None or -1 means all checkpoints will be kept.
        keep_serialized_model_every_num_seconds : ``int``, optional (default=None)
            If num_serialized_models_to_keep is not None, then occasionally it's useful to
            save models at a given interval in addition to the last num_serialized_models_to_keep.
            To do so, specify keep_serialized_model_every_num_seconds as the number of seconds
            between permanently saved checkpoints. Note that this option is only used if
            num_serialized_models_to_keep is not None, otherwise all checkpoints are kept.
        model_save_interval : ``float``, optional (default=None)
            If provided, then serialize models every ``model_save_interval``
            seconds within single epochs. In all cases, models are also saved
            at the end of every epoch if ``serialization_dir`` is provided.
        cuda_device : ``int``, optional (default = -1)
            An integer specifying the CUDA device to use. If -1, the CPU is used.
        grad_norm : ``float``, optional, (default = None).
            If provided, gradient norms will be rescaled to have a maximum of this value.
        grad_clipping : ``float``, optional (default = ``None``).
            If provided, gradients will be clipped `during the backward pass` to have an (absolute)
            maximum of this value. If you are getting ``NaNs`` in your gradients during training
            that are not solved by using ``grad_norm``, you may need this.
        learning_rate_scheduler : ``PytorchLRScheduler``, optional, (default = None)
            A Pytorch learning rate scheduler. The learning rate will be decayed with respect to
            this schedule at the end of each epoch. If you use
            :class:`torch.optim.lr_scheduler.ReduceLROnPlateau`, this will use the ``validation_metric``
            provided to determine if learning has plateaued. To support updating the learning
            rate on every batch, this can optionally implement ``step_batch(batch_num_total)`` which
            updates the learning rate given the batch number.
        summary_interval: ``int``, optional, (default = 100)
            Number of batches between logging scalars to tensorboard
        histogram_interval : ``int``, optional, (default = ``None``)
            If not None, then log histograms to tensorboard every ``histogram_interval`` batches.
            When this parameter is specified, the following additional logging is enabled:
                * Histograms of model parameters
                * The ratio of parameter update norm to parameter norm
                * Histogram of layer activations
            We log histograms of the parameters returned by
            ``model.get_parameters_for_histogram_tensorboard_logging``.
            The layer activations are logged for any modules in the ``Model`` that have
            the attribute ``should_log_activations`` set to ``True``. Logging
            histograms requires a number of GPU-CPU copies during training and is typically
            slow, so we recommend logging histograms relatively infrequently.
            Note: only Modules that return tensors, tuples of tensors or dicts
            with tensors as values currently support activation logging.
        should_log_parameter_statistics : ``bool``, optional, (default = True)
            Whether to send parameter statistics (mean and standard deviation
            of parameters and gradients) to tensorboard.
        should_log_learning_rate : ``bool``, optional, (default = False)
            Whether to send parameter specific learning rate to tensorboard.
        iterator_aux : ``DataIterator``, optional (default = None)
            A method for iterating over the auxilliary ``Dataset``, yielding padded indexed batches.
        """
        self._model = model
        self._iterator = iterator
        self._validation_iterator = validation_iterator
        self._shuffle = shuffle
        self._optimizer = optimizer
        self._train_data = train_dataset
        self._validation_data = validation_dataset
        self._train_dataset_aux = train_dataset_aux
        self._validation_data_aux = validation_dataset_aux
        self._cutoff_epoch = cutoff_epoch
        self._mixing_ratio = mixing_ratio
        self._iterator_aux = iterator_aux
        if patience is None: # no early stopping
            if validation_dataset:
                logger.warning('You provided a validation dataset but patience was set to None, '
                               'meaning that early stopping is disabled')
        elif (not isinstance(patience, int)) or patience <= 0:
            raise ConfigurationError('{} is an invalid value for "patience": it must be a positive integer '
                                     'or None (if you want to disable early stopping)'.format(patience))
        self._patience = patience
        self._num_epochs = num_epochs
        self._serialization_dir = serialization_dir
        self._num_serialized_models_to_keep = num_serialized_models_to_keep
        self._keep_serialized_model_every_num_seconds = keep_serialized_model_every_num_seconds
        self._serialized_paths: List[Any] = []
        self._last_permanent_saved_checkpoint_time = time.time()
        self._model_save_interval = model_save_interval
        self._grad_norm = grad_norm
        self._grad_clipping = grad_clipping
        self._learning_rate_scheduler = learning_rate_scheduler
        # The leading "+"/"-" on the metric name encodes its direction.
        increase_or_decrease = validation_metric[0]
        if increase_or_decrease not in ["+", "-"]:
            raise ConfigurationError("Validation metrics must specify whether they should increase "
                                     "or decrease by pre-pending the metric name with a +/-.")
        self._validation_metric = validation_metric[1:]
        self._validation_metric_decreases = increase_or_decrease == "-"
        if not isinstance(cuda_device, int) and not isinstance(cuda_device, list):
            raise ConfigurationError("Expected an int or list for cuda_device, got {}".format(cuda_device))
        if isinstance(cuda_device, list):
            logger.warning(f"Multiple GPU support is experimental not recommended for use. "
                           "In some cases it may lead to incorrect results or undefined behavior.")
            self._multiple_gpu = True
            self._cuda_devices = cuda_device
        else:
            self._multiple_gpu = False
            self._cuda_devices = [cuda_device]
        # Move the model to the first device unless we are running on CPU (-1).
        if self._cuda_devices[0] != -1:
            self._model = self._model.cuda(self._cuda_devices[0])
        self._cuda_device = self._cuda_devices[0]
        self._log_interval = 10 # seconds
        self._summary_interval = summary_interval
        self._histogram_interval = histogram_interval
        self._log_histograms_this_batch = False
        self._should_log_parameter_statistics = should_log_parameter_statistics
        self._should_log_learning_rate = should_log_learning_rate
        # We keep the total batch number as a class variable because it
        # is used inside a closure for the hook which logs activations in
        # ``_enable_activation_logging``.
        self._batch_num_total = 0
        self._last_log = 0.0 # time of last logging
        if serialization_dir is not None:
            train_log = SummaryWriter(os.path.join(serialization_dir, "log", "train"))
            validation_log = SummaryWriter(os.path.join(serialization_dir, "log", "validation"))
            self._tensorboard = TensorboardWriter(train_log, validation_log)
        else:
            # No serialization dir: use a no-op writer so logging calls are safe.
            self._tensorboard = TensorboardWriter()
        self._warned_tqdm_ignores_underscores = False
def _enable_gradient_clipping(self) -> None:
if self._grad_clipping is not None:
# Pylint is unable to tell that we're in the case that _grad_clipping is not None...
# pylint: disable=invalid-unary-operand-type
clip_function = lambda grad: grad.clamp(-self._grad_clipping, self._grad_clipping)
for parameter in self._model.parameters():
if parameter.requires_grad:
parameter.register_hook(clip_function)
    def _enable_activation_logging(self) -> None:
        """
        Log activations to tensorboard.

        Registers a forward hook on every sub-module that opts in via a
        truthy ``should_log_activations`` attribute. The hook only writes
        histograms when ``self._log_histograms_this_batch`` is set by the
        training loop, so most forward passes pay no logging cost.
        """
        if self._histogram_interval is not None:
            # To log activation histograms to the forward pass, we register
            # a hook on forward to capture the output tensors.
            # This uses a closure on self._log_histograms_this_batch to
            # determine whether to send the activations to tensorboard,
            # since we don't want them on every call.
            for _, module in self._model.named_modules():
                if not getattr(module, 'should_log_activations', False):
                    # skip it
                    continue
                def hook(module_, inputs, outputs):
                    # pylint: disable=unused-argument,cell-var-from-loop
                    log_prefix = 'activation_histogram/{0}'.format(module_.__class__)
                    if self._log_histograms_this_batch:
                        # Outputs may be a tensor, a sequence of tensors, or a
                        # dict of tensors; anything else is silently skipped.
                        if isinstance(outputs, torch.Tensor):
                            log_name = log_prefix
                            self._tensorboard.add_train_histogram(log_name,
                                                                  outputs,
                                                                  self._batch_num_total)
                        elif isinstance(outputs, (list, tuple)):
                            for i, output in enumerate(outputs):
                                log_name = "{0}_{1}".format(log_prefix, i)
                                self._tensorboard.add_train_histogram(log_name,
                                                                      output,
                                                                      self._batch_num_total)
                        elif isinstance(outputs, dict):
                            for k, tensor in outputs.items():
                                log_name = "{0}_{1}".format(log_prefix, k)
                                self._tensorboard.add_train_histogram(log_name,
                                                                      tensor,
                                                                      self._batch_num_total)
                        else:
                            # skip it
                            pass
                module.register_forward_hook(hook)
def _rescale_gradients(self) -> Optional[float]:
"""
Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled.
"""
if self._grad_norm:
parameters_to_clip = [p for p in self._model.parameters()
if p.grad is not None]
return sparse_clip_norm(parameters_to_clip, self._grad_norm)
return None
def _data_parallel(self, batch):
"""
Do the forward pass using multiple GPUs. This is a simplification
of torch.nn.parallel.data_parallel to support the allennlp model
interface.
"""
inputs, module_kwargs = scatter_kwargs((), batch, self._cuda_devices, 0)
used_device_ids = self._cuda_devices[:len(inputs)]
replicas = replicate(self._model, used_device_ids)
outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
# Only the 'loss' is needed.
# a (num_gpu, ) tensor with loss on each GPU
losses = gather([output['loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
return {'loss': losses.mean()}
def _batch_loss(self, batch: torch.Tensor, for_training: bool, batch_aux: torch.Tensor=None) -> torch.Tensor:
"""
Does a forward pass on the given batch and returns the ``loss`` value in the result.
If ``for_training`` is `True` also applies regularization penalty.
"""
if self._multiple_gpu:
output_dict = self._data_parallel(batch)
if batch_aux is not None:
raise ConfigurationError('multi-gpu not supported for multi-task training.')
else:
batch = util.move_to_device(batch, self._cuda_devices[0])
output_dict = self._model(**batch)
try:
loss = output_dict["loss"]
if for_training:
loss += self._model.get_regularization_penalty()
except KeyError:
if for_training:
raise RuntimeError("The model you are trying to optimize does not contain a"
" 'loss' key in the output of model.forward(inputs).")
loss = None
if batch_aux is not None:
batch_aux = util.move_to_device(batch_aux, self._cuda_devices[0])
output_dict_aux = self._model(**batch_aux)
try:
loss_aux = output_dict_aux["loss"]
if for_training:
loss_aux += self._model.get_regularization_penalty()
except KeyError:
raise ConfigurationError("The auxilliary model you are trying to optimize does not contain a"
" 'loss' key in the output of model.forward(inputs).")
# multi-task loss
loss = loss + self._mixing_ratio * loss_aux
return loss
def _get_metrics(self, total_loss: float, num_batches: int, reset: bool = False) -> Dict[str, float]:
"""
Gets the metrics but sets ``"loss"`` to
the total loss divided by the ``num_batches`` so that
the ``"loss"`` metric is "average loss per batch".
"""
metrics = self._model.get_metrics(reset=reset)
metrics["loss"] = float(total_loss / num_batches) if num_batches > 0 else 0.0
return metrics
    def _train_epoch(self, epoch: int) -> Dict[str, float]:
        """
        Trains one epoch and returns metrics.

        When ``epoch > self._cutoff_epoch`` the auxiliary loss is mixed in
        (multitask training); otherwise only the main-task loss is used.
        Note that main and auxiliary batches are consumed in lockstep via
        ``zip``, so the epoch ends when the shorter of the two generators
        is exhausted.
        """
        logger.info("Epoch %d/%d", epoch, self._num_epochs - 1)
        logger.info(f"Peak CPU memory usage MB: {peak_memory_mb()}")
        for gpu, memory in gpu_memory_mb().items():
            logger.info(f"GPU {gpu} memory usage MB: {memory}")
        train_loss = 0.0
        # Set the model to "train" mode.
        self._model.train()
        # Get tqdm for the training batches
        train_generator = self._iterator(self._train_data,
                                         num_epochs=1,
                                         shuffle=self._shuffle)
        train_generator_aux = self._iterator_aux(self._train_dataset_aux,
                                                 num_epochs=1,
                                                 shuffle=self._shuffle)
        multitask_training = False
        if epoch > self._cutoff_epoch:
            multitask_training = True
            logger.info("Multitask Training")
        else:
            logger.info("Training")
        num_training_batches = self._iterator.get_num_batches(self._train_data)
        num_training_batches_aux = self._iterator_aux.get_num_batches(self._train_dataset_aux)
        self._last_log = time.time()
        last_save_time = time.time()
        batches_this_epoch = 0
        if self._batch_num_total is None:
            self._batch_num_total = 0
        if self._histogram_interval is not None:
            histogram_parameters = set(self._model.get_parameters_for_histogram_tensorboard_logging())
        logger.info("Training")
        train_generator_tqdm = Tqdm.tqdm(train_generator,
                                         total=num_training_batches)
        # train_aux_generator_tqdm = Tqdm.tqdm(train_generator_aux,
        #                                      total=num_training_batches_aux)
        for batch, batch_aux in zip(train_generator_tqdm, train_generator_aux):
            batches_this_epoch += 1
            self._batch_num_total += 1
            batch_num_total = self._batch_num_total
            self._log_histograms_this_batch = self._histogram_interval is not None and (
                batch_num_total % self._histogram_interval == 0)
            self._optimizer.zero_grad()
            # Auxiliary batch only contributes to the loss after the cutoff epoch.
            if multitask_training:
                loss = self._batch_loss(batch,
                                        for_training=True,
                                        batch_aux=batch_aux)
            else:
                loss = self._batch_loss(batch, for_training=True)
            loss.backward()
            train_loss += loss.item()
            batch_grad_norm = self._rescale_gradients()
            # This does nothing if batch_num_total is None or you are using an
            # LRScheduler which doesn't update per batch.
            if self._learning_rate_scheduler:
                self._learning_rate_scheduler.step_batch(batch_num_total)
            if self._log_histograms_this_batch:
                # get the magnitude of parameter updates for logging
                # We need a copy of current parameters to compute magnitude of updates,
                # and copy them to CPU so large models won't go OOM on the GPU.
                param_updates = {name: param.detach().cpu().clone()
                                 for name, param in self._model.named_parameters()}
                self._optimizer.step()
                for name, param in self._model.named_parameters():
                    param_updates[name].sub_(param.detach().cpu())
                    update_norm = torch.norm(param_updates[name].view(-1, ))
                    param_norm = torch.norm(param.view(-1, )).cpu()
                    self._tensorboard.add_train_scalar("gradient_update/" + name,
                                                       update_norm / (param_norm + 1e-7),
                                                       batch_num_total)
            else:
                self._optimizer.step()
            # Update the description with the latest metrics
            metrics = self._get_metrics(train_loss, batches_this_epoch)
            description = self._description_from_metrics(metrics)
            train_generator_tqdm.set_description(description, refresh=False)
            # Log parameter values to Tensorboard
            if batch_num_total % self._summary_interval == 0:
                if self._should_log_parameter_statistics:
                    self._parameter_and_gradient_statistics_to_tensorboard(batch_num_total, batch_grad_norm)
                if self._should_log_learning_rate:
                    self._learning_rates_to_tensorboard(batch_num_total)
                self._tensorboard.add_train_scalar("loss/loss_train", metrics["loss"], batch_num_total)
                self._metrics_to_tensorboard(batch_num_total,
                                             {"epoch_metrics/" + k: v for k, v in metrics.items()})
            if self._log_histograms_this_batch:
                self._histograms_to_tensorboard(batch_num_total, histogram_parameters)
            # Save model if needed.
            if self._model_save_interval is not None and (
                    time.time() - last_save_time > self._model_save_interval
            ):
                last_save_time = time.time()
                self._save_checkpoint(
                        '{0}.{1}'.format(epoch, time_to_str(int(last_save_time))), [], is_best=False
                )
        return self._get_metrics(train_loss, batches_this_epoch, reset=True)
def _should_stop_early(self, metric_history: List[float]) -> bool:
"""
uses patience and the validation metric to determine if training should stop early
"""
if self._patience and self._patience < len(metric_history):
# Pylint can't figure out that in this branch `self._patience` is an int.
# pylint: disable=invalid-unary-operand-type
# Is the best score in the past N epochs worse than or equal the best score overall?
if self._validation_metric_decreases:
return min(metric_history[-self._patience:]) >= min(metric_history[:-self._patience])
else:
return max(metric_history[-self._patience:]) <= max(metric_history[:-self._patience])
return False
def _parameter_and_gradient_statistics_to_tensorboard(self, # pylint: disable=invalid-name
epoch: int,
batch_grad_norm: float) -> None:
"""
Send the mean and std of all parameters and gradients to tensorboard, as well
as logging the average gradient norm.
"""
# Log parameter values to Tensorboard
for name, param in self._model.named_parameters():
self._tensorboard.add_train_scalar("parameter_mean/" + name,
param.data.mean(),
epoch)
self._tensorboard.add_train_scalar("parameter_std/" + name, param.data.std(), epoch)
if param.grad is not None:
if is_sparse(param.grad):
# pylint: disable=protected-access
grad_data = param.grad.data._values()
else:
grad_data = param.grad.data
# skip empty gradients
if torch.prod(torch.tensor(grad_data.shape)).item() > 0: # pylint: disable=not-callable
self._tensorboard.add_train_scalar("gradient_mean/" + name,
grad_data.mean(),
epoch)
self._tensorboard.add_train_scalar("gradient_std/" + name,
grad_data.std(),
epoch)
else:
# no gradient for a parameter with sparse gradients
logger.info("No gradient for %s, skipping tensorboard logging.", name)
# norm of gradients
if batch_grad_norm is not None:
self._tensorboard.add_train_scalar("gradient_norm",
batch_grad_norm,
epoch)
def _learning_rates_to_tensorboard(self, batch_num_total: int):
"""
Send current parameter specific learning rates to tensorboard
"""
# optimizer stores lr info keyed by parameter tensor
# we want to log with parameter name
names = {param: name for name, param in self._model.named_parameters()}
for group in self._optimizer.param_groups:
if 'lr' not in group:
continue
rate = group['lr']
for param in group['params']:
# check whether params has requires grad or not
effective_rate = rate * float(param.requires_grad)
self._tensorboard.add_train_scalar(
"learning_rate/" + names[param],
effective_rate,
batch_num_total
)
def _histograms_to_tensorboard(self, epoch: int, histogram_parameters: Set[str]) -> None:
"""
Send histograms of parameters to tensorboard.
"""
for name, param in self._model.named_parameters():
if name in histogram_parameters:
self._tensorboard.add_train_histogram("parameter_histogram/" + name,
param,
epoch)
def _metrics_to_tensorboard(self,
epoch: int,
train_metrics: dict,
val_metrics: dict = None) -> None:
"""
Sends all of the train metrics (and validation metrics, if provided) to tensorboard.
"""
metric_names = set(train_metrics.keys())
if val_metrics is not None:
metric_names.update(val_metrics.keys())
val_metrics = val_metrics or {}
for name in metric_names:
train_metric = train_metrics.get(name)
if train_metric is not None:
self._tensorboard.add_train_scalar(name, train_metric, epoch)
val_metric = val_metrics.get(name)
if val_metric is not None:
self._tensorboard.add_validation_scalar(name, val_metric, epoch)
def _metrics_to_console(self, # pylint: disable=no-self-use
train_metrics: dict,
val_metrics: dict = None) -> None:
"""
Logs all of the train metrics (and validation metrics, if provided) to the console.
"""
val_metrics = val_metrics or {}
dual_message_template = "%s | %8.3f | %8.3f"
no_val_message_template = "%s | %8.3f | %8s"
no_train_message_template = "%s | %8s | %8.3f"
header_template = "%s | %-10s"
metric_names = set(train_metrics.keys())
if val_metrics:
metric_names.update(val_metrics.keys())
name_length = max([len(x) for x in metric_names])
logger.info(header_template, "Training".rjust(name_length + 13), "Validation")
for name in metric_names:
train_metric = train_metrics.get(name)
val_metric = val_metrics.get(name)
if val_metric is not None and train_metric is not None:
logger.info(dual_message_template, name.ljust(name_length), train_metric, val_metric)
elif val_metric is not None:
logger.info(no_train_message_template, name.ljust(name_length), "N/A", val_metric)
elif train_metric is not None:
logger.info(no_val_message_template, name.ljust(name_length), train_metric, "N/A")
    def _validation_loss(self) -> Tuple[float, int]:
        """
        Computes the validation loss. Returns it and the number of batches.
        """
        logger.info("Validating")
        self._model.eval()
        # Use the dedicated validation iterator when one was supplied;
        # otherwise fall back to the training iterator.
        if self._validation_iterator is not None:
            val_iterator = self._validation_iterator
        else:
            val_iterator = self._iterator
        val_generator = val_iterator(self._validation_data,
                                     num_epochs=1,
                                     shuffle=False)
        num_validation_batches = val_iterator.get_num_batches(self._validation_data)
        val_generator_tqdm = Tqdm.tqdm(val_generator,
                                       total=num_validation_batches)
        batches_this_epoch = 0
        val_loss = 0
        for batch in val_generator_tqdm:
            loss = self._batch_loss(batch, for_training=False)
            if loss is not None:
                # You shouldn't necessarily have to compute a loss for validation, so we allow for
                # `loss` to be None. We need to be careful, though - `batches_this_epoch` is
                # currently only used as the divisor for the loss function, so we can safely only
                # count those batches for which we actually have a loss. If this variable ever
                # gets used for something else, we might need to change things around a bit.
                batches_this_epoch += 1
                val_loss += loss.detach().cpu().numpy()
            # Update the description with the latest metrics
            val_metrics = self._get_metrics(val_loss, batches_this_epoch)
            description = self._description_from_metrics(val_metrics)
            val_generator_tqdm.set_description(description, refresh=False)
        return val_loss, batches_this_epoch
    def train(self) -> Dict[str, Any]:
        """
        Trains the supplied model with the supplied parameters.

        Resumes from an existing checkpoint in ``serialization_dir`` when one
        is found, runs up to ``num_epochs`` epochs with optional validation,
        early stopping and per-epoch checkpointing, and returns a dict of
        aggregate training/validation metrics.
        """
        try:
            # NOTE(review): ``_restore_checkpoint`` is defined elsewhere in this
            # class; it returns (starting epoch, past validation metrics).
            epoch_counter, validation_metric_per_epoch = self._restore_checkpoint()
        except RuntimeError:
            traceback.print_exc()
            raise ConfigurationError("Could not recover training from the checkpoint.  Did you mean to output to "
                                     "a different serialization directory or delete the existing serialization "
                                     "directory?")
        self._enable_gradient_clipping()
        self._enable_activation_logging()
        logger.info("Beginning training.")
        train_metrics: Dict[str, float] = {}
        val_metrics: Dict[str, float] = {}
        metrics: Dict[str, Any] = {}
        epochs_trained = 0
        training_start_time = time.time()
        for epoch in range(epoch_counter, self._num_epochs):
            epoch_start_time = time.time()
            train_metrics = self._train_epoch(epoch)
            if self._validation_data is not None:
                with torch.no_grad():
                    # We have a validation set, so compute all the metrics on it.
                    val_loss, num_batches = self._validation_loss()
                    val_metrics = self._get_metrics(val_loss, num_batches, reset=True)
                    # Check validation metric for early stopping
                    this_epoch_val_metric = val_metrics[self._validation_metric]
                    # Check validation metric to see if it's the best so far
                    is_best_so_far = self._is_best_so_far(this_epoch_val_metric, validation_metric_per_epoch)
                    validation_metric_per_epoch.append(this_epoch_val_metric)
                    if self._should_stop_early(validation_metric_per_epoch):
                        logger.info("Ran out of patience.  Stopping training.")
                        break
            else:
                # No validation set, so just assume it's the best so far.
                is_best_so_far = True
                val_metrics = {}
                this_epoch_val_metric = None
            self._metrics_to_tensorboard(epoch, train_metrics, val_metrics=val_metrics)
            self._metrics_to_console(train_metrics, val_metrics)
            # Create overall metrics dict
            training_elapsed_time = time.time() - training_start_time
            metrics["training_duration"] = time.strftime("%H:%M:%S", time.gmtime(training_elapsed_time))
            metrics["training_start_epoch"] = epoch_counter
            metrics["training_epochs"] = epochs_trained
            metrics["epoch"] = epoch
            for key, value in train_metrics.items():
                metrics["training_" + key] = value
            for key, value in val_metrics.items():
                metrics["validation_" + key] = value
            if is_best_so_far:
                # Update all the best_ metrics.
                # (Otherwise they just stay the same as they were.)
                metrics['best_epoch'] = epoch
                for key, value in val_metrics.items():
                    metrics["best_validation_" + key] = value
            if self._serialization_dir:
                dump_metrics(os.path.join(self._serialization_dir, f'metrics_epoch_{epoch}.json'), metrics)
            if self._learning_rate_scheduler:
                # The LRScheduler API is agnostic to whether your schedule requires a validation metric -
                # if it doesn't, the validation metric passed here is ignored.
                self._learning_rate_scheduler.step(this_epoch_val_metric, epoch)
            self._save_checkpoint(epoch, validation_metric_per_epoch, is_best=is_best_so_far)
            epoch_elapsed_time = time.time() - epoch_start_time
            logger.info("Epoch duration: %s", time.strftime("%H:%M:%S", time.gmtime(epoch_elapsed_time)))
            if epoch < self._num_epochs - 1:
                # Linear extrapolation of remaining time from epochs completed so far.
                training_elapsed_time = time.time() - training_start_time
                estimated_time_remaining = training_elapsed_time * \
                    ((self._num_epochs - epoch_counter) / float(epoch - epoch_counter + 1) - 1)
                formatted_time = str(datetime.timedelta(seconds=int(estimated_time_remaining)))
                logger.info("Estimated training time remaining: %s", formatted_time)
            epochs_trained += 1
        return metrics
def _is_best_so_far(self,
this_epoch_val_metric: float,
validation_metric_per_epoch: List[float]):
if not validation_metric_per_epoch:
return True
elif self._validation_metric_decreases:
return this_epoch_val_metric < min(validation_metric_per_epoch)
else:
return this_epoch_val_metric > max(validation_metric_per_epoch)
def _description_from_metrics(self, metrics: Dict[str, float]) -> str:
if (not self._warned_tqdm_ignores_underscores and
any(metric_name.startswith("_") for metric_name in metrics)):
logger.warning("Metrics with names beginning with \"_\" will "
"not be logged to the tqdm progress bar.")
self._warned_tqdm_ignores_underscores = True
return ', '.join(["%s: %.4f" % (name, value) for name, value in
metrics.items() if not name.startswith("_")]) + " ||"
    def _save_checkpoint(self,
                         epoch: Union[int, str],
                         val_metric_per_epoch: List[float],
                         is_best: Optional[bool] = None) -> None:
        """
        Saves a checkpoint of the model to self._serialization_dir.
        Is a no-op if self._serialization_dir is None.
        Parameters
        ----------
        epoch : Union[int, str], required.
            The epoch of training. If the checkpoint is saved in the middle
            of an epoch, the parameter is a string with the epoch and timestamp.
        val_metric_per_epoch : List[float], required.
            Per-epoch validation-metric history; stored in the training state
            so that early stopping can resume correctly after a restart.
        is_best: bool, optional (default = None)
            A flag which causes the model weights at the given epoch to
            be copied to a "best.th" file. The value of this flag should
            be based on some validation metric computed by your model.
        """
        if self._serialization_dir is not None:
            model_path = os.path.join(self._serialization_dir, "model_state_epoch_{}.th".format(epoch))
            model_state = self._model.state_dict()
            torch.save(model_state, model_path)
            # Optimizer (and LR-scheduler, when present) state is serialized
            # separately from model weights so training can be resumed.
            training_state = {'epoch': epoch,
                              'val_metric_per_epoch': val_metric_per_epoch,
                              'optimizer': self._optimizer.state_dict(),
                              'batch_num_total': self._batch_num_total}
            if self._learning_rate_scheduler is not None:
                training_state["learning_rate_scheduler"] = \
                    self._learning_rate_scheduler.lr_scheduler.state_dict()
            training_path = os.path.join(self._serialization_dir,
                                         "training_state_epoch_{}.th".format(epoch))
            torch.save(training_state, training_path)
            if is_best:
                logger.info("Best validation performance so far. "
                            "Copying weights to '%s/best.th'.", self._serialization_dir)
                shutil.copyfile(model_path, os.path.join(self._serialization_dir, "best.th"))
            # Rolling window of serialized checkpoints: once more than
            # self._num_serialized_models_to_keep exist, remove the oldest —
            # unless it qualifies as a "permanent" periodic checkpoint.
            if self._num_serialized_models_to_keep and self._num_serialized_models_to_keep >= 0:
                self._serialized_paths.append([time.time(), model_path, training_path])
                if len(self._serialized_paths) > self._num_serialized_models_to_keep:
                    paths_to_remove = self._serialized_paths.pop(0)
                    # Check to see if we should keep this checkpoint, if it has been longer
                    # then self._keep_serialized_model_every_num_seconds since the last
                    # kept checkpoint.
                    remove_path = True
                    if self._keep_serialized_model_every_num_seconds is not None:
                        save_time = paths_to_remove[0]
                        time_since_checkpoint_kept = save_time - self._last_permanent_saved_checkpoint_time
                        if time_since_checkpoint_kept > self._keep_serialized_model_every_num_seconds:
                            # We want to keep this checkpoint.
                            remove_path = False
                            self._last_permanent_saved_checkpoint_time = save_time
                    if remove_path:
                        # Delete both the model-state and training-state files.
                        for fname in paths_to_remove[1:]:
                            os.remove(fname)
def find_latest_checkpoint(self) -> Tuple[str, str]:
"""
Return the location of the latest model and training state files.
If there isn't a valid checkpoint then return None.
"""
have_checkpoint = (self._serialization_dir is not None and
any("model_state_epoch_" in x for x in os.listdir(self._serialization_dir)))
if not have_checkpoint:
return None
serialization_files = os.listdir(self._serialization_dir)
model_checkpoints = [x for x in serialization_files if "model_state_epoch" in x]
# Get the last checkpoint file. Epochs are specified as either an
# int (for end of epoch files) or with epoch and timestamp for
# within epoch checkpoints, e.g. 5.2018-02-02-15-33-42
found_epochs = [
# pylint: disable=anomalous-backslash-in-string
re.search("model_state_epoch_([0-9\.\-]+)\.th", x).group(1)
for x in model_checkpoints
]
int_epochs: Any = []
for epoch in found_epochs:
pieces = epoch.split('.')
if len(pieces) == 1:
# Just a single epoch without timestamp
int_epochs.append([int(pieces[0]), 0])
else:
# has a timestamp
int_epochs.append([int(pieces[0]), pieces[1]])
last_epoch = sorted(int_epochs, reverse=True)[0]
if last_epoch[1] == 0:
epoch_to_load = str(last_epoch[0])
else:
epoch_to_load = '{0}.{1}'.format(last_epoch[0], last_epoch[1])
model_path = os.path.join(self._serialization_dir,
"model_state_epoch_{}.th".format(epoch_to_load))
training_state_path = os.path.join(self._serialization_dir,
"training_state_epoch_{}.th".format(epoch_to_load))
return (model_path, training_state_path)
    def _restore_checkpoint(self) -> Tuple[int, List[float]]:
        """
        Restores a model from a serialization_dir to the last saved checkpoint.
        This includes an epoch count and optimizer state, which is serialized separately
        from model parameters. This function should only be used to continue training -
        if you wish to load a model for inference/load parts of a model into a new
        computation graph, you should use the native Pytorch functions:
        `` model.load_state_dict(torch.load("/path/to/model/weights.th"))``
        If ``self._serialization_dir`` does not exist or does not contain any checkpointed weights,
        this function will do nothing and return ``(0, [])``.
        Returns
        -------
        epoch: int
            The epoch at which to resume training, which should be one after the epoch
            in the saved training state.
        val_metric_per_epoch: List[float]
            The per-epoch validation-metric history restored from the training
            state (empty for checkpoints that predate this field).
        """
        latest_checkpoint = self.find_latest_checkpoint()
        if latest_checkpoint is None:
            # No checkpoint to restore, start at 0
            return 0, []
        model_path, training_state_path = latest_checkpoint
        # Load the parameters onto CPU, then transfer to GPU.
        # This avoids potential OOM on GPU for large models that
        # load parameters onto GPU then make a new GPU copy into the parameter
        # buffer. The GPU transfer happens implicitly in load_state_dict.
        model_state = torch.load(model_path, map_location=util.device_mapping(-1))
        training_state = torch.load(training_state_path, map_location=util.device_mapping(-1))
        self._model.load_state_dict(model_state)
        self._optimizer.load_state_dict(training_state["optimizer"])
        if self._learning_rate_scheduler is not None and "learning_rate_scheduler" in training_state:
            self._learning_rate_scheduler.lr_scheduler.load_state_dict(
                training_state["learning_rate_scheduler"])
        # The optimizer state was loaded on CPU above; move its tensors back
        # to the GPU to match the model parameters.
        move_optimizer_to_cuda(self._optimizer)
        # We didn't used to save `validation_metric_per_epoch`, so we can't assume
        # that it's part of the trainer state. If it's not there, an empty list is all
        # we can do.
        if "val_metric_per_epoch" not in training_state:
            logger.warning("trainer state `val_metric_per_epoch` not found, using empty list")
            val_metric_per_epoch: List[float] = []
        else:
            val_metric_per_epoch = training_state["val_metric_per_epoch"]
        # Mid-epoch checkpoints store epoch as a "N.timestamp" string; either
        # way we resume from epoch N + 1.
        if isinstance(training_state["epoch"], int):
            epoch_to_return = training_state["epoch"] + 1
        else:
            epoch_to_return = int(training_state["epoch"].split('.')[0]) + 1
        # For older checkpoints with batch_num_total missing, default to old behavior where
        # it is unchanged.
        batch_num_total = training_state.get('batch_num_total')
        if batch_num_total is not None:
            self._batch_num_total = batch_num_total
        return epoch_to_return, val_metric_per_epoch
# Requires custom from_params.
@classmethod
def from_params(cls,
model: Model,
serialization_dir: str,
iterator: DataIterator,
iterator_aux: DataIterator,
train_data: Iterable[Instance],
train_data_aux: Iterable[Instance],
mixing_ratio: float,
cutoff_epoch: int,
validation_data: Optional[Iterable[Instance]],
validation_data_aux: Optional[Iterable[Instance]],
params: Params,
validation_iterator: DataIterator = None) -> 'MultiTaskTrainer':
patience = params.pop_int("patience", None)
validation_metric = params.pop("validation_metric", "-loss")
shuffle = params.pop_bool("shuffle", True)
num_epochs = params.pop_int("num_epochs", 20)
cuda_device = params.pop_int("cuda_device", -1)
grad_norm = params.pop_float("grad_norm", None)
grad_clipping = params.pop_float("grad_clipping", None)
lr_scheduler_params = params.pop("learning_rate_scheduler", None)
if cuda_device >= 0:
model = model.cuda(cuda_device)
parameters = [[n, p] for n, p in model.named_parameters() if p.requires_grad]
optimizer = Optimizer.from_params(parameters, params.pop("optimizer"))
if lr_scheduler_params:
scheduler = LearningRateScheduler.from_params(optimizer, lr_scheduler_params)
else:
scheduler = None
num_serialized_models_to_keep = params.pop_int("num_serialized_models_to_keep", 20)
keep_serialized_model_every_num_seconds = params.pop_int(
"keep_serialized_model_every_num_seconds", None)
model_save_interval = params.pop_float("model_save_interval", None)
summary_interval = params.pop_int("summary_interval", 100)
histogram_interval = params.pop_int("histogram_interval", None)
should_log_parameter_statistics = params.pop_bool("should_log_parameter_statistics", True)
should_log_learning_rate = params.pop_bool("should_log_learning_rate", False)
params.assert_empty(cls.__name__)
return MultiTaskTrainer(model, optimizer, iterator,
train_data,
train_data_aux,
mixing_ratio,
cutoff_epoch,
validation_data,
validation_data_aux,
patience=patience,
validation_metric=validation_metric,
validation_iterator=validation_iterator,
shuffle=shuffle,
num_epochs=num_epochs,
serialization_dir=serialization_dir,
cuda_device=cuda_device,
grad_norm=grad_norm,
grad_clipping=grad_clipping,
learning_rate_scheduler=scheduler,
num_serialized_models_to_keep=num_serialized_models_to_keep,
keep_serialized_model_every_num_seconds=keep_serialized_model_every_num_seconds,
model_save_interval=model_save_interval,
summary_interval=summary_interval,
histogram_interval=histogram_interval,
should_log_parameter_statistics=should_log_parameter_statistics,
should_log_learning_rate=should_log_learning_rate,
iterator_aux=iterator_aux)
| 55,632 | 48.407638 | 114 | py |
scicite | scicite-master/scicite/training/train_multitask.py | """
The ``train`` subcommand can be used to train a model.
It requires a configuration file and a directory in
which to write the results.
.. code-block:: bash
$ allennlp train --help
usage: allennlp train [-h] -s SERIALIZATION_DIR [-r] [-o OVERRIDES]
[--file-friendly-logging]
[--include-package INCLUDE_PACKAGE]
param_path
Train the specified model on the specified dataset.
positional arguments:
param_path path to parameter file describing the model to be
trained
optional arguments:
-h, --help show this help message and exit
-s SERIALIZATION_DIR, --serialization-dir SERIALIZATION_DIR
directory in which to save the model and its logs
-r, --recover recover training from the state in serialization_dir
-o OVERRIDES, --overrides OVERRIDES
a JSON structure used to override the experiment
configuration
--include-package INCLUDE_PACKAGE
additional packages to include
--file-friendly-logging
outputs tqdm status on separate lines and slows tqdm
refresh rate
"""
import random
from typing import Dict, Iterable, Tuple
import argparse
import logging
import os
import re
import torch
from allennlp.commands.evaluate import evaluate
from allennlp.commands.subcommand import Subcommand
from allennlp.common.checks import ConfigurationError, check_for_gpu
from allennlp.common import Params
from allennlp.common.util import prepare_environment, prepare_global_logging, \
get_frozen_and_tunable_parameter_names, dump_metrics
from allennlp.data import Vocabulary
from allennlp.data.instance import Instance
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.iterators.data_iterator import DataIterator
from allennlp.models.archival import archive_model, CONFIG_NAME
from allennlp.models.model import Model, _DEFAULT_WEIGHTS
from allennlp.training.trainer import Trainer
from scicite.training.multitask_trainer import MultiTaskTrainer
from scicite.training.vocabulary_multitask import VocabularyMultitask
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class TrainMultiTask(Subcommand):
    """CLI subcommand that trains a multitask model; dispatches parsed
    arguments to :func:`train_model_from_args`."""

    def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
        # pylint: disable=protected-access
        description = '''Train the specified model on the specified dataset.'''
        train_parser = parser.add_parser(name, description=description, help='Train a model')
        # Positional argument: the experiment configuration file.
        train_parser.add_argument('param_path',
                                  type=str,
                                  help='path to parameter file describing the model to be trained')
        train_parser.add_argument('-s', '--serialization-dir',
                                  required=True,
                                  type=str,
                                  help='directory in which to save the model and its logs')
        train_parser.add_argument('-r', '--recover',
                                  action='store_true',
                                  default=False,
                                  help='recover training from the state in serialization_dir')
        train_parser.add_argument('-o', '--overrides',
                                  type=str,
                                  default="",
                                  help='a JSON structure used to override the experiment configuration')
        train_parser.add_argument('--file-friendly-logging',
                                  action='store_true',
                                  default=False,
                                  help='outputs tqdm status on separate lines and slows tqdm refresh rate')
        train_parser.set_defaults(func=train_model_from_args)
        return train_parser
def train_model_from_args(args: argparse.Namespace):
    """
    Just converts from an ``argparse.Namespace`` object to string paths.
    """
    train_model_from_file(parameter_filename=args.param_path,
                          serialization_dir=args.serialization_dir,
                          overrides=args.overrides,
                          file_friendly_logging=args.file_friendly_logging,
                          recover=args.recover)
def train_model_from_file(parameter_filename: str,
                          serialization_dir: str,
                          overrides: str = "",
                          file_friendly_logging: bool = False,
                          recover: bool = False) -> Model:
    """
    A wrapper around :func:`train_model` which loads the params from a file.
    Parameters
    ----------
    parameter_filename : ``str``
        A json parameter file specifying an AllenNLP experiment.
    serialization_dir : ``str``
        The directory in which to save results and logs. We just pass this along to
        :func:`train_model`.
    overrides : ``str``
        A JSON string that we will use to override values in the input parameter file.
    file_friendly_logging : ``bool``, optional (default=False)
        If ``True``, we make our output more friendly to saved model files. We just pass this
        along to :func:`train_model`.
    recover : ``bool``, optional (default=False)
        If ``True``, we will try to recover a training run from an existing serialization
        directory. This is only intended for use when something actually crashed during the middle
        of a run. For continuing training a model on new data, see the ``fine-tune`` command.
    """
    # Load the experiment config from a file and pass it to ``train_model``.
    params = Params.from_file(parameter_filename, overrides)
    return train_model(params, serialization_dir, file_friendly_logging, recover)
def datasets_from_params(params: Params) -> Tuple[Dict[str, Iterable[Instance]], Dict[str, Iterable[Instance]]]:
    """
    Load all the datasets specified by the config.
    Returns a pair of dicts: the main-task datasets (keys ``train``,
    ``validation``, ``test``) and the auxiliary scaffold-task datasets
    (keys ``train_aux``, ``validation_aux``, ``test_aux``).  The two train
    splits are rebalanced so both tasks end up with the same number of
    instances (the smaller one is padded by sampling with replacement).
    """
    dataset_reader = DatasetReader.from_params(params.pop('dataset_reader'))
    validation_dataset_reader_params = params.pop("validation_dataset_reader", None)
    validation_and_test_dataset_reader: DatasetReader = dataset_reader
    if validation_dataset_reader_params is not None:
        logger.info("Using a separate dataset reader to load validation and test data.")
        validation_and_test_dataset_reader = DatasetReader.from_params(validation_dataset_reader_params)
    # 1. Main-task training data.
    train_data_path = params.pop('train_data_path')
    logger.info("Reading training data from %s", train_data_path)
    train_data = dataset_reader.read(train_data_path)
    datasets: Dict[str, Iterable[Instance]] = {"train": train_data}
    # 2. Auxiliary (scaffold-task) training data.
    dataset_reader_aux = DatasetReader.from_params(params.pop('dataset_reader_aux'))
    train_data_path_aux = params.pop('train_data_path_aux')
    logger.info("Reading auxilliary training data from %s", train_data_path_aux)
    train_data_aux = dataset_reader_aux.read(train_data_path_aux)
    # If only using a fraction of the auxiliary data, subsample it (without replacement).
    aux_sample_fraction = params.pop("aux_sample_fraction", 1.0)
    if aux_sample_fraction < 1.0:
        sample_size = int(aux_sample_fraction * len(train_data_aux))
        train_data_aux = random.sample(train_data_aux, sample_size)
    # Balance the two datasets by inflating the size of the smaller dataset to the size of the larger dataset.
    train_size = len(train_data)
    aux_train_size = len(train_data_aux)
    # inflate training size to be as large as auxilary training data
    if train_size > aux_train_size:
        # Main data is larger: pad the auxiliary set with random samples (with replacement).
        difference = train_size - aux_train_size
        aux_sample = [random.choice(train_data_aux) for _ in range(difference)]
        train_data_aux = train_data_aux + aux_sample
        logger.info("Inflating auxiliary train data from {} to {} samples".format(
            aux_train_size, len(train_data_aux)))
    else:
        # Auxiliary data is larger (or equal): pad the main training set instead.
        difference = aux_train_size - train_size
        train_sample = [random.choice(train_data) for _ in range(difference)]
        train_data = train_data + train_sample
        logger.info("Inflating train data from {} to {} samples".format(
            train_size, len(train_data)))
    datasets["train"] = train_data
    datasets_aux = {"train_aux": train_data_aux}
    # Optional validation splits for both tasks.
    validation_data_path = params.pop('validation_data_path', None)
    if validation_data_path is not None:
        logger.info("Reading validation data from %s", validation_data_path)
        validation_data = validation_and_test_dataset_reader.read(validation_data_path)
        datasets["validation"] = validation_data
    # Auxiliary validation data.
    validation_data_path_aux = params.pop('validation_data_path_aux', None)
    if validation_data_path_aux is not None:
        logger.info(f"Reading auxilliary validation data from {validation_data_path_aux}")
        validation_data_aux = dataset_reader_aux.read(validation_data_path_aux)
        datasets_aux["validation_aux"] = validation_data_aux
    else:
        validation_data_aux = None
    # Optional test splits for both tasks.
    test_data_path = params.pop("test_data_path", None)
    if test_data_path is not None:
        logger.info("Reading test data from %s", test_data_path)
        test_data = validation_and_test_dataset_reader.read(test_data_path)
        datasets["test"] = test_data
    # Auxiliary test data.
    test_data_path_aux = params.pop("test_data_path_aux", None)
    if test_data_path_aux is not None:
        logger.info(f"Reading auxillary test data from {test_data_path_aux}")
        test_data_aux = dataset_reader_aux.read(test_data_path_aux)
        datasets_aux["test_aux"] = test_data_aux
    else:
        test_data_aux = None
    return datasets, datasets_aux
def create_serialization_dir(params: Params, serialization_dir: str, recover: bool) -> None:
    """
    This function creates the serialization directory if it doesn't exist. If it already exists
    and is non-empty, then it verifies that we're recovering from a training with an identical configuration.
    Parameters
    ----------
    params: ``Params``
        A parameter object specifying an AllenNLP Experiment.
    serialization_dir: ``str``
        The directory in which to save results and logs.
    recover: ``bool``
        If ``True``, we will try to recover from an existing serialization directory, and crash if
        the directory doesn't exist, or doesn't match the configuration we're given.
    """
    if os.path.exists(serialization_dir) and os.listdir(serialization_dir):
        # Directory exists and is non-empty: only allowed when recovering.
        if not recover:
            raise ConfigurationError(f"Serialization directory ({serialization_dir}) already exists and is "
                                     f"not empty. Specify --recover to recover training from existing output.")
        logger.info(f"Recovering from prior training at {serialization_dir}.")
        recovered_config_file = os.path.join(serialization_dir, CONFIG_NAME)
        if not os.path.exists(recovered_config_file):
            raise ConfigurationError("The serialization directory already exists but doesn't "
                                     "contain a config.json. You probably gave the wrong directory.")
        else:
            loaded_params = Params.from_file(recovered_config_file)
            # Check whether any of the training configuration differs from the configuration we are
            # resuming. If so, warn the user that training may fail.
            fail = False
            flat_params = params.as_flat_dict()
            flat_loaded = loaded_params.as_flat_dict()
            # Report keys present only in the new config, only in the saved
            # config, and keys whose values differ — then fail if any mismatch.
            for key in flat_params.keys() - flat_loaded.keys():
                logger.error(f"Key '{key}' found in training configuration but not in the serialization "
                             f"directory we're recovering from.")
                fail = True
            for key in flat_loaded.keys() - flat_params.keys():
                logger.error(f"Key '{key}' found in the serialization directory we're recovering from "
                             f"but not in the training config.")
                fail = True
            for key in flat_params.keys():
                if flat_params.get(key, None) != flat_loaded.get(key, None):
                    logger.error(f"Value for '{key}' in training configuration does not match that the value in "
                                 f"the serialization directory we're recovering from: "
                                 f"{flat_params[key]} != {flat_loaded[key]}")
                    fail = True
            if fail:
                raise ConfigurationError("Training configuration does not match the configuration we're "
                                         "recovering from.")
    else:
        if recover:
            raise ConfigurationError(f"--recover specified but serialization_dir ({serialization_dir}) "
                                     "does not exist. There is nothing to recover from.")
        os.makedirs(serialization_dir, exist_ok=True)
def train_model(params: Params,
                serialization_dir: str,
                file_friendly_logging: bool = False,
                recover: bool = False) -> Model:
    """
    Trains the model specified in the given :class:`Params` object, using the data and training
    parameters also specified in that object, and saves the results in ``serialization_dir``.
    Parameters
    ----------
    params : ``Params``
        A parameter object specifying an AllenNLP Experiment.
    serialization_dir : ``str``
        The directory in which to save results and logs.
    file_friendly_logging : ``bool``, optional (default=False)
        If ``True``, we add newlines to tqdm output, even on an interactive terminal, and we slow
        down tqdm's output to only once every 10 seconds.
    recover : ``bool``, optional (default=False)
        If ``True``, we will try to recover a training run from an existing serialization
        directory. This is only intended for use when something actually crashed during the middle
        of a run. For continuing training a model on new data, see the ``fine-tune`` command.
    Returns
    -------
    best_model: ``Model``
        The model with the best epoch weights.
    """
    # Environment / directory / logging setup, and persist the config.
    prepare_environment(params)
    create_serialization_dir(params, serialization_dir, recover)
    prepare_global_logging(serialization_dir, file_friendly_logging)
    check_for_gpu(params.get('trainer').get('cuda_device', -1))
    params.to_file(os.path.join(serialization_dir, CONFIG_NAME))
    # Load main and auxiliary (scaffold) datasets.
    all_datasets, all_datasets_aux = datasets_from_params(params)
    datasets_for_vocab_creation = set(params.pop("datasets_for_vocab_creation", all_datasets))
    datasets_for_vocab_creation_aux = set(params.pop("auxillary_datasets_for_vocab_creation", all_datasets_aux))
    mixing_ratio = params.pop("mixing_ratio")
    cutoff_epoch = params.pop("cutoff_epoch", -1)
    for dataset in datasets_for_vocab_creation:
        if dataset not in all_datasets:
            raise ConfigurationError(f"invalid 'dataset_for_vocab_creation' {dataset}")
    logger.info("From dataset instances, %s will be considered for vocabulary creation.",
                ", ".join(datasets_for_vocab_creation))
    # Build a joint vocabulary over the selected main and auxiliary instances.
    vocab = VocabularyMultitask.from_params(
            params.pop("vocabulary", {}),
            (instance for key, dataset in all_datasets.items()
             for instance in dataset
             if key in datasets_for_vocab_creation),
            instances_aux=(instance for key, dataset in all_datasets_aux.items()
                           for instance in dataset
                           if key in datasets_for_vocab_creation_aux)
    )
    model = Model.from_params(vocab=vocab, params=params.pop('model'))
    # Initializing the model can have side effect of expanding the vocabulary
    vocab.save_to_files(os.path.join(serialization_dir, "vocabulary"))
    # Separate iterators for the main task, the auxiliary task, and (optionally) validation.
    iterator = DataIterator.from_params(params.pop("iterator"))
    iterator.index_with(vocab)
    iterator_aux = DataIterator.from_params(params.pop("iterator_aux"))
    iterator_aux.index_with(vocab)
    validation_iterator_params = params.pop("validation_iterator", None)
    if validation_iterator_params:
        validation_iterator = DataIterator.from_params(validation_iterator_params)
        validation_iterator.index_with(vocab)
    else:
        validation_iterator = None
    train_data = all_datasets.get('train')
    validation_data = all_datasets.get('validation')
    test_data = all_datasets.get('test')
    train_data_aux = all_datasets_aux.get('train_aux')
    validation_data_aux = all_datasets_aux.get('validation_aux')
    test_data_aux = all_datasets_aux.get('test_aux')
    trainer_params = params.pop("trainer")
    # Freeze any parameters whose names match a "no_grad" regex.
    no_grad_regexes = trainer_params.pop("no_grad", ())
    for name, parameter in model.named_parameters():
        if any(re.search(regex, name) for regex in no_grad_regexes):
            parameter.requires_grad_(False)
    frozen_parameter_names, tunable_parameter_names = \
                   get_frozen_and_tunable_parameter_names(model)
    logger.info("Following parameters are Frozen  (without gradient):")
    for name in frozen_parameter_names:
        logger.info(name)
    logger.info("Following parameters are Tunable (with gradient):")
    for name in tunable_parameter_names:
        logger.info(name)
    trainer = MultiTaskTrainer.from_params(model=model,
                                           serialization_dir=serialization_dir,
                                           iterator=iterator,
                                           iterator_aux=iterator_aux,
                                           train_data=train_data,
                                           train_data_aux=train_data_aux,
                                           mixing_ratio=mixing_ratio,
                                           cutoff_epoch=cutoff_epoch,
                                           validation_data_aux=validation_data_aux,
                                           validation_data=validation_data,
                                           params=trainer_params,
                                           validation_iterator=validation_iterator)
    evaluate_on_test = params.pop_bool("evaluate_on_test", False)
    params.assert_empty('base train command')
    try:
        metrics = trainer.train()
    except KeyboardInterrupt:
        # if we have completed an epoch, try to create a model archive.
        if os.path.exists(os.path.join(serialization_dir, _DEFAULT_WEIGHTS)):
            logging.info("Training interrupted by the user. Attempting to create "
                         "a model archive using the current best epoch weights.")
            archive_model(serialization_dir, files_to_archive=params.files_to_archive)
        raise
    # Now tar up results
    archive_model(serialization_dir, files_to_archive=params.files_to_archive)
    # Reload the best-epoch weights into the model for final evaluation.
    logger.info("Loading the best epoch weights.")
    best_model_state_path = os.path.join(serialization_dir, 'best.th')
    best_model_state = torch.load(best_model_state_path)
    best_model = model
    best_model.load_state_dict(best_model_state)
    # Optional evaluation on the main-task test set.
    if test_data and evaluate_on_test:
        logger.info("The model will be evaluated using the best epoch weights.")
        test_metrics = evaluate(
                best_model, test_data, validation_iterator or iterator,
                cuda_device=trainer._cuda_devices[0]  # pylint: disable=protected-access
        )
        for key, value in test_metrics.items():
            metrics["test_" + key] = value
    elif test_data:
        logger.info("To evaluate on the test set after training, pass the "
                    "'evaluate_on_test' flag, or use the 'allennlp evaluate' command.")
    # Optional evaluation on the auxiliary-task test set.
    if test_data_aux and evaluate_on_test:
        test_data_aux.index_instances(vocab)
        test_metricx_aux = evaluate(best_model, test_data_aux, iterator_aux,
                                    cuda_device=trainer._cuda_devices[0])  # pylint: disable=protected-access
        for key, value in test_metricx_aux.items():
            metrics["test_aux_" + key] = value
    elif test_data_aux:
        logger.info("To evaluate on the auxillary test set after training, pass the "
                    "'evaluate_on_test' flag, or use the 'allennlp evaluate' command.")
    dump_metrics(os.path.join(serialization_dir, "metrics.json"), metrics, log=True)
    return best_model
| 20,414 | 45.083521 | 113 | py |
scicite | scicite-master/scicite/models/scaffold_bilstm_attention_classifier.py | import operator
from copy import deepcopy
from distutils.version import StrictVersion
from typing import Dict, Optional
import allennlp
import numpy as np
import torch
import torch.nn.functional as F
from allennlp.common import Params
from allennlp.data import Instance
from allennlp.data import Vocabulary
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, LabelField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Seq2VecEncoder, Seq2SeqEncoder, TextFieldEmbedder, Embedding, TimeDistributed
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy, F1Measure
from overrides import overrides
from torch.nn import Parameter, Linear
from scicite.constants import Scicite_Format_Nested_Jsonlines
import torch.nn as nn
@Model.register("scaffold_bilstm_attention_classifier")
class ScaffoldBilstmAttentionClassifier(Model):
"""
This ``Model`` performs text classification for citation intents. We assume we're given a
citation text, and we predict some output label.
"""
    def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 citation_text_encoder: Seq2SeqEncoder,
                 classifier_feedforward: FeedForward,
                 classifier_feedforward_2: FeedForward,
                 classifier_feedforward_3: FeedForward,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 report_auxiliary_metrics: bool = False,
                 predict_mode: bool = False,
                 ) -> None:
        """
        Args:
            vocab: vocabulary with namespaces ``labels``, ``section_labels``
                and ``cite_worthiness_labels``
            text_field_embedder: embeds the citation text tokens
            citation_text_encoder: seq2seq encoder over the embedded text
            classifier_feedforward: output head for the main (intent) task
            classifier_feedforward_2: output head for the section-title scaffold task
            classifier_feedforward_3: output head for the cite-worthiness scaffold task
            initializer: parameter initializer applied at the end of construction
            regularizer: optional parameter regularizer
            report_auxiliary_metrics: report metrics for aux (scaffold) tasks
            predict_mode: predict unlabeled examples
        """
        super(ScaffoldBilstmAttentionClassifier, self).__init__(vocab, regularizer)
        self.text_field_embedder = text_field_embedder
        # Class counts for the main task and the two scaffold tasks.
        self.num_classes = self.vocab.get_vocab_size("labels")
        self.num_classes_sections = self.vocab.get_vocab_size("section_labels")
        self.num_classes_cite_worthiness = self.vocab.get_vocab_size("cite_worthiness_labels")
        self.citation_text_encoder = citation_text_encoder
        self.classifier_feedforward = classifier_feedforward
        self.classifier_feedforward_2 = classifier_feedforward_2
        self.classifier_feedforward_3 = classifier_feedforward_3
        self.label_accuracy = CategoricalAccuracy()
        self.label_f1_metrics = {}
        self.label_f1_metrics_sections = {}
        self.label_f1_metrics_cite_worthiness = {}
        # One F1Measure per label for each task, keyed by the label string.
        for i in range(self.num_classes):
            self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] =\
                F1Measure(positive_label=i)
        for i in range(self.num_classes_sections):
            self.label_f1_metrics_sections[vocab.get_token_from_index(index=i, namespace="section_labels")] =\
                F1Measure(positive_label=i)
        for i in range(self.num_classes_cite_worthiness):
            self.label_f1_metrics_cite_worthiness[vocab.get_token_from_index(index=i, namespace="cite_worthiness_labels")] =\
                F1Measure(positive_label=i)
        self.loss = torch.nn.CrossEntropyLoss()
        # Attention over the encoder outputs (Attention is defined elsewhere in this module).
        self.attention_seq2seq = Attention(citation_text_encoder.get_output_dim())
        self.report_auxiliary_metrics = report_auxiliary_metrics
        self.predict_mode = predict_mode
        initializer(self)
    @overrides
    def forward(self,
                citation_text: Dict[str, torch.LongTensor],
                labels: torch.LongTensor = None,
                lexicon_features: Optional[torch.IntTensor] = None,
                year_diff: Optional[torch.Tensor] = None,
                citing_paper_id: Optional[str] = None,
                cited_paper_id: Optional[str] = None,
                citation_excerpt_index: Optional[str] = None,
                citation_id: Optional[str] = None,
                section_label: Optional[torch.Tensor] = None,
                is_citation: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
        """
        Forward pass of the model
        Args:
            citation_text: citation text of shape (batch, sent_len, embedding_dim)
            labels: labels
            lexicon_features: lexicon sparse features (batch, lexicon_feature_len)
            year_diff: difference between cited and citing years
            citing_paper_id: id of the citing paper
            cited_paper_id: id of the cited paper
            citation_excerpt_index: index of the excerpt
            citation_id: unique id of the citation
            section_label: label of the section
            is_citation: citation worthiness label

        NOTE: each task branch below rebuilds ``output_dict`` from scratch, so
        if more than one of ``labels`` / ``section_label`` / ``is_citation`` is
        provided, only the last applicable branch's logits and loss survive.
        """
        # pylint: disable=arguments-differ
        citation_text_embedding = self.text_field_embedder(citation_text)
        citation_text_mask = util.get_text_field_mask(citation_text)
        # shape: [batch, sent, output_dim]
        encoded_citation_text = self.citation_text_encoder(citation_text_embedding, citation_text_mask)
        # shape: [batch, output_dim]
        attn_dist, encoded_citation_text = self.attention_seq2seq(encoded_citation_text, return_attn_distribution=True)
        # In training mode, labels are the citation intents
        # If in predict_mode, predict the citation intents
        if labels is not None:
            # Main task: citation-intent classification.
            logits = self.classifier_feedforward(encoded_citation_text)
            class_probs = F.softmax(logits, dim=1)
            output_dict = {"logits": logits}
            loss = self.loss(logits, labels)
            output_dict["loss"] = loss
            # compute F1 per label
            for i in range(self.num_classes):
                metric = self.label_f1_metrics[self.vocab.get_token_from_index(index=i, namespace="labels")]
                metric(class_probs, labels)
            output_dict['labels'] = labels
        if section_label is not None:  # this is the first scaffold task
            # Scaffold task 1: section-title prediction.
            logits = self.classifier_feedforward_2(encoded_citation_text)
            class_probs = F.softmax(logits, dim=1)
            output_dict = {"logits": logits}
            loss = self.loss(logits, section_label)
            output_dict["loss"] = loss
            for i in range(self.num_classes_sections):
                metric = self.label_f1_metrics_sections[self.vocab.get_token_from_index(index=i, namespace="section_labels")]
                metric(logits, section_label)
        if is_citation is not None:  # second scaffold task
            # Scaffold task 2: citation-worthiness prediction.
            logits = self.classifier_feedforward_3(encoded_citation_text)
            class_probs = F.softmax(logits, dim=1)
            output_dict = {"logits": logits}
            loss = self.loss(logits, is_citation)
            output_dict["loss"] = loss
            for i in range(self.num_classes_cite_worthiness):
                metric = self.label_f1_metrics_cite_worthiness[
                    self.vocab.get_token_from_index(index=i, namespace="cite_worthiness_labels")]
                metric(logits, is_citation)
        if self.predict_mode:
            # Inference on unlabeled data: emit main-task logits plus metadata
            # (ids, attention distribution, raw token ids) for analysis.
            logits = self.classifier_feedforward(encoded_citation_text)
            class_probs = F.softmax(logits, dim=1)
            output_dict = {"logits": logits}
        output_dict['citing_paper_id'] = citing_paper_id
        output_dict['cited_paper_id'] = cited_paper_id
        output_dict['citation_excerpt_index'] = citation_excerpt_index
        output_dict['citation_id'] = citation_id
        output_dict['attn_dist'] = attn_dist  # also return attention distribution for analysis
        output_dict['citation_text'] = citation_text['tokens']
        return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Turn raw logits into human-readable predictions.

    Adds predicted label strings, per-class probabilities, the detokenized
    citation text, and the full label vocabulary to ``output_dict``.
    """
    probs = F.softmax(output_dict['logits'], dim=-1)
    best_indices = np.argmax(probs.cpu().data.numpy(), axis=-1)
    predicted_labels = [self.vocab.get_token_from_index(idx, namespace="labels")
                        for idx in best_indices]
    output_dict['probabilities'] = probs
    output_dict['positive_labels'] = predicted_labels
    output_dict['prediction'] = predicted_labels
    # Map token ids back to surface strings for each instance in the batch.
    output_dict['citation_text'] = [
        [self.vocab.get_token_from_index(token_id.item()) for token_id in row]
        for row in output_dict['citation_text']
    ]
    batch_size = output_dict['logits'].shape[0]
    output_dict['all_labels'] = [self.vocab.get_index_to_token_vocabulary(namespace="labels")
                                 for _ in range(batch_size)]
    return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
    """Collect precision/recall/F1 for every label of every task.

    Returns a flat dict with ``<label>_P`` / ``_R`` / ``_F1`` entries for the
    main task plus ``average_F1``, a macro average that excludes the ``none``
    label. When ``self.report_auxiliary_metrics`` is set, the same summary is
    added for the two scaffold tasks under the ``aux-sec--`` and
    ``aux-worth--`` prefixes.

    The original version repeated the identical aggregation loop three times;
    it is factored into one helper here, with byte-identical keys and values.
    """
    metric_dict = {}

    def _summarize(f1_metrics, prefix=''):
        # Fold each label's (P, R, F1) into metric_dict and return the
        # macro-F1, excluding the `none` label from the average.
        sum_f1 = 0.0
        for name, metric in f1_metrics.items():
            metric_val = metric.get_metric(reset)
            metric_dict[prefix + name + '_P'] = metric_val[0]
            metric_dict[prefix + name + '_R'] = metric_val[1]
            metric_dict[prefix + name + '_F1'] = metric_val[2]
            if name != 'none':  # do not consider `none` label in averaging F1
                sum_f1 += metric_val[2]
        names = list(f1_metrics.keys())
        total_len = len(names) if 'none' not in names else len(names) - 1
        return sum_f1 / total_len

    metric_dict['average_F1'] = _summarize(self.label_f1_metrics)
    if self.report_auxiliary_metrics:
        metric_dict['aux-sec--' + 'average_F1'] = _summarize(
            self.label_f1_metrics_sections, 'aux-sec--')
        metric_dict['aux-worth--' + 'average_F1'] = _summarize(
            self.label_f1_metrics_cite_worthiness, 'aux-worth--')
    return metric_dict
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'ScaffoldBilstmAttentionClassifier':
    """Build the classifier from an experiment ``Params`` configuration."""
    with_elmo = params.pop_bool("with_elmo", False)
    # The embedder config key differs depending on whether ELMo is enabled.
    embedder_key = "elmo_text_field_embedder" if with_elmo else "text_field_embedder"
    text_field_embedder = TextFieldEmbedder.from_params(params.pop(embedder_key), vocab=vocab)
    citation_text_encoder = Seq2SeqEncoder.from_params(params.pop("citation_text_encoder"))
    feedforwards = [FeedForward.from_params(params.pop(key))
                    for key in ("classifier_feedforward",
                                "classifier_feedforward_2",
                                "classifier_feedforward_3")]
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    # These switches are popped so the config is consumed, even though only a
    # subset of them is forwarded to the constructor (matching the original).
    use_lexicon = params.pop_bool("use_lexicon_features", False)
    use_sparse_lexicon_features = params.pop_bool("use_sparse_lexicon_features", False)
    data_format = params.pop('data_format')
    report_auxiliary_metrics = params.pop_bool("report_auxiliary_metrics", False)
    predict_mode = params.pop_bool("predict_mode", False)
    print(f"pred mode: {predict_mode}")
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               citation_text_encoder=citation_text_encoder,
               classifier_feedforward=feedforwards[0],
               classifier_feedforward_2=feedforwards[1],
               classifier_feedforward_3=feedforwards[2],
               initializer=initializer,
               regularizer=regularizer,
               report_auxiliary_metrics=report_auxiliary_metrics,
               predict_mode=predict_mode)
def new_parameter(*size):
    """Return a freshly allocated trainable ``Parameter`` of shape ``size``.

    The tensor is initialized in place with Xavier/Glorot normal
    initialization and is trainable (``requires_grad=True`` by default).
    """
    tensor = torch.FloatTensor(*size)
    param = Parameter(tensor)
    torch.nn.init.xavier_normal_(param)
    return param
class Attention(nn.Module):
    """Simple multiplicative (dot-product) attention.

    Learns a single attention vector and uses it to collapse the
    one-before-last dimension of the input into a weighted sum, e.g.
    ``x_in.shape == [batch, sent_len, dim] -> output.shape == [batch, dim]``.
    Used here to aggregate the lexicon-aware sentence representation; in two
    steps it converts ``[batch, sent_len, num_words_in_category,
    num_categories]`` into ``[batch, num_categories]``.
    """

    def __init__(self, attention_size):
        super(Attention, self).__init__()
        self.attention = new_parameter(attention_size, 1)

    def forward(self, x_in, reduction_dim=-2, return_attn_distribution=False):
        """Collapse ``reduction_dim`` of ``x_in`` into an attention-weighted sum.

        When ``return_attn_distribution`` is True, also return the attention
        distribution reshaped to ``[batch, -1]`` for later analysis.
        """
        # Raw scores: project each position onto the learned attention vector.
        scores = torch.matmul(x_in, self.attention).squeeze()
        # Softmax over the last dim, then restore a trailing singleton dim so
        # the weights broadcast against x_in.
        weights = F.softmax(scores.squeeze(), dim=-1).unsqueeze(-1)
        weighted_sum = torch.sum(x_in * weights, dim=reduction_dim)
        if not return_attn_distribution:
            return weighted_sum
        return weights.reshape(x_in.shape[0], -1), weighted_sum
| 15,619 | 47.8125 | 125 | py |
scicite | scicite-master/scicite/dataset_readers/citation_data_reader_aclarc.py | """ Data reader for AllenNLP """
from typing import Dict, List
import json
import jsonlines
import logging
import torch
from allennlp.data import Field
from overrides import overrides
from allennlp.common import Params
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import LabelField, TextField, MultiLabelField, ListField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Tokenizer, WordTokenizer
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer, ELMoTokenCharactersIndexer
from scicite.resources.lexicons import ALL_ACTION_LEXICONS, ALL_CONCEPT_LEXICONS
from scicite.data import DataReaderJurgens
from scicite.data import read_jurgens_jsonline
from scicite.compute_features import is_in_lexicon
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
from scicite.constants import S2_CATEGORIES, NONE_LABEL_NAME
@DatasetReader.register("aclarc_dataset_reader")
class AclarcDatasetReader(DatasetReader):
    """Dataset reader for the ACL-ARC citation-intent corpus.

    Reads a JSON-lines file of citation excerpts (Jurgens et al. format) and
    produces classification ``Instance`` s whose ``label`` is the citation
    intent.

    Parameters
    ----------
    lazy : ``bool`` (optional, default=False)
        Passed to ``DatasetReader``. If ``True``, training starts sooner but
        batches are slower; also supports datasets too large to fit in memory.
    tokenizer : ``Tokenizer``, optional
        Tokenizer used to split the citation text into tokens.
        Defaults to ``WordTokenizer()``.
    token_indexers : ``Dict[str, TokenIndexer]``, optional
        Indexers defining input token representations. Defaults to
        ``{"tokens": SingleIdTokenIndexer()}``.
    """

    def __init__(self,
                 lazy: bool = False,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 use_lexicon_features: bool = False,
                 use_sparse_lexicon_features: bool = False,
                 with_elmo: bool = False
                 ) -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer or WordTokenizer()
        # ELMo needs its character indexer alongside the regular word ids.
        if with_elmo:
            self._token_indexers = {"elmo": ELMoTokenCharactersIndexer(),
                                    "tokens": SingleIdTokenIndexer()}
        else:
            self._token_indexers = {"tokens": SingleIdTokenIndexer()}
        self.use_lexicon_features = use_lexicon_features
        self.use_sparse_lexicon_features = use_sparse_lexicon_features
        if self.use_lexicon_features or self.use_sparse_lexicon_features:
            self.lexicons = {**ALL_ACTION_LEXICONS, **ALL_CONCEPT_LEXICONS}

    @overrides
    def _read(self, file_path):
        for raw_record in jsonlines.open(file_path):
            parsed = read_jurgens_jsonline(raw_record)
            yield self.text_to_instance(
                citation_text=parsed.text,
                intent=parsed.intent,
                citing_paper_id=parsed.citing_paper_id,
                cited_paper_id=parsed.cited_paper_id
            )

    @overrides
    def text_to_instance(self,
                         citation_text: str,
                         citing_paper_id: str,
                         cited_paper_id: str,
                         intent: List[str] = None,
                         citing_paper_title: str = None,
                         cited_paper_title: str = None,
                         citing_paper_year: int = None,
                         cited_paper_year: int = None,
                         citing_author_ids: List[str] = None,
                         cited_author_ids: List[str] = None,
                         extended_context: str = None,
                         section_number: int = None,
                         section_title: str = None,
                         sents_before: List[str] = None,
                         sents_after: List[str] = None,
                         cite_marker_begin: int = None,
                         cite_marker_end: int = None,
                         cleaned_cite_text: str = None,
                         citation_excerpt_index: str = None,
                         citation_id: str = None,
                         venue: str = None) -> Instance:  # type: ignore
        tokenized_text = self._tokenizer.tokenize(citation_text)
        fields = {
            'citation_text': TextField(tokenized_text, self._token_indexers),
        }
        if self.use_sparse_lexicon_features:
            # Lowercased surface strings for lexicon matching.
            lowered = [token.text.lower() for token in tokenized_text]
            lexicon_features, _ = is_in_lexicon(self.lexicons, lowered)
            fields["lexicon_features"] = ListField(
                [LabelField(feature, skip_indexing=True) for feature in lexicon_features])
        if intent is not None:
            fields['labels'] = LabelField(intent)
        # Year difference is only meaningful when both years are valid (> -1).
        have_years = (citing_paper_year and cited_paper_year and
                      citing_paper_year > -1 and cited_paper_year > -1)
        year_diff = citing_paper_year - cited_paper_year if have_years else -1
        fields['year_diff'] = ArrayField(torch.Tensor([year_diff]))
        for key, value in (('citing_paper_id', citing_paper_id),
                           ('cited_paper_id', cited_paper_id),
                           ('citation_excerpt_index', citation_excerpt_index),
                           ('citation_id', citation_id)):
            fields[key] = MetadataField(value)
        return Instance(fields)

    @classmethod
    def from_params(cls, params: Params) -> 'AclarcDatasetReader':
        """Construct the reader from a configuration ``Params`` object."""
        lazy = params.pop('lazy', False)
        tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
        use_lexicon_features = params.pop_bool("use_lexicon_features", False)
        use_sparse_lexicon_features = params.pop_bool("use_sparse_lexicon_features", False)
        with_elmo = params.pop_bool("with_elmo", False)
        params.assert_empty(cls.__name__)
        return cls(lazy=lazy, tokenizer=tokenizer,
                   use_lexicon_features=use_lexicon_features,
                   use_sparse_lexicon_features=use_sparse_lexicon_features,
                   with_elmo=with_elmo)
| 6,739 | 44.234899 | 109 | py |
scicite | scicite-master/scicite/dataset_readers/citation_data_reader_scicite.py | """ Data reader for AllenNLP """
from typing import Dict, List
import json
import logging
import torch
from allennlp.data import Field
from overrides import overrides
from allennlp.common import Params
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import LabelField, TextField, MultiLabelField, ListField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Tokenizer, WordTokenizer
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer, ELMoTokenCharactersIndexer
from scicite.resources.lexicons import ALL_ACTION_LEXICONS, ALL_CONCEPT_LEXICONS
from scicite.data import DataReaderJurgens
from scicite.data import DataReaderS2, DataReaderS2ExcerptJL
from scicite.compute_features import is_in_lexicon
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
from scicite.constants import S2_CATEGORIES, NONE_LABEL_NAME
@DatasetReader.register("scicite_datasetreader")
class SciciteDatasetReader(DatasetReader):
    """
    Reads a JSON-lines file containing citations from the Semantic Scholar
    database and creates a dataset suitable for document classification.

    The output of ``read`` is a list of ``Instance`` s with the fields:
        citation_text: ``TextField``
        labels: ``LabelField`` (or ``MultiLabelField`` when ``multilabel``)
    where the label is derived from the methodology/comparison labels.

    Parameters
    ----------
    lazy : ``bool`` (optional, default=False)
        Passed to ``DatasetReader``. If this is ``True``, training will start
        sooner, but will take longer per batch. Also allows training with
        datasets that are too large to fit in memory.
    tokenizer : ``Tokenizer``, optional
        Tokenizer used to split the citation text. Defaults to
        ``WordTokenizer()``.
    use_lexicon_features / use_sparse_lexicon_features : ``bool``
        Whether to add (sparse) lexicon-match features to each instance.
    multilabel : ``bool``
        If ``True``, the label field is a ``MultiLabelField`` over
        ``S2_CATEGORIES``; otherwise a single ``LabelField``.
    with_elmo : ``bool``
        Adds the ELMo character indexer alongside the word-id indexer.
    reader_format : can be `flat` or `nested`. `flat` for flat json format
        and `nested` for the format where each object contains multiple
        excerpts. Any other value raises ``ValueError``.
    """
    def __init__(self,
                 lazy: bool = False,
                 tokenizer: Tokenizer = None,
                 use_lexicon_features: bool=False,
                 use_sparse_lexicon_features: bool = False,
                 multilabel: bool = False,
                 with_elmo: bool = False,
                 reader_format: str = 'flat') -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer or WordTokenizer()
        if with_elmo:
            self._token_indexers = {"elmo": ELMoTokenCharactersIndexer(),
                                    "tokens": SingleIdTokenIndexer()}
        else:
            self._token_indexers = {"tokens": SingleIdTokenIndexer()}
        self.use_lexicon_features = use_lexicon_features
        self.use_sparse_lexicon_features = use_sparse_lexicon_features
        if self.use_lexicon_features or self.use_sparse_lexicon_features:
            self.lexicons = {**ALL_ACTION_LEXICONS, **ALL_CONCEPT_LEXICONS}
        self.multilabel = multilabel
        self.reader_format = reader_format

    @overrides
    def _read(self, jsonl_file: str):
        if self.reader_format == 'flat':
            reader_s2 = DataReaderS2ExcerptJL(jsonl_file)
        elif self.reader_format == 'nested':
            reader_s2 = DataReaderS2(jsonl_file)
        else:
            # Bug fix: previously an unknown format fell through to a
            # confusing NameError on `reader_s2`; fail fast instead.
            raise ValueError(f"Unknown reader_format: '{self.reader_format}'. "
                             f"Expected 'flat' or 'nested'.")
        for citation in reader_s2.read():
            yield self.text_to_instance(
                citation_text=citation.text,
                intent=citation.intent,
                citing_paper_id=citation.citing_paper_id,
                cited_paper_id=citation.cited_paper_id,
                citation_excerpt_index=citation.citation_excerpt_index
            )

    @overrides
    def text_to_instance(self,
                         citation_text: str,
                         citing_paper_id: str,
                         cited_paper_id: str,
                         intent: List[str] = None,
                         citing_paper_title: str = None,
                         cited_paper_title: str = None,
                         citing_paper_year: int = None,
                         cited_paper_year: int = None,
                         citing_author_ids: List[str] = None,
                         cited_author_ids: List[str] = None,
                         extended_context: str = None,
                         section_number: int = None,
                         section_title: str = None,
                         cite_marker_begin: int = None,
                         cite_marker_end: int = None,
                         sents_before: List[str] = None,
                         sents_after: List[str] = None,
                         cleaned_cite_text: str = None,
                         citation_excerpt_index: str = None,
                         venue: str = None) -> Instance:  # type: ignore
        citation_tokens = self._tokenizer.tokenize(citation_text)
        fields = {
            'citation_text': TextField(citation_tokens, self._token_indexers),
        }
        if self.use_sparse_lexicon_features:
            # convert to regular string for lexicon matching
            sent = [token.text.lower() for token in citation_tokens]
            lexicon_features, _ = is_in_lexicon(self.lexicons, sent)
            fields["lexicon_features"] = ListField([LabelField(feature, skip_indexing=True)
                                                    for feature in lexicon_features])
        if intent:
            if self.multilabel:
                fields['labels'] = MultiLabelField([S2_CATEGORIES[e] for e in intent], skip_indexing=True,
                                                   num_labels=len(S2_CATEGORIES))
            else:
                if not isinstance(intent, str):
                    # Bug fix: message previously contained a stray literal
                    # "f'" ("Got: f'{intent}'").
                    raise TypeError(f"Undefined label format. Should be a string. Got: '{intent}'")
                fields['labels'] = LabelField(intent)
        # Year difference is only meaningful when both years are valid (> -1).
        if citing_paper_year and cited_paper_year and \
                citing_paper_year > -1 and cited_paper_year > -1:
            year_diff = citing_paper_year - cited_paper_year
        else:
            year_diff = -1
        fields['year_diff'] = ArrayField(torch.Tensor([year_diff]))
        fields['citing_paper_id'] = MetadataField(citing_paper_id)
        fields['cited_paper_id'] = MetadataField(cited_paper_id)
        fields['citation_excerpt_index'] = MetadataField(citation_excerpt_index)
        fields['citation_id'] = MetadataField(f"{citing_paper_id}>{cited_paper_id}")
        return Instance(fields)

    @classmethod
    def from_params(cls, params: Params) -> 'SciciteDatasetReader':
        """Construct the reader from a configuration ``Params`` object."""
        lazy = params.pop('lazy', False)
        tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
        use_lexicon_features = params.pop_bool("use_lexicon_features", False)
        use_sparse_lexicon_features = params.pop_bool("use_sparse_lexicon_features", False)
        multilabel = params.pop_bool("multilabel")
        with_elmo = params.pop_bool("with_elmo", False)
        reader_format = params.pop("reader_format", 'flat')
        params.assert_empty(cls.__name__)
        return cls(lazy=lazy, tokenizer=tokenizer,
                   use_lexicon_features=use_lexicon_features,
                   use_sparse_lexicon_features=use_sparse_lexicon_features,
                   multilabel=multilabel,
                   with_elmo=with_elmo,
                   reader_format=reader_format)
| 7,766 | 45.232143 | 109 | py |
sharpDARTS | sharpDARTS-master/cnn/warmup_scheduler.py | # https://github.com/ildoonet/pytorch-gradual-warmup-lr
# License: MIT
from torch.optim.lr_scheduler import _LRScheduler
class GradualWarmupScheduler(_LRScheduler):
    """ Gradually warm-up(increasing) learning rate in optimizer.
    Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        multiplier: target learning rate = base lr * multiplier
        total_epoch: target learning rate is reached at total_epoch, gradually
        after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)
    """

    def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
        self.multiplier = multiplier
        if self.multiplier <= 1.:
            raise ValueError('multiplier should be greater than 1.')
        self.total_epoch = total_epoch
        self.after_scheduler = after_scheduler
        # Must be set before super().__init__, which triggers an initial step().
        self.finished = False
        super().__init__(optimizer)

    def get_lr(self):
        """Linear warmup from base_lr to base_lr * multiplier over
        ``total_epoch`` epochs; afterwards delegate to ``after_scheduler``."""
        if self.last_epoch > self.total_epoch:
            if self.after_scheduler:
                if not self.finished:
                    # Rescale the wrapped scheduler exactly once so it anneals
                    # from the post-warmup learning rate.
                    self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
                    self.finished = True
                return self.after_scheduler.get_lr()
            return [base_lr * self.multiplier for base_lr in self.base_lrs]
        return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]

    def step(self, epoch=None):
        if self.finished and self.after_scheduler:
            # Bug fix: the wrapped scheduler must not count the warmup epochs.
            # Forwarding the raw `epoch` fast-forwarded its schedule (e.g. on
            # resume); subtract the warmup length so it starts from zero.
            if epoch is None:
                return self.after_scheduler.step(None)
            return self.after_scheduler.step(epoch - self.total_epoch)
        else:
            return super(GradualWarmupScheduler, self).step(epoch)
sharpDARTS | sharpDARTS-master/cnn/costar_baseline_model.py | import math
import torch
import torch.nn as nn
from torch.nn import functional as F
# from . import operations
# from . import genotypes
# from .operations import ReLUConvBN
# from .operations import ConvBNReLU
# from .operations import FactorizedReduce
# from .operations import Identity
from torch.autograd import Variable
# from .utils import drop_path
# from .model_search import MixedAux
import operations
import genotypes
from operations import FactorizedReduce
from operations import Identity
from operations import ReLUConvBN
from operations import SepConv
from utils import drop_path
from model import Cell
from model import AuxiliaryHeadImageNet
import torchvision.models as models
import torch.utils.model_zoo as model_zoo
class NetworkResNetCOSTAR(nn.Module):
    """ResNet50-based baseline for the CoSTAR block-stacking dataset.

    Based on the baseline model of https://arxiv.org/pdf/1611.08036.pdf:
    a single (shared) ResNet50 trunk encodes each 3-channel half of the
    6-channel input image, the two feature vectors are concatenated with the
    dense state vector, and a small fully connected head produces the logits.
    """

    def __init__(self, C, num_classes, layers, auxiliary, genotype, in_channels=6, reduce_spacing=None,
                 mixed_aux=False, op_dict=None, C_mid=None, stem_multiplier=3, vector_size=15):
        super(NetworkResNetCOSTAR, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary
        self._in_channels = in_channels
        self._vector_size = vector_size
        self.drop_path_prob = 0.
        resnet_feature_dim = 2048
        pretrained = True
        # One trunk shared by both image halves (weights are reused in forward).
        self.stem0 = models.resnet50(num_classes=resnet_feature_dim)
        if pretrained:
            state = model_zoo.load_url(models.resnet.model_urls['resnet50'])
            # Drop the final fc weights: our fc output size differs from the
            # 1000-class pretrained head.
            del state['fc.weight']
            del state['fc.bias']
            self.stem0.load_state_dict(state, strict=False)
        # Kept for checkpoint compatibility; not used in forward().
        self.global_pooling = nn.AvgPool2d(7)
        # Two image encodings plus the dense state vector.
        combined_state_size = resnet_feature_dim * 2 + vector_size
        self.classifier = nn.Sequential(
            nn.Linear(combined_state_size, 1024),
            nn.ReLU(inplace=True),
            nn.Dropout(0.2),
            nn.Linear(1024, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.2),
            nn.Linear(512, num_classes)
        )

    def forward(self, img, vec):
        """Return ``(logits, None)``; no auxiliary head in this baseline."""
        logits_aux = None
        first_half = self.stem0(img[:, :3, :, :])
        second_half = self.stem0(img[:, 3:, :, :])
        combined = torch.cat([first_half, second_half, vec], dim=-1)
        logits = self.classifier(combined.view(combined.size(0), -1))
        return logits, logits_aux
class NetworkNASCOSTAR(nn.Module):
    """NAS-cell network for the CoSTAR block-stacking dataset.

    Copied from NetworkImageNet: a two-conv stem followed by ``layers``
    genotype cells with channel-doubling reductions at 1/3 and 2/3 depth,
    an optional auxiliary head, global pooling, and a two-layer classifier.
    """

    def __init__(self, C, num_classes, layers, auxiliary, genotype, in_channels=55, reduce_spacing=None,
                 mixed_aux=False, op_dict=None, C_mid=None, stem_multiplier=3):
        super(NetworkNASCOSTAR, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary
        self._in_channels = in_channels
        self.drop_path_prob = 0.

        self.stem0 = nn.Sequential(
            nn.Conv2d(in_channels, C // 2, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )

        self.stem1 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )

        C_prev_prev, C_prev, C_curr = C, C, C

        self.cells = nn.ModuleList()
        reduction_prev = True
        for i in range(layers):
            # Double channels and reduce resolution at 1/3 and 2/3 depth.
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
                cell = Cell(genotype.reduce, genotype.reduce_concat, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, op_dict=op_dict, C_mid=C_mid)
            else:
                reduction = False
                cell = Cell(genotype.normal, genotype.reduce_concat, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, op_dict=op_dict, C_mid=C_mid)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
            if i == 2 * layers // 3:
                C_to_auxiliary = C_prev

        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadImageNet(C_to_auxiliary, num_classes)
        self.global_pooling = nn.AvgPool2d(7)
        self.classifier = nn.Linear(C_prev, C_prev)
        self.classifier2 = nn.Linear(C_prev, num_classes)

    def forward(self, batch_input):
        logits_aux = None
        s0 = self.stem0(batch_input)
        s1 = self.stem1(s0)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            if i == 2 * self._layers // 3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        # Bug fix: `classifier2` was declared but never applied, so forward()
        # returned a C_prev-dimensional feature vector instead of num_classes
        # logits. NOTE(review): no nonlinearity is placed between the two
        # linear layers, matching how they were declared — confirm intended.
        hidden = self.classifier(out.view(out.size(0), -1))
        logits = self.classifier2(hidden)
        return logits, logits_aux
class NetworkCOSTAR(nn.Module):
    """NAS-cell network for CoSTAR that fuses an image and a state vector.

    The image passes through a two-conv stem; the dense vector is embedded to
    C channels and added to every spatial position of the stem output before
    the genotype cells. Reductions occur at 1/3 and 2/3 depth, followed by an
    optional auxiliary head, global pooling, and a linear classifier.
    """

    def __init__(self, C, num_classes, layers, auxiliary, genotype, in_channels=6, reduce_spacing=None,
                 mixed_aux=False, op_dict=None, C_mid=None, stem_multiplier=3, vector_size=15):
        super(NetworkCOSTAR, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary
        self._in_channels = in_channels
        self._vector_size = vector_size
        self.drop_path_prob = 0.

        self.stem0 = nn.Sequential(
            nn.Conv2d(in_channels, C // 2, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )
        self.stem1 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )
        # Embeds the dense state vector into the stem's channel space.
        self.vector_stem = nn.Sequential(
            nn.Linear(vector_size, C),
            nn.ReLU(inplace=True),
        )

        C_prev_prev, C_prev, C_curr = C, C, C
        self.cells = nn.ModuleList()
        prev_was_reduction = True
        for layer_idx in range(layers):
            # Double channels and reduce resolution at 1/3 and 2/3 depth.
            is_reduction = layer_idx in [layers // 3, 2 * layers // 3]
            if is_reduction:
                C_curr *= 2
                cell = Cell(genotype.reduce, genotype.reduce_concat, C_prev_prev, C_prev, C_curr,
                            is_reduction, prev_was_reduction, op_dict=op_dict, C_mid=C_mid)
            else:
                cell = Cell(genotype.normal, genotype.reduce_concat, C_prev_prev, C_prev, C_curr,
                            is_reduction, prev_was_reduction, op_dict=op_dict, C_mid=C_mid)
            prev_was_reduction = is_reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
            if layer_idx == 2 * layers // 3:
                C_to_auxiliary = C_prev

        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadImageNet(C_to_auxiliary, num_classes)
        self.global_pooling = nn.AvgPool2d(7)
        self.classifier = nn.Linear(C_prev, num_classes)

    def forward(self, img, vector):
        logits_aux = None
        s0 = self.stem0(img)
        s1 = self.stem1(s0)
        # Inject the embedded state vector into every spatial location of s1.
        vec_feat = self.vector_stem(vector)
        s1.add_(vec_feat.unsqueeze(2).unsqueeze(3).expand(-1, -1, s1.shape[2], s1.shape[3]))
        for layer_idx, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            if layer_idx == 2 * self._layers // 3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        pooled = self.global_pooling(s1)
        logits = self.classifier(pooled.view(pooled.size(0), -1))
        return logits, logits_aux
sharpDARTS | sharpDARTS-master/cnn/test.py | import os
import sys
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkCIFAR as Network
# Command-line configuration for evaluating a trained genotype on CIFAR-10.
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--model_path', type=str, default='EXP/model.pt', help='path of pretrained model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
args = parser.parse_args()

# Timestamped logging to stdout.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')

# CIFAR-10 classification task.
CIFAR_CLASSES = 10
def main():
  """Load a trained model checkpoint and evaluate it on the CIFAR-10 test set.

  Requires a CUDA device; exits with status 1 when none is available.
  Reads all configuration from the module-level ``args``.
  """
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  # Seed numpy and torch (CPU + CUDA) before any model construction so runs
  # are reproducible; the statement order here is deliberate.
  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)

  # NOTE(review): eval() resolves --arch against the genotypes module; fine
  # for a trusted CLI, but do not expose this to untrusted input.
  genotype = eval("genotypes.%s" % args.arch)
  model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
  model = model.cuda()
  # Restore the trained weights from --model_path.
  utils.load(model, args.model_path)

  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()

  # Only the test-time transform is used here; the train transform is discarded.
  _, test_transform = utils._data_transforms_cifar10(args)
  test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)
  test_queue = torch.utils.data.DataLoader(
      test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

  model.drop_path_prob = args.drop_path_prob
  test_acc, test_obj = infer(test_queue, model, criterion)
  logging.info('test_acc %f', test_acc)
def infer(test_queue, model, criterion):
  """Evaluate ``model`` over ``test_queue``.

  Runs in eval mode with gradients disabled and returns
  ``(average top-1 accuracy, average loss)``.
  """
  objs = utils.AvgrageMeter()
  top1 = utils.AvgrageMeter()
  top5 = utils.AvgrageMeter()
  model.eval()

  # torch.no_grad() replaces the deprecated Variable(..., volatile=True);
  # the file already uses 0.4+ APIs (non_blocking=True).
  with torch.no_grad():
    for step, (input, target) in enumerate(test_queue):
      input = input.cuda()
      target = target.cuda(non_blocking=True)

      logits, _ = model(input)
      loss = criterion(logits, target)

      prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
      n = input.size(0)
      # Bug fix: `.item()` replaces the pre-0.4 `tensor.data[0]`, which
      # raises "invalid index of a 0-dim tensor" on modern PyTorch.
      objs.update(loss.item(), n)
      top1.update(prec1.item(), n)
      top5.update(prec5.item(), n)

      if step % args.report_freq == 0:
        logging.info('test %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)

  return top1.avg, objs.avg
# Script entry point: evaluate the trained model on the CIFAR-10 test set.
if __name__ == '__main__':
  main()
| 3,599 | 33.285714 | 102 | py |
sharpDARTS | sharpDARTS-master/cnn/train_costar.py | # source: https://github.com/NVIDIA/apex/tree/master/examples/imagenet
# license: BSD 3-Clause
#
# to install apex:
# pip3 install --user --upgrade -e . --global-option="build_ext" --global-option="--cpp_ext" --global-option="--cuda_ext"
#
# ### Multi-process training with FP16_Optimizer, dynamic loss scaling
# $ python3 -m torch.distributed.launch --nproc_per_node=2 train_costar.py --fp16 --b 256 --save `git rev-parse --short HEAD` --epochs 300 --dynamic-loss-scale --workers 14 --data ~/.keras/datasets/costar_block_stacking_dataset_v0.4
#
# # note that --nproc_per_node is NUM_GPUS.
# # Can add --sync_bn to sync bachnorm values if batch size is "very small" but note this also reduces img/s by ~10%.
#
# Example command:
#
# export CUDA_VISIBLE_DEVICES="2" && python3 train_costar.py --auxiliary --cutout --batch_size 128 --epochs 200 --save `git rev-parse --short HEAD` --arch SHARP_DARTS --mid_channels 32 --init_channels 36 --wd 0.0003 --lr_power_annealing_exponent_order 2 --learning_rate_min 0.0005 --learning_rate 0.05
import argparse
import os
import shutil
import time
import glob
import json
import copy
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision
import numpy as np
import random
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
try:
import costar_dataset
except ImportError:
ImportError('The costar dataset is not available. '
'See https://github.com/ahundt/costar_dataset for details')
from tqdm import tqdm
import dataset
import genotypes
import autoaugment
import operations
import utils
import warmup_scheduler
from cosine_power_annealing import cosine_power_annealing
from costar_baseline_model import NetworkResNetCOSTAR, NetworkCOSTAR
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('--data', type=str, default='~/.keras/datasets/costar_block_stacking_dataset_v0.4',
help='path to dataset', metavar='DIR')
parser.add_argument('--arch', '-a', metavar='ARCH', default='SHARP_DARTS',
# choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: SHARP_DARTS)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=300, type=int, metavar='N',
help='number of total epochs to run (default: 300)')
parser.add_argument('--start_epoch', default=1, type=int, metavar='N',
help='manual epoch number (useful for restarts)')
parser.add_argument('-b', '--batch_size', default=256, type=int,
metavar='N', help='mini-batch size per process (default: 256)')
parser.add_argument('--lr', '--learning_rate', dest='learning_rate', default=0.1, type=float,
metavar='LR', help='Initial learning rate based on autoaugment https://arxiv.org/pdf/1805.09501.pdf. Will be scaled by <global batch size>/256: args.learning_rate = args.learning_rate*float(args.batch_size*args.world_size)/256. A warmup schedule will also be applied over the first 5 epochs.')
parser.add_argument('--learning_rate_min', type=float, default=0.00016, help='min learning rate')
parser.add_argument('--warmup_epochs', default=10, type=int, help='number of epochs for warmup (default: 10)')
parser.add_argument('--warmup_lr_divisor', default=10, type=int, help='factor by which to reduce lr at warmup start (default: 10)')
parser.add_argument('--lr_power_annealing_exponent_order', type=float, default=10,
help='Cosine Power Annealing Schedule Base, larger numbers make '
'the exponential more dominant, smaller make cosine more dominant.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight_decay', '--wd', dest='weight_decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print_freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--restart_lr', action='store_true',
help='Used in conjunction with --resume, '
'this will restart the lr curve as if it was epoch 1, '
'but otherwise retain your current epoch count.')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--dynamic-loss-scale', action='store_true',
help='Use dynamic loss scaling. If supplied, this argument supersedes ' +
'--static-loss-scale.')
parser.add_argument('--prof', dest='prof', action='store_true',
help='Only run 10 iterations for profiling.')
parser.add_argument('--deterministic', action='store_true', default=False)
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--sync_bn', action='store_true',
help='enabling apex sync BN.')
parser.add_argument('--drop_path_prob', type=float, default=0, help='drop path probability')
parser.add_argument('--ops', type=str, default='OPS', help='which operations to use, options are OPS and DARTS_OPS')
parser.add_argument('--primitives', type=str, default='PRIMITIVES',
help='which primitive layers to use inside a cell search space,'
' options are PRIMITIVES and DARTS_PRIMITIVES')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--mid_channels', type=int, default=96, help='C_mid channels in choke SharpSepConv')
parser.add_argument('--layers', type=int, default=14, help='total number of layers')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--autoaugment', action='store_true', default=False, help='use cifar10 autoaugment https://arxiv.org/abs/1805.09501')
parser.add_argument('--random_eraser', action='store_true', default=False, help='use random eraser')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=112, help='cutout length')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('-e', '--evaluate', dest='evaluate', type=str, metavar='PATH', default='',
help='evaluate model at specified path on training, test, and validation datasets')
parser.add_argument('--load', type=str, default='', metavar='PATH', help='load weights at specified location')
parser.add_argument('--load_args', type=str, default='', metavar='PATH',
help='load command line args from a json file, this will override '
'all currently set args except for --evaluate, and arguments '
'that did not exist when the json file was originally saved out.')
# CoSTAR BSD specific arguments
parser.add_argument('--dataset', type=str, default='stacking', help='which dataset, only option is stacking')
parser.add_argument('--version', type=str, default='v0.4', help='the CoSTAR BSD version to use. Defaults to "v0.4"')
parser.add_argument('--set_name', type=str, default='blocks_only',
help='which set to use in the CoSTAR BSD. Options are "blocks_only" or "blocks_with_plush_toy". '
'Defaults to "blocks_only"')
parser.add_argument('--subset_name', type=str, default='success_only',
help='which subset to use in the CoSTAR BSD. Options are "success_only", '
'"error_failure_only", "task_failure_only", or "task_and_error_failure". Defaults to "success_only"')
parser.add_argument('--feature_mode', type=str, default='all_features',
help='which feature mode to use. Options are "translation_only", "rotation_only", "stacking_reward", '
'or the default "all_features"')
parser.add_argument('--num_images_per_example', type=int, default=200,
help='Number of times an example is visited per epoch. Default value is 200. Since the image for each visit to an '
'example is randomly chosen, and since the number of images in an example is different, we simply visit each '
'example multiple times according to this number to ensure most images are visited.')
parser.add_argument('--cart_weight', type=float, default=0.7,
help='the weight for the cartesian error. In validation, the metric to determine whether a run is good is '
'comparing the weighted sum of cart_weight*cart_error+(1-cart_weight)*angle_error. Defaults to 0.7 '
'because translational error is more important than rotational error.')
parser.add_argument('--abs_cart_error_output_csv_name', type=str, default='abs_cart_error.csv',
help='the output csv file name for the absolute cartesian error of ALL samples in ALL epochs. '
'Actual output file will have train_/val_/test_ prefix')
parser.add_argument('--abs_angle_error_output_csv_name', type=str, default='abs_angle_error.csv',
help='the output csv file name for the absolute cartesian error of ALL samples in ALL epochs. '
'Actual output file will have train_/val_/test_ prefix')
# Enable cudnn autotuner (fastest conv algorithms for fixed input shapes);
# overridden below when --deterministic is requested.
cudnn.benchmark = True
# Best weighted cart+angle validation error seen so far; lower is better.
best_combined_error = float('inf')
args = parser.parse_args()
# Module-level logger, initialized in main() once the log directory exists.
logger = None
# Size of the pose/command input vector for the chosen feature mode.
VECTOR_SIZE = dataset.costar_vec_size_dict[args.feature_mode]
def fast_collate(batch):
    """Collate CoSTAR samples into (stacked image pair, vector, targets).

    Each sample's data is a list ``[image_0, image_1, vector]``; after the
    default collation the two image tensors are concatenated along the
    channel dimension so the network receives a single 6-channel input.
    """
    collated, labels = torch.utils.data.dataloader.default_collate(batch)
    stacked_images = torch.cat((collated[0], collated[1]), dim=1)
    return stacked_images, collated[2], labels
# Deterministic mode: trade cudnn autotuning speed for reproducible runs.
# Seeding with local_rank gives each distributed process a distinct seed.
if args.deterministic:
    cudnn.benchmark = False
    cudnn.deterministic = True
    torch.manual_seed(args.local_rank)
def main():
    """Train (or evaluate) a CoSTAR block-stacking regression network.

    Uses the module-level ``args`` namespace (argparse). Handles optional
    distributed initialization, model construction, checkpoint resume, the
    cosine power annealing learning-rate schedule, the epoch loop with
    checkpointing/stats export, and a final evaluation of the best
    checkpoint on the train, validation, and test splits.
    """
    global best_combined_error, args, logger
    args.distributed = False
    if 'WORLD_SIZE' in os.environ:
        args.distributed = int(os.environ['WORLD_SIZE']) > 1
    args.world_size = 1
    if args.distributed:
        # One process per GPU; local_rank selects this process's device.
        args.gpu = args.local_rank % torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()
    # note the gpu is used for directory creation and log files
    # which is needed when run as multiple processes
    args = utils.initialize_files_and_args(args)
    logger = utils.logging_setup(args.log_file_path)

    # Load the requested ops dictionary and primitives list by name.
    op_dict_to_load = "operations.%s" % args.ops
    logger.info('loading op dict: ' + str(op_dict_to_load))
    op_dict = eval(op_dict_to_load)
    primitives_to_load = "genotypes.%s" % args.primitives
    logger.info('loading primitives:' + primitives_to_load)
    primitives = eval(primitives_to_load)
    logger.info('primitives: ' + str(primitives))

    # Number of regression outputs for the chosen feature mode.
    classes = dataset.costar_class_dict[args.feature_mode]
    if args.arch == 'NetworkResNetCOSTAR':
        # baseline model for comparison
        model = NetworkResNetCOSTAR(args.init_channels, classes, args.layers, args.auxiliary, None, vector_size=VECTOR_SIZE, op_dict=op_dict, C_mid=args.mid_channels)
    else:
        # create the searched-architecture network from its genotype
        genotype = eval("genotypes.%s" % args.arch)
        model = NetworkCOSTAR(args.init_channels, classes, args.layers, args.auxiliary, genotype, vector_size=VECTOR_SIZE, op_dict=op_dict, C_mid=args.mid_channels)
    model.drop_path_prob = 0.0

    if args.sync_bn:
        import apex
        logger.info("using apex synced BN")
        model = apex.parallel.convert_syncbn_model(model)
    model = model.cuda()
    if args.distributed:
        # By default, apex.parallel.DistributedDataParallel overlaps communication
        # with computation in the backward pass.
        # delay_allreduce delays all communication to the end of the backward pass.
        model = DDP(model, delay_allreduce=True)

    # define loss function (criterion) and optimizer
    criterion = nn.MSELoss().cuda()
    # NOTE(rexxarchl): MSLE loss, indicated as better for rotation in
    # costar_hyper/costar_block_stacking_train_regression.py,
    # is not available in PyTorch by default

    # Scale learning rate based on global batch size.
    args.learning_rate = args.learning_rate * float(args.batch_size * args.world_size) / 256.
    init_lr = args.learning_rate / args.warmup_lr_divisor
    optimizer = torch.optim.SGD(model.parameters(), init_lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # Optionally resume from a checkpoint
    if args.resume or args.evaluate:
        if args.evaluate:
            args.resume = args.evaluate

        # Use a local scope to avoid dangling references
        def resume():
            # BUG FIX: without this declaration the assignment below created a
            # function-local variable and the restored best error was discarded.
            global best_combined_error
            if os.path.isfile(args.resume):
                logger.info("=> loading checkpoint '{}'".format(args.resume))
                checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage.cuda(args.gpu))
                args.start_epoch = checkpoint['epoch'] + 1
                if 'best_combined_error' in checkpoint:
                    best_combined_error = checkpoint['best_combined_error']
                model.load_state_dict(checkpoint['state_dict'])
                optimizer.load_state_dict(checkpoint['optimizer'])
                if 'lr_scheduler' in checkpoint:
                    # BUG FIX: no scheduler object exists anymore (the lr is set
                    # manually from the cosine power annealing schedule), so
                    # calling scheduler.load_state_dict here raised NameError.
                    logger.info("=> ignoring lr_scheduler state found in checkpoint")
                logger.info("=> loaded checkpoint '{}' (epoch {})"
                            .format(args.resume, checkpoint['epoch']))
            else:
                logger.info("=> no checkpoint found at '{}'".format(args.resume))
        resume()

    # Data transforms are not applicable for the CoSTAR BSD at the moment;
    # prefetching normalizes and converts to tensors instead.
    train_transform = valid_transform = None
    evaluate = bool(args.evaluate)

    # Get the training queue, select training and validation from training set
    train_loader, val_loader = dataset.get_training_queues(
        args.dataset, train_transform, valid_transform, args.data,
        args.batch_size, train_proportion=1.0,
        collate_fn=fast_collate,
        distributed=args.distributed,
        num_workers=args.workers,
        costar_set_name=args.set_name, costar_subset_name=args.subset_name,
        costar_feature_mode=args.feature_mode, costar_version=args.version, costar_num_images_per_example=args.num_images_per_example,
        costar_output_shape=(224, 224, 3), costar_random_augmentation=None, costar_one_hot_encoding=True, evaluate=evaluate)

    if args.evaluate:
        # Load the test set
        test_loader = dataset.get_costar_test_queue(
            args.data, costar_set_name=args.set_name, costar_subset_name=args.subset_name, collate_fn=fast_collate,
            costar_feature_mode=args.feature_mode, costar_version=args.version, costar_num_images_per_example=args.num_images_per_example,
            costar_output_shape=(224, 224, 3), costar_random_augmentation=None, costar_one_hot_encoding=True)
        # Evaluate on all splits, without any augmentation
        validate(train_loader, model, criterion, args, prefix='evaluate_train_')
        validate(val_loader, model, criterion, args, prefix='evaluate_val_')
        validate(test_loader, model, criterion, args, prefix='evaluate_test_')
        return

    # Precompute the per-epoch learning-rate schedule.
    lr_schedule = cosine_power_annealing(
        epochs=args.epochs, max_lr=args.learning_rate, min_lr=args.learning_rate_min,
        warmup_epochs=args.warmup_epochs, exponent_order=args.lr_power_annealing_exponent_order,
        restart_lr=args.restart_lr)
    epochs = np.arange(args.epochs) + args.start_epoch
    stats_csv = args.epoch_stats_file.replace('.json', '.csv')
    with tqdm(epochs, dynamic_ncols=True, disable=args.local_rank != 0, leave=False, initial=args.start_epoch) as prog_epoch:
        best_stats = {}
        stats = {}
        epoch_stats = []
        best_epoch = 0
        logger.info('Initial Learning Rate: ' + str(lr_schedule[0]))
        for epoch, learning_rate in zip(prog_epoch, lr_schedule):
            if args.distributed and train_loader.sampler is not None:
                train_loader.sampler.set_epoch(int(epoch))
            # Set the lr manually from the precomputed annealing schedule.
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate
            # Linearly scale drop-path probability over the course of training.
            model.drop_path_prob = args.drop_path_prob * float(epoch) / float(args.epochs)
            # train for one epoch
            train_stats = train(train_loader, model, criterion, optimizer, int(epoch), args)
            if args.prof:
                break
            # evaluate on validation set
            combined_error, val_stats = validate(val_loader, model, criterion, args)
            stats.update(train_stats)
            stats.update(val_stats)
            stats['lr'] = '{0:.5f}'.format(learning_rate)
            stats['epoch'] = epoch
            # remember best combined_error and save checkpoint (rank 0 only)
            if args.local_rank == 0:
                is_best = combined_error < best_combined_error
                best_combined_error = min(combined_error, best_combined_error)
                stats['best_combined_error'] = '{0:.3f}'.format(best_combined_error)
                if is_best:
                    best_epoch = epoch
                    best_stats = copy.deepcopy(stats)
                stats['best_epoch'] = best_epoch
                stats_str = utils.dict_to_log_string(stats)
                logger.info(stats_str)
                save_checkpoint({
                    'epoch': epoch,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_combined_error': best_combined_error,
                    'optimizer': optimizer.state_dict(),
                    'lr_schedule': lr_schedule,
                    'stats': best_stats
                }, is_best, path=args.save)
                prog_epoch.set_description(
                    'Overview ***** best_epoch: {0} best_valid_combined_error: {1:.2f} ***** Progress'
                    .format(best_epoch, best_combined_error))
                epoch_stats += [copy.deepcopy(stats)]
                with open(args.epoch_stats_file, 'w') as f:
                    json.dump(epoch_stats, f, cls=utils.NumpyEncoder)
                utils.list_of_dicts_to_csv(stats_csv, epoch_stats)
        stats_str = utils.dict_to_log_string(best_stats, key_prepend='best_')
        logger.info(stats_str)
        with open(args.stats_file, 'w') as f:
            arg_dict = vars(args)
            arg_dict.update(best_stats)
            json.dump(arg_dict, f, cls=utils.NumpyEncoder)
        with open(args.epoch_stats_file, 'w') as f:
            json.dump(epoch_stats, f, cls=utils.NumpyEncoder)
        utils.list_of_dicts_to_csv(stats_csv, epoch_stats)
        logger.info('Training of Final Model Complete!')

    # Do a final evaluation on the best checkpoint.
    logger.info('Final evaluation')
    best_model_path = os.path.join(args.save, 'model_best.pth.tar')
    best_model = torch.load(best_model_path, map_location=lambda storage, loc: storage.cuda(args.gpu))
    model.load_state_dict(best_model['state_dict'])
    logger.info("=> loaded best_model '{}' from epoch {}".format(best_model_path, best_model['epoch']))
    # Get the train and validation set in evaluate mode
    train_loader, val_loader = dataset.get_training_queues(
        args.dataset, train_transform, valid_transform, args.data,
        args.batch_size, train_proportion=1.0,
        collate_fn=fast_collate,
        distributed=args.distributed,
        num_workers=args.workers,
        costar_set_name=args.set_name, costar_subset_name=args.subset_name,
        costar_feature_mode=args.feature_mode, costar_version=args.version, costar_num_images_per_example=args.num_images_per_example,
        costar_output_shape=(224, 224, 3), costar_random_augmentation=None, costar_one_hot_encoding=True, evaluate=evaluate)
    # Get the test set
    test_loader = dataset.get_costar_test_queue(
        args.data, costar_set_name=args.set_name, costar_subset_name=args.subset_name, collate_fn=fast_collate,
        costar_feature_mode=args.feature_mode, costar_version=args.version, costar_num_images_per_example=args.num_images_per_example,
        costar_output_shape=(224, 224, 3), costar_random_augmentation=None, costar_one_hot_encoding=True)
    # Evaluate on all splits, without any augmentation
    validate(train_loader, model, criterion, args, prefix='best_final_train_')
    validate(val_loader, model, criterion, args, prefix='best_final_val_')
    validate(test_loader, model, criterion, args, prefix='best_final_test_')
    # BUG FIX: a misplaced quote previously swallowed the concatenation, so the
    # literal text "' + str(args.save)" was logged instead of the save dir.
    logger.info('Final evaluation complete! Save dir: ' + str(args.save))
class data_prefetcher():
    """Overlaps host-to-device copies with compute using a side CUDA stream.

    Wraps a DataLoader yielding (image, vector, target) triples; the next
    batch is uploaded on ``self.stream`` while the current one is consumed.
    Optionally applies batch cutout augmentation on the GPU.
    """
    def __init__(self, loader, cutout=False, cutout_length=112, cutout_cuts=1):
        self.loader = iter(loader)
        # Dedicated stream so uploads overlap with compute on the default stream.
        self.stream = torch.cuda.Stream()
        cutout_dtype = np.float32
        self.cutout = None
        if cutout:
            self.cutout = utils.BatchCutout(cutout_length, cutout_cuts, dtype=cutout_dtype)
        # Kick off the first async upload immediately.
        self.preload()
    def preload(self):
        """Fetch the next batch and start its async GPU upload; None at end."""
        try:
            self.next_input_img, self.next_input_vec, self.next_target = next(self.loader)
        except StopIteration:
            # Loader exhausted: signal the end of the epoch with None batches.
            self.next_input_img = self.next_input_vec = None
            self.next_target = None
            return
        with torch.cuda.stream(self.stream):
            # non_blocking copies run asynchronously on self.stream.
            self.next_input_img = self.next_input_img.cuda(non_blocking=True).float()
            self.next_input_vec = self.next_input_vec.cuda(non_blocking=True).float()
            self.next_target = self.next_target.cuda(non_blocking=True).float()
            if self.cutout is not None:
                # TODO(ahundt) Fix performance of this cutout call, it makes batch loading time go from 0.001 seconds to 0.05 seconds.
                self.next_input_img = self.cutout(self.next_input_img)
    def next(self):
        """Return the ready batch (or Nones at epoch end) and prefetch the next.

        The wait_stream call makes the default stream block until the upload
        on the side stream has finished, so the returned tensors are safe to use.
        """
        torch.cuda.current_stream().wait_stream(self.stream)
        input_img = self.next_input_img
        input_vec = self.next_input_vec
        target = self.next_target
        self.preload()
        return input_img, input_vec, target
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch and return a dict of 'train_'-prefixed stats.

    Uses data_prefetcher for overlapped GPU uploads, applies a sigmoid to the
    network outputs before the MSE criterion (targets are in [0, 1] — TODO
    confirm against the dataset encoding), adds the weighted auxiliary-head
    loss when enabled, and writes per-sample absolute cart/angle error CSVs.
    """
    loader_len = len(train_loader)
    if loader_len < 2:
        raise ValueError('train_loader only supports 2 or more batches and loader_len: ' + str(loader_len))
    batch_time = AverageMeter()
    data_time = AverageMeter()
    speed = AverageMeter()
    losses = AverageMeter()
    abs_cart_m = AverageMeter()
    abs_angle_m = AverageMeter()
    sigmoid = torch.nn.Sigmoid()
    # switch to train mode
    model.train()
    end = time.time()
    prefetcher = data_prefetcher(train_loader, cutout=args.cutout, cutout_length=args.cutout_length)
    # Per-sample absolute errors, accumulated over the whole epoch for CSV export.
    cart_error, angle_error = [], []
    input_img, input_vec, target = prefetcher.next()
    batch_size = input_img.size(0)
    i = -1
    # Only rank 0 renders a progress bar.
    if args.local_rank == 0:
        progbar = tqdm(total=len(train_loader), leave=False, dynamic_ncols=True)
    else:
        progbar = None
    while input_img is not None:
        i += 1
        # scheduler in main now adjusts the lr
        # adjust_learning_rate(optimizer, epoch, i, len(train_loader))
        if args.prof:
            # Profiling mode: stop after a handful of iterations.
            if i > 10:
                break
        # measure data loading time
        data_time.update(time.time() - end)
        # compute output
        # note here the term output is equivalent to logits
        output, logits_aux = model(input_img, input_vec)
        output = sigmoid(output)
        loss = criterion(output, target)
        if logits_aux is not None and args.auxiliary:
            # Auxiliary tower gets the same sigmoid + MSE treatment.
            logits_aux = sigmoid(logits_aux)
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight * loss_aux
        # measure accuracy and record loss
        with torch.no_grad():
            output_np = output.cpu().detach().numpy()
            target_np = target.cpu().detach().numpy()
            batch_abs_cart_distance, batch_abs_angle_distance = accuracy(output_np, target_np)
            abs_cart_f, abs_angle_f = np.mean(batch_abs_cart_distance), np.mean(batch_abs_angle_distance)
            cart_error.extend(batch_abs_cart_distance)
            angle_error.extend(batch_abs_angle_distance)
            if args.distributed:
                reduced_loss = reduce_tensor(loss.data)
                # NOTE(review): abs_cart_f/abs_angle_f are numpy scalars here but
                # reduce_tensor calls tensor.clone() — verify the distributed path.
                abs_cart_f = reduce_tensor(abs_cart_f)
                abs_angle_f = reduce_tensor(abs_angle_f)
            else:
                reduced_loss = loss.data
            losses.update(reduced_loss, batch_size)
            abs_cart_m.update(abs_cart_f, batch_size)
            abs_angle_m.update(abs_angle_f, batch_size)
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        torch.cuda.synchronize()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        input_img, input_vec, target = prefetcher.next()
        if args.local_rank == 0:
            progbar.update()
        if args.local_rank == 0 and i % args.print_freq == 0 and i > 1:
            # img/s across all processes for the most recent batch.
            speed.update(args.world_size * args.batch_size / batch_time.val)
            progbar.set_description(
                'Train (cur/avg) '
                'batch_t: {batch_time.val:.3f}/{batch_time.avg:.3f}, '
                'img/s: {0:.1f}/{1:.1f} '
                'load_t: {data_time.val:.3f}/{data_time.avg:.3f}, '
                'loss: {loss.val:.4f}/{loss.avg:.4f}, '
                'cart: {abs_cart.val:.2f}/{abs_cart.avg:.2f}, '
                'angle: {abs_angle.val:.2f}/{abs_angle.avg:.2f}, prog'.format(
                    speed.val,
                    speed.avg,
                    batch_time=batch_time,
                    data_time=data_time, loss=losses, abs_cart=abs_cart_m, abs_angle=abs_angle_m))
    stats = {}
    prefix = 'train_'
    if args.feature_mode != 'rotation_only' and len(cart_error) > 0:  # translation_only or all_features: save cartesian csv
        utils.list_to_csv(os.path.join(args.save, prefix + args.abs_cart_error_output_csv_name),
                          cart_error)
    if args.feature_mode != 'translation_only' and len(angle_error) > 0:  # rotation_only or all_features: save angle csv
        utils.list_to_csv(os.path.join(args.save, prefix + args.abs_angle_error_output_csv_name),
                          angle_error)
    stats = get_stats(progbar, prefix, args, batch_time, data_time, abs_cart_m, abs_angle_m, losses, speed)
    if progbar is not None:
        progbar.close()
        del progbar
    return stats
def get_stats(progbar, prefix, args, batch_time, data_time, abs_cart, abs_angle, losses, speed):
    """Collect averaged epoch statistics into a dict keyed by ``prefix`` + name.

    When a tqdm progress bar is supplied, its description is parsed into
    initial entries via utils.tqdm_stats; all meter averages are then added
    as pre-formatted strings.
    """
    stats = utils.tqdm_stats(progbar, prefix=prefix) if progbar is not None else {}
    # Wall-clock throughput across all processes (images per second).
    wall_rate = args.world_size * args.batch_size / batch_time.avg
    stats.update({
        prefix + 'time_step_wall': f'{wall_rate:.3f}',
        prefix + 'batch_time_one_gpu': f'{batch_time.avg:.3f}',
        prefix + 'data_time': f'{data_time.avg:.3f}',
        prefix + 'abs_cart': f'{abs_cart.avg:.3f}',
        prefix + 'abs_angle': f'{abs_angle.avg:.3f}',
        prefix + 'loss': f'{losses.avg:.4f}',
        prefix + 'images_per_second': f'{speed.avg:.4f}',
    })
    return stats
def validate(val_loader, model, criterion, args, prefix='val_'):
    """Evaluate the model on a loader and return (combined_error, stats).

    The combined error is the weighted sum
    ``cart_weight * mean_abs_cart + (1 - cart_weight) * mean_abs_angle``;
    lower is better. Per-sample absolute errors are also written to CSVs
    prefixed with ``prefix``. NOTE(review): unlike train(), no sigmoid is
    applied to the outputs here before the criterion — confirm intended.
    """
    loader_len = len(val_loader)
    if loader_len < 2:
        raise ValueError('val_loader only supports 2 or more batches and loader_len: ' + str(loader_len))
    batch_time = AverageMeter()
    data_time = AverageMeter()
    speed = AverageMeter()
    losses = AverageMeter()
    abs_cart_m = AverageMeter()
    abs_angle_m = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    # Per-sample absolute errors accumulated over the full pass for CSV export.
    cart_error, angle_error = [], []
    prefetcher = data_prefetcher(val_loader)
    input_img, input_vec, target = prefetcher.next()
    batch_size = input_img.size(0)
    i = -1
    # Only rank 0 renders a progress bar.
    if args.local_rank == 0:
        progbar = tqdm(total=loader_len)
    else:
        progbar = None
    while input_img is not None:
        i += 1
        # measure data loading time
        data_time.update(time.time() - end)
        # compute output
        with torch.no_grad():
            # note here the term output is equivalent to logits
            output, _ = model(input_img, input_vec)
            loss = criterion(output, target)
            # measure accuracy and record loss
            batch_abs_cart_distance, batch_abs_angle_distance = accuracy(output.data.cpu().numpy(), target.data.cpu().numpy())
            abs_cart_f, abs_angle_f = np.mean(batch_abs_cart_distance), np.mean(batch_abs_angle_distance)
            cart_error.extend(batch_abs_cart_distance)
            angle_error.extend(batch_abs_angle_distance)
            if args.distributed:
                reduced_loss = reduce_tensor(loss.data)
                # NOTE(review): abs_cart_f/abs_angle_f are numpy scalars here but
                # reduce_tensor calls tensor.clone() — verify the distributed path.
                abs_cart_f = reduce_tensor(abs_cart_f)
                abs_angle_f = reduce_tensor(abs_angle_f)
            else:
                reduced_loss = loss.data
            losses.update(reduced_loss, batch_size)
            abs_cart_m.update(abs_cart_f, batch_size)
            abs_angle_m.update(abs_angle_f, batch_size)
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if args.local_rank == 0:
            progbar.update()
        if args.local_rank == 0 and i % args.print_freq == 0:
            # img/s across all processes for the most recent batch.
            speed.update(args.world_size * args.batch_size / batch_time.val)
            progbar.set_description(
                'Valid (cur/avg) '
                'batch_t: {batch_time.val:.3f}/{batch_time.avg:.3f}, '
                'img/s: {0:.1f}/{1:.1f}, '
                'loss: {loss.val:.4f}/{loss.avg:.4f}, '
                'abs_cart: {abs_cart.val:.2f}/{abs_cart.avg:.2f}, '
                'abs_angle: {abs_angle.val:.2f}/{abs_angle.avg:.2f}, prog'.format(
                    speed.val,
                    speed.avg,
                    batch_time=batch_time, loss=losses,
                    abs_cart=abs_cart_m, abs_angle=abs_angle_m))
        input_img, input_vec, target = prefetcher.next()
    if args.feature_mode != 'rotation_only':  # translation_only or all_features: save cartesian csv
        utils.list_to_csv(os.path.join(args.save, prefix + args.abs_cart_error_output_csv_name),
                          cart_error)
    if args.feature_mode != 'translation_only':  # rotation_only or all_features: save angle csv
        utils.list_to_csv(os.path.join(args.save, prefix + args.abs_angle_error_output_csv_name),
                          angle_error)
    stats = get_stats(progbar, prefix, args, batch_time, data_time, abs_cart_m, abs_angle_m, losses, speed)
    if progbar is not None:
        progbar.close()
        del progbar
    # Return the weighted sum of absolute cartesian and angle errors as the metric
    return (args.cart_weight * abs_cart_m.avg + (1-args.cart_weight) * abs_angle_m.avg), stats
def save_checkpoint(state, is_best, path='', filename='checkpoint.pth.tar', best_filename='model_best.pth.tar'):
    """Serialize ``state`` under ``path``; duplicate it as the best-model file
    when ``is_best`` is True so the best checkpoint is always recoverable."""
    checkpoint_path = os.path.join(path, filename)
    torch.save(state, checkpoint_path)
    if is_best:
        best_path = os.path.join(path, best_filename)
        shutil.copyfile(checkpoint_path, best_path)
class AverageMeter(object):
    """Tracks the most recent value and a running (count-weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, step, len_epoch):
    """LR schedule that should yield 76% converged accuracy with batch size 256"""
    # Step decay: divide by 10 every 30 epochs, with one extra drop after 80.
    decay_steps = epoch // 30
    if epoch >= 80:
        decay_steps += 1
    lr = args.learning_rate * (0.1 ** decay_steps)
    # Linear warmup over the first 5 epochs, ramping per optimization step.
    if epoch < 5:
        warm_frac = float(1 + step + epoch * len_epoch) / (5. * len_epoch)
        lr = lr * warm_frac
    for group in optimizer.param_groups:
        group['lr'] = lr
def accuracy(output, target):
    """Computes the absolute cartesian and angle distance between output and target"""
    batch_size, out_channels = target.shape
    # Pad to the full [batch, 8] xyz + aaxyz_nsc layout expected by the
    # costar_dataset error functions, depending on which features are present.
    if out_channels == 8:  # already xyz + aaxyz_nsc
        pass
    elif out_channels == 3:  # xyz only: append zero rotations
        pad = np.zeros([batch_size, 5], dtype=np.float32)
        target = np.concatenate((target, pad), 1)
        output = np.concatenate((output, pad), 1)
    elif out_channels == 5:  # aaxyz_nsc only: prepend zero translations
        pad = np.zeros([batch_size, 3], dtype=np.float32)
        target = np.concatenate((pad, target), 1)
        output = np.concatenate((pad, output), 1)
    else:
        raise ValueError("accuracy: unknown number of output channels: {}".format(out_channels))
    abs_cart_distance = costar_dataset.cart_error(target, output)
    abs_angle_distance = costar_dataset.angle_error(target, output)
    return abs_cart_distance, abs_angle_distance
def reduce_tensor(tensor):
    """Average ``tensor`` across all distributed processes.

    All-reduces a clone with SUM then divides by the world size, so every
    rank receives the same mean value. Requires torch.distributed to be
    initialized; ``args.world_size`` is the module-level argparse namespace.
    """
    rt = tensor.clone()
    # dist.reduce_op is deprecated and removed in recent PyTorch releases;
    # dist.ReduceOp is the supported spelling with identical semantics.
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= args.world_size
    return rt
# Script entry point: run training, or evaluation when --evaluate is set.
if __name__ == '__main__':
    main()
| 37,252 | 47.192755 | 315 | py |
sharpDARTS | sharpDARTS-master/cnn/main_fp16_optimizer.py | # source: https://github.com/NVIDIA/apex/tree/master/examples/imagenet
# license: BSD 3-Clause
#
# to install apex:
# pip3 install --user --upgrade -e . --global-option="build_ext" --global-option="--cpp_ext" --global-option="--cuda_ext"
#
# ### Multi-process training with FP16_Optimizer, dynamic loss scaling
# $ python3 -m torch.distributed.launch --nproc_per_node=2 main_fp16_optimizer.py --fp16 --b 256 --save `git rev-parse --short HEAD` --epochs 300 --dynamic-loss-scale --workers 14 --data /home/costar/datasets/imagenet/
#
# # note that --nproc_per_node is NUM_GPUS.
# # Can add --sync_bn to sync bachnorm values if batch size is "very small" but note this also reduces img/s by ~10%.
#
# Example cifar10 command:
#
# # TODO(ahundt) verify these are the correct parameters
# export CUDA_VISIBLE_DEVICES="2" && python3 main_fp16_optimizer.py --autoaugment --auxiliary --cutout --batch_size 128 --epochs 2000 --save sharpDARTS_2k_`git rev-parse --short HEAD`_Cmid32 --arch SHARP_DARTS --mid_channels 32 --init_channels 36 --wd 0.0003 --lr_power_annealing_exponent_order 2 --learning_rate_min 0.0005 --learning_rate 0.05 --dataset cifar10
import argparse
import os
import shutil
import time
import glob
import json
import copy
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision
import numpy as np
import random
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
from model import NetworkImageNet as NetworkImageNet
from model import NetworkCIFAR as NetworkCIFAR
from tqdm import tqdm
import dataset
import genotypes
import autoaugment
import operations
import utils
import warmup_scheduler
from cosine_power_annealing import cosine_power_annealing
from train import evaluate
import cifar10_1
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('--data', type=str, default='../data', help='path to dataset', metavar='DIR')
parser.add_argument('--arch', '-a', metavar='ARCH', default='SHARP_DARTS',
# choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: SHARP_DARTS)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=300, type=int, metavar='N',
help='number of total epochs to run (default: 300)')
parser.add_argument('--start_epoch', default=1, type=int, metavar='N',
help='manual epoch number (useful for restarts)')
parser.add_argument('-b', '--batch_size', default=256, type=int,
metavar='N', help='mini-batch size per process (default: 256)')
parser.add_argument('--lr', '--learning_rate', dest='learning_rate', default=0.1, type=float,
metavar='LR', help='Initial learning rate based on autoaugment https://arxiv.org/pdf/1805.09501.pdf. Will be scaled by <global batch size>/256: args.learning_rate = args.learning_rate*float(args.batch_size*args.world_size)/256. A warmup schedule will also be applied over the first 5 epochs.')
parser.add_argument('--learning_rate_min', type=float, default=0.00016, help='min learning rate')
parser.add_argument('--warmup_epochs', default=10, type=int, help='number of epochs for warmup (default: 10)')
parser.add_argument('--warmup_lr_divisor', default=10, type=int, help='factor by which to reduce lr at warmup start (default: 10)')
parser.add_argument('--lr_power_annealing_exponent_order', type=float, default=10,
help='Cosine Power Annealing Schedule Base, larger numbers make '
'the exponential more dominant, smaller make cosine more dominant.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight_decay', '--wd', dest='weight_decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print_freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--restart_lr', action='store_true',
help='Used in conjunction with --resume, '
'this will restart the lr curve as if it was epoch 1, '
'but otherwise retain your current epoch count.')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--fp16', action='store_true',
help='Run model fp16 mode.')
parser.add_argument('--static-loss-scale', type=float, default=1,
help='Static loss scale, positive power of 2 values can improve fp16 convergence.')
parser.add_argument('--dynamic-loss-scale', action='store_true',
help='Use dynamic loss scaling. If supplied, this argument supersedes ' +
'--static-loss-scale.')
parser.add_argument('--prof', dest='prof', action='store_true',
help='Only run 10 iterations for profiling.')
parser.add_argument('--deterministic', action='store_true', default=False)
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--sync_bn', action='store_true',
help='enabling apex sync BN.')
parser.add_argument('--drop_path_prob', type=float, default=0, help='drop path probability')
parser.add_argument('--ops', type=str, default='OPS', help='which operations to use, options are OPS and DARTS_OPS')
parser.add_argument('--primitives', type=str, default='PRIMITIVES',
help='which primitive layers to use inside a cell search space,'
' options are PRIMITIVES and DARTS_PRIMITIVES')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--mid_channels', type=int, default=96, help='C_mid channels in choke SharpSepConv')
parser.add_argument('--layers', type=int, default=14, help='total number of layers')
parser.add_argument('--dataset', type=str, default='imagenet', help='which dataset, only option is imagenet')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--autoaugment', action='store_true', default=False, help='use cifar10 autoaugment https://arxiv.org/abs/1805.09501')
parser.add_argument('--random_eraser', action='store_true', default=False, help='use random eraser')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=112, help='cutout length')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('-e', '--evaluate', dest='evaluate', type=str, metavar='PATH', default='',
help='evaluate model at specified path on training, test, and validation datasets')
parser.add_argument('--flops', action='store_true', default=False, help='count flops and exit, aka floating point operations.')
parser.add_argument('--load', type=str, default='', metavar='PATH', help='load weights at specified location')
parser.add_argument('--load_args', type=str, default='', metavar='PATH',
help='load command line args from a json file, this will override '
'all currently set args except for --evaluate, and arguments '
'that did not exist when the json file was originally saved out.')
cudnn.benchmark = True
best_top1 = 0
args = parser.parse_args()
logger = None
DATASET_CHANNELS = dataset.inp_channel_dict[args.dataset]
DATASET_MEAN = dataset.mean_dict[args.dataset]
DATASET_STD = dataset.std_dict[args.dataset]
# print('>>>>>>>DATASET_CHANNELS: ' + str(DATASET_CHANNELS))
def fast_collate(batch):
    """Collate (PIL image, label) samples into a uint8 NCHW batch tensor.

    Deliberately skips ToTensor()/normalization: pixels stay uint8 here and
    are normalized later on the GPU, which keeps the DataLoader workers fast.
    Grayscale images are expanded to a trailing channel axis before the
    HWC -> CHW roll.
    """
    images = [sample[0] for sample in batch]
    targets = torch.tensor([sample[1] for sample in batch], dtype=torch.int64)
    width, height = images[0].size[0], images[0].size[1]
    out = torch.zeros((len(images), DATASET_CHANNELS, height, width), dtype=torch.uint8)
    for slot, image in enumerate(images):
        pixels = np.asarray(image, dtype=np.uint8)
        if pixels.ndim < 3:
            # Single-channel image: add an explicit channel axis.
            pixels = np.expand_dims(pixels, axis=-1)
        pixels = np.rollaxis(pixels, 2)  # HWC -> CHW
        out[slot] += torch.from_numpy(pixels)
    return out, targets
# CLASSES = 1000
# Reproducibility mode: trade cudnn autotuning speed for determinism and
# seed each process differently by its local rank.
if args.deterministic:
    cudnn.benchmark = False
    cudnn.deterministic = True
    torch.manual_seed(args.local_rank)
def main():
    """Entry point: build the network, then train or evaluate it.

    Reads the module-level argparse `args` and mutates the module-level
    `best_top1` and `logger`. Handles, in order: distributed (apex DDP)
    setup, model construction, an optional flops-counting early exit,
    fp16 conversion, checkpoint resume, an evaluation-only path, and the
    full training loop driven by a cosine power annealing LR schedule.
    """
    global best_top1, args, logger
    args.distributed = False
    # torch.distributed.launch exports WORLD_SIZE; >1 processes => distributed.
    if 'WORLD_SIZE' in os.environ:
        args.distributed = int(os.environ['WORLD_SIZE']) > 1
    # commented because it is now set as an argparse param.
    # args.gpu = 0
    args.world_size = 1
    if args.distributed:
        # Pin each process to one GPU derived from its local rank.
        args.gpu = args.local_rank % torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()
    # note the gpu is used for directory creation and log files
    # which is needed when run as multiple processes
    args = utils.initialize_files_and_args(args)
    logger = utils.logging_setup(args.log_file_path)
    if args.fp16:
        assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
    if args.static_loss_scale != 1.0:
        if not args.fp16:
            logger.info("Warning: if --fp16 is not used, static_loss_scale will be ignored.")
    # # load the correct ops dictionary
    # NOTE: eval() resolves the CLI-provided names below against this module's
    # imports (operations/genotypes); only trusted command lines should be used.
    op_dict_to_load = "operations.%s" % args.ops
    logger.info('loading op dict: ' + str(op_dict_to_load))
    op_dict = eval(op_dict_to_load)
    # load the correct primitives list
    primitives_to_load = "genotypes.%s" % args.primitives
    logger.info('loading primitives:' + primitives_to_load)
    primitives = eval(primitives_to_load)
    logger.info('primitives: ' + str(primitives))
    # create model
    genotype = eval("genotypes.%s" % args.arch)
    # get the number of output channels
    classes = dataset.class_dict[args.dataset]
    # create the neural network
    if args.dataset == 'imagenet':
        model = NetworkImageNet(args.init_channels, classes, args.layers, args.auxiliary, genotype, op_dict=op_dict, C_mid=args.mid_channels)
        flops_shape = [1, 3, 224, 224]
    else:
        model = NetworkCIFAR(args.init_channels, classes, args.layers, args.auxiliary, genotype, op_dict=op_dict, C_mid=args.mid_channels)
        flops_shape = [1, 3, 32, 32]
    model.drop_path_prob = 0.0
    # if args.pretrained:
    #     logger.info("=> using pre-trained model '{}'".format(args.arch))
    #     model = models.__dict__[args.arch](pretrained=True)
    # else:
    #     logger.info("=> creating model '{}'".format(args.arch))
    #     model = models.__dict__[args.arch]()
    # Early exit: just report parameter count and flops, no training.
    if args.flops:
        model = model.cuda()
        logger.info("param size = %fMB", utils.count_parameters_in_MB(model))
        logger.info("flops_shape = " + str(flops_shape))
        logger.info("flops = " + utils.count_model_flops(model, data_shape=flops_shape))
        return
    if args.sync_bn:
        import apex
        logger.info("using apex synced BN")
        model = apex.parallel.convert_syncbn_model(model)
    model = model.cuda()
    if args.fp16:
        model = network_to_half(model)
    if args.distributed:
        # By default, apex.parallel.DistributedDataParallel overlaps communication with
        # computation in the backward pass.
        # model = DDP(model)
        # delay_allreduce delays all communication to the end of the backward pass.
        model = DDP(model, delay_allreduce=True)
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    # Scale learning rate based on global batch size
    args.learning_rate = args.learning_rate * float(args.batch_size * args.world_size)/256.
    init_lr = args.learning_rate / args.warmup_lr_divisor
    optimizer = torch.optim.SGD(model.parameters(), init_lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # epoch_count = args.epochs - args.start_epoch
    # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(epoch_count))
    # scheduler = warmup_scheduler.GradualWarmupScheduler(
    #     optimizer, args.warmup_lr_divisor, args.warmup_epochs, scheduler)
    if args.fp16:
        optimizer = FP16_Optimizer(optimizer,
                                   static_loss_scale=args.static_loss_scale,
                                   dynamic_loss_scale=args.dynamic_loss_scale)
    # Optionally resume from a checkpoint
    if args.resume or args.evaluate:
        if args.evaluate:
            args.resume = args.evaluate
        # Use a local scope to avoid dangling references
        def resume():
            if os.path.isfile(args.resume):
                logger.info("=> loading checkpoint '{}'".format(args.resume))
                # map_location moves tensors straight onto this process's GPU.
                checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage.cuda(args.gpu))
                args.start_epoch = checkpoint['epoch']
                if 'best_top1' in checkpoint:
                    # NOTE(review): this assigns a local inside resume(), so the
                    # module-level best_top1 is NOT actually restored — looks
                    # like a bug; would need a nonlocal/global declaration.
                    best_top1 = checkpoint['best_top1']
                model.load_state_dict(checkpoint['state_dict'])
                # An FP16_Optimizer instance's state dict internally stashes the master params.
                optimizer.load_state_dict(checkpoint['optimizer'])
                # TODO(ahundt) make sure scheduler loading isn't broken
                if 'lr_scheduler' in checkpoint:
                    # NOTE(review): `scheduler` is only defined in commented-out
                    # code above, so this branch would raise NameError — confirm.
                    scheduler.load_state_dict(checkpoint['lr_scheduler'])
                elif 'lr_schedule' in checkpoint:
                    lr_schedule = checkpoint['lr_schedule']
                logger.info("=> loaded checkpoint '{}' (epoch {})"
                            .format(args.resume, checkpoint['epoch']))
            else:
                logger.info("=> no checkpoint found at '{}'".format(args.resume))
        resume()
    # # Data loading code
    # traindir = os.path.join(args.data, 'train')
    # valdir = os.path.join(args.data, 'val')
    # if(args.arch == "inception_v3"):
    #     crop_size = 299
    #     val_size = 320 # I chose this value arbitrarily, we can adjust.
    # else:
    #     crop_size = 224
    #     val_size = 256
    # train_dataset = datasets.ImageFolder(
    #     traindir,
    #     transforms.Compose([
    #         transforms.RandomResizedCrop(crop_size),
    #         transforms.RandomHorizontalFlip(),
    #         autoaugment.ImageNetPolicy(),
    #         # transforms.ToTensor(), # Too slow, moved to data_prefetcher()
    #         # normalize,
    #     ]))
    # val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
    #     transforms.Resize(val_size),
    #     transforms.CenterCrop(crop_size)
    # ]))
    # train_sampler = None
    # val_sampler = None
    # if args.distributed:
    #     train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    #     val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
    # train_loader = torch.utils.data.DataLoader(
    #     train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
    #     num_workers=args.workers, pin_memory=True, sampler=train_sampler, collate_fn=fast_collate)
    # val_loader = torch.utils.data.DataLoader(
    #     val_dataset,
    #     batch_size=args.batch_size, shuffle=False,
    #     num_workers=args.workers, pin_memory=True,
    #     sampler=val_sampler,
    #     collate_fn=fast_collate)
    # Get preprocessing functions (i.e. transforms) to apply on data
    # normalize_as_tensor = False because we normalize and convert to a
    # tensor in our custom prefetching function, rather than as part of
    # the transform preprocessing list.
    train_transform, valid_transform = utils.get_data_transforms(args, normalize_as_tensor=False)
    # Get the training queue, select training and validation from training set
    train_loader, val_loader = dataset.get_training_queues(
        args.dataset, train_transform, valid_transform, args.data,
        args.batch_size, train_proportion=1.0,
        collate_fn=fast_collate, distributed=args.distributed,
        num_workers=args.workers)
    # Evaluation-only path: score the loaded checkpoint and exit.
    if args.evaluate:
        if args.dataset == 'cifar10':
            # evaluate best model weights on cifar 10.1
            # https://github.com/modestyachts/CIFAR-10.1
            train_transform, valid_transform = utils.get_data_transforms(args)
            # Get the training queue, select training and validation from training set
            # Get the training queue, use full training and test set
            train_queue, valid_queue = dataset.get_training_queues(
                args.dataset, train_transform, valid_transform, args.data, args.batch_size,
                train_proportion=1.0, search_architecture=False)
            test_data = cifar10_1.CIFAR10_1(root=args.data, download=True, transform=valid_transform)
            test_queue = torch.utils.data.DataLoader(
                test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=args.workers)
            eval_stats = evaluate(args, model, criterion, train_queue=train_queue,
                                  valid_queue=valid_queue, test_queue=test_queue)
            with open(args.stats_file, 'w') as f:
                # TODO(ahundt) fix "TypeError: 1869 is not JSON serializable" to include arg info, see train.py
                # arg_dict = vars(args)
                # arg_dict.update(eval_stats)
                # json.dump(arg_dict, f)
                json.dump(eval_stats, f)
            logger.info("flops = " + utils.count_model_flops(model))
            logger.info(utils.dict_to_log_string(eval_stats))
            logger.info('\nEvaluation of Loaded Model Complete! Save dir: ' + str(args.save))
        else:
            validate(val_loader, model, criterion, args)
        return
    # Precompute the full per-epoch learning-rate schedule up front.
    lr_schedule = cosine_power_annealing(
        epochs=args.epochs, max_lr=args.learning_rate, min_lr=args.learning_rate_min,
        warmup_epochs=args.warmup_epochs, exponent_order=args.lr_power_annealing_exponent_order,
        restart_lr=args.restart_lr)
    epochs = np.arange(args.epochs) + args.start_epoch
    stats_csv = args.epoch_stats_file
    stats_csv = stats_csv.replace('.json', '.csv')
    with tqdm(epochs, dynamic_ncols=True, disable=args.local_rank != 0, leave=False) as prog_epoch:
        best_stats = {}
        stats = {}
        epoch_stats = []
        best_epoch = 0
        for epoch, learning_rate in zip(prog_epoch, lr_schedule):
            if args.distributed and train_loader.sampler is not None:
                train_loader.sampler.set_epoch(int(epoch))
            # if args.distributed:
            #     train_sampler.set_epoch(epoch)
            # update the learning rate
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate
            # scheduler.step()
            # Drop-path probability ramps linearly over training.
            model.drop_path_prob = args.drop_path_prob * float(epoch) / float(args.epochs)
            # train for one epoch
            train_stats = train(train_loader, model, criterion, optimizer, int(epoch), args)
            if args.prof:
                break
            # evaluate on validation set
            top1, val_stats = validate(val_loader, model, criterion, args)
            stats.update(train_stats)
            stats.update(val_stats)
            # stats['lr'] = '{0:.5f}'.format(scheduler.get_lr()[0])
            stats['lr'] = '{0:.5f}'.format(learning_rate)
            stats['epoch'] = epoch
            # remember best top1 and save checkpoint
            # Only rank 0 logs, checkpoints, and writes stats files.
            if args.local_rank == 0:
                is_best = top1 > best_top1
                best_top1 = max(top1, best_top1)
                stats['best_top1'] = '{0:.3f}'.format(best_top1)
                if is_best:
                    best_epoch = epoch
                    best_stats = copy.deepcopy(stats)
                stats['best_epoch'] = best_epoch
                stats_str = utils.dict_to_log_string(stats)
                logger.info(stats_str)
                save_checkpoint({
                    'epoch': epoch,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_top1': best_top1,
                    'optimizer': optimizer.state_dict(),
                    # 'lr_scheduler': scheduler.state_dict()
                    'lr_schedule': lr_schedule,
                    'stats': best_stats
                }, is_best, path=args.save)
                prog_epoch.set_description(
                    'Overview ***** best_epoch: {0} best_valid_top1: {1:.2f} ***** Progress'
                    .format(best_epoch, best_top1))
                epoch_stats += [copy.deepcopy(stats)]
                # Rewrite the full history each epoch so a crash loses nothing.
                with open(args.epoch_stats_file, 'w') as f:
                    json.dump(epoch_stats, f, cls=utils.NumpyEncoder)
                utils.list_of_dicts_to_csv(stats_csv, epoch_stats)
    # Final summary: log and persist the best epoch's stats plus all args.
    stats_str = utils.dict_to_log_string(best_stats, key_prepend='best_')
    logger.info(stats_str)
    with open(args.stats_file, 'w') as f:
        arg_dict = vars(args)
        arg_dict.update(best_stats)
        json.dump(arg_dict, f, cls=utils.NumpyEncoder)
    with open(args.epoch_stats_file, 'w') as f:
        json.dump(epoch_stats, f, cls=utils.NumpyEncoder)
    utils.list_of_dicts_to_csv(stats_csv, epoch_stats)
    logger.info('Training of Final Model Complete! Save dir: ' + str(args.save))
class data_prefetcher():
    """Double-buffers DataLoader batches onto the GPU via a side CUDA stream.

    Each call to next() returns the batch staged by the previous preload()
    and immediately starts the asynchronous host->GPU copy, dtype cast,
    normalization, and optional cutout for the following batch, so data
    movement overlaps with compute on the default stream. Returns
    (None, None) once the loader is exhausted. Reads the module-level
    `args` for fp16 mode rather than taking it as a parameter.
    """
    def __init__(self, loader, mean=None, std=None, cutout=False, cutout_length=112, cutout_cuts=2):
        self.loader = iter(loader)
        self.stream = torch.cuda.Stream()
        # Default to ImageNet channel statistics when none are given.
        if mean is None:
            mean = [0.485, 0.456, 0.406]
        if std is None:
            std = [0.229, 0.224, 0.225]
        # Scale by 255 because fast_collate delivers raw uint8 pixel values.
        mean = np.array(mean) * 255
        std = np.array(std) * 255
        # Shape (1, 3, 1, 1) so they broadcast over an NCHW batch.
        self.mean = torch.tensor(mean).cuda().view(1,3,1,1)
        self.std = torch.tensor(std).cuda().view(1,3,1,1)
        cutout_dtype = np.float32
        if args.fp16:
            self.mean = self.mean.half()
            self.std = self.std.half()
            cutout_dtype = np.float16
        else:
            self.mean = self.mean.float()
            self.std = self.std.float()
        self.cutout = None
        if cutout:
            self.cutout = utils.BatchCutout(cutout_length, cutout_cuts, dtype=cutout_dtype)
        # Stage the first batch so the first next() call has data ready.
        self.preload()
    def preload(self):
        """Fetch the next CPU batch and kick off its async GPU preparation."""
        try:
            self.next_input, self.next_target = next(self.loader)
        except StopIteration:
            # Loader exhausted: next() will hand back (None, None).
            self.next_input = None
            self.next_target = None
            return
        # All GPU work below is queued on the side stream, not the default one.
        with torch.cuda.stream(self.stream):
            self.next_input = self.next_input.cuda(non_blocking=True)
            self.next_target = self.next_target.cuda(non_blocking=True)
            if args.fp16:
                self.next_input = self.next_input.half()
            else:
                self.next_input = self.next_input.float()
            # Normalize in place: (x - mean) / std, still on the side stream.
            self.next_input = self.next_input.sub_(self.mean).div_(self.std)
            if self.cutout is not None:
                # TODO(ahundt) Fix performance of this cutout call, it makes batch loading time go from 0.001 seconds to 0.05 seconds.
                self.next_input = self.cutout(self.next_input)
    def next(self):
        """Return the staged (input, target) pair and begin staging the next one."""
        # Block the default stream until the side-stream copy has finished.
        torch.cuda.current_stream().wait_stream(self.stream)
        input = self.next_input
        target = self.next_target
        self.preload()
        return input, target
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch and return a dict of 'train_'-prefixed stats.

    Uses data_prefetcher for GPU-side batch staging, supports the auxiliary
    tower loss, apex fp16 backward, and distributed metric averaging.
    Raises ValueError when the loader yields fewer than 2 batches.
    """
    loader_len = len(train_loader)
    if loader_len < 2:
        raise ValueError('train_loader only supports 2 or more batches and loader_len: ' + str(loader_len))
    batch_time = AverageMeter()
    data_time = AverageMeter()
    speed = AverageMeter()
    losses = AverageMeter()
    top1m = AverageMeter()
    top5m = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    # NOTE(review): args.mean/args.std are not argparse flags defined above;
    # presumably attached elsewhere (e.g. utils.initialize_files_and_args) —
    # TODO confirm.
    prefetcher = data_prefetcher(train_loader, mean=args.mean, std=args.std, cutout=args.cutout, cutout_length=args.cutout_length)
    input, target = prefetcher.next()
    i = -1
    # Only rank 0 draws a progress bar.
    if args.local_rank == 0:
        progbar = tqdm(total=len(train_loader), leave=False, dynamic_ncols=True)
    else:
        progbar = None
    while input is not None:
        i += 1
        # scheduler in main now adjusts the lr
        # adjust_learning_rate(optimizer, epoch, i, len(train_loader))
        if args.prof:
            # Profiling mode: bail out after ~10 iterations.
            if i > 10:
                break
        # measure data loading time
        data_time.update(time.time() - end)
        # compute output
        # output = model(input)
        # loss = criterion(output, target)
        # note here the term output is equivalent to logits
        output, logits_aux = model(input)
        loss = criterion(output, target)
        if logits_aux is not None and args.auxiliary:
            # Weighted auxiliary-tower loss added to the main loss.
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight * loss_aux
        # measure accuracy and record loss
        top1f, top5f = accuracy(output.data, target, topk=(1, 5))
        if args.distributed:
            # Average metrics across workers before recording them.
            reduced_loss = reduce_tensor(loss.data)
            top1f = reduce_tensor(top1f)
            top5f = reduce_tensor(top5f)
        else:
            reduced_loss = loss.data
        losses.update(to_python_float(reduced_loss), input.size(0))
        top1m.update(to_python_float(top1f), input.size(0))
        top5m.update(to_python_float(top5f), input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        if args.fp16:
            # FP16_Optimizer scales the loss internally before backward.
            optimizer.backward(loss)
        else:
            loss.backward()
        optimizer.step()
        torch.cuda.synchronize()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        input, target = prefetcher.next()
        if args.local_rank == 0:
            progbar.update()
        if args.local_rank == 0 and i % args.print_freq == 0 and i > 1:
            speed.update(args.world_size * args.batch_size / batch_time.val)
            progbar.set_description(
                # 'Epoch: [{0}][{1}/{2}]\t'
                'Train (cur/avg) '
                'batch_t: {batch_time.val:.3f}/{batch_time.avg:.3f}, '
                'img/s: {0:.1f}/{1:.1f} '
                'load_t: {data_time.val:.3f}/{data_time.avg:.3f}, '
                'loss: {loss.val:.4f}/{loss.avg:.4f}, '
                'top1: {top1.val:.2f}/{top1.avg:.2f}, '
                'top5: {top5.val:.2f}/{top5.avg:.2f}, prog'.format(
                    # epoch, i, len(train_loader),
                    speed.val,
                    speed.avg,
                    batch_time=batch_time,
                    data_time=data_time, loss=losses, top1=top1m, top5=top5m))
    stats = {}
    prefix = 'train_'
    stats = get_stats(progbar, prefix, args, batch_time, data_time, top1m, top5m, losses, speed)
    if progbar is not None:
        progbar.close()
        del progbar
    return stats
def get_stats(progbar, prefix, args, batch_time, data_time, top1, top5, losses, speed):
    """Assemble a dict of epoch metrics, each key prefixed with `prefix`.

    When a tqdm progress bar is supplied, its postfix stats are folded in
    first; the meter averages below are then added on top. Every value is
    a pre-formatted string, ready for logging / JSON / CSV output.
    """
    if progbar is None:
        stats = {}
    else:
        stats = utils.tqdm_stats(progbar, prefix=prefix)
    # Fixed-precision summaries of the AverageMeter accumulators.
    summary = {
        'time_step_wall': '{0:.3f}'.format(args.world_size * args.batch_size / batch_time.avg),
        'batch_time_one_gpu': '{0:.3f}'.format(batch_time.avg),
        'data_time': '{0:.3f}'.format(data_time.avg),
        'top1': '{0:.3f}'.format(top1.avg),
        'top5': '{0:.3f}'.format(top5.avg),
        'loss': '{0:.4f}'.format(losses.avg),
        'images_per_second': '{0:.4f}'.format(speed.avg),
    }
    stats.update({prefix + key: value for key, value in summary.items()})
    return stats
def validate(val_loader, model, criterion, args):
    """Score one pass over the validation set.

    Returns (top1 average, dict of 'val_'-prefixed stats). Mirrors train()
    but runs under torch.no_grad() in eval mode and takes no optimizer step.
    Raises ValueError when the loader yields fewer than 2 batches.
    """
    loader_len = len(val_loader)
    if loader_len < 2:
        raise ValueError('val_loader only supports 2 or more batches and loader_len: ' + str(loader_len))
    batch_time = AverageMeter()
    data_time = AverageMeter()
    speed = AverageMeter()
    losses = AverageMeter()
    top1m = AverageMeter()
    top5m = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    prefetcher = data_prefetcher(val_loader, mean=args.mean, std=args.std)
    input, target = prefetcher.next()
    i = -1
    # Only rank 0 draws a progress bar.
    if args.local_rank == 0:
        progbar = tqdm(total=loader_len)
    else:
        progbar = None
    while input is not None:
        i += 1
        # measure data loading time
        data_time.update(time.time() - end)
        # compute output
        with torch.no_grad():
            # output = model(input)
            # loss = criterion(output, target)
            # note here the term output is equivalent to logits
            output, _ = model(input)
            loss = criterion(output, target)
        # measure accuracy and record loss
        top1f, top5f = accuracy(output.data, target, topk=(1, 5))
        if args.distributed:
            # Average metrics across workers before recording them.
            reduced_loss = reduce_tensor(loss.data)
            top1f = reduce_tensor(top1f)
            top5f = reduce_tensor(top5f)
        else:
            reduced_loss = loss.data
        losses.update(to_python_float(reduced_loss), input.size(0))
        top1m.update(to_python_float(top1f), input.size(0))
        top5m.update(to_python_float(top5f), input.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if args.local_rank == 0:
            progbar.update()
        if args.local_rank == 0 and i % args.print_freq == 0:
            speed.update(args.world_size * args.batch_size / batch_time.val)
            progbar.set_description(
                # 'Test: [{0}/{1}]\t'
                'Valid (cur/avg) '
                'batch_t: {batch_time.val:.3f}/{batch_time.avg:.3f}, '
                'img/s: {0:.1f}/{1:.1f}, '
                'loss: {loss.val:.4f}/{loss.avg:.4f}, '
                'top1: {top1.val:.2f}/{top1.avg:.2f}, '
                'top5: {top5.val:.2f}/{top5.avg:.2f}, prog'.format(
                    # i, len(val_loader),
                    speed.val,
                    speed.avg,
                    batch_time=batch_time, loss=losses,
                    top1=top1m, top5=top5m))
        input, target = prefetcher.next()
    # logger.info(' * top1 {top1.avg:.3f} top5 {top5.avg:.3f}'
    #             .format(top1=top1, top5=top5))
    prefix = 'val_'
    stats = get_stats(progbar, prefix, args, batch_time, data_time, top1m, top5m, losses, speed)
    if progbar is not None:
        progbar.close()
        del progbar
    return top1m.avg, stats
def save_checkpoint(state, is_best, path='', filename='checkpoint.pth.tar', best_filename='model_best.pth.tar'):
    """Serialize `state` to path/filename; duplicate it as the best model when `is_best`."""
    checkpoint_file = os.path.join(path, filename)
    torch.save(state, checkpoint_file)
    if not is_best:
        return
    # Keep a separate copy of the best checkpoint so later epochs don't clobber it.
    shutil.copyfile(checkpoint_file, os.path.join(path, best_filename))
class AverageMeter(object):
    """Tracks the most recent value plus a running count, sum, and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, step, len_epoch):
    """LR schedule that should yield 76% converged accuracy with batch size 256.

    Steps the module-level args.learning_rate down 10x every 30 epochs (one
    extra drop from epoch 80), with a per-step linear warmup over the first
    5 epochs, then writes the result into every optimizer param group.
    """
    decay_steps = epoch // 30
    if epoch >= 80:
        decay_steps = decay_steps + 1
    lr = args.learning_rate * (0.1 ** decay_steps)
    # Linear warmup: ramp from ~0 to the scheduled lr across the first 5 epochs.
    if epoch < 5:
        lr = lr * float(1 + step + epoch * len_epoch) / (5. * len_epoch)
    # if(args.local_rank == 0):
    #     logger.info("epoch = {}, step = {}, lr = {}".format(epoch, step, lr))
    for group in optimizer.param_groups:
        group['lr'] = lr
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, classes) score/logit tensor.
        target: (batch,) tensor of class indices.
        topk: iterable of k values to report.

    Returns:
        List of 1-element tensors, one per k, each holding the percentage of
        samples whose target appears in the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): correct[:k] comes from a transposed
        # tensor and may be non-contiguous, which makes view() raise
        # "view size is not compatible with input tensor's size and stride".
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def reduce_tensor(tensor):
    """Return the mean of `tensor` across all distributed workers.

    Clones first so the caller's tensor is untouched, all-reduces the clone
    with a SUM, then divides by the module-level args.world_size. Must only
    be called after torch.distributed.init_process_group.
    """
    rt = tensor.clone()
    # dist.reduce_op was a long-deprecated alias that newer torch releases
    # remove; dist.ReduceOp is the supported spelling of the same enum.
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= args.world_size
    return rt
# Standard entry-point guard so importing this module does not start training.
if __name__ == '__main__':
    main()
| 34,031 | 42.352866 | 365 | py |
sharpDARTS | sharpDARTS-master/cnn/cifar10_1.py | # Source: https://github.com/kharvd/cifar-10.1-pytorch
# License: MIT
import io
import os
import os.path
import pickle
import numpy as np
from PIL import Image
import torch.utils.data as data
from torchvision.datasets.utils import download_url, check_integrity
def load_new_test_data(root, version='default'):
    """Load the CIFAR-10.1 images and labels stored as .npy files under `root`.

    Args:
        root: directory containing 'cifar10.1[-v0]-data.npy' and the matching
            '-labels.npy' file.
        version: 'default' (2000 samples) or 'v0' (2021 samples).

    Returns:
        (imagedata, labels): uint-image array of shape (N, 32, 32, 3) and an
        int64 label vector of length N.

    Raises:
        ValueError: for an unrecognized `version` string.
    """
    filename = 'cifar10.1'
    if version == 'v0':
        filename += '-v0'
    elif version != 'default':
        raise ValueError('Unknown dataset version "{}".'.format(version))
    label_filepath = os.path.join(root, filename + '-labels.npy')
    imagedata_filepath = os.path.join(root, filename + '-data.npy')
    labels = np.load(label_filepath).astype(np.int64)
    imagedata = np.load(imagedata_filepath)
    # Sanity-check the on-disk arrays before handing them to callers.
    assert len(labels.shape) == 1
    assert len(imagedata.shape) == 4
    assert labels.shape[0] == imagedata.shape[0]
    assert imagedata.shape[1] == 32
    assert imagedata.shape[2] == 32
    assert imagedata.shape[3] == 3
    if version == 'default':
        assert labels.shape[0] == 2000
    elif version == 'v0':
        assert labels.shape[0] == 2021
    return imagedata, labels
class CIFAR10_1(data.Dataset):
    """torch Dataset wrapper for the CIFAR-10.1 near-distribution test set.

    Downloads (optionally) and serves the 2000-image v6 release from the
    modestyachts/CIFAR-10.1 repository, exposing the same interface as
    torchvision's CIFAR10 dataset (PIL images, class_to_idx, targets).
    """
    # Download locations and md5 checksums for the v6 data/label .npy files.
    images_url = 'https://github.com/modestyachts/CIFAR-10.1/raw/master/datasets/cifar10.1_v6_data.npy'
    images_md5 = '29615bb88ff99bca6b147cee2520f010'
    images_filename = 'cifar10.1-data.npy'
    labels_url = 'https://github.com/modestyachts/CIFAR-10.1/raw/master/datasets/cifar10.1_v6_labels.npy'
    labels_md5 = 'a27460fa134ae91e4a5cb7e6be8d269e'
    labels_filename = 'cifar10.1-labels.npy'
    # Class names in label-index order (matches CIFAR-10).
    classes = [
        'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
        'ship', 'truck'
    ]
    @property
    def targets(self):
        """Alias for labels, matching torchvision's CIFAR10 attribute name."""
        return self.labels
    def __init__(self,
                 root,
                 transform=None,
                 target_transform=None,
                 download=False):
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        images, labels = load_new_test_data(root)
        self.data = images
        self.labels = labels
        self.class_to_idx = {
            _class: i
            for i, _class in enumerate(self.classes)
        }
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], self.labels[index]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
    def __len__(self):
        """Return the number of samples in the dataset."""
        return len(self.data)
    def _check_integrity(self):
        """Return True when both .npy files exist and match their md5 sums."""
        data_path = os.path.join(self.root, self.images_filename)
        labels_path = os.path.join(self.root, self.labels_filename)
        return (check_integrity(data_path, self.images_md5) and
                check_integrity(labels_path, self.labels_md5))
    def download(self):
        """Fetch both .npy files into self.root unless already present and valid."""
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        root = self.root
        download_url(self.images_url, root, self.images_filename, self.images_md5)
        download_url(self.labels_url, root, self.labels_filename, self.labels_md5)
    def __repr__(self):
        """Multi-line summary mirroring torchvision dataset __repr__ format."""
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(
            tmp,
            self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(
            tmp,
            self.target_transform.__repr__().replace('\n',
                                                     '\n' + ' ' * len(tmp)))
        return fmt_str
sharpDARTS | sharpDARTS-master/cnn/architect.py | import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
def _concat(xs):
return torch.cat([x.view(-1) for x in xs])
class Architect(object):
    """Performs the DARTS architecture-parameter (alpha) update step.

    Supports both the first-order approximation (a plain Adam step on the
    validation loss w.r.t. alpha) and the second-order "unrolled" update,
    which differentiates through one virtual SGD step of the network
    weights using a finite-difference Hessian-vector product.
    """
    def __init__(self, model, args):
        self.network_momentum = args.momentum
        self.network_weight_decay = args.weight_decay
        self.model = model
        # Adam over the architecture parameters only, not the network weights.
        self.optimizer = torch.optim.Adam(self.model.arch_parameters(),
                                          lr=args.arch_learning_rate, betas=(0.5, 0.999), weight_decay=args.arch_weight_decay)
    def _compute_unrolled_model(self, input, target, eta, network_optimizer):
        """Return a copy of the model after one virtual SGD-with-momentum step.

        Computes w' = w - eta * (momentum_buffer * mu + dL_train/dw + wd * w)
        on the flattened weight vector, then rebuilds a model from w'.
        """
        loss = self.model._loss(input, target)
        theta = _concat(self.model.parameters()).data
        try:
            moment = _concat(network_optimizer.state[v]['momentum_buffer'] for v in self.model.parameters()).mul_(self.network_momentum)
        except:
            # No momentum buffers yet (before the first optimizer step):
            # treat the momentum term as zero.
            moment = torch.zeros_like(theta)
        dtheta = _concat(torch.autograd.grad(loss, self.model.parameters())).data + self.network_weight_decay*theta
        # NOTE(review): two-argument Tensor.sub(scalar, tensor) is the
        # deprecated alpha form; modern torch spells it
        # theta.sub(moment+dtheta, alpha=eta) — confirm torch version.
        unrolled_model = self._construct_model_from_theta(theta.sub(eta, moment+dtheta))
        return unrolled_model
    def step(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer, unrolled):
        """Take one Adam step on the architecture parameters.

        `unrolled` selects the second-order update; otherwise the gradient
        of the validation loss is used directly (first-order DARTS).
        """
        self.optimizer.zero_grad()
        if unrolled:
            self._backward_step_unrolled(input_train, target_train, input_valid, target_valid, eta, network_optimizer)
        else:
            self._backward_step(input_valid, target_valid)
        self.optimizer.step()
    def _backward_step(self, input_valid, target_valid):
        """First-order update: backprop the validation loss into alpha."""
        loss = self.model._loss(input_valid, target_valid)
        loss.backward()
    def _backward_step_unrolled(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer):
        """Second-order update: grad of valid loss at the unrolled weights,
        corrected by eta * (approximate Hessian-vector product)."""
        unrolled_model = self._compute_unrolled_model(input_train, target_train, eta, network_optimizer)
        unrolled_loss = unrolled_model._loss(input_valid, target_valid)
        unrolled_loss.backward()
        dalpha = [v.grad for v in unrolled_model.arch_parameters()]
        vector = [v.grad.data for v in unrolled_model.parameters()]
        implicit_grads = self._hessian_vector_product(vector, input_train, target_train)
        # dalpha <- dalpha - eta * implicit_grads (second-order correction).
        for g, ig in zip(dalpha, implicit_grads):
            g.data.sub_(eta, ig.data)
        # Copy the corrected gradients onto the live model's alpha params.
        for v, g in zip(self.model.arch_parameters(), dalpha):
            if v.grad is None:
                v.grad = Variable(g.data)
            else:
                v.grad.data.copy_(g.data)
    def _construct_model_from_theta(self, theta):
        """Build a fresh model whose weights are the flat vector `theta`."""
        model_new = self.model.new()
        model_dict = self.model.state_dict()
        # Slice theta back into per-parameter tensors by name.
        params, offset = {}, 0
        for k, v in self.model.named_parameters():
            v_length = np.prod(v.size())
            params[k] = theta[offset: offset+v_length].view(v.size())
            offset += v_length
        assert offset == len(theta)
        model_dict.update(params)
        model_new.load_state_dict(model_dict)
        return model_new.cuda()
    def _hessian_vector_product(self, vector, input, target, r=1e-2):
        """Finite-difference approximation of (d^2 L_train / dw dalpha) * vector.

        Perturbs the live model's weights by +/- R*vector in place, takes the
        alpha-gradient at both points, then restores the original weights.
        """
        R = r / _concat(vector).norm()
        for p, v in zip(self.model.parameters(), vector):
            p.data.add_(R, v)
        loss = self.model._loss(input, target)
        grads_p = torch.autograd.grad(loss, self.model.arch_parameters())
        # Move to w - R*v (i.e. subtract 2R from the +R point).
        for p, v in zip(self.model.parameters(), vector):
            p.data.sub_(2*R, v)
        loss = self.model._loss(input, target)
        grads_n = torch.autograd.grad(loss, self.model.arch_parameters())
        # Restore the original weights.
        for p, v in zip(self.model.parameters(), vector):
            p.data.add_(R, v)
        return [(x-y).div_(2*R) for x, y in zip(grads_p, grads_n)]
| 3,429 | 35.88172 | 130 | py |
sharpDARTS | sharpDARTS-master/cnn/train_imagenet.py | import os
import sys
import numpy as np
import time
import torch
import utils
import glob
import random
import logging
import argparse
import json
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from tqdm import tqdm
from model import NetworkImageNet as Network
import train
import autoaugment
import operations
# Command line interface for training a final model on ImageNet.
parser = argparse.ArgumentParser("imagenet")
parser.add_argument('--data', type=str, default='../data/imagenet/', help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='imagenet', help='which dataset, only option is imagenet')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.1, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-5, help='weight decay')
parser.add_argument('--report_freq', type=float, default=100, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=250, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--mid_channels', type=int, default=96, help='C_mid channels in choke SharpSepConv')
parser.add_argument('--layers', type=int, default=14, help='total number of layers')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--drop_path_prob', type=float, default=0, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='SHARP_DARTS', help='which architecture to use')
parser.add_argument('--grad_clip', type=float, default=5., help='gradient clipping')
parser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing')
parser.add_argument('--gamma', type=float, default=0.97, help='learning rate decay')
parser.add_argument('--decay_period', type=int, default=1, help='epochs between two learning rate decays')
parser.add_argument('--parallel', action='store_true', default=False, help='data parallelism')
parser.add_argument('--ops', type=str, default='OPS', help='which operations to use, options are OPS and DARTS_OPS')
parser.add_argument('--primitives', type=str, default='PRIMITIVES',
                    help='which primitive layers to use inside a cell search space,'
                    ' options are PRIMITIVES and DARTS_PRIMITIVES')
parser.add_argument('--flops', action='store_true', default=False, help='count flops and exit, aka floating point operations.')
args = parser.parse_args()
# Timestamped experiment directory; a copy of every *.py script is stored
# alongside the logs so the run can be reproduced.
args.save = 'eval-{}-{}-{}-{}'.format(time.strftime("%Y%m%d-%H%M%S"), args.save, args.dataset, args.arch)
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_file_path = os.path.join(args.save, 'log.txt')
logger = utils.logging_setup(log_file_path)
# Persist the parsed command line for later inspection / reproduction.
params_path = os.path.join(args.save, 'commandline_args.json')
with open(params_path, 'w') as f:
  json.dump(vars(args), f)
# Number of ImageNet classes.
CLASSES = 1000
class CrossEntropyLabelSmooth(nn.Module):
  """Cross entropy against label-smoothed targets.

  The true class receives weight (1 - epsilon) + epsilon / num_classes and
  every other class receives epsilon / num_classes.
  """

  def __init__(self, num_classes, epsilon):
    super(CrossEntropyLabelSmooth, self).__init__()
    self.num_classes = num_classes
    self.epsilon = epsilon
    self.logsoftmax = nn.LogSoftmax(dim=1)

  def forward(self, inputs, targets):
    log_probs = self.logsoftmax(inputs)
    # Fill every class with the uniform smoothing mass, then overwrite the
    # true class with its smoothed weight: (1 - eps) + eps / C.
    smoothed = torch.full_like(log_probs, self.epsilon / self.num_classes)
    smoothed.scatter_(1, targets.unsqueeze(1), 1.0 - self.epsilon + self.epsilon / self.num_classes)
    return (-smoothed * log_probs).mean(0).sum()
def main():
  """Train the final ImageNet model specified by args.arch.

  Builds the network from the selected genotype, trains with SGD + step LR
  decay for args.epochs epochs, checkpoints every epoch, and keeps the
  weights of the best validation-accuracy epoch in weights.pt.
  """
  if not torch.cuda.is_available():
    logger.info('no gpu device available')
    sys.exit(1)
  # Seed every RNG we rely on for reproducibility.
  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logger.info('gpu device = %d' % args.gpu)
  logger.info("args = %s", args)
  # Resolve the requested operation dictionary, primitive list, and genotype
  # by name. NOTE(review): eval() on user-supplied flag strings is fine for a
  # research CLI, but never expose these flags to untrusted input.
  op_dict_to_load = "operations.%s" % args.ops
  logger.info('loading op dict: ' + str(op_dict_to_load))
  op_dict = eval(op_dict_to_load)
  # load the correct primitives list
  primitives_to_load = "genotypes.%s" % args.primitives
  logger.info('loading primitives:' + primitives_to_load)
  primitives = eval(primitives_to_load)
  logger.info('primitives: ' + str(primitives))
  genotype = eval("genotypes.%s" % args.arch)
  cnn_model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype, op_dict=op_dict, C_mid=args.mid_channels)
  if args.parallel:
    cnn_model = nn.DataParallel(cnn_model).cuda()
  else:
    cnn_model = cnn_model.cuda()
  logger.info("param size = %fMB", utils.count_parameters_in_MB(cnn_model))
  if args.flops:
    # Report the flop count only, then exit.
    cnn_model.drop_path_prob = 0.0
    logger.info("flops = " + utils.count_model_flops(cnn_model, data_shape=[1, 3, 224, 224]))
    exit(1)
  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  # Label-smoothed criterion built for parity with the paper's training setup.
  criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
  criterion_smooth = criterion_smooth.cuda()
  optimizer = torch.optim.SGD(
    cnn_model.parameters(),
    args.learning_rate,
    momentum=args.momentum,
    weight_decay=args.weight_decay
    )
  traindir = os.path.join(args.data, 'train')
  validdir = os.path.join(args.data, 'val')
  normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  train_data = dset.ImageFolder(
    traindir,
    transforms.Compose([
      transforms.RandomResizedCrop(224),
      transforms.RandomHorizontalFlip(),
      autoaugment.ImageNetPolicy(),
      transforms.ToTensor(),
      normalize,
    ]))
  valid_data = dset.ImageFolder(
    validdir,
    transforms.Compose([
      transforms.Resize(256),
      transforms.CenterCrop(224),
      transforms.ToTensor(),
      normalize,
    ]))
  train_queue = torch.utils.data.DataLoader(
    train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=8)
  valid_queue = torch.utils.data.DataLoader(
    valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=8)
  scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)
  prog_epoch = tqdm(range(args.epochs), dynamic_ncols=True)
  best_valid_acc = 0.0
  best_epoch = 0
  best_stats = {}
  weights_file = os.path.join(args.save, 'weights.pt')
  for epoch in prog_epoch:
    # Legacy ordering: the LR schedule is stepped before training each epoch.
    scheduler.step()
    # Linearly ramp the drop-path probability over the course of training.
    cnn_model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
    train_acc, train_obj = train.train(args, train_queue, cnn_model, criterion, optimizer)
    stats = train.infer(args, valid_queue, cnn_model, criterion)
    is_best = False
    if stats['valid_acc'] > best_valid_acc:
      # new best epoch, save weights
      utils.save(cnn_model, weights_file)
      best_epoch = epoch
      best_valid_acc = stats['valid_acc']
      best_stats = stats
      best_stats['lr'] = scheduler.get_lr()[0]
      best_stats['epoch'] = best_epoch
      is_best = True
    logger.info('epoch, %d, train_acc, %f, valid_acc, %f, train_loss, %f, valid_loss, %f, lr, %e, best_epoch, %d, best_valid_acc, %f, ' + utils.dict_to_log_string(stats),
                epoch, train_acc, stats['valid_acc'], train_obj, stats['valid_loss'], scheduler.get_lr()[0], best_epoch, best_valid_acc)
    checkpoint = {
      'epoch': epoch,
      'state_dict': cnn_model.state_dict(),
      'best_acc_top1': best_valid_acc,
      'optimizer' : optimizer.state_dict(),
      }
    checkpoint.update(stats)
    # Bug fix: `stats` (metrics only) used to be passed here, so the saved
    # checkpoint never contained the model or optimizer state.
    utils.save_checkpoint(checkpoint, is_best, args.save)
  best_epoch_str = utils.dict_to_log_string(best_stats, key_prepend='best_')
  logger.info(best_epoch_str)
  logger.info('Training of Final Model Complete! Save dir: ' + str(args.save))
if __name__ == '__main__':
  # Script entry point; argument parsing and logger setup run at import time above.
  main()
| 8,489 | 38.859155 | 170 | py |
sharpDARTS | sharpDARTS-master/cnn/visualize_whole_network.py | # Example commands to run:
#
# python3 visualize_whole_network.py --multi_channel
#
# python3 visualize_whole_network.py --dataset imagenet --arch SHARP_DARTS --auxiliary
#
# Set matplotlib backend to Agg
# *MUST* be done BEFORE importing hiddenlayer or libs that import matplotlib
import matplotlib
matplotlib.use("Agg")
import os
import torch
# import networkx
import model_search
import argparse
import os
import shutil
import time
import glob
import json
import copy
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision
import numpy as np
import random
from model import NetworkImageNet as NetworkImageNet
from model import NetworkCIFAR as NetworkCIFAR
from tqdm import tqdm
import dataset
import genotypes
import autoaugment
import operations
import utils
import warmup_scheduler
from cosine_power_annealing import cosine_power_annealing
# requires https://github.com/waleedka/hiddenlayer
import hiddenlayer as hl
parser = argparse.ArgumentParser("Common Argument Parser")
parser.add_argument('--arch', '-a', metavar='ARCH', default='multi_channel',
# choices=model_names,
help='model architecture: (default: multi_channel, other options are SHARP_DARTS and DARTS). '
'multi_channel is for multi channel search, a completely separate search space.')
parser.add_argument('--ops', type=str, default='OPS', help='which operations to use, options are OPS and DARTS_OPS')
parser.add_argument('--primitives', type=str, default='PRIMITIVES',
help='which primitive layers to use inside a cell search space,'
' options are PRIMITIVES and DARTS_PRIMITIVES')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--mid_channels', type=int, default=96, help='C_mid channels in choke SharpSepConv')
parser.add_argument('--layers', type=int, default=14, help='total number of layers')
parser.add_argument('--dataset', type=str, default='cifar10', help='which dataset, imagenet or cifar10')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
args = parser.parse_args()
print('args.arch: ' + str(args.arch))
if args.arch != 'multi_channel':
# # load the correct ops dictionary
op_dict_to_load = "operations.%s" % args.ops
print('loading op dict: ' + str(op_dict_to_load))
op_dict = eval(op_dict_to_load)
# load the correct primitives list
primitives_to_load = "genotypes.%s" % args.primitives
print('loading primitives:' + primitives_to_load)
primitives = eval(primitives_to_load)
print('primitives: ' + str(primitives))
# create model
genotype = eval("genotypes.%s" % args.arch)
# get the number of output channels
classes = dataset.class_dict[args.dataset]
# create the neural network
print('initializing module')
if args.arch == 'multi_channel':
cnn_model = model_search.MultiChannelNetwork(always_apply_ops=True, layers=4, steps=2, visualization=True, genotype=None)
elif args.dataset == 'imagenet':
cnn_model = NetworkImageNet(args.init_channels, classes, args.layers, args.auxiliary, genotype, op_dict=op_dict, C_mid=args.mid_channels)
# workaround for graph generation limitations
cnn_model.drop_path_prob = torch.zeros(1)
else:
cnn_model = NetworkCIFAR(args.init_channels, classes, args.layers, args.auxiliary, genotype, op_dict=op_dict, C_mid=args.mid_channels)
# workaround for graph generation limitations
cnn_model.drop_path_prob = torch.zeros(1)
transforms = [
hl.transforms.Fold('MaxPool3x3 > Conv1x1 > BatchNorm', 'ResizableMaxPool', 'ResizableMaxPool'),
hl.transforms.Fold('MaxPool > Conv > BatchNorm', 'ResizableMaxPool', 'ResizableMaxPool'),
hl.transforms.Fold('Relu > Conv > Conv > BatchNorm', 'ReluSepConvBn'),
hl.transforms.Fold('ReluSepConvBn > ReluSepConvBn', 'SharpSepConv', 'SharpSepConv'),
hl.transforms.Fold('Relu > Conv > BatchNorm', 'ReLUConvBN'),
hl.transforms.Fold('Relu > Conv1x1 > BatchNorm', 'ReLUConv1x1BN'),
hl.transforms.Prune('Constant'),
hl.transforms.Prune('Gather'),
hl.transforms.Prune('Unsqueeze'),
hl.transforms.Prune('Concat'),
hl.transforms.Prune('Shape'),
# Fold repeated blocks
hl.transforms.FoldDuplicates(),
hl.transforms.Fold('Relu > Conv > BatchNorm', 'ReLUConvBN'),
hl.transforms.Fold('Relu > Conv1x1 > BatchNorm', 'ReLUConv1x1BN'),
]
print('building graph')
# WARNING: the code may hang here. These are instructions for a workaround:
# First install hiddenlayer from source:
#
# cd ~/src
# git clone https://github.com/waleedka/hiddenlayer.git
# cd hiddenlayer
# pip3 install --user --upgrade -e .
#
# Next open the file /hiddenlayer/hiddenlayer/pytorch_builder.py
#
# change:
# torch.onnx._optimize_trace(trace, torch.onnx.OperatorExportTypes.ONNX)
# to
# torch.onnx._optimize_trace(trace, torch.onnx.OperatorExportTypes.RAW)
#
# The graph is very large so building the graph will take a long time.
# Note that at the time of writing the graph algorithms can't handle multiplying by a constant.
# Instead, I added if statements that skip the weight component if it is in visualization mode.
#
# For progress bars go back to /hiddenlayer/hiddenlayer/pytorch_builder.py:
# at the top add:
# import tqdm as tqdm
#
# Then in:
#
# def import_graph()
#
# find all instances of:
#
# torch_graph.nodes()
#
# and replace with:
#
# tqdm(torch_graph.nodes())
#
if args.dataset == 'imagenet':
input_batch = torch.zeros([2, 3, 224, 224])
elif args.dataset == 'cifar10':
input_batch = torch.zeros([2, 3, 32, 32])
# print(input_batch)
cnn_graph = hl.build_graph(cnn_model, input_batch, transforms=transforms)
output_file = os.path.expanduser('~/src/darts/cnn/' + args.arch + '_network.pdf')
print('build complete, saving: ' + output_file)
cnn_graph.save(output_file)
print('save complete') | 6,350 | 38.447205 | 141 | py |
sharpDARTS | sharpDARTS-master/cnn/utils.py | # Some data loading code is from https://github.com/DRealArun/darts/ with the same license as darts.
import os
import time
import numpy as np
import logging
import torch
import shutil
import argparse
import glob
import json
import csv
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.nn.functional as F
from six import iteritems
from tqdm import tqdm
import colorlog
import autoaugment
import flops_counter
class NumpyEncoder(json.JSONEncoder):
  """JSON encoder that serializes numpy scalar and array types.

  source: https://stackoverflow.com/a/49677241/99379
  """
  def default(self, obj):
    # The abstract np.integer / np.floating bases cover every sized scalar
    # type and remain valid on numpy 2.0, where aliases such as np.float_
    # were removed.
    if isinstance(obj, np.integer):
      return int(obj)
    elif isinstance(obj, np.floating):
      return float(obj)
    elif isinstance(obj, np.ndarray):
      return obj.tolist()
    return json.JSONEncoder.default(self, obj)
def tqdm_stats(progbar, prefix=''):
  """ Very brittle function to extract timing stats from tqdm.
  Replace when https://github.com/tqdm/tqdm/issues/562 is resolved.

  Example of key string component that will be read:
  3/3 [00:00<00:00, 12446.01it/s]
  """
  text = str(progbar)
  # Drop everything up to and including the progress-bar section.
  text = text[text.find("| ") + 1:]
  slash = text.find('/')
  bracket = text.find('[')
  stats = {
      prefix + 'current_step': text[:slash].strip(' '),
      prefix + 'total_steps': text[slash + 1:bracket].strip(' '),
      prefix + 'time_elapsed': text[bracket + 1:text.find('<')].strip(' '),
      prefix + 'time_remaining': text[text.find('<') + 1:text.find(',')].strip(' '),
      prefix + 'step_time': text[text.find(', ') + 1:text.find(']')].strip(' '),
  }
  if '%' in text:
    stats[prefix + 'percent_complete'] = text[:text.find('%')].strip(' ')
  return stats
def dict_to_log_string(log={}, separator=', ', key_prepend=''):
  """Flatten a dict into 'key, value, key, value, ...' joined by separator.

  key_prepend is prefixed to every key, e.g. 'best_' for best-epoch stats.
  """
  return separator.join(
      piece
      for key, value in log.items()
      for piece in (key_prepend + str(key), str(value)))
class TqdmHandler(logging.StreamHandler):
  """Logging handler that emits records via tqdm.write so log lines are
  printed above any active tqdm progress bar instead of corrupting it."""
  def __init__(self):
    logging.StreamHandler.__init__(self)
  def emit(self, record):
    # Format with the handler's configured formatter, then let tqdm place it.
    msg = self.format(record)
    tqdm.write(msg)
def list_of_dicts_to_dict_of_lists(ld):
  """ list of dictionaries to dictionary of lists when all keys are the same.
  source: https://stackoverflow.com/a/23551944/99379
  """
  result = {}
  for key in ld[0].keys():
    result[key] = [entry[key] for entry in ld]
  return result
def list_of_dicts_to_csv(filename, list_of_dicts):
  """Write dicts sharing the same keys to a csv file.

  The keys of the first dict form the header row; the file is overwritten.
  """
  with open(filename, 'w') as f:
    # https://stackoverflow.com/a/10373268/99379
    writer = csv.DictWriter(f, list_of_dicts[0].keys())
    writer.writeheader()
    writer.writerows(list_of_dicts)
def list_to_csv(filename, l, write=True):
  """Append the list l as a single csv row to filename.

  When write is False, only print a summary instead of touching the file.
  """
  if not write:
    print('list_to_csv: Output list of length {} to file {}. Set `write` to True to actually write to file.'.format(len(l), filename))
    return
  with open(filename, 'a') as f:
    csv.writer(f).writerow(l)
def logging_setup(log_file_path):
  """Configure and return a logger that cooperates with tqdm progress bars.

  Records go both to the console (via TqdmHandler, so lines print above any
  active progress bar) and to the text file at log_file_path.
  """
  # based on https://github.com/tqdm/tqdm/issues/193#issuecomment-232887740
  logger = colorlog.getLogger("SQUARE")
  logger.setLevel(logging.DEBUG)
  log_format = colorlog.ColoredFormatter(
      '%(asctime)s %(message)s',
      datefmt='%Y_%m_%d_%H_%M_%S',
      log_colors={
          'DEBUG': 'cyan',
          'INFO': 'white',
          'SUCCESS:': 'green',
          'WARNING': 'yellow',
          'ERROR': 'red',
          'CRITICAL': 'red,bg_white'},)
  console_handler = TqdmHandler()
  console_handler.setFormatter(log_format)
  logger.addHandler(console_handler)
  file_handler = logging.FileHandler(log_file_path)
  file_handler.setFormatter(log_format)
  logger.addHandler(file_handler)
  return logger
class AvgrageMeter(object):
  """Track a running average of values (class-name spelling kept for compatibility)."""

  def __init__(self):
    self.reset()

  def reset(self):
    """Zero out the running statistics."""
    self.avg = 0
    self.sum = 0
    self.cnt = 0

  def update(self, val, n=1):
    """Accumulate `val` observed `n` times and refresh the average."""
    self.cnt += n
    self.sum += val * n
    self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
  """Return top-k accuracies (as percentages) for a batch of predictions.

  output: (batch, classes) score tensor.
  target: (batch,) integer class labels.
  topk: tuple of k values; one percentage tensor is returned per k.
  """
  maxk = max(topk)
  batch_size = target.size(0)

  _, pred = output.topk(maxk, 1, True, True)
  pred = pred.t()
  correct = pred.eq(target.view(1, -1).expand_as(pred))

  res = []
  for k in topk:
    # reshape instead of view: the row slice of `correct` may be
    # non-contiguous in recent PyTorch versions, where view() raises.
    correct_k = correct[:k].reshape(-1).float().sum(0)
    res.append(correct_k.mul_(100.0/batch_size))
  return res
def random_eraser(input_img, p=0.66, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3, v_l=0, v_h=255, pixel_level=True):
  """ Cutout and random erasing augmentation, applied in place to one image.

  source:
  https://github.com/yu4u/cutout-random-erasing/blob/master/random_eraser.py
  modified for batch, channel, height, width dimension order, and so there are no while loop delays.

  With probability p, fills a random rectangle (area fraction in [s_l, s_h],
  aspect ratio in [r_1, r_2]) with random values in [v_l, v_h], per pixel when
  pixel_level is True.
  NOTE(review): the default v_h=255 assumes 0-255 pixel values -- pass a
  smaller v_h for normalized tensors; confirm against callers.
  NOTE(review): the pixel_level=False path passes a python float to
  torch.from_numpy, which would raise -- confirm that path is unused.
  """
  img_c, img_h, img_w = input_img.shape
  roll = np.random.rand()
  if roll > p:
    # Skip augmentation for this image.
    return input_img
  # Sample the patch area and aspect ratio, then its top-left corner.
  area = np.random.uniform(s_l, s_h) * img_h * img_w
  aspect = np.random.uniform(r_1, r_2)
  w = int(np.sqrt(area / aspect))
  h = int(np.sqrt(area * aspect))
  left = np.random.randint(0, img_w)
  top = np.random.randint(0, img_h)
  # Clip the patch so it fits inside the image border.
  w = np.clip(w, 0, img_w - left)
  h = np.clip(h, 0, img_h - top)
  if pixel_level:
    fill = np.random.uniform(v_l, v_h, (img_c, h, w))
  else:
    fill = np.random.uniform(v_l, v_h)
  fill = torch.from_numpy(fill)
  input_img[:, top:top + h, left:left + w] = fill
  return input_img
class Cutout(object):
  """Cutout
  Defaults to one Cutout hole.
  Note that for dual cutout two inference predictions must be made
  in your main loop with an additional loss between them.
  Dual cutout will affect performance substantially because two
  inferences losses and backprop steps must be done for each batch.
  Cutout: https://arxiv.org/abs/1708.04552
  Dual Cutout: https://arxiv.org/pdf/1802.07426
  Dual Cutout Code Example: https://github.com/Learning-and-Intelligent-Systems/Analytical-Learning-Theory
  """
  def __init__(self, length=16, cuts=1):
    self.length = length
    self.cuts = cuts

  def __call__(self, img):
    # Bug fix: isinstance against np.array (a function) raised TypeError for
    # any non-tensor input; np.ndarray is the actual type.
    if isinstance(img, (torch.Tensor, np.ndarray)):
      # torch or numpy image, channel-first (c, h, w)
      h, w = img.shape[1], img.shape[2]
    else:
      # PIL image
      # NOTE(review): PIL.Image.size is a (width, height) tuple, not callable,
      # and `img *= mask` below does not support PIL images either -- this
      # branch appears unsupported; confirm callers always pass tensors/arrays.
      h, w = img.size(1), img.size(2)
    mask = np.ones((h, w), np.float32)
    for _ in range(self.cuts):
      # Pick a random center; the square of side `length` is clipped at borders.
      y = np.random.randint(h)
      x = np.random.randint(w)

      y1 = np.clip(y - self.length // 2, 0, h)
      y2 = np.clip(y + self.length // 2, 0, h)
      x1 = np.clip(x - self.length // 2, 0, w)
      x2 = np.clip(x + self.length // 2, 0, w)

      mask[y1: y2, x1: x2] = 0.

    if isinstance(img, torch.Tensor):
      mask = torch.from_numpy(mask)
      mask = mask.expand_as(img)
    img *= mask
    return img
class BatchCutout(object):
  """Cutout applied independently to every image in a batch.

  Defaults to one Cutout hole per image. For dual cutout two inference
  predictions must be made in your main loop with an additional loss between
  them, which roughly doubles the cost per batch.
  Cutout: https://arxiv.org/abs/1708.04552
  Dual Cutout: https://arxiv.org/pdf/1802.07426
  Dual Cutout Code Example: https://github.com/Learning-and-Intelligent-Systems/Analytical-Learning-Theory
  """
  def __init__(self, length=16, cuts=1, dtype=np.float32, cuda=True):
    self.length = length
    self.cuts = cuts
    self.dtype = dtype
    self.cuda = cuda

  def __call__(self, img):
    batch, channels, height, width = img.shape
    mask = np.ones((batch, channels, height, width), self.dtype)
    half = self.length // 2
    for sample in range(batch):
      for _ in range(self.cuts):
        # Random center; the square of side `length` is clipped at borders.
        center_y = np.random.randint(height)
        center_x = np.random.randint(width)
        row_lo = np.clip(center_y - half, 0, height)
        row_hi = np.clip(center_y + half, 0, height)
        col_lo = np.clip(center_x - half, 0, width)
        col_hi = np.clip(center_x + half, 0, width)
        mask[sample, :, row_lo: row_hi, col_lo: col_hi] = 0.
    if isinstance(img, torch.Tensor):
      mask = torch.from_numpy(mask)
      mask = mask.expand_as(img)
      if self.cuda:
        mask = mask.cuda(non_blocking=True)
    img *= mask
    return img
# Function to fetch the transforms based on the dataset
def get_data_transforms(args, normalize_as_tensor=True):
  """Get the transforms for a specific dataset
  One side side effect args.std and args.mean are set.
  args: parser args. Expected to have random_eraser, cutout,
  and autoaugment member variables.
  normalize_as_tensor: when true the output will be converted
  to a tensor then normalization will be applied based on the
  dataset mean and std dev. Otherwise this step will be skipped
  entirely
  """
  print("get_data_transforms(): Getting ", args.dataset, " Transforms")
  # Dispatch table keyed on the dataset name.
  builders = {
      'cifar10': _data_transforms_cifar10,
      'mnist': _data_transforms_mnist,
      'emnist': _data_transforms_emnist,
      'fashion': _data_transforms_fashion,
      'svhn': _data_transforms_svhn,
      'stl10': _data_transforms_stl10,
      'devanagari': _data_transforms_devanagari,
      'imagenet': _data_transforms_imagenet,
  }
  if args.dataset in builders:
    return builders[args.dataset](args, normalize_as_tensor)
  assert False, "Cannot get Transform for dataset"
def finalize_transform(train_transform, valid_transform, args, normalize_as_tensor=True):
  """Append the transform steps shared by most augmentation regimes.

  When normalize_as_tensor is True, ToTensor + Normalize(args.mean, args.std)
  are appended to both pipelines. Cutout / random eraser (train only) require
  tensor input, so they only take effect in that mode, and cutout must come
  after normalization.
  """
  if normalize_as_tensor:
    for pipeline in (train_transform, valid_transform):
      pipeline.transforms.append(transforms.ToTensor())
      pipeline.transforms.append(
          transforms.Normalize(args.mean, args.std))
  # cutout should be after normalize
  if args.cutout:
    # note that this defaults to dual cutout
    train_transform.transforms.append(Cutout(args.cutout_length))
  if args.random_eraser:
    train_transform.transforms.append(random_eraser)
  return train_transform, valid_transform
# Transform defined for imagenet
def _data_transforms_imagenet(args, normalize_as_tensor=True):
  """Build (train, valid) transforms for ImageNet.

  Side effect: sets args.mean and args.std to the ImageNet statistics.
  Crop/resize sizes follow inception_v3 conventions when that arch is chosen.
  """
  IMAGENET_MEAN = [0.485, 0.456, 0.406]
  IMAGENET_STD = [0.229, 0.224, 0.225]
  args.mean = IMAGENET_MEAN
  # Bug fix: args.std was previously set to IMAGENET_MEAN, so downstream
  # Normalize() divided by the channel means instead of the std deviations.
  args.std = IMAGENET_STD
  if(args.arch == "inception_v3"):
    crop_size = 299
    val_size = 320  # nvidia author chose this value arbitrarily, we can adjust.
  else:
    crop_size = 224
    val_size = 256

  if args.autoaugment:
    train_transform = transforms.Compose([
      transforms.RandomResizedCrop(crop_size),
      transforms.RandomHorizontalFlip(),
      # cutout and autoaugment are used in the autoaugment paper
      autoaugment.ImageNetPolicy(),
    ])
  else:
    train_transform = transforms.Compose([
      transforms.RandomResizedCrop(crop_size),
      transforms.RandomHorizontalFlip(),
    ])
  valid_transform = transforms.Compose([
    transforms.Resize(val_size),
    transforms.CenterCrop(crop_size)
  ])
  return finalize_transform(train_transform, valid_transform, args, normalize_as_tensor)
# Transform defined for cifar-10
def _data_transforms_cifar10(args, normalize_as_tensor=True):
  """Build (train, valid) CIFAR-10 transforms; sets args.mean/args.std.

  The autoaugment branch uses Pad(4, fill=128) + crop per the AutoAugment
  reference implementation; otherwise a plain pad-crop-flip pipeline is used.
  ToTensor/Normalize are appended later by finalize_transform.
  """
  CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
  CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
  args.mean = CIFAR_MEAN
  args.std = CIFAR_STD
  if args.autoaugment:
    # Pad+fill supports autoaugment, see
    # https://github.com/DeepVoltaire/AutoAugment/issues/8
    aug_steps = [
        transforms.Pad(4, fill=128),
        transforms.RandomCrop(32, padding=0),
        transforms.RandomHorizontalFlip(),
        autoaugment.CIFAR10Policy(),
    ]
  else:
    aug_steps = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
    ]
  train_transform = transforms.Compose(aug_steps)
  valid_transform = transforms.Compose([])
  return finalize_transform(train_transform, valid_transform, args, normalize_as_tensor)
# Transform defined for mnist
def _data_transforms_mnist(args, normalize_as_tensor=True):
  """Build (train, valid) MNIST transforms; sets args.mean/args.std.

  NOTE(review): ToTensor/Normalize appear here *and* are appended again by
  finalize_transform when normalize_as_tensor=True -- confirm the double
  application on the training pipeline is intended.
  """
  mean = (0.1307,)
  std = (0.3081,)
  args.mean = mean
  args.std = std
  train_transform = transforms.Compose([
      transforms.RandomCrop(28, padding=4),
      transforms.RandomHorizontalFlip(),
      transforms.ToTensor(),
      transforms.Normalize(mean, std),
  ])
  valid_transform = transforms.Compose([])
  return finalize_transform(train_transform, valid_transform, args, normalize_as_tensor)
# Transform defined for fashion mnist
def _data_transforms_fashion(args, normalize_as_tensor=True):
  """Build (train, valid) Fashion-MNIST transforms; sets args.mean/args.std.

  NOTE(review): ToTensor/Normalize appear here *and* are appended again by
  finalize_transform when normalize_as_tensor=True -- confirm the double
  application on the training pipeline is intended.
  """
  mean = (0.2860405969887955,)
  std = (0.35302424825650003,)
  args.mean = mean
  args.std = std
  train_transform = transforms.Compose([
      transforms.RandomCrop(28, padding=4),
      transforms.RandomHorizontalFlip(),
      transforms.ToTensor(),
      transforms.Normalize(mean, std),
  ])
  valid_transform = transforms.Compose([])
  return finalize_transform(train_transform, valid_transform, args, normalize_as_tensor)
# Transform defined for emnist
def _data_transforms_emnist(args, normalize_as_tensor=True):
  """Build (train, valid) EMNIST transforms; sets args.mean/args.std.

  NOTE(review): ToTensor/Normalize appear here *and* are appended again by
  finalize_transform when normalize_as_tensor=True -- confirm the double
  application on the training pipeline is intended.
  """
  mean = (0.17510417052459282,)
  std = (0.33323714976320795,)
  args.mean = mean
  args.std = std
  train_transform = transforms.Compose([
      transforms.RandomCrop(28, padding=4),
      transforms.RandomHorizontalFlip(),
      transforms.ToTensor(),
      transforms.Normalize(mean, std),
  ])
  valid_transform = transforms.Compose([])
  return finalize_transform(train_transform, valid_transform, args, normalize_as_tensor)
# Transform defined for svhn
def _data_transforms_svhn(args, normalize_as_tensor=True):
  """Build (train, valid) SVHN transforms; sets args.mean/args.std.

  NOTE(review): ToTensor/Normalize appear here *and* are appended again by
  finalize_transform when normalize_as_tensor=True -- confirm the double
  application on the training pipeline is intended.
  """
  mean = [ 0.4376821, 0.4437697, 0.47280442]
  std = [ 0.19803012, 0.20101562, 0.19703614]
  args.mean = mean
  args.std = std
  train_transform = transforms.Compose([
      transforms.RandomCrop(32, padding=4),
      transforms.RandomHorizontalFlip(),
      transforms.ToTensor(),
      transforms.Normalize(mean, std),
  ])
  valid_transform = transforms.Compose([])
  return finalize_transform(train_transform, valid_transform, args, normalize_as_tensor)
# Transform defined for stl10
def _data_transforms_stl10(args, normalize_as_tensor=True):
  """Build (train, valid) STL-10 transforms (96x96); sets args.mean/args.std.

  NOTE(review): ToTensor/Normalize appear here *and* are appended again by
  finalize_transform when normalize_as_tensor=True -- confirm the double
  application on the training pipeline is intended.
  """
  mean = [ 0.44671062, 0.43980984, 0.40664645]
  std = [ 0.26034098, 0.25657727, 0.27126738]
  args.mean = mean
  args.std = std
  train_transform = transforms.Compose([
      transforms.RandomCrop(96, padding=4),
      transforms.RandomHorizontalFlip(),
      transforms.ToTensor(),
      transforms.Normalize(mean, std),
  ])
  valid_transform = transforms.Compose([])
  return finalize_transform(train_transform, valid_transform, args, normalize_as_tensor)
# Transform defined for devanagari hand written symbols
def _data_transforms_devanagari(args, normalize_as_tensor=True):
  """Build (train, valid) transforms for Devanagari handwritten symbols.

  Sets args.mean/args.std. The images already carry 2px padding at 32x32,
  hence the smaller crop padding.
  NOTE(review): ToTensor/Normalize appear here *and* are appended again by
  finalize_transform when normalize_as_tensor=True -- confirm the double
  application on the training pipeline is intended.
  """
  mean = (0.240004663268,)
  std = (0.386530114768,)
  args.mean = mean
  args.std = std
  train_transform = transforms.Compose([
      transforms.RandomCrop(32, padding=2),  # Already has padding 2 and size is 32x32
      transforms.RandomHorizontalFlip(),
      transforms.ToTensor(),
      transforms.Normalize(mean, std),
  ])
  valid_transform = transforms.Compose([])
  return finalize_transform(train_transform, valid_transform, args, normalize_as_tensor)
def count_parameters_in_MB(model):
  """Return the parameter count of `model` in millions, excluding auxiliary heads."""
  # builtin sum over the generator: calling np.sum on a generator is
  # deprecated and falls back to object arithmetic anyway.
  return sum(np.prod(v.size()) for name, v in model.named_parameters() if "auxiliary" not in name)/1e6
def count_model_flops(cnn_model, data_shape=[1, 3, 32, 32]):
  """Return a human-readable flop count string for one forward pass.

  Runs a single zero-filled batch of `data_shape` through a flop-counting
  wrapper of `cnn_model` (on GPU when available).
  NOTE(review): the initial `cnn_model_flops = cnn_model` assignment is
  immediately overwritten below -- it appears to be dead code.
  NOTE(review): the mutable default `data_shape` is shared across calls; it is
  only read here, but confirm callers never mutate it.
  """
  cnn_model_flops = cnn_model
  batch = torch.zeros(data_shape)
  if torch.cuda.is_available():
    batch = batch.cuda()
  cnn_model_flops = flops_counter.add_flops_counting_methods(cnn_model)
  cnn_model_flops.eval().start_flops_count()
  out = cnn_model_flops(batch)
  cnn_model_flops.stop_flops_count()
  flops_str = flops_counter.flops_to_string(cnn_model.compute_average_flops_cost())
  # Drop references so the wrapper and dummy batch can be freed promptly.
  del cnn_model_flops
  del batch
  return flops_str
def save_checkpoint(state, is_best, save):
  """Persist the latest checkpoint in `save`, copying it to model_best when improved."""
  latest_path = os.path.join(save, 'checkpoint.pth.tar')
  torch.save(state, latest_path)
  if is_best:
    shutil.copyfile(latest_path, os.path.join(save, 'model_best.pth.tar'))
def save(model, model_path):
  """Save only the model's state_dict (not the full module) to model_path."""
  torch.save(model.state_dict(), model_path)
def load(model, model_path):
  """Load a state_dict saved by save() into model, in place."""
  model.load_state_dict(torch.load(model_path))
def drop_path(x, drop_prob):
  """Randomly zero whole examples of batch `x` in place with probability
  drop_prob, scaling survivors by 1/keep_prob so the expectation is unchanged.
  """
  if drop_prob > 0.:
    keep_prob = 1.-drop_prob
    # Sample the mask on the same device/dtype as x so this also works on
    # CPU; torch.cuda.FloatTensor and autograd.Variable are deprecated.
    mask = torch.empty(x.size(0), 1, 1, 1, device=x.device, dtype=x.dtype).bernoulli_(keep_prob)
    x.div_(keep_prob)
    x.mul_(mask)
  return x
def create_exp_dir(path, scripts_to_save=None):
  """Create the experiment directory and optionally back up scripts into it.

  Each file in scripts_to_save is copied to <path>/scripts/<basename>.
  """
  if not os.path.exists(path):
    os.mkdir(path)
  print('Experiment dir : {}'.format(path))

  if scripts_to_save is not None:
    script_dir = os.path.join(path, 'scripts')
    os.mkdir(script_dir)
    for script in scripts_to_save:
      shutil.copyfile(script, os.path.join(script_dir, os.path.basename(script)))
def initialize_files_and_args(args, run_type='eval'):
    """Adds parameters to args and creates the folder for the log and weights with a code backup as needed.

    This function is pretty data loader and argument specific,
    and thus a bit brittle and not intended for general use.
    Loads args from a file if specified by the user, args may change substantially!
    This happens particularly when args.load_args or args.evaluate is set.
    Creates the log folder if it does not exist.

    Input:

        args.evaluate: empty string or path to a weights file to evaluate
        args.load_args: json file containing saved command line arguments which will be loaded.
        args.save: custom name to give the log folder so you know what this run is about.
        args.gpu: the integer id of the gpu on which to run.
        args.dataset: a string with the name of the dataset.
        args.arch: a string with the name of the neural network architecture being used.

    Output:

        args.stats_file: full path to file for final json statistics
        args.epoch_stats_file: full path to file for json with per-epoch statistics
        args.save: new save directory, or existing directory if evaluating.
        args.evaluate: are we doing an evaluation-only run
        args.load: updated if a weights file was specified via args.evaluate
        args.log_file_path: set with the path to the file where logs will be written.
            This variable is designed to be passed to utils.logging_setup(log_file_path).

    Returns:
        updated args object
    """
    log_file_name = 'log.txt'
    # Remember the original --evaluate value; loading saved args below would
    # otherwise overwrite it with the value stored at training time.
    evaluate_arg = args.evaluate
    loaded_args = False
    if args.load_args:
        # Merge saved JSON args over the current namespace; CLI --evaluate wins.
        with open(args.load_args, 'r') as f:
            args_dict = vars(args)
            args_dict.update(json.load(f))
            args = argparse.Namespace(**args_dict)
            args.evaluate = evaluate_arg
            loaded_args = True
    # Timestamp shared by all files produced by this run.
    stats_time = time.strftime("%Y%m%d-%H%M%S")
    if evaluate_arg:
        # evaluate results go in the same directory as the weights but with a new timestamp
        # we will put the logs in the same directory as the weights
        save_dir = os.path.dirname(os.path.realpath(evaluate_arg))
        log_file_name = 'eval-log-' + stats_time + '.txt'
        log_file_path = os.path.join(save_dir, log_file_name)
        params_path = os.path.join(save_dir, 'commandline_args.json')
        if not loaded_args:
            # Fall back to the args saved next to the weights at training time.
            print('Warning: --evaluate specified, loading commandline args from:\n' + params_path)
            with open(params_path, 'r') as f:
                args_dict = vars(args)
                args_dict.update(json.load(f))
                args = argparse.Namespace(**args_dict)
                args.evaluate = evaluate_arg
        args.load = evaluate_arg
        args.save = save_dir
    else:
        # Fresh run: create a new timestamped experiment dir and snapshot the
        # current *.py sources plus the command-line args for reproducibility.
        args.save = '{}-{}-{}-{}-{}-{}'.format(run_type, stats_time, args.save, args.dataset, args.arch, args.gpu)
        params_path = os.path.join(args.save, 'commandline_args.json')
        create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
        log_file_path = os.path.join(args.save, log_file_name)
        with open(params_path, 'w') as f:
            json.dump(vars(args), f)
    stats_file_name = 'eval-stats-' + stats_time + '.json'
    args.epoch_stats_file = os.path.join(args.save, 'eval-epoch-stats-' + stats_time + '.json')
    args.stats_file = os.path.join(args.save, stats_file_name)
    args.log_file_path = log_file_path
    return args
sharpDARTS | sharpDARTS-master/cnn/model.py | import math
import torch
import torch.nn as nn
from torch.nn import functional as F
import numpy as np
from genotypes import PRIMITIVES, MULTICHANNELNET_PRIMITIVES
from operations import *
import operations
# from . import operations
# from . import genotypes
# from .operations import ReLUConvBN
# from .operations import ConvBNReLU
# from .operations import FactorizedReduce
# from .operations import Identity
from torch.autograd import Variable
# from .utils import drop_path
# from .model_search import MixedAux
import operations
import genotypes
from operations import FactorizedReduce
from operations import Identity
from operations import ReLUConvBN
from operations import SepConv
from utils import drop_path
class Cell(nn.Module):
    """One fixed (post-search) cell: a small DAG of operations over two input states."""

    def __init__(self, genotype_sequence, concat_sequence, C_prev_prev, C_prev, C, reduction, reduction_prev,
                 op_dict=None, separate_reduce_cell=True, C_mid=None):
        """Create a final cell with a single architecture.

        The Cell class in model_search.py is the equivalent for searching multiple architectures.

        # Arguments

            op_dict: The dictionary of possible operation creation functions.
                All primitive name strings defined in the genotype must be in the op_dict.
        """
        super(Cell, self).__init__()
        print(C_prev_prev, C_prev, C)
        self.reduction = reduction

        if op_dict is None:
            op_dict = operations.OPS
        # _op_dict are op_dict available for use,
        # _ops is the actual sequence of op_dict being utilized in this case
        self._op_dict = op_dict
        # Preprocess both inputs to C channels; if the previous cell reduced
        # resolution, s0 must also be spatially reduced to match s1.
        if reduction_prev is None:
            self.preprocess0 = operations.Identity()
        elif reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C, stride=2)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)

        op_names, indices = zip(*genotype_sequence)
        self._compile(C, op_names, indices, concat_sequence, reduction, C_mid)

    def _compile(self, C, op_names, indices, concat, reduction, C_mid):
        # Instantiate the concrete ops: two ops feed each intermediate node.
        assert len(op_names) == len(indices)
        self._steps = len(op_names) // 2
        self._concat = concat
        self.multiplier = len(concat)

        self._ops = nn.ModuleList()
        for name, index in zip(op_names, indices):
            # Only ops reading one of the two cell inputs (index < 2) are
            # strided in a reduction cell; internal edges keep stride 1.
            stride = 2 if reduction and index < 2 else 1
            op = self._op_dict[name](C, C, stride, True, C_mid)
            # op = self._op_dict[name](C, stride, True)
            self._ops += [op]
        self._indices = indices

    def forward(self, s0, s1, drop_prob=0.):
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)

        states = [s0, s1]
        for i in range(self._steps):
            h1 = states[self._indices[2*i]]
            h2 = states[self._indices[2*i+1]]
            op1 = self._ops[2*i]
            op2 = self._ops[2*i+1]
            h1 = op1(h1)
            h2 = op2(h2)
            if self.training and drop_prob > 0.:
                # drop_path is skipped for Identity ops so skip-connections
                # are never severed.
                if not isinstance(op1, Identity):
                    h1 = drop_path(h1, drop_prob)
                if not isinstance(op2, Identity):
                    h2 = drop_path(h2, drop_prob)
            s = h1 + h2
            states += [s]
        # Output is the channel-wise concatenation of the selected states.
        return torch.cat([states[i] for i in self._concat], dim=1)
class AuxiliaryHeadCIFAR(nn.Module):
    """Small side classifier attached partway down the network.

    The layer arithmetic assumes 8x8 feature maps: the strided pooling yields
    2x2, and the final 2x2 convolution collapses that to 1x1 so the flatten
    produces exactly 768 features.
    """

    def __init__(self, C, num_classes):
        """assuming input size 8x8"""
        super(AuxiliaryHeadCIFAR, self).__init__()
        self.features = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),  # image size = 2 x 2
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
        )
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)
class AuxiliaryHeadImageNet(nn.Module):
    """Small side classifier for the ImageNet network.

    NOTE(review): the original comment claimed 14x14 input, but the flatten
    into the 768-wide linear layer only works when the features collapse to
    1x1 spatially (e.g. 7x7 input) — confirm against the call site.
    """

    def __init__(self, C, num_classes):
        """assuming input size 14x14"""
        super(AuxiliaryHeadImageNet, self).__init__()
        self.features = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            # NOTE(Hanxaio Liu): This batchnorm was omitted in my earlier implementation due to a typo.
            # Commenting it out for consistency with the experiments in the paper.
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
        )
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)
class NetworkCIFAR(nn.Module):
    """Final (post-search) CIFAR network: a conv stem followed by a stack of Cells."""

    def __init__(self, C, num_classes, layers, auxiliary, genotype, in_channels=3, reduce_spacing=None,
                 mixed_aux=False, op_dict=None, C_mid=None, stem_multiplier=3):
        """
        # Arguments

            C: Initial number of output channels.
            in_channels: initial number of input channels
            layers: The number of cells to create.
            reduce_spacing: number of layers of cells between reduction cells,
                default of None is at 1/3 and 2/3 of the total number of layers.
                1 means all cells are reduction. 2 means the first layer is
                normal then the second
            op_dict: The dictionary of possible operation creation functions.
                All primitive name strings defined in the genotype must be in the op_dict.
        """
        super(NetworkCIFAR, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary
        self._in_channels = in_channels
        self.drop_path_prob = 0.

        C_curr = stem_multiplier*C
        self.stem = nn.Sequential(
            nn.Conv2d(in_channels, C_curr, 3, padding=1, bias=False),
            nn.BatchNorm2d(C_curr)
        )

        if mixed_aux:
            # NOTE(review): MixedAux is not imported in this file (the
            # model_search import is commented out at the top), so
            # mixed_aux=True would raise NameError — confirm before enabling.
            self.auxs = MixedAux(num_classes, weights_are_parameters=True)
        else:
            self.auxs = None

        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            # Reduction cells double the channel count (and halve resolution);
            # placed at 1/3 and 2/3 depth by default, or every reduce_spacing layers.
            if ((reduce_spacing is None and i in [layers//3, 2*layers//3]) or
                    (reduce_spacing is not None and ((i + 1) % reduce_spacing == 0))):
                C_curr *= 2
                reduction = True
                cell = Cell(genotype.reduce, genotype.reduce_concat, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, op_dict=op_dict, C_mid=C_mid)
            else:
                reduction = False
                cell = Cell(genotype.normal, genotype.normal_concat, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, op_dict=op_dict, C_mid=C_mid)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
            if self.auxs is not None:
                self.auxs.add_aux(C_prev)
            elif i == 2*layers//3:
                # Remember the channel count where the auxiliary head attaches.
                C_to_auxiliary = C_prev

        if self.auxs is None:
            if auxiliary:
                self.auxiliary_head = AuxiliaryHeadCIFAR(C_to_auxiliary, num_classes)
            # self.global_pooling = nn.AdaptiveMaxPool2d(1)
            self.global_pooling = nn.AdaptiveAvgPool2d(1)
            self.classifier = nn.Linear(C_prev, num_classes)
        else:
            # init params to prioritize auxiliary decision making networks
            self.auxs.build()

    def forward(self, input_batch):
        """Return (logits, logits_aux); logits_aux is None unless the auxiliary
        head is enabled and the model is in training mode."""
        logits_aux = None
        s0 = s1 = self.stem(input_batch)
        s1s = []
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            if self.auxs is not None:
                # print('network forward i: ' + str(i) + ' s1 shape: ' + str(s1.shape))
                s1s += [s1]
            elif i == 2 * self._layers // 3 and self._auxiliary and self.training:
                # "Deep supervision": the aux head only contributes while training.
                logits_aux = self.auxiliary_head(s1)
        if self.auxs is not None:
            # combine the result of all aux networks
            # print('calling auxs, s1s len: ' + str(len(s1s)))
            logits = self.auxs(s1s)
        else:
            out = self.global_pooling(s1)
            logits = self.classifier(out.view(out.size(0),-1))
        return logits, logits_aux
class NetworkImageNet(nn.Module):
    """Final (post-search) ImageNet network: two strided stems plus a stack of Cells.

    # Arguments

        C: initial number of cell output channels.
        num_classes: number of classification targets.
        layers: number of cells; reduction cells are placed at 1/3 and 2/3 depth.
        auxiliary: if True, attach AuxiliaryHeadImageNet at 2/3 depth.
        genotype: architecture definition with normal/reduce op sequences.
        op_dict, C_mid: forwarded to each Cell.
    """

    def __init__(self, C, num_classes, layers, auxiliary, genotype, in_channels=3, reduce_spacing=None,
                 mixed_aux=False, op_dict=None, C_mid=None, stem_multiplier=3):
        super(NetworkImageNet, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary
        self._in_channels = in_channels
        self.drop_path_prob = 0.

        # Two stems: the input is downsampled by a total factor of 8 before the cells.
        self.stem0 = nn.Sequential(
            nn.Conv2d(in_channels, C // 2, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )

        self.stem1 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )

        C_prev_prev, C_prev, C_curr = C, C, C

        self.cells = nn.ModuleList()
        reduction_prev = True
        for i in range(layers):
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
                cell = Cell(genotype.reduce, genotype.reduce_concat, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, op_dict=op_dict, C_mid=C_mid)
            else:
                reduction = False
                # BUGFIX: normal cells must use genotype.normal_concat; the
                # original passed genotype.reduce_concat here, inconsistent
                # with NetworkCIFAR above.
                cell = Cell(genotype.normal, genotype.normal_concat, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, op_dict=op_dict, C_mid=C_mid)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
            if i == 2 * layers // 3:
                # Channel count at the point where the auxiliary head attaches.
                C_to_auxiliary = C_prev

        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadImageNet(C_to_auxiliary, num_classes)
        self.global_pooling = nn.AvgPool2d(7)
        self.classifier = nn.Linear(C_prev, num_classes)

    def forward(self, batch_input):
        """Return (logits, logits_aux); logits_aux is None unless the auxiliary
        head is enabled and the model is in training mode."""
        logits_aux = None
        s0 = self.stem0(batch_input)
        s1 = self.stem1(s0)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            if i == 2 * self._layers // 3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), -1))
        return logits, logits_aux
class NoisyLinear(nn.Module):
    """Factorised NoisyLinear layer with bias.

    Reference: Rainbow: Combining Improvements in Deep Reinforcement Learning https://arxiv.org/abs/1710.02298
    Code Source: https://github.com/Kaixhin/Rainbow
    """

    def __init__(self, in_features, out_features, std_init=0.4):
        super(NoisyLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.std_init = std_init
        # Learnable mean/std for weights and bias, plus non-learnable noise
        # buffers that are resampled via reset_noise().
        self.weight_mu = nn.Parameter(torch.empty(out_features, in_features))
        self.weight_sigma = nn.Parameter(torch.empty(out_features, in_features))
        self.register_buffer('weight_epsilon', torch.empty(out_features, in_features))
        self.bias_mu = nn.Parameter(torch.empty(out_features))
        self.bias_sigma = nn.Parameter(torch.empty(out_features))
        self.register_buffer('bias_epsilon', torch.empty(out_features))
        self.reset_parameters()
        self.reset_noise()

    def reset_parameters(self):
        # Means are uniform in +-1/sqrt(fan_in); sigmas start at a constant scale.
        bound = 1 / math.sqrt(self.in_features)
        self.weight_mu.data.uniform_(-bound, bound)
        self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
        self.bias_mu.data.uniform_(-bound, bound)
        self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))

    def _scale_noise(self, size):
        # f(x) = sign(x) * sqrt(|x|) applied to standard normal samples.
        noise = torch.randn(size)
        return noise.sign().mul_(noise.abs().sqrt_())

    def reset_noise(self):
        # Factorised Gaussian noise: the weight noise is the outer product of
        # an input-sized and an output-sized noise vector.
        eps_in = self._scale_noise(self.in_features)
        eps_out = self._scale_noise(self.out_features)
        self.weight_epsilon.copy_(eps_out.ger(eps_in))
        self.bias_epsilon.copy_(eps_out)

    def forward(self, input):
        if not self.training:
            # Deterministic at evaluation time: use only the learned means.
            return F.linear(input, self.weight_mu, self.bias_mu)
        return F.linear(input,
                        self.weight_mu + self.weight_sigma * self.weight_epsilon,
                        self.bias_mu + self.bias_sigma * self.bias_epsilon)
class DQN(nn.Module):
    """Convolutional Rainbow DQN: conv trunk + dueling noisy value/advantage head.

    Reference: Rainbow: Combining Improvements in Deep Reinforcement Learning https://arxiv.org/abs/1710.02298
    Code Source: https://github.com/Kaixhin/Rainbow
    """

    def __init__(self, args, action_space):
        # BUGFIX: the original called super(DQNAS, self).__init__(); a DQN
        # instance is not a DQNAS instance, so that raised TypeError.
        super(DQN, self).__init__()
        self.atoms = args.atoms
        self.action_space = action_space

        self.conv1 = nn.Conv2d(args.history_length, 32, 8, stride=4, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, 3)
        self.fc_h_v = NoisyLinear(3136, args.hidden_size, std_init=args.noisy_std)
        self.fc_h_a = NoisyLinear(3136, args.hidden_size, std_init=args.noisy_std)
        self.fc_z_v = NoisyLinear(args.hidden_size, self.atoms, std_init=args.noisy_std)
        self.fc_z_a = NoisyLinear(args.hidden_size, action_space * self.atoms, std_init=args.noisy_std)

    def forward(self, x, log=False):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        # 3136 = 64 * 7 * 7; presumably assumes 84x84 Atari frames -- TODO confirm.
        x = x.view(-1, 3136)
        q = self.q_score(x, log)  # Probabilities with action over second dimension
        return q

    def q_score(self, x, log):
        v = self.fc_z_v(F.relu(self.fc_h_v(x)))  # Value stream
        a = self.fc_z_a(F.relu(self.fc_h_a(x)))  # Advantage stream
        v, a = v.view(-1, 1, self.atoms), a.view(-1, self.action_space, self.atoms)
        q = v + a - a.mean(1, keepdim=True)  # Combine streams
        if log:  # Use log softmax for numerical stability
            q = F.log_softmax(q, dim=2)  # Log probabilities with action over second dimension
        else:
            q = F.softmax(q, dim=2)  # Probabilities with action over second dimension
        return q

    def reset_noise(self):
        # Resample factorised noise in every noisy ('fc*') child layer.
        for name, module in self.named_children():
            if 'fc' in name:
                module.reset_noise()
class RainbowDenseBlock(nn.Module):
    """Decision making layers of the Rainbow reinforcement learning algorithm.

    Reference: Rainbow: Combining Improvements in Deep Reinforcement Learning https://arxiv.org/abs/1710.02298
    Code Source: https://github.com/Kaixhin/Rainbow
    """

    def __init__(self, c_in, action_space, hidden_size=512, atoms=51, noisy_std=0.1):
        """
        # Arguments

            c_in: number of channels in.
            action_space: number of discrete possible actions, like controller buttons.
            atoms: Discretised size of value distribution.
            hidden_size: Network hidden size
        """
        super().__init__()
        self.c_in = c_in
        self.atoms = atoms
        self.action_space = action_space
        # Dueling architecture: separate value and advantage streams, each a
        # two-layer noisy MLP.
        self.fc_h_v = NoisyLinear(self.c_in, hidden_size, std_init=noisy_std)
        self.fc_h_a = NoisyLinear(self.c_in, hidden_size, std_init=noisy_std)
        self.fc_z_v = NoisyLinear(hidden_size, self.atoms, std_init=noisy_std)
        self.fc_z_a = NoisyLinear(hidden_size, action_space * self.atoms, std_init=noisy_std)

    def forward(self, x, log=False):
        flat = x.view(-1, self.c_in)
        # Per-action probability distributions over the atoms.
        return self.q_score(flat, log)

    def q_score(self, x, log=False):
        value = self.fc_z_v(F.relu(self.fc_h_v(x)))          # Value stream
        advantage = self.fc_z_a(F.relu(self.fc_h_a(x)))      # Advantage stream
        value = value.view(-1, 1, self.atoms)
        advantage = advantage.view(-1, self.action_space, self.atoms)
        # Combine streams, centering the advantages.
        q = value + advantage - advantage.mean(1, keepdim=True)
        if log:
            # Log-softmax for numerical stability when requested by the loss.
            return F.log_softmax(q, dim=2)
        return F.softmax(q, dim=2)

    def reset_noise(self):
        # Resample factorised noise in every noisy ('fc*') child layer.
        for name, module in self.named_children():
            if 'fc' in name:
                module.reset_noise()
class DQNAS(nn.Module):
    """Rainbow DQN whose convolutional trunk is a searched NAS cell stack."""

    def __init__(self, C=36, num_classes=10, layers=4, auxiliary=False, genotype=None, in_channels=3, reduce_spacing=None, noisy_std=0.1, drop_path_prob=0.0):
        """
        # Arguments

            C: Initial number of output channels.
            in_channels: Initial number of input channels
            layers: The number of cells to create.
            auxiliary: Train a smaller auxiliary network partway down for "deep supervision" see NAS paper for details.
            in_channels: The number of channels for input data, for example rgb images have 3 input channels.
            reduce_spacing: number of layers of cells between reduction cells,
                default of None is at 1/3 and 2/3 of the total number of layers.
                1 means all cells are reduction. 2 means the first layer is
                normal then the second
            noisy_std: Initial standard deviation of noisy linear layers
        """
        super(DQNAS, self).__init__()
        if genotype is None:
            genotype = genotypes.DARTS_V2
        self._layers = layers
        self._auxiliary = auxiliary
        self._in_channels = in_channels
        self._noisy_std = noisy_std
        self._drop_path_prob = drop_path_prob
        C_prev_prev, C_prev = self.nas_build(C, in_channels, layers, reduce_spacing, genotype, auxiliary, num_classes)
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        # self.classifier = nn.Linear(C_prev, num_classes)
        # The Rainbow dueling/noisy head replaces a plain linear classifier.
        self.classifier = RainbowDenseBlock(C_prev, num_classes)

    def nas_build(self, C, in_channels, layers, reduce_spacing, genotype, auxiliary, num_classes):
        # Build the stem plus the stack of searched cells; returns the last
        # two channel counts so the caller can size the classification head.
        stem_multiplier = 3
        C_curr = stem_multiplier*C
        self.stem = nn.Sequential(
            nn.Conv2d(in_channels, C_curr, 3, padding=1, bias=False),
            nn.BatchNorm2d(C_curr)
        )

        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            if ((reduce_spacing is None and i in [layers//3, 2*layers//3]) or
                    (reduce_spacing is not None and ((i + 1) % reduce_spacing == 0))):
                C_curr *= 2
                reduction = True
                cell = Cell(genotype.reduce, genotype.reduce_concat, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
            else:
                reduction = False
                # NOTE(review): relies on the genotype exposing start/end and
                # start_concat/end_concat fields (sharpDARTS extended Genotype);
                # standard DARTS genotypes lack them -- confirm before use.
                if i == 0 and genotype.start:
                    # start cell is nonempty
                    sequence = genotype.start
                    concat = genotype.start_concat
                elif i == layers - 1 and genotype.end:
                    # end cell is nonempty
                    sequence = genotype.end
                    concat = genotype.end_concat
                else:
                    # we are on a normal cell
                    sequence = genotype.normal
                    concat = genotype.normal_concat
                cell = Cell(sequence, concat, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier*C_curr
            if i == 2*layers//3:
                # Channel count where the auxiliary head attaches.
                C_to_auxiliary = C_prev

        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadCIFAR(C_to_auxiliary, num_classes)
        return C_prev_prev, C_prev

    def forward(self, input, log=False):
        logits_aux = None
        s0 = s1 = self.stem(input)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self._drop_path_prob)
            if i == 2*self._layers//3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0),-1), log=log)
        if self._auxiliary:
            return logits, logits_aux
        else:
            #TODO(ahundt) previously a tuple was always returned and this if statement wasn't here before. check for compatibility with other DARTS code
            return logits

    def reset_noise(self):
        # Resample noise in the Rainbow head's noisy linear layers.
        self.classifier.reset_noise()
class MultiChannelNetworkModel(nn.Module):
"""
This class is used to initialize a sub-graph or linear model found after running a MultiChannelNet search (refer to MultiChannelNetwork
in model_search.py) and using graph operations found in multichannelnet_graph_operations.py.
To see an example of how final model architectures are stored, refer to genotypes.py
For more information about MultiChannelNet (Differentiable Grid Search) refer to section 4 of the paper https://arxiv.org/abs/1903.09900
"""
def __init__(self, C=32, num_classes=10, layers=6, criterion=None, steps=5, multiplier=4, stem_multiplier=3,
in_channels=3, final_linear_filters=768, always_apply_ops=False, visualization=False, primitives=None,
op_dict=None, weighting_algorithm=None, genotype=None, simple_path=True):
""" C is the mimimum number of channels. Layers is how many output scaling factors and layers should be in the network.
op_dict: The dictionary of possible operation creation functions.
All primitives must be in the op dict. (Refer to operations.py and genotypes.py for all the primitives available)
genotype is used to get the architecture of final model to be generated.
"""
super(MultiChannelNetworkModel, self).__init__()
self._C = C
if genotype is not None:
# TODO(ahundt) We shouldn't be using arrays here, we should be using actual genotype objects.
self._genotype = np.array(genotype)
else:
self._genotype = genotype
self._num_classes = num_classes
if layers % 2 == 1:
raise ValueError('MultiChannelNetwork layers option must be even, got ' + str(layers))
self._layers = layers // 2
if criterion is None:
self._criterion = nn.CrossEntropyLoss()
else:
self._criterion = criterion
self._steps = steps
self._multiplier = multiplier
self._always_apply_ops = always_apply_ops
self._visualization = visualization
self._weighting_algorithm = weighting_algorithm
self.normal_index = 0
self.reduce_index = 1
self.layer_types = 2
self.strides = np.array([self.normal_index, self.reduce_index])
# 5 is a reasonable number
self.C_start = int(np.log2(C))
self.C_end = self.C_start + steps
print('c_start: ' + str(self.C_start) + ' c_end: ' + str(self.C_end))
self.Cs = np.array(np.exp2(np.arange(self.C_start, self.C_end)), dtype='int')
# $ print(Cs)
# [ 32. 64. 128. 256. 512.]
self.C_size = len(self.Cs)
C_in, C_out = np.array(np.meshgrid(self.Cs, self.Cs, indexing='ij'), dtype='int')
# $ print(C_in)
# [[ 32. 32. 32. 32. 32.]
# [ 64. 64. 64. 64. 64.]
# [128. 128. 128. 128. 128.]
# [256. 256. 256. 256. 256.]
# [512. 512. 512. 512. 512.]]
# $ print(C_out)
# [[ 32. 64. 128. 256. 512.]
# [ 32. 64. 128. 256. 512.]
# [ 32. 64. 128. 256. 512.]
# [ 32. 64. 128. 256. 512.]
# [ 32. 64. 128. 256. 512.]]
if primitives is None:
primitives = PRIMITIVES
if op_dict is None:
op_dict = operations.OPS
self.primitives = primitives
self.op_dict = op_dict
self.simple_path = simple_path
# self.op_types = [operations.SharpSepConv, operations.ResizablePool]
# Removed condition as it is not required.
# if self._genotype is not None and type(self._genotype[0]) is np.str_:
if self.simple_path:
model = self._genotype[np.flatnonzero(np.core.defchararray.find(self._genotype, 'add') == -1)]
root_ch = self.Cs[int(model[1][-1])]
self.stem = nn.ModuleList()
s = nn.Sequential(
nn.Conv2d(int(in_channels), root_ch, 3, padding=1, bias=False),
nn.BatchNorm2d(root_ch))
self.stem.append(s)
self.op_grid = nn.ModuleList()
c_out = 0
#Switched to primitives and op_dict like Network
# ops = {'SharpSepConv': 0, 'ResizablePool': 1}
# Parsing model definition string. Refer genotypes.py for sample model definition string.
for layers in model[3:-4]:
layer = layers.split("_")
# fetching primitive and other parameters from saved model.
primitive = self.primitives[int(layer[-1])]
stride = int(layer[3])
c_in = int(layer[6])
c_out = int(layer[9])
op = self.op_dict[primitive](c_in, c_out, stride=stride)
# Consistent with MixedOp
if 'pool' in primitive:
op = nn.Sequential(op, nn.BatchNorm2d(C, affine=False))
# Decreasing feature maps so that output is as expected.
if 'none' in primitive or ('skip_connect' in primitive and stride_idx == 0):
op = nn.Sequential(op, nn.Conv2d(int(cin), int(cout), 1))
self.op_grid.append(op)
self.base = nn.ModuleList()
self.base.append(operations.SharpSepConv(int(c_out), int(final_linear_filters), 3))
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(final_linear_filters, num_classes)
else:
self.stem = nn.ModuleList()
self.stemCs = []
for i, c in enumerate(self.Cs):
if "Conv3x3_"+str(i) in self._genotype:
s = nn.Sequential(
nn.Conv2d(int(in_channels), int(c), 3, padding=1, bias=False),
nn.BatchNorm2d(c)
)
self.stemCs.append(c)
self.stem.append(s)
self.op_grid = nn.ModuleList()
self.op_grid_list = []
self.type_modules_list = []
for layer_idx in range(self._layers):
stride_modules = nn.ModuleList()
stride_modules_param = []
for stride_idx in self.strides:
out_modules = nn.ModuleList()
out_modules_param = []
for C_out_idx in range(self.C_size):
in_modules = nn.ModuleList()
in_modules_param = []
for C_in_idx in range(self.C_size):
out_node = 'layer_'+str(layer_idx)+'_add_'+'c_out_'+str(self.Cs[C_out_idx])+'_stride_' + str(stride_idx+1)
type_modules = nn.ModuleList()
type_modules_list = []
for primitive_idx, primitive in enumerate(self.primitives):
cin = C_in[C_in_idx][C_out_idx]
cout = C_out[C_in_idx][C_out_idx]
name = 'layer_' + str(layer_idx) + '_stride_' + str(stride_idx+1) + '_c_in_' + str(self.Cs[C_in_idx]) + '_c_out_' + str(self.Cs[C_out_idx]) + '_op_type_' + str(primitive) + '_opid_' + str(primitive_idx)
if name in self._genotype:
op = self.op_dict[primitive](int(cin), int(cout), int(stride_idx + 1), False)
# Consistent with MixedOp
if 'pool' in primitive:
op = nn.Sequential(op, nn.BatchNorm2d(int(cout), affine=False))
# Decreasing feature maps so that output is as expected.
if 'none' in primitive or ('skip_connect' in primitive and stride_idx == 0):
op = nn.Sequential(op, nn.Conv2d(int(cin), int(cout), 1))
type_modules.append(op)
type_modules_list.append((primitive_idx, primitive))
else:
continue
if len(type_modules) > 0:
in_modules.append(type_modules)
in_modules_param.append((self.Cs[C_in_idx], type_modules_list))
if len(in_modules) > 0:
out_modules.append(in_modules)
out_modules_param.append((self.Cs[C_out_idx], in_modules_param))
# op grid is stride_modules
if len(out_modules) > 0:
stride_modules.append(out_modules)
stride_modules_param.append((stride_idx, out_modules_param))
# stride_modules_param.append((stride_idx, out_modules_param))
if len(stride_modules) > 0:
self.op_grid.append(stride_modules)
self.op_grid_list.append((layer_idx, stride_modules_param))
self.base = nn.ModuleList()
self.baseCs=[]
for c in self.Cs:
if "SharpSepConv"+str(c) in self._genotype:
self.baseCs.append(c)
self.base.append(operations.SharpSepConv(int(c), int(final_linear_filters), 3))
# TODO(ahundt) there should be one more layer of normal convolutions to set the final linear layer size
# C_in will be defined by the previous layer's c_out
self.arch_weights_shape = [len(self.strides), self._layers, self.C_size, self.C_size, len(self.primitives)]
# number of weights total
self.weight_count = np.prod(self.arch_weights_shape)
# number of weights in a softmax call
self.softmax_weight_count = np.prod(self.arch_weights_shape[2:])
# minimum score for a layer to continue being trained
self.min_score = float(1 / (self.softmax_weight_count * self.softmax_weight_count))
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(final_linear_filters, num_classes)
def new(self):
model_new = Network(self._C, self._num_classes, self._layers, self._criterion).cuda()
for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
x.data.copy_(y.data)
return model_new
def forward(self, input_batch):
# [in, normal_out, reduce_out]
if self.simple_path:
x = input_batch
for i in range(len(self.stem)):
x = self.stem[i](x)
for i in range(len(self.op_grid)):
x = self.op_grid[i](x)
out = self.global_pooling(self.base[0](x))
logits = self.classifier(out.view(out.size(0), -1))
return logits
else:
self.C_size = len(self.stemCs)
s0s = [[], [None] * self.C_size, [None] * self.C_size]
for operation in self.stem:
# Make the set of features with different numbers of channels.
s0s[0] += [operation(input_batch)]
# Duplicate s0s to account for 2 different strides
# s0s += [[]]
# s1s = [None] * layers + 1
for layer in self.op_grid_list:
# layer is how many times we've called everything, i.e. the number of "layers"
# this is different from the number of layer types which is len([SharpSepConv, ResizablePool]) == 2
# layer_st_time = time.time()
layer_idx = layer[0]
for stride_idx, C_outs in layer[1]:
stride = 1 + stride_idx
# C_out_layer = [x[2] for x in self.outCs if x[0] == layer[0] and x[1] == strides[0]]
# C_outs = strides[1]
for C_out_grid_id, (C_out, C_ins) in enumerate(C_outs):
# take all the layers with the same output so we can sum them
# print('forward layer: ' + str(layer) + ' stride: ' + str(stride) + ' c_out: ' + str(self.Cs[C_out_idx]))
C_out_idx = np.where(self.Cs == C_out)[0][0]
c_outs = []
# C_in_layer = [x[3] for x in self.inCs if x[0] == layer[0] and x[1] == strides[0] and x[2] == C_out]
for C_in_grid_id, (C_in, primitives) in enumerate(C_ins):
C_in_idx = np.where(self.Cs == C_in)[0][0]
for primitive_grid_idx, (primitive_idx, primitive) in enumerate(primitives):
# get the specific weight for this op
name = 'layer_' + str(layer_idx) + '_stride_' + str(stride) + '_c_in_' + str(C_in) + '_c_out_' + str(C_out) + '_op_type_' + str(primitive) + '_opid_' + str(primitive_idx)
# layer is present in final model architecture.
if name in self._genotype:
s = s0s[stride_idx][C_in_grid_id]
if s is not None:
x = self.op_grid[layer_idx][stride_idx][C_in_grid_id][C_out_grid_id][primitive_grid_idx](s)
c_outs += [x]
# only apply updates to layers of sufficient quality
if c_outs:
# print('combining c_outs forward layer: ' + str(layer) + ' stride: ' + str(stride) + ' c_out: ' + str(self.Cs[C_out_idx]) + ' c_in: ' + str(self.Cs[C_in_idx]) + ' op type: ' + str(op_type_idx))
# combined values with the same c_out dimension
combined = sum(c_outs)
if s0s[stride][C_out_grid_id] is None:
# first call sets the value
s0s[stride][C_out_grid_id] = combined
else:
s0s[stride][C_out_grid_id] += combined
# downscale reduced input as next output
self.C_out_size = len(C_outs)
s0s = [s0s[stride], [None] * self.C_out_size, [None] * self.C_out_size]
# combine results
# use SharpSepConv to match dimension of final linear layer
# then add up all remaining outputs and pool the result
out = self.global_pooling(sum(op(x) for op, x in zip(self.base, s0s[0]) if x is not None))
logits = self.classifier(out.view(out.size(0), -1))
return logits
def arch_weights(self, stride_idx):
# ops are stored as layer, stride, cin, cout, num_layer_types
# while weights are ordered stride_index, layer, cin, cout, num_layer_types
# first exclude the stride_idx because we already know that
view_shape = self.arch_weights_shape[1:]
# print('arch_weights() view_shape self.weights_shape[1:]: ' + str(view_shape))
# softmax of weights should occur once for each layer
num_layers = self.arch_weights_shape[1]
weights_softmax_view = self._arch_parameters[stride_idx].view(num_layers, -1)
# apply softmax and convert to an indexable view
weights = F.softmax(weights_softmax_view, dim=-1).view(view_shape)
return weights
def _loss(self, input_batch, target):
logits = self(input_batch)
return self._criterion(logits, target)
def _initialize_alphas(self, genotype=None):
if genotype is None or genotype[-1] == 'longest_path':
init_alpha = 1e-3*torch.randn(self.arch_weights_shape)
else:
print("_initialize_alphas with preconfigured weights", genotype[0][0][0][0])
init_alpha = []
init_alpha.append(genotype[0])
init_alpha.append(genotype[2])
init_alpha = torch.from_numpy(np.array(init_alpha)).float()
if torch.cuda.is_available():
self._arch_parameters = Variable(init_alpha.cuda(), requires_grad=True)
else:
self._arch_parameters = Variable(init_alpha, requires_grad=True)
def arch_parameters(self):
''' Get list of architecture parameters
'''
return [self._arch_parameters]
  def genotype(self, layout='raw_weights'):
    """Export the current architecture in one of several layouts.

    layout options: raw_weights, longest_path, graph
      raw_weights: nested lists of the softmaxed normal/reduce weights.
      longest_path: node names along the longest path of self.G
        (reduce entry left empty).
      graph: a single-element list holding self.G as node-link JSON
        (reduce entry left empty).

    # Returns
      A Genotype with normal/reduce filled per the chosen layout and
      empty concat lists.

    # Raises
      ValueError: for an unrecognized layout string.
    """
    if layout == 'raw_weights':
      # TODO(ahundt) switch from raw weights to a simpler representation for genotype?
      gene_normal = np.array(self.arch_weights(0).data.cpu().numpy()).tolist()
      gene_reduce = np.array(self.arch_weights(1).data.cpu().numpy()).tolist()
    elif layout == 'longest_path':
      # TODO(ahundt) make into a list of the layer strings to be included.
      gene_normal = nx.algorithms.dag.dag_longest_path(self.G)
      gene_reduce = []
    elif layout == 'graph':
      # serialize the whole architecture graph as JSON
      data = json_graph.node_link_data(self.G)
      gene_normal = [json.dumps(data)]
      gene_reduce = []
    else:
      raise ValueError('unsupported layout: ' + str(layout))
    genotype = Genotype(
      normal=gene_normal, normal_concat=[],
      reduce=gene_reduce, reduce_concat=[],
      layout=layout
    )
    return genotype
| 34,793 | 39.552448 | 220 | py |
sharpDARTS | sharpDARTS-master/cnn/dataset.py | # Code to load various datasets for training.
#
# Some data loading code is from https://github.com/DRealArun/darts/ with the same license as DARTS.
import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
try:
import costar_dataset
except ImportError:
print('dataset.py: The costar dataset is not available, so it is being skipped. '
'See https://github.com/ahundt/costar_dataset for details')
costar_dataset = None
# Number of output classes for each supported dataset.
CIFAR_CLASSES = 10
MNIST_CLASSES = 10
FASHION_CLASSES = 10
EMNIST_CLASSES = 47
SVHN_CLASSES = 10
STL10_CLASSES = 10
DEVANAGARI_CLASSES = 46
IMAGENET_CLASSES = 1000
# Per-channel normalization statistics (mean/std) for each dataset.
# RGB datasets use 3-element lists; greyscale datasets use 1-tuples.
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
MNIST_MEAN = (0.1307,)
MNIST_STD = (0.3081,)
FASHION_MEAN = (0.2860405969887955,)
FASHION_STD = (0.35302424825650003,)
EMNIST_MEAN = (0.17510417052459282,)
EMNIST_STD = (0.33323714976320795,)
SVHN_MEAN = [ 0.4376821, 0.4437697, 0.47280442]
SVHN_STD = [ 0.19803012, 0.20101562, 0.19703614]
STL10_MEAN = [ 0.44671062, 0.43980984, 0.40664645]
STL10_STD = [ 0.26034098, 0.25657727, 0.27126738]
DEVANAGARI_MEAN = (0.240004663268,)
DEVANAGARI_STD = (0.386530114768,)
# Lookup tables keyed by dataset name used throughout this module.
class_dict = {'cifar10': CIFAR_CLASSES,
              'mnist' : MNIST_CLASSES,
              'emnist': EMNIST_CLASSES,
              'fashion': FASHION_CLASSES,
              'svhn': SVHN_CLASSES,
              'stl10': STL10_CLASSES,
              'devanagari' : DEVANAGARI_CLASSES,
              'imagenet' : IMAGENET_CLASSES}
mean_dict = {'cifar10': CIFAR_MEAN,
             'mnist' : MNIST_MEAN,
             'emnist': EMNIST_MEAN,
             'fashion': FASHION_MEAN,
             'svhn': SVHN_MEAN,
             'stl10': STL10_MEAN,
             'devanagari' : DEVANAGARI_MEAN,
             'imagenet' : IMAGENET_MEAN}
std_dict = {'cifar10': CIFAR_STD,
            'mnist' : MNIST_STD,
            'emnist': EMNIST_STD,
            'fashion': FASHION_STD,
            'svhn': SVHN_STD,
            'stl10': STL10_STD,
            'devanagari' : DEVANAGARI_STD,
            'imagenet' : IMAGENET_STD}
# Number of input image channels per dataset (1 = greyscale, 3 = RGB).
inp_channel_dict = {'cifar10': 3,
                    'mnist' : 1,
                    'emnist': 1,
                    'fashion': 1,
                    'svhn': 3,
                    'stl10': 3,
                    'devanagari' : 1,
                    'imagenet': 3}
# CoSTAR block stacking dataset tables keyed by feature mode.
costar_class_dict = {'translation_only': 3,
                     'rotation_only': 5,
                     'all_features': 8}
costar_supercube_inp_channel_dict = {'translation_only': 52,
                                     'rotation_only': 55,
                                     'all_features': 57}
costar_vec_size_dict = {'translation_only': 44,
                        'rotation_only': 49,
                        'all_features': 49}
# Valid set / subset names for the CoSTAR dataset loaders below.
COSTAR_SET_NAMES = ['blocks_only', 'blocks_with_plush_toy']
COSTAR_SUBSET_NAMES = ['success_only', 'error_failure_only', 'task_failure_only', 'task_and_error_failure']
def _grey_pil_loader(path):
    """Load the image at *path* as a single-channel ('L') greyscale PIL image.

    The file is opened via a context manager to avoid a ResourceWarning
    (https://github.com/python-pillow/Pillow/issues/835).
    """
    # PIL is imported locally: this module never imports it at the top level,
    # so the original inline loaders raised NameError on `Image`. PIL is
    # always available because torchvision itself depends on it.
    from PIL import Image
    with open(path, 'rb') as f:
        img = Image.open(f)
        return img.convert('L')


def get_training_queues(dataset_name, train_transform, valid_transform, dataset_location=None, batch_size=32, train_proportion=1.0, search_architecture=False,
                        costar_version='v0.4', costar_set_name=None, costar_subset_name=None, costar_feature_mode=None, costar_output_shape=(224, 224, 3),
                        costar_random_augmentation=None, costar_one_hot_encoding=True, costar_num_images_per_example=200, distributed=False, num_workers=12,
                        collate_fn=torch.utils.data.dataloader.default_collate, verbose=0, evaluate=False):
    """Create train and validation DataLoaders for the named dataset.

    # Arguments
        dataset_name: one of 'imagenet', 'cifar10', 'mnist', 'emnist',
            'fashion', 'svhn', 'stl10', 'devanagari', or 'stacking' (CoSTAR).
        train_transform, valid_transform: transforms for train/val samples.
        dataset_location: root directory of the dataset on disk.
        train_proportion: fraction of training data kept for training when
            `search_architecture` is True; the rest becomes the validation
            subset drawn from the training data.
        search_architecture: when True the validation queue is a held-out
            subset of the training data; when False the dataset's real
            test/val split is used.
        distributed: use DistributedSampler instead of SubsetRandomSampler.
        evaluate: when True the training queue is sampled sequentially.

    # Returns
        (train_queue, valid_queue) tuple of torch DataLoaders.

    # Raises
        ValueError: unsupported dataset name or bad CoSTAR arguments.
        ImportError: 'stacking' requested but costar_dataset is unavailable.
    """
    print("Getting " + dataset_name + " data")
    if dataset_name == 'imagenet':
        print("Using IMAGENET training set")
        # set the train directory
        train_dir = os.path.join(dataset_location, 'train')
        train_data = dset.ImageFolder(train_dir, train_transform)
    elif dataset_name == 'cifar10':
        print("Using CIFAR10 training set")
        train_data = dset.CIFAR10(root=dataset_location, train=True, download=True, transform=train_transform)
    elif dataset_name == 'mnist':
        print("Using MNIST training set")
        train_data = dset.MNIST(root=dataset_location, train=True, download=True, transform=train_transform)
    elif dataset_name == 'emnist':
        print("Using EMNIST training set")
        train_data = dset.EMNIST(root=dataset_location, split='balanced', train=True, download=True, transform=train_transform)
    elif dataset_name == 'fashion':
        print("Using Fashion training set")
        train_data = dset.FashionMNIST(root=dataset_location, train=True, download=True, transform=train_transform)
    elif dataset_name == 'svhn':
        print("Using SVHN training set")
        train_data = dset.SVHN(root=dataset_location, split='train', download=True, transform=train_transform)
    elif dataset_name == 'stl10':
        print("Using STL10 training set")
        train_data = dset.STL10(root=dataset_location, split='train', download=True, transform=train_transform)
    elif dataset_name == 'devanagari':
        print("Using DEVANAGARI training set")
        # Ensure dataset is present in the directory args.data. Does not support auto download
        train_data = dset.ImageFolder(root=dataset_location, transform=train_transform, loader=_grey_pil_loader)
    elif dataset_name == 'stacking':
        # Support for costar block stacking generator implemented by Chia-Hung Lin (rexxarchl)
        # https://github.com/ahundt/costar_dataset
        # https://sites.google.com/site/costardataset
        if costar_dataset is None:
            raise ImportError("Trying to use costar_dataset but it was not imported")
        print("Using CoSTAR Dataset")
        if costar_set_name is None or costar_set_name not in COSTAR_SET_NAMES:
            raise ValueError("Specify costar_set_name as one of {'blocks_only', 'blocks_with_plush_toy'}")
        if costar_subset_name is None or costar_subset_name not in COSTAR_SUBSET_NAMES:
            raise ValueError("Specify costar_subset_name as one of {'success_only', 'error_failure_only', 'task_failure_only', 'task_and_error_failure'}")
        train_data = costar_dataset.CostarBlockStackingDataset.from_standard_txt(
            root=dataset_location, single_batch_cube=False,
            version=costar_version, set_name=costar_set_name, subset_name=costar_subset_name,
            split='train', feature_mode=costar_feature_mode, output_shape=costar_output_shape,
            random_augmentation=costar_random_augmentation, one_hot_encoding=costar_one_hot_encoding,
            verbose=verbose, num_images_per_example=costar_num_images_per_example, is_training=not evaluate)
    else:
        # raise instead of `assert False`: asserts disappear under python -O
        raise ValueError("Cannot get training queue for dataset: " + str(dataset_name))

    num_train = len(train_data)
    indices = list(range(num_train))

    if search_architecture:
        # select the 'validation' set from the training data
        split = int(np.floor(train_proportion * num_train))
        print("search_architecture enabled, splitting training set into train and val.")
        print("Total Training size", num_train)
        print("Training set size", split)
        print("Training subset for validation size", num_train-split)
        valid_data = train_data
    else:
        # use the dataset's real held-out split; every training index trains
        split = num_train
        # NOTE: search_architecture is falsy in this branch, so train=search_architecture
        # below selects the test split (train=False) for datasets that take a train flag.
        if dataset_name == 'imagenet':
            print("Using IMAGENET validation data")
            valid_dir = os.path.join(dataset_location, 'val')
            valid_data = dset.ImageFolder(valid_dir, valid_transform)
        elif dataset_name == 'cifar10':
            print("Using CIFAR10 validation data")
            valid_data = dset.CIFAR10(root=dataset_location, train=search_architecture, download=True, transform=valid_transform)
        elif dataset_name == 'mnist':
            print("Using MNIST validation data")
            valid_data = dset.MNIST(root=dataset_location, train=search_architecture, download=True, transform=valid_transform)
        elif dataset_name == 'emnist':
            print("Using EMNIST validation data")
            valid_data = dset.EMNIST(root=dataset_location, split='balanced', train=search_architecture, download=True, transform=valid_transform)
        elif dataset_name == 'fashion':
            print("Using Fashion validation data")
            valid_data = dset.FashionMNIST(root=dataset_location, train=search_architecture, download=True, transform=valid_transform)
        elif dataset_name == 'svhn':
            print("Using SVHN validation data")
            valid_data = dset.SVHN(root=dataset_location, split='test', download=True, transform=valid_transform)
        elif dataset_name == 'stl10':
            print("Using STL10 validation data")
            valid_data = dset.STL10(root=dataset_location, split='test', download=True, transform=valid_transform)
        elif dataset_name == 'devanagari':
            print("Using DEVANAGARI validation data")
            # Ensure dataset is present in the directory args.data. Does not support auto download
            # NOTE(review): this points at the same folder as training (no
            # separate split directory) — confirm the intended directory layout.
            valid_data = dset.ImageFolder(root=dataset_location, transform=valid_transform, loader=_grey_pil_loader)
        elif dataset_name == 'stacking':
            valid_data = costar_dataset.CostarBlockStackingDataset.from_standard_txt(
                root=dataset_location, single_batch_cube=False,
                version=costar_version, set_name=costar_set_name, subset_name=costar_subset_name,
                split='val', feature_mode=costar_feature_mode, output_shape=costar_output_shape,
                random_augmentation=costar_random_augmentation, one_hot_encoding=costar_one_hot_encoding,
                verbose=verbose, num_images_per_example=costar_num_images_per_example, is_training=False)
        else:
            raise ValueError("Cannot get validation queue for dataset: " + str(dataset_name))

    if dataset_name == 'devanagari':
        # ImageFolder lists samples class-by-class, so shuffle the indices
        # before the train/val split is consumed by the samplers
        print("SHUFFLE INDEX LIST BEFORE BATCHING")
        print("Before Shuffle", indices[-10:num_train])
        np.random.shuffle(indices)
        print("After Shuffle", indices[-10:num_train])

    if distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(indices[:split])
    elif evaluate:
        print("Evaluate mode! Training set will appear sequentially.")
        train_sampler = None  # default sampler, i.e. SequentialSampler, when evaluating
    else:
        # shuffling is handled by the subset random sampler, so the DataLoader
        # itself does not need shuffle=True
        train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split])

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=batch_size,
        sampler=train_sampler,
        pin_memory=True, num_workers=num_workers,
        collate_fn=collate_fn)

    if search_architecture:
        # validation sampled from the tail of the (possibly shuffled) training indices
        val_from_train_indices = indices[split:num_train]
        if distributed:
            valid_sampler = torch.utils.data.distributed.DistributedSampler(val_from_train_indices)
        else:
            valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(val_from_train_indices)
        valid_queue = torch.utils.data.DataLoader(
            valid_data, batch_size=batch_size,
            sampler=valid_sampler,
            pin_memory=True, num_workers=num_workers,
            collate_fn=collate_fn)
    else:
        # real test set; sequential unless distributed
        valid_sampler = None
        if distributed:
            valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_data)
        valid_queue = torch.utils.data.DataLoader(
            valid_data, batch_size=batch_size,
            sampler=valid_sampler,
            pin_memory=True, num_workers=num_workers,
            collate_fn=collate_fn)
    return train_queue, valid_queue
def get_costar_test_queue(dataset_location, costar_set_name, costar_subset_name, costar_version='v0.4', costar_feature_mode=None, costar_output_shape=(224, 224, 3),
                          costar_random_augmentation=None, costar_one_hot_encoding=True, costar_num_images_per_example=200, batch_size=32, verbose=0,
                          collate_fn=torch.utils.data.dataloader.default_collate):
    """Build a DataLoader over the CoSTAR block stacking dataset's test split.

    Support for the costar block stacking generator implemented by
    Chia-Hung Lin (rexxarchl). See https://github.com/ahundt/costar_dataset
    and https://sites.google.com/site/costardataset for details.

    # Raises
        ImportError: the costar_dataset package could not be imported.
        ValueError: unknown set or subset name.
    """
    # guard clauses first: fail fast before touching the filesystem
    if costar_dataset is None:
        raise ImportError("Trying to use costar_dataset but it was not imported")
    if verbose > 0:
        print("Getting CoSTAR BSD test set...")
    if costar_set_name not in COSTAR_SET_NAMES:
        raise ValueError("Specify costar_set_name as one of {'blocks_only', 'blocks_with_plush_toy'}")
    if costar_subset_name not in COSTAR_SUBSET_NAMES:
        raise ValueError("Specify costar_subset_name as one of {'success_only', 'error_failure_only', 'task_failure_only', 'task_and_error_failure'}")
    test_split = costar_dataset.CostarBlockStackingDataset.from_standard_txt(
        root=dataset_location, single_batch_cube=False,
        version=costar_version, set_name=costar_set_name, subset_name=costar_subset_name,
        split='test', feature_mode=costar_feature_mode, output_shape=costar_output_shape,
        random_augmentation=costar_random_augmentation, one_hot_encoding=costar_one_hot_encoding,
        verbose=verbose, num_images_per_example=costar_num_images_per_example, is_training=False)
    return torch.utils.data.DataLoader(
        test_split, batch_size=batch_size, collate_fn=collate_fn,
        pin_memory=False, num_workers=4)
| 14,281 | 46.926174 | 164 | py |
sharpDARTS | sharpDARTS-master/cnn/model_search.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from operations import *
import operations
from torch.autograd import Variable
from genotypes import PRIMITIVES, MULTICHANNELNET_PRIMITIVES
from genotypes import Genotype
import networkx as nx
from networkx.readwrite import json_graph
import json
import time
import genotype_extractor
class MixedOp(nn.Module):
  """A weighted mixture of candidate primitive operations over one input."""

  def __init__(self, C, stride, primitives=None, op_dict=None, weighting_algorithm=None):
    """ Perform a mixed forward pass incorporating multiple primitive operations like conv, max pool, etc.

    # Arguments

      primitives: the list of strings defining the operations to choose from.
      op_dict: The dictionary of possible operation creation functions.
        All primitives must be in the op dict.
    """
    super(MixedOp, self).__init__()
    self._stride = stride
    self._weighting_algorithm = weighting_algorithm
    self._primitives = PRIMITIVES if primitives is None else primitives
    factories = operations.OPS if op_dict is None else op_dict
    self._ops = nn.ModuleList()
    for prim_name in self._primitives:
      candidate = factories[prim_name](C, C, stride, False)
      if 'pool' in prim_name:
        # pooling primitives get a non-affine batch norm appended
        candidate = nn.Sequential(candidate, nn.BatchNorm2d(C, affine=False))
      self._ops.append(candidate)

  def forward(self, x, weights):
    """Apply every candidate op to x and combine the results by weight."""
    algo = self._weighting_algorithm
    if algo is None or algo == 'scalar':
      # plain weighted sum of all op outputs
      mixed = 0
      for w, op in zip(weights, self._ops):
        mixed = mixed + w * op(x)
      return mixed
    if algo == 'max_w':
      # rescale so the best op gets weight 1 and others proportionally less
      max_w = torch.max(weights)
      mixed = 0
      for w, op in zip(weights, self._ops):
        mixed = mixed + (1. - max_w + w) * op(x)
      return mixed
    raise ValueError('MixedOP(): Unsupported weighting algorithm: ' + str(algo) +
                     ' try "scalar" or "max_w"')
class Cell(nn.Module):
  """A searchable cell: a small DAG of MixedOps over intermediate states.

  The Cell class in model.py is the equivalent for a single architecture.
  """

  def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev, primitives=None, op_dict=None, weighting_algorithm=None):
    """Create a searchable cell representing multiple architectures.

    # Arguments
      steps: The number of primitive operations in the cell,
        essentially the number of low level layers.
      multiplier: The rate at which the number of channels increases.
      op_dict: The dictionary of possible operation creation functions.
        All primitives must be in the op dict.
    """
    super(Cell, self).__init__()
    self.reduction = reduction
    self._steps = steps
    self._multiplier = multiplier
    # preprocess0 adapts s0 depending on how the previous cell transformed it
    if reduction_prev is None:
      self.preprocess0 = operations.Identity()
    elif reduction_prev:
      self.preprocess0 = FactorizedReduce(C_prev_prev, C, stride=2, affine=False)
    else:
      self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
    self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
    self._ops = nn.ModuleList()
    self._bns = nn.ModuleList()
    for node_idx in range(self._steps):
      for edge_idx in range(2 + node_idx):
        # in a reduction cell, only the two input edges use stride 2
        edge_stride = 2 if reduction and edge_idx < 2 else 1
        self._ops.append(MixedOp(C, edge_stride, primitives, op_dict,
                                 weighting_algorithm=weighting_algorithm))

  def forward(self, s0, s1, weights):
    states = [self.preprocess0(s0), self.preprocess1(s1)]
    offset = 0
    for _ in range(self._steps):
      # each new state sums one MixedOp applied to every earlier state
      node_out = sum(self._ops[offset + j](h, weights[offset + j]) for j, h in enumerate(states))
      offset += len(states)
      states.append(node_out)
    # the cell output concatenates the last `multiplier` states channel-wise
    return torch.cat(states[-self._multiplier:], dim=1)
class Network(nn.Module):
  """DARTS-style searchable network: a stem followed by a stack of `Cell`s.

  Holds both the model weights and the architecture parameters
  (`alphas_normal` / `alphas_reduce`) that weight each candidate op.
  """

  def __init__(self, C=16, num_classes=10, layers=8, criterion=None, steps=4, multiplier=4, stem_multiplier=3,
               in_channels=3, primitives=None, op_dict=None, C_mid=None, weights_are_parameters=False,
               weighting_algorithm=None):
    """Build the searchable network.

    # Arguments
      C: base channel count for the first cell.
      layers: number of cells; cells at 1/3 and 2/3 depth are reduction cells.
      steps: intermediate nodes per cell.
      multiplier: how many node outputs are concatenated per cell.
      stem_multiplier: channel multiplier of the stem convolution.
      weights_are_parameters: register the alphas as nn.Parameters so they
        are trained by the regular optimizer.
      C_mid: currently unused.
    """
    super(Network, self).__init__()
    self._C = C
    self._num_classes = num_classes
    self._layers = layers
    if criterion is None:
      self._criterion = nn.CrossEntropyLoss()
    else:
      self._criterion = criterion
    self._steps = steps
    self._multiplier = multiplier
    self._weights_are_parameters = weights_are_parameters
    if primitives is None:
      primitives = PRIMITIVES
    self.primitives = primitives
    C_curr = stem_multiplier*C
    self.stem = nn.Sequential(
      nn.Conv2d(in_channels, C_curr, 3, padding=1, bias=False),
      nn.BatchNorm2d(C_curr)
    )
    C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
    self.cells = nn.ModuleList()
    reduction_prev = False
    for i in range(layers):
      # double the channels and reduce spatially at 1/3 and 2/3 depth
      if i in [layers//3, 2*layers//3]:
        C_curr *= 2
        reduction = True
      else:
        reduction = False
      cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr,
                  reduction, reduction_prev, primitives, op_dict,
                  weighting_algorithm=weighting_algorithm)
      reduction_prev = reduction
      self.cells += [cell]
      # each cell outputs multiplier*C_curr channels (concatenated states)
      C_prev_prev, C_prev = C_prev, multiplier*C_curr
    self.global_pooling = nn.AdaptiveAvgPool2d(1)
    self.classifier = nn.Linear(C_prev, num_classes)
    self._initialize_alphas()

  def new(self):
    # fresh network with the same config; only architecture params are copied
    model_new = Network(self._C, self._num_classes, self._layers, self._criterion).cuda()
    for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
      x.data.copy_(y.data)
    return model_new

  def forward(self, input):
    s0 = s1 = self.stem(input)
    for i, cell in enumerate(self.cells):
      # reduction and normal cells each have their own alpha set
      if cell.reduction:
        weights = F.softmax(self.alphas_reduce, dim=-1)
      else:
        weights = F.softmax(self.alphas_normal, dim=-1)
      s0, s1 = s1, cell(s0, s1, weights)
    out = self.global_pooling(s1)
    logits = self.classifier(out.view(out.size(0),-1))
    return logits

  def _loss(self, input, target):
    """Criterion value of the model's predictions on input vs target."""
    logits = self(input)
    return self._criterion(logits, target)

  def _initialize_alphas(self):
    # one weight row per directed edge in a cell: sum over steps of (2+i)
    k = sum(1 for i in range(self._steps) for n in range(2+i))
    num_ops = len(self.primitives)
    self.alphas_normal = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)
    self.alphas_reduce = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)
    if self._weights_are_parameters:
      # in simpler training modes the weights are just regular parameters
      self.alphas_normal = torch.nn.Parameter(self.alphas_normal)
      self.alphas_reduce = torch.nn.Parameter(self.alphas_reduce)
    self._arch_parameters = [
      self.alphas_normal,
      self.alphas_reduce,
    ]

  def arch_parameters(self):
    """Return the list [alphas_normal, alphas_reduce]."""
    return self._arch_parameters

  def arch_weights(self, stride_idx):
    """Softmaxed architecture weights; stride_idx 0 = normal, 1 = reduce."""
    weights_softmax_view = self._arch_parameters[stride_idx]
    # apply softmax and convert to an indexable view
    weights = F.softmax(weights_softmax_view, dim=-1)
    return weights

  def genotype(self, skip_primitive='none'):
    '''
    Extract the genotype, or specific connections within a cell, as encoded by the weights.
    # Arguments
      skip_primitives: hack was added by DARTS to temporarily workaround the
        'strong gradient' problem identified in the sharpDARTS paper https://arxiv.org/abs/1903.09900,
        set skip_primitive=None to not skip any primitives.
    '''
    gene_normal = genotype_extractor.parse_cell(
      F.softmax(self.alphas_normal, dim=-1).data.cpu().numpy(),
      primitives=self.primitives, steps=self._steps, skip_primitive=skip_primitive)
    gene_reduce = genotype_extractor.parse_cell(
      F.softmax(self.alphas_reduce, dim=-1).data.cpu().numpy(),
      primitives=self.primitives, steps=self._steps, skip_primitive=skip_primitive)
    # indices of the states concatenated into each cell's output
    concat = range(2+self._steps-self._multiplier, self._steps+2)
    genotype = Genotype(
      normal=gene_normal, normal_concat=concat,
      reduce=gene_reduce, reduce_concat=concat,
      layout='cell',
    )
    return genotype
class AverageMeter(object):
  """Computes and stores the average and current value"""

  def __init__(self):
    self.reset()

  def reset(self):
    """Clear all running statistics."""
    self.val = 0
    self.sum = 0
    self.count = 0
    self.avg = 0

  def update(self, val, n=1):
    """Record `val` observed `n` times and refresh the running mean."""
    self.val = val
    self.count += n
    self.sum += val * n
    self.avg = self.sum / self.count
class MultiChannelNetwork(nn.Module):
"""
This class is used to perform Differentiable Grid Search using a set of primitives of your choice (refer to genotypes.py and operations.py for
more information on the different types of primitives available and their definition).
For more information about the search please refer to section 4 of the paper https://arxiv.org/abs/1903.09900
"""
  def __init__(self, C=32, num_classes=10, layers=6, criterion=None, steps=5, multiplier=4, stem_multiplier=3,
               in_channels=3, final_linear_filters=768, always_apply_ops=False, visualization=False, primitives=None, op_dict=None,
               weighting_algorithm=None, genotype=None):
    """Build the full grid-search network and its architecture graph.

    C is the minimum number of channels. Layers is how many output scaling
    factors and layers should be in the network (must be even; it is halved
    internally because each layer runs a stride-1 and a stride-2 pass).

    # Arguments
      steps: number of channel counts tried; channel options are
        C * 2**i for i in range(steps).
      final_linear_filters: channel count fed to the final Linear classifier.
      always_apply_ops: when True, run every op regardless of weight score.
      visualization: build the graph only; skip alpha initialization and
        weight application.
      op_dict: The dictionary of possible operation creation functions.
        All primitives must be in the op dict.
      genotype: optional preconfigured architecture weights, forwarded to
        _initialize_alphas.
    """
    super(MultiChannelNetwork, self).__init__()
    self._C = C
    if genotype is not None:
      # TODO(ahundt) We shouldn't be using arrays here, we should be using actual genotype objects.
      self._genotype = np.array(genotype)
    else:
      self._genotype = genotype
    self._num_classes = num_classes
    if layers % 2 == 1:
      raise ValueError('MultiChannelNetwork layers option must be even, got ' + str(layers))
    # each stored layer runs both a stride-1 and a stride-2 pass, hence // 2
    self._layers = layers // 2
    if criterion is None:
      self._criterion = nn.CrossEntropyLoss()
    else:
      self._criterion = criterion
    self._steps = steps
    self._multiplier = multiplier
    self._always_apply_ops = always_apply_ops
    self._visualization = visualization
    self._weighting_algorithm = weighting_algorithm
    if primitives is None:
      primitives = MULTICHANNELNET_PRIMITIVES
    if op_dict is None:
      op_dict = operations.MULTICHANNELNET_OPS
    self.primitives = primitives
    self.op_dict = op_dict
    self.normal_index = 0
    self.reduce_index = 1
    self.layer_types = 2
    self.strides = np.array([self.normal_index, self.reduce_index])
    # 5 is a reasonable number
    self.C_start = int(np.log2(C))
    self.C_end = self.C_start + steps
    print('c_start: ' + str(self.C_start) + ' c_end: ' + str(self.C_end))
    # channel count options, e.g. [32, 64, 128, 256, 512] for C=32, steps=5
    self.Cs = np.array(np.exp2(np.arange(self.C_start, self.C_end)), dtype='int')
    # $ print(Cs)
    # [ 32.  64. 128. 256. 512.]
    self.C_size = len(self.Cs)
    # meshgrid of every (input channels, output channels) combination
    C_in, C_out = np.array(np.meshgrid(self.Cs, self.Cs, indexing='ij'), dtype='int')
    # $ print(C_in)
    # [[ 32.  32.  32.  32.  32.]
    #  [ 64.  64.  64.  64.  64.]
    #  [128. 128. 128. 128. 128.]
    #  [256. 256. 256. 256. 256.]
    #  [512. 512. 512. 512. 512.]]
    # $ print(C_out)
    # [[ 32.  64. 128. 256. 512.]
    #  [ 32.  64. 128. 256. 512.]
    #  [ 32.  64. 128. 256. 512.]
    #  [ 32.  64. 128. 256. 512.]
    #  [ 32.  64. 128. 256. 512.]]
    # Switching to primitives.
    # self.op_types = [operations.SharpSepConv, operations.ResizablePool]
    # one stem conv per channel option; self.G mirrors the whole network as a
    # DiGraph for visualization / path extraction
    self.stem = nn.ModuleList()
    self.G = nx.DiGraph()
    self.G.add_node("Source")
    self.G.nodes["Source"]['demand'] = -1
    for i, c in enumerate(self.Cs):
      s = nn.Sequential(
        nn.Conv2d(int(in_channels), int(c), 3, padding=1, bias=False),
        nn.BatchNorm2d(c)
      )
      self.G.add_edge("Source", "Conv3x3_"+str(i))
      self.G["Source"]["Conv3x3_"+str(i)]["weight"] = 600
      self.G.add_node("Conv3x3_"+str(i))
      self.G.add_node("BatchNorm_"+str(i))
      self.G.add_edge("Conv3x3_"+str(i), "BatchNorm_"+str(i))
      self.stem.append(s)
    # pre-create every per-(layer, stride, c_out) accumulation node
    for layer_idx in range(self._layers):
      for stride_idx in self.strides:
        for C_out_idx in range(self.C_size):
          out_node = 'layer_'+str(layer_idx)+'_add_'+'c_out_'+str(self.Cs[C_out_idx])+'_stride_' + str(stride_idx+1)
          self.G.add_node(out_node)
    # op_grid is indexed [layer][stride][c_in][c_out][primitive]
    self.op_grid = nn.ModuleList()
    for layer_idx in range(self._layers):
      stride_modules = nn.ModuleList()
      for stride_idx in self.strides:
        in_modules = nn.ModuleList()
        for C_in_idx in range(self.C_size):
          out_modules = nn.ModuleList()
          # print('init layer: ' + str(layer_idx) + ' stride: ' + str(stride_idx+1) + ' c_in: ' + str(self.Cs[C_in_idx]))
          for C_out_idx in range(self.C_size):
            out_node = 'layer_'+str(layer_idx)+'_add_'+'c_out_'+str(self.Cs[C_out_idx])+'_stride_' + str(stride_idx+1)
            type_modules = nn.ModuleList()
            # switching to primitives
            # for OpType in self.op_types:
            for primitive_idx, primitive in enumerate(self.primitives):
              cin = C_in[C_in_idx][C_out_idx]
              cout = C_out[C_in_idx][C_out_idx]
              # print('cin: ' + str(cin) + ' cout: ' + str(cout))
              name = 'layer_' + str(layer_idx) + '_stride_' + str(stride_idx+1) + '_c_in_' + str(self.Cs[C_in_idx]) + '_c_out_' + str(self.Cs[C_out_idx]) + '_op_type_' + str(primitive) + '_opid_' + str(primitive_idx)
              self.G.add_node(name)
              # wire this op's input edge: from the stem, from this layer's
              # stride-1 output, or from the previous layer's stride-2 output
              if layer_idx == 0 and stride_idx == 0:
                self.G.add_edge("BatchNorm_"+str(C_in_idx), name)
              elif stride_idx > 0 or layer_idx == 0:
                self.G.add_edge('layer_' + str(layer_idx)+'_add_' + 'c_out_'+str(self.Cs[C_in_idx])+'_stride_' + str(stride_idx), name)
              else:
                self.G.add_edge('layer_' + str(layer_idx-1)+'_add_' + 'c_out_'+str(self.Cs[C_in_idx])+'_stride_' + str(self.strides[-1] + 1), name)
              self.G.add_edge(name, out_node)
              # op = OpType(int(cin), int(cout), kernel_size=3, stride=int(stride_idx + 1))
              op = self.op_dict[primitive](int(cin), int(cout), int(stride_idx + 1), False)
              # Consistent with MixedOp
              if 'pool' in primitive:
                op = nn.Sequential(op, nn.BatchNorm2d(int(cout), affine=False))
              # Decreasing feature maps so that output is as expected.
              if 'none' in primitive or ('skip_connect' in primitive and stride_idx == 0):
                op = nn.Sequential(op, nn.Conv2d(int(cin), int(cout), 1))
              type_modules.append(op)
            out_modules.append(type_modules)
          in_modules.append(out_modules)
        # op grid is stride_modules
        stride_modules.append(in_modules)
      self.op_grid.append(stride_modules)
    # self.base maps each final c_out to final_linear_filters via SharpSepConv
    self.base = nn.ModuleList()
    self.G.add_node("add-SharpSep")
    self.time_between_layers = AverageMeter()
    for c in self.Cs:
      self.G.add_node("SharpSepConv" + str(c))
      out_node = 'layer_'+str(self._layers-1)+'_add_'+'c_out_'+str(c)+'_stride_' + str(self.strides[-1] + 1)
      self.G.add_edge("SharpSepConv" + str(c), "add-SharpSep")
      self.G.add_edge(out_node, "SharpSepConv" + str(c))
      self.base.append(operations.SharpSepConv(int(c), int(final_linear_filters), 3))
    # TODO(ahundt) there should be one more layer of normal convolutions to set the final linear layer size
    # C_in will be defined by the previous layer's c_out
    self.arch_weights_shape = [len(self.strides), self._layers, self.C_size, self.C_size, len(self.primitives)]
    # number of weights total
    self.weight_count = np.prod(self.arch_weights_shape)
    # number of weights in a softmax call
    self.softmax_weight_count = np.prod(self.arch_weights_shape[2:])
    # minimum score for a layer to continue being trained
    self.min_score = float(1 / (self.softmax_weight_count * self.softmax_weight_count))
    self.global_pooling = nn.AdaptiveAvgPool2d(1)
    self.classifier = nn.Linear(final_linear_filters, num_classes)
    self.G.add_node("global_pooling")
    self.G.add_edge("add-SharpSep", "global_pooling")
    self.G.add_node("Linear")
    self.G.nodes["Linear"]['demand'] = 1
    self.G.add_edge("global_pooling", "Linear")
    self.G["global_pooling"]["Linear"]["weight"] = 800
    print("Saving graph...")
    nx.write_gpickle(self.G, "network_test.graph")
    if not self._visualization:
      self._initialize_alphas(genotype)
def new(self):
model_new = Network(self._C, self._num_classes, self._layers, self._criterion).cuda()
for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
x.data.copy_(y.data)
return model_new
  def forward(self, input_batch):
    """Run the full op grid, weighting each op's output by its alpha score.

    Each of self._layers macro-layers runs a stride-1 pass then a stride-2
    pass over every (C_in, C_out, primitive) combination; outputs sharing a
    C_out are summed, and the stride-2 sums become the next layer's inputs.
    Also writes 'weight'/'weight_int' (and, in eval mode, 'capacity') edge
    attributes into self.G as a side effect.
    """
    self.C_size = len(self.Cs)
    # s0s[0]: current inputs, one per channel count;
    # s0s[1]/s0s[2]: accumulated outputs of the stride-1/stride-2 passes
    s0s = [[], [None] * self.C_size, [None] * self.C_size]
    for i, C_in in enumerate(self.Cs):
      # Make the set of features with different numbers of channels.
      s0s[0] += [self.stem[i](input_batch)]
    # calculate weights, there are two weight views according to stride
    weight_views = []
    if not self._visualization:
      for stride_idx in self.strides:
        # ops are stored as layer, stride, cin, cout, num_layer_types
        # while weights are ordered stride_index, layer, cin, cout, num_layer_types
        # first exclude the stride_idx because we already know that
        weight_views += [self.arch_weights(stride_idx)]
    # Duplicate s0s to account for 2 different strides
    # s0s += [[]]
    # s1s = [None] * layers + 1
    # computing capacity during model eval
    if self.training is False:
      # time_between_layers = AverageMeter()
      end_time = time.time()
    for layer in range(self._layers):
      # layer is how many times we've called everything, i.e. the number of "layers"
      # this is different from the number of layer types which is len([SharpSepConv, ResizablePool]) == 2
      layer_st_time = time.time()
      for stride_idx in self.strides:
        stride = 1 + stride_idx
        # we don't pass the gradient along max_w because it is the weight for a different operation.
        # TODO(ahundt) is there a better way to create this variable without gradients & reallocating repeatedly?
        # max_w = torch.Variable(torch.max(weight_views[stride_idx][layer, :, :, :]), requires_grad=False).cuda()
        # find the maximum comparable weight, copy it and make sure we don't pass gradients along that path
        if not self._visualization and self._weighting_algorithm is not None and self._weighting_algorithm == 'max_w':
          max_w = torch.max(weight_views[stride_idx][layer, :, :, :])
        for C_out_idx, C_out in enumerate(self.Cs):
          # take all the layers with the same output so we can sum them
          # print('forward layer: ' + str(layer) + ' stride: ' + str(stride) + ' c_out: ' + str(self.Cs[C_out_idx]))
          out_node = 'layer_'+str(layer)+'_add_'+'c_out_'+str(C_out)+'_stride_' + str(stride_idx+1)
          c_outs = []
          # compute average time when validating model.
          if self.training is False:
            self.time_between_layers.update(time.time() - end_time)
            time_in_layers = AverageMeter()
          for C_in_idx, C_in in enumerate(self.Cs):
            for primitive_idx, primitive in enumerate(self.primitives):
              if self.training is False:
                op_st_time = time.time()
              # get the specific weight for this op
              name = 'layer_' + str(layer) + '_stride_' + str(stride_idx+1) + '_c_in_' + str(C_in) + '_c_out_' + str(C_out) + '_op_type_' + str(primitive) + '_opid_' + str(primitive_idx)
              if not self._visualization:
                w = weight_views[stride_idx][layer, C_in_idx, C_out_idx, primitive_idx]
                # self.G.add_edge(name, out_node, {weight: w})
                self.G[name][out_node]["weight"] = float(w.clone().cpu().detach().numpy())
                self.G[name][out_node]["weight_int"] = int(float(w.clone().cpu().detach().numpy()) * 1e+5)
                # print('w weight_views[stride_idx][layer, C_in_idx, C_out_idx, op_type_idx]: ' + str(w))
              # apply the operation then weight, equivalent to
              # w * op(input_feature_map)
              # TODO(ahundt) fix conditionally evaluating calls with high ratings, there is currently a bug
              # NOTE(review): in visualization mode `w` is never assigned, so this
              # condition relies on _always_apply_ops short-circuiting — confirm.
              if self._always_apply_ops or w > self.min_score:
                # only apply an op if weight score isn't too low: w > 1/(N*N)
                # x = 1 - max_w + w so that max_w gets a score of 1 and everything else gets a lower score accordingly.
                s = s0s[stride_idx][C_in_idx]
                if s is not None:
                  if not self._visualization:
                    if self._weighting_algorithm is None or self._weighting_algorithm == 'scalar':
                      x = w * self.op_grid[layer][stride_idx][C_in_idx][C_out_idx][primitive_idx](s)
                    elif self._weighting_algorithm == 'max_w':
                      # print(name)
                      # print(s.size())
                      x = (1. - max_w + w) * self.op_grid[layer][stride_idx][C_in_idx][C_out_idx][primitive_idx](s)
                      # self.G[name][out_node]["weight"] = (1. - max_w + w)
                    else:
                      raise ValueError(
                        'MultiChannelNetwork.forward(): Unsupported weighting algorithm: ' +
                        str(self._weighting_algorithm) + ' try "scalar" or "max_w"')
                  else:
                    # doing visualization, skip the weights
                    x = self.op_grid[layer][stride_idx][C_in_idx][C_out_idx][primitive_idx](s)
                  c_outs += [x]
                  # compute average time when validating model.
                  if self.training is False:
                    time_in_layers.update(time.time() - op_st_time)
                    self.G[name][out_node]["capacity"] = time_in_layers.avg + self.time_between_layers.avg
                    end_time = time.time()
          # only apply updates to layers of sufficient quality
          if c_outs:
            # print('combining c_outs forward layer: ' + str(layer) + ' stride: ' + str(stride) + ' c_out: ' + str(self.Cs[C_out_idx]) + ' c_in: ' + str(self.Cs[C_in_idx]) + ' op type: ' + str(op_type_idx))
            # combined values with the same c_out dimension
            combined = sum(c_outs)
            if s0s[stride][C_out_idx] is None:
              # first call sets the value
              s0s[stride][C_out_idx] = combined
            else:
              s0s[stride][C_out_idx] += combined
      # downscale reduced input as next output
      # (stride == 2 here: the stride-2 outputs feed the next layer)
      s0s = [s0s[stride], [None] * self.C_size, [None] * self.C_size]
    # combine results
    # use SharpSepConv to match dimension of final linear layer
    # then add up all remaining outputs and pool the result
    out = self.global_pooling(sum(op(x) for op, x in zip(self.base, s0s[0]) if x is not None))
    # outs = []
    # print('len s0s[0]: ' + str(len(s0s[0])))
    # for i, op in enumerate(self.base):
    #   x = s0s[0][i]
    #   if x is not None:
    #     outs += [op()]
    # out = sum(outs)
    # out = self.global_pooling(out)
    logits = self.classifier(out.view(out.size(0),-1))
    # print('logits')
    #print("Optimal_path_forward", nx.algorithms.dag.dag_longest_path(self.G))
    #print("Top down greedy", self.gen_greedy_path(self.G,"top_down"))
    #print("Bottom up greedy",self.gen_greedy_path(self.G,"bottom_up"))
    return logits
def gen_greedy_path(self, G, strategy="top_down"):
    """Greedily follow the highest-weight edge from source to sink of G.

    At every step the walk moves to the neighbor whose edge "weight"
    attribute is largest. strategy "top_down" walks Source -> Linear on G
    itself; "bottom_up" walks the reversed graph starting from Linear and
    returns the path re-oriented so it still reads Source -> Linear.

    Returns a list of node names describing the chosen path.

    NOTE(review): if `strategy` is neither option, start_/new_G are never
    bound and the loop raises NameError; the walk also assumes every node
    has at least one weighted out-edge, otherwise current_node may repeat
    forever -- confirm the search graphs guarantee this.
    """
    if strategy == "top_down":
        start_ = "Source"
        current_node = "Source"
        end_node = "Linear"
        new_G = G
    elif strategy == "bottom_up":
        start_ = "Linear"
        current_node = "Linear"
        end_node = "Source"
        # Reverse the graph so neighbors() walks toward the Source.
        new_G = G.reverse(copy=True)
    # Best weight seen among the current node's out-edges; reset to -1 after
    # the first hop so zero-weight edges can still be chosen later.
    wt = 0
    node_list = []
    while current_node != end_node:
        neighbors = [n for n in new_G.neighbors(start_)]
        for nodes in neighbors:
            # get_edge_data returns the edge's attribute dict, or the default
            # (here the string "weight") when the edge does not exist.
            weight_ = new_G.get_edge_data(start_, nodes, "weight")
            # print(weight_)
            if len(weight_):
                weight_ = weight_["weight"]
            else:
                weight_ = 0
            # print(weight_)
            if weight_ > wt:
                wt = weight_
                current_node = nodes
        node_list.append(current_node)
        # print("start",start_)
        # print(node)
        start_ = current_node
        wt = -1
    # print(node_list)
    if strategy == "bottom_up":
        # Re-orient the path and re-attach the Linear sink, which the
        # reversed walk started from and therefore never appended.
        node_list = node_list[::-1]
        node_list.append("Linear")
    return node_list
def arch_weights(self, stride_idx):
    """Return softmax-normalized architecture weights for one stride index.

    Ops are stored as layer, stride, cin, cout, num_layer_types while raw
    weights are ordered stride_idx, layer, cin, cout, num_layer_types; the
    leading stride dimension is dropped here because `stride_idx` selects it.
    The softmax is applied once per layer across all remaining choices.
    """
    # Shape of the returned tensor: everything after the stride dimension.
    target_shape = self.arch_weights_shape[1:]
    layer_count = self.arch_weights_shape[1]
    # Flatten each layer's choice dimensions so softmax normalizes per layer.
    flat_weights = self._arch_parameters[stride_idx].view(layer_count, -1)
    return F.softmax(flat_weights, dim=-1).view(target_shape)
def _loss(self, input_batch, target):
    """Run the network forward on `input_batch`, then score the predictions
    against `target` with the stored criterion."""
    predictions = self(input_batch)
    return self._criterion(predictions, target)
def _initialize_alphas(self, genotype=None):
    """Create the architecture parameter tensor self._arch_parameters.

    Without a genotype (or with a 'longest_path' layout genotype) the alphas
    start as small random values; otherwise the normal/reduce weights stored
    in the genotype are loaded verbatim.
    """
    random_init = genotype is None or genotype[-1] == 'longest_path'
    if random_init:
        # Small random values break symmetry without dominating early softmax.
        init_alpha = 1e-3 * torch.randn(self.arch_weights_shape)
    else:
        print("_initialize_alphas with preconfigured weights", genotype[0][0][0][0])
        # genotype[0] holds the normal weights, genotype[2] the reduce weights.
        init_alpha = torch.from_numpy(np.array([genotype[0], genotype[2]])).float()
    if torch.cuda.is_available():
        init_alpha = init_alpha.cuda()
    self._arch_parameters = Variable(init_alpha, requires_grad=True)
def arch_parameters(self):
    """Return the architecture parameters wrapped in a one-element list."""
    params = [self._arch_parameters]
    return params
def genotype(self, layout='raw_weights'):
    """Serialize the searched architecture into a Genotype tuple.

    layout options: raw_weights (softmaxed alpha tensors as nested lists),
    longest_path (node names on the heaviest DAG path), graph (node-link
    JSON dump of the whole weight graph). Raises ValueError otherwise.
    """
    gene_reduce = []
    if layout == 'raw_weights':
        # TODO(ahundt) switch from raw weights to a simpler representation for genotype?
        gene_normal = np.array(self.arch_weights(0).data.cpu().numpy()).tolist()
        gene_reduce = np.array(self.arch_weights(1).data.cpu().numpy()).tolist()
    elif layout == 'longest_path':
        # TODO(ahundt) make into a list of the layer strings to be included.
        gene_normal = nx.algorithms.dag.dag_longest_path(self.G)
    elif layout == 'graph':
        graph_data = json_graph.node_link_data(self.G)
        gene_normal = [json.dumps(graph_data)]
    else:
        raise ValueError('unsupported layout: ' + str(layout))
    return Genotype(
        normal=gene_normal, normal_concat=[],
        reduce=gene_reduce, reduce_concat=[],
        layout=layout
    )
| 27,726 | 42.188474 | 218 | py |
sharpDARTS | sharpDARTS-master/cnn/train_search.py | import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import copy
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import networkx as nx
from torch.autograd import Variable
import model_search
from architect import Architect
from PIL import Image
import random
from tqdm import tqdm
# import dataset
# from Padam import Padam
import json
# from learning_rate_schedulers import CosineWithRestarts
import operations
import genotypes
import dataset
from cosine_power_annealing import cosine_power_annealing
import matplotlib.pyplot as plt
try:
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
except ImportError:
try:
import pydotplus
from networkx.drawing.nx_pydot import graphviz_layout
except ImportError:
raise ImportError("Needs PyGraphviz or PyDotPlus to generate graph visualization")
# Command line interface for the architecture search entry point.
parser = argparse.ArgumentParser("Common Argument Parser")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='cifar10',
                    help='which dataset: cifar10, mnist, emnist, fashion, svhn, stl10, devanagari')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
# Learning rate schedule bounds consumed by cosine_power_annealing below.
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=1e-4, help='min learning rate')
parser.add_argument('--lr_power_annealing_exponent_order', type=float, default=2,
                    help='Cosine Power Annealing Schedule Base, larger numbers make '
                         'the exponential more dominant, smaller make cosine more dominant, '
                         '1 returns to standard cosine annealing.')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=50, help='num of training epochs')
parser.add_argument('--start_epoch', default=1, type=int, metavar='N',
                    help='manual epoch number (useful for restarts)')
parser.add_argument('--warmup_epochs', type=int, default=5, help='num of warmup training epochs')
# Network shape parameters.
parser.add_argument('--init_channels', type=int, default=16, help='num of init channels')
parser.add_argument('--mid_channels', type=int, default=32, help='C_mid channels in choke SharpSepConv')
parser.add_argument('--layers_of_cells', type=int, default=8, help='total number of cells in the whole network, default is 8 cells')
parser.add_argument('--layers_in_cells', type=int, default=4,
                    help='Total number of nodes in each cell, aka number of steps,'
                         ' default is 4 nodes, which implies 8 ops')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
# Data augmentation options.
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--autoaugment', action='store_true', default=False, help='use cifar10 autoaugment https://arxiv.org/abs/1805.09501')
parser.add_argument('--random_eraser', action='store_true', default=False, help='use random eraser')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--no_architect', action='store_true', default=False, help='directly train genotype parameters, disable architect.')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
# Separate optimizer settings for the architecture (alpha) parameters.
parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')
parser.add_argument('--multi_channel', action='store_true', default=False, help='perform multi channel search, a completely separate search space')
# Search space selection: names are resolved against operations.py / genotypes.py.
parser.add_argument('--ops', type=str, default='OPS', help='which operations to use, options are OPS, DARTS_OPS and MULTICHANNELNET_OPS')
parser.add_argument('--primitives', type=str, default='PRIMITIVES',
                    help='which primitive layers to use inside a cell search space,'
                         ' options are PRIMITIVES, DARTS_PRIMITIVES AND MULTICHANNELNET_PRIMITIVES')
parser.add_argument('-e', '--evaluate', dest='evaluate', type=str, metavar='PATH', default='',
                    help='evaluate model at specified path on training, test, and validation datasets')
parser.add_argument('--load', type=str, default='', metavar='PATH', help='load weights at specified location')
parser.add_argument('--load_args', type=str, default='', metavar='PATH',
                    help='load command line args from a json file, this will override '
                         'all currently set args except for --evaluate, and arguments '
                         'that did not exist when the json file was originally saved out.')
parser.add_argument('--weighting_algorithm', type=str, default='scalar',
                    help='which operations to use, options are '
                         '"max_w" (1. - max_w + w) * op, and scalar (w * op)')
# TODO(ahundt) remove final path and switch back to genotype
parser.add_argument('--final_path', type=str, default=None, help='path for final model')
parser.add_argument('--load_genotype', type=str, default=None, help='Name of genotype to be used')
args = parser.parse_args()
# The search space is identified by the primitives list plus the ops dictionary.
args.arch = args.primitives + '-' + args.ops
# TODO(ahundt) enable --dataset flag, merge code from mixed_aux branch
args = utils.initialize_files_and_args(args, run_type='search')
logger = utils.logging_setup(args.log_file_path)
# NOTE(review): hard-coded to 10 output classes; presumably only correct for
# cifar10-style datasets -- confirm before using other --dataset values.
CIFAR_CLASSES = 10
def main():
    """Entry point: build the search model, then alternate weight and
    architecture training for args.epochs epochs, checkpointing the best
    validation accuracy and per-epoch stats under args.save."""
    if not torch.cuda.is_available():
        logger.info('no gpu device available')
        sys.exit(1)
    # Seed every RNG so search runs are reproducible.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    logger.info('gpu device = %d' % args.gpu)
    logger.info("args = %s", args)
    # # load the correct ops dictionary
    op_dict_to_load = "operations.%s" % args.ops
    logger.info('loading op dict: ' + str(op_dict_to_load))
    # NOTE(review): eval() on a command line argument; only acceptable for
    # trusted CLI use -- getattr(operations, args.ops) would be safer.
    op_dict = eval(op_dict_to_load)
    # load the correct primitives list
    primitives_to_load = "genotypes.%s" % args.primitives
    logger.info('loading primitives:' + primitives_to_load)
    primitives = eval(primitives_to_load)
    logger.info('primitives: ' + str(primitives))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    if args.multi_channel:
        # Multi-channel search space: a graph of channel/stride choices.
        final_path = None
        if args.final_path is not None:
            final_path = np.load(args.final_path)
        genotype = None
        if args.load_genotype is not None:
            genotype = getattr(genotypes, args.load_genotype)
        cnn_model = model_search.MultiChannelNetwork(
            args.init_channels, CIFAR_CLASSES, layers=args.layers_of_cells, criterion=criterion, steps=args.layers_in_cells, primitives=primitives,
            op_dict=op_dict, weighting_algorithm=args.weighting_algorithm, genotype=genotype)
        #save_graph(cnn_model.G, os.path.join(args.save, 'network_graph.pdf'))
        if args.load_genotype is not None:
            # TODO(ahundt) support other batch shapes
            # A dummy forward pass populates the weight graph before logging.
            data_shape = [1, 3, 32, 32]
            batch = torch.zeros(data_shape)
            cnn_model(batch)
            logger.info("loaded genotype_raw_weights = " + str(cnn_model.genotype('raw_weights')))
            logger.info("loaded genotype_longest_path = " + str(cnn_model.genotype('longest_path')))
            logger.info("loaded genotype greedy_path = " + str(gen_greedy_path(cnn_model.G, strategy="top_down")))
            logger.info("loaded genotype greedy_path_bottom_up = " + str(gen_greedy_path(cnn_model.G, strategy="bottom_up")))
            # TODO(ahundt) support other layouts
    else:
        cnn_model = model_search.Network(
            args.init_channels, CIFAR_CLASSES, layers=args.layers_of_cells, criterion=criterion, steps=args.layers_in_cells,
            primitives=primitives, op_dict=op_dict, weights_are_parameters=args.no_architect, C_mid=args.mid_channels,
            weighting_algorithm=args.weighting_algorithm)
    cnn_model = cnn_model.cuda()
    logger.info("param size = %fMB", utils.count_parameters_in_MB(cnn_model))
    if args.load:
        logger.info('loading weights from: ' + args.load)
        utils.load(cnn_model, args.load)
    optimizer = torch.optim.SGD(
        cnn_model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay)
    # Get preprocessing functions (i.e. transforms) to apply on data
    train_transform, valid_transform = utils.get_data_transforms(args)
    # Get the training queue, select training and validation from training set
    train_queue, valid_queue = dataset.get_training_queues(
        args.dataset, train_transform, valid_transform, args.data, args.batch_size, args.train_portion,
        search_architecture=True)
    # Precompute the whole learning rate schedule up front.
    lr_schedule = cosine_power_annealing(
        epochs=args.epochs, max_lr=args.learning_rate, min_lr=args.learning_rate_min,
        warmup_epochs=args.warmup_epochs, exponent_order=args.lr_power_annealing_exponent_order)
    epochs = np.arange(args.epochs) + args.start_epoch
    # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
    #     optimizer, float(args.epochs), eta_min=args.learning_rate_min)
    if args.no_architect:
        # --no_architect trains the architecture weights directly as model parameters.
        architect = None
    else:
        architect = Architect(cnn_model, args)
    epoch_stats = []
    stats_csv = args.epoch_stats_file
    stats_csv = stats_csv.replace('.json', '.csv')
    with tqdm(epochs, dynamic_ncols=True) as prog_epoch:
        best_valid_acc = 0.0
        best_epoch = 0
        # state_dict = {}
        # og_state_keys = set()
        # updated_state_keys = set()
        #saving state_dict for debugging weights by comparison
        # for key in cnn_model.state_dict():
        #     state_dict[key] = cnn_model.state_dict()[key].clone()
        #     # logger.info('layer = {}'.format(key))
        # logger.info('Total keys in state_dict = {}'.format(len(cnn_model.state_dict().keys())))
        # og_state_keys.update(cnn_model.state_dict().keys())
        best_stats = {}
        weights_file = os.path.join(args.save, 'weights.pt')
        for epoch, learning_rate in zip(prog_epoch, lr_schedule):
            # scheduler.step()
            # lr = scheduler.get_lr()[0]
            # Apply the precomputed schedule manually to every param group.
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate
            genotype = None
            if args.final_path is None:
                genotype = cnn_model.genotype()
                logger.info('genotype = %s', genotype)
            if not args.multi_channel:
                # the genotype is the alphas in the multi-channel case
                # print the alphas in other cases
                logger.info('alphas_normal = %s', cnn_model.arch_weights(0))
                logger.info('alphas_reduce = %s', cnn_model.arch_weights(1))
            # training
            train_acc, train_obj = train(train_queue, valid_queue, cnn_model, architect, criterion, optimizer, learning_rate)
            if args.multi_channel and args.final_path is None:
                # TODO(ahundt) remove final path and switch back to genotype, and save out raw weights plus optimal path
                optimal_path = nx.algorithms.dag.dag_longest_path(cnn_model.G)
                optimal_path_filename = os.path.join(args.save, 'longest_path_layer_sequence.npy')
                logger.info('Saving model layer sequence object: ' + str(optimal_path_filename))
                np.save(optimal_path_filename, optimal_path)
                graph_filename = os.path.join(args.save, 'network_graph_' + str(epoch) + '.graph')
                logger.info('Saving updated weight graph: ' + str(graph_filename))
                nx.write_gpickle(cnn_model.G, graph_filename)
                logger.info('optimal_path : %s', optimal_path)
            # validation
            valid_acc, valid_obj = infer(valid_queue, cnn_model, criterion)
            if valid_acc > best_valid_acc:
                # new best epoch, save weights
                utils.save(cnn_model, weights_file)
                if args.multi_channel:
                    graph_filename = os.path.join(args.save, 'network_graph_best_valid' + str(epoch) + '.graph')
                    logger.info('Saving updated weight graph: ' + str(graph_filename))
                best_epoch = epoch
                best_valid_acc = valid_acc
            prog_epoch.set_description(
                'Overview ***** best_epoch: {0} best_valid_acc: {1:.2f} ***** Progress'
                .format(best_epoch, best_valid_acc))
            logger.info('epoch, %d, train_acc, %f, valid_acc, %f, train_loss, %f, valid_loss, %f, lr, %e, best_epoch, %d, best_valid_acc, %f',
                        epoch, train_acc, valid_acc, train_obj, valid_obj, learning_rate, best_epoch, best_valid_acc)
            stats = {
                'epoch': epoch,
                'train_acc': train_acc,
                'valid_acc': valid_acc,
                'train_loss': train_obj,
                'valid_loss': valid_obj,
                'lr': learning_rate,
                'best_epoch': best_epoch,
                'best_valid_acc': best_valid_acc,
                'genotype': str(genotype),
                'arch_weights': str(cnn_model.arch_weights)}
            # Persist stats after every epoch so a crash loses nothing.
            epoch_stats += [copy.deepcopy(stats)]
            with open(args.epoch_stats_file, 'w') as f:
                json.dump(epoch_stats, f, cls=utils.NumpyEncoder)
            utils.list_of_dicts_to_csv(stats_csv, epoch_stats)
    # print the final model
    if args.final_path is None:
        genotype = cnn_model.genotype()
        logger.info('genotype = %s', genotype)
    logger.info('Search for Model Complete! Save dir: ' + str(args.save))
def train(train_queue, valid_queue, cnn_model, architect, criterion, optimizer, lr):
    """Run one epoch of architecture-search training.

    Alternates an architect step (bilevel alpha update on a validation batch)
    with a standard weight update on the training batch. Returns
    (top1_accuracy_avg, loss_avg) over the epoch.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    progbar = tqdm(train_queue, dynamic_ncols=True)
    for step, (input_batch, target) in enumerate(progbar):
        cnn_model.train()
        n = input_batch.size(0)
        input_batch = Variable(input_batch, requires_grad=False).cuda()
        target = Variable(target, requires_grad=False).cuda(non_blocking=True)
        # get a random minibatch from the search queue with replacement
        input_search, target_search = next(iter(valid_queue))
        input_search = Variable(input_search, requires_grad=False).cuda(non_blocking=True)
        target_search = Variable(target_search, requires_grad=False).cuda(non_blocking=True)
        cnn_model.time_between_layers.reset()
        # define validation loss for analyzing the importance of hyperparameters
        if architect is not None:
            val_loss = architect.step(input_batch, target, input_search, target_search, lr, optimizer, unrolled=args.unrolled)
        optimizer.zero_grad()
        logits = cnn_model(input_batch)
        loss = criterion(logits, target)
        loss.backward()
        # clip_grad_norm_ is the in-place replacement for the deprecated
        # clip_grad_norm, which was removed in later PyTorch releases.
        nn.utils.clip_grad_norm_(cnn_model.parameters(), args.grad_clip)
        optimizer.step()
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        objs.update(loss.data.item(), n)
        top1.update(prec1.data.item(), n)
        top5.update(prec5.data.item(), n)
        progbar.set_description('Training loss: {0:9.5f}, top 1: {1:5.2f}, top 5: {2:5.2f} progress'.format(objs.avg, top1.avg, top5.avg))
    return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
    """Evaluate `model` over the validation queue without gradients.

    Returns (top1_accuracy_avg, loss_avg).
    """
    loss_meter = utils.AvgrageMeter()
    acc1_meter = utils.AvgrageMeter()
    acc5_meter = utils.AvgrageMeter()
    model.eval()
    with torch.no_grad():
        progress = tqdm(valid_queue, dynamic_ncols=True)
        for step, (images, labels) in enumerate(progress):
            images = Variable(images).cuda(non_blocking=True)
            labels = Variable(labels).cuda(non_blocking=True)
            logits = model(images)
            loss = criterion(logits, labels)
            prec1, prec5 = utils.accuracy(logits, labels, topk=(1, 5))
            batch_size = images.size(0)
            loss_meter.update(loss.data.item(), batch_size)
            acc1_meter.update(prec1.data.item(), batch_size)
            acc5_meter.update(prec5.data.item(), batch_size)
            progress.set_description('Search Validation step: {0}, loss: {1:9.5f}, top 1: {2:5.2f} top 5: {3:5.2f} progress'.format(step, loss_meter.avg, acc1_meter.avg, acc5_meter.avg))
    return acc1_meter.avg, loss_meter.avg
def save_graph(G, file_name):
    """Render the architecture weight graph G to `file_name` using the
    graphviz 'dot' hierarchical layout and matplotlib."""
    pos=graphviz_layout(G, prog='dot')
    # Very large canvas: search graphs have hundreds of nodes.
    plt.figure(figsize=(160, 180))
    nx.draw_networkx_nodes(G, pos, node_shape="s",nodelist=G.nodes(),node_size=1000, linewidths=0.1, vmin=0, vmax=1, alpha=1)
    nx.draw_networkx_edges(G, pos, edgelist=G.edges(),width=1, edge_color="black", alpha=0.8)
    nx.draw_networkx_labels(G, pos, font_size=8, font_family='sans-serif')
    # figure(num=1, figsize=(100, 80), dpi=1000, facecolor='w', edgecolor='k')
    # plt.figure(1,figsize=(1200,1200))
    plt.axis('off')
    plt.tight_layout()
    plt.savefig(file_name)
def gen_greedy_path(G, strategy="top_down"):
    """Greedily follow the highest-weight edge from source to sink of G.

    Module-level copy of MultiChannelNetwork.gen_greedy_path in
    model_search.py. strategy "top_down" walks Source -> Linear on G;
    "bottom_up" walks the reversed graph from Linear and returns the path
    re-oriented Source -> Linear.

    NOTE(review): if `strategy` is neither option, start_/new_G are never
    bound and the loop raises NameError; the walk assumes every node has at
    least one weighted out-edge -- confirm graphs always reach the sink.
    """
    if strategy == "top_down":
        start_ = "Source"
        current_node = "Source"
        end_node = "Linear"
        new_G = G
    elif strategy == "bottom_up":
        start_ = "Linear"
        current_node = "Linear"
        end_node = "Source"
        # Reverse the graph so neighbors() walks toward the Source.
        new_G = G.reverse(copy=True)
    # Best weight seen among the current node's out-edges; reset to -1 after
    # the first hop so zero-weight edges can still be chosen later.
    wt = 0
    node_list = []
    while current_node != end_node:
        neighbors = [n for n in new_G.neighbors(start_)]
        for nodes in neighbors:
            # get_edge_data returns the edge's attribute dict, or the default
            # (the string "weight") when the edge does not exist.
            weight_ = new_G.get_edge_data(start_, nodes, "weight")
            # print(weight_)
            if len(weight_):
                weight_ = weight_["weight"]
            else:
                weight_ = 0
            # print(weight_)
            if weight_ > wt:
                wt = weight_
                current_node = nodes
        node_list.append(current_node)
        # print("start",start_)
        # print(node)
        start_ = current_node
        wt = -1
    # print(node_list)
    if strategy == "bottom_up":
        # Re-orient the path and re-attach the Linear sink, which the
        # reversed walk started from and therefore never appended.
        node_list = node_list[::-1]
        node_list.append("Linear")
    return node_list
# Script entry point: run the architecture search when executed directly.
if __name__ == '__main__':
    main()
| 18,217 | 43.434146 | 161 | py |
sharpDARTS | sharpDARTS-master/cnn/flops_counter.py | # source: https://github.com/sovrasov/flops-counter.pytorch
# license: MIT
import torch.nn as nn
import torch
import numpy as np
def flops_to_string(flops):
    """Format a raw multiply-accumulate count as a human readable string,
    e.g. 2500000 -> '2.5MMac'. Values below 1000 keep the plain 'Mac' unit."""
    for divisor, suffix in ((10 ** 9, 'GMac'), (10 ** 6, 'MMac'), (10 ** 3, 'KMac')):
        if flops // divisor > 0:
            return str(round(flops / divisor, 2)) + suffix
    return str(flops) + 'Mac'
def get_model_parameters_number(model, as_string=True):
    """Count the trainable parameters of a torch module.

    Returns a plain int when as_string is False, otherwise a compact string
    such as '2.13M' or '45.1k'.
    """
    count = sum(p.numel() for p in model.parameters() if p.requires_grad)
    if not as_string:
        return count
    if count // 10 ** 6 > 0:
        return str(round(count / 10 ** 6, 2)) + 'M'
    if count // 10 ** 3:
        return str(round(count / 10 ** 3, 2)) + 'k'
    return str(count)
def add_flops_counting_methods(net_main_module):
    """Attach the flops-accounting API (start/stop/reset/compute) to an
    existing nn.Module instance and return that same module."""
    # adding additional methods to the existing module object,
    # this is done this way so that each function has access to self object
    # (__get__ binds each plain function as an instance method).
    net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
    net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module)
    net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module)
    net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(net_main_module)
    net_main_module.reset_flops_count()
    # Adding variables necessary for masked flops computation
    net_main_module.apply(add_flops_mask_variable_or_reset)
    return net_main_module
def compute_average_flops_cost(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.

    Returns current mean flops consumption per image.
    """
    batches_count = self.__batch_counter__
    total_flops = sum(
        m.__flops__ for m in self.modules() if is_supported_instance(m))
    return total_flops / batches_count
def start_flops_count(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.

    Activates the computation of mean flops consumption per image.
    Call it before you run the network.
    """
    # Count batches on the root module, flops on every supported submodule.
    add_batch_counter_hook_function(self)
    self.apply(add_flops_counter_hook_function)
def stop_flops_count(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.

    Stops computing the mean flops consumption per image.
    Call whenever you want to pause the computation.
    """
    # Remove both the batch counter hook and all per-layer flops hooks.
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.

    Resets statistics computed so far.
    """
    # Zero the batch counter and every supported submodule's __flops__.
    add_batch_counter_variables_or_reset(self)
    self.apply(add_flops_counter_variable_or_reset)
def add_flops_mask(module, mask):
    """Attach a spatial flops mask to every Conv2d submodule of `module`;
    conv_flops_counter_hook uses it to count only active positions."""
    def _attach(submodule):
        if isinstance(submodule, torch.nn.Conv2d):
            submodule.__mask__ = mask
    module.apply(_attach)
def remove_flops_mask(module):
    """Reset the flops mask to None on every supported submodule."""
    module.apply(add_flops_mask_variable_or_reset)
# ---- Internal functions
def is_supported_instance(module):
    """Return True when `module` is a layer type the flops counter handles."""
    supported_types = (
        torch.nn.Conv2d, torch.nn.ReLU, torch.nn.PReLU, torch.nn.ELU,
        torch.nn.LeakyReLU, torch.nn.ReLU6, torch.nn.Linear,
        torch.nn.MaxPool2d, torch.nn.AvgPool2d, torch.nn.BatchNorm2d,
        torch.nn.Upsample, nn.AdaptiveMaxPool2d, nn.AdaptiveAvgPool2d,
    )
    return isinstance(module, supported_types)
def empty_flops_counter_hook(module, input, output):
    """Fallback hook for supported layers with no specific cost model;
    counts zero flops while keeping the accumulator attribute exercised."""
    module.__flops__ += 0
def upsample_flops_counter_hook(module, input, output):
    """Count one flop per produced output element for upsampling layers."""
    # batch * product of remaining dims == element count of the first output.
    module.__flops__ += output[0].numel()
def relu_flops_counter_hook(module, input, output):
    """Activation layers cost one flop per output element."""
    module.__flops__ += output.numel()
def linear_flops_counter_hook(module, input, output):
    """Fully connected layer: batch * in_features * out_features flops."""
    x = input[0]
    module.__flops__ += x.shape[0] * x.shape[1] * output.shape[1]
def pool_flops_counter_hook(module, input, output):
    """Pooling layers cost one flop per input element."""
    x = input[0]
    module.__flops__ += np.prod(x.shape)
def bn_flops_counter_hook(module, input, output):
    """BatchNorm: one flop per input element for normalization, doubled when
    affine scale/shift parameters are applied.

    Fix: removed the stray `module.affine` expression statement at the top of
    the original -- it was a no-op leftover with no effect on the count.
    """
    x = input[0]
    batch_flops = np.prod(x.shape)
    if module.affine:
        batch_flops *= 2
    module.__flops__ += batch_flops
def conv_flops_counter_hook(conv_module, input, output):
    """Accumulate the multiply-accumulate count of one Conv2d forward pass.

    Follows the original counter's formula: kernel area * in_channels *
    (out_channels // groups) per output position, plus out_channels per
    position when a bias is present. A (b, 1, h, w) __mask__ restricts the
    count to active spatial positions.
    """
    # Can have multiple inputs, getting the first one
    x = input[0]
    batch_size = x.shape[0]
    out_h, out_w = output.shape[2:]
    k_h, k_w = conv_module.kernel_size
    filters_per_channel = conv_module.out_channels // conv_module.groups
    per_position_flops = k_h * k_w * conv_module.in_channels * filters_per_channel
    active_positions = batch_size * out_h * out_w
    if conv_module.__mask__ is not None:
        # (b, 1, h, w) mask selects which output positions actually count.
        flops_mask = conv_module.__mask__.expand(batch_size, 1, out_h, out_w)
        active_positions = flops_mask.sum()
    total_flops = per_position_flops * active_positions
    if conv_module.bias is not None:
        total_flops = total_flops + conv_module.out_channels * active_positions
    conv_module.__flops__ += total_flops
def batch_counter_hook(module, input, output):
    """Track how many samples have passed through the module."""
    # Can have multiple inputs; the batch size comes from the first one.
    module.__batch_counter__ += input[0].shape[0]
def add_batch_counter_variables_or_reset(module):
    """Initialize (or reset) the sample counter used by batch_counter_hook."""
    module.__batch_counter__ = 0
def add_batch_counter_hook_function(module):
    """Install the batch-counting forward hook on `module`, once only
    (idempotent: a second call is a no-op)."""
    if hasattr(module, '__batch_counter_handle__'):
        return
    module.__batch_counter_handle__ = module.register_forward_hook(batch_counter_hook)
def remove_batch_counter_hook_function(module):
    """Detach the batch-counting forward hook from `module`, if one was
    previously installed; otherwise do nothing."""
    if not hasattr(module, '__batch_counter_handle__'):
        return
    module.__batch_counter_handle__.remove()
    del module.__batch_counter_handle__
def add_flops_counter_variable_or_reset(module):
    """Initialize (or reset) the per-module flops accumulator on supported layers."""
    if is_supported_instance(module):
        module.__flops__ = 0
def add_flops_counter_hook_function(module):
    """Register the matching flops-counting forward hook on a supported
    module, once only; unsupported modules are ignored."""
    if not is_supported_instance(module):
        return
    if hasattr(module, '__flops_handle__'):
        return
    # Order-preserving dispatch table mirroring the layer families the
    # counter knows about; the first matching entry wins.
    dispatch = (
        ((torch.nn.Conv2d,), conv_flops_counter_hook),
        ((torch.nn.ReLU, torch.nn.PReLU, torch.nn.ELU,
          torch.nn.LeakyReLU, torch.nn.ReLU6), relu_flops_counter_hook),
        ((torch.nn.Linear,), linear_flops_counter_hook),
        ((torch.nn.AvgPool2d, torch.nn.MaxPool2d, nn.AdaptiveMaxPool2d,
          nn.AdaptiveAvgPool2d), pool_flops_counter_hook),
        ((torch.nn.BatchNorm2d,), bn_flops_counter_hook),
        ((torch.nn.Upsample,), upsample_flops_counter_hook),
    )
    # Supported layers with no specific cost model fall back to zero flops.
    hook = empty_flops_counter_hook
    for layer_types, candidate in dispatch:
        if isinstance(module, layer_types):
            hook = candidate
            break
    module.__flops_handle__ = module.register_forward_hook(hook)
def remove_flops_counter_hook_function(module):
    """Detach the flops-counting forward hook from a supported module, if present."""
    if is_supported_instance(module):
        if hasattr(module, '__flops_handle__'):
            module.__flops_handle__.remove()
            del module.__flops_handle__
# --- Masked flops counting
# Also being run in the initialization
# Also being run in the initialization
def add_flops_mask_variable_or_reset(module):
    """Initialize (or reset) the spatial flops mask to None on supported layers."""
    if is_supported_instance(module):
        module.__mask__ = None
| 8,348 | 30.988506 | 100 | py |
sharpDARTS | sharpDARTS-master/cnn/test_imagenet.py | import os
import sys
import numpy as np
import torch
import utils
import glob
import random
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkImageNet as Network
# Command line interface for standalone ImageNet validation-set evaluation.
parser = argparse.ArgumentParser("imagenet")
parser.add_argument('--data', type=str, default='../data/imagenet/', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--report_freq', type=float, default=100, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--layers', type=int, default=14, help='total number of layers')
parser.add_argument('--model_path', type=str, default='EXP/model.pt', help='path of pretrained model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--drop_path_prob', type=float, default=0, help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
# Architecture name resolved against genotypes.py in main().
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
args = parser.parse_args()
# Log to stdout with timestamps.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
# ImageNet has 1000 output classes.
CLASSES = 1000
def main():
    """Load a pretrained model and report top-1/top-5 accuracy on the
    ImageNet validation split found under args.data/val."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Seed RNGs for reproducible evaluation order.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # Resolve the genotype by name from the genotypes module.
    # NOTE(review): eval() on a CLI argument; only acceptable for trusted use.
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    # NOTE(review): assumes the checkpoint dict stores weights under 'state_dict'.
    model.load_state_dict(torch.load(args.model_path)['state_dict'])
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    validdir = os.path.join(args.data, 'val')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)
    model.drop_path_prob = args.drop_path_prob
    valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_acc_top1 %f', valid_acc_top1)
    logging.info('valid_acc_top5 %f', valid_acc_top5)
def infer(valid_queue, model, criterion):
    """Evaluate `model` on the ImageNet validation queue.

    Returns (top1_avg, top5_avg, loss_avg) over all batches.

    Fixes: Variable(..., volatile=True) is deprecated and a no-op on modern
    PyTorch, so gradients were not actually disabled -- replaced with
    torch.no_grad(). loss.data[0] / prec.data[0] raise on PyTorch >= 0.5
    for 0-dim tensors -- replaced with .item(), matching the style used by
    infer() in train_search.py.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)
            logits, _ = model(input)
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.data.item(), n)
            top1.update(prec1.data.item(), n)
            top5.update(prec5.data.item(), n)
            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
    return top1.avg, top5.avg, objs.avg
# Script entry point: evaluate the pretrained model when executed directly.
if __name__ == '__main__':
    main()
| 3,791 | 32.557522 | 104 | py |
sharpDARTS | sharpDARTS-master/cnn/train.py | import os
import sys
import time
import glob
import json
import copy
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkCIFAR
from model import NetworkImageNet
from tqdm import tqdm
import genotypes
import operations
import cifar10_1
import dataset
import flops_counter
from cosine_power_annealing import cosine_power_annealing
from model import MultiChannelNetworkModel
def main():
    """Train and evaluate a final (post-search) sharpDARTS model.

    Parses command line options, constructs the requested network
    (multi-channel search model, ImageNet, or CIFAR variant), runs the
    cosine-power-annealed training loop while checkpointing the best
    validation accuracy, then evaluates the best weights on train, valid,
    and test (CIFAR-10.1 when applicable) splits. Statistics are written
    as json and csv into the experiment save directory. `--evaluate` runs
    inference only; `--flops` prints a FLOP count and exits.
    """
    parser = argparse.ArgumentParser("Common Argument Parser")
    parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
    parser.add_argument('--dataset', type=str, default='cifar10', help='which dataset:\
                        cifar10, mnist, emnist, fashion, svhn, stl10, devanagari')
    parser.add_argument('--batch_size', type=int, default=64, help='batch size')
    parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
    parser.add_argument('--learning_rate_min', type=float, default=1e-8, help='min learning rate')
    parser.add_argument('--lr_power_annealing_exponent_order', type=float, default=2,
                        help='Cosine Power Annealing Schedule Base, larger numbers make '
                             'the exponential more dominant, smaller make cosine more dominant, '
                             '1 returns to standard cosine annealing.')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
    parser.add_argument('--weight_decay', '--wd', dest='weight_decay', type=float, default=3e-4, help='weight decay')
    parser.add_argument('--partial', default=1/8, type=float, help='partially adaptive parameter p in Padam')
    parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
    parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
    parser.add_argument('--epochs', type=int, default=2000, help='num of training epochs')
    parser.add_argument('--start_epoch', default=1, type=int, metavar='N',
                        help='manual epoch number (useful for restarts)')
    parser.add_argument('--warmup_epochs', type=int, default=5, help='num of warmup training epochs')
    parser.add_argument('--warm_restarts', type=int, default=20, help='warm restarts of cosine annealing')
    parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
    parser.add_argument('--mid_channels', type=int, default=32, help='C_mid channels in choke SharpSepConv')
    parser.add_argument('--layers', type=int, default=20, help='total number of layers')
    parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
    parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
    parser.add_argument('--mixed_auxiliary', action='store_true', default=False, help='Learn weights for auxiliary networks during training. Overrides auxiliary flag')
    parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
    parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
    parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
    parser.add_argument('--autoaugment', action='store_true', default=False, help='use cifar10 autoaugment https://arxiv.org/abs/1805.09501')
    parser.add_argument('--random_eraser', action='store_true', default=False, help='use random eraser')
    parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
    parser.add_argument('--save', type=str, default='EXP', help='experiment name')
    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
    parser.add_argument('--ops', type=str, default='OPS', help='which operations to use, options are OPS and DARTS_OPS')
    parser.add_argument('--primitives', type=str, default='PRIMITIVES',
                        help='which primitive layers to use inside a cell search space,'
                             ' options are PRIMITIVES, SHARPER_PRIMITIVES, and DARTS_PRIMITIVES')
    parser.add_argument('--optimizer', type=str, default='sgd', help='which optimizer to use, options are padam and sgd')
    parser.add_argument('--load', type=str, default='', metavar='PATH', help='load weights at specified location')
    parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
    parser.add_argument('--flops', action='store_true', default=False, help='count flops and exit, aka floating point operations.')
    parser.add_argument('-e', '--evaluate', dest='evaluate', type=str, metavar='PATH', default='',
                        help='evaluate model at specified path on training, test, and validation datasets')
    parser.add_argument('--multi_channel', action='store_true', default=False, help='perform multi channel search, a completely separate search space')
    parser.add_argument('--load_args', type=str, default='', metavar='PATH',
                        help='load command line args from a json file, this will override '
                             'all currently set args except for --evaluate, and arguments '
                             'that did not exist when the json file was originally saved out.')
    parser.add_argument('--layers_of_cells', type=int, default=8, help='total number of cells in the whole network, default is 8 cells')
    parser.add_argument('--layers_in_cells', type=int, default=4,
                        help='Total number of nodes in each cell, aka number of steps,'
                             ' default is 4 nodes, which implies 8 ops')
    parser.add_argument('--weighting_algorithm', type=str, default='scalar',
                        help='which operations to use, options are '
                             '"max_w" (1. - max_w + w) * op, and scalar (w * op)')
    # TODO(ahundt) remove final path and switch back to genotype
    parser.add_argument('--load_genotype', type=str, default=None, help='Name of genotype to be used')
    parser.add_argument('--simple_path', default=True, action='store_false', help='Final model is a simple path (MultiChannelNetworkModel)')
    args = parser.parse_args()

    args = utils.initialize_files_and_args(args)
    logger = utils.logging_setup(args.log_file_path)

    if not torch.cuda.is_available():
        logger.info('no gpu device available')
        sys.exit(1)

    # Seed every RNG source so runs are reproducible on a fixed GPU.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logger.info('gpu device = %d' % args.gpu)
    logger.info("args = %s", args)

    DATASET_CLASSES = dataset.class_dict[args.dataset]
    DATASET_CHANNELS = dataset.inp_channel_dict[args.dataset]
    DATASET_MEAN = dataset.mean_dict[args.dataset]
    DATASET_STD = dataset.std_dict[args.dataset]
    logger.info('output channels: ' + str(DATASET_CLASSES))

    # Resolve ops dictionary, primitives list, and genotype by name.
    # NOTE(review): eval() on command line strings executes arbitrary code;
    # tolerable in a research script, but getattr(operations, args.ops) etc.
    # would be safer.
    op_dict_to_load = "operations.%s" % args.ops
    logger.info('loading op dict: ' + str(op_dict_to_load))
    op_dict = eval(op_dict_to_load)

    primitives_to_load = "genotypes.%s" % args.primitives
    logger.info('loading primitives:' + primitives_to_load)
    primitives = eval(primitives_to_load)
    logger.info('primitives: ' + str(primitives))
    genotype = eval("genotypes.%s" % args.arch)

    # create the neural network
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    if args.multi_channel:
        final_path = None
        if args.load_genotype is not None:
            genotype = getattr(genotypes, args.load_genotype)
            print(genotype)
            if type(genotype[0]) is str:
                logger.info('Path :%s', genotype)
        # TODO(ahundt) remove final path and switch back to genotype
        # Bug fix: this module imports MultiChannelNetworkModel; the previous
        # call to the undefined name MultiChannelNetwork raised NameError.
        cnn_model = MultiChannelNetworkModel(
            args.init_channels, DATASET_CLASSES, layers=args.layers_of_cells, criterion=criterion, steps=args.layers_in_cells,
            weighting_algorithm=args.weighting_algorithm, genotype=genotype)
        flops_shape = [1, 3, 32, 32]
    elif args.dataset == 'imagenet':
        cnn_model = NetworkImageNet(args.init_channels, DATASET_CLASSES, args.layers, args.auxiliary, genotype, op_dict=op_dict, C_mid=args.mid_channels)
        flops_shape = [1, 3, 224, 224]
    else:
        cnn_model = NetworkCIFAR(args.init_channels, DATASET_CLASSES, args.layers, args.auxiliary, genotype, op_dict=op_dict, C_mid=args.mid_channels)
        flops_shape = [1, 3, 32, 32]
    cnn_model = cnn_model.cuda()
    logger.info("param size = %fMB", utils.count_parameters_in_MB(cnn_model))
    if args.flops:
        # Count floating point operations for one forward pass, then exit.
        logger.info('flops_shape = ' + str(flops_shape))
        logger.info("flops = " + utils.count_model_flops(cnn_model, data_shape=flops_shape))
        return

    optimizer = torch.optim.SGD(
        cnn_model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )

    # Get preprocessing functions (i.e. transforms) to apply on data
    train_transform, valid_transform = utils.get_data_transforms(args)
    if args.evaluate:
        # evaluate the train dataset without augmentation
        train_transform = valid_transform

    # Get the training queue, use full training and test set
    train_queue, valid_queue = dataset.get_training_queues(
        args.dataset, train_transform, valid_transform, args.data, args.batch_size, train_proportion=1.0, search_architecture=False)

    test_queue = None
    if args.dataset == 'cifar10':
        # evaluate best model weights on cifar 10.1
        # https://github.com/modestyachts/CIFAR-10.1
        test_data = cifar10_1.CIFAR10_1(root=args.data, download=True, transform=valid_transform)
        test_queue = torch.utils.data.DataLoader(
            test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=8)

    if args.evaluate:
        # evaluate the loaded model, print the result, and return
        logger.info("Evaluating inference with weights file: " + args.load)
        eval_stats = evaluate(
            args, cnn_model, criterion, args.load,
            train_queue=train_queue, valid_queue=valid_queue, test_queue=test_queue)
        with open(args.stats_file, 'w') as f:
            arg_dict = vars(args)
            arg_dict.update(eval_stats)
            json.dump(arg_dict, f)
        logger.info("flops = " + utils.count_model_flops(cnn_model))
        logger.info(utils.dict_to_log_string(eval_stats))
        logger.info('\nEvaluation of Loaded Model Complete! Save dir: ' + str(args.save))
        return

    # Learning rate schedule: cosine power annealing with warmup epochs.
    lr_schedule = cosine_power_annealing(
        epochs=args.epochs, max_lr=args.learning_rate, min_lr=args.learning_rate_min,
        warmup_epochs=args.warmup_epochs, exponent_order=args.lr_power_annealing_exponent_order)
    epochs = np.arange(args.epochs) + args.start_epoch

    stats_csv = args.epoch_stats_file
    stats_csv = stats_csv.replace('.json', '.csv')
    with tqdm(epochs, dynamic_ncols=True) as prog_epoch:
        best_valid_acc = 0.0
        best_epoch = 0
        best_stats = {}
        stats = {}
        epoch_stats = []
        weights_file = os.path.join(args.save, 'weights.pt')
        for epoch, learning_rate in zip(prog_epoch, lr_schedule):
            # update the drop_path_prob augmentation, annealed linearly over training
            cnn_model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
            # update the learning rate from the precomputed schedule
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate
            train_acc, train_obj = train(args, train_queue, cnn_model, criterion, optimizer)
            val_stats = infer(args, valid_queue, cnn_model, criterion)
            stats.update(val_stats)
            stats['train_acc'] = train_acc
            stats['train_loss'] = train_obj
            stats['lr'] = learning_rate
            stats['epoch'] = epoch

            if stats['valid_acc'] > best_valid_acc:
                # new best epoch, save weights
                utils.save(cnn_model, weights_file)
                best_epoch = epoch
                best_stats.update(copy.deepcopy(stats))
                best_valid_acc = stats['valid_acc']
            logger.info('epoch, %d, train_acc, %f, valid_acc, %f, train_loss, %f, valid_loss, %f, lr, %e, best_epoch, %d, best_valid_acc, %f, ' + utils.dict_to_log_string(stats),
                        epoch, train_acc, stats['valid_acc'], train_obj, stats['valid_loss'], learning_rate, best_epoch, best_valid_acc)
            epoch_stats += [copy.deepcopy(stats)]
            # Persist running per-epoch statistics so progress survives crashes.
            with open(args.epoch_stats_file, 'w') as f:
                json.dump(epoch_stats, f, cls=utils.NumpyEncoder)
            utils.list_of_dicts_to_csv(stats_csv, epoch_stats)

    # get stats from best epoch including cifar10.1
    eval_stats = evaluate(args, cnn_model, criterion, weights_file, train_queue, valid_queue, test_queue)
    with open(args.stats_file, 'w') as f:
        arg_dict = vars(args)
        arg_dict.update(eval_stats)
        json.dump(arg_dict, f, cls=utils.NumpyEncoder)
    with open(args.epoch_stats_file, 'w') as f:
        json.dump(epoch_stats, f, cls=utils.NumpyEncoder)
    logger.info(utils.dict_to_log_string(eval_stats))
    logger.info('Training of Final Model Complete! Save dir: ' + str(args.save))
def evaluate(args, cnn_model, criterion, weights_file=None, train_queue=None, valid_queue=None, test_queue=None, prefix='best_'):
    """Collect inference statistics for the train, validation, and test splits.

    When `weights_file` is given, those weights are loaded into `cnn_model`
    first (intended for the best checkpoint saved during training). Splits
    whose queue is None are skipped. Returned dict keys are built as
    `prefix` + split name + metric, e.g. 'best_valid_acc'.
    """
    if weights_file is not None:
        # Restore the checkpointed weights before measuring.
        utils.load(cnn_model, weights_file)
    # CIFAR-10 models are additionally scored against the CIFAR-10.1 test set.
    test_prefix = 'cifar10_1_test_' if args.dataset == 'cifar10' else 'test_'
    split_names = ['train_', 'valid_', test_prefix]
    split_queues = [train_queue, valid_queue, test_queue]
    stats = {}
    with tqdm(split_names, desc='Final Evaluation', dynamic_ncols=True) as split_progbar:
        for split_name, split_queue in zip(split_progbar, split_queues):
            if split_queue is None:
                continue
            split_stats = infer(args, split_queue, cnn_model, criterion=criterion,
                                prefix=prefix + split_name, desc='Running ' + split_name + 'data')
            stats.update(split_stats)
    return stats
def train(args, train_queue, cnn_model, criterion, optimizer):
    """Run one training epoch over train_queue.

    Applies the optional auxiliary-tower loss (scaled by
    args.auxiliary_weight) and gradient-norm clipping (args.grad_clip).

    Returns:
        (top1_avg, loss_avg): running averages over the epoch.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    cnn_model.train()
    with tqdm(train_queue, dynamic_ncols=True) as progbar:
        # Variable() wrappers are deprecated no-ops since PyTorch 0.4;
        # tensors are moved to the GPU directly instead.
        for input_batch, target in progbar:
            input_batch = input_batch.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)

            optimizer.zero_grad()
            if args.auxiliary:
                logits, logits_aux = cnn_model(input_batch)
            else:
                logits = cnn_model(input_batch)
                logits_aux = None
            loss = criterion(logits, target)
            if logits_aux is not None and args.auxiliary:
                # Add the scaled auxiliary tower loss.
                loss_aux = criterion(logits_aux, target)
                loss += args.auxiliary_weight * loss_aux
            loss.backward()
            nn.utils.clip_grad_norm_(cnn_model.parameters(), args.grad_clip)
            optimizer.step()

            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input_batch.size(0)
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
            progbar.set_description('Training loss: {0:9.5f}, top 1: {1:5.2f}, top 5: {2:5.2f} progress'.format(objs.avg, top1.avg, top5.avg))
    return top1.avg, objs.avg
def infer(args, valid_queue, cnn_model, criterion, prefix='valid_', desc='Running Validation'):
    """Evaluate cnn_model on valid_queue and return a statistics dict.

    Keys are built from `prefix` and include acc/loss/top1/top5 plus tqdm
    timing statistics (via utils.tqdm_stats).
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    cnn_model.eval()
    with torch.no_grad():
        # dynamic_ncols = false in this case because we want accurate timing stats
        with tqdm(valid_queue, dynamic_ncols=False, desc=desc) as progbar:
            # Variable() wrappers are deprecated no-ops since PyTorch 0.4;
            # tensors are moved to the GPU directly instead.
            for input_batch, target in progbar:
                input_batch = input_batch.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)

                if args.auxiliary:
                    logits, _ = cnn_model(input_batch)
                else:
                    logits = cnn_model(input_batch)
                loss = criterion(logits, target)

                prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
                n = input_batch.size(0)
                objs.update(loss.item(), n)
                top1.update(prec1.item(), n)
                top5.update(prec5.item(), n)
                # per-step progress bar description is disabled for performance reasons

            # extract progbar timing stats from tqdm https://github.com/tqdm/tqdm/issues/660
            stats = utils.tqdm_stats(progbar, prefix=prefix)
    stats[prefix + 'acc'] = top1.avg
    stats[prefix + 'loss'] = objs.avg
    stats[prefix + 'top1'] = top1.avg
    stats[prefix + 'top5'] = top5.avg
    # return top1, avg loss, and timing stats string
    return stats
# Script entry point: only run training when executed directly, not on import.
if __name__ == '__main__':
    main()
| 17,359 | 47.627451 | 172 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.