code stringlengths 17 6.64M |
|---|
class ClevrBatcher():
    """Minibatch iterator over the preprocessed CLEVR HDF5 dataset.

    Reads the '<split>Questions/Answers/Programs/Imgs/ProgramMask/ImageIdx'
    datasets from data/preprocessed/clevr.h5 and serves fixed-size batches
    sequentially, wrapping to the start when fewer than batchSize samples
    remain.
    """
    def __init__(self, batchSize, split, maxSamples=None, rand=True):
        # NOTE(review): `rand` is accepted but never used -- batches are
        # always served in dataset order.
        dat = h5py.File('data/preprocessed/clevr.h5', 'r')
        self.questions = dat[(split + 'Questions')]
        self.answers = dat[(split + 'Answers')]
        self.programs = dat[(split + 'Programs')]
        self.imgs = dat[(split + 'Imgs')]
        self.pMask = dat[(split + 'ProgramMask')]
        self.imgIdx = dat[(split + 'ImageIdx')]
        self.batchSize = batchSize
        if (maxSamples is not None):
            self.m = maxSamples
        else:
            # Truncate to a whole number of batches.
            self.m = ((len(self.questions) // batchSize) * batchSize)
        self.batches = (self.m // batchSize)
        self.pos = 0
    def next(self):
        """Return the next batch as ([questions, imgs, imgIdx], [programs, answers], [pMask])."""
        batchSize = self.batchSize
        # Wrap around rather than serving a short final batch.
        if ((self.pos + batchSize) > self.m):
            self.pos = 0
        imgIdx = self.imgIdx[self.pos:(self.pos + batchSize)]
        # h5py fancy indexing requires sorted unique indices, so fetch each
        # unique image once, then re-expand to per-sample order via relIdx.
        uniqueIdx = np.unique(imgIdx).tolist()
        mapTo = np.arange(len(uniqueIdx)).tolist()
        mapDict = dict(zip(uniqueIdx, mapTo))
        relIdx = [mapDict[x] for x in imgIdx]
        imgs = self.imgs[np.unique(imgIdx).tolist()][relIdx]
        questions = self.questions[self.pos:(self.pos + batchSize)]
        answers = self.answers[self.pos:(self.pos + batchSize)]
        programs = self.programs[self.pos:(self.pos + batchSize)]
        pMask = self.pMask[self.pos:(self.pos + batchSize)]
        self.pos += batchSize
        return ([questions, imgs, imgIdx], [programs, answers], [pMask])
|
def buildVocab(fName):
    """Build a token -> id vocabulary from a whitespace-separated token file.

    Args:
        fName: path to a text file of whitespace-separated vocabulary tokens.

    Returns:
        (vocab, invVocab): vocab maps token -> integer id (ids start at 1,
        leaving 0 free for padding); invVocab is the inverse mapping.
    """
    # Bug fix: the original `open(fName).read()` leaked the file handle;
    # a context manager guarantees it is closed.
    with open(fName) as f:
        dat = f.read().split()
    vocab = dict(zip(dat, (1 + np.arange(len(dat)))))
    invVocab = {v: k for (k, v) in vocab.items()}
    return (vocab, invVocab)
|
def applyVocab(line, vocab):
    """Map each element of *line* through *vocab*; return a numpy array of ids."""
    return np.asarray([vocab[token] for token in line])
|
def applyInvVocab(x, vocab):
    """Map ids in *x* back to tokens via the inverted *vocab*, joined into one string."""
    inverted = utils.invertDict(vocab)
    return ''.join(applyVocab(x, inverted))
|
def invertDict(x):
    """Return a new dict with the keys and values of *x* swapped."""
    return dict((value, key) for (key, value) in x.items())
|
def loadDict(fName):
    """Load a Python literal (typically a dict) from a text file.

    Security fix: the original used eval(), which executes arbitrary code
    from the file; ast.literal_eval only parses literals. Also closes the
    file via a context manager (the original leaked the handle).
    """
    import ast
    with open(fName) as f:
        return ast.literal_eval(f.read())
|
def norm(x, n=2):
    """L-n norm of *x*, divided by the total number of elements."""
    total = np.sum(np.abs(x) ** n) ** (1.0 / n)
    return total / np.prod(x.shape)
|
class Perm():
    """Serve successive disjoint slices of one random permutation of arange(n)."""
    def __init__(self, n):
        self.inds = np.random.permutation(np.arange(n))
        self.m = n
        self.pos = 0
    def next(self, n):
        """Return the next *n* permuted indices.

        Bug fix: the original asserted pos + n < m, which wrongly rejected a
        request consuming exactly the remaining indices; <= is the correct
        bound.
        """
        assert ((self.pos + n) <= self.m)
        ret = self.inds[self.pos:(self.pos + n)]
        self.pos += n
        return ret
|
class CMA():
    """Cumulative moving average over a stream of scalars."""
    def __init__(self):
        self.t = 0.0
        self.cma = 0.0
    def update(self, x):
        """Fold one new observation into the running mean."""
        total = self.cma * self.t + x
        self.t += 1.0
        self.cma = total / self.t
|
class EDA():
    """Exponentially decayed moving average with decay factor *k*."""
    def __init__(self, k=0.99):
        self.k = k
        self.eda = 0.0
    def update(self, x):
        """Blend one new observation into the average with weight (1 - k)."""
        self.eda = self.k * self.eda + (1 - self.k) * x
|
def modelSize(net):
    """Print the parameter count of *net*, rounded down to thousands."""
    params = sum(np.prod(p.size()) for p in net.parameters())
    params = int(params / 1000)
    print('Network has ', params, 'K params')
|
def Conv2d(fIn, fOut, k):
    """nn.Conv2d with 'same' padding for an odd kernel size *k*."""
    return nn.Conv2d(fIn, fOut, k, padding=(k - 1) // 2)
|
def list(module, *args, n=1):
    """Instantiate *module* n times (with *args*) into an nn.ModuleList.

    NOTE(review): keeps the original name even though it shadows the builtin
    `list` at module scope -- renaming would break existing callers.
    """
    return nn.ModuleList([module(*args) for _ in range(n)])
|
def var(xNp, volatile=False, cuda=False):
    # Wrap a numpy array as a torch autograd Variable, optionally on GPU.
    # NOTE(review): legacy pre-0.4 torch API -- the `volatile` flag was
    # removed in later torch releases; confirm the pinned torch version.
    x = Variable(t.from_numpy(xNp), volatile=volatile)
    if cuda:
        x = x.cuda()
    return x
|
def initWeights(net, scheme='orthogonal'):
    """Initialize all parameters of *net* in place.

    scheme: 'orthogonal' (applied to >=2-D tensors only), 'normal' (std 0.01),
    or 'xavier'. Uses the legacy non-underscored torch.nn.init names, which
    are deprecated in newer torch releases -- TODO confirm pinned version.
    """
    print('Initializing weights. Warning: may overwrite sensitive bias parameters (e.g. batchnorm)')
    for e in net.parameters():
        if (scheme == 'orthogonal'):
            # Orthogonal init is undefined for vectors, so biases are skipped.
            if (len(e.size()) >= 2):
                init.orthogonal(e)
        elif (scheme == 'normal'):
            init.normal(e, std=0.01)
        elif (scheme == 'xavier'):
            init.xavier_normal(e)
|
class SaveManager():
    """Tracks train/val loss/accuracy histories, checkpoints the weights with
    the best validation accuracy under `root`, and locks itself when the
    network produces NaNs so bad weights are never saved."""
    def __init__(self, root):
        # tl/ta: train loss/accuracy history; vl/va: validation counterparts.
        (self.tl, self.ta, self.vl, self.va) = ([], [], [], [])
        self.root = root
        self.stateDict = None
        # Once a NaN is observed, updates are refused until refresh().
        self.lock = False
    def update(self, net, tl, ta, vl, va):
        """Record one epoch of stats; checkpoint on a new best val accuracy."""
        # The sum over all weights is NaN iff any single weight is NaN.
        nan = np.isnan(sum([t.sum(e) for e in net.state_dict().values()]))
        if (nan or self.lock):
            self.lock = True
            print('NaN in update. Locking. Call refresh() to reset')
            return
        if ((self.epoch() == 1) or (va > np.max(self.va))):
            self.stateDict = net.state_dict().copy()
            t.save(net.state_dict(), (self.root + 'weights'))
        self.tl += [tl]
        self.ta += [ta]
        self.vl += [vl]
        self.va += [va]
        np.save((self.root + 'tl.npy'), self.tl)
        np.save((self.root + 'ta.npy'), self.ta)
        np.save((self.root + 'vl.npy'), self.vl)
        np.save((self.root + 'va.npy'), self.va)
    def load(self, net, raw=False):
        """Restore checkpointed weights (unless raw=True) and stat histories."""
        stateDict = t.load((self.root + 'weights'))
        self.stateDict = stateDict
        if (not raw):
            net.load_state_dict(stateDict)
        self.tl = np.load((self.root + 'tl.npy')).tolist()
        self.ta = np.load((self.root + 'ta.npy')).tolist()
        self.vl = np.load((self.root + 'vl.npy')).tolist()
        self.va = np.load((self.root + 'va.npy')).tolist()
    def refresh(self, net):
        """Clear the NaN lock and roll *net* back to the last good checkpoint."""
        self.lock = False
        net.load_state_dict(self.stateDict)
    def epoch(self):
        # 1-based index of the epoch about to be recorded.
        return (len(self.tl) + 1)
|
def _sequence_mask(sequence_length, max_len=None):
if (max_len is None):
max_len = sequence_length.data.max()
batch_size = sequence_length.size(0)
seq_range = t.range(0, (max_len - 1)).long()
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
seq_range_expand = Variable(seq_range_expand)
if sequence_length.is_cuda:
seq_range_expand = seq_range_expand.cuda()
seq_length_expand = sequence_length.unsqueeze(1).expand_as(seq_range_expand)
return (seq_range_expand < seq_length_expand)
|
def maskedCE(logits, target, length):
    """Length-masked cross entropy for sequence outputs.

    Args:
        logits: Variable of size (batch, max_len, num_classes) containing
            unnormalized class scores.
        target: LongTensor Variable of size (batch, max_len) with the true
            class index for each step.
        length: LongTensor Variable of size (batch,) with each sequence's
            valid length.

    Returns:
        loss: average loss over the valid (unmasked) steps.
    """
    logits_flat = logits.view((- 1), logits.size((- 1)))
    # Fix: pass dim=1 explicitly (softmax over the class dimension of the
    # flattened (batch*len, classes) matrix); the original relied on the
    # deprecated implicit-dimension behavior of F.log_softmax.
    log_probs_flat = F.log_softmax(logits_flat, dim=1)
    target_flat = target.view((- 1), 1)
    # Negative log-likelihood of the true class at each step.
    losses_flat = (- t.gather(log_probs_flat, dim=1, index=target_flat))
    losses = losses_flat.view(*target.size())
    # Zero out the loss beyond each sequence's valid length.
    mask = _sequence_mask(sequence_length=length, max_len=target.size(1))
    losses = (losses * mask.float())
    loss = (losses.sum() / length.float().sum())
    return loss
|
def runMinibatch(net, batcher, cuda=True, volatile=False, trainable=False):
    """Pull one batch from *batcher*, wrap everything in Variables, run *net*.

    Returns (predictions, targets, mask).
    """
    (x, y, mask) = batcher.next()
    x = [var(inp, volatile=volatile, cuda=cuda) for inp in x]
    y = [var(tgt, volatile=volatile, cuda=cuda) for tgt in y]
    if mask is not None:
        mask = var(mask, volatile=volatile, cuda=cuda)
    # Unwrap singleton lists so single-input/-target nets receive the bare
    # tensor instead of a one-element list.
    if len(x) == 1:
        x = x[0]
    if len(y) == 1:
        y = y[0]
    a = net(x, trainable)
    return (a, y, mask)
|
def runData(net, opt, batcher, criterion=maskedCE, trainable=False, verbose=False, cuda=True, gradClip=10.0, minContext=0, numPrints=10):
    """Run one full pass of *batcher* through *net*.

    Args:
        net: model under evaluation/training.
        opt: optimizer; used only when trainable=True.
        batcher: batch source with .next() and a .batches epoch length.
        criterion: loss function forwarded to stats().
        trainable: if True, backprop and step the optimizer per batch.
        verbose: if True, print numPrints progress '#' marks over the pass.
        gradClip: L1 gradient-norm clip threshold, or None to disable.
        minContext: unused; kept for interface compatibility.
        numPrints: number of progress marks when verbose.

    Returns:
        (mean loss, mean accuracy) as cumulative moving averages.
    """
    iters = batcher.batches
    meanAcc = CMA()
    meanLoss = CMA()
    # Bug fix: the original computed int(iters / numPrints) inside a bare
    # `except: pass` every iteration, silently swallowing *all* exceptions
    # just to guard against ZeroDivisionError when iters < numPrints.
    # Compute the interval once and guard the zero case explicitly.
    printInterval = int(iters / numPrints)
    for i in range(iters):
        if (verbose and (printInterval > 0) and ((i % printInterval) == 0)):
            sys.stdout.write('#')
            sys.stdout.flush()
        (a, y, mask) = runMinibatch(net, batcher, trainable=trainable, cuda=cuda, volatile=(not trainable))
        (loss, acc) = stats(criterion, a, y, mask)
        if trainable:
            opt.zero_grad()
            loss.backward()
            if (gradClip is not None):
                # Legacy clip_grad_norm API; clips the L1 norm of all grads.
                t.nn.utils.clip_grad_norm(net.parameters(), gradClip, norm_type=1)
            opt.step()
        meanLoss.update(loss.data[0])
        meanAcc.update(acc)
    return (meanLoss.cma, meanAcc.cma)
|
def stats(criterion, a, y, mask):
    """Compute (loss, accuracy) for predictions *a* against targets *y*.

    With a mask (sequence outputs, (batch, seqLen, classes)): loss over the
    flattened steps, accuracy averaged over unmasked positions only.
    Without a mask: plain classification loss/accuracy.

    NOTE(review): in the masked branch criterion is called with 2 arguments,
    but runData's default criterion (maskedCE) takes 3 -- presumably a
    2-arg criterion such as F.cross_entropy is passed on this path; confirm.
    """
    if (mask is not None):
        (_, preds) = t.max(a.data, 2)
        (batch, sLen, c) = a.size()
        loss = criterion(a.view((- 1), c), y.view((- 1)))
        # m = total count of valid (unmasked) positions.
        m = t.sum(mask)
        mask = _sequence_mask(mask, sLen)
        acc = (t.sum((mask.data.float() * (y.data == preds).float())) / float(m.data[0]))
    else:
        (_, preds) = t.max(a.data, 1)
        loss = criterion(a, y)
        acc = t.mean((y.data == preds).float())
    return (loss, acc)
|
class ExecutionEngine(nn.Module):
    """Assembles predicted programs into neural-module trees, executes them
    over CNN image features, and classifies the root module's output.

    Cells 0 and 1 are reserved as None: they are arity-0 tokens whose nodes
    pass the image features through unchanged.
    """
    def __init__(self, numUnary, numBinary, numClasses):
        super(ExecutionEngine, self).__init__()
        # arities[i] = number of children (0, 1 or 2) of cell i.
        self.arities = (((2 * [0]) + ([1] * numUnary)) + ([2] * numBinary))
        unaries = [UnaryModule() for i in range(numUnary)]
        binaries = [BinaryModule() for i in range(numBinary)]
        self.cells = nn.ModuleList((((2 * [None]) + unaries) + binaries))
        self.CNN = CNN()
        self.classifier = EngineClassifier(numClasses)
    def parallel(self, pInds, p, imgFeats):
        """Fast path: batch-execute all programs via FasterExecutioner."""
        progs = []
        for i in range(len(pInds)):
            piInds = pInds[i]
            pi = p[i]
            # Per-sample image feature slice (keeps a leading batch dim of 1).
            feats = imgFeats[i:(i + 1)]
            prog = Program(piInds, pi, feats, self.arities)
            prog.build()
            progs += [prog]
        exeQ = FasterExecutioner(progs, self.cells)
        a = exeQ.execute()
        return a
    def sequential(self, p, imgFeats):
        """Slow path: execute programs one at a time via Executioner.

        NOTE(review): Program is constructed here with 3 arguments, but its
        __init__ takes (prog, mul, imgFeats, arities) -- this path looks
        stale; confirm before using fast=False.
        """
        progs = []
        a = []
        execs = []
        for i in range(len(p)):
            pi = p[i]
            feats = imgFeats[i:(i + 1)]
            prog = Program(pi, feats, self.arities)
            prog.build()
            exeQ = Executioner(prog, self.cells)
            a += [exeQ.execute()]
            execs += [exeQ]
        a = t.cat(a, 0)
        return a
    def forward(self, x, fast=True):
        """x = (program indices, program multipliers, images); returns logits."""
        (pInds, p, img) = x
        a = []
        imgFeats = self.CNN(img)
        pInds = pInds.data.cpu().numpy().tolist()
        if fast:
            a = self.parallel(pInds, p, imgFeats)
        else:
            a = self.sequential(p, imgFeats)
        a = self.classifier(a)
        return a
|
class EngineClassifier(nn.Module):
    """Classification head: 1x1 conv to 512 channels, 2x2 max-pool, then a
    two-layer MLP over the flattened 7x7 feature map."""
    def __init__(self, numClasses):
        super(EngineClassifier, self).__init__()
        self.conv1 = utils.Conv2d(128, 512, 1)
        self.fc1 = nn.Linear(512 * 7 * 7, 1024)
        self.pool = nn.MaxPool2d(2)
        self.fc2 = nn.Linear(1024, numClasses)
    def forward(self, x):
        feats = self.pool(F.relu(self.conv1(x)))
        flat = feats.view(feats.size()[0], -1)
        hidden = F.relu(self.fc1(flat))
        return self.fc2(hidden)
|
class CNN(nn.Module):
    """Two ReLU 3x3 convolutions reducing 1024-channel ResNet features to 128 channels."""
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = utils.Conv2d(1024, 128, 3)
        self.conv2 = utils.Conv2d(128, 128, 3)
    def forward(self, x):
        return F.relu(self.conv2(F.relu(self.conv1(x))))
|
class UnaryModule(nn.Module):
    """Residual block of two 3x3 convolutions over 128-channel feature maps."""
    def __init__(self):
        super(UnaryModule, self).__init__()
        self.conv1 = utils.Conv2d(128, 128, 3)
        self.conv2 = utils.Conv2d(128, 128, 3)
    def forward(self, x):
        residual = x
        out = self.conv2(F.relu(self.conv1(x)))
        out = out + residual
        return F.relu(out)
|
class BinaryModule(nn.Module):
    """Fuse two 128-channel maps (channel concat + 1x1 conv), then a residual
    block of two 3x3 convolutions."""
    def __init__(self):
        super(BinaryModule, self).__init__()
        self.conv1 = utils.Conv2d(256, 128, 1)
        self.conv2 = utils.Conv2d(128, 128, 3)
        self.conv3 = utils.Conv2d(128, 128, 3)
    def forward(self, x1, x2):
        fused = F.relu(self.conv1(t.cat((x1, x2), 1)))
        out = self.conv3(F.relu(self.conv2(fused)))
        out = out + fused
        return F.relu(out)
|
class Upscale(nn.Module):
    """Expand a single scalar into a (N, 128, 14, 14) feature map with one linear layer."""
    def __init__(self):
        super(Upscale, self).__init__()
        self.fc = nn.Linear(1, 128 * 14 * 14)
    def forward(self, x):
        scalar = x.view(1, 1)
        feats = self.fc(scalar)
        return feats.view(-1, 128, 14, 14)
|
class Node():
    """One node of an executable program tree."""
    def __init__(self, prev):
        # Parent pointer; child outputs are accumulated into inpData by the
        # executors.
        self.prev = prev
        self.inpData = []
    def build(self, cellInd, mul, arity):
        """Attach cell metadata and allocate *arity* empty child slots."""
        self.next = [None for _ in range(arity)]
        self.arity = arity
        self.cellInd = cellInd
        self.mul = mul
|
class Program():
    """Tree representation of a prefix-serialized program over image features.

    `prog` is a prefix-order list of cell indices, `mul` a parallel list of
    per-token multipliers, and `arities` maps cell index -> child count.
    """
    def __init__(self, prog, mul, imgFeats, arities):
        self.prog = prog
        self.mul = mul
        self.imgFeats = imgFeats
        self.arities = arities
        self.root = Node(None)
    def build(self, ind=0):
        """Expand the prefix token list into a tree rooted at self.root."""
        self.buildInternal(self.root)
    def buildInternal(self, cur=None, count=0):
        # Running past the end of the token list yields an implicit arity-0
        # leaf with cell 0 and multiplier 1.
        if (count >= len(self.prog)):
            arity = 0
            ind = 0
            mul = 1.0
        else:
            ind = self.prog[count]
            mul = self.mul[count]
            arity = self.arities[ind]
        cur.build(ind, mul, arity)
        if (arity == 0):
            # Leaves consume the image features directly.
            cur.inpData = [self.imgFeats]
        elif (arity == 1):
            cur.next = [Node(cur)]
            count = self.buildInternal(cur.next[0], (count + 1))
        elif (arity == 2):
            # Callers advance `count` by 1 per consumed token; each recursive
            # call returns the count after its whole subtree.
            cur.next = [Node(cur), Node(cur)]
            count = self.buildInternal(cur.next[0], (count + 1))
            count = self.buildInternal(cur.next[1], (count + 1))
        return count
    def flat(self):
        """Pre-order list of cell indices."""
        return self.flatInternal(self.root, [])
    def flatInternal(self, cur, flattened):
        flattened += [cur.cellInd]
        for e in cur.next:
            self.flatInternal(e, flattened)
        return flattened
    def topologicalSort(self):
        """Post-order (children-first) list of nodes; safe execution order."""
        return self.topInternal(self.root, [])
    def topInternal(self, cur, flattened):
        for e in cur.next:
            self.topInternal(e, flattened)
        flattened += [cur]
        return flattened
|
class HighArcESort():
    """Assign each node of a program tree a unique rank (children before
    parents, a node's rank exceeding every rank in its subtrees) and collect
    them into a rank -> node dict. Single-use: call once per instance."""
    def __init__(self):
        self.out = {}
    def __call__(self, root):
        assert (not self.out)
        self.highArcESortInternal(root, 0)
        return self.out
    def highArcESortInternal(self, cur, rank):
        # Visit children first; a node's rank is one past the highest rank
        # consumed by any of its subtrees.
        for child in cur.next:
            rank = max(rank, self.highArcESortInternal(child, rank))
        self.out[rank] = cur
        return rank + 1
|
class FasterExecutioner():
    """Batch-executes many program trees: nodes are bucketed by dependency
    rank (HighArcESort), and within a rank all nodes sharing a cell are run
    as one batched call on concatenated inputs."""
    def __init__(self, progs, cells):
        self.cells = cells
        self.progs = progs
        self.roots = [p.root for p in progs]
        self.sortProgs()
        self.maxKey = max(list(self.progs.keys()))
    def sortProgs(self):
        """Re-bucket self.progs into {rank: [one node per program at that rank]}."""
        progs = {}
        for prog in self.progs:
            prog = HighArcESort()(prog.root)
            for (rank, nodeList) in prog.items():
                progs.setdefault(rank, []).append(nodeList)
        self.progs = progs
    def execute(self):
        """Evaluate rank by rank; return the concatenated root outputs."""
        for s in range((self.maxKey + 1)):
            nodes = self.progs[s]
            groupedNodes = {}
            for node in nodes:
                groupedNodes.setdefault(node.cellInd, []).append(node)
            for (cellInd, nodes) in groupedNodes.items():
                arity = nodes[0].arity
                cell = self.cells[cellInd]
                # Arity-0 nodes fall through: their inpData[0] (image
                # features) is forwarded unchanged.
                outData = [node.inpData[0] for node in nodes]
                if (arity == 1):
                    arg = t.cat(outData, 0)
                    outData = cell(arg)
                    outData = [outData[i:(i + 1)] for i in range(outData.size()[0])]
                elif (arity == 2):
                    # NOTE(review): the cell receives (inpData[1], inpData[0]),
                    # the opposite argument order of FastExecutioner and
                    # Executioner below -- confirm which ordering is intended.
                    arg2 = t.cat(outData, 0)
                    arg1 = t.cat([node.inpData[1] for node in nodes], 0)
                    outData = cell(arg1, arg2)
                    outData = [outData[i:(i + 1)] for i in range(outData.size()[0])]
                for (node, outDat) in zip(nodes, outData):
                    # NOTE(review): this branch is a no-op (outDat = outDat);
                    # it looks like applying node.mul was intended here.
                    if (type(node.mul) != float):
                        outDat = outDat
                    if (node.prev is None):
                        node.outData = outDat
                    else:
                        node.prev.inpData += [outDat]
        outData = [root.outData for root in self.roots]
        return t.cat(outData, 0)
|
class FastExecutioner():
    """Batch-executes program trees in lockstep over each program's post-order
    node list: step s runs the s-th node of every program, grouping nodes
    that share a cell into one batched call. Post-order guarantees a node's
    children were produced at earlier steps of the same program."""
    def __init__(self, progs, cells):
        self.cells = cells
        self.progs = progs
        self.sortProgs()
    def sortProgs(self):
        # Replace each Program with its post-order (children-first) node list.
        for i in range(len(self.progs)):
            self.progs[i] = self.progs[i].topologicalSort()
    def execute(self):
        """Run all programs; return the concatenated root outputs."""
        maxLen = max([len(e) for e in self.progs])
        for s in range(maxLen):
            nodes = []
            for i in range(len(self.progs)):
                prog = self.progs[i]
                # Shorter programs simply finish early.
                if (len(prog) <= s):
                    continue
                nodes += [prog[s]]
            groupedNodes = {}
            for node in nodes:
                groupedNodes.setdefault(node.cellInd, []).append(node)
            for (cellInd, nodes) in groupedNodes.items():
                arity = nodes[0].arity
                cell = self.cells[cellInd]
                # Arity-0 nodes forward their input features unchanged.
                outData = [node.inpData[0] for node in nodes]
                if (arity == 1):
                    arg = t.cat(outData, 0)
                    outData = cell(arg)
                    outData = t.split(outData, 1, 0)
                elif (arity == 2):
                    arg1 = t.cat(outData, 0)
                    arg2 = t.cat([node.inpData[1] for node in nodes], 0)
                    outData = cell(arg1, arg2)
                    outData = t.split(outData, 1, 0)
                for (node, outDat) in zip(nodes, outData):
                    if (node.prev is None):
                        node.outData = outDat
                    else:
                        node.prev.inpData += [outDat]
        # prog[-1] is the root in post-order.
        outData = [prog[(- 1)].outData for prog in self.progs]
        return t.cat(outData, 0)
|
class Executioner():
    """Recursively evaluate a single program tree against its cell modules."""
    def __init__(self, prog, cells):
        self.prog = prog
        self.cells = cells
    def execute(self):
        """Evaluate the whole program; return the root node's output."""
        return self.executeInternal(self.prog.root)
    def executeInternal(self, cur):
        # Leaves (arity 0) carry their input (image features) directly.
        if cur.arity == 0:
            return cur.inpData[0]
        # Evaluate the node's children in order, then apply its cell.
        args = [self.executeInternal(child) for child in cur.next[:cur.arity]]
        return self.cells[cur.cellInd](*args)
|
class ProgramGenerator(nn.Module):
    """Seq2seq program generator: 2-layer LSTM encoder/decoder mapping question
    token ids to program-token logits of shape (batch, qLen, pVocab)."""
    def __init__(self, embedDim, hGen, qLen, qVocab, pVocab):
        super(ProgramGenerator, self).__init__()
        self.embed = nn.Embedding(qVocab, embedDim)
        self.encoder = t.nn.LSTM(embedDim, hGen, 2, batch_first=True)
        self.decoder = t.nn.LSTM(hGen, hGen, 2, batch_first=True)
        self.proj = nn.Linear(hGen, pVocab)
        self.qLen = qLen
        self.hGen = hGen
        self.pVocab = pVocab
    def forward(self, x):
        x = self.embed(x)
        (x, state) = self.encoder(x)
        # Feed the encoder's final hidden state, repeated qLen times, as the
        # decoder input sequence.
        # NOTE(review): state[0][0] is the *first* layer's hidden state of
        # the 2-layer LSTM; the top layer would be state[0][-1] -- confirm
        # which was intended.
        stateInp = [state[0][0] for i in range(self.qLen)]
        stateInp = t.stack(stateInp, 1)
        (x, _) = self.decoder(stateInp, state)
        sz = list(x.size())
        # Project every timestep to program-vocabulary logits.
        x = x.contiguous().view((- 1), self.hGen)
        x = self.proj(x)
        x = x.view(*sz[:2], (- 1))
        return x
|
def ResNetFeatureExtractor():
    # Build an ImageNet-pretrained ResNet-101 truncated after layer3, in eval
    # mode, for use as a frozen image feature extractor.
    # NOTE: downloads the pretrained checkpoint on first call.
    resnet = torchvision.models.resnet101(pretrained=True)
    return nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1, resnet.layer2, resnet.layer3).eval()
|
def conv3x3(in_planes, out_planes, stride=1):
    """Bias-free 3x3 convolution with padding=1 (spatial size preserved at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        padding=1,
        stride=stride,
        bias=False,
    )
|
class BasicBlock(nn.Module):
    """Standard two-conv ResNet basic block with identity (or downsampled)
    skip connection."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        # Skip branch: identity unless a downsample module was supplied.
        shortcut = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
|
class Bottleneck(nn.Module):
    """Standard ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4),
    with identity (or downsampled) skip connection."""
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        # Skip branch: identity unless a downsample module was supplied.
        shortcut = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
|
class BottleneckFinal(nn.Module):
    """Bottleneck variant used as the final feature head: 1x1 -> 3x3 ->
    1x1-expand, but with no bn3, no residual add, and no final ReLU, so raw
    conv3 activations are returned as features.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        # `downsample` is accepted for signature compatibility with
        # Bottleneck but is unused: this block has no skip connection.
        super(BottleneckFinal, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, (planes * 4), kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        # Fix: removed the dead `residual = x` assignment -- no skip
        # connection is ever added in this block.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.conv3(out)
        return out
|
class ResNet(nn.Module):
    """ResNet trunk (conv1..layer3) with a BottleneckFinal feature head,
    initialized from the torchvision pretrained ResNet-101 checkpoint.

    NOTE(review): layers = [3, 4, 5] differs from ResNet-101's [3, 4, 23];
    remote weights with no local counterpart are dropped below, so this loads
    a truncated network -- confirm the truncation is intended.
    NOTE: downloads the pretrained checkpoint at construction time.
    """
    def __init__(self, num_classes=1000):
        # num_classes is accepted but unused: no classifier head is built.
        block = Bottleneck
        layers = [3, 4, 5]
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.featureExtract = BottleneckFinal(1024, 256, 1, None)
        # He-style init for convs; unit weight / zero bias for batchnorms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # Download pretrained weights and remap the remote layer3.5.* block
        # onto the local featureExtract.* head before loading.
        localStates = self.state_dict()
        url = 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth'
        remoteStates = model_zoo.load_url(url)
        localKeys = localStates.keys()
        remoteKeys = remoteStates.keys()
        localPrefix = 'featureExtract.'
        remotePrefix = 'layer3.5.'
        mismatch = ['conv1.weight', 'bn1.weight', 'bn1.bias', 'bn1.running_mean', 'bn1.running_var', 'conv2.weight', 'bn2.weight', 'bn2.bias', 'bn2.running_mean', 'bn2.running_var', 'conv3.weight']
        for e in mismatch:
            remoteStates[(localPrefix + e)] = remoteStates[(remotePrefix + e)]
        # Drop remote keys with no local counterpart (e.g. fc, layer4).
        for k in list(remoteKeys):
            if (k not in list(localKeys)):
                remoteStates.pop(k)
        self.load_state_dict(remoteStates)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` bottleneck blocks, downsampling the skip path when
        the spatial stride or channel count changes."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.featureExtract(x)
        return x
|
def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    # Bug fix: the local ResNet.__init__ hard-codes Bottleneck and its layer
    # counts and only accepts keyword arguments, so passing the block and
    # layer list positionally raised a TypeError.
    model = ResNet(**kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return model
|
class Node():
    """Program-tree node built from one CLEVR function-annotation cell."""
    def __init__(self, cell):
        # Child indices, reversed relative to the annotation order.
        self.nxt = cell['inputs'][::-1]
        self.func = cell['function']
        # Parameterized functions (e.g. filter_color with value 'red') get
        # the value folded into the name.
        if len(cell['value_inputs']) > 0:
            self.func = self.func + ('_' + cell['value_inputs'][0])
|
class BTree():
    """Program tree rebuilt from a CLEVR cell list: the last cell is the root
    and each cell's 'inputs' are indices of its children in the list."""
    def __init__(self, cells):
        self.root = Node(cells[(- 1)])
        self.addNodes(cells[:(- 1)], self.root)
    def addNodes(self, cells, cur):
        """Recursively replace child indices on *cur* with Node instances."""
        for i in range(len(cur.nxt)):
            child = Node(cells[cur.nxt[i]])
            cur.nxt[i] = child
            self.addNodes(cells, child)
    def flat(self):
        """Pre-order list of function names."""
        return self.flatInternal(self.root, [])
    def flatInternal(self, cur, flattened):
        flattened.append(cur.func)
        for child in cur.nxt:
            self.flatInternal(child, flattened)
        return flattened
    def print(self):
        """Pre-order print of function names."""
        self.printInternal(self.root)
    def printInternal(self, cur):
        print(cur.func)
        for child in cur.nxt:
            self.printInternal(child)
|
def loadDat(fName='../data/clevr/questions/CLEVR_val_questions.json'):
    """Load the question list from a CLEVR annotation JSON file.

    Args:
        fName: path to the question JSON; defaults to the val split, matching
            the original hard-coded behavior (generalized so other splits or
            locations can be loaded).
    """
    with open(fName) as dataF:
        questions = json.load(dataF)['questions']
    return questions
|
def getFuncs(dat):
    """Collect the sorted set of arity-prefixed function tokens used in the
    programs of *dat* (a list of CLEVR question dicts)."""
    vocab = set()
    for sample in dat:
        for cell in sample['program']:
            func = cell['function']
            values = cell['value_inputs']
            # Parameterized functions fold their value into the name.
            if len(values) > 0:
                func = func + ('_' + values[0])
            # Prefix with the arity (number of inputs).
            vocab.add((str(len(cell['inputs'])) + '_') + func)
    return sorted(vocab)
|
def getAllWords(fName):
    """Extract question and answer words from a CLEVR question JSON file.

    Questions are lowercased, the trailing '?' is stripped before splitting
    and appended once as its own token at the end. Answers (when present)
    are lowercased and split.

    Returns:
        (wordsX, wordsY): question-word list (with '?' appended) and
        answer-word list.
    """
    # Bug fix: the original `open(fName).read()` leaked the file handle.
    with open(fName) as f:
        dat = json.load(f)
    dat = dat['questions']
    wordsX = []
    wordsY = []
    for e in dat:
        wordsX += e['question'].lower()[:(- 1)].split()
        if ('answer' in e):
            wordsY += e['answer'].lower().split()
    return ((wordsX + ['?']), wordsY)
|
def name(split):
    """Build the CLEVR question-JSON path for a dataset split ('train'/'val'/'test')."""
    return '../data/clevr/questions/CLEVR_{}_questions.json'.format(split)
|
def plotResults():
    """Plot per-example execution time vs. minibatch size (log-log, base 2)
    for the vanilla and improved topological-sort executors."""
    batch = [1, 32, 64, 320, 640, 850]
    # The vanilla timings lack a measurement for the largest batch size,
    # hence batch[:-1] when plotting them.
    fVanilla = [0.0031771, 0.0031694, 0.0026328, 0.00238375, 0.0023333]
    fOurs = [0.003963, 0.00248858, 0.001686116, 0.000710902, 0.0005151, 0.00042235]
    cVanilla = [0.002315934, 0.00287098, 0.00249189, 0.002322, 0.002199]
    cOurs = [0.002244463, 0.00155909, 0.00117287, 0.000341281, 0.000202705, 0.000156266]
    lineWidth = 3
    ls = 26
    fs = 24
    ts = 26
    leg = 24
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xscale('log', basex=2)
    ax.set_yscale('log', basey=2)
    ax.set_xlim(1, 1024)
    ax.set_ylim((2 ** (- 13)), (2 ** (- 8)))
    ax.tick_params(axis='x', labelsize=ls)
    ax.tick_params(axis='y', labelsize=ls)
    plt.xlabel('Minibatch Size', fontsize=fs)
    plt.ylabel('Execution Time (sec / example)', fontsize=fs)
    plt.title('Efficiency Gains with Improved Topological Sort', fontsize=ts)
    # Bug fix: ax.hold() was deprecated and removed in Matplotlib 2.x/3.x;
    # repeated plot() calls accumulate by default, so the call is dropped.
    # Bug fix: the line-width keyword is 'linewidth' -- the capitalized
    # 'LineWidth' is rejected by Matplotlib's Line2D property setter.
    ax.plot(batch[:(- 1)], fVanilla, linewidth=lineWidth, label='Vanilla Forward')
    ax.plot(batch, fOurs, linewidth=lineWidth, label='Our Forward')
    ax.plot(batch[:(- 1)], cVanilla, linewidth=lineWidth, label='Vanilla Cell')
    ax.plot(batch, cOurs, linewidth=lineWidth, label='Our Cell')
    ax.legend(loc='lower left', shadow=False, prop={'size': leg})
    plt.show()
|
class _NNMFBase(object):
    """Shared scaffolding for Neural Network Matrix Factorization models.

    Subclasses implement _init_vars (latent variables plus the rating
    prediction self.r) and _init_ops (self.loss and self.optimize_steps).

    NOTE(review): uses pre-1.0 TensorFlow APIs (tf.sub, tf.placeholder,
    tf.initialize_all_variables) -- pinned to an old TF release.
    """
    def __init__(self, num_users, num_items, D=10, Dprime=60, hidden_units_per_layer=50, latent_normal_init_params={'mean': 0.0, 'stddev': 0.1}, model_filename='model/nnmf.ckpt'):
        # NOTE(review): mutable default for latent_normal_init_params is
        # shared across instances; safe only while never mutated.
        self.num_users = num_users
        self.num_items = num_items
        # D / Dprime: latent dimensionalities of the two factor pairs.
        self.D = D
        self.Dprime = Dprime
        self.hidden_units_per_layer = hidden_units_per_layer
        self.latent_normal_init_params = latent_normal_init_params
        self.model_filename = model_filename
        self._epochs = 0
        # Placeholders for a minibatch of (user, item, rating) triples.
        self.user_index = tf.placeholder(tf.int32, [None])
        self.item_index = tf.placeholder(tf.int32, [None])
        self.r_target = tf.placeholder(tf.float32, [None])
        self._init_vars()
        self._init_ops()
        self.rmse = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(self.r, self.r_target))))
    def _init_vars(self):
        raise NotImplementedError
    def _init_ops(self):
        raise NotImplementedError
    def init_sess(self, sess):
        """Attach a session and initialize all variables."""
        self.sess = sess
        init = tf.initialize_all_variables()
        self.sess.run(init)
    def _train_iteration(self, data, additional_feed=None):
        """Run every optimize step once on one batch of rating data."""
        user_ids = data['user_id']
        item_ids = data['item_id']
        ratings = data['rating']
        feed_dict = {self.user_index: user_ids, self.item_index: item_ids, self.r_target: ratings}
        if additional_feed:
            feed_dict.update(additional_feed)
        for step in self.optimize_steps:
            self.sess.run(step, feed_dict=feed_dict)
        self._epochs += 1
    def train_iteration(self, data):
        self._train_iteration(data)
    def eval_loss(self, data):
        raise NotImplementedError
    def eval_rmse(self, data):
        """Evaluate RMSE on a data dict with user_id/item_id/rating arrays."""
        user_ids = data['user_id']
        item_ids = data['item_id']
        ratings = data['rating']
        feed_dict = {self.user_index: user_ids, self.item_index: item_ids, self.r_target: ratings}
        return self.sess.run(self.rmse, feed_dict=feed_dict)
    def predict(self, user_id, item_id):
        """Predict the rating for a single (user, item) pair."""
        rating = self.sess.run(self.r, feed_dict={self.user_index: [user_id], self.item_index: [item_id]})
        return rating[0]
|
class NNMF(_NNMFBase):
    """Point-estimate NNMF: latent factor matrices trained by maximum
    likelihood with L2 regularization (weight `lam`), alternating Adam steps
    over the MLP weights and the latent factors."""
    def __init__(self, *args, **kwargs):
        # Pop the regularization weight before delegating to the base class.
        if ('lam' in kwargs):
            self.lam = float(kwargs['lam'])
            del kwargs['lam']
        else:
            self.lam = 0.01
        super(NNMF, self).__init__(*args, **kwargs)
    def _init_vars(self):
        # U/V: inner-product factors; Uprime/Vprime: elementwise factors.
        self.U = tf.Variable(tf.truncated_normal([self.num_users, self.D], **self.latent_normal_init_params))
        self.Uprime = tf.Variable(tf.truncated_normal([self.num_users, self.Dprime], **self.latent_normal_init_params))
        self.V = tf.Variable(tf.truncated_normal([self.num_items, self.D], **self.latent_normal_init_params))
        self.Vprime = tf.Variable(tf.truncated_normal([self.num_items, self.Dprime], **self.latent_normal_init_params))
        # Per-example rows gathered for the current minibatch.
        self.U_lu = tf.nn.embedding_lookup(self.U, self.user_index)
        self.Uprime_lu = tf.nn.embedding_lookup(self.Uprime, self.user_index)
        self.V_lu = tf.nn.embedding_lookup(self.V, self.item_index)
        self.Vprime_lu = tf.nn.embedding_lookup(self.Vprime, self.item_index)
        # MLP input: [U, V, U' * V'] per example.
        f_input_layer = tf.concat(concat_dim=1, values=[self.U_lu, self.V_lu, tf.mul(self.Uprime_lu, self.Vprime_lu)])
        (_r, self.mlp_weights) = build_mlp(f_input_layer, hidden_units_per_layer=self.hidden_units_per_layer)
        self.r = tf.squeeze(_r, squeeze_dims=[1])
    def _init_ops(self):
        # Squared-error reconstruction plus L2 penalty on all latent factors.
        reconstruction_loss = tf.reduce_sum(tf.square(tf.sub(self.r_target, self.r)), reduction_indices=[0])
        reg = tf.add_n([tf.reduce_sum(tf.square(self.Uprime), reduction_indices=[0, 1]), tf.reduce_sum(tf.square(self.U), reduction_indices=[0, 1]), tf.reduce_sum(tf.square(self.V), reduction_indices=[0, 1]), tf.reduce_sum(tf.square(self.Vprime), reduction_indices=[0, 1])])
        self.loss = (reconstruction_loss + (self.lam * reg))
        self.optimizer = tf.train.AdamOptimizer()
        # Alternate: one step on the MLP weights, one on the latent factors.
        f_train_step = self.optimizer.minimize(self.loss, var_list=self.mlp_weights.values())
        latent_train_step = self.optimizer.minimize(self.loss, var_list=[self.U, self.Uprime, self.V, self.Vprime])
        self.optimize_steps = [f_train_step, latent_train_step]
    def eval_loss(self, data):
        """Evaluate the regularized loss on a data dict with user_id/item_id/rating."""
        user_ids = data['user_id']
        item_ids = data['item_id']
        ratings = data['rating']
        feed_dict = {self.user_index: user_ids, self.item_index: item_ids, self.r_target: ratings}
        return self.sess.run(self.loss, feed_dict=feed_dict)
|
class SVINNMF(_NNMFBase):
    """NNMF trained by stochastic variational inference.

    Each latent factor matrix (U, U', V, V') gets a mean-field Gaussian
    posterior; the loss is the negative ELBO, with the KL term optionally
    annealed in by a sigmoid schedule (see get_kl_weight).
    """
    num_latent_samples = 1
    num_data_samples = 3
    def __init__(self, *args, **kwargs):
        """Pop SVI-specific hyperparameters out of kwargs, then delegate to
        _NNMFBase (whose __init__ does not accept them).

        Optional kwargs: r_var, U_prior_var, Uprime_prior_var, V_prior_var,
        Vprime_prior_var, kl_full_epoch, anneal_kl.
        """
        if ('r_var' in kwargs):
            self.r_var = float(kwargs['r_var'])
            # Bug fix: the original did `del kwargs['r_sigma']` here, which
            # raised KeyError whenever r_var was actually supplied.
            del kwargs['r_var']
        else:
            self.r_var = 1.0
        if ('U_prior_var' in kwargs):
            self.U_prior_var = float(kwargs['U_prior_var'])
            del kwargs['U_prior_var']
        else:
            self.U_prior_var = 5.0
        if ('Uprime_prior_var' in kwargs):
            self.Uprime_prior_var = float(kwargs['Uprime_prior_var'])
            del kwargs['Uprime_prior_var']
        else:
            self.Uprime_prior_var = 5.0
        if ('V_prior_var' in kwargs):
            self.V_prior_var = float(kwargs['V_prior_var'])
            del kwargs['V_prior_var']
        else:
            self.V_prior_var = 5.0
        if ('Vprime_prior_var' in kwargs):
            self.Vprime_prior_var = float(kwargs['Vprime_prior_var'])
            del kwargs['Vprime_prior_var']
        else:
            self.Vprime_prior_var = 5.0
        if ('kl_full_epoch' in kwargs):
            self.kl_full_epoch = int(kwargs['kl_full_epoch'])
            del kwargs['kl_full_epoch']
        else:
            self.kl_full_epoch = 1000
        if ('anneal_kl' in kwargs):
            self.anneal_kl = bool(kwargs['anneal_kl'])
            # Bug fix: the original never removed 'anneal_kl' from kwargs,
            # so it leaked through to _NNMFBase.__init__, which rejects it.
            del kwargs['anneal_kl']
        else:
            self.anneal_kl = True
        super(SVINNMF, self).__init__(*args, **kwargs)
    def _init_vars(self):
        """Create posterior mean/log-variance variables, sample the latents,
        and build the rating MLP on the sampled factors."""
        self.U_mu = tf.Variable(tf.truncated_normal([self.num_users, self.D], **self.latent_normal_init_params))
        self.U_log_var = tf.Variable(tf.random_uniform([self.num_users, self.D], minval=0.0, maxval=0.5))
        self.Uprime_mu = tf.Variable(tf.truncated_normal([self.num_users, self.Dprime], **self.latent_normal_init_params))
        self.Uprime_log_var = tf.Variable(tf.random_uniform([self.num_users, self.Dprime], minval=0.0, maxval=0.5))
        self.V_mu = tf.Variable(tf.truncated_normal([self.num_items, self.D], **self.latent_normal_init_params))
        self.V_log_var = tf.Variable(tf.random_uniform([self.num_items, self.D], minval=0.0, maxval=0.5))
        self.Vprime_mu = tf.Variable(tf.truncated_normal([self.num_items, self.Dprime], **self.latent_normal_init_params))
        self.Vprime_log_var = tf.Variable(tf.random_uniform([self.num_items, self.Dprime], minval=0.0, maxval=0.5))
        # Gather per-example posterior parameters for the current batch.
        U_mu_lu = tf.nn.embedding_lookup(self.U_mu, self.user_index)
        U_log_var_lu = tf.nn.embedding_lookup(self.U_log_var, self.user_index)
        Uprime_mu_lu = tf.nn.embedding_lookup(self.Uprime_mu, self.user_index)
        Uprime_log_var_lu = tf.nn.embedding_lookup(self.Uprime_log_var, self.user_index)
        V_mu_lu = tf.nn.embedding_lookup(self.V_mu, self.item_index)
        V_log_var_lu = tf.nn.embedding_lookup(self.V_log_var, self.item_index)
        Vprime_mu_lu = tf.nn.embedding_lookup(self.Vprime_mu, self.item_index)
        Vprime_log_var_lu = tf.nn.embedding_lookup(self.Vprime_log_var, self.item_index)
        # Mean-field Gaussian posteriors (stddev = sqrt(exp(log_var))).
        q_U = tf.contrib.distributions.MultivariateNormalDiag(mu=U_mu_lu, diag_stdev=tf.sqrt(tf.exp(U_log_var_lu)))
        q_Uprime = tf.contrib.distributions.MultivariateNormalDiag(mu=Uprime_mu_lu, diag_stdev=tf.sqrt(tf.exp(Uprime_log_var_lu)))
        q_V = tf.contrib.distributions.MultivariateNormalDiag(mu=V_mu_lu, diag_stdev=tf.sqrt(tf.exp(V_log_var_lu)))
        q_Vprime = tf.contrib.distributions.MultivariateNormalDiag(mu=Vprime_mu_lu, diag_stdev=tf.sqrt(tf.exp(Vprime_log_var_lu)))
        # One reparameterized sample per latent.
        self.U = q_U.sample()
        self.Uprime = q_Uprime.sample()
        self.V = q_V.sample()
        self.Vprime = q_Vprime.sample()
        # MLP input: [U, V, U' * V'] per example.
        f_input_layer = tf.concat(concat_dim=1, values=[self.U, self.V, tf.mul(self.Uprime, self.Vprime)])
        (self.r_mu, self.mlp_weights) = build_mlp(f_input_layer, hidden_units_per_layer=self.hidden_units_per_layer)
        self.r = tf.squeeze(self.r_mu, squeeze_dims=[1])
        # KL weight: fed per-step when annealing, constant 1 otherwise.
        self.kl_weight = (tf.placeholder(tf.float32) if self.anneal_kl else tf.constant(1.0, dtype=tf.float32))
    def _init_ops(self):
        """Negative-ELBO loss (Gaussian likelihood + KL-weighted prior terms)
        with a single Adam step over all variables."""
        KL_U = KL(self.U_mu, self.U_log_var, prior_var=self.U_prior_var)
        KL_Uprime = KL(self.Uprime_mu, self.Uprime_log_var, prior_var=self.Uprime_prior_var)
        KL_V = KL(self.V_mu, self.V_log_var, prior_var=self.V_prior_var)
        KL_Vprime = KL(self.Vprime_mu, self.Vprime_log_var, prior_var=self.Vprime_prior_var)
        KL_all = (((KL_U + KL_Uprime) + KL_V) + KL_Vprime)
        log_prob = ((- (1 / (2.0 * self.r_var))) * tf.reduce_sum(tf.square(tf.sub(self.r_target, self.r)), reduction_indices=[0]))
        elbo = (log_prob - (self.kl_weight * KL_all))
        self.loss = (- elbo)
        self.optimizer = tf.train.AdamOptimizer()
        self.optimize_steps = [self.optimizer.minimize(self.loss)]
    def train_iteration(self, data):
        # Feed the annealed KL weight only when annealing is enabled.
        additional_feed = ({self.kl_weight: get_kl_weight(self._epochs, on_epoch=self.kl_full_epoch)} if self.anneal_kl else {})
        super(SVINNMF, self)._train_iteration(data, additional_feed=additional_feed)
    def eval_loss(self, data):
        """Evaluate the (annealed) loss on a data dict with user_id/item_id/rating."""
        user_ids = data['user_id']
        item_ids = data['item_id']
        ratings = data['rating']
        feed_dict = {self.user_index: user_ids, self.item_index: item_ids, self.r_target: ratings, self.kl_weight: get_kl_weight(self._epochs, on_epoch=self.kl_full_epoch)}
        return self.sess.run(self.loss, feed_dict=feed_dict)
|
def KL(mean, log_var, prior_var):
    """Computes KL divergence for a group of univariate normals (i.e. every
    dimension of a latent), summed over all entries."""
    var = tf.exp(log_var)
    log_ratio = tf.log(math.sqrt(prior_var) / tf.sqrt(var))
    moment_term = (var + tf.square(mean)) / (2.0 * prior_var)
    return tf.reduce_sum(log_ratio + moment_term, reduction_indices=[0, 1])
|
def _weight_init_range(n_in, n_out):
'Calculates range for picking initial weight values from a uniform distribution.'
range = ((4.0 * math.sqrt(6.0)) / math.sqrt((n_in + n_out)))
return {'minval': (- range), 'maxval': range}
|
def build_mlp(f_input_layer, hidden_units_per_layer):
'Builds a feed-forward NN (MLP) with 3 hidden layers.'
# Infer the input width from the static shape of the concatenated features.
num_f_inputs = f_input_layer.get_shape().as_list()[1]
# Uniform weight init in the range given by _weight_init_range; zero biases.
mlp_weights = {'h1': tf.Variable(tf.random_uniform([num_f_inputs, hidden_units_per_layer], **_weight_init_range(num_f_inputs, hidden_units_per_layer))), 'b1': tf.Variable(tf.zeros([hidden_units_per_layer])), 'h2': tf.Variable(tf.random_uniform([hidden_units_per_layer, hidden_units_per_layer], **_weight_init_range(hidden_units_per_layer, hidden_units_per_layer))), 'b2': tf.Variable(tf.zeros([hidden_units_per_layer])), 'h3': tf.Variable(tf.random_uniform([hidden_units_per_layer, hidden_units_per_layer], **_weight_init_range(hidden_units_per_layer, hidden_units_per_layer))), 'b3': tf.Variable(tf.zeros([hidden_units_per_layer])), 'out': tf.Variable(tf.random_uniform([hidden_units_per_layer, 1], **_weight_init_range(hidden_units_per_layer, 1))), 'b_out': tf.Variable(tf.zeros([1]))}
mlp_layer_1 = tf.nn.sigmoid((tf.matmul(f_input_layer, mlp_weights['h1']) + mlp_weights['b1']))
mlp_layer_2 = tf.nn.sigmoid((tf.matmul(mlp_layer_1, mlp_weights['h2']) + mlp_weights['b2']))
mlp_layer_3 = tf.nn.sigmoid((tf.matmul(mlp_layer_2, mlp_weights['h3']) + mlp_weights['b3']))
# Linear (no activation) output layer producing one scalar prediction.
out = (tf.matmul(mlp_layer_3, mlp_weights['out']) + mlp_weights['b_out'])
return (out, mlp_weights)
|
def get_kl_weight(curr_iter, on_epoch=100):
    """Sigmoid-scheduled KL weight, effectively fully on by `on_epoch`.

    The curve is centered at on_epoch/2 with steepness 25/on_epoch.
    """
    steepness = 25.0 / on_epoch
    midpoint = on_epoch / 2.0
    return 1.0 / (1.0 + math.exp(-steepness * (curr_iter - midpoint)))
|
def chunk_df(df, size):
    """Splits a Pandas dataframe into chunks of size `size`.

    See here: https://stackoverflow.com/a/25701576/1424734

    Uses range() (not the Python-2-only xrange) so this also runs under
    Python 3; works on any sliceable object with a length.
    """
    return (df[pos:(pos + size)] for pos in range(0, len(df), size))
|
def load_data(train_filename, valid_filename, test_filename, delimiter='\t', col_names=['user_id', 'item_id', 'rating']):
    """Helper function to load in/preprocess the train/valid/test dataframes.

    Each file is a headerless delimited ratings file; user and item ids are
    shifted from 1-based to 0-based. Returns (train, valid, test) dataframes.
    (`col_names` default is never mutated, so the mutable default is safe and
    kept for interface compatibility.)
    """
    def _load(filename):
        # Read one ratings file and shift ids to 0-based indexing.
        frame = pd.read_csv(filename, delimiter=delimiter, header=None, names=col_names)
        frame['user_id'] = frame['user_id'] - 1
        frame['item_id'] = frame['item_id'] - 1
        return frame
    return (_load(train_filename), _load(valid_filename), _load(test_filename))
|
def train(model, sess, saver, train_data, valid_data, batch_size, max_epochs, use_early_stop, early_stop_max_epoch):
'Train `model` on shuffled mini-batches, logging train/valid RMSE; with early stopping enabled, saves the best-so-far checkpoint and stops after early_stop_max_epoch epochs without improvement.'
# Initial metrics before any training (one sampled batch if batching).
batch = (train_data.sample(batch_size) if batch_size else train_data)
train_error = model.eval_loss(batch)
train_rmse = model.eval_rmse(batch)
valid_rmse = model.eval_rmse(valid_data)
print('[start] Train error: {:3f}, Train RMSE: {:3f}; Valid RMSE: {:3f}'.format(train_error, train_rmse, valid_rmse))
prev_valid_rmse = float('Inf')
early_stop_epochs = 0
# NOTE(review): xrange is Python-2-only; this file elsewhere uses Python-3-only
# constructs (urllib.request) — use range for cross-version compatibility.
for epoch in xrange(max_epochs):
# Reshuffle the whole training set each epoch.
shuffled_df = train_data.sample(frac=1)
batches = (chunk_df(shuffled_df, batch_size) if batch_size else [train_data])
for (batch_iter, batch) in enumerate(batches):
model.train_iteration(batch)
train_error = model.eval_loss(batch)
train_rmse = model.eval_rmse(batch)
valid_rmse = model.eval_rmse(valid_data)
print('[{:d}-{:d}] Train error: {:3f}, Train RMSE: {:3f}; Valid RMSE: {:3f}'.format(epoch, batch_iter, train_error, train_rmse, valid_rmse))
if use_early_stop:
early_stop_epochs += 1
if (valid_rmse < prev_valid_rmse):
# New best validation RMSE: reset patience and checkpoint.
prev_valid_rmse = valid_rmse
early_stop_epochs = 0
saver.save(sess, model.model_filename)
elif (early_stop_epochs == early_stop_max_epoch):
print('Early stopping ({} vs. {})...'.format(prev_valid_rmse, valid_rmse))
break
else:
# No early stopping: always checkpoint the latest parameters.
saver.save(sess, model.model_filename)
|
def test(model, sess, saver, test_data, train_data=None, log=True):
if (train_data is not None):
train_rmse = model.eval_rmse(train_data)
if log:
print('Final train RMSE: {}'.format(train_rmse))
test_rmse = model.eval_rmse(test_data)
if log:
print('Final test RMSE: {}'.format(test_rmse))
return test_rmse
|
def squash(cap_input):
    """Squash nonlinearity: rescales capsule vectors to length in (0, 1).

    cap_input: capsule activations, shape [None, h, w, c] or [None, n, d].
    Returns a tensor with the same shape as cap_input.
    """
    with tf.name_scope('squash'):
        # ||s||^2 along the last (capsule-dimension) axis.
        norm_sq = tf.reduce_sum(tf.square(cap_input), axis=(-1), keep_dims=True)
        # (||s||^2 / (1 + ||s||^2)) * (s / ||s||)
        scale = (norm_sq / (1.0 + norm_sq)) / tf.sqrt(norm_sq)
        return cap_input * scale
|
class CapsNet(object):
# Capsule network (Sabour et al., 2017) for MNIST, built as a TF1 static graph.
def __init__(self, mnist):
'Store the MNIST dataset handle and initialise shape bookkeeping.'
self._mnist = mnist
# Current spatial size of the square feature map (28x28 MNIST input).
self._dim = 28
# Capsule count per layer; slot 0 is a placeholder, real counts are
# appended in _build_net.
self._num_caps = [0]
def _capsule(self, input, i_c, o_c, idx):
'\n compute a capsule,\n conv op with kernel: 9x9, stride: 2,\n padding: VALID, output channels: 8 per capsule.\n As described in the paper.\n :arg\n input: input for computing capsule, shape: [None, w, h, c]\n i_c: input channels\n o_c: output channels\n idx: index of the capsule about to create\n\n :return\n capsule: computed capsule\n '
with tf.variable_scope(('cap_' + str(idx))):
w = tf.get_variable('w', shape=[9, 9, i_c, o_c], dtype=tf.float32)
cap = tf.nn.conv2d(input, w, [1, 2, 2, 1], padding='VALID', name='cap_conv')
if cfg.USE_BIAS:
b = tf.get_variable('b', shape=[o_c], dtype=tf.float32, initializer=self._b_initializer)
cap = (cap + b)
# Squash keeps capsule lengths in (0, 1); expand_dims adds the
# capsule-channel axis used when concatenating capsules.
capsule = squash(cap)
capsule = tf.expand_dims(capsule, axis=1)
return capsule
def _dynamic_routing(self, primary_caps, layer_index):
'\n dynamic routing between capsules\n :arg\n primary_caps: primary capsules with shape [None, 1, 32 x 6 x 6, 1, 8]\n layer_index: index of the current capsule layer, i.e. the input layer for routing\n :return\n digit_caps: the output of digit capsule layer, with shape: [None, 10, 16]\n '
num_caps = self._num_caps[layer_index]
# Transformation matrices W_ij: an 8->16 linear map per (digit class, input capsule).
cap_ws = tf.get_variable('cap_w', shape=[10, num_caps, 8, 16], dtype=tf.float32)
fn_init = tf.zeros([10, num_caps, 1, 16])
# Prediction vectors u_hat = W_ij * u_i, computed per batch element via tf.scan.
cap_predicts = tf.scan((lambda ac, x: tf.matmul(x, cap_ws)), tf.tile(primary_caps, [1, 10, 1, 1, 1]), initializer=fn_init, name='cap_predicts')
cap_predictions = tf.squeeze(cap_predicts, axis=[3])
# Routing logits b_ij; optionally trainable depending on cfg.PRIOR_TRAINING.
log_prior = tf.get_variable('log_prior', shape=[10, num_caps], dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=cfg.PRIOR_TRAINING)
if (cfg.ROUTING_WAY == 'static'):
digit_caps = self._dynamic_routingV1(log_prior, cap_predictions)
elif (cfg.ROUTING_WAY == 'dynamic'):
digit_caps = self._dynamic_routingV2(log_prior, cap_predictions, num_caps)
else:
raise NotImplementedError
return digit_caps
def _dynamic_routingV2(self, prior, cap_predictions, num_caps):
'\n doing dynamic routing with tf.while_loop\n :arg\n prior: log prior for scaling with shape [10, num_caps]\n cap_predictions: predictions from layer below with shape [None, 10, num_caps, 16]\n num_caps: num_caps\n :return\n digit_caps: digit capsules with shape [None, 10, 16]\n '
init_cap = tf.reduce_sum(cap_predictions, (- 2))
iters = tf.constant(cfg.ROUTING_ITERS)
# Add a leading batch axis so the logits broadcast across the batch.
prior = tf.expand_dims(prior, 0)
def body(i, prior, cap_out):
# Coupling coefficients c_ij = softmax of the routing logits over classes.
c = tf.nn.softmax(prior, dim=1)
c_expand = tf.expand_dims(c, axis=(- 1))
s_t = tf.multiply(cap_predictions, c_expand)
s = tf.reduce_sum(s_t, axis=[2])
cap_out = squash(s)
# Agreement term <v_j, u_hat_ij> is added back onto the logits.
delta_prior = tf.reduce_sum(tf.multiply(tf.expand_dims(cap_out, axis=2), cap_predictions), axis=[(- 1)])
prior = (prior + delta_prior)
return [(i - 1), prior, cap_out]
condition = (lambda i, proir, cap_out: (i > 0))
# prior's batch dimension is dynamic, hence the explicit shape invariant.
(_, prior, digit_caps) = tf.while_loop(condition, body, [iters, prior, init_cap], shape_invariants=[iters.get_shape(), tf.TensorShape([None, 10, num_caps]), init_cap.get_shape()])
return digit_caps
def _dynamic_routingV1(self, prior, cap_predictions):
'\n doing dynamic routing with for loop as static implementation\n :arg\n prior: log prior for scaling with shape [10, num_caps]\n cap_predictions: predictions from layer below with shape [None, 10, num_caps, 16]\n :return\n digit_caps: digit capsules with shape [None, 10, 16]\n '
prior = tf.expand_dims(prior, 0)
# NOTE(review): xrange is Python-2-only; under Python 3 this needs range (or a shim).
for idx in xrange(cfg.ROUTING_ITERS):
with tf.name_scope(('routing_%s' % idx)):
# Coupling coefficients from the current routing logits.
c = tf.nn.softmax(prior, dim=1)
c_t = tf.expand_dims(c, axis=(- 1))
s_t = tf.multiply(cap_predictions, c_t)
s = tf.reduce_sum(s_t, axis=[2])
digit_caps = squash(s)
# Update logits with the agreement between outputs and predictions.
delta_prior = tf.reduce_sum(tf.multiply(tf.expand_dims(digit_caps, axis=2), cap_predictions), axis=[(- 1)])
prior = (prior + delta_prior)
return digit_caps
def _reconstruct(self, digit_caps):
'\n reconstruct from digit capsules with 3 fully connected layer\n :param\n digit_caps: digit capsules with shape [None, 10, 16]\n :return:\n out: out of reconstruction\n '
with tf.name_scope('reconstruct'):
y_ = tf.expand_dims(self._y_, axis=2)
# Mask out every capsule except the one of the true class (one-hot labels).
target_cap = (y_ * digit_caps)
target_cap = tf.reduce_sum(target_cap, axis=1)
fc = slim.fully_connected(target_cap, 512, weights_initializer=self._w_initializer)
fc = slim.fully_connected(fc, 1024, weights_initializer=self._w_initializer)
fc = slim.fully_connected(fc, 784, weights_initializer=self._w_initializer, activation_fn=None)
# Sigmoid maps logits to pixel intensities in [0, 1] (784 = 28*28 outputs).
out = tf.sigmoid(fc)
return out
def _add_loss(self, digit_caps):
'\n add the margin loss and reconstruction loss\n :arg\n digit_caps: output of digit capsule layer, shape [None, 10, 16]\n :return\n total_loss:\n '
with tf.name_scope('loss'):
# Class "probability" is the L2 norm of each digit capsule.
self._digit_caps_norm = tf.norm(digit_caps, ord=2, axis=2, name='digit_caps_norm')
with tf.name_scope('pos_loss'):
# Margin loss for present classes: max(0, m+ - ||v_c||)^2.
pos_loss = tf.maximum(0.0, (cfg.M_POS - tf.reduce_sum((self._digit_caps_norm * self._y_), axis=1)), name='pos_max')
pos_loss = tf.square(pos_loss, name='pos_square')
pos_loss = tf.reduce_mean(pos_loss)
tf.summary.scalar('pos_loss', pos_loss)
y_negs = (1.0 - self._y_)
with tf.name_scope('neg_loss'):
# Margin loss for absent classes, down-weighted by cfg.LAMBDA.
neg_loss = tf.maximum(0.0, ((self._digit_caps_norm * y_negs) - cfg.M_NEG))
neg_loss = (tf.reduce_sum(tf.square(neg_loss), axis=(- 1)) * cfg.LAMBDA)
neg_loss = tf.reduce_mean(neg_loss)
tf.summary.scalar('neg_loss', neg_loss)
reconstruct = self._reconstruct(digit_caps)
with tf.name_scope('l2_loss'):
# Pixel-wise squared reconstruction error, weighted by cfg.RECONSTRUCT_W.
reconstruct_loss = tf.reduce_sum(tf.square((self._x - reconstruct)), axis=(- 1))
reconstruct_loss = tf.reduce_mean(reconstruct_loss)
tf.summary.scalar('reconstruct_loss', reconstruct_loss)
total_loss = ((pos_loss + neg_loss) + (cfg.RECONSTRUCT_W * reconstruct_loss))
# Exposed separately so the adversarial mains can rebuild a targeted loss.
self.reconstruct_loss = reconstruct_loss
tf.summary.scalar('loss', total_loss)
return total_loss
def creat_architecture(self):
'creat architecture of the whole network'
# (Method name keeps the "creat" typo because external callers use it.)
self._x = tf.placeholder(tf.float32, [None, 784])
self._y_ = tf.placeholder(tf.float32, [None, 10])
self._w_initializer = tf.truncated_normal_initializer(stddev=0.1)
self._b_initializer = tf.zeros_initializer()
with tf.variable_scope('CapsNet', initializer=self._w_initializer):
self._build_net()
self._global_step = tf.Variable(0, trainable=False)
# Staircase exponential learning-rate decay driven by the global step.
learning_rate = tf.train.exponential_decay(cfg.LR, self._global_step, cfg.STEP_SIZE, cfg.DECAY_RATIO, staircase=True)
tf.summary.scalar('learning rate', learning_rate)
self._optimizer = tf.train.AdamOptimizer(learning_rate)
gradidents = self._optimizer.compute_gradients(self._loss)
tf.summary.scalar('grad_norm', tf.global_norm(gradidents))
self._train_op = self._optimizer.apply_gradients(gradidents, global_step=self._global_step)
self._accuracy()
self._summary_op = tf.summary.merge_all()
self.saver = tf.train.Saver()
# Separate summary writers for train and validation curves.
self.train_writer = tf.summary.FileWriter((cfg.TB_DIR + '/train'))
self.val_writer = tf.summary.FileWriter((cfg.TB_DIR + '/val'))
def _build_net(self):
'build the graph of the network'
with tf.name_scope('x_reshape'):
x_image = tf.reshape(self._x, [(- 1), 28, 28, 1])
with tf.variable_scope('conv1'):
# Conv1: 9x9 kernel, stride 1, 256 channels, VALID padding, ReLU.
w = tf.get_variable('w', shape=[9, 9, 1, 256], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
conv1 = tf.nn.conv2d(x_image, w, [1, 1, 1, 1], padding='VALID', name='conv1')
if cfg.USE_BIAS:
b = tf.get_variable('b', shape=[256], dtype=tf.float32, initializer=self._b_initializer)
conv1 = tf.nn.relu((conv1 + b))
else:
conv1 = tf.nn.relu(conv1)
# Track spatial size after each VALID conv: (dim - kernel) // stride + 1.
self._dim = (((self._dim - 9) // 1) + 1)
assert (self._dim == 20), 'after conv1, dimensions of feature mapshould be 20x20'
with tf.variable_scope('PrimaryCaps'):
self._dim = (((self._dim - 9) // 2) + 1)
# Total primary capsules = 6*6 grid * PRIMARY_CAPS_CHANNELS.
self._num_caps.append(((self._dim ** 2) * cfg.PRIMARY_CAPS_CHANNELS))
assert (self._dim == 6), 'dims for primary caps grid should be 6x6.'
"\n caps = []\n for idx in xrange(cfg.PRIMARY_CAPS_CHANNELS):\n # get a capsule with 8-D\n cap = self._capsule(conv1, 256, 8, idx)\n # cap with shape: [None, 1, 6, 6, 8]\n caps.append(cap)\n\n # concat all the primary capsules\n primary_caps = tf.concat(caps, axis=1)\n # primary_caps with shape: [None, 32, 6, 6, 8]\n with tf.name_scope('primary_cap_reshape'):\n # reshape and expand dims for broadcasting in dynamic routing\n primary_caps = tf.reshape(primary_caps,\n shape=[-1, 1, self._num_caps[1], 1, 8])\n # primary_caps with shape: [None, 1, 1152, 1, 8]\n "
# Single conv producing all 32x8 primary-capsule channels at once, then squash.
primary_caps = slim.conv2d(conv1, (32 * 8), 9, 2, padding='VALID', activation_fn=None)
primary_caps = tf.reshape(primary_caps, [(- 1), 1, self._num_caps[1], 1, 8])
primary_caps = squash(primary_caps)
with tf.variable_scope('digit_caps'):
self._digit_caps = self._dynamic_routing(primary_caps, 1)
self._loss = self._add_loss(self._digit_caps)
def _accuracy(self):
'Define the accuracy op: predicted class is the digit capsule with the largest L2 norm.'
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(self._y_, 1), tf.argmax(self._digit_caps_norm, 1))
correct_prediction = tf.cast(correct_prediction, tf.float32)
self.accuracy = tf.reduce_mean(correct_prediction)
tf.summary.scalar('accuracy', self.accuracy)
def train_with_summary(self, sess, batch_size=100, iters=0):
'Run one optimisation step; every cfg.PRINT_EVERY iters also log validation accuracy, and every cfg.SAVE_EVERY iters snapshot and run a full evaluation.'
batch = self._mnist.train.next_batch(batch_size)
(loss, _, train_acc, train_summary) = sess.run([self._loss, self._train_op, self.accuracy, self._summary_op], feed_dict={self._x: batch[0], self._y_: batch[1]})
if (((iters % cfg.PRINT_EVERY) == 0) and (iters > 0)):
val_batch = self._mnist.validation.next_batch(batch_size)
self.train_writer.add_summary(train_summary, iters)
self.train_writer.flush()
print(('iters: %d / %d, loss ==> %.4f ' % (iters, cfg.MAX_ITERS, loss)))
print(('train accuracy: %.4f' % train_acc))
# Evaluate on one validation batch and log it to the val writer.
(test_acc, test_summary) = sess.run([self.accuracy, self._summary_op], feed_dict={self._x: val_batch[0], self._y_: val_batch[1]})
print(('val accuracy: %.4f' % test_acc))
self.val_writer.add_summary(test_summary, iters)
self.val_writer.flush()
if (((iters % cfg.SAVE_EVERY) == 0) and (iters > 0)):
self.snapshot(sess, iters=iters)
self.test(sess)
def snapshot(self, sess, iters=0):
'Persist model parameters to cfg.TRAIN_DIR/capsnet-<iters>.'
save_path = (cfg.TRAIN_DIR + '/capsnet')
self.saver.save(sess, save_path, iters)
def test(self, sess, set='validation'):
'Compute accuracy over the chosen split ("test" or "validation") in batches of 100; samples beyond the last full batch are dropped.'
if (set == 'test'):
x = self._mnist.test.images
y_ = self._mnist.test.labels
else:
x = self._mnist.validation.images
y_ = self._mnist.validation.labels
acc = []
# NOTE(review): xrange is Python-2-only; use range under Python 3.
for i in tqdm(xrange((len(x) // 100)), desc=('calculating %s accuracy' % set)):
x_i = x[(i * 100):((i + 1) * 100)]
y_i = y_[(i * 100):((i + 1) * 100)]
ac = sess.run(self.accuracy, feed_dict={self._x: x_i, self._y_: y_i})
acc.append(ac)
all_ac = np.mean(np.array(acc))
print('whole {} accuracy: {}'.format(set, all_ac))
def adv_validation(self, sess, set, x_adv, max_iter, fname=None):
'Iteratively apply the x_adv op (max_iter steps) to a batch from the chosen split and save the resulting adversarial images to fname.'
if (set == 'test'):
x = self._mnist.test.images
y_ = self._mnist.test.labels
if (set == 'validation'):
x = self._mnist.validation.images
y_ = self._mnist.validation.labels
if (set == 'train'):
# NOTE(review): 'train' reads the validation split here — confirm intended.
x = self._mnist.validation.images
y_ = self._mnist.validation.labels
acc = []
# NOTE(review): xrange is Python-2-only; use range under Python 3.
for i in tqdm(xrange((len(x) // 100)), desc=('calculating %s accuracy' % set)):
x_i = x[(i * 100):((i + 1) * 100)]
y_i = y_[(i * 100):((i + 1) * 100)]
for j in range(max_iter):
x_i = sess.run(x_adv, feed_dict={self._x: x_i, self._y_: y_i})
image_save(x_i, fname)
# NOTE(review): this return appears to fire after the first batch, making
# the accuracy computation below unreachable dead code — confirm intended.
return
ac = sess.run(self.accuracy, feed_dict={self._x: x_i, self._y_: y_i})
acc.append(ac)
all_ac = np.mean(np.array(acc))
print('whole {} accuracy: {}'.format(set, all_ac))
|
def model_test():
    """Smoke test: build the CapsNet graph without any dataset attached."""
    net = CapsNet(None)
    net.creat_architecture()
    print('pass')
|
def main(_):
'Gradient-sign (FGSM-style) attack on a trained CapsNet; saves adversarial samples.'
# Per-step epsilon scaled so the total perturbation is max_epsilon/256.
eps = (((1.0 * FLAGS.max_epsilon) / 256.0) / FLAGS.max_iter)
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
tf.reset_default_graph()
caps_net = CapsNet(mnist)
caps_net.creat_architecture()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
train_dir = cfg.TRAIN_DIR
ckpt = tf.train.get_checkpoint_state(train_dir)
# Gradient of the loss w.r.t. the input; ascend it to craft adversarial inputs.
(dy_dx,) = tf.gradients(caps_net._loss, caps_net._x)
x_adv = tf.stop_gradient((caps_net._x + ((1 * eps) * tf.sign(dy_dx))))
x_adv = tf.clip_by_value(x_adv, 0.0, 1.0)
with tf.Session(config=config) as sess:
if (ckpt and cfg.USE_CKPT):
print(('Reading parameters from %s' % ckpt.model_checkpoint_path))
caps_net.saver.restore(sess, ckpt.model_checkpoint_path)
else:
print('Created model with fresh paramters.')
sess.run(tf.global_variables_initializer())
print(('Num params: %d' % sum((v.get_shape().num_elements() for v in tf.trainable_variables()))))
caps_net.train_writer.add_graph(sess.graph)
caps_net.adv_validation(sess, 'test', x_adv, FLAGS.max_iter, (((('samples/gsm_' + str(FLAGS.max_iter)) + '_') + str(FLAGS.max_epsilon)) + '.PNG'))
|
def model_test():
    """Smoke test: construct the CapsNet graph with no dataset."""
    network = CapsNet(None)
    network.creat_architecture()
    print('pass')
|
def main(_):
'Least-likely-class targeted attack on a trained CapsNet; saves adversarial samples.'
# Per-step epsilon scaled so the total perturbation is max_epsilon/256.
eps = (((1.0 * FLAGS.max_epsilon) / 256.0) / FLAGS.max_iter)
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
tf.reset_default_graph()
caps_net = CapsNet(mnist)
caps_net.creat_architecture()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
train_dir = cfg.TRAIN_DIR
ckpt = tf.train.get_checkpoint_state(train_dir)
# Target = class whose capsule norm is smallest (the least-likely class).
target = tf.one_hot(tf.argmin(caps_net._digit_caps_norm, 1), 10, on_value=1.0, off_value=0.0)
# Rebuild the margin loss against the target class (mirrors CapsNet._add_loss).
pos_loss = tf.maximum(0.0, (cfg.M_POS - tf.reduce_sum((caps_net._digit_caps_norm * target), axis=1)), name='pos_max')
pos_loss = tf.square(pos_loss, name='pos_square')
pos_loss = tf.reduce_mean(pos_loss)
y_negs = (1.0 - target)
neg_loss = tf.maximum(0.0, ((caps_net._digit_caps_norm * y_negs) - cfg.M_NEG))
neg_loss = (tf.reduce_sum(tf.square(neg_loss), axis=(- 1)) * cfg.LAMBDA)
neg_loss = tf.reduce_mean(neg_loss)
loss = ((pos_loss + neg_loss) + (cfg.RECONSTRUCT_W * caps_net.reconstruct_loss))
# Descend the targeted loss (note the minus sign) to move toward the target class.
(dy_dx,) = tf.gradients(loss, caps_net._x)
x_adv = tf.stop_gradient((caps_net._x - ((1 * eps) * tf.sign(dy_dx))))
x_adv = tf.clip_by_value(x_adv, 0.0, 1.0)
with tf.Session(config=config) as sess:
if (ckpt and cfg.USE_CKPT):
print(('Reading parameters from %s' % ckpt.model_checkpoint_path))
caps_net.saver.restore(sess, ckpt.model_checkpoint_path)
else:
print('Created model with fresh paramters.')
sess.run(tf.global_variables_initializer())
print(('Num params: %d' % sum((v.get_shape().num_elements() for v in tf.trainable_variables()))))
caps_net.train_writer.add_graph(sess.graph)
caps_net.adv_validation(sess, 'test', x_adv, FLAGS.max_iter, (((('samples/llcm_' + str(FLAGS.max_iter)) + '_') + str(FLAGS.max_epsilon)) + '.PNG'))
|
def model_test():
    """Smoke test: verify the CapsNet graph can be built."""
    m = CapsNet(None)
    m.creat_architecture()
    print('pass')
|
def main(_):
'Train CapsNet on MNIST for cfg.MAX_ITERS iterations, then snapshot and evaluate on the test split.'
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
tf.reset_default_graph()
caps_net = CapsNet(mnist)
caps_net.creat_architecture()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
train_dir = cfg.TRAIN_DIR
ckpt = tf.train.get_checkpoint_state(train_dir)
with tf.Session(config=config) as sess:
if (ckpt and cfg.USE_CKPT):
print(('Reading parameters from %s' % ckpt.model_checkpoint_path))
caps_net.saver.restore(sess, ckpt.model_checkpoint_path)
else:
print('Created model with fresh paramters.')
sess.run(tf.global_variables_initializer())
print(('Num params: %d' % sum((v.get_shape().num_elements() for v in tf.trainable_variables()))))
caps_net.train_writer.add_graph(sess.graph)
iters = 0
tic = time.time()
# NOTE(review): xrange is Python-2-only; use range under Python 3.
for iters in xrange(cfg.MAX_ITERS):
# Lightweight in-place progress counter within the current print window.
sys.stdout.write(('>>> %d / %d \r' % ((iters % cfg.PRINT_EVERY), cfg.PRINT_EVERY)))
sys.stdout.flush()
caps_net.train_with_summary(sess, batch_size=100, iters=iters)
if (((iters % cfg.PRINT_EVERY) == 0) and (iters > 0)):
toc = time.time()
print(('average time: %.2f secs' % (toc - tic)))
tic = time.time()
# Final checkpoint and test-set evaluation after the training loop.
caps_net.snapshot(sess, iters)
caps_net.test(sess, 'test')
|
def download(url, dirpath):
    """Download `url` into `dirpath`, printing a textual progress bar.

    The destination filename is the last path component of the URL.
    Returns the path of the downloaded file.

    Fix: the output file is now opened with a context manager so the handle
    is closed even when the download raises mid-transfer.
    """
    filename = url.split('/')[(- 1)]
    filepath = os.path.join(dirpath, filename)
    u = urllib.request.urlopen(url)
    filesize = int(u.headers['Content-Length'])
    print(('Downloading: %s Bytes: %s' % (filename, filesize)))
    downloaded = 0
    block_sz = 8192
    status_width = 70
    with open(filepath, 'wb') as f:
        while True:
            buf = u.read(block_sz)
            if (not buf):
                # End of stream: finish the progress line.
                print('')
                break
            else:
                # '\r' rewinds the cursor so the bar redraws in place.
                print('', end='\r')
                downloaded += len(buf)
                f.write(buf)
                status = ((('[%-' + str((status_width + 1))) + 's] %3.2f%%') % ((('=' * int(((float(downloaded) / filesize) * status_width))) + '>'), ((downloaded * 100.0) / filesize)))
                print(status, end='')
                sys.stdout.flush()
    return filepath
|
def download_file_from_google_drive(id, destination):
    """Fetch a Google Drive file by id, handling the large-file confirm token."""
    URL = 'https://docs.google.com/uc?export=download'
    session = requests.Session()
    response = session.get(URL, params={'id': id}, stream=True)
    # Large files require a second request carrying the confirmation token.
    token = get_confirm_token(response)
    if token:
        response = session.get(URL, params={'id': id, 'confirm': token}, stream=True)
    save_response_content(response, destination)
|
def get_confirm_token(response):
    """Return the value of the first 'download_warning*' cookie, or None."""
    return next(
        (value for (key, value) in response.cookies.items() if key.startswith('download_warning')),
        None,
    )
|
def save_response_content(response, destination, chunk_size=(32 * 1024)):
    """Stream an HTTP response body to `destination`, showing a tqdm bar."""
    total_size = int(response.headers.get('content-length', 0))
    with open(destination, 'wb') as out:
        for chunk in tqdm(response.iter_content(chunk_size), total=total_size, unit='B', unit_scale=True, desc=destination):
            # Skip keep-alive chunks, which arrive empty.
            if chunk:
                out.write(chunk)
|
def unzip(filepath):
    """Extract `filepath` into its containing directory, then delete the archive."""
    print(('Extracting: ' + filepath))
    target_dir = os.path.dirname(filepath)
    with zipfile.ZipFile(filepath) as archive:
        archive.extractall(target_dir)
    os.remove(filepath)
|
def download_celeb_a(dirpath):
    """Download and unpack the aligned CelebA images into dirpath/celebA."""
    data_dir = 'celebA'
    if os.path.exists(os.path.join(dirpath, data_dir)):
        print('Found Celeb-A - skip')
        return
    filename, drive_id = ('img_align_celeba.zip', '0B7EVK8r0v71pZjFTYXZWM3FlRnM')
    save_path = os.path.join(dirpath, filename)
    if os.path.exists(save_path):
        print('[*] {} already exists'.format(save_path))
    else:
        download_file_from_google_drive(drive_id, save_path)
    # Extract, then rename the archive's top-level directory to `celebA`.
    with zipfile.ZipFile(save_path) as zf:
        zip_dir = zf.namelist()[0]
        zf.extractall(dirpath)
    os.remove(save_path)
    os.rename(os.path.join(dirpath, zip_dir), os.path.join(dirpath, data_dir))
|
def _list_categories(tag):
    """Return the LSUN category list for `tag`, parsed from the LSUN site."""
    listing_url = ('http://lsun.cs.princeton.edu/htbin/list.cgi?tag=' + tag)
    resp = urllib.request.urlopen(listing_url)
    return json.loads(resp.read())
|
def _download_lsun(out_dir, category, set_name, tag):
    """Download one LSUN lmdb zip into `out_dir` via curl."""
    url = 'http://lsun.cs.princeton.edu/htbin/download.cgi?tag={tag}&category={category}&set={set_name}'.format(tag=tag, category=category, set_name=set_name)
    print(url)
    # The shared test set has no category component in its filename.
    if set_name == 'test':
        out_name = 'test_lmdb.zip'
    else:
        out_name = '{category}_{set_name}_lmdb.zip'.format(category=category, set_name=set_name)
    out_path = os.path.join(out_dir, out_name)
    print('Downloading', category, set_name, 'set')
    subprocess.call(['curl', url, '-o', out_path])
|
def download_lsun(dirpath):
    """Fetch LSUN bedroom train/val plus the shared test set into dirpath/lsun."""
    data_dir = os.path.join(dirpath, 'lsun')
    if os.path.exists(data_dir):
        print('Found LSUN - skip')
        return
    os.mkdir(data_dir)
    tag = 'latest'
    for category in ['bedroom']:
        _download_lsun(data_dir, category, 'train', tag)
        _download_lsun(data_dir, category, 'val', tag)
    # The test set is shared across categories.
    _download_lsun(data_dir, '', 'test', tag)
|
def download_mnist(dirpath):
    """Download and gunzip the four MNIST archive files into dirpath/mnist.

    Skips everything when the target directory already exists. Uses curl and
    gzip via subprocess, mirroring the other download helpers in this file.
    """
    data_dir = os.path.join(dirpath, 'mnist')
    if os.path.exists(data_dir):
        print('Found MNIST - skip')
        return
    os.mkdir(data_dir)
    url_base = 'http://yann.lecun.com/exdb/mnist/'
    file_names = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz']
    for file_name in file_names:
        # Plain concatenation: the old `.format(**locals())` call had no
        # placeholders to fill and would break on brace characters.
        url = url_base + file_name
        print(url)
        out_path = os.path.join(data_dir, file_name)
        print('Downloading ', file_name)
        subprocess.call(['curl', url, '-o', out_path])
        print('Decompressing ', file_name)
        subprocess.call(['gzip', '-d', out_path])
|
def prepare_data_dir(path='./data'):
    """Ensure the data directory exists.

    Uses makedirs with exist_ok, which avoids the check-then-create race of
    the previous `if not exists: mkdir` pattern and also creates any missing
    parent directories.
    """
    os.makedirs(path, exist_ok=True)
|
def main(_):
'Entry point for DCGAN: prepare directories, build the model, then train or visualise from a checkpoint.'
pp.pprint(flags.FLAGS.__flags)
# Default unspecified widths to the corresponding heights (square images).
if (FLAGS.input_width is None):
FLAGS.input_width = FLAGS.input_height
if (FLAGS.output_width is None):
FLAGS.output_width = FLAGS.output_height
if (not os.path.exists(FLAGS.checkpoint_dir)):
os.makedirs(FLAGS.checkpoint_dir)
if (not os.path.exists(FLAGS.sample_dir)):
os.makedirs(FLAGS.sample_dir)
run_config = tf.ConfigProto()
run_config.gpu_options.allow_growth = True
with tf.Session(config=run_config) as sess:
dcgan = DCGAN(sess, input_width=FLAGS.input_width, input_height=FLAGS.input_height, output_width=FLAGS.output_width, output_height=FLAGS.output_height, batch_size=FLAGS.batch_size, sample_num=FLAGS.batch_size, dataset_name=FLAGS.dataset, input_fname_pattern=FLAGS.input_fname_pattern, crop=FLAGS.crop, checkpoint_dir=FLAGS.checkpoint_dir, sample_dir=FLAGS.sample_dir)
show_all_variables()
if FLAGS.train:
dcgan.train(FLAGS)
elif (not dcgan.load(FLAGS.checkpoint_dir)[0]):
# Test mode requires an existing checkpoint.
raise Exception('[!] Train a model first, then run test mode')
OPTION = FLAGS.options
visualize(sess, dcgan, FLAGS, OPTION, FLAGS.gan_set)
|
class batch_norm(object):
# Thin callable wrapper around tf.contrib.layers.batch_norm that stores its hyper-parameters.
def __init__(self, epsilon=1e-05, momentum=0.9, name='batch_norm'):
with tf.variable_scope(name):
self.epsilon = epsilon
self.momentum = momentum
self.name = name
def __call__(self, x, train=True):
# `train` selects batch statistics (True) vs. moving averages (False).
return tf.contrib.layers.batch_norm(x, decay=self.momentum, updates_collections=None, epsilon=self.epsilon, scale=True, is_training=train, scope=self.name)
|
def conv_cond_concat(x, y):
'Concatenate conditioning vector on feature map axis.'
# Broadcasts y across x's spatial dimensions, then concatenates on channels.
# NOTE(review): `concat` is not defined in this chunk — presumably a
# TF-version compatibility shim defined elsewhere in the project; confirm.
x_shapes = x.get_shape()
y_shapes = y.get_shape()
return concat([x, (y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]]))], 3)
|
def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name='conv2d'):
'Standard DCGAN conv layer: truncated-normal weights, zero biases, SAME padding.'
with tf.variable_scope(name):
w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[(- 1)], output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev))
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
# reshape is a value no-op; it pins the static shape after bias_add.
conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
return conv
|
def deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name='deconv2d', with_w=False):
'Transposed convolution; when with_w is True also returns the weight and bias variables.'
with tf.variable_scope(name):
w = tf.get_variable('w', [k_h, k_w, output_shape[(- 1)], input_.get_shape()[(- 1)]], initializer=tf.random_normal_initializer(stddev=stddev))
try:
deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
except AttributeError:
# Older TensorFlow releases exposed this op as tf.nn.deconv2d.
deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
biases = tf.get_variable('biases', [output_shape[(- 1)]], initializer=tf.constant_initializer(0.0))
deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
if with_w:
return (deconv, w, biases)
else:
return deconv
|
def lrelu(x, leak=0.2, name='lrelu'):
'Leaky ReLU: max(x, leak * x).'
# NOTE(review): the `name` parameter is accepted but unused here.
return tf.maximum(x, (leak * x))
|
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
'Fully-connected layer y = x*W + b; when with_w is True also returns W and b.'
shape = input_.get_shape().as_list()
with tf.variable_scope((scope or 'Linear')):
matrix = tf.get_variable('Matrix', [shape[1], output_size], tf.float32, tf.random_normal_initializer(stddev=stddev))
bias = tf.get_variable('bias', [output_size], initializer=tf.constant_initializer(bias_start))
if with_w:
return ((tf.matmul(input_, matrix) + bias), matrix, bias)
else:
return (tf.matmul(input_, matrix) + bias)
|
def create_dataset(file_path):
'Split the CUHK-03 HDF5 "labeled" images into train/validation/test groups for camera views a (images 0-4) and b (images 5-9) and write them to cuhk-03.h5.'
with h5py.File(file_path, 'r') as f, h5py.File('cuhk-03.h5') as fw:
# testsets holds two predefined [campair, identity] index lists
# (1-based from MATLAB, hence the -1).
val_index = (f[f['testsets'][0][0]][:].T - 1).tolist()
tes_index = (f[f['testsets'][0][1]][:].T - 1).tolist()
fwa = fw.create_group('a')
fwb = fw.create_group('b')
fwat = fwa.create_group('train')
fwav = fwa.create_group('validation')
fwae = fwa.create_group('test')
fwbt = fwb.create_group('train')
fwbv = fwb.create_group('validation')
fwbe = fwb.create_group('test')
temp = []
count_t = 0
count_v = 0
count_e = 0
# Iterate over the 3 camera pairs and each identity within a pair.
for i in range(3):
for k in range(f[f['labeled'][0][i]][0].size):
print(i, k)
if ([i, k] in val_index):
# Images are stored (c, w, h) by MATLAB; transpose, resize to
# 60x160 and scale to [0, 1] before stacking per identity.
for j in range(5):
if (len(f[f[f['labeled'][0][i]][j][k]].shape) == 3):
temp.append((np.array(Image.fromarray(f[f[f['labeled'][0][i]][j][k]][:].transpose(2, 1, 0)).resize((60, 160))) / 255.0))
fwav.create_dataset(str(count_v), data=np.array(temp))
temp = []
for j in range(5, 10):
if (len(f[f[f['labeled'][0][i]][j][k]].shape) == 3):
temp.append((np.array(Image.fromarray(f[f[f['labeled'][0][i]][j][k]][:].transpose(2, 1, 0)).resize((60, 160))) / 255.0))
fwbv.create_dataset(str(count_v), data=np.array(temp))
temp = []
count_v += 1
if ([i, k] in tes_index):
for j in range(5):
if (len(f[f[f['labeled'][0][i]][j][k]].shape) == 3):
temp.append((np.array(Image.fromarray(f[f[f['labeled'][0][i]][j][k]][:].transpose(2, 1, 0)).resize((60, 160))) / 255.0))
fwae.create_dataset(str(count_e), data=np.array(temp))
temp = []
for j in range(5, 10):
if (len(f[f[f['labeled'][0][i]][j][k]].shape) == 3):
temp.append((np.array(Image.fromarray(f[f[f['labeled'][0][i]][j][k]][:].transpose(2, 1, 0)).resize((60, 160))) / 255.0))
fwbe.create_dataset(str(count_e), data=np.array(temp))
temp = []
count_e += 1
# Everything not in a predefined split goes to train.
if (([i, k] not in val_index) and ([i, k] not in tes_index)):
for j in range(5):
if (len(f[f[f['labeled'][0][i]][j][k]].shape) == 3):
temp.append((np.array(Image.fromarray(f[f[f['labeled'][0][i]][j][k]][:].transpose(2, 1, 0)).resize((60, 160))) / 255.0))
fwat.create_dataset(str(count_t), data=np.array(temp))
temp = []
for j in range(5, 10):
if (len(f[f[f['labeled'][0][i]][j][k]].shape) == 3):
temp.append((np.array(Image.fromarray(f[f[f['labeled'][0][i]][j][k]][:].transpose(2, 1, 0)).resize((60, 160))) / 255.0))
fwbt.create_dataset(str(count_t), data=np.array(temp))
temp = []
count_t += 1
|
class DataGenerator(Dataset):
# Dataset of re-ID images; sample ids are '<folder>_<filename>' strings.
def __init__(self, root, data_transform=None, image_dir=None, target_transform=None):
'Index all images under image_dir. In "train_new" mode the special gen_0000 folder holds generated samples, flagged with img_flag=1.'
super(DataGenerator, self).__init__()
assert (image_dir is not None)
self.image_dir = image_dir
self.samples = []
self.img_label = []
self.img_flag = []
self.data_transform = data_transform
self.target_transform = target_transform
self.train_val = root
if (root == 'train_new'):
for folder in os.listdir(self.image_dir):
fdir = ((self.image_dir + '/') + folder)
if (folder == 'gen_0000'):
for files in os.listdir(fdir):
temp = ((folder + '_') + files)
# Label parsed from the last 4 chars of the folder name ('0000' -> 0).
self.img_label.append(int(folder[(- 4):]))
self.img_flag.append(1)
self.samples.append(temp)
else:
for files in os.listdir(fdir):
temp = ((folder + '_') + files)
# Regular folders are named by their numeric identity label.
self.img_label.append(int(folder))
self.img_flag.append(0)
self.samples.append(temp)
else:
for folder in os.listdir(self.image_dir):
fdir = ((self.image_dir + '/') + folder)
for files in os.listdir(fdir):
temp = ((folder + '_') + files)
self.img_label.append(int(folder))
self.img_flag.append(0)
self.samples.append(temp)
def __len__(self):
# Number of indexed image samples.
return len(self.samples)
def __getitem__(self, idx):
    """Load sample `idx`; returns a dict with transformed 'img', 'label' and 'flag'."""
    temp = self.samples[idx]
    if self.img_flag[idx] == 1:
        # Generated images live in the fixed 'gen_0000' folder; sample ids
        # are '<folder>_<filename>', so strip the 9-char 'gen_0000_' prefix.
        foldername = 'gen_0000'
        filename = temp[9:]
    else:
        # Regular ids are '<4-digit folder>_<filename>'.
        foldername = temp[:4]
        filename = temp[5:]
    img = default_loader(self.image_dir + '/' + foldername + '/' + filename)
    # The original had identical train/eval branches here, so the result is
    # the same regardless of self.train_val — collapsed into one return.
    return {'img': self.data_transform(img), 'label': self.img_label[idx], 'flag': self.img_flag[idx]}
|
class Dataset():
# Path/metadata helper for person re-ID datasets (market1501, duke, cuhk03, viper).
def __init__(self, root='/home/paul/datasets', dataset='market1501'):
'Store the dataset name and root directory; concrete paths are composed lazily by the accessor methods.'
self.dataset = dataset
self.root = root
def train_path(self):
if ((self.dataset == 'market1501') or (self.dataset == 'duke')):
return os.path.join(self.root, self.dataset, 'bounding_box_train')
elif (self.dataset == 'cuhk03'):
return os.path.join(self.root, self.dataset, 'bounding_box_train')
elif (self.dataset == 'viper'):
return os.path.join(self.root, self.dataset, 'bounding_box_train')
else:
raise ValueError(('Unknown train set for %s' % self.dataset))
def test_path(self):
if ((self.dataset == 'market1501') or (self.dataset == 'duke')):
return os.path.join(self.root, self.dataset, 'bounding_box_test')
elif ((self.dataset == 'cuhk03') or (self.dataset == 'viper')):
return os.path.join(self.root, self.dataset, 'bounding_box_test')
else:
raise ValueError(('Unknown test set for %s' % self.dataset))
def gallery_path(self):
return self.testset()
def query_path(self):
if ((self.dataset == 'market1501') or (self.dataset == 'duke')):
return os.path.join(self.root, self.dataset, 'query')
elif ((self.dataset == 'cuhk03') or (self.dataset == 'viper')):
return os.path.join(self.root, self.dataset, 'query')
else:
raise ValueError(('Unknown query set for %s' % self.dataset))
def gan_path(self):
return os.path.join('/home/paul/generated', self.dataset)
def dataset_path(self):
return os.path.join(self.root, self.dataset)
def n_classe(self):
if (self.dataset == 'market1501'):
return 751
elif (self.dataset == 'duke'):
return 702
elif (self.dataset == 'cuhk03'):
return 767
elif (self.dataset == 'viper'):
return 316
else:
raise ValueError(('Unknown n_classe set for %s' % self.dataset))
def root_path(self):
return self.root
def gt_set(self):
if (self.dataset == 'market1501'):
return os.path.join(self.root, self.dataset, 'gt_bbox')
else:
raise ValueError(('Unknown hand-drawn bounding boxes for %s' % self.dataset))
def train_list(self):
if ((self.dataset == 'market1501') or (self.dataset == 'duke') or (self.dataset == 'cuhk03')):
train_list = os.path.join(self.root, self.dataset, 'train.list')
elif (self.dataset == 'viper'):
train_list = os.path.join(self.root, self.dataset, 'train.list')
else:
raise ValueError(('Unknown train bounding boxes for %s' % self.dataset))
if (not os.path.exists(train_list)):
raise FileNotFoundError(('%s not found' % train_list))
return train_list
def cluster_path(self):
if ((self.dataset == 'market1501') or (self.dataset == 'duke') or (self.dataset == 'cuhk03') or (self.dataset == 'viper')):
return os.path.join('/home/paul', 'clustering', self.dataset)
else:
raise ValueError(('Unknown cluster path for %s' % self.dataset))
def n_training_set(self):
if (self.dataset == 'market1501'):
data_list = glob.glob(os.path.join(self.train_path(), '*.jpg'))
n = len(data_list)
assert (n == 12936)
elif (self.dataset == 'duke'):
n = 16522
else:
raise ValueError(('Unknow training set size for %s' % self.dataset))
return n
def n_gan_set(self):
if (self.dataset == 'market1501'):
data_list = glob.glob(os.path.join(self.gan_path(), '*.jpg'))
n = len(data_list)
else:
raise ValueError(('Unknow generated set size for %s' % self.dataset))
return n
def test_num(self):
if (self.dataset == 'market1501'):
return 19732
elif (self.dataset == 'duke'):
return 17661
elif (self.dataset == 'cuhk03'):
return 6751
elif (self.dataset == 'viper'):
return 316
else:
raise ValueError(('Unknown test num for % dataset' % self.dataset))
def query_num(self):
if (self.dataset == 'market1501'):
return 3368
elif (self.dataset == 'duke'):
return 2228
elif (self.dataset == 'cuhk03'):
return 6751
elif (self.dataset == 'viper'):
return 316
else:
raise ValueError(('Unknown query num for % dataset' % self.dataset))
|
def read_image(img_path):
    """Keep reading image until succeed.

    This can avoid IOError incurred by heavy IO process. Raises IOError
    immediately when the path does not exist at all.
    """
    if not osp.exists(img_path):
        raise IOError('{} does not exist'.format(img_path))
    while True:
        try:
            # success: return the decoded RGB image straight away
            return Image.open(img_path).convert('RGB')
        except IOError:
            print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path))
|
class ImageDataset(Dataset):
    """Image Person ReID Dataset.

    Wraps a list of (img_path, person_id, camera_id) tuples and loads the
    image lazily on access.
    """

    def __init__(self, dataset, transform=None):
        self.dataset = dataset
        self.transform = transform

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img_path, pid, camid = self.dataset[index]
        image = read_image(img_path)
        if self.transform is not None:
            image = self.transform(image)
        return image, pid, camid
|
class VideoDataset(Dataset):
    """Video Person ReID Dataset.

    Note batch data has shape (batch, seq_len, channel, height, width).
    Each item of ``dataset`` is (img_paths, pid, camid).
    """
    sample_methods = ['evenly', 'random', 'all']

    def __init__(self, dataset, seq_len=15, sample='evenly', transform=None):
        self.dataset = dataset
        self.seq_len = seq_len
        self.sample = sample        # one of sample_methods
        self.transform = transform

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        (img_paths, pid, camid) = self.dataset[index]
        num = len(img_paths)
        if self.sample == 'random':
            # Randomly sample seq_len items from num items; if num is
            # smaller than seq_len, replicate items.
            indices = np.arange(num)
            replace = num < self.seq_len
            indices = np.sort(np.random.choice(indices, size=self.seq_len, replace=replace))
        elif self.sample == 'evenly':
            # Evenly sample seq_len items from num items.
            if num >= self.seq_len:
                num -= (num % self.seq_len)
                # BUG FIX: the original used the float step num / seq_len,
                # making np.arange yield float indices that cannot index the
                # img_paths sequence. num is now a multiple of seq_len, so the
                # integer step produces exactly the same seq_len positions.
                indices = np.arange(0, num, num // self.seq_len)
            else:
                # too few frames: pad by repeating the last one
                indices = np.arange(0, num)
                num_pads = self.seq_len - num
                indices = np.concatenate([indices, (np.ones(num_pads).astype(np.int32) * (num - 1))])
            assert len(indices) == self.seq_len
        elif self.sample == 'all':
            # Sample all items; seq_len is useless now and batch_size needs
            # to be set to 1.
            indices = np.arange(num)
        else:
            raise KeyError('Unknown sample method: {}. Expected one of {}'.format(self.sample, self.sample_methods))
        imgs = []
        for index in indices:
            img = read_image(img_paths[index])
            if self.transform is not None:
                img = self.transform(img)
            imgs.append(img.unsqueeze(0))
        # stack into (seq_len, C, H, W)
        return (torch.cat(imgs, dim=0), pid, camid)
|
def evaluate(qf, ql, qc, gf, gl, gc):
    """Rank the gallery against one query and return (ap, cmc).

    qf/ql/qc: query feature vector, label, camera id.
    gf/gl/gc: gallery feature matrix, labels, camera ids.
    """
    # similarity scores, best match first
    order = np.argsort(np.dot(gf, qf))[::-1]
    same_label = np.argwhere(gl == ql)
    same_camera = np.argwhere(gc == qc)
    # good: same identity seen from a different camera
    good_index = np.setdiff1d(same_label, same_camera, assume_unique=True)
    # junk: same identity from the same camera, plus distractors (label -1)
    junk_index = np.append(np.intersect1d(same_label, same_camera),
                           np.argwhere(gl == (-1)))
    return compute_mAP(order, good_index, junk_index)
|
def compute_mAP(index, good_index, junk_index):
    """Average precision and CMC curve for one ranked query.

    index: gallery indices sorted best-first.
    good_index: indices counting as correct matches.
    junk_index: indices removed from the ranking before scoring.
    Returns (ap, cmc) where cmc is a torch.IntTensor over ranks;
    cmc[0] == -1 marks a query with no valid ground truth.
    """
    cmc = torch.IntTensor(len(index)).zero_()
    if good_index.size == 0:
        # no valid ground truth for this query
        cmc[0] = -1
        return 0, cmc
    # drop junk entries from the ranking
    keep = np.in1d(index, junk_index, invert=True)
    ranking = index[keep]
    ngood = len(good_index)
    hit_rows = np.argwhere(np.in1d(ranking, good_index)).flatten()
    # every rank at or below the first hit is a CMC success
    cmc[hit_rows[0]:] = 1
    # trapezoidal interpolation of the precision/recall curve
    ap = 0
    for i in range(ngood):
        row = hit_rows[i]
        d_recall = 1.0 / ngood
        precision = (i + 1) * 1.0 / (row + 1)
        old_precision = (i * 1.0 / row) if row != 0 else 1.0
        ap += d_recall * (old_precision + precision) / 2
    return ap, cmc
|
def weights_init_kaiming(m):
    """Kaiming-style init dispatched on the layer's class name.

    Conv*: kaiming normal (fan_in) on weights only.
    Linear: kaiming normal (fan_out) on weights, zero bias.
    BatchNorm1d: weights ~ N(1.0, 0.02), zero bias.
    """
    name = m.__class__.__name__
    if 'Conv' in name:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif 'Linear' in name:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
        init.constant_(m.bias.data, 0.0)
    elif 'BatchNorm1d' in name:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
|
def weights_init_classifier(m):
    """Classifier init: small-std normal weights, zero bias (Linear only)."""
    if 'Linear' in m.__class__.__name__:
        init.normal_(m.weight.data, std=0.001)
        init.constant_(m.bias.data, 0.0)
|
class ClassBlock(nn.Module):
    """Bottleneck head: Linear+BatchNorm1d(+LeakyReLU)(+Dropout), then a
    linear classifier over ``class_num`` classes."""

    def __init__(self, input_dim, class_num, dropout=True, relu=True, num_bottleneck=512):
        super(ClassBlock, self).__init__()
        layers = [nn.Linear(input_dim, num_bottleneck),
                  nn.BatchNorm1d(num_bottleneck)]
        if relu:
            layers.append(nn.LeakyReLU(0.1))
        if dropout:
            layers.append(nn.Dropout(p=0.5))
        self.add_block = nn.Sequential(*layers)
        self.add_block.apply(weights_init_kaiming)
        self.classifier = nn.Sequential(nn.Linear(num_bottleneck, class_num))
        self.classifier.apply(weights_init_classifier)

    def forward(self, x):
        # bottleneck features, then class logits
        return self.classifier(self.add_block(x))
|
class ft_net(nn.Module):
    """ResNet-50 backbone (ImageNet weights) with a ClassBlock head."""

    def __init__(self, class_num):
        super(ft_net, self).__init__()
        model_ft = models.resnet50(pretrained=True)
        # adaptive pooling makes the head independent of input resolution
        model_ft.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.model = model_ft
        self.classifier = ClassBlock(2048, class_num)

    def forward(self, x):
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x = self.model.layer4(x)
        x = self.model.avgpool(x)
        # BUG FIX: torch.squeeze(x) also removed the batch dimension when
        # batch_size == 1, breaking BatchNorm1d inside ClassBlock; flatten
        # to (batch, 2048) instead (identical result for batch >= 2).
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
|
class ft_net_dense(nn.Module):
    """DenseNet-121 backbone (ImageNet weights) with a ClassBlock head."""

    def __init__(self, class_num):
        super().__init__()
        model_ft = models.densenet121(pretrained=True)
        # NOTE(review): assigning onto the features Sequential registers the
        # pool as its last child module — confirm it runs in forward().
        model_ft.features.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        model_ft.fc = nn.Sequential()
        self.model = model_ft
        self.classifier = ClassBlock(1024, class_num)

    def forward(self, x):
        x = self.model.features(x)
        # BUG FIX: torch.squeeze(x) also removed the batch dimension when
        # batch_size == 1, breaking BatchNorm1d inside ClassBlock; flatten
        # to (batch, 1024) instead (identical result for batch >= 2).
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
|
class ft_net_middle(nn.Module):
    """ResNet-50 backbone using concatenated layer3 (1024-d) and layer4
    (2048-d) pooled features, followed by a ClassBlock head."""

    def __init__(self, class_num):
        super(ft_net_middle, self).__init__()
        model_ft = models.resnet50(pretrained=True)
        model_ft.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.model = model_ft
        # head consumes mid-level + high-level features
        self.classifier = ClassBlock((2048 + 1024), class_num)

    def forward(self, x):
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x0 = self.model.avgpool(x)   # mid-level (1024-d) features
        x = self.model.layer4(x)
        x1 = self.model.avgpool(x)   # high-level (2048-d) features
        x = torch.cat((x0, x1), 1)
        # BUG FIX: torch.squeeze(x) also removed the batch dimension when
        # batch_size == 1, breaking BatchNorm1d inside ClassBlock; flatten
        # to (batch, 3072) instead (identical result for batch >= 2).
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
|
def generate_labels_for_gan():
    """Write per-cluster label lists (gan<k>.list) for GAN-generated images.

    Builds a prefix -> person-id lookup from the viper train.list (the first
    image of each new label supplies the '<base>_...' prefix), then, for each
    of the N_CLUSTER cluster directories, writes one label line per distinct
    image prefix found there.

    Relies on module-level globals ``N_CLUSTER`` and ``dataset``.
    Raises KeyError if a cluster image prefix is absent from train.list.
    """
    image_labels = {}
    old_lbl = -1
    # 'with' ensures the file is closed even if a line fails to parse
    with open('/home/paul/datasets/viper/train.list', 'r') as f:
        for line in f:
            img, lbl = line.strip().split()
            lbl = int(lbl)
            if lbl != old_lbl:
                # first occurrence of a label: remember its image prefix
                image_labels[img.split('_')[0]] = lbl
                old_lbl = lbl
    for n_cluster in range(N_CLUSTER):
        cluster_path = os.path.join(dataset.cluster_path(), ('cluster_%s' % n_cluster))
        # count images per prefix within this cluster (insertion order kept)
        cluster_labels = {}
        for img in sorted(glob.glob(os.path.join(cluster_path, '*.jpg'))):
            base = os.path.basename(img).split('_')[0]
            cluster_labels[base] = cluster_labels.get(base, 0) + 1
        with open(os.path.join(dataset.cluster_path(), ('gan%s.list' % n_cluster)), 'w') as out:
            for base in cluster_labels:
                print(base)
                out.write('%s\n' % image_labels[base])
    # BUG FIX: the original printed image_labels[i] after the loops using a
    # stale loop variable (NameError when the last cluster was empty);
    # that stray debug print was removed.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.