|
|
|
|
|
import json
import os.path as osp
import pickle
import time
from random import shuffle
from threading import Thread
from xml.dom import minidom

import caffe
import numpy as np
import scipy.misc
import scipy.sparse
import skimage.io
from PIL import Image

from tools import SimpleTransformer
|
|
|
|
|
|
|
|
class PascalMultilabelDataLayerSync(caffe.Layer):

    """
    A synchronous Caffe data layer that feeds PASCAL images and their
    20-way multilabel vectors for training a multilabel classifier.
    """

    def setup(self, bottom, top):
        """Parse the layer parameters and shape the output blobs."""
        self.top_names = ['data', 'label']

        # NOTE(review): eval() on param_str executes arbitrary Python from
        # the prototxt; fine for trusted configs, never for untrusted input.
        params = eval(self.param_str)
        check_params(params)

        self.batch_size = params['batch_size']

        # The loader does the actual reading; no prefetch thread is used.
        self.batch_loader = BatchLoader(params, None)

        # data blob: N x 3 x H x W; label blob: N x 20 (one slot per class).
        top[0].reshape(
            self.batch_size, 3, params['im_shape'][0], params['im_shape'][1])
        top[1].reshape(self.batch_size, 20)

        print_info("PascalMultilabelDataLayerSync", params)

    def forward(self, bottom, top):
        """
        Load data.
        """
        for slot in range(self.batch_size):
            image, labels = self.batch_loader.load_next_image()
            top[0].data[slot, ...] = image
            top[1].data[slot, ...] = labels

    def reshape(self, bottom, top):
        """
        No-op: the input is of fixed size (rows and columns), so nothing
        needs reshaping here.
        """
        pass

    def backward(self, top, propagate_down, bottom):
        """
        No-op: a data layer does not back-propagate.
        """
        pass
|
|
|
|
|
|
|
|
class BatchLoader(object):

    """
    This class abstracts away the loading of images.
    Images can either be loaded singly, or in a batch. The latter is used for
    the asyncronous data layer to preload batches while other processing is
    performed.
    """

    def __init__(self, params, result):
        """
        Args:
            params: dict with 'batch_size', 'pascal_root', 'im_shape' and
                'split' keys (validated by check_params).
            result: unused placeholder, kept for interface compatibility
                with an asynchronous loader.
        """
        self.result = result
        self.batch_size = params['batch_size']
        self.pascal_root = params['pascal_root']
        self.im_shape = params['im_shape']

        # Read the image index list for this split. Use a context manager
        # so the file handle is closed (the original leaked it).
        list_file = params['split'] + '.txt'
        with open(osp.join(self.pascal_root, 'ImageSets/Main', list_file)) as f:
            self.indexlist = [line.rstrip('\n') for line in f]
        self._cur = 0  # current position within indexlist

        # Converts HxWxC uint8 images into Caffe's preprocessed layout.
        self.transformer = SimpleTransformer()

        # Single-argument print() call form works under Python 2 and 3
        # (original used the Python-2-only print statement).
        print("BatchLoader initialized with {} images".format(
            len(self.indexlist)))

    def load_next_image(self):
        """
        Load the next image in a batch.

        Returns:
            (preprocessed image, float32 vector of length 20 with a 1 for
            every ground-truth class present in the annotation).
        """
        # Epoch boundary: wrap around and reshuffle the index list.
        if self._cur == len(self.indexlist):
            self._cur = 0
            shuffle(self.indexlist)

        index = self.indexlist[self._cur]
        image_file_name = index + '.jpg'
        im = np.asarray(Image.open(
            osp.join(self.pascal_root, 'JPEGImages', image_file_name)))
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3; this
        # code needs an older SciPy (or a port to PIL/skimage resizing).
        im = scipy.misc.imresize(im, self.im_shape)

        # Random horizontal flip for augmentation: flip is +1 or -1.
        flip = np.random.choice(2) * 2 - 1
        im = im[:, ::flip, :]

        # Build the multilabel vector; gt_classes are 1-based
        # (0 is __background__), hence the label - 1 offset.
        multilabel = np.zeros(20).astype(np.float32)
        anns = load_pascal_annotation(index, self.pascal_root)
        for label in anns['gt_classes']:
            multilabel[label - 1] = 1

        self._cur += 1
        return self.transformer.preprocess(im), multilabel
|
|
|
|
|
|
|
|
def load_pascal_annotation(index, pascal_root):
    """
    Parse the PASCAL VOC .xml annotation file for image `index`.

    This code is borrowed from Ross Girshick's FAST-RCNN code
    (https://github.com/rbgirshick/fast-rcnn).
    See publication for further details: (http://arxiv.org/abs/1504.08083).

    Thanks Ross!

    Args:
        index: image identifier, e.g. '000001' (no file extension).
        pascal_root: VOC devkit root containing the Annotations/ directory.

    Returns:
        dict with:
            'boxes': uint16 array (num_objs, 4) of 0-based [x1, y1, x2, y2],
            'gt_classes': int32 array of class indices (1..20),
            'gt_overlaps': sparse CSR (num_objs, 21) one-hot class matrix,
            'flipped': always False,
            'index': the input index.
    """
    classes = ('__background__',  # always index 0
               'aeroplane', 'bicycle', 'bird', 'boat',
               'bottle', 'bus', 'car', 'cat', 'chair',
               'cow', 'diningtable', 'dog', 'horse',
               'motorbike', 'person', 'pottedplant',
               'sheep', 'sofa', 'train', 'tvmonitor')
    # range() instead of the Python-2-only xrange(); size derived from
    # the classes tuple instead of the magic constant 21.
    class_to_ind = dict(zip(classes, range(len(classes))))

    filename = osp.join(pascal_root, 'Annotations', index + '.xml')

    def get_data_from_tag(node, tag):
        # Text of the first child node of the first element named `tag`.
        return node.getElementsByTagName(tag)[0].childNodes[0].data

    with open(filename) as f:
        data = minidom.parseString(f.read())

    objs = data.getElementsByTagName('object')
    num_objs = len(objs)

    boxes = np.zeros((num_objs, 4), dtype=np.uint16)
    gt_classes = np.zeros((num_objs), dtype=np.int32)
    overlaps = np.zeros((num_objs, len(classes)), dtype=np.float32)

    # PASCAL pixel coordinates are 1-based; shift to 0-based indices.
    for ix, obj in enumerate(objs):
        x1 = float(get_data_from_tag(obj, 'xmin')) - 1
        y1 = float(get_data_from_tag(obj, 'ymin')) - 1
        x2 = float(get_data_from_tag(obj, 'xmax')) - 1
        y2 = float(get_data_from_tag(obj, 'ymax')) - 1
        cls = class_to_ind[
            str(get_data_from_tag(obj, "name")).lower().strip()]
        boxes[ix, :] = [x1, y1, x2, y2]
        gt_classes[ix] = cls
        overlaps[ix, cls] = 1.0

    # Requires scipy.sparse to be imported explicitly at file level;
    # `import scipy.misc` alone does not load the sparse submodule.
    overlaps = scipy.sparse.csr_matrix(overlaps)

    return {'boxes': boxes,
            'gt_classes': gt_classes,
            'gt_overlaps': overlaps,
            'flipped': False,
            'index': index}
|
|
|
|
|
|
|
|
def check_params(params):
    """
    A utility function to check the parameters for the data layers.

    Raises AssertionError naming the first missing required key.
    """
    # Idiomatic membership test: `in params` rather than `in params.keys()`
    # (same semantics, no intermediate view object).
    assert 'split' in params, 'Params must include split (train, val, or test).'

    required = ['batch_size', 'pascal_root', 'im_shape']
    for r in required:
        assert r in params, 'Params must include {}'.format(r)
|
|
|
|
|
|
|
|
def print_info(name, params):
    """
    Output some info regarding the class.

    Args:
        name: layer class name to display.
        params: dict with 'split', 'batch_size' and 'im_shape' keys.
    """
    # Single-argument print() call form is valid under both Python 2 and 3
    # (the original used the Python-2-only print statement, a syntax error
    # under Python 3).
    print("{} initialized for split: {}, with bs: {}, im_shape: {}.".format(
        name,
        params['split'],
        params['batch_size'],
        params['im_shape']))
|
|
|