code
stringlengths
17
6.64M
def _add_loss_summaries(total_loss):
    """Add summaries for losses in CIFAR-10 model.

    Generates moving averages for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
      total_loss: Total loss from loss().
    Returns:
      loss_averages_op: op for generating moving averages of losses.
    """
    # Smooth every collected loss (plus the total) with a 0.9-decay EMA.
    averager = tf.train.ExponentialMovingAverage(0.9, name='avg')
    all_losses = tf.get_collection('losses') + [total_loss]
    loss_averages_op = averager.apply(all_losses)

    # Emit both the raw value and the smoothed value for TensorBoard.
    for loss in all_losses:
        tf.summary.scalar(loss.op.name + ' (raw)', loss)
        tf.summary.scalar(loss.op.name, averager.average(loss))

    return loss_averages_op
def train(total_loss, global_step):
    """Train CIFAR-10 model.

    Create an optimizer and apply to all trainable variables. Add moving
    average for all trainable variables.

    Args:
      total_loss: Total loss from loss().
      global_step: Integer Variable counting the number of training steps
        processed.
    Returns:
      train_op: op for training.
    """
    # Decay the learning rate exponentially based on the number of steps.
    num_batches_per_epoch = (NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size)
    decay_steps = int((num_batches_per_epoch * NUM_EPOCHS_PER_DECAY))
    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True)
    tf.summary.scalar('learning_rate', lr)

    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(total_loss)

    # Compute gradients only after the loss averages have been updated.
    with tf.control_dependencies([loss_averages_op]):
        opt = tf.train.GradientDescentOptimizer(lr)
        grads = opt.compute_gradients(total_loss)

    # Apply the gradients (this also increments global_step).
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    # Add histograms for trainable variables and their gradients.
    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name, var)
    for (grad, var) in grads:
        if (grad is not None):
            tf.summary.histogram((var.op.name + '/gradients'), grad)

    # Track moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # train_op fires only after both the gradient step and the EMA update.
    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')
    return train_op
def maybe_download_and_extract():
    """Download and extract the tarball from Alex's website.

    Downloads the CIFAR-10 binary archive into FLAGS.data_dir (skipping
    the download if the file already exists) and extracts it (skipping
    extraction if the target directory already exists).
    """
    dest_directory = FLAGS.data_dir
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # '\r' keeps the progress report on a single console line.
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename, (float(count * block_size) / float(total_size)) * 100.0))
            sys.stdout.flush()
        (filepath, _) = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
    if not os.path.exists(extracted_dir_path):
        # Fix: the original leaked the tar file handle; 'with' guarantees
        # it is closed even if extraction raises.
        with tarfile.open(filepath, 'r:gz') as tar:
            tar.extractall(dest_directory)
def read_cifar10(filename_queue):
    """Reads and parses examples from CIFAR10 data files.

    Recommendation: if you want N-way read parallelism, call this function
    N times. This will give you N independent Readers reading different
    files & positions within those files, which will give better mixing of
    examples.

    Args:
      filename_queue: A queue of strings with the filenames to read from.

    Returns:
      An object representing a single example, with the following fields:
        height: number of rows in the result (32)
        width: number of columns in the result (32)
        depth: number of color channels in the result (3)
        key: a scalar string Tensor describing the filename & record number
          for this example.
        label: an int32 Tensor with the label in the range 0..9.
        uint8image: a [height, width, depth] uint8 Tensor with the image data
    """

    class CIFAR10Record(object):
        # Plain namespace; fields are attached dynamically below.
        pass
    result = CIFAR10Record()

    # CIFAR-10 binary format: 1 label byte followed by a 32x32x3 image.
    label_bytes = 1
    result.height = 32
    result.width = 32
    result.depth = 3
    image_bytes = ((result.height * result.width) * result.depth)
    record_bytes = (label_bytes + image_bytes)

    # Every record has the same fixed length; the files have no header/footer.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    (result.key, value) = reader.read(filename_queue)

    # Reinterpret the record string as a vector of uint8.
    record_bytes = tf.decode_raw(value, tf.uint8)

    # The first byte is the label.
    result.label = tf.cast(tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)

    # The remaining bytes are the image stored depth-major: [depth, height, width].
    depth_major = tf.reshape(tf.strided_slice(record_bytes, [label_bytes], [(label_bytes + image_bytes)]), [result.depth, result.height, result.width])
    # Convert to the conventional [height, width, depth] layout.
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])
    return result
def _generate_image_and_label_batch(image, label, min_queue_examples, batch_size, shuffle):
    """Construct a queued batch of images and labels.

    Args:
      image: 3-D Tensor of [height, width, 3] of type.float32.
      label: 1-D Tensor of type.int32
      min_queue_examples: int32, minimum number of samples to retain
        in the queue that provides of batches of examples.
      batch_size: Number of images per batch.
      shuffle: boolean indicating whether to use a shuffling queue.

    Returns:
      images: Images. 4D tensor of [batch_size, height, width, 3] size.
      labels: Labels. 1D tensor of [batch_size] size.
    """
    # 16 preprocessing threads feed the queue in parallel.
    num_preprocess_threads = 16
    if shuffle:
        # min_after_dequeue bounds how "mixed" the shuffle can be.
        (images, label_batch) = tf.train.shuffle_batch([image, label], batch_size=batch_size, num_threads=num_preprocess_threads, capacity=(min_queue_examples + (3 * batch_size)), min_after_dequeue=min_queue_examples)
    else:
        (images, label_batch) = tf.train.batch([image, label], batch_size=batch_size, num_threads=num_preprocess_threads, capacity=(min_queue_examples + (3 * batch_size)))

    # Display a sample of the batched images in TensorBoard.
    tf.summary.image('images', images)
    return (images, tf.reshape(label_batch, [batch_size]))
def distorted_inputs(data_dir, batch_size):
    """Construct distorted input for CIFAR training using the Reader ops.

    Args:
      data_dir: Path to the CIFAR-10 data directory.
      batch_size: Number of images per batch.

    Returns:
      images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
      labels: Labels. 1D tensor of [batch_size] size.
    """
    filenames = [os.path.join(data_dir, ('data_batch_%d.bin' % i)) for i in xrange(1, 6)]
    for f in filenames:
        if (not tf.gfile.Exists(f)):
            raise ValueError(('Failed to find file: ' + f))

    # Queue that produces the filenames to read.
    filename_queue = tf.train.string_input_producer(filenames)
    read_input = read_cifar10(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)
    height = IMAGE_SIZE
    width = IMAGE_SIZE

    # Data augmentation: random crop, horizontal flip, and
    # brightness/contrast jitter (order matters but is the standard one).
    distorted_image = tf.random_crop(reshaped_image, [height, width, 3])
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)

    # Normalize to zero mean / unit norm per image.
    float_image = tf.image.per_image_standardization(distorted_image)
    float_image.set_shape([height, width, 3])
    read_input.label.set_shape([1])

    # Keep at least 40% of a training epoch queued for good shuffling.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int((NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue))
    print(('Filling queue with %d CIFAR images before starting to train. This will take a few minutes.' % min_queue_examples))
    return _generate_image_and_label_batch(float_image, read_input.label, min_queue_examples, batch_size, shuffle=True)
def inputs(eval_data, data_dir, batch_size):
    """Construct input for CIFAR evaluation using the Reader ops.

    Args:
      eval_data: bool, indicating if one should use the train or eval data set.
      data_dir: Path to the CIFAR-10 data directory.
      batch_size: Number of images per batch.

    Returns:
      images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
      labels: Labels. 1D tensor of [batch_size] size.
    """
    # Choose train (5 data batches) or test file, with matching epoch size.
    if (not eval_data):
        filenames = [os.path.join(data_dir, ('data_batch_%d.bin' % i)) for i in xrange(1, 6)]
        num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
    else:
        filenames = [os.path.join(data_dir, 'test_batch.bin')]
        num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
    for f in filenames:
        if (not tf.gfile.Exists(f)):
            raise ValueError(('Failed to find file: ' + f))

    # Queue that produces the filenames to read.
    filename_queue = tf.train.string_input_producer(filenames)
    read_input = read_cifar10(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)
    height = IMAGE_SIZE
    width = IMAGE_SIZE

    # Deterministic preprocessing only: center crop/pad + standardization
    # (no augmentation at eval time).
    resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image, height, width)
    float_image = tf.image.per_image_standardization(resized_image)
    float_image.set_shape([height, width, 3])
    read_input.label.set_shape([1])

    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int((num_examples_per_epoch * min_fraction_of_examples_in_queue))
    return _generate_image_and_label_batch(float_image, read_input.label, min_queue_examples, batch_size, shuffle=False)
def conv_variable(weight_shape):
    """Create a conv kernel and bias, uniformly initialized in [-d, d].

    d = 1 / sqrt(fan_in), fan_in = in_channels * kernel_w * kernel_h.

    Args:
      weight_shape: [kernel_w, kernel_h, in_channels, out_channels].
    Returns:
      (weight, bias): tf.Variable pair for the kernel and its bias.
    """
    kernel_w = weight_shape[0]
    kernel_h = weight_shape[1]
    in_channels = weight_shape[2]
    out_channels = weight_shape[3]
    bound = 1.0 / np.sqrt(in_channels * kernel_w * kernel_h)
    weight = tf.Variable(
        tf.random_uniform(weight_shape, minval=-bound, maxval=bound))
    bias = tf.Variable(
        tf.random_uniform([out_channels], minval=-bound, maxval=bound))
    return (weight, bias)
def conv2d(x, W, stride):
    """2-D convolution with SAME padding and equal stride on both axes."""
    spatial_strides = [1, stride, stride, 1]
    return tf.nn.conv2d(x, W, strides=spatial_strides, padding='SAME')
def maxpool2d(x, k=2):
    """k x k max-pooling with stride k and SAME padding (halves H/W for k=2)."""
    window = [1, k, k, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def IPE_r1(F1F2, F2F3, F3F4, F4F5, F5F6, x_cor, y_cor, FLAGS):
    """Image-pair encoder, variant 1: two conv towers + conv trunk + pooling.

    Args:
      F1F2..F5F6: five consecutive frame pairs, each with 2*FLAGS.col_dim
        channels (two frames stacked on the channel axis).
      x_cor, y_cor: coordinate channels, concatenated after the conv trunk.
      FLAGS: config object; uses col_dim and batch_num.
    Returns:
      (pair1..pair5): one flattened feature tensor per frame pair, each
      [FLAGS.batch_num, 32] — assumes the five 2x poolings reduce the
      spatial dims to 1x1; confirm against the input resolution.
    """
    # Stack all five pairs along the batch axis so one set of weights
    # processes them jointly.
    img_pair = tf.concat([F1F2, F2F3, F3F4, F4F5, F5F6], 0)
    # Tower 1: two 10x10 convs, 4 channels (coarse features).
    (w_1_1, b_1_1) = conv_variable([10, 10, (FLAGS.col_dim * 2), 4])
    h_1_1 = tf.nn.relu((conv2d(img_pair, w_1_1, 1) + b_1_1))
    (w_1_2, b_1_2) = conv_variable([10, 10, 4, 4])
    h_1_2 = tf.nn.relu((conv2d(h_1_1, w_1_2, 1) + b_1_2))
    # Tower 2: two 3x3 convs, 16 channels (fine features).
    (w_2_1, b_2_1) = conv_variable([3, 3, (FLAGS.col_dim * 2), 16])
    h_2_1 = tf.nn.relu((conv2d(img_pair, w_2_1, 1) + b_2_1))
    (w_2_2, b_2_2) = conv_variable([3, 3, 16, 16])
    h_2_2 = tf.nn.relu((conv2d(h_2_1, w_2_2, 1) + b_2_2))
    # Merge the towers on the channel axis: 4 + 16 = 20 channels.
    en_pair = tf.concat([h_1_2, h_2_2], 3)
    (w_3_1, b_3_1) = conv_variable([3, 3, 20, 16])
    h_3_1 = tf.nn.relu((conv2d(en_pair, w_3_1, 1) + b_3_1))
    (w_3_2, b_3_2) = conv_variable([3, 3, 16, 16])
    h_3_2 = tf.nn.relu((conv2d(h_3_1, w_3_2, 1) + b_3_2))
    # Append x/y coordinate channels: 16 + 1 + 1 = 18 channels.
    h_3_2_x_y = tf.concat([h_3_2, x_cor, y_cor], 3)
    # Downsampling trunk: conv + 2x2 maxpool, five stages.
    fil_num = 16
    (w_4_1, b_4_1) = conv_variable([3, 3, 18, fil_num])
    h_4_1 = tf.nn.relu((conv2d(h_3_2_x_y, w_4_1, 1) + b_4_1))
    h_4_1 = maxpool2d(h_4_1)
    (w_4_2, b_4_2) = conv_variable([3, 3, fil_num, fil_num])
    h_4_2 = tf.nn.relu((conv2d(h_4_1, w_4_2, 1) + b_4_2))
    h_4_2 = maxpool2d(h_4_2)
    (w_4_3, b_4_3) = conv_variable([3, 3, fil_num, fil_num])
    h_4_3 = tf.nn.relu((conv2d(h_4_2, w_4_3, 1) + b_4_3))
    h_4_3 = maxpool2d(h_4_3)
    # Channel count doubles (16 -> 32) at the fourth stage.
    (w_4_4, b_4_4) = conv_variable([3, 3, fil_num, (fil_num * 2)])
    h_4_4 = tf.nn.relu((conv2d(h_4_3, w_4_4, 1) + b_4_4))
    fil_num = 32
    h_4_4 = maxpool2d(h_4_4)
    (w_4_5, b_4_5) = conv_variable([3, 3, fil_num, fil_num])
    h_4_5 = tf.nn.relu((conv2d(h_4_4, w_4_5, 1) + b_4_5))
    h_4_5 = maxpool2d(h_4_5)
    # Flatten, then split back into the five per-pair chunks along axis 0.
    res_pair = tf.reshape(h_4_5, [(- 1), fil_num])
    pair1 = tf.slice(res_pair, [0, 0], [FLAGS.batch_num, (- 1)])
    pair2 = tf.slice(res_pair, [FLAGS.batch_num, 0], [FLAGS.batch_num, (- 1)])
    pair3 = tf.slice(res_pair, [(FLAGS.batch_num * 2), 0], [FLAGS.batch_num, (- 1)])
    pair4 = tf.slice(res_pair, [(FLAGS.batch_num * 3), 0], [FLAGS.batch_num, (- 1)])
    pair5 = tf.slice(res_pair, [(FLAGS.batch_num * 4), 0], [FLAGS.batch_num, (- 1)])
    return (pair1, pair2, pair3, pair4, pair5)
def IPE_r2(F1F2, F2F3, F3F4, F4F5, F5F6, x_cor, y_cor, FLAGS):
    """Image-pair encoder, variant 2: single residual conv trunk.

    Unlike IPE_r1 there are no parallel towers; raw frame pairs plus
    coordinate channels go straight into five conv+maxpool stages with
    identity skip connections.

    Args:
      F1F2..F5F6: five consecutive frame pairs (channels stacked on axis 3;
        the first conv expects 10 input channels, i.e. 2*col_dim + 2 == 10).
      x_cor, y_cor: coordinate channels.
      FLAGS: config object; uses fil_num and batch_num.
    Returns:
      (pair1..pair5): one [FLAGS.batch_num, fil_num] tensor per frame pair —
      assumes the five 2x poolings reduce the spatial dims to 1x1; confirm
      against the input resolution.
    """
    fil_num = FLAGS.fil_num
    # Stack the five pairs along the batch axis.
    img_pair = tf.concat([F1F2, F2F3, F3F4, F4F5, F5F6], 0)
    # Append x/y coordinate channels.
    h_3_2_x_y = tf.concat([img_pair, x_cor, y_cor], 3)
    (w_4_1, b_4_1) = conv_variable([3, 3, 10, fil_num])
    h_4_1 = tf.nn.relu((conv2d(h_3_2_x_y, w_4_1, 1) + b_4_1))
    h_4_1 = maxpool2d(h_4_1)
    # Stages 2-5: conv with identity skip ("+ h_prev"), then 2x2 maxpool.
    (w_4_2, b_4_2) = conv_variable([3, 3, fil_num, fil_num])
    h_4_2 = tf.nn.relu(((conv2d(h_4_1, w_4_2, 1) + b_4_2) + h_4_1))
    h_4_2 = maxpool2d(h_4_2)
    (w_4_3, b_4_3) = conv_variable([3, 3, fil_num, fil_num])
    h_4_3 = tf.nn.relu(((conv2d(h_4_2, w_4_3, 1) + b_4_3) + h_4_2))
    h_4_3 = maxpool2d(h_4_3)
    (w_4_4, b_4_4) = conv_variable([3, 3, fil_num, fil_num])
    h_4_4 = tf.nn.relu(((conv2d(h_4_3, w_4_4, 1) + b_4_4) + h_4_3))
    h_4_4 = maxpool2d(h_4_4)
    (w_4_5, b_4_5) = conv_variable([3, 3, fil_num, fil_num])
    h_4_5 = tf.nn.relu(((conv2d(h_4_4, w_4_5, 1) + b_4_5) + h_4_4))
    h_4_5 = maxpool2d(h_4_5)
    # Flatten and split back into five per-pair chunks along axis 0.
    res_pair = tf.reshape(h_4_5, [(- 1), fil_num])
    pair1 = tf.slice(res_pair, [0, 0], [FLAGS.batch_num, (- 1)])
    pair2 = tf.slice(res_pair, [FLAGS.batch_num, 0], [FLAGS.batch_num, (- 1)])
    pair3 = tf.slice(res_pair, [(FLAGS.batch_num * 2), 0], [FLAGS.batch_num, (- 1)])
    pair4 = tf.slice(res_pair, [(FLAGS.batch_num * 3), 0], [FLAGS.batch_num, (- 1)])
    pair5 = tf.slice(res_pair, [(FLAGS.batch_num * 4), 0], [FLAGS.batch_num, (- 1)])
    return (pair1, pair2, pair3, pair4, pair5)
def VE(F1, F2, F3, F4, F5, F6, x_cor, y_cor, FLAGS):
    """Visual encoder: six frames -> four per-timestep object state codes.

    Builds five consecutive frame pairs, encodes them with IPE_r2, projects
    each pair code to No*Ds state entries, forms four "triplet" codes from
    adjacent pairs, and refines them with a small residual MLP.

    Args:
      F1..F6: six consecutive frames (channels on axis 3).
      x_cor, y_cor: coordinate channels passed through to IPE_r2.
      FLAGS: config; uses fil_num, No (object count), Ds (state size),
        batch_num.
    Returns:
      (S1, S2, S3, S4): state tensors, each [batch_num, No, Ds].
    """
    # Five consecutive frame pairs on the channel axis.
    F1F2 = tf.concat([F1, F2], 3)
    F2F3 = tf.concat([F2, F3], 3)
    F3F4 = tf.concat([F3, F4], 3)
    F4F5 = tf.concat([F4, F5], 3)
    F5F6 = tf.concat([F5, F6], 3)
    (pair1, pair2, pair3, pair4, pair5) = IPE_r2(F1F2, F2F3, F3F4, F4F5, F5F6, x_cor, y_cor, FLAGS)
    concated_pair = tf.concat([pair1, pair2, pair3, pair4, pair5], 0)
    fil_num = FLAGS.fil_num
    # Linear map from a pair code to No*Ds state entries.
    w0 = tf.Variable(tf.truncated_normal([fil_num, (FLAGS.No * FLAGS.Ds)], stddev=0.1), dtype=tf.float32)
    b0 = tf.Variable(tf.zeros([(FLAGS.No * FLAGS.Ds)]), dtype=tf.float32)
    h0 = (tf.matmul(concated_pair, w0) + b0)
    # Split back per pair and reshape to [batch, No, Ds].
    enpair1 = tf.reshape(tf.slice(h0, [0, 0], [FLAGS.batch_num, (- 1)]), [(- 1), FLAGS.No, FLAGS.Ds])
    enpair2 = tf.reshape(tf.slice(h0, [FLAGS.batch_num, 0], [FLAGS.batch_num, (- 1)]), [(- 1), FLAGS.No, FLAGS.Ds])
    enpair3 = tf.reshape(tf.slice(h0, [(FLAGS.batch_num * 2), 0], [FLAGS.batch_num, (- 1)]), [(- 1), FLAGS.No, FLAGS.Ds])
    enpair4 = tf.reshape(tf.slice(h0, [(FLAGS.batch_num * 3), 0], [FLAGS.batch_num, (- 1)]), [(- 1), FLAGS.No, FLAGS.Ds])
    enpair5 = tf.reshape(tf.slice(h0, [(FLAGS.batch_num * 4), 0], [FLAGS.batch_num, (- 1)]), [(- 1), FLAGS.No, FLAGS.Ds])
    # Adjacent pair codes concatenated per object -> four "triplet" codes.
    three1 = tf.concat([enpair1, enpair2], 2)
    three2 = tf.concat([enpair2, enpair3], 2)
    three3 = tf.concat([enpair3, enpair4], 2)
    three4 = tf.concat([enpair4, enpair5], 2)
    three = tf.concat([three1, three2, three3, three4], 0)
    three = tf.reshape(three, [(- 1), (FLAGS.Ds * 2)])
    # Refinement MLP (64 wide) with residual adds.
    w1 = tf.Variable(tf.truncated_normal([(FLAGS.Ds * 2), 64], stddev=0.1), dtype=tf.float32)
    b1 = tf.Variable(tf.zeros([64]), dtype=tf.float32)
    h1 = tf.nn.relu((tf.matmul(three, w1) + b1))
    w2 = tf.Variable(tf.truncated_normal([64, 64], stddev=0.1), dtype=tf.float32)
    b2 = tf.Variable(tf.zeros([64]), dtype=tf.float32)
    h2 = tf.nn.relu(((tf.matmul(h1, w2) + b2) + h1))
    w3 = tf.Variable(tf.truncated_normal([64, FLAGS.Ds], stddev=0.1), dtype=tf.float32)
    b3 = tf.Variable(tf.zeros([FLAGS.Ds]), dtype=tf.float32)
    # NOTE(review): "+ h2" adds a 64-wide tensor to a Ds-wide projection,
    # so this line only type-checks when FLAGS.Ds == 64 — confirm.
    h3 = ((tf.matmul(h2, w3) + b3) + h2)
    h3 = tf.reshape(h3, [(- 1), FLAGS.No, FLAGS.Ds])
    # Split the four timesteps back out along the batch axis.
    S1 = tf.slice(h3, [0, 0, 0], [FLAGS.batch_num, (- 1), (- 1)])
    S2 = tf.slice(h3, [FLAGS.batch_num, 0, 0], [FLAGS.batch_num, (- 1), (- 1)])
    S3 = tf.slice(h3, [(FLAGS.batch_num * 2), 0, 0], [FLAGS.batch_num, (- 1), (- 1)])
    S4 = tf.slice(h3, [(FLAGS.batch_num * 3), 0, 0], [FLAGS.batch_num, (- 1), (- 1)])
    return (S1, S2, S3, S4)
def core_r1(S, FLAGS, idx):
    """Interaction-network core, variant 1 (weights shared across objects).

    Computes a self-dynamics term per object, a pairwise relation term
    summed over partners, combines them through an "affector" MLP, and
    decodes the result back to a Ds-sized update per object.

    Args:
      S: state tensor [batch_num, No, Ds].
      FLAGS: config; uses No, Ds, batch_num.
      idx: integer suffix for the variable scopes (lets several cores with
        distinct weights coexist in one graph).
    Returns:
      h2_out: [batch_num, No, Ds] predicted per-object update.
    """
    fil_num = 64
    # M[i] is object i's state, [batch_num, Ds].
    M = tf.unstack(S, FLAGS.No, 1)
    # Self-dynamics: shared 2-layer MLP applied to every object.
    SD_in = tf.reshape(S, [(- 1), FLAGS.Ds])
    with tf.variable_scope(('self-dynamics' + str(idx))):
        w1 = tf.get_variable('w1', shape=[FLAGS.Ds, fil_num])
        b1 = tf.get_variable('b1', shape=[fil_num])
        h1 = tf.nn.relu((tf.matmul(SD_in, w1) + b1))
        w2 = tf.get_variable('w2', shape=[fil_num, fil_num])
        b2 = tf.get_variable('b2', shape=[fil_num])
        h2 = ((tf.matmul(h1, w2) + b2) + h1)
    M_self = tf.reshape(h2, [(- 1), FLAGS.No, fil_num])
    # Pairwise relation inputs: No*(No-1) concatenated state pairs.
    rel_num = int((FLAGS.No * (FLAGS.No - 1)))
    rel_in = np.zeros(rel_num, dtype=object)
    for i in range(rel_num):
        row_idx = int((i / (FLAGS.No - 1)))
        col_idx = int((i % (FLAGS.No - 1)))
        # NOTE(review): col_idx ranges over 0..No-2 and can equal row_idx,
        # so some "relations" pair an object with itself while the last
        # object is never a partner — core_r2 below excludes the diagonal
        # instead. Confirm whether this indexing is intended.
        rel_in[i] = tf.concat([M[row_idx], M[col_idx]], 1)
    rel_in = tf.concat(list(rel_in), 0)
    # Relation MLP, shared across all pairs.
    with tf.variable_scope(('Relation' + str(idx))):
        w1 = tf.get_variable('w1', shape=[(FLAGS.Ds * 2), fil_num])
        b1 = tf.get_variable('b1', shape=[fil_num])
        h1 = tf.nn.relu((tf.matmul(rel_in, w1) + b1))
        w2 = tf.get_variable('w2', shape=[fil_num, fil_num])
        b2 = tf.get_variable('b2', shape=[fil_num])
        h2 = tf.nn.relu(((tf.matmul(h1, w2) + b2) + h1))
        w3 = tf.get_variable('w3', shape=[fil_num, fil_num])
        b3 = tf.get_variable('b3', shape=[fil_num])
        h3 = ((tf.matmul(h2, w3) + b3) + h2)
    # Split per pair, then sum each object's incoming relation effects.
    M_rel = np.zeros(rel_num, dtype=object)
    for i in range(rel_num):
        M_rel[i] = tf.slice(h3, [(FLAGS.batch_num * i), 0], [FLAGS.batch_num, (- 1)])
    M_rel2 = np.zeros(FLAGS.No, dtype=object)
    for i in range(FLAGS.No):
        for j in range((FLAGS.No - 1)):
            M_rel2[i] += M_rel[((i * (FLAGS.No - 1)) + j)]
    M_rel2 = tf.stack(list(M_rel2), 1)
    # Combine self + relation terms.
    M_update = (M_self + M_rel2)
    aff_in = tf.reshape(M_update, [(- 1), fil_num])
    # Affector: 3-layer residual MLP on the combined effect.
    with tf.variable_scope(('Affector' + str(idx))):
        w1 = tf.get_variable('w1', shape=[fil_num, fil_num])
        b1 = tf.get_variable('b1', shape=[fil_num])
        h1 = tf.nn.relu(((tf.matmul(aff_in, w1) + b1) + aff_in))
        w2 = tf.get_variable('w2', shape=[fil_num, fil_num])
        b2 = tf.get_variable('b2', shape=[fil_num])
        h2 = tf.nn.relu(((tf.matmul(h1, w2) + b2) + h1))
        w3 = tf.get_variable('w3', shape=[fil_num, fil_num])
        b3 = tf.get_variable('b3', shape=[fil_num])
        h3 = ((tf.matmul(h2, w3) + b3) + h2)
    M_affect = tf.reshape(h3, [(- 1), FLAGS.No, fil_num])
    # Output decoder: [state, affect] -> Ds per object.
    M_i_M_affect = tf.concat([S, M_affect], 2)
    out_in = tf.reshape(M_i_M_affect, [(- 1), (FLAGS.Ds + fil_num)])
    with tf.variable_scope(('Output' + str(idx))):
        w1 = tf.get_variable('w1', shape=[(FLAGS.Ds + fil_num), fil_num])
        b1 = tf.get_variable('b1', shape=[fil_num])
        h1 = tf.nn.relu((tf.matmul(out_in, w1) + b1))
        w2 = tf.get_variable('w2', shape=[fil_num, FLAGS.Ds])
        b2 = tf.get_variable('b2', shape=[FLAGS.Ds])
        h2 = (tf.matmul(h1, w2) + b2)
    h2_out = tf.reshape(h2, [(- 1), FLAGS.No, FLAGS.Ds])
    return h2_out
def core_r2(S, FLAGS, idx):
    """Interaction-network core, variant 2 (per-object weights).

    Same structure as core_r1 (self-dynamics, pairwise relations, affector,
    output decoder) but with separate variable scopes — hence separate
    weights — for each object, and relation pairs that exclude the diagonal
    (row != col).

    Args:
      S: state tensor [batch_num, No, Ds].
      FLAGS: config; uses No, Ds, batch_num.
      idx: integer suffix for the variable scopes.
    Returns:
      h2_out: [batch_num, No, Ds] predicted per-object update.
    """
    fil_num = 64
    # M[i] is object i's state, [batch_num, Ds].
    M = tf.unstack(S, FLAGS.No, 1)
    # Self-dynamics: one 2-layer MLP per object.
    M_self = np.zeros(FLAGS.No, dtype=object)
    for i in range(FLAGS.No):
        with tf.variable_scope(((('self-dynamics' + str(idx)) + '_') + str((i + 1)))):
            w1 = tf.get_variable('w1', shape=[FLAGS.Ds, fil_num])
            b1 = tf.get_variable('b1', shape=[fil_num])
            h1 = tf.nn.relu((tf.matmul(M[i], w1) + b1))
            w2 = tf.get_variable('w2', shape=[fil_num, fil_num])
            b2 = tf.get_variable('b2', shape=[fil_num])
            h2 = (tf.matmul(h1, w2) + b2)
            M_self[i] = h2
    # All ordered object pairs with row != col (No*(No-1) of them).
    rel_in = []
    for row_idx in range(FLAGS.No):
        for col_idx in range(FLAGS.No):
            if (row_idx != col_idx):
                rel_in += [tf.concat([M[row_idx], M[col_idx]], 1)]
    # Relation MLP: one per "sender" object i, applied to its No-1 pairs
    # stacked along the batch axis, then split back per pair.
    rel_out = []
    for i in range(FLAGS.No):
        rel_in_part = tf.concat(rel_in[(i * (FLAGS.No - 1)):((i + 1) * (FLAGS.No - 1))], 0)
        with tf.variable_scope(((('Relation' + str(idx)) + '_') + str((i + 1)))):
            w1 = tf.get_variable('w1', shape=[(FLAGS.Ds * 2), fil_num])
            b1 = tf.get_variable('b1', shape=[fil_num])
            h1 = tf.nn.relu((tf.matmul(rel_in_part, w1) + b1))
            w2 = tf.get_variable('w2', shape=[fil_num, fil_num])
            b2 = tf.get_variable('b2', shape=[fil_num])
            h2 = tf.nn.relu((tf.matmul(h1, w2) + b2))
            w3 = tf.get_variable('w3', shape=[fil_num, fil_num])
            b3 = tf.get_variable('b3', shape=[fil_num])
            h3 = (tf.matmul(h2, w3) + b3)
        for j in range((FLAGS.No - 1)):
            rel_out += [tf.slice(h3, [(FLAGS.batch_num * j), 0], [FLAGS.batch_num, (- 1)])]
    # Sum each object's relation effects over its No-1 partners.
    M_rel2 = np.zeros(FLAGS.No, dtype=object)
    for i in range(FLAGS.No):
        for j in range((FLAGS.No - 1)):
            M_rel2[i] += rel_out[((i * (FLAGS.No - 1)) + j)]
    # Combine self + relation terms per object.
    M_update = np.zeros(FLAGS.No, dtype=object)
    for i in range(FLAGS.No):
        M_update[i] = (M_self[i] + M_rel2[i])
    # Affector: one 3-layer MLP per object.
    M_affect = np.zeros(FLAGS.No, dtype=object)
    for i in range(FLAGS.No):
        with tf.variable_scope(((('Affector' + str(idx)) + '_') + str((i + 1)))):
            w1 = tf.get_variable('w1', shape=[fil_num, fil_num])
            b1 = tf.get_variable('b1', shape=[fil_num])
            h1 = tf.nn.relu((tf.matmul(M_update[i], w1) + b1))
            w2 = tf.get_variable('w2', shape=[fil_num, fil_num])
            b2 = tf.get_variable('b2', shape=[fil_num])
            h2 = tf.nn.relu((tf.matmul(h1, w2) + b2))
            w3 = tf.get_variable('w3', shape=[fil_num, fil_num])
            b3 = tf.get_variable('b3', shape=[fil_num])
            h3 = (tf.matmul(h2, w3) + b3)
            M_affect[i] = h3
    M_affect = tf.stack(list(M_affect), 1)
    # Output decoder (shared across objects): [state, affect] -> Ds.
    M_i_M_affect = tf.concat([S, M_affect], 2)
    out_in = tf.reshape(M_i_M_affect, [(- 1), (FLAGS.Ds + fil_num)])
    with tf.variable_scope(('Output' + str(idx))):
        w1 = tf.get_variable('w1', shape=[(FLAGS.Ds + fil_num), fil_num])
        b1 = tf.get_variable('b1', shape=[fil_num])
        h1 = tf.nn.relu((tf.matmul(out_in, w1) + b1))
        w2 = tf.get_variable('w2', shape=[fil_num, FLAGS.Ds])
        b2 = tf.get_variable('b2', shape=[FLAGS.Ds])
        h2 = (tf.matmul(h1, w2) + b2)
    h2_out = tf.reshape(h2, [(- 1), FLAGS.No, FLAGS.Ds])
    return h2_out
def DP(S1, S2, S3, S4, FLAGS):
    """Dynamics predictor: fuse three interaction-core outputs into one state.

    Runs core_r1 on three of the four input states and mixes the results
    with a small MLP under the 'DP' variable scope.

    NOTE(review): S2 is accepted but never used, and the cores are applied
    to (S1, S3, S4) with idx suffixes 4, 2, 1 respectively — confirm this
    selection/ordering is intentional.

    Args:
      S1..S4: state tensors, each [batch_num, No, Ds].
      FLAGS: config; uses No, Ds.
    Returns:
      h2: [batch_num, No, Ds] predicted next state.
    """
    Sc1 = core_r1(S1, FLAGS, 4)
    Sc3 = core_r1(S3, FLAGS, 2)
    Sc4 = core_r1(S4, FLAGS, 1)
    fil_num = 64
    # Concatenate the three per-object codes on the feature axis.
    S = tf.concat([Sc1, Sc3, Sc4], 2)
    S = tf.reshape(S, [(- 1), (FLAGS.Ds * 3)])
    with tf.variable_scope('DP'):
        w1 = tf.get_variable('w1', shape=[(FLAGS.Ds * 3), fil_num])
        b1 = tf.get_variable('b1', shape=[fil_num])
        h1 = tf.nn.relu((tf.matmul(S, w1) + b1))
        w2 = tf.get_variable('w2', shape=[fil_num, FLAGS.Ds])
        b2 = tf.get_variable('b2', shape=[FLAGS.Ds])
        h2 = (tf.matmul(h1, w2) + b2)
    h2 = tf.reshape(h2, [(- 1), FLAGS.No, FLAGS.Ds])
    return h2
def SD(output_dp, FLAGS):
    """State decoder: linearly project each object's Ds-dim state to 4 values.

    Args:
      output_dp: [batch, No, Ds] tensor (e.g. the DP output).
      FLAGS: config; uses No and Ds.
    Returns:
      [batch, No, 4] tensor of decoded per-object outputs.
    """
    flat_states = tf.reshape(output_dp, [-1, FLAGS.Ds])
    weight = tf.Variable(
        tf.truncated_normal([FLAGS.Ds, 4], stddev=0.1), dtype=tf.float32)
    bias = tf.Variable(tf.zeros([4]), dtype=tf.float32)
    projected = tf.matmul(flat_states, weight) + bias
    return tf.reshape(projected, [-1, FLAGS.No, 4])
def add_path(path):
    """Prepend *path* to sys.path unless it is already present."""
    if path in sys.path:
        return
    sys.path.insert(0, path)
def update_config(config_file):
    """Overlay a YAML experiment config onto the module-level `config` dict.

    Every top-level key in the YAML file must already exist in `config`.
    Dict-valued entries are merged key-by-key with special handling:
    TRAIN.BBOX_WEIGHTS and network.PIXEL_MEANS are converted to numpy
    arrays first. A top-level 'SCALES' entry is stored as a tuple in
    config['SCALES'][0]; all other entries replace the existing value.

    Args:
      config_file: path to a YAML file.
    Raises:
      ValueError: if the YAML contains a top-level key absent from config.
    """
    exp_config = None
    with open(config_file) as f:
        # SafeLoader: never execute arbitrary YAML tags from the file.
        exp_config = edict(yaml.load(f, Loader=yaml.SafeLoader))
        for (k, v) in exp_config.items():
            if (k in config):
                if isinstance(v, dict):
                    if (k == 'TRAIN'):
                        if ('BBOX_WEIGHTS' in v):
                            v['BBOX_WEIGHTS'] = np.array(v['BBOX_WEIGHTS'])
                    elif (k == 'network'):
                        if ('PIXEL_MEANS' in v):
                            v['PIXEL_MEANS'] = np.array(v['PIXEL_MEANS'])
                    # Merge sub-keys instead of replacing the whole dict.
                    for (vk, vv) in v.items():
                        config[k][vk] = vv
                elif (k == 'SCALES'):
                    config[k][0] = tuple(v)
                else:
                    config[k] = v
            else:
                raise ValueError('key must exist in config.py')
def _load_general(data, targets, major_axis):
    """Load a list of arrays into a list of arrays specified by slices.

    Each source entry is either copied into a single NDArray target or,
    when the source is itself a list/tuple, copied element-wise into the
    matching target list. `major_axis` is accepted for interface parity
    with the callers but is not consulted here.
    """
    for src_entry, dst_entry in zip(data, targets):
        if isinstance(dst_entry, nd.NDArray):
            src_entry.copyto(dst_entry)
        elif isinstance(src_entry, (list, tuple)):
            for part_src, part_dst in zip(src_entry, dst_entry):
                part_src.copyto(part_dst)
        else:
            raise NotImplementedError
def _load_data(batch, targets, major_axis):
    """Load data into sliced arrays"""
    # Thin wrapper: forwards the batch's data arrays to _load_general.
    _load_general(batch.data, targets, major_axis)
def _load_label(batch, targets, major_axis):
    """Load label into sliced arrays"""
    # Thin wrapper: forwards the batch's label arrays to _load_general.
    _load_general(batch.label, targets, major_axis)
def _merge_multi_context(outputs, major_axis):
    """Merge outputs that lives on multiple context into one, so that they look
    like living on one context.
    """
    merged = []
    for tensors, axis in zip(outputs, major_axis):
        if axis < 0:
            # No batch axis: each context holds the same value, keep one copy.
            merged.append(tensors[0])
        else:
            merged.append(nd.concatenate(tensors, axis=axis, always_copy=False))
    return merged
class DataParallelExecutorGroup(object): "DataParallelExecutorGroup is a group of executors that lives on a group of devices.\n This is a helper class used to implement data parallelization. Each mini-batch will\n be split and run on the devices.\n\n Parameters\n ----------\n symbol : Symbol\n The common symbolic computation graph for all executors.\n contexts : list\n A list of contexts.\n workload : list\n If not `None`, could be a list of numbers that specify the workload to be assigned\n to different context. Larger number indicate heavier workload.\n data_shapes : list\n Should be a list of (name, shape) tuples, for the shapes of data. Note the order is\n important and should be the same as the order that the `DataIter` provide the data.\n label_shapes : list\n Should be a list of (name, shape) tuples, for the shapes of label. Note the order is\n important and should be the same as the order that the `DataIter` provide the label.\n param_names : list\n A list of strings, indicating the names of parameters (e.g. weights, filters, etc.)\n in the computation graph.\n for_training : bool\n Indicate whether the executors should be bind for training. When not doing training,\n the memory for gradients will not be allocated.\n inputs_need_grad : bool\n Indicate whether the gradients for the input data should be computed. This is currently\n not used. It will be useful for implementing composition of modules.\n shared_group : DataParallelExecutorGroup\n Default is `None`. This is used in bucketing. When not `None`, it should be a executor\n group corresponding to a different bucket. In other words, it will correspond to a different\n symbol but with the same set of parameters (e.g. unrolled RNNs with different lengths).\n In this case, many memory will be shared.\n logger : Logger\n Default is `logging`.\n fixed_param_names: list of str\n Indicate parameters to be fixed during training. 
Parameters in this list will not allocate\n space for gradient, nor do gradient calculation.\n grad_req : str, list of str, dict of str to str\n Requirement for gradient accumulation. Can be 'write', 'add', or 'null'\n (default to 'write').\n Can be specified globally (str) or for each argument (list, dict).\n " def __init__(self, symbol, contexts, workload, data_shapes, label_shapes, param_names, for_training, inputs_need_grad, shared_group=None, logger=logging, fixed_param_names=None, grad_req='write', state_names=None): self.param_names = param_names self.arg_names = symbol.list_arguments() self.aux_names = symbol.list_auxiliary_states() self.symbol = symbol self.contexts = contexts self.workload = workload self.for_training = for_training self.inputs_need_grad = inputs_need_grad self.logger = logger self.fixed_param_names = fixed_param_names if (self.fixed_param_names is None): self.fixed_param_names = [] self.state_names = state_names if (self.state_names is None): self.state_names = [] if (not for_training): grad_req = 'null' data_names = [x.name for x in data_shapes[0]] if isinstance(grad_req, str): self.grad_req = {} for k in self.arg_names: if (k in self.param_names): self.grad_req[k] = ('null' if (k in self.fixed_param_names) else grad_req) elif (k in data_names): self.grad_req[k] = (grad_req if self.inputs_need_grad else 'null') else: self.grad_req[k] = 'null' elif isinstance(grad_req, (list, tuple)): assert (len(grad_req) == len(self.arg_names)) self.grad_req = dict(zip(self.arg_names, grad_req)) elif isinstance(grad_req, dict): self.grad_req = {} for k in self.arg_names: if (k in self.param_names): self.grad_req[k] = ('null' if (k in self.fixed_param_names) else 'write') elif (k in data_names): self.grad_req[k] = ('write' if self.inputs_need_grad else 'null') else: self.grad_req[k] = 'null' self.grad_req.update(grad_req) else: raise ValueError('grad_req must be one of str, list, tuple, or dict.') if (shared_group is not None): self.shared_data_arrays = 
shared_group.shared_data_arrays else: self.shared_data_arrays = [{} for _ in contexts] self.batch_size = len(data_shapes) self.slices = None self.execs = [] self._default_execs = None self.data_arrays = None self.label_arrays = None self.param_arrays = None self.state_arrays = None self.grad_arrays = None self.aux_arrays = None self.input_grad_arrays = None self.data_shapes = None self.label_shapes = None self.data_layouts = None self.label_layouts = None self.output_layouts = [DataDesc.get_batch_axis(self.symbol[name].attr('__layout__')) for name in self.symbol.list_outputs()] self.bind_exec(data_shapes, label_shapes, shared_group) def decide_slices(self, data_shapes): 'Decide the slices for each context according to the workload.\n\n Parameters\n ----------\n data_shapes : list\n list of (name, shape) specifying the shapes for the input data or label.\n ' assert (len(data_shapes) > 0) major_axis = [DataDesc.get_batch_axis(x.layout) for x in data_shapes] for ((name, shape), axis) in zip(data_shapes, major_axis): if (axis == (- 1)): continue batch_size = shape[axis] if (self.batch_size is not None): assert (batch_size == self.batch_size), (('all data must have the same batch size: ' + ('batch_size = %d, but ' % self.batch_size)) + ('%s has shape %s' % (name, shape))) else: self.batch_size = batch_size self.slices = _split_input_slice(self.batch_size, self.workload) return major_axis def _collect_arrays(self): 'Collect internal arrays from executors.' 
        # --- tail of _collect_arrays(): gather per-device views of all executor arrays ---
        # One inner list per executor (device); data/label arrays are looked up by the
        # names declared in data_shapes[0] / label_shapes[0].
        self.data_arrays = [[e.arg_dict[name] for (name, _) in self.data_shapes[0]] for e in self.execs]
        self.state_arrays = [[e.arg_dict[name] for e in self.execs] for name in self.state_names]
        if (self.label_shapes is not None):
            self.label_arrays = [[e.arg_dict[name] for (name, _) in self.label_shapes[0]] for e in self.execs]
        else:
            self.label_arrays = None
        # Parameter (and, when training, gradient) arrays are selected by position in
        # arg_names so param_arrays[k][d] and grad_arrays[k][d] stay aligned.
        self.param_arrays = [[exec_.arg_arrays[i] for exec_ in self.execs] for (i, name) in enumerate(self.arg_names) if (name in self.param_names)]
        if self.for_training:
            self.grad_arrays = [[exec_.grad_arrays[i] for exec_ in self.execs] for (i, name) in enumerate(self.arg_names) if (name in self.param_names)]
        else:
            self.grad_arrays = None
        data_names = [x[0] for x in self.data_shapes]
        if self.inputs_need_grad:
            # Gradients w.r.t. the network inputs (e.g. for adversarial/feature work).
            self.input_grad_arrays = [[exec_.grad_arrays[i] for exec_ in self.execs] for (i, name) in enumerate(self.arg_names) if (name in data_names)]
        else:
            self.input_grad_arrays = None
        self.aux_arrays = [[exec_.aux_arrays[i] for exec_ in self.execs] for i in range(len(self.aux_names))]

    def bind_exec(self, data_shapes, label_shapes, shared_group=None, reshape=False):
        """Bind executors on their respective devices.

        Parameters
        ----------
        data_shapes : list
            Per-device data shape lists.
        label_shapes : list or None
            Per-device label shape lists; ``None`` when there are no labels.
        shared_group : DataParallelExecutorGroup or None
            Group to share memory with (bucketing).
        reshape : bool
            If True, reshape the already-bound default executors in place
            instead of binding new ones.
        """
        # Fresh bind requires no executors yet; reshape requires they exist.
        assert (reshape or (not self.execs))
        for i in range(len(self.contexts)):
            data_shapes_i = data_shapes[i]
            if (label_shapes is not None):
                label_shapes_i = label_shapes[i]
            else:
                label_shapes_i = []
            if reshape:
                self.execs[i] = self._default_execs[i].reshape(allow_up_sizing=True, **dict((data_shapes_i + label_shapes_i)))
            else:
                self.execs.append(self._bind_ith_exec(i, data_shapes_i, label_shapes_i, shared_group))
        self.data_shapes = data_shapes
        self.label_shapes = label_shapes
        self._collect_arrays()

    def reshape(self, data_shapes, label_shapes):
        """Reshape executors to new input shapes.

        Parameters
        ----------
        data_shapes : list
        label_shapes : list or None
        """
        if (self._default_execs is None):
            # Remember the originally-bound executors so later reshapes always
            # derive from them (shallow copy of the list).
            self._default_execs = [i for i in self.execs]
        for i in range(len(self.contexts)):
            self.execs[i] = self._default_execs[i].reshape(allow_up_sizing=True, **dict((data_shapes[i] + (label_shapes[i] if (label_shapes is not None) else []))))
        self.data_shapes = data_shapes
        self.label_shapes = label_shapes
        self._collect_arrays()

    def set_params(self, arg_params, aux_params):
        """Assign, i.e. copy parameters to all the executors.

        Parameters
        ----------
        arg_params : dict
            A dictionary of name to `NDArray` parameter mapping.
        aux_params : dict
            A dictionary of name to `NDArray` auxiliary variable mapping.
        """
        for exec_ in self.execs:
            exec_.copy_params_from(arg_params, aux_params)

    def get_params(self, arg_params, aux_params):
        """Copy parameters from the executors into `arg_params` / `aux_params`.

        Per-device copies are averaged on CPU before being written back, so the
        target NDArrays are updated in place.

        Parameters
        ----------
        arg_params : dict of NDArray
            Target parameter arrays (updated in place).
        aux_params : dict of NDArray
            Target aux arrays (updated in place).
        """
        for (name, block) in zip(self.param_names, self.param_arrays):
            # Average the per-device replicas on CPU, then cast to the target dtype.
            weight = (sum((w.copyto(ctx.cpu()) for w in block)) / len(block))
            weight.astype(arg_params[name].dtype).copyto(arg_params[name])
        for (name, block) in zip(self.aux_names, self.aux_arrays):
            weight = (sum((w.copyto(ctx.cpu()) for w in block)) / len(block))
            weight.astype(aux_params[name].dtype).copyto(aux_params[name])

    def forward(self, data_batch, is_train=None):
        """Split `data_batch` according to workload and run forward on each device.

        Parameters
        ----------
        data_batch : DataBatch
            Or any object implementing a similar interface.
        is_train : bool or None
            Hint for the backend; defaults to ``self.for_training`` when None.
        """
        # LEONID_PROFILING_ENABLED gates coarse wall-clock prints for the three
        # phases: data load, label load, executor forward.
        if LEONID_PROFILING_ENABLED:
            import time
            tic = time.time()
        _load_data(data_batch, self.data_arrays, self.data_layouts)
        if (is_train is None):
            is_train = self.for_training
        if LEONID_PROFILING_ENABLED:
            toc = time.time()
            print('DataParallelExecutorGroup::forward 1 {0}'.format((toc - tic)))
            tic = time.time()
        if (self.label_arrays is not None):
            # Training requires labels; at inference they are optional.
            assert ((not is_train) or data_batch.label)
            if data_batch.label:
                _load_label(data_batch, self.label_arrays, self.label_layouts)
        if LEONID_PROFILING_ENABLED:
            toc = time.time()
            print('DataParallelExecutorGroup::forward 2 {0}'.format((toc - tic)))
            tic = time.time()
        for exec_ in self.execs:
            exec_.forward(is_train=is_train)
        if LEONID_PROFILING_ENABLED:
            toc = time.time()
            print('DataParallelExecutorGroup::forward 3 {0}'.format((toc - tic)))

    def get_outputs(self, merge_multi_context=True):
        """Get outputs of the previous forward computation.

        Parameters
        ----------
        merge_multi_context : bool
            When True (default), merge per-device outputs so the result looks
            like it came from a single executor.

        Returns
        -------
        If `merge_multi_context` is True: ``[out1, out2]``; otherwise
        ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All NDArray.
        """
        outputs = [[exec_.outputs[i] for exec_ in self.execs] for i in range(len(self.execs[0].outputs))]
        if merge_multi_context:
            outputs = _merge_multi_context(outputs, self.output_layouts)
        return outputs

    def get_states(self, merge_multi_context=True):
        """Get states from all devices.

        Parameters
        ----------
        merge_multi_context : bool
            Must be False; merging states is not supported.

        Returns
        -------
        list of list of NDArray, one inner list per state, one entry per device.
        """
        assert (not merge_multi_context), 'merge_multi_context=True is not supported for get_states yet.'
        return self.state_arrays

    def set_states(self, states=None, value=None):
        """Set value for states. Only one of `states` and `value` may be given.

        Parameters
        ----------
        states : list of list of NDArray, optional
            Source arrays shaped like [[state1_dev1, state1_dev2], ...].
        value : number, optional
            A single scalar broadcast into every state array.
        """
        if (states is not None):
            assert (value is None), 'Only one of states & value can be specified.'
            _load_general(states, self.state_arrays, ((0,) * len(states)))
        else:
            assert (value is not None), 'At least one of states & value must be specified.'
            assert (states is None), 'Only one of states & value can be specified.'
            for d_dst in self.state_arrays:
                for dst in d_dst:
                    dst[:] = value

    def get_input_grads(self, merge_multi_context=True):
        """Get the gradients with respect to the inputs of the module.

        Parameters
        ----------
        merge_multi_context : bool
            When True (default), merge per-device gradients into one array per input.

        Returns
        -------
        ``[grad1, grad2]`` when merged, else
        ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All NDArray.
        """
        assert self.inputs_need_grad
        if merge_multi_context:
            return _merge_multi_context(self.input_grad_arrays, self.data_layouts)
        return self.input_grad_arrays

    def backward(self, out_grads=None):
        """Run backward on all devices; requires a prior forward call and
        ``self.for_training == True``.

        Parameters
        ----------
        out_grads : NDArray or list of NDArray, optional
            Gradient on the outputs to be propagated back (only needed when
            bound outputs are not a loss).
        """
        assert self.for_training, 're-bind with for_training=True to run backward'
        if (out_grads is None):
            out_grads = []
        for (i, exec_) in enumerate(self.execs):
            # NOTE(review): `out_grads` is accepted but never sliced or forwarded —
            # every executor receives an empty list, so this only supports
            # loss-terminated graphs. Confirm this restriction is intentional.
            out_grads_slice = []
            exec_.backward(out_grads=out_grads_slice)

    def update_metric(self, eval_metric, labels):
        """Accumulate the performance according to `eval_metric` on all devices.

        Parameters
        ----------
        eval_metric : EvalMetric
            The metric used for evaluation.
        labels : list of NDArray
            Typically comes from `label` of a `DataBatch`.
        """
        # NOTE(review): the loop variable shadows the `labels` parameter; it works
        # because zip pairs each executor with its per-device label list, but the
        # shadowing is easy to misread.
        for (texec, labels) in zip(self.execs, labels):
            eval_metric.update(labels, texec.outputs)

    def _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group):
        """Internal utility to bind the i-th executor, reusing memory from
        `shared_group` when provided (bucketing support)."""
        shared_exec = (None if (shared_group is None) else shared_group.execs[i])
        context = self.contexts[i]
        shared_data_arrays = self.shared_data_arrays[i]
        input_shapes = dict(data_shapes)
        if (label_shapes is not None):
            input_shapes.update(dict(label_shapes))
        (arg_shapes, _, aux_shapes) = self.symbol.infer_shape(**input_shapes)
        assert (arg_shapes is not None), 'shape inference failed'
        input_types = {x.name: x.dtype for x in data_shapes}
        if (label_shapes is not None):
            input_types.update({x.name: x.dtype for x in label_shapes})
        (arg_types, _, aux_types) = self.symbol.infer_type(**input_types)
        assert (arg_types is not None), 'type inference failed'
        arg_arrays = []
        grad_arrays = ({} if self.for_training else None)

        def _get_or_reshape(name, shared_data_arrays, arg_shape, arg_type, context, logger):
            """Internal helper to get a memory block or re-use by re-shaping."""
            if (name in shared_data_arrays):
                arg_arr = shared_data_arrays[name]
                if (np.prod(arg_arr.shape) >= np.prod(arg_shape)):
                    # Existing buffer is large enough: reuse via reshape.
                    assert (arg_arr.dtype == arg_type)
                    arg_arr = arg_arr.reshape(arg_shape)
                else:
                    # Buffer too small: warn and re-allocate (breaks sharing).
                    logger.warning(((((((('bucketing: data "%s" has a shape %s' % (name, arg_shape)) + ', which is larger than already allocated ') + ('shape %s' % (arg_arr.shape,))) + '. Need to re-allocate. Consider putting ') + 'default_bucket_key to') + ' be the bucket taking the largest input for better ') + 'memory sharing.'))
                    arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
                    shared_data_arrays[name] = arg_arr
            else:
                arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
                shared_data_arrays[name] = arg_arr
            return arg_arr

        for j in range(len(self.arg_names)):
            name = self.arg_names[j]
            if (name in self.param_names):
                # Model parameter: allocate fresh, or reuse the shared executor's.
                if (shared_exec is None):
                    arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
                    if (self.grad_req[name] != 'null'):
                        grad_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
                        grad_arrays[name] = grad_arr
                else:
                    arg_arr = shared_exec.arg_dict[name]
                    assert (arg_arr.shape == arg_shapes[j])
                    assert (arg_arr.dtype == arg_types[j])
                    if (self.grad_req[name] != 'null'):
                        grad_arrays[name] = shared_exec.grad_dict[name]
            else:
                # Data/label input: draw from the shared data-array pool.
                arg_arr = _get_or_reshape(name, shared_data_arrays, arg_shapes[j], arg_types[j], context, self.logger)
                if (self.grad_req[name] != 'null'):
                    grad_arrays[name] = _get_or_reshape(('grad of ' + name), shared_data_arrays, arg_shapes[j], arg_types[j], context, self.logger)
            arg_arrays.append(arg_arr)
        if (shared_exec is None):
            aux_arrays = [nd.zeros(s, context, dtype=t) for (s, t) in zip(aux_shapes, aux_types)]
        else:
            for (j, arr) in enumerate(shared_exec.aux_arrays):
                assert (aux_shapes[j] == arr.shape)
                assert (aux_types[j] == arr.dtype)
            aux_arrays = shared_exec.aux_arrays[:]
        executor = self.symbol.bind(ctx=context, args=arg_arrays, args_grad=grad_arrays, aux_states=aux_arrays, grad_req=self.grad_req, shared_exec=shared_exec)
        return executor

    def _sliced_shape(self, shapes, i, major_axis):
        """Get the sliced shapes for the i-th executor.

        Parameters
        ----------
        shapes : list of DataDesc
            The original (name, shape, dtype, layout) descriptors.
        i : int
            Which executor we are dealing with.
        major_axis : list of int
            Batch axis per descriptor; -1 means "do not slice".
        """
        sliced_shapes = []
        for (desc, axis) in zip(shapes, major_axis):
            shape = list(desc.shape)
            if (axis >= 0):
                # Replace the batch dimension with this device's slice length.
                shape[axis] = (self.slices[i].stop - self.slices[i].start)
            sliced_shapes.append(DataDesc(desc.name, tuple(shape), desc.dtype, desc.layout))
        return sliced_shapes

    def install_monitor(self, mon):
        """Install monitor on all executors."""
        for exe in self.execs:
            mon.install(exe)
class Speedometer(object):
    """Batch-end callback that periodically logs training throughput.

    Every `frequent` batches it reports samples/sec (and the current values of
    `param.eval_metric`, when one is attached) via both `logging` and `print`.
    A backwards jump in the batch counter (new epoch) re-arms the timer.
    """

    def __init__(self, batch_size, frequent=50):
        self.batch_size = batch_size          # samples per batch
        self.frequent = frequent              # report every N batches
        self.init = False                     # True once the timer is armed
        self.tic = 0                          # timestamp of last report/arm
        self.last_count = 0                   # previous batch counter

    def __call__(self, param):
        """Callback to show speed; `param` carries nbatch/epoch/eval_metric."""
        batch_count = param.nbatch
        # Counter went backwards -> a new epoch started; restart timing.
        if self.last_count > batch_count:
            self.init = False
        self.last_count = batch_count

        if not self.init:
            # First call (or post-reset): just arm the timer.
            self.init = True
            self.tic = time.time()
            return

        if batch_count % self.frequent != 0:
            return

        speed = self.frequent * self.batch_size / (time.time() - self.tic)
        metric = param.eval_metric
        if metric is not None:
            names, values = metric.get()
            msg = 'Epoch[%d] Batch [%d]\tSpeed: %.2f samples/sec\tTrain-' % (param.epoch, batch_count, speed)
            for metric_name, metric_value in zip(names, values):
                msg += '%s=%f,\t' % (metric_name, metric_value)
        else:
            msg = 'Iter[%d] Batch [%d]\tSpeed: %.2f samples/sec' % (param.epoch, batch_count, speed)
        logging.info(msg)
        print(msg)
        self.tic = time.time()
def do_checkpoint(prefix, means, stds):
    """Build an epoch-end callback that saves a checkpoint with de-normalized
    bbox-regression parameters.

    The training-time `bbox_pred` weights/bias are learned against normalized
    targets; the callback writes extra `*_test` entries scaled by `stds` and
    shifted by `means` so the saved model predicts raw box deltas, then removes
    those temporary entries again.

    Parameters
    ----------
    prefix : str
        Checkpoint path prefix.
    means, stds : array-like
        Per-coordinate normalization statistics of the regression targets.
    """
    def _save_denormalized(iter_no, sym, arg, aux):
        std_nd = mx.nd.array(stds)
        mean_nd = mx.nd.array(means)
        # Scale each output row of the weight matrix by its target std.
        arg['bbox_pred_weight_test'] = (arg['bbox_pred_weight'].T * std_nd).T
        arg['bbox_pred_bias_test'] = arg['bbox_pred_bias'] * std_nd + mean_nd
        mx.model.save_checkpoint(prefix, iter_no + 1, sym, arg, aux)
        # Drop the temporary test-time entries so `arg` is left untouched.
        arg.pop('bbox_pred_weight_test')
        arg.pop('bbox_pred_bias_test')
    return _save_denormalized
def test_rcnn(cfg, dataset, image_set, root_path, dataset_path, ctx, prefix, epoch, vis, ignore_cache, shuffle, has_rpn, proposal, thresh, logger=None, output_path=None, nms_dets=None, is_docker=False):
    """Evaluate an R-CNN model on one or more datasets.

    `dataset`, `dataset_path`, `image_set` and `output_path` are ';'-separated
    parallel lists; each entry is evaluated in turn with `pred_eval`.

    Parameters
    ----------
    cfg : config object (expects cfg.symbol, cfg.dataset, cfg.SCALES, cfg.TEST, ...)
    dataset, image_set, root_path, dataset_path : str
        Dataset selection; first three may contain ';'-separated entries.
    ctx : list of mx.Context
        Devices; batch size equals len(ctx).
    prefix, epoch : checkpoint prefix and epoch to load.
    vis, ignore_cache, shuffle, has_rpn : bool flags forwarded to data/eval.
    proposal : str
        Proposal-roidb method name used when has_rpn is False.
    thresh : float
        Detection score threshold.
    logger : logging.Logger (required).
    output_path : str, ';'-separated result paths.
    nms_dets : optional NMS wrapper passed to pred_eval.
    is_docker : bool
        When True, skip parameter loading and run pred_eval with predictor=None.
    """
    if (not logger):
        assert False, 'require a logger'
    datasets = dataset.split(';')
    dataset_paths = dataset_path.split(';')
    imagesets = image_set.split(';')
    output_paths = output_path.split(';')
    # Running class-index offset so categories from successive datasets don't collide.
    categ_index_offs = 20
    for (dataset, dataset_path, image_set, output_path) in zip(datasets, dataset_paths, imagesets, output_paths):
        if (len(image_set.strip()) <= 0):
            continue
        if ('classes_list_fname' not in cfg.dataset):
            classes_list_fname = ''
        else:
            classes_list_fname = cfg.dataset.classes_list_fname
        # NOTE(review): eval() on cfg.symbol / dataset / proposal executes
        # config-supplied strings — acceptable only for trusted config files.
        if has_rpn:
            # End-to-end model: ground-truth roidb, symbol with built-in RPN.
            sym_instance = eval(((cfg.symbol + '.') + cfg.symbol))()
            sym = sym_instance.get_symbol(cfg, is_train=False)
            imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path, classes_list_fname=classes_list_fname, categ_index_offs=categ_index_offs)
            roidb = imdb.gt_roidb()
        else:
            # Separate-proposal model: roidb comes from the named proposal method.
            sym_instance = eval(((cfg.symbol + '.') + cfg.symbol))()
            sym = sym_instance.get_symbol_rcnn(cfg, is_train=False)
            imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path)
            gt_roidb = imdb.gt_roidb()
            roidb = eval((('imdb.' + proposal) + '_roidb'))(gt_roidb)
        categ_index_offs += imdb.num_classes
        test_data = TestLoader(roidb, cfg, batch_size=len(ctx), shuffle=shuffle, has_rpn=has_rpn)
        if (not is_docker):
            (arg_params, aux_params) = load_param(prefix, epoch, process=True)
            data_shape_dict = dict(test_data.provide_data_single)
            sym_instance.infer_shape(data_shape_dict)
            sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict, is_train=False)
            data_names = [k[0] for k in test_data.provide_data_single]
            label_names = None
            # Largest input the predictor must be able to hold (max over cfg.SCALES).
            max_data_shape = [[('data', (1, 3, max([v[0] for v in cfg.SCALES]), max([v[1] for v in cfg.SCALES])))]]
            if (not has_rpn):
                max_data_shape.append(('rois', ((cfg.TEST.PROPOSAL_POST_NMS_TOP_N + 30), 5)))
            predictor = Predictor(sym, data_names, label_names, context=ctx, max_data_shapes=max_data_shape, provide_data=test_data.provide_data, provide_label=test_data.provide_label, arg_params=arg_params, aux_params=aux_params)
            imdb.num_classes = cfg.dataset.NUM_CLASSES
        else:
            predictor = None
        pred_eval(predictor, test_data, imdb, cfg, vis=vis, ignore_cache=ignore_cache, thresh=thresh, logger=logger, nms_dets=nms_dets)
def train_rcnn(cfg, dataset, image_set, root_path, dataset_path, frequent, kvstore, flip, shuffle, resume, ctx, pretrained, epoch, prefix, begin_epoch, end_epoch, train_shared, lr, lr_step, proposal, logger=None, output_path=None):
    """Train the R-CNN head on pre-computed proposals.

    Builds the training symbol, loads/merges the proposal roidb, constructs a
    MutableModule and runs `fit` with SGD + warmup multi-factor LR schedule.

    Parameters
    ----------
    cfg : config object (cfg.symbol, cfg.TRAIN.*, cfg.network.*, cfg.SCALES, ...)
    dataset, image_set, root_path, dataset_path : dataset selection;
        `image_set` may contain '+'-separated subsets that are merged.
    frequent : int, Speedometer logging interval (batches).
    kvstore : str, kvstore type for mod.fit.
    flip, shuffle, resume : bool flags.
    ctx : list of mx.Context; input batch size = BATCH_IMAGES * len(ctx).
    pretrained, epoch : pretrained-model prefix/epoch (when not resuming).
    prefix, begin_epoch, end_epoch : checkpoint prefix and epoch range.
    train_shared : bool, use FIXED_PARAMS_SHARED instead of FIXED_PARAMS.
    lr : float, base learning rate; lr_step : str, ','-separated decay epochs.
    proposal : str, proposal method name for load_proposal_roidb.
    logger : optional logging.Logger (a default is created when missing).
    output_path : str, result path for roidb loading.
    """
    # Re-seed both RNGs (with a random seed) so parallel runs diverge.
    mx.random.seed(np.random.randint(10000))
    np.random.seed(np.random.randint(10000))
    if (not logger):
        logging.basicConfig()
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
    # NOTE(review): eval() on cfg.symbol executes a config-supplied string —
    # acceptable only for trusted config files.
    sym_instance = eval(((cfg.symbol + '.') + cfg.symbol))()
    sym = sym_instance.get_symbol_rcnn(cfg, is_train=True)
    batch_size = len(ctx)
    input_batch_size = (cfg.TRAIN.BATCH_IMAGES * batch_size)
    pprint.pprint(cfg)
    logger.info('training rcnn cfg:{}\n'.format(pprint.pformat(cfg)))
    # Load, merge and filter the roidb across all '+'-joined image sets.
    image_sets = [iset for iset in image_set.split('+')]
    roidbs = [load_proposal_roidb(dataset, image_set, root_path, dataset_path, proposal=proposal, append_gt=True, flip=flip, result_path=output_path) for image_set in image_sets]
    roidb = merge_roidb(roidbs)
    roidb = filter_roidb(roidb, cfg)
    # means/stds normalize bbox regression targets; also needed at checkpoint time.
    (means, stds) = add_bbox_regression_targets(roidb, cfg)
    train_data = ROIIter(roidb, cfg, batch_size=input_batch_size, shuffle=shuffle, ctx=ctx, aspect_grouping=cfg.TRAIN.ASPECT_GROUPING)
    max_data_shape = [('data', (cfg.TRAIN.BATCH_IMAGES, 3, max([v[0] for v in cfg.SCALES]), max([v[1] for v in cfg.SCALES])))]
    data_shape_dict = dict((train_data.provide_data_single + train_data.provide_label_single))
    sym_instance.infer_shape(data_shape_dict)
    if resume:
        print('continue training from ', begin_epoch)
        (arg_params, aux_params) = load_param(prefix, begin_epoch, convert=True)
    else:
        (arg_params, aux_params) = load_param(pretrained, epoch, convert=True)
        sym_instance.init_weight_rcnn(cfg, arg_params, aux_params)
    sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict)
    data_names = [k[0] for k in train_data.provide_data_single]
    label_names = [k[0] for k in train_data.provide_label_single]
    if train_shared:
        # Parameters kept frozen when sharing the backbone with another task.
        fixed_param_prefix = cfg.network.FIXED_PARAMS_SHARED
    else:
        fixed_param_prefix = cfg.network.FIXED_PARAMS
    mod = MutableModule(sym, data_names=data_names, label_names=label_names, logger=logger, context=ctx, max_data_shapes=[max_data_shape for _ in range(batch_size)], fixed_param_prefix=fixed_param_prefix)
    if cfg.TRAIN.RESUME:
        mod._preload_opt_states = ('%s-%04d.states' % (prefix, begin_epoch))
    # Composite metric: accuracy + classification log-loss + bbox L1 loss.
    eval_metric = metric.RCNNAccMetric(cfg)
    cls_metric = metric.RCNNLogLossMetric(cfg)
    bbox_metric = metric.RCNNL1LossMetric(cfg)
    eval_metrics = mx.metric.CompositeEvalMetric()
    for child_metric in [eval_metric, cls_metric, bbox_metric]:
        eval_metrics.add(child_metric)
    batch_end_callback = callback.Speedometer(train_data.batch_size, frequent=frequent)
    epoch_end_callback = [mx.callback.module_checkpoint(mod, prefix, period=1, save_optimizer_states=True), callback.do_checkpoint(prefix, means, stds)]
    # Decay schedule: apply already-elapsed decay steps, then schedule the rest
    # as iteration counts for the warmup multi-factor scheduler.
    base_lr = lr
    lr_factor = cfg.TRAIN.lr_factor
    lr_epoch = [float(epoch) for epoch in lr_step.split(',')]
    lr_epoch_diff = [(epoch - begin_epoch) for epoch in lr_epoch if (epoch > begin_epoch)]
    lr = (base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff))))
    lr_iters = [int(((epoch * len(roidb)) / batch_size)) for epoch in lr_epoch_diff]
    print('lr', lr, 'lr_epoch_diff', lr_epoch_diff, 'lr_iters', lr_iters)
    lr_scheduler = WarmupMultiFactorScheduler(lr_iters, lr_factor, cfg.TRAIN.warmup, cfg.TRAIN.warmup_lr, cfg.TRAIN.warmup_step)
    optimizer_params = {'momentum': cfg.TRAIN.momentum, 'wd': cfg.TRAIN.wd, 'learning_rate': lr, 'lr_scheduler': lr_scheduler, 'rescale_grad': 1.0, 'clip_gradient': None}
    if (not isinstance(train_data, PrefetchingIter)):
        train_data = PrefetchingIter(train_data)
    mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=epoch_end_callback, batch_end_callback=batch_end_callback, kvstore=kvstore, optimizer='sgd', optimizer_params=optimizer_params, arg_params=arg_params, aux_params=aux_params, begin_epoch=begin_epoch, num_epoch=end_epoch)
class BoxAnnotatorOHEMOperator(mx.operator.CustomOp):
    """Online Hard Example Mining (OHEM) for R-CNN training.

    Keeps the `roi_per_img` RoIs with the highest combined loss
    (classification + smooth-L1 bbox) and masks out the rest by setting their
    label to -1 and their bbox weights to 0, so downstream losses ignore them.
    """

    def __init__(self, num_classes, num_reg_classes, rm_last, roi_per_img):
        super(BoxAnnotatorOHEMOperator, self).__init__()
        self._num_classes = num_classes
        self._num_reg_classes = num_reg_classes
        self._roi_per_img = roi_per_img
        # FIX: custom-op parameters reach the Prop as strings; the original code
        # stored rm_last unconverted, so the `self._rm_last == 1` test in
        # forward() could never be true when rm_last was '1'. Coerce to int
        # (backward compatible: int(1) == 1).
        self._rm_last = int(rm_last)

    def forward(self, is_train, req, in_data, out_data, aux):
        # in_data: [cls_score, bbox_pred, labels, bbox_targets, bbox_weights]
        cls_score = in_data[0]
        bbox_pred = in_data[1]
        labels = in_data[2].asnumpy()
        bbox_targets = in_data[3]
        bbox_weights = in_data[4]
        # Per-RoI classification loss: -log p(true class); small eps avoids log(0).
        if config.network.SOFTMAX_ENABLED:
            per_roi_loss_cls = (mx.nd.SoftmaxActivation(cls_score) + 1e-14)
        else:
            per_roi_loss_cls = (cls_score + 1e-14)
        per_roi_loss_cls = per_roi_loss_cls.asnumpy()
        per_roi_loss_cls = per_roi_loss_cls[(np.arange(per_roi_loss_cls.shape[0], dtype='int'), labels.astype('int'))]
        per_roi_loss_cls = ((- 1) * np.log(per_roi_loss_cls))
        per_roi_loss_cls = np.reshape(per_roi_loss_cls, newshape=((- 1),))
        # Per-RoI bbox regression loss (smooth L1, masked by bbox_weights).
        per_roi_loss_bbox = (bbox_weights * mx.nd.smooth_l1((bbox_pred - bbox_targets), scalar=1.0))
        per_roi_loss_bbox = mx.nd.sum(per_roi_loss_bbox, axis=1).asnumpy()
        # Rank RoIs by total loss; everything past the top roi_per_img is masked.
        total_loss = (per_roi_loss_cls + per_roi_loss_bbox)
        top_k_per_roi_loss = np.argsort(total_loss)
        labels_ohem = labels
        if (self._rm_last == 1):
            # Optionally drop the last ("removed") class entirely.
            labels_ohem[np.where((labels_ohem == (self._num_classes - 1)))] = (- 1)
        labels_ohem[top_k_per_roi_loss[::(- 1)][self._roi_per_img:]] = (- 1)
        bbox_weights_ohem = bbox_weights.asnumpy()
        bbox_weights_ohem[top_k_per_roi_loss[::(- 1)][self._roi_per_img:]] = 0
        labels_ohem = mx.nd.array(labels_ohem)
        bbox_weights_ohem = mx.nd.array(bbox_weights_ohem)
        for (ind, val) in enumerate([labels_ohem, bbox_weights_ohem]):
            self.assign(out_data[ind], req[ind], val)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # Pure annotation op: no gradients flow through it.
        for i in range(len(in_grad)):
            self.assign(in_grad[i], req[i], 0)
@mx.operator.register('BoxAnnotatorOHEM')
class BoxAnnotatorOHEMProp(mx.operator.CustomOpProp):
    """Property class for the BoxAnnotatorOHEM custom operator.

    All keyword parameters arrive as strings from the symbol frontend, so each
    one must be converted to its real type here.
    """

    def __init__(self, num_classes, num_reg_classes, rm_last, roi_per_img):
        super(BoxAnnotatorOHEMProp, self).__init__(need_top_grad=False)
        self._num_classes = int(num_classes)
        self._num_reg_classes = int(num_reg_classes)
        self._roi_per_img = int(roi_per_img)
        # FIX: rm_last was the only parameter left unconverted; as a string it
        # made the operator's `rm_last == 1` check silently fail. Coerce to int
        # like the other parameters (backward compatible for int input).
        self._rm_last = int(rm_last)

    def list_arguments(self):
        return ['cls_score', 'bbox_pred', 'labels', 'bbox_targets', 'bbox_weights']

    def list_outputs(self):
        return ['labels_ohem', 'bbox_weights_ohem']

    def infer_shape(self, in_shape):
        # Outputs mirror the shapes of labels and bbox_weights.
        labels_shape = in_shape[2]
        bbox_weights_shape = in_shape[4]
        return (in_shape, [labels_shape, bbox_weights_shape])

    def create_operator(self, ctx, shapes, dtypes):
        return BoxAnnotatorOHEMOperator(self._num_classes, self._num_reg_classes, self._rm_last, self._roi_per_img)

    def declare_backward_dependency(self, out_grad, in_data, out_data):
        # No inputs are needed to compute the (zero) backward pass.
        return []
class resnet_v1_101_fpn_dcn_rcnn(Symbol): def __init__(self): '\n Use __init__ to define parameter network needs\n ' self.shared_param_list = ['offset_p2', 'offset_p3', 'offset_p4', 'offset_p5', 'rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred'] self.shared_param_dict = {} for name in self.shared_param_list: self.shared_param_dict[(name + '_weight')] = mx.sym.Variable((name + '_weight')) self.shared_param_dict[(name + '_bias')] = mx.sym.Variable((name + '_bias')) def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-05): conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True) bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps) scale_conv1 = bn_conv1 conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu') pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max') res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch1 = bn2a_branch1 res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch2a = bn2a_branch2a res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu') res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', 
data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch2b = bn2a_branch2b res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu') res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch2c = bn2a_branch2c res2a = mx.symbol.broadcast_add(*[scale2a_branch1, scale2a_branch2c], name='res2a') res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu') res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale2b_branch2a = bn2b_branch2a res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu') res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale2b_branch2b = bn2b_branch2b res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu') res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale2b_branch2c = bn2b_branch2c res2b = mx.symbol.broadcast_add(*[res2a_relu, scale2b_branch2c], name='res2b') res2b_relu = 
mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu') res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale2c_branch2a = bn2c_branch2a res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu') res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale2c_branch2b = bn2c_branch2b res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu') res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale2c_branch2c = bn2c_branch2c res2c = mx.symbol.broadcast_add(*[res2b_relu, scale2c_branch2c], name='res2c') res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu') res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch1 = bn3a_branch1 res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) 
scale3a_branch2a = bn3a_branch2a res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu') res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch2b = bn3a_branch2b res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu') res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch2c = bn3a_branch2c res3a = mx.symbol.broadcast_add(*[scale3a_branch1, scale3a_branch2c], name='res3a') res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu') res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3b1_branch2a = bn3b1_branch2a res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu') res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3b1_branch2b = bn3b1_branch2b res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu') res3b1_branch2c = 
mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3b1_branch2c = bn3b1_branch2c res3b1 = mx.symbol.broadcast_add(*[res3a_relu, scale3b1_branch2c], name='res3b1') res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu') res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3b2_branch2a = bn3b2_branch2a res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu') res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3b2_branch2b = bn3b2_branch2b res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu') res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3b2_branch2c = bn3b2_branch2c res3b2 = mx.symbol.broadcast_add(*[res3b1_relu, scale3b2_branch2c], name='res3b2') res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu') res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), 
no_bias=True) bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3b3_branch2a = bn3b3_branch2a res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu') if with_dpyramid: res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1)) res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, offset=res3b3_branch2b_offset, num_filter=128, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True) else: res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3b3_branch2b = bn3b3_branch2b res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu') res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3b3_branch2c = bn3b3_branch2c res3b3 = mx.symbol.broadcast_add(*[res3b2_relu, scale3b3_branch2c], name='res3b3') res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu') res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch1 = bn4a_branch1 res4a_branch2a = 
mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch2a = bn4a_branch2a res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu') res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch2b = bn4a_branch2b res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu') res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch2c = bn4a_branch2c res4a = mx.symbol.broadcast_add(*[scale4a_branch1, scale4a_branch2c], name='res4a') res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu') res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b1_branch2a = bn4b1_branch2a res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu') res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', 
data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b1_branch2b = bn4b1_branch2b res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu') res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b1_branch2c = bn4b1_branch2c res4b1 = mx.symbol.broadcast_add(*[res4a_relu, scale4b1_branch2c], name='res4b1') res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu') res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b2_branch2a = bn4b2_branch2a res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu') res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b2_branch2b = bn4b2_branch2b res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu') res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b2_branch2c = bn4b2_branch2c res4b2 = mx.symbol.broadcast_add(*[res4b1_relu, scale4b2_branch2c], 
name='res4b2') res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu') res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b3_branch2a = bn4b3_branch2a res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu') res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b3_branch2b = bn4b3_branch2b res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu') res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b3_branch2c = bn4b3_branch2c res4b3 = mx.symbol.broadcast_add(*[res4b2_relu, scale4b3_branch2c], name='res4b3') res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu') res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b4_branch2a = bn4b4_branch2a res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu') res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, 
num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b4_branch2b = bn4b4_branch2b res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu') res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b4_branch2c = bn4b4_branch2c res4b4 = mx.symbol.broadcast_add(*[res4b3_relu, scale4b4_branch2c], name='res4b4') res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu') res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b5_branch2a = bn4b5_branch2a res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu') res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b5_branch2b = bn4b5_branch2b res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu') res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, 
fix_gamma=False, eps=eps) scale4b5_branch2c = bn4b5_branch2c res4b5 = mx.symbol.broadcast_add(*[res4b4_relu, scale4b5_branch2c], name='res4b5') res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu') res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b6_branch2a = bn4b6_branch2a res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu') res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b6_branch2b = bn4b6_branch2b res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu') res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b6_branch2c = bn4b6_branch2c res4b6 = mx.symbol.broadcast_add(*[res4b5_relu, scale4b6_branch2c], name='res4b6') res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu') res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b7_branch2a = bn4b7_branch2a res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', 
data=scale4b7_branch2a, act_type='relu') res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b7_branch2b = bn4b7_branch2b res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu') res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b7_branch2c = bn4b7_branch2c res4b7 = mx.symbol.broadcast_add(*[res4b6_relu, scale4b7_branch2c], name='res4b7') res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu') res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b8_branch2a = bn4b8_branch2a res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu') res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b8_branch2b = bn4b8_branch2b res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu') res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), 
stride=(1, 1), no_bias=True) bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b8_branch2c = bn4b8_branch2c res4b8 = mx.symbol.broadcast_add(*[res4b7_relu, scale4b8_branch2c], name='res4b8') res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu') res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b9_branch2a = bn4b9_branch2a res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu') res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b9_branch2b = bn4b9_branch2b res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu') res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b9_branch2c = bn4b9_branch2c res4b9 = mx.symbol.broadcast_add(*[res4b8_relu, scale4b9_branch2c], name='res4b9') res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu') res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, 
use_global_stats=True, fix_gamma=False, eps=eps) scale4b10_branch2a = bn4b10_branch2a res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu') res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b10_branch2b = bn4b10_branch2b res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu') res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b10_branch2c = bn4b10_branch2c res4b10 = mx.symbol.broadcast_add(*[res4b9_relu, scale4b10_branch2c], name='res4b10') res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu') res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b11_branch2a = bn4b11_branch2a res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu') res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b11_branch2b = bn4b11_branch2b res4b11_branch2b_relu = 
mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu') res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b11_branch2c = bn4b11_branch2c res4b11 = mx.symbol.broadcast_add(*[res4b10_relu, scale4b11_branch2c], name='res4b11') res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu') res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b12_branch2a = bn4b12_branch2a res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu') res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b12_branch2b = bn4b12_branch2b res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu') res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b12_branch2c = bn4b12_branch2c res4b12 = mx.symbol.broadcast_add(*[res4b11_relu, scale4b12_branch2c], name='res4b12') res4b12_relu = mx.symbol.Activation(name='res4b12_relu', 
data=res4b12, act_type='relu') res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b13_branch2a = bn4b13_branch2a res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu') res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b13_branch2b = bn4b13_branch2b res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu') res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b13_branch2c = bn4b13_branch2c res4b13 = mx.symbol.broadcast_add(*[res4b12_relu, scale4b13_branch2c], name='res4b13') res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu') res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b14_branch2a = bn4b14_branch2a res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu') res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, 
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b14_branch2b = bn4b14_branch2b res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu') res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b14_branch2c = bn4b14_branch2c res4b14 = mx.symbol.broadcast_add(*[res4b13_relu, scale4b14_branch2c], name='res4b14') res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu') res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b15_branch2a = bn4b15_branch2a res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu') res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b15_branch2b = bn4b15_branch2b res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu') res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', 
data=res4b15_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b15_branch2c = bn4b15_branch2c res4b15 = mx.symbol.broadcast_add(*[res4b14_relu, scale4b15_branch2c], name='res4b15') res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu') res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b16_branch2a = bn4b16_branch2a res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu') res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b16_branch2b = bn4b16_branch2b res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu') res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b16_branch2c = bn4b16_branch2c res4b16 = mx.symbol.broadcast_add(*[res4b15_relu, scale4b16_branch2c], name='res4b16') res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu') res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) 
scale4b17_branch2a = bn4b17_branch2a res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu') res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b17_branch2b = bn4b17_branch2b res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu') res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b17_branch2c = bn4b17_branch2c res4b17 = mx.symbol.broadcast_add(*[res4b16_relu, scale4b17_branch2c], name='res4b17') res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu') res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b18_branch2a = bn4b18_branch2a res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu') res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b18_branch2b = bn4b18_branch2b res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', 
data=scale4b18_branch2b, act_type='relu') res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b18_branch2c = bn4b18_branch2c res4b18 = mx.symbol.broadcast_add(*[res4b17_relu, scale4b18_branch2c], name='res4b18') res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu') res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b19_branch2a = bn4b19_branch2a res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu') res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b19_branch2b = bn4b19_branch2b res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu') res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b19_branch2c = bn4b19_branch2c res4b19 = mx.symbol.broadcast_add(*[res4b18_relu, scale4b19_branch2c], name='res4b19') res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu') res4b20_branch2a = 
mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b20_branch2a = bn4b20_branch2a res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu') res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b20_branch2b = bn4b20_branch2b res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu') res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b20_branch2c = bn4b20_branch2c res4b20 = mx.symbol.broadcast_add(*[res4b19_relu, scale4b20_branch2c], name='res4b20') res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu') res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b21_branch2a = bn4b21_branch2a res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu') res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), 
no_bias=True) bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b21_branch2b = bn4b21_branch2b res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu') res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b21_branch2c = bn4b21_branch2c res4b21 = mx.symbol.broadcast_add(*[res4b20_relu, scale4b21_branch2c], name='res4b21') res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu') res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b22_branch2a = bn4b22_branch2a res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu') if with_dpyramid: res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1)) res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, offset=res4b22_branch2b_offset, num_filter=256, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True) else: res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) 
        # ---- tail of get_resnet_backbone (the `def` is above this view) ----
        # res4b22: last identity unit of stage 4. BN runs in inference mode
        # (use_global_stats=True); the `scale*` aliases stand in for the Caffe
        # Scale layers that were folded into BatchNorm.
        scale4b22_branch2b = bn4b22_branch2b
        res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu')
        res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
        bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
        scale4b22_branch2c = bn4b22_branch2c
        res4b22 = mx.symbol.broadcast_add(*[res4b21_relu, scale4b22_branch2c], name='res4b22')
        res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
        # Stage 5 resolution policy: dilated mode keeps spatial size (stride 1,
        # dilation 2); otherwise downsample by 2 with no dilation.
        if with_dilated:
            res5_stride = (1, 1)
            res5_dilate = (2, 2)
        else:
            res5_stride = (2, 2)
            res5_dilate = (1, 1)
        # res5a: first stage-5 unit, with a projection shortcut (branch1).
        res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
        bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5a_branch2a = bn5a_branch2a
        res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
        # Optionally replace the 3x3 conv with a deformable conv; the offset
        # branch predicts 72 channels (= 4 groups * 2 * 3 * 3 offsets).
        if with_dconv:
            res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
            res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
        else:
            res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
        bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5a_branch2b = bn5a_branch2b
        res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
        res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
        bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5a_branch2c = bn5a_branch2c
        res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
        bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5a_branch1 = bn5a_branch1
        res5a = mx.symbol.broadcast_add(*[scale5a_branch1, scale5a_branch2c], name='res5a')
        res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
        # res5b: identity unit, same optional deformable 3x3.
        res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
        bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5b_branch2a = bn5b_branch2a
        res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
        if with_dconv:
            res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
            # NOTE(review): unlike res5a, no explicit stride=(1, 1) here; the
            # operator default is presumably stride 1 — confirm against the
            # DeformableConvolution API.
            res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
        else:
            res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
        bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5b_branch2b = bn5b_branch2b
        res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
        res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
        bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5b_branch2c = bn5b_branch2c
        res5b = mx.symbol.broadcast_add(*[res5a_relu, scale5b_branch2c], name='res5b')
        res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
        # res5c: final stage-5 identity unit.
        res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
        bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5c_branch2a = bn5c_branch2a
        res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
        if with_dconv:
            res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
            res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
        else:
            res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
        bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5c_branch2b = bn5c_branch2b
        res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
        res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
        bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5c_branch2c = bn5c_branch2c
        res5c = mx.symbol.broadcast_add(*[res5b_relu, scale5c_branch2c], name='res5c')
        res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
        # Expose one feature map per stage (C2..C5) for the FPN neck.
        return (res2c_relu, res3b3_relu, res4b22_relu, res5c_relu)

    def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256):
        """Build the FPN neck from backbone features c2..c5.

        Each stage is reduced to `feature_dim` channels by a 1x1 conv, merged
        top-down via nearest-neighbor 2x upsampling + element-wise sum, then
        smoothed by a 3x3 conv. P6 is a stride-2 conv on c5 for the coarsest
        RPN level. Returns (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6).
        """
        # Lateral 1x1 connections.
        fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1')
        fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1')
        fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1')
        fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1')
        # Top-down pathway: upsample coarser level and add the lateral map.
        fpn_p5_upsample = mx.symbol.UpSampling(fpn_p5_1x1, scale=2, sample_type='nearest', name='fpn_p5_upsample')
        fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_upsample, fpn_p4_1x1], name='fpn_p4_sum')
        fpn_p4_upsample = mx.symbol.UpSampling(fpn_p4_plus, scale=2, sample_type='nearest', name='fpn_p4_upsample')
        fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_upsample, fpn_p3_1x1], name='fpn_p3_sum')
        fpn_p3_upsample = mx.symbol.UpSampling(fpn_p3_plus, scale=2, sample_type='nearest', name='fpn_p3_upsample')
        fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_upsample, fpn_p2_1x1], name='fpn_p2_sum')
        # P6 comes straight from c5 (extra stride-2 level for the RPN).
        fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6')
        # 3x3 smoothing convs remove upsampling aliasing.
        fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5')
        fpn_p4 = mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4')
        fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3')
        fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2')
        return (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6)

    def get_rpn_subnet(self, data, num_anchors, suffix):
        """Per-pyramid-level RPN head with weights shared across levels.

        Weights/biases come from self.shared_param_dict so every level uses
        the same parameters; `suffix` only disambiguates symbol names.
        Returns (cls_score_reshaped, cls_prob_reshaped, bbox_pred_reshaped,
        bbox_pred_raw) — the reshaped forms flatten spatial positions so the
        per-level outputs can be concatenated along dim 2.
        """
        rpn_conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512, name=('rpn_conv_' + suffix), weight=self.shared_param_dict['rpn_conv_weight'], bias=self.shared_param_dict['rpn_conv_bias'])
        rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name=('rpn_relu_' + suffix))
        rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=(2 * num_anchors), name=('rpn_cls_score_' + suffix), weight=self.shared_param_dict['rpn_cls_score_weight'], bias=self.shared_param_dict['rpn_cls_score_bias'])
        rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=(4 * num_anchors), name=('rpn_bbox_pred_' + suffix), weight=self.shared_param_dict['rpn_bbox_pred_weight'], bias=self.shared_param_dict['rpn_bbox_pred_bias'])
        # (N, 2*A, H, W) -> (N, 2, A*H, W): put the fg/bg pair on axis 1 for
        # channel-wise softmax; 0 keeps a dim, -1 infers it.
        rpn_cls_score_t1 = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, (- 1), 0), name=('rpn_cls_score_t1_' + suffix))
        rpn_cls_score_t2 = mx.sym.Reshape(data=rpn_cls_score_t1, shape=(0, 2, (- 1)), name=('rpn_cls_score_t2_' + suffix))
        rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_t1, mode='channel', name=('rpn_cls_prob_' + suffix))
        rpn_cls_prob_t = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, (2 * num_anchors), (- 1), 0), name=('rpn_cls_prob_t_' + suffix))
        rpn_bbox_pred_t = mx.sym.Reshape(data=rpn_bbox_pred, shape=(0, 0, (- 1)), name=('rpn_bbox_pred_t_' + suffix))
        return (rpn_cls_score_t2, rpn_cls_prob_t, rpn_bbox_pred_t, rpn_bbox_pred)

    def get_deformable_roipooling(self, name, data, rois, output_dim, spatial_scale, param_name, group_size=1, pooled_size=7, sample_per_part=4, part_size=7):
        """Deformable PS-RoI pooling with a learned per-part offset field.

        First pools with no_trans=True to get features for the offset
        predictor, then an FC layer (lr_mult=0.01, weights shared via
        `param_name`) predicts part_size*part_size*2 offsets, and a second
        pooling applies them (trans_std=0.1 scales the offsets).
        """
        offset = mx.contrib.sym.DeformablePSROIPooling(name=(('offset_' + name) + '_t'), data=data, rois=rois, group_size=group_size, pooled_size=pooled_size, sample_per_part=sample_per_part, no_trans=True, part_size=part_size, output_dim=output_dim, spatial_scale=spatial_scale)
        offset = mx.sym.FullyConnected(name=('offset_' + name), data=offset, num_hidden=((part_size * part_size) * 2), lr_mult=0.01, weight=self.shared_param_dict[(('offset_' + param_name) + '_weight')], bias=self.shared_param_dict[(('offset_' + param_name) + '_bias')])
        offset_reshape = mx.sym.Reshape(data=offset, shape=((- 1), 2, part_size, part_size), name=('offset_reshape_' + name))
        output = mx.contrib.sym.DeformablePSROIPooling(name=('deformable_roi_pool_' + name), data=data, rois=rois, trans=offset_reshape, group_size=group_size, pooled_size=pooled_size, sample_per_part=sample_per_part, no_trans=False, part_size=part_size, output_dim=output_dim, spatial_scale=spatial_scale, trans_std=0.1)
        return output

    def get_symbol(self, cfg, is_train=True):
        """Assemble the full detection graph (backbone + FPN + RPN + R-CNN).

        Training returns Group([rpn_cls_output, rpn_bbox_loss, cls_prob,
        bbox_loss, BlockGrad(rcnn_label)]); inference returns
        Group([rois, cls_prob, bbox_pred]). Also caches the graph in self.sym.
        """
        num_classes = cfg.dataset.NUM_CLASSES
        # Class-agnostic regression uses 2 "classes" (bg/fg) for bbox deltas.
        num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
        data = mx.sym.Variable(name='data')
        im_info = mx.sym.Variable(name='im_info')
        (res2, res3, res4, res5) = self.get_resnet_backbone(data, with_dpyramid=True, with_dconv=True)
        (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6) = self.get_fpn_feature(res2, res3, res4, res5)
        # One shared-weight RPN head per pyramid level.
        (rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2) = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
        (rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3) = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
        (rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4) = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
        (rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5) = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
        (rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6) = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
        # Keyword inputs for the pyramid_proposal custom op, keyed by stride.
        rpn_cls_prob_dict = {'rpn_cls_prob_stride64': rpn_prob_p6, 'rpn_cls_prob_stride32': rpn_prob_p5, 'rpn_cls_prob_stride16': rpn_prob_p4, 'rpn_cls_prob_stride8': rpn_prob_p3, 'rpn_cls_prob_stride4': rpn_prob_p2}
        rpn_bbox_pred_dict = {'rpn_bbox_pred_stride64': rpn_bbox_pred_p6, 'rpn_bbox_pred_stride32': rpn_bbox_pred_p5, 'rpn_bbox_pred_stride16': rpn_bbox_pred_p4, 'rpn_bbox_pred_stride8': rpn_bbox_pred_p3, 'rpn_bbox_pred_stride4': rpn_bbox_pred_p2}
        # NOTE(review): concatenating .items() views with `+` is Python-2
        # only (and `cPickle` below is the Py2 module name) — under Py3 this
        # raises TypeError; confirm the targeted interpreter.
        arg_dict = dict((rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items()))
        if is_train:
            rpn_label = mx.sym.Variable(name='label')
            rpn_bbox_target = mx.sym.Variable(name='bbox_target')
            rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
            gt_boxes = mx.sym.Variable(name='gt_boxes')
            # Concatenate all levels along the flattened-position axis so one
            # softmax / smooth-L1 covers the whole pyramid.
            rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
            rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
            rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid', use_ignore=True, ignore_label=(- 1), name='rpn_cls_prob')
            rpn_bbox_loss = (rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target)))
            rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=(1.0 / cfg.TRAIN.RPN_BATCH_SIZE))
            aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N, 'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE}
            rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
            gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=((- 1), 5), name='gt_boxes_reshape')
            # Sample proposals and produce R-CNN training targets; cfg is
            # serialized because custom-op attributes must be strings.
            (rois, label, bbox_target, bbox_weight) = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES, batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
        else:
            aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N, 'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE}
            rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
        # Offset FC parameters for deformable RoI pooling, one pair per
        # pyramid level; small lr_mult keeps offset learning gentle.
        offset_p2_weight = mx.sym.Variable(name='offset_p2_weight', dtype=np.float32, lr_mult=0.01)
        offset_p3_weight = mx.sym.Variable(name='offset_p3_weight', dtype=np.float32, lr_mult=0.01)
        offset_p4_weight = mx.sym.Variable(name='offset_p4_weight', dtype=np.float32, lr_mult=0.01)
        offset_p5_weight = mx.sym.Variable(name='offset_p5_weight', dtype=np.float32, lr_mult=0.01)
        offset_p2_bias = mx.sym.Variable(name='offset_p2_bias', dtype=np.float32, lr_mult=0.01)
        offset_p3_bias = mx.sym.Variable(name='offset_p3_bias', dtype=np.float32, lr_mult=0.01)
        offset_p4_bias = mx.sym.Variable(name='offset_p4_bias', dtype=np.float32, lr_mult=0.01)
        offset_p5_bias = mx.sym.Variable(name='offset_p5_bias', dtype=np.float32, lr_mult=0.01)
        # Custom op assigns each RoI to a pyramid level and pools (with
        # deformable offsets) from the corresponding feature map.
        roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5, offset_weight_p2=offset_p2_weight, offset_bias_p2=offset_p2_bias, offset_weight_p3=offset_p3_weight, offset_bias_p3=offset_p3_bias, offset_weight_p4=offset_p4_weight, offset_bias_p4=offset_p4_bias, offset_weight_p5=offset_p5_weight, offset_bias_p5=offset_p5_bias, rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling', with_deformable=True)
        # 2-FC box head.
        fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
        fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
        fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
        fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
        cls_score = mx.symbol.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes)
        bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=(num_reg_classes * 4))
        if is_train:
            if cfg.TRAIN.ENABLE_OHEM:
                # Online hard example mining re-weights labels/bbox weights.
                (labels_ohem, bbox_weights_ohem) = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight)
                cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1))
                bbox_loss_ = (bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
                bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM))
                rcnn_label = labels_ohem
            else:
                cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
                bbox_loss_ = (bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
                bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS))
                rcnn_label = label
            # Reshape per-image for the metric/loss consumers.
            rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1)), name='label_reshape')
            cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
            bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_loss_reshape')
            group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])
        else:
            cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
            cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
            bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_pred_reshape')
            group = mx.sym.Group([rois, cls_prob, bbox_pred])
        self.sym = group
        return group

    def init_weight_rcnn(self, cfg, arg_params, aux_params):
        """Gaussian-init (std 0.01) the R-CNN head weights, zero the biases."""
        arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight'])
        arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias'])
        arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight'])
        arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias'])
        arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_weight'])
        arg_params['cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_bias'])
        arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight'])
        arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias'])

    def init_deformable_convnet(self, cfg, arg_params, aux_params):
        """Zero-init all deformable-conv offset branches.

        Zero offsets make each deformable conv start as a plain conv, so
        pretrained backbone weights remain valid at step 0.
        """
        arg_params['res5a_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_weight'])
        arg_params['res5a_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_bias'])
        arg_params['res5b_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_weight'])
        arg_params['res5b_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_bias'])
        arg_params['res5c_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_weight'])
        arg_params['res5c_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_bias'])
        arg_params['res3b3_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_weight'])
        arg_params['res3b3_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_bias'])
        arg_params['res4b22_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_weight'])
        arg_params['res4b22_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_bias'])

    def init_weight_fpn(self, cfg, arg_params, aux_params):
        """Gaussian-init (std 0.01) all FPN lateral/output convs, zero biases."""
        arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight'])
        arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias'])
        arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight'])
        arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias'])
        arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight'])
        arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias'])
        arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight'])
        arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias'])
        arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_weight'])
        arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias'])
        arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight'])
        arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias'])
        arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight'])
        arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias'])
        arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight'])
        arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias'])
        arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight'])
        arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias'])

    def init_weight(self, cfg, arg_params, aux_params):
        """Initialize all newly introduced parameters in arg/aux_params.

        Builds fresh initial values in side dicts, then copies into the
        caller's dicts only entries that are missing or whose shape differs
        (i.e. never clobbers loaded pretrained weights of matching shape).
        """
        (arg_params2, aux_params2) = ({}, {})
        # Shared RPN / offset parameters: offsets start at zero, the rest
        # get Gaussian init; all biases are zeroed.
        for name in self.shared_param_list:
            if ('offset' in name):
                arg_params2[(name + '_weight')] = mx.nd.zeros(shape=self.arg_shape_dict[(name + '_weight')])
            else:
                arg_params2[(name + '_weight')] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[(name + '_weight')])
            arg_params2[(name + '_bias')] = mx.nd.zeros(shape=self.arg_shape_dict[(name + '_bias')])
        self.init_deformable_convnet(cfg, arg_params2, aux_params2)
        self.init_weight_rcnn(cfg, arg_params2, aux_params2)
        self.init_weight_fpn(cfg, arg_params2, aux_params2)
        for k in arg_params2:
            if ((k not in arg_params) or (arg_params[k].shape != arg_params2[k].shape)):
                arg_params[k] = arg_params2[k]
        for k in aux_params2:
            if (k not in aux_params):
                aux_params[k] = aux_params2[k]
class resnet_v1_101_fpn_dcn_rcnn_oneshot_v3(Symbol): def __init__(self): '\n Use __init__ to define parameter network needs\n ' self.shared_param_list = ['offset_p2', 'offset_p3', 'offset_p4', 'offset_p5', 'rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred'] self.shared_param_dict = {} for name in self.shared_param_list: self.shared_param_dict[(name + '_weight')] = mx.sym.Variable((name + '_weight')) self.shared_param_dict[(name + '_bias')] = mx.sym.Variable((name + '_bias')) self.constants_dict = {} def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-05): conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True) bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps) scale_conv1 = bn_conv1 conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu') pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max') res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch1 = bn2a_branch1 res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch2a = bn2a_branch2a res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu') res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2a_branch2b = 
mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch2b = bn2a_branch2b res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu') res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch2c = bn2a_branch2c res2a = mx.symbol.broadcast_add(*[scale2a_branch1, scale2a_branch2c], name='res2a') res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu') res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale2b_branch2a = bn2b_branch2a res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu') res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale2b_branch2b = bn2b_branch2b res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu') res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale2b_branch2c = bn2b_branch2c res2b = mx.symbol.broadcast_add(*[res2a_relu, scale2b_branch2c], name='res2b') 
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu') res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale2c_branch2a = bn2c_branch2a res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu') res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale2c_branch2b = bn2c_branch2b res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu') res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale2c_branch2c = bn2c_branch2c res2c = mx.symbol.broadcast_add(*[res2b_relu, scale2c_branch2c], name='res2c') res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu') res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch1 = bn3a_branch1 res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, 
eps=eps) scale3a_branch2a = bn3a_branch2a res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu') res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch2b = bn3a_branch2b res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu') res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch2c = bn3a_branch2c res3a = mx.symbol.broadcast_add(*[scale3a_branch1, scale3a_branch2c], name='res3a') res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu') res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3b1_branch2a = bn3b1_branch2a res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu') res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3b1_branch2b = bn3b1_branch2b res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu') res3b1_branch2c = 
mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3b1_branch2c = bn3b1_branch2c res3b1 = mx.symbol.broadcast_add(*[res3a_relu, scale3b1_branch2c], name='res3b1') res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu') res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3b2_branch2a = bn3b2_branch2a res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu') res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3b2_branch2b = bn3b2_branch2b res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu') res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3b2_branch2c = bn3b2_branch2c res3b2 = mx.symbol.broadcast_add(*[res3b1_relu, scale3b2_branch2c], name='res3b2') res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu') res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), 
no_bias=True) bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3b3_branch2a = bn3b3_branch2a res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu') if with_dpyramid: res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1)) res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, offset=res3b3_branch2b_offset, num_filter=128, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True) else: res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3b3_branch2b = bn3b3_branch2b res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu') res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3b3_branch2c = bn3b3_branch2c res3b3 = mx.symbol.broadcast_add(*[res3b2_relu, scale3b3_branch2c], name='res3b3') res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu') res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch1 = bn4a_branch1 res4a_branch2a = 
mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch2a = bn4a_branch2a res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu') res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch2b = bn4a_branch2b res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu') res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch2c = bn4a_branch2c res4a = mx.symbol.broadcast_add(*[scale4a_branch1, scale4a_branch2c], name='res4a') res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu') res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b1_branch2a = bn4b1_branch2a res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu') res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', 
data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b1_branch2b = bn4b1_branch2b res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu') res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b1_branch2c = bn4b1_branch2c res4b1 = mx.symbol.broadcast_add(*[res4a_relu, scale4b1_branch2c], name='res4b1') res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu') res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b2_branch2a = bn4b2_branch2a res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu') res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b2_branch2b = bn4b2_branch2b res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu') res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b2_branch2c = bn4b2_branch2c res4b2 = mx.symbol.broadcast_add(*[res4b1_relu, scale4b2_branch2c], 
name='res4b2') res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu') res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b3_branch2a = bn4b3_branch2a res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu') res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b3_branch2b = bn4b3_branch2b res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu') res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b3_branch2c = bn4b3_branch2c res4b3 = mx.symbol.broadcast_add(*[res4b2_relu, scale4b3_branch2c], name='res4b3') res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu') res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b4_branch2a = bn4b4_branch2a res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu') res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, 
num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b4_branch2b = bn4b4_branch2b res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu') res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b4_branch2c = bn4b4_branch2c res4b4 = mx.symbol.broadcast_add(*[res4b3_relu, scale4b4_branch2c], name='res4b4') res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu') res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b5_branch2a = bn4b5_branch2a res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu') res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b5_branch2b = bn4b5_branch2b res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu') res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, 
fix_gamma=False, eps=eps) scale4b5_branch2c = bn4b5_branch2c res4b5 = mx.symbol.broadcast_add(*[res4b4_relu, scale4b5_branch2c], name='res4b5') res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu') res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b6_branch2a = bn4b6_branch2a res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu') res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b6_branch2b = bn4b6_branch2b res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu') res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b6_branch2c = bn4b6_branch2c res4b6 = mx.symbol.broadcast_add(*[res4b5_relu, scale4b6_branch2c], name='res4b6') res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu') res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b7_branch2a = bn4b7_branch2a res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', 
data=scale4b7_branch2a, act_type='relu') res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b7_branch2b = bn4b7_branch2b res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu') res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b7_branch2c = bn4b7_branch2c res4b7 = mx.symbol.broadcast_add(*[res4b6_relu, scale4b7_branch2c], name='res4b7') res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu') res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b8_branch2a = bn4b8_branch2a res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu') res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b8_branch2b = bn4b8_branch2b res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu') res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), 
stride=(1, 1), no_bias=True) bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b8_branch2c = bn4b8_branch2c res4b8 = mx.symbol.broadcast_add(*[res4b7_relu, scale4b8_branch2c], name='res4b8') res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu') res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b9_branch2a = bn4b9_branch2a res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu') res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b9_branch2b = bn4b9_branch2b res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu') res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b9_branch2c = bn4b9_branch2c res4b9 = mx.symbol.broadcast_add(*[res4b8_relu, scale4b9_branch2c], name='res4b9') res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu') res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, 
use_global_stats=True, fix_gamma=False, eps=eps) scale4b10_branch2a = bn4b10_branch2a res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu') res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b10_branch2b = bn4b10_branch2b res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu') res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b10_branch2c = bn4b10_branch2c res4b10 = mx.symbol.broadcast_add(*[res4b9_relu, scale4b10_branch2c], name='res4b10') res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu') res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b11_branch2a = bn4b11_branch2a res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu') res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b11_branch2b = bn4b11_branch2b res4b11_branch2b_relu = 
mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu') res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b11_branch2c = bn4b11_branch2c res4b11 = mx.symbol.broadcast_add(*[res4b10_relu, scale4b11_branch2c], name='res4b11') res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu') res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b12_branch2a = bn4b12_branch2a res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu') res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b12_branch2b = bn4b12_branch2b res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu') res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b12_branch2c = bn4b12_branch2c res4b12 = mx.symbol.broadcast_add(*[res4b11_relu, scale4b12_branch2c], name='res4b12') res4b12_relu = mx.symbol.Activation(name='res4b12_relu', 
data=res4b12, act_type='relu') res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b13_branch2a = bn4b13_branch2a res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu') res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b13_branch2b = bn4b13_branch2b res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu') res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b13_branch2c = bn4b13_branch2c res4b13 = mx.symbol.broadcast_add(*[res4b12_relu, scale4b13_branch2c], name='res4b13') res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu') res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b14_branch2a = bn4b14_branch2a res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu') res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, 
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b14_branch2b = bn4b14_branch2b res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu') res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b14_branch2c = bn4b14_branch2c res4b14 = mx.symbol.broadcast_add(*[res4b13_relu, scale4b14_branch2c], name='res4b14') res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu') res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b15_branch2a = bn4b15_branch2a res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu') res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b15_branch2b = bn4b15_branch2b res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu') res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', 
data=res4b15_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b15_branch2c = bn4b15_branch2c res4b15 = mx.symbol.broadcast_add(*[res4b14_relu, scale4b15_branch2c], name='res4b15') res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu') res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b16_branch2a = bn4b16_branch2a res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu') res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b16_branch2b = bn4b16_branch2b res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu') res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b16_branch2c = bn4b16_branch2c res4b16 = mx.symbol.broadcast_add(*[res4b15_relu, scale4b16_branch2c], name='res4b16') res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu') res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) 
scale4b17_branch2a = bn4b17_branch2a res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu') res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b17_branch2b = bn4b17_branch2b res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu') res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b17_branch2c = bn4b17_branch2c res4b17 = mx.symbol.broadcast_add(*[res4b16_relu, scale4b17_branch2c], name='res4b17') res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu') res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b18_branch2a = bn4b18_branch2a res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu') res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b18_branch2b = bn4b18_branch2b res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', 
data=scale4b18_branch2b, act_type='relu') res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b18_branch2c = bn4b18_branch2c res4b18 = mx.symbol.broadcast_add(*[res4b17_relu, scale4b18_branch2c], name='res4b18') res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu') res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b19_branch2a = bn4b19_branch2a res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu') res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b19_branch2b = bn4b19_branch2b res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu') res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b19_branch2c = bn4b19_branch2c res4b19 = mx.symbol.broadcast_add(*[res4b18_relu, scale4b19_branch2c], name='res4b19') res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu') res4b20_branch2a = 
mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b20_branch2a = bn4b20_branch2a res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu') res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b20_branch2b = bn4b20_branch2b res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu') res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b20_branch2c = bn4b20_branch2c res4b20 = mx.symbol.broadcast_add(*[res4b19_relu, scale4b20_branch2c], name='res4b20') res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu') res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b21_branch2a = bn4b21_branch2a res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu') res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), 
no_bias=True) bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b21_branch2b = bn4b21_branch2b res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu') res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b21_branch2c = bn4b21_branch2c res4b21 = mx.symbol.broadcast_add(*[res4b20_relu, scale4b21_branch2c], name='res4b21') res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu') res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b22_branch2a = bn4b22_branch2a res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu') if with_dpyramid: res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1)) res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, offset=res4b22_branch2b_offset, num_filter=256, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True) else: res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) 
scale4b22_branch2b = bn4b22_branch2b res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu') res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b22_branch2c = bn4b22_branch2c res4b22 = mx.symbol.broadcast_add(*[res4b21_relu, scale4b22_branch2c], name='res4b22') res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu') if with_dilated: res5_stride = (1, 1) res5_dilate = (2, 2) else: res5_stride = (2, 2) res5_dilate = (1, 1) res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True) bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale5a_branch2a = bn5a_branch2a res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu') if with_dconv: res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate) res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True) else: res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True) bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale5a_branch2b = bn5a_branch2b 
        res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
        # 1x1 expand to 2048 channels
        res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
        bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5a_branch2c = bn5a_branch2c
        # projection shortcut (branch1): 1x1/res5_stride to match channels and stride
        res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
        bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5a_branch1 = bn5a_branch1
        res5a = mx.symbol.broadcast_add(*[scale5a_branch1, scale5a_branch2c], name='res5a')
        res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
        # --- stage 5, unit b ---
        res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
        bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5b_branch2a = bn5b_branch2a
        res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
        if with_dconv:
            res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
            res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
        else:
            res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
        bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5b_branch2b = bn5b_branch2b
        res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
        res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
        bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5b_branch2c = bn5b_branch2c
        res5b = mx.symbol.broadcast_add(*[res5a_relu, scale5b_branch2c], name='res5b')
        res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
        # --- stage 5, unit c ---
        res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
        bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5c_branch2a = bn5c_branch2a
        res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
        if with_dconv:
            res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
            res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
        else:
            res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
        bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5c_branch2b = bn5c_branch2b
        res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
        res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
        bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
        scale5c_branch2c = bn5c_branch2c
        res5c = mx.symbol.broadcast_add(*[res5b_relu, scale5c_branch2c], name='res5c')
        res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
        # one feature map per stage (strides 4/8/16/32 relative to the input,
        # assuming the standard ResNet stem -- TODO confirm against full file)
        return (res2c_relu, res3b3_relu, res4b22_relu, res5c_relu)

    def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256):
        """Build the FPN top-down pyramid from backbone stages c2..c5.

        Returns (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6), each with
        `feature_dim` channels; p6 is an extra stride-2 conv on c5.
        """
        # lateral 1x1 convs project every stage to a common channel count
        fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1')
        fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1')
        fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1')
        fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1')
        # top-down pathway: nearest-neighbour 2x upsample, then element-wise sum
        fpn_p5_upsample = mx.symbol.UpSampling(fpn_p5_1x1, scale=2, sample_type='nearest', name='fpn_p5_upsample')
        fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_upsample, fpn_p4_1x1], name='fpn_p4_sum')
        fpn_p4_upsample = mx.symbol.UpSampling(fpn_p4_plus, scale=2, sample_type='nearest', name='fpn_p4_upsample')
        fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_upsample, fpn_p3_1x1], name='fpn_p3_sum')
        fpn_p3_upsample = mx.symbol.UpSampling(fpn_p3_plus, scale=2, sample_type='nearest', name='fpn_p3_upsample')
        fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_upsample, fpn_p2_1x1], name='fpn_p2_sum')
        # p6 comes straight from c5 with stride 2 (used by RPN only, per FPN)
        fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6')
        # 3x3 smoothing convs remove upsampling aliasing
        fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5')
        fpn_p4 = mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4')
        fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3')
        fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2')
        return (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6)

    def get_rpn_subnet(self, data, num_anchors, suffix):
        """RPN head for one pyramid level.

        Weights/biases come from self.shared_param_dict, so all levels share
        the same RPN parameters; `suffix` only disambiguates symbol names.
        Returns (cls_score [for the loss], cls_prob [for proposals],
        bbox_pred reshaped [for the loss], bbox_pred raw [for proposals]).
        """
        rpn_conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512, name=('rpn_conv_' + suffix), weight=self.shared_param_dict['rpn_conv_weight'], bias=self.shared_param_dict['rpn_conv_bias'])
        rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name=('rpn_relu_' + suffix))
        rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=(2 * num_anchors), name=('rpn_cls_score_' + suffix), weight=self.shared_param_dict['rpn_cls_score_weight'], bias=self.shared_param_dict['rpn_cls_score_bias'])
        rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=(4 * num_anchors), name=('rpn_bbox_pred_' + suffix), weight=self.shared_param_dict['rpn_bbox_pred_weight'], bias=self.shared_param_dict['rpn_bbox_pred_bias'])
        # fold anchors into the spatial axis so softmax runs over the 2 fg/bg channels
        rpn_cls_score_t1 = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, (- 1), 0), name=('rpn_cls_score_t1_' + suffix))
        rpn_cls_score_t2 = mx.sym.Reshape(data=rpn_cls_score_t1, shape=(0, 2, (- 1)), name=('rpn_cls_score_t2_' + suffix))
        rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_t1, mode='channel', name=('rpn_cls_prob_' + suffix))
        rpn_cls_prob_t = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, (2 * num_anchors), (- 1), 0), name=('rpn_cls_prob_t_' + suffix))
        rpn_bbox_pred_t = mx.sym.Reshape(data=rpn_bbox_pred, shape=(0, 0, (- 1)), name=('rpn_bbox_pred_t_' + suffix))
        return (rpn_cls_score_t2, rpn_cls_prob_t, rpn_bbox_pred_t, rpn_bbox_pred)

    def get_deformable_roipooling(self, name, data, rois, output_dim, spatial_scale, param_name, group_size=1, pooled_size=7, sample_per_part=4, part_size=7):
        """Deformable PS-RoI pooling: a first no-trans pooling pass feeds an FC
        that predicts per-part (x, y) offsets, which the second pass applies.

        Offset FC params are shared via self.shared_param_dict[param_name];
        lr_mult=0.01 keeps the offset branch learning slowly.
        """
        offset = mx.contrib.sym.DeformablePSROIPooling(name=(('offset_' + name) + '_t'), data=data, rois=rois, group_size=group_size, pooled_size=pooled_size, sample_per_part=sample_per_part, no_trans=True, part_size=part_size, output_dim=output_dim, spatial_scale=spatial_scale)
        offset = mx.sym.FullyConnected(name=('offset_' + name), data=offset, num_hidden=((part_size * part_size) * 2), lr_mult=0.01, weight=self.shared_param_dict[(('offset_' + param_name) + '_weight')], bias=self.shared_param_dict[(('offset_' + param_name) + '_bias')])
        offset_reshape = mx.sym.Reshape(data=offset, shape=((- 1), 2, part_size, part_size), name=('offset_reshape_' + name))
        output = mx.contrib.sym.DeformablePSROIPooling(name=('deformable_roi_pool_' + name), data=data, rois=rois, trans=offset_reshape, group_size=group_size, pooled_size=pooled_size, sample_per_part=sample_per_part, no_trans=False, part_size=part_size, output_dim=output_dim, spatial_scale=spatial_scale, trans_std=0.1)
        return output

    def get_constant_symbol(self, const_val):
        """Return a gradient-blocked constant symbol of shape (1,), memoized
        per value in self.constants_dict so each constant exists once."""
        if (const_val in self.constants_dict):
            return self.constants_dict[const_val]
        name = 'const_eq_{0}'.format(const_val)
        c = mx.sym.Variable(name, shape=(1,), init=MyConstant(value=[const_val]))
        c = mx.sym.BlockGrad(c)
        self.constants_dict[const_val] = c
        return c

    def cos_sim_2_dist_generic(self, cos_sim, x=None, y=None, x_is_norm=True, y_is_norm=True):
        """Convert a cosine-similarity (dot-product) matrix into squared
        Euclidean distances: ||x||^2 - 2*x.y + ||y||^2.

        When an operand is already L2-normalized its squared norm is the
        constant 1; otherwise the norm is computed from the provided symbol.
        """
        if x_is_norm:
            x_norm = self.get_constant_symbol(1)
        else:
            assert (x is not None), 'if x is not L2 normalized then x must be provided'
            x_norm = mx.sym.sum_axis(mx.sym.square(x), axis=0, keepdims=True)
            # transpose so the row-norms broadcast against cos_sim's rows
            x_norm = mx.sym.transpose(x_norm, axes=(1, 0))
        if y_is_norm:
            y_norm = self.get_constant_symbol(1)
        else:
            assert (y is not None), 'if y is not L2 normalized then y must be provided'
            y_norm = mx.sym.sum_axis(mx.sym.square(y), axis=0, keepdims=True)
        dist = mx.sym.broadcast_add(mx.sym.broadcast_sub(x_norm, mx.sym.broadcast_mul(self.get_constant_symbol(2), cos_sim)), y_norm)
        return dist

    def cos_sim_2_dist(self, cos_sim, cfg=None, embd=None, reps=None):
        """Embedding-vs-representatives variant of cos_sim_2_dist_generic:
        turns embd . reps similarities into squared distances."""
        if cfg.network.EMBED_L2_NORM:
            embd_norm = self.get_constant_symbol(1)
        else:
            assert (embd is not None), 'if embedding is not L2 normalized then embd must be provided'
            embd_norm = mx.sym.sum_axis(mx.sym.square(embd), axis=1, keepdims=True)
            # (N, 1) -> (N, 1, 1) so it broadcasts over (reps, classes)
            embd_norm = mx.sym.reshape(embd_norm, shape=(0, 1, 1))
        if cfg.network.REP_L2_NORM:
            reps_norm = self.get_constant_symbol(1)
        else:
            assert (reps is not None), 'if representatives are not L2 normalized then reps must be provided'
            reps_norm = mx.sym.sum_axis(mx.sym.square(reps), axis=0, keepdims=True)
        dist = mx.sym.broadcast_add(mx.sym.broadcast_sub(embd_norm, mx.sym.broadcast_mul(self.get_constant_symbol(2), cos_sim)), reps_norm)
        return dist

    def get_symbol(self, cfg, is_train=True):
        """Assemble the full detection graph: backbone -> FPN -> per-level RPN
        -> proposals -> deformable FPN RoI pooling -> 2 FC layers -> a
        representative-based (embedding-distance) classifier plus bbox head.
        """
        num_classes = cfg.dataset.NUM_CLASSES
        # bbox regression is either class-agnostic (2: bg/fg) or per-class
        num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
        data = mx.sym.Variable(name='data')
        im_info = mx.sym.Variable(name='im_info')
        (res2, res3, res4, res5) = self.get_resnet_backbone(data, with_dpyramid=True, with_dconv=True)
        (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6) = self.get_fpn_feature(res2, res3, res4, res5)
        # one (weight-shared) RPN head per pyramid level
        (rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2) = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
        (rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3) = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
        (rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4) = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
        (rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5) = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
        (rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6) = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
        # inputs for the pyramid_proposal custom op, keyed by feature stride
        rpn_cls_prob_dict = {'rpn_cls_prob_stride64': rpn_prob_p6, 'rpn_cls_prob_stride32': rpn_prob_p5, 'rpn_cls_prob_stride16': rpn_prob_p4, 'rpn_cls_prob_stride8': rpn_prob_p3, 'rpn_cls_prob_stride4': rpn_prob_p2}
        rpn_bbox_pred_dict = {'rpn_bbox_pred_stride64': rpn_bbox_pred_p6, 'rpn_bbox_pred_stride32': rpn_bbox_pred_p5, 'rpn_bbox_pred_stride16': rpn_bbox_pred_p4, 'rpn_bbox_pred_stride8': rpn_bbox_pred_p3, 'rpn_bbox_pred_stride4': rpn_bbox_pred_p2}
        # Python 2 idiom: dict.items() returns lists, concatenated with '+'
        arg_dict = dict((rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items()))
        if is_train:
            # RPN targets are not needed when the base net is frozen
            if (not cfg.network.base_net_lock):
                rpn_label = mx.sym.Variable(name='label')
                rpn_bbox_target = mx.sym.Variable(name='bbox_target')
                rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
            gt_boxes = mx.sym.Variable(name='gt_boxes')
            if (not cfg.network.base_net_lock):
                # concatenate all levels along the anchor axis for a single RPN loss
                rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
                rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
                rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid', use_ignore=True, ignore_label=(- 1), name='rpn_cls_prob')
                rpn_bbox_loss = (rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target)))
                rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=(1.0 / cfg.TRAIN.RPN_BATCH_SIZE))
            aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N, 'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE}
            rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
            gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=((- 1), 5), name='gt_boxes_reshape')
            # sample rois and produce per-roi classification/regression targets
            (rois, label, bbox_target, bbox_weight) = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES, batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
        else:
            aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N, 'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE}
            rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
        # per-level offset FC parameters for deformable RoI pooling (slow lr)
        offset_p2_weight = mx.sym.Variable(name='offset_p2_weight', dtype=np.float32, lr_mult=0.01)
        offset_p3_weight = mx.sym.Variable(name='offset_p3_weight', dtype=np.float32, lr_mult=0.01)
        offset_p4_weight = mx.sym.Variable(name='offset_p4_weight', dtype=np.float32, lr_mult=0.01)
        offset_p5_weight = mx.sym.Variable(name='offset_p5_weight', dtype=np.float32, lr_mult=0.01)
        offset_p2_bias = mx.sym.Variable(name='offset_p2_bias', dtype=np.float32, lr_mult=0.01)
        offset_p3_bias = mx.sym.Variable(name='offset_p3_bias', dtype=np.float32, lr_mult=0.01)
        offset_p4_bias = mx.sym.Variable(name='offset_p4_bias', dtype=np.float32, lr_mult=0.01)
        offset_p5_bias = mx.sym.Variable(name='offset_p5_bias', dtype=np.float32, lr_mult=0.01)
        # custom op assigns each roi to a pyramid level and pools (deformably)
        roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5, offset_weight_p2=offset_p2_weight, offset_bias_p2=offset_p2_bias, offset_weight_p3=offset_p3_weight, offset_bias_p3=offset_p3_bias, offset_weight_p4=offset_p4_weight, offset_bias_p4=offset_p4_bias, offset_weight_p5=offset_p5_weight, offset_bias_p5=offset_p5_bias, rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling', with_deformable=True)
        fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
        fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
        fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
        fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
        if (is_train and cfg.network.base_net_lock):
            # freeze everything up to here; only the heads below will train
            fc_new_2_relu = mx.sym.BlockGrad(fc_new_2_relu)
            rois = mx.sym.BlockGrad(rois)
            label = mx.sym.BlockGrad(label)
            bbox_target = mx.sym.BlockGrad(bbox_target)
            bbox_weight = mx.sym.BlockGrad(bbox_weight)
        # --- class representatives: learned (EMBEDDING_DIM, REPS_PER_CLASS,
        # num_classes-1) tensor materialized as FC weights on a constant input ---
        lr_mult = cfg.TRAIN.REPS_LR_MULT
        if cfg.network.SEPARABLE_REPS:
            # per-class base vector + per-representative offset, broadcast-added
            base = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives_base', num_hidden=(cfg.network.EMBEDDING_DIM * (num_classes - 1)), no_bias=True, lr_mult=lr_mult)
            offset = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives_offset', num_hidden=(cfg.network.EMBEDDING_DIM * cfg.network.REPS_PER_CLASS), no_bias=True, lr_mult=lr_mult)
            base = mx.sym.reshape(base, shape=(cfg.network.EMBEDDING_DIM, 1, (num_classes - 1)))
            offset = mx.sym.reshape(offset, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, 1))
            representatives = mx.sym.broadcast_add(base, offset, name='fc_representatives')
        else:
            representatives = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives', num_hidden=((cfg.network.EMBEDDING_DIM * cfg.network.REPS_PER_CLASS) * (num_classes - 1)), no_bias=True, lr_mult=lr_mult)
            representatives = mx.sym.reshape(representatives, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, (num_classes - 1)))
        if cfg.network.REP_L2_NORM:
            # L2-normalize along the embedding axis (transpose so it is 'channel')
            representatives = mx.sym.transpose(mx.sym.L2Normalization(mx.sym.transpose(representatives, axes=(1, 0, 2)), mode='channel'), axes=(1, 0, 2))
        extra_outputs = [mx.sym.BlockGrad(representatives)]
        # per-roi embedding
        batch_embed = mx.symbol.FullyConnected(name='batch_embed', data=fc_new_2_relu, num_hidden=cfg.network.EMBEDDING_DIM)
        if cfg.network.EMBED_L2_NORM:
            batch_embed = mx.sym.L2Normalization(data=batch_embed, name='batch_embed_nrm', mode='instance')
        # (rois, EMBEDDING_DIM) x (EMBEDDING_DIM, reps, classes) similarities,
        # then converted to squared distances
        cos_sim = mx.sym.dot(batch_embed, representatives, transpose_b=False)
        all_cls_rep_dist = self.cos_sim_2_dist(cos_sim, cfg, embd=batch_embed, reps=representatives)
        if (is_train and cfg.network.EMBED_LOSS_ENABLED):
            # min distance per class, optionally via log-sum-exp smooth min
            if cfg.network.SMOOTH_MIN:
                sum_exp_dist = mx.sym.sum_axis(mx.sym.exp(mx.sym.broadcast_mul(all_cls_rep_dist, self.get_constant_symbol(cfg.network.SMOOTH_CONST))), axis=1, keepdims=True)
                all_cls_min_dist = mx.sym.broadcast_div(mx.sym.log(sum_exp_dist), self.get_constant_symbol(cfg.network.SMOOTH_CONST))
            else:
                all_cls_min_dist = mx.sym.min_axis(all_cls_rep_dist, axis=1, keepdims=True)
            all_cls_min_dist = mx.sym.reshape(all_cls_min_dist, shape=(0, (num_classes - 1)))
            # one-hot selectors over foreground classes (background column sliced off);
            # the 1000 'on_value' pushes the true class out of the false-class min
            mod_true_class = mx.sym.slice_axis(mx.sym.one_hot(label, depth=num_classes, on_value=1, off_value=0), axis=1, begin=1, end=None)
            mod_false_class = mx.sym.slice_axis(mx.sym.one_hot(label, depth=num_classes, on_value=1000, off_value=0), axis=1, begin=1, end=None)
            min_dist_true = mx.sym.sum_axis(mx.sym.broadcast_mul(all_cls_min_dist, mod_true_class), axis=1)
            min_dist_false = mx.sym.min_axis(mx.sym.broadcast_add(all_cls_min_dist, mod_false_class), axis=1)
            # margin hinge: d(true) - d(closest wrong class) + margin, clipped at 0
            embed_loss_val = mx.sym.broadcast_sub(min_dist_true, min_dist_false)
            embed_loss_val = mx.sym.broadcast_add(embed_loss_val, self.get_constant_symbol(cfg.network.EMBED_LOSS_MARGIN))
            embed_loss_val = mx.sym.relu(embed_loss_val)
            embed_loss_val = mx.sym.reshape(embed_loss_val, shape=(0, 1))
        if (is_train and cfg.network.REPS_CLS_LOSS):
            # block-diagonal mask marking representative pairs of the same class
            mask_block_ones = mx.sym.ones(shape=(cfg.network.REPS_PER_CLASS, cfg.network.REPS_PER_CLASS))
            mask_block_zeros = mx.sym.zeros(shape=(cfg.network.REPS_PER_CLASS, cfg.network.REPS_PER_CLASS))
            mask = None
            for iC1 in range((num_classes - 1)):
                mask_row = None
                for iC2 in range((num_classes - 1)):
                    if (iC1 == iC2):
                        cblock = mask_block_ones
                    else:
                        cblock = mask_block_zeros
                    if (mask_row is None):
                        mask_row = cblock
                    else:
                        mask_row = mx.sym.concat(mask_row, cblock, dim=1)
                if (mask is None):
                    mask = mask_row
                else:
                    mask = mx.sym.concat(mask, mask_row, dim=0)
            # additive +1000 masks exclude same-class (NC) or cross-class (C) pairs
            mask_NC = mx.sym.broadcast_mul(self.get_constant_symbol(1000), mask)
            mask_C = mx.sym.broadcast_sub(self.get_constant_symbol(1000), mask_NC)
            mask_C = mx.sym.BlockGrad(mask_C)
            mask_NC = mx.sym.BlockGrad(mask_NC)
            # all-pairs representative distances
            R = mx.sym.reshape(mx.sym.transpose(representatives, axes=(0, 2, 1)), shape=(0, (- 1)))
            R2R_cos_sim = mx.sym.dot(R, R, transpose_a=True)
            R2R = self.cos_sim_2_dist_generic(R2R_cos_sim, x=R, y=R, x_is_norm=cfg.network.REP_L2_NORM, y_is_norm=cfg.network.REP_L2_NORM)
            C2C = mx.sym.broadcast_add(R2R, mask_C)
            C2NC = mx.sym.broadcast_add(R2R, mask_NC)
            # k=2 then slice [1:2]: nearest same-class rep excluding self (d=0)
            min_dist_C = mx.sym.topk(C2C, axis=1, k=2, ret_typ='value', is_ascend=True)
            min_dist_C = mx.sym.slice_axis(min_dist_C, axis=1, begin=1, end=2)
            min_dist_NC = mx.sym.min_axis(C2NC, axis=1, keepdims=True)
            # hinge pulling same-class reps together relative to other classes
            reps_cls_loss_val = mx.sym.broadcast_sub(min_dist_C, min_dist_NC)
            reps_cls_loss_val = mx.sym.broadcast_add(reps_cls_loss_val, self.get_constant_symbol(cfg.network.EMBED_LOSS_MARGIN))
            reps_cls_loss_val = mx.sym.relu(reps_cls_loss_val)
        # Gaussian-kernel class scores: exp(-d / (2*sigma^2)), best rep per class
        probs = mx.sym.exp(mx.sym.broadcast_mul(all_cls_rep_dist, self.get_constant_symbol(((- 0.5) / float((cfg.network.SIGMA ** 2))))))
        comb_cls_scores = mx.sym.max_axis(probs, axis=1, keepdims=False)
        comb_cls_scores = mx.sym.broadcast_add(comb_cls_scores, self.get_constant_symbol(1e-07))
        # background score complements the best foreground score
        bg_scores = mx.sym.broadcast_sub(self.get_constant_symbol((1 + 1e-07)), mx.sym.max_axis(comb_cls_scores, axis=1, keepdims=True))
        cls_score = mx.sym.concat(bg_scores, comb_cls_scores, dim=1, name='bg_concat')
        cls_score = mx.sym.reshape(cls_score, shape=(0, (- 1)))
        if cfg.network.SOFTMAX_ENABLED:
            # temperature scaling before the softmax loss
            cls_score = mx.sym.broadcast_mul(self.get_constant_symbol(cfg.network.SOFTMAX_MUL), cls_score)
        if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
            # auxiliary plain linear classifier head
            cls_score_lin = mx.symbol.FullyConnected(name='cls_score_lin', data=fc_new_2_relu, num_hidden=num_classes)
        bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=(num_reg_classes * 4))
        if is_train:
            if cfg.TRAIN.ENABLE_OHEM:
                # online hard example mining selects the hardest rois for the loss
                rm_last = int(cfg.TRAIN.RM_LAST)
                (labels_ohem, bbox_weights_ohem) = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight, rm_last=rm_last)
                if cfg.network.SOFTMAX_ENABLED:
                    cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1))
                else:
                    # scores are already probabilities: hand-rolled cross-entropy,
                    # with ignored (-1) labels masked out via mx.sym.where
                    zz = mx.sym.zeros_like(label)
                    cls_prob = mx.sym.BlockGrad(cls_score)
                    invalid = mx.sym.broadcast_equal(labels_ohem, self.get_constant_symbol((- 1)))
                    minoh_labels = mx.sym.one_hot(mx.sym.broadcast_add(mx.sym.cast(invalid, dtype='float32'), labels_ohem), depth=num_classes, on_value=(- 1), off_value=0)
                    ce_loss = mx.sym.where(invalid, x=zz, y=mx.sym.sum(mx.sym.broadcast_mul(minoh_labels, mx.sym.log(mx.sym.broadcast_add(cls_score, self.get_constant_symbol(1e-07)))), axis=1))
                    ce_loss = mx.sym.MakeLoss(ce_loss, normalization='valid')
                bbox_loss_ = (bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
                bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM))
                rcnn_label = labels_ohem
                if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
                    cls_prob_lin = mx.sym.SoftmaxOutput(name='cls_prob_lin', data=cls_score_lin, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1))
                if cfg.network.EMBED_LOSS_ENABLED:
                    # gate the per-roi embedding hinge by the OHEM roi selection
                    embed_loss_ = (mx.sym.slice_axis(bbox_weights_ohem, axis=1, begin=(- 1), end=None) * embed_loss_val)
                    embed_loss = mx.sym.MakeLoss(name='embed_loss', data=embed_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM))
            else:
                # no OHEM: standard losses over all sampled rois
                cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
                bbox_loss_ = (bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
                bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS))
                rcnn_label = label
                if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
                    cls_prob_lin = mx.sym.SoftmaxOutput(name='cls_prob_lin', data=cls_score_lin, label=label, normalization='valid')
                if cfg.network.EMBED_LOSS_ENABLED:
                    embed_loss_ = (mx.sym.slice_axis(bbox_weight, axis=1, begin=0, end=1) * embed_loss_val)
                    embed_loss = mx.sym.MakeLoss(name='embed_loss', data=embed_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS))
            # collect the optional auxiliary outputs/losses
            if cfg.network.EMBED_LOSS_ENABLED:
                extra_outputs += [embed_loss]
            if cfg.network.REPS_CLS_LOSS:
                extra_outputs += [mx.sym.MakeLoss(name='reps_cls_loss', data=reps_cls_loss_val, grad_scale=(1.0 / (cfg.network.REPS_PER_CLASS * (num_classes - 1))))]
            if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
                extra_outputs += [cls_prob_lin]
            if (not cfg.network.SOFTMAX_ENABLED):
                extra_outputs += [ce_loss]
            extra_outputs += [mx.sym.BlockGrad(rois), mx.sym.identity(mx.sym.BlockGrad(batch_embed), name='psp_final_embed')]
            # reshape per-roi outputs back to (batch, roi, ...) for the metrics
            rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1)), name='label_reshape')
            cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
            bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_loss_reshape')
            if cfg.network.base_net_lock:
                # frozen base: RPN outputs are excluded from the training group
                group = mx.sym.Group(([cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)] + extra_outputs))
            else:
                group = mx.sym.Group(([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)] + extra_outputs))
        else:
            # inference graph: probabilities, boxes, embeddings and raw scores
            cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
            cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
            bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_pred_reshape')
            cls_score_orig = cls_score
            if cfg.network.SOFTMAX_ENABLED:
                # undo the training-time temperature scaling
                cls_score_orig = mx.sym.broadcast_div(cls_score_orig, self.get_constant_symbol(cfg.network.SOFTMAX_MUL))
            cls_score_orig = mx.sym.Reshape(data=cls_score_orig, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes))
            group = mx.sym.Group([rois, cls_prob, bbox_pred, mx.sym.identity(batch_embed, name='psp_final_embed'), mx.sym.identity(cls_score_orig, name='cls_score')])
        self.sym = group
        return group

    def init_weight_rcnn(self, cfg, arg_params, aux_params):
        """Initialize the RCNN head parameters in-place: small-variance normal
        weights, zero biases, plus the class-representative tensor."""
        arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight'])
        arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias'])
        arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight'])
        arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias'])
        arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight'])
        arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias'])
        arg_params['batch_embed_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['batch_embed_weight'])
        arg_params['batch_embed_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['batch_embed_bias'])
        name = 'fc_representatives'
        if cfg.network.SEPARABLE_REPS:
            arg_params[(name + '_base_weight')] = mx.random.normal(0, 0.1, shape=self.arg_shape_dict[(name + '_base_weight')])
            arg_params[(name + '_offset_weight')] = mx.random.normal(0, 0.1, shape=self.arg_shape_dict[(name + '_offset_weight')])
        elif cfg.network.SEPARABLE_REPS_INIT:
            # non-separable reps, but initialized as per-class center + jitter
            C = mx.random.normal(0, 0.1, shape=(cfg.network.EMBEDDING_DIM, 1, (cfg.dataset.NUM_CLASSES - 1)))
            R = mx.random.normal(0, 0.05, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, 1))
            CR = (C + R)
            arg_params[(name + '_weight')] = mx.nd.reshape(CR, shape=((- 1), 1))
        else:
            arg_params[(name + '_weight')] = mx.random.normal(0, 0.1, shape=self.arg_shape_dict[(name + '_weight')])
        if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
            arg_params['cls_score_lin_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_lin_weight'])
            arg_params['cls_score_lin_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_lin_bias'])

    def init_deformable_convnet(self, cfg, arg_params, aux_params):
        """Zero-initialize every deformable-conv offset branch so training
        starts from a regular (undeformed) sampling grid."""
        arg_params['res5a_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_weight'])
        arg_params['res5a_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_bias'])
        arg_params['res5b_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_weight'])
        arg_params['res5b_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_bias'])
        arg_params['res5c_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_weight'])
        arg_params['res5c_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_bias'])
        arg_params['res3b3_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_weight'])
        arg_params['res3b3_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_bias'])
        arg_params['res4b22_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_weight'])
        arg_params['res4b22_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_bias'])

    def init_weight_fpn(self, cfg, arg_params, aux_params):
        """Initialize FPN lateral (1x1) and output (3x3) conv parameters."""
        arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight'])
        arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias'])
        arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight'])
        arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias'])
        arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight'])
        arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias'])
        arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight'])
        arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias'])
        arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_weight'])
        arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias'])
        arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight'])
        arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias'])
        arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight'])
        arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias'])
        arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight'])
        arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias'])
        arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight'])
        arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias'])

    def init_weight(self, cfg, arg_params, aux_params):
        """Build a full set of freshly-initialized head parameters, then merge
        them into (arg_params, aux_params) in-place.

        Merge policy: when cfg.network.pretrained_weights_are_priority is set,
        a fresh value is taken only if the pretrained dict lacks the key (or
        has it with a mismatching shape); otherwise fresh values always win.
        """
        (arg_params2, aux_params2) = ({}, {})
        for name in self.shared_param_list:
            if ('offset' in name):
                # deformable offsets start at zero (regular sampling grid)
                arg_params2[(name + '_weight')] = mx.nd.zeros(shape=self.arg_shape_dict[(name + '_weight')])
            else:
                arg_params2[(name + '_weight')] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[(name + '_weight')])
            arg_params2[(name + '_bias')] = mx.nd.zeros(shape=self.arg_shape_dict[(name + '_bias')])
        self.init_deformable_convnet(cfg, arg_params2, aux_params2)
        self.init_weight_rcnn(cfg, arg_params2, aux_params2)
        self.init_weight_fpn(cfg, arg_params2, aux_params2)
        for k in arg_params2:
            if cfg.network.pretrained_weights_are_priority:
                if ((k not in arg_params) or (arg_params[k].shape != arg_params2[k].shape)):
                    arg_params[k] = arg_params2[k]
            else:
                arg_params[k] = arg_params2[k]
        for k in aux_params2:
            if cfg.network.pretrained_weights_are_priority:
                if (k not in aux_params):
                    aux_params[k] = aux_params2[k]
            else:
                aux_params[k] = aux_params2[k]
class resnet_v1_101_fpn_dcn_rcnn_rep_noemb(Symbol):
    """ResNet-101 backbone with FPN, deformable convolutions and an R-CNN
    head ("rep_noemb" variant).

    RPN parameters (and the per-level offset predictors) are shared across
    FPN levels via the symbol Variables created in ``__init__``.
    """

    def __init__(self):
        """Create the shared-parameter Variables the network needs.

        ``shared_param_dict`` maps ``<name>_weight`` / ``<name>_bias`` to
        ``mx.sym.Variable`` objects that are reused by every FPN level, so
        the RPN head and offset convs are tied across pyramid levels.
        """
        self.shared_param_list = ['offset_p2', 'offset_p3', 'offset_p4', 'offset_p5', 'rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
        self.shared_param_dict = {}
        for name in self.shared_param_list:
            self.shared_param_dict[(name + '_weight')] = mx.sym.Variable((name + '_weight'))
            self.shared_param_dict[(name + '_bias')] = mx.sym.Variable((name + '_bias'))
        # Populated elsewhere with constant symbols/values used by the graph.
        self.constants_dict = {}

    def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-05):
        # Stem: 7x7/2 conv + BN (frozen stats) + ReLU + 3x3/2 max pool.
        # BatchNorm layers use use_global_stats=True throughout, i.e. the
        # pretrained moving statistics are used and never updated.
        conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
        bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
        scale_conv1 = bn_conv1
        conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
        pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
        # Stage 2, unit res2a: projection shortcut (branch1) + bottleneck
        # (branch2a/2b/2c: 1x1 reduce, 3x3, 1x1 expand).
        res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
        bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
        scale2a_branch1 = bn2a_branch1
        res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
        bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
        scale2a_branch2a = bn2a_branch2a
        res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
        res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
        bn2a_branch2b = 
mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch2b = bn2a_branch2b res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu') res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch2c = bn2a_branch2c res2a = mx.symbol.broadcast_add(*[scale2a_branch1, scale2a_branch2c], name='res2a') res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu') res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale2b_branch2a = bn2b_branch2a res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu') res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale2b_branch2b = bn2b_branch2b res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu') res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale2b_branch2c = bn2b_branch2c res2b = mx.symbol.broadcast_add(*[res2a_relu, scale2b_branch2c], name='res2b') 
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu') res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale2c_branch2a = bn2c_branch2a res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu') res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale2c_branch2b = bn2c_branch2b res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu') res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale2c_branch2c = bn2c_branch2c res2c = mx.symbol.broadcast_add(*[res2b_relu, scale2c_branch2c], name='res2c') res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu') res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch1 = bn3a_branch1 res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, 
eps=eps) scale3a_branch2a = bn3a_branch2a res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu') res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch2b = bn3a_branch2b res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu') res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch2c = bn3a_branch2c res3a = mx.symbol.broadcast_add(*[scale3a_branch1, scale3a_branch2c], name='res3a') res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu') res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3b1_branch2a = bn3b1_branch2a res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu') res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3b1_branch2b = bn3b1_branch2b res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu') res3b1_branch2c = 
mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3b1_branch2c = bn3b1_branch2c res3b1 = mx.symbol.broadcast_add(*[res3a_relu, scale3b1_branch2c], name='res3b1') res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu') res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3b2_branch2a = bn3b2_branch2a res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu') res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3b2_branch2b = bn3b2_branch2b res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu') res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3b2_branch2c = bn3b2_branch2c res3b2 = mx.symbol.broadcast_add(*[res3b1_relu, scale3b2_branch2c], name='res3b2') res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu') res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), 
no_bias=True) bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3b3_branch2a = bn3b3_branch2a res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu') if with_dpyramid: res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1)) res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, offset=res3b3_branch2b_offset, num_filter=128, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True) else: res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3b3_branch2b = bn3b3_branch2b res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu') res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3b3_branch2c = bn3b3_branch2c res3b3 = mx.symbol.broadcast_add(*[res3b2_relu, scale3b3_branch2c], name='res3b3') res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu') res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch1 = bn4a_branch1 res4a_branch2a = 
mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch2a = bn4a_branch2a res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu') res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch2b = bn4a_branch2b res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu') res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch2c = bn4a_branch2c res4a = mx.symbol.broadcast_add(*[scale4a_branch1, scale4a_branch2c], name='res4a') res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu') res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b1_branch2a = bn4b1_branch2a res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu') res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', 
data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b1_branch2b = bn4b1_branch2b res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu') res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b1_branch2c = bn4b1_branch2c res4b1 = mx.symbol.broadcast_add(*[res4a_relu, scale4b1_branch2c], name='res4b1') res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu') res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b2_branch2a = bn4b2_branch2a res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu') res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b2_branch2b = bn4b2_branch2b res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu') res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b2_branch2c = bn4b2_branch2c res4b2 = mx.symbol.broadcast_add(*[res4b1_relu, scale4b2_branch2c], 
name='res4b2') res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu') res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b3_branch2a = bn4b3_branch2a res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu') res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b3_branch2b = bn4b3_branch2b res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu') res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b3_branch2c = bn4b3_branch2c res4b3 = mx.symbol.broadcast_add(*[res4b2_relu, scale4b3_branch2c], name='res4b3') res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu') res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b4_branch2a = bn4b4_branch2a res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu') res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, 
num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b4_branch2b = bn4b4_branch2b res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu') res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b4_branch2c = bn4b4_branch2c res4b4 = mx.symbol.broadcast_add(*[res4b3_relu, scale4b4_branch2c], name='res4b4') res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu') res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b5_branch2a = bn4b5_branch2a res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu') res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b5_branch2b = bn4b5_branch2b res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu') res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, 
fix_gamma=False, eps=eps) scale4b5_branch2c = bn4b5_branch2c res4b5 = mx.symbol.broadcast_add(*[res4b4_relu, scale4b5_branch2c], name='res4b5') res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu') res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b6_branch2a = bn4b6_branch2a res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu') res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b6_branch2b = bn4b6_branch2b res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu') res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b6_branch2c = bn4b6_branch2c res4b6 = mx.symbol.broadcast_add(*[res4b5_relu, scale4b6_branch2c], name='res4b6') res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu') res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b7_branch2a = bn4b7_branch2a res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', 
data=scale4b7_branch2a, act_type='relu') res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b7_branch2b = bn4b7_branch2b res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu') res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b7_branch2c = bn4b7_branch2c res4b7 = mx.symbol.broadcast_add(*[res4b6_relu, scale4b7_branch2c], name='res4b7') res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu') res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b8_branch2a = bn4b8_branch2a res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu') res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b8_branch2b = bn4b8_branch2b res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu') res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), 
stride=(1, 1), no_bias=True) bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b8_branch2c = bn4b8_branch2c res4b8 = mx.symbol.broadcast_add(*[res4b7_relu, scale4b8_branch2c], name='res4b8') res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu') res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b9_branch2a = bn4b9_branch2a res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu') res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b9_branch2b = bn4b9_branch2b res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu') res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b9_branch2c = bn4b9_branch2c res4b9 = mx.symbol.broadcast_add(*[res4b8_relu, scale4b9_branch2c], name='res4b9') res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu') res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, 
use_global_stats=True, fix_gamma=False, eps=eps) scale4b10_branch2a = bn4b10_branch2a res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu') res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b10_branch2b = bn4b10_branch2b res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu') res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b10_branch2c = bn4b10_branch2c res4b10 = mx.symbol.broadcast_add(*[res4b9_relu, scale4b10_branch2c], name='res4b10') res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu') res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b11_branch2a = bn4b11_branch2a res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu') res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b11_branch2b = bn4b11_branch2b res4b11_branch2b_relu = 
mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu') res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b11_branch2c = bn4b11_branch2c res4b11 = mx.symbol.broadcast_add(*[res4b10_relu, scale4b11_branch2c], name='res4b11') res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu') res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b12_branch2a = bn4b12_branch2a res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu') res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b12_branch2b = bn4b12_branch2b res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu') res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b12_branch2c = bn4b12_branch2c res4b12 = mx.symbol.broadcast_add(*[res4b11_relu, scale4b12_branch2c], name='res4b12') res4b12_relu = mx.symbol.Activation(name='res4b12_relu', 
data=res4b12, act_type='relu') res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b13_branch2a = bn4b13_branch2a res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu') res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b13_branch2b = bn4b13_branch2b res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu') res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b13_branch2c = bn4b13_branch2c res4b13 = mx.symbol.broadcast_add(*[res4b12_relu, scale4b13_branch2c], name='res4b13') res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu') res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b14_branch2a = bn4b14_branch2a res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu') res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, 
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b14_branch2b = bn4b14_branch2b res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu') res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b14_branch2c = bn4b14_branch2c res4b14 = mx.symbol.broadcast_add(*[res4b13_relu, scale4b14_branch2c], name='res4b14') res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu') res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b15_branch2a = bn4b15_branch2a res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu') res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b15_branch2b = bn4b15_branch2b res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu') res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', 
data=res4b15_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b15_branch2c = bn4b15_branch2c res4b15 = mx.symbol.broadcast_add(*[res4b14_relu, scale4b15_branch2c], name='res4b15') res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu') res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b16_branch2a = bn4b16_branch2a res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu') res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b16_branch2b = bn4b16_branch2b res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu') res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b16_branch2c = bn4b16_branch2c res4b16 = mx.symbol.broadcast_add(*[res4b15_relu, scale4b16_branch2c], name='res4b16') res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu') res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) 
scale4b17_branch2a = bn4b17_branch2a res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu') res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b17_branch2b = bn4b17_branch2b res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu') res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b17_branch2c = bn4b17_branch2c res4b17 = mx.symbol.broadcast_add(*[res4b16_relu, scale4b17_branch2c], name='res4b17') res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu') res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b18_branch2a = bn4b18_branch2a res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu') res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b18_branch2b = bn4b18_branch2b res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', 
data=scale4b18_branch2b, act_type='relu') res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b18_branch2c = bn4b18_branch2c res4b18 = mx.symbol.broadcast_add(*[res4b17_relu, scale4b18_branch2c], name='res4b18') res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu') res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b19_branch2a = bn4b19_branch2a res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu') res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b19_branch2b = bn4b19_branch2b res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu') res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b19_branch2c = bn4b19_branch2c res4b19 = mx.symbol.broadcast_add(*[res4b18_relu, scale4b19_branch2c], name='res4b19') res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu') res4b20_branch2a = 
mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b20_branch2a = bn4b20_branch2a res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu') res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b20_branch2b = bn4b20_branch2b res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu') res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b20_branch2c = bn4b20_branch2c res4b20 = mx.symbol.broadcast_add(*[res4b19_relu, scale4b20_branch2c], name='res4b20') res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu') res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b21_branch2a = bn4b21_branch2a res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu') res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), 
no_bias=True) bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b21_branch2b = bn4b21_branch2b res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu') res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b21_branch2c = bn4b21_branch2c res4b21 = mx.symbol.broadcast_add(*[res4b20_relu, scale4b21_branch2c], name='res4b21') res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu') res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b22_branch2a = bn4b22_branch2a res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu') if with_dpyramid: res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1)) res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, offset=res4b22_branch2b_offset, num_filter=256, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True) else: res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) 
scale4b22_branch2b = bn4b22_branch2b res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu') res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b22_branch2c = bn4b22_branch2c res4b22 = mx.symbol.broadcast_add(*[res4b21_relu, scale4b22_branch2c], name='res4b22') res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu') if with_dilated: res5_stride = (1, 1) res5_dilate = (2, 2) else: res5_stride = (2, 2) res5_dilate = (1, 1) res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True) bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale5a_branch2a = bn5a_branch2a res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu') if with_dconv: res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate) res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True) else: res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True) bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale5a_branch2b = bn5a_branch2b 
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu') res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale5a_branch2c = bn5a_branch2c res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True) bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale5a_branch1 = bn5a_branch1 res5a = mx.symbol.broadcast_add(*[scale5a_branch1, scale5a_branch2c], name='res5a') res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu') res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale5b_branch2a = bn5b_branch2a res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu') if with_dconv: res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate) res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True) else: res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True) bn5b_branch2b = 
mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale5b_branch2b = bn5b_branch2b res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu') res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale5b_branch2c = bn5b_branch2c res5b = mx.symbol.broadcast_add(*[res5a_relu, scale5b_branch2c], name='res5b') res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu') res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale5c_branch2a = bn5c_branch2a res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu') if with_dconv: res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate) res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True) else: res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True) bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale5c_branch2b = bn5c_branch2b res5c_branch2b_relu = 
mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu') res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale5c_branch2c = bn5c_branch2c res5c = mx.symbol.broadcast_add(*[res5b_relu, scale5c_branch2c], name='res5c') res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu') return (res2c_relu, res3b3_relu, res4b22_relu, res5c_relu) def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256): fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1') fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1') fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1') fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1') fpn_p5_upsample = mx.symbol.UpSampling(fpn_p5_1x1, scale=2, sample_type='nearest', name='fpn_p5_upsample') fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_upsample, fpn_p4_1x1], name='fpn_p4_sum') fpn_p4_upsample = mx.symbol.UpSampling(fpn_p4_plus, scale=2, sample_type='nearest', name='fpn_p4_upsample') fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_upsample, fpn_p3_1x1], name='fpn_p3_sum') fpn_p3_upsample = mx.symbol.UpSampling(fpn_p3_plus, scale=2, sample_type='nearest', name='fpn_p3_upsample') fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_upsample, fpn_p2_1x1], name='fpn_p2_sum') fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6') fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), 
num_filter=feature_dim, name='fpn_p5') fpn_p4 = mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4') fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3') fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2') return (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6) def get_rpn_subnet(self, data, num_anchors, suffix): rpn_conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512, name=('rpn_conv_' + suffix), weight=self.shared_param_dict['rpn_conv_weight'], bias=self.shared_param_dict['rpn_conv_bias']) rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name=('rpn_relu_' + suffix)) rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=(2 * num_anchors), name=('rpn_cls_score_' + suffix), weight=self.shared_param_dict['rpn_cls_score_weight'], bias=self.shared_param_dict['rpn_cls_score_bias']) rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=(4 * num_anchors), name=('rpn_bbox_pred_' + suffix), weight=self.shared_param_dict['rpn_bbox_pred_weight'], bias=self.shared_param_dict['rpn_bbox_pred_bias']) rpn_cls_score_t1 = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, (- 1), 0), name=('rpn_cls_score_t1_' + suffix)) rpn_cls_score_t2 = mx.sym.Reshape(data=rpn_cls_score_t1, shape=(0, 2, (- 1)), name=('rpn_cls_score_t2_' + suffix)) rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_t1, mode='channel', name=('rpn_cls_prob_' + suffix)) rpn_cls_prob_t = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, (2 * num_anchors), (- 1), 0), name=('rpn_cls_prob_t_' + suffix)) rpn_bbox_pred_t = mx.sym.Reshape(data=rpn_bbox_pred, shape=(0, 0, (- 1)), name=('rpn_bbox_pred_t_' + suffix)) return (rpn_cls_score_t2, rpn_cls_prob_t, rpn_bbox_pred_t, rpn_bbox_pred) def 
get_deformable_roipooling(self, name, data, rois, output_dim, spatial_scale, param_name, group_size=1, pooled_size=7, sample_per_part=4, part_size=7): offset = mx.contrib.sym.DeformablePSROIPooling(name=(('offset_' + name) + '_t'), data=data, rois=rois, group_size=group_size, pooled_size=pooled_size, sample_per_part=sample_per_part, no_trans=True, part_size=part_size, output_dim=output_dim, spatial_scale=spatial_scale) offset = mx.sym.FullyConnected(name=('offset_' + name), data=offset, num_hidden=((part_size * part_size) * 2), lr_mult=0.01, weight=self.shared_param_dict[(('offset_' + param_name) + '_weight')], bias=self.shared_param_dict[(('offset_' + param_name) + '_bias')]) offset_reshape = mx.sym.Reshape(data=offset, shape=((- 1), 2, part_size, part_size), name=('offset_reshape_' + name)) output = mx.contrib.sym.DeformablePSROIPooling(name=('deformable_roi_pool_' + name), data=data, rois=rois, trans=offset_reshape, group_size=group_size, pooled_size=pooled_size, sample_per_part=sample_per_part, no_trans=False, part_size=part_size, output_dim=output_dim, spatial_scale=spatial_scale, trans_std=0.1) return output def get_constant_symbol(self, const_val): if (const_val in self.constants_dict): return self.constants_dict[const_val] name = 'const_eq_{0}'.format(const_val) c = mx.sym.Variable(name, shape=(1,), init=MyConstant(value=[const_val])) c = mx.sym.BlockGrad(c) self.constants_dict[const_val] = c return c def cos_sim_2_dist_generic(self, cos_sim, x=None, y=None, x_is_norm=True, y_is_norm=True): if x_is_norm: x_norm = self.get_constant_symbol(1) else: assert (x is not None), 'if x is not L2 normalized then x must be provided' x_norm = mx.sym.sum_axis(mx.sym.square(x), axis=0, keepdims=True) x_norm = mx.sym.transpose(x_norm, axes=(1, 0)) if y_is_norm: y_norm = self.get_constant_symbol(1) else: assert (y is not None), 'if y is not L2 normalized then y must be provided' y_norm = mx.sym.sum_axis(mx.sym.square(y), axis=0, keepdims=True) dist = 
mx.sym.broadcast_add(mx.sym.broadcast_sub(x_norm, mx.sym.broadcast_mul(self.get_constant_symbol(2), cos_sim)), y_norm) return dist def cos_sim_2_dist(self, cos_sim, cfg=None, embd=None, reps=None): if cfg.network.EMBED_L2_NORM: embd_norm = self.get_constant_symbol(1) else: assert (embd is not None), 'if embedding is not L2 normalized then embd must be provided' embd_norm = mx.sym.sum_axis(mx.sym.square(embd), axis=1, keepdims=True) embd_norm = mx.sym.reshape(embd_norm, shape=(0, 1, 1)) if cfg.network.REP_L2_NORM: reps_norm = self.get_constant_symbol(1) else: assert (reps is not None), 'if representatives are not L2 normalized then reps must be provided' reps_norm = mx.sym.sum_axis(mx.sym.square(reps), axis=0, keepdims=True) dist = mx.sym.broadcast_add(mx.sym.broadcast_sub(embd_norm, mx.sym.broadcast_mul(self.get_constant_symbol(2), cos_sim)), reps_norm) return dist def get_symbol(self, cfg, is_train=True): num_classes = cfg.dataset.NUM_CLASSES num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes) data = mx.sym.Variable(name='data') im_info = mx.sym.Variable(name='im_info') (res2, res3, res4, res5) = self.get_resnet_backbone(data, with_dpyramid=True, with_dconv=True) (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6) = self.get_fpn_feature(res2, res3, res4, res5) (rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2) = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2') (rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3) = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3') (rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4) = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4') (rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5) = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5') (rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6) = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6') rpn_cls_prob_dict = {'rpn_cls_prob_stride64': rpn_prob_p6, 
'rpn_cls_prob_stride32': rpn_prob_p5, 'rpn_cls_prob_stride16': rpn_prob_p4, 'rpn_cls_prob_stride8': rpn_prob_p3, 'rpn_cls_prob_stride4': rpn_prob_p2} rpn_bbox_pred_dict = {'rpn_bbox_pred_stride64': rpn_bbox_pred_p6, 'rpn_bbox_pred_stride32': rpn_bbox_pred_p5, 'rpn_bbox_pred_stride16': rpn_bbox_pred_p4, 'rpn_bbox_pred_stride8': rpn_bbox_pred_p3, 'rpn_bbox_pred_stride4': rpn_bbox_pred_p2} arg_dict = dict((rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items())) if is_train: if (not cfg.network.base_net_lock): rpn_label = mx.sym.Variable(name='label') rpn_bbox_target = mx.sym.Variable(name='bbox_target') rpn_bbox_weight = mx.sym.Variable(name='bbox_weight') gt_boxes = mx.sym.Variable(name='gt_boxes') if (not cfg.network.base_net_lock): rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2) rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2) rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid', use_ignore=True, ignore_label=(- 1), name='rpn_cls_prob') rpn_bbox_loss = (rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target))) rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=(1.0 / cfg.TRAIN.RPN_BATCH_SIZE)) aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N, 'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE} rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items()))) gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=((- 1), 5), name='gt_boxes_reshape') (rois, label, 
bbox_target, bbox_weight) = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES, batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION) else: aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N, 'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE} rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items()))) offset_p2_weight = mx.sym.Variable(name='offset_p2_weight', dtype=np.float32, lr_mult=0.01) offset_p3_weight = mx.sym.Variable(name='offset_p3_weight', dtype=np.float32, lr_mult=0.01) offset_p4_weight = mx.sym.Variable(name='offset_p4_weight', dtype=np.float32, lr_mult=0.01) offset_p5_weight = mx.sym.Variable(name='offset_p5_weight', dtype=np.float32, lr_mult=0.01) offset_p2_bias = mx.sym.Variable(name='offset_p2_bias', dtype=np.float32, lr_mult=0.01) offset_p3_bias = mx.sym.Variable(name='offset_p3_bias', dtype=np.float32, lr_mult=0.01) offset_p4_bias = mx.sym.Variable(name='offset_p4_bias', dtype=np.float32, lr_mult=0.01) offset_p5_bias = mx.sym.Variable(name='offset_p5_bias', dtype=np.float32, lr_mult=0.01) roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5, offset_weight_p2=offset_p2_weight, offset_bias_p2=offset_p2_bias, offset_weight_p3=offset_p3_weight, offset_bias_p3=offset_p3_bias, offset_weight_p4=offset_p4_weight, offset_bias_p4=offset_p4_bias, offset_weight_p5=offset_p5_weight, offset_bias_p5=offset_p5_bias, rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling', with_deformable=True) fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024) fc_new_1_relu = 
mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu') fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024) fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu') if (is_train and cfg.network.base_net_lock): fc_new_2_relu = mx.sym.BlockGrad(fc_new_2_relu) rois = mx.sym.BlockGrad(rois) label = mx.sym.BlockGrad(label) bbox_target = mx.sym.BlockGrad(bbox_target) bbox_weight = mx.sym.BlockGrad(bbox_weight) lr_mult = cfg.TRAIN.REPS_LR_MULT if cfg.network.SEPARABLE_REPS: base = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives_base', num_hidden=(cfg.network.EMBEDDING_DIM * (num_classes - 1)), no_bias=True, lr_mult=lr_mult) offset = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives_offset', num_hidden=(cfg.network.EMBEDDING_DIM * cfg.network.REPS_PER_CLASS), no_bias=True, lr_mult=lr_mult) base = mx.sym.reshape(base, shape=(cfg.network.EMBEDDING_DIM, 1, (num_classes - 1))) offset = mx.sym.reshape(offset, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, 1)) representatives = mx.sym.broadcast_add(base, offset, name='fc_representatives') else: representatives = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives', num_hidden=((cfg.network.EMBEDDING_DIM * cfg.network.REPS_PER_CLASS) * (num_classes - 1)), no_bias=True, lr_mult=lr_mult) representatives = mx.sym.reshape(representatives, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, (num_classes - 1))) if cfg.network.REP_L2_NORM: representatives = mx.sym.transpose(mx.sym.L2Normalization(mx.sym.transpose(representatives, axes=(1, 0, 2)), mode='channel'), axes=(1, 0, 2)) extra_outputs = [mx.sym.BlockGrad(representatives)] eps = 1e-05 x = mx.sym.FullyConnected(name='embed_dense_1', data=fc_new_2_relu, num_hidden=2048) x = mx.sym.BatchNorm(name='embed_batchNorm_1', data=x, use_global_stats=True, fix_gamma=False, eps=eps) x = 
mx.sym.Activation(name='embed_relu_1', data=x, act_type='relu') x = mx.sym.FullyConnected(name='embed_dense_2', data=x, num_hidden=1024) x = mx.sym.BatchNorm(name='embed_batchNorm_2', data=x, use_global_stats=True, fix_gamma=False, eps=eps) x = mx.sym.Activation(name='embed_relu_2', data=x, act_type='relu') x = mx.sym.FullyConnected(name='embed_dense_3', data=x, num_hidden=1024) batch_embed = mx.sym.identity(name='batch_embed', data=fc_new_2_relu) if cfg.network.EMBED_L2_NORM: batch_embed = mx.sym.L2Normalization(data=batch_embed, name='batch_embed_nrm', mode='instance') cos_sim = mx.sym.dot(batch_embed, representatives, transpose_b=False) all_cls_rep_dist = self.cos_sim_2_dist(cos_sim, cfg, embd=batch_embed, reps=representatives) if (is_train and cfg.network.EMBED_LOSS_ENABLED): all_cls_min_dist = mx.sym.min_axis(all_cls_rep_dist, axis=1, keepdims=True) all_cls_min_dist = mx.sym.reshape(all_cls_min_dist, shape=(0, (num_classes - 1))) mod_true_class = mx.sym.slice_axis(mx.sym.one_hot(label, depth=num_classes, on_value=1, off_value=0), axis=1, begin=1, end=None) mod_false_class = mx.sym.slice_axis(mx.sym.one_hot(label, depth=num_classes, on_value=1000, off_value=0), axis=1, begin=1, end=None) min_dist_true = mx.sym.sum_axis(mx.sym.broadcast_mul(all_cls_min_dist, mod_true_class), axis=1) min_dist_false = mx.sym.min_axis(mx.sym.broadcast_add(all_cls_min_dist, mod_false_class), axis=1) embed_loss_val = mx.sym.broadcast_sub(min_dist_true, min_dist_false) embed_loss_val = mx.sym.broadcast_add(embed_loss_val, self.get_constant_symbol(cfg.network.EMBED_LOSS_MARGIN)) embed_loss_val = mx.sym.relu(embed_loss_val) embed_loss_val = mx.sym.reshape(embed_loss_val, shape=(0, 1)) if (is_train and cfg.network.REPS_CLS_LOSS): mask_block_ones = mx.sym.ones(shape=(cfg.network.REPS_PER_CLASS, cfg.network.REPS_PER_CLASS)) mask_block_zeros = mx.sym.zeros(shape=(cfg.network.REPS_PER_CLASS, cfg.network.REPS_PER_CLASS)) mask = None for iC1 in range((num_classes - 1)): mask_row = None for iC2 
in range((num_classes - 1)): if (iC1 == iC2): cblock = mask_block_ones else: cblock = mask_block_zeros if (mask_row is None): mask_row = cblock else: mask_row = mx.sym.concat(mask_row, cblock, dim=1) if (mask is None): mask = mask_row else: mask = mx.sym.concat(mask, mask_row, dim=0) mask_NC = mx.sym.broadcast_mul(self.get_constant_symbol(1000), mask) mask_C = mx.sym.broadcast_sub(self.get_constant_symbol(1000), mask_NC) mask_C = mx.sym.BlockGrad(mask_C) mask_NC = mx.sym.BlockGrad(mask_NC) R = mx.sym.reshape(mx.sym.transpose(representatives, axes=(0, 2, 1)), shape=(0, (- 1))) R2R_cos_sim = mx.sym.dot(R, R, transpose_a=True) R2R = self.cos_sim_2_dist_generic(R2R_cos_sim, x=R, y=R, x_is_norm=cfg.network.REP_L2_NORM, y_is_norm=cfg.network.REP_L2_NORM) C2C = mx.sym.broadcast_add(R2R, mask_C) C2NC = mx.sym.broadcast_add(R2R, mask_NC) min_dist_C = mx.sym.topk(C2C, axis=1, k=2, ret_typ='value', is_ascend=True) min_dist_C = mx.sym.slice_axis(min_dist_C, axis=1, begin=1, end=2) min_dist_NC = mx.sym.min_axis(C2NC, axis=1, keepdims=True) reps_cls_loss_val = mx.sym.broadcast_sub(min_dist_C, min_dist_NC) reps_cls_loss_val = mx.sym.broadcast_add(reps_cls_loss_val, self.get_constant_symbol(cfg.network.EMBED_LOSS_MARGIN)) reps_cls_loss_val = mx.sym.relu(reps_cls_loss_val) probs = mx.sym.exp(mx.sym.broadcast_mul(all_cls_rep_dist, self.get_constant_symbol(((- 0.5) / float((cfg.network.SIGMA ** 2)))))) comb_cls_scores = mx.sym.max_axis(probs, axis=1, keepdims=False) comb_cls_scores = mx.sym.broadcast_add(comb_cls_scores, self.get_constant_symbol(1e-07)) bg_scores = mx.sym.broadcast_sub(self.get_constant_symbol((1 + 1e-07)), mx.sym.max_axis(comb_cls_scores, axis=1, keepdims=True)) cls_score = mx.sym.concat(bg_scores, comb_cls_scores, dim=1, name='bg_concat') cls_score = mx.sym.reshape(cls_score, shape=(0, (- 1))) if cfg.network.SOFTMAX_ENABLED: cls_score = mx.sym.broadcast_mul(self.get_constant_symbol(cfg.network.SOFTMAX_MUL), cls_score) if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS: 
cls_score_lin = mx.symbol.FullyConnected(name='cls_score_lin', data=fc_new_2_relu, num_hidden=num_classes) bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=(num_reg_classes * 4)) if is_train: if cfg.TRAIN.ENABLE_OHEM: (labels_ohem, bbox_weights_ohem) = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight) if cfg.network.SOFTMAX_ENABLED: cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1)) else: zz = mx.sym.zeros_like(label) cls_prob = mx.sym.BlockGrad(cls_score) invalid = mx.sym.broadcast_equal(labels_ohem, self.get_constant_symbol((- 1))) minoh_labels = mx.sym.one_hot(mx.sym.broadcast_add(mx.sym.cast(invalid, dtype='float32'), labels_ohem), depth=num_classes, on_value=(- 1), off_value=0) ce_loss = mx.sym.where(invalid, x=zz, y=mx.sym.sum(mx.sym.broadcast_mul(minoh_labels, mx.sym.log(mx.sym.broadcast_add(cls_score, self.get_constant_symbol(1e-07)))), axis=1)) ce_loss = mx.sym.MakeLoss(ce_loss, normalization='valid') bbox_loss_ = (bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))) bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)) rcnn_label = labels_ohem if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS: cls_prob_lin = mx.sym.SoftmaxOutput(name='cls_prob_lin', data=cls_score_lin, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1)) if cfg.network.EMBED_LOSS_ENABLED: embed_loss_ = (mx.sym.slice_axis(bbox_weights_ohem, axis=1, begin=(- 1), end=None) * embed_loss_val) embed_loss = mx.sym.MakeLoss(name='embed_loss', data=embed_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)) else: cls_prob = 
mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid') bbox_loss_ = (bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))) bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS)) rcnn_label = label if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS: cls_prob_lin = mx.sym.SoftmaxOutput(name='cls_prob_lin', data=cls_score_lin, label=label, normalization='valid') if cfg.network.EMBED_LOSS_ENABLED: embed_loss_ = (mx.sym.slice_axis(bbox_weight, axis=1, begin=0, end=1) * embed_loss_val) embed_loss = mx.sym.MakeLoss(name='embed_loss', data=embed_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS)) if cfg.network.EMBED_LOSS_ENABLED: extra_outputs += [embed_loss] if cfg.network.REPS_CLS_LOSS: extra_outputs += [mx.sym.MakeLoss(name='reps_cls_loss', data=reps_cls_loss_val, grad_scale=(1.0 / (cfg.network.REPS_PER_CLASS * (num_classes - 1))))] if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS: extra_outputs += [cls_prob_lin] if (not cfg.network.SOFTMAX_ENABLED): extra_outputs += [ce_loss] extra_outputs += [mx.sym.BlockGrad(rois), mx.sym.identity(mx.sym.BlockGrad(batch_embed), name='psp_final_embed')] rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1)), name='label_reshape') cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape') bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_loss_reshape') if cfg.network.base_net_lock: group = mx.sym.Group(([cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)] + extra_outputs)) else: group = mx.sym.Group(([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)] + extra_outputs)) else: cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score) cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes), 
name='cls_prob_reshape') bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_pred_reshape') cls_score_orig = cls_score if cfg.network.SOFTMAX_ENABLED: cls_score_orig = mx.sym.broadcast_div(cls_score_orig, self.get_constant_symbol(cfg.network.SOFTMAX_MUL)) cls_score_orig = mx.sym.Reshape(data=cls_score_orig, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes)) group = mx.sym.Group([rois, cls_prob, bbox_pred, mx.sym.identity(batch_embed, name='psp_final_embed'), mx.sym.identity(cls_score_orig, name='cls_score')]) self.sym = group return group def init_weight_rcnn(self, cfg, arg_params, aux_params): arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight']) arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias']) arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight']) arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias']) arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight']) arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias']) name = 'fc_representatives' if cfg.network.SEPARABLE_REPS: arg_params[(name + '_base_weight')] = mx.random.normal(0, 0.1, shape=self.arg_shape_dict[(name + '_base_weight')]) arg_params[(name + '_offset_weight')] = mx.random.normal(0, 0.1, shape=self.arg_shape_dict[(name + '_offset_weight')]) elif cfg.network.SEPARABLE_REPS_INIT: C = mx.random.normal(0, 0.1, shape=(cfg.network.EMBEDDING_DIM, 1, (cfg.dataset.NUM_CLASSES - 1))) R = mx.random.normal(0, 0.05, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, 1)) CR = (C + R) arg_params[(name + '_weight')] = mx.nd.reshape(CR, shape=((- 1), 1)) else: arg_params[(name + '_weight')] = mx.random.normal(0, 0.1, shape=self.arg_shape_dict[(name + '_weight')]) if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS: 
            # --- tail of init_weight_rcnn: body of the preceding
            # `if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:` branch ---
            # Gaussian init for the auxiliary linear classifier head, zero bias.
            arg_params['cls_score_lin_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_lin_weight'])
            arg_params['cls_score_lin_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_lin_bias'])

    def init_deformable_convnet(self, cfg, arg_params, aux_params):
        """Zero-initialize the offset branches of the deformable convolutions.

        Mutates ``arg_params`` in place; ``cfg`` and ``aux_params`` are unused
        here but kept so every ``init_*`` helper shares the same signature.
        Zero offset weights/biases make each deformable conv initially sample
        on the regular grid, i.e. behave like a plain convolution at the
        start of training.
        """
        arg_params['res5a_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_weight'])
        arg_params['res5a_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_bias'])
        arg_params['res5b_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_weight'])
        arg_params['res5b_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_bias'])
        arg_params['res5c_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_weight'])
        arg_params['res5c_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_bias'])
        arg_params['res3b3_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_weight'])
        arg_params['res3b3_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_bias'])
        arg_params['res4b22_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_weight'])
        arg_params['res4b22_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_bias'])

    def init_weight_fpn(self, cfg, arg_params, aux_params):
        """Initialize the FPN layers (p6/p5..p2 output convs and 1x1 laterals).

        All FPN weights get N(0, 0.01) and all biases start at zero, with
        shapes taken from ``self.arg_shape_dict``.  Mutates ``arg_params`` in
        place; ``cfg`` and ``aux_params`` are unused but kept for a uniform
        ``init_*`` signature.
        """
        arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight'])
        arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias'])
        arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight'])
        arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias'])
        arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight'])
        arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias'])
        arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight'])
        arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias'])
        arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_weight'])
        arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias'])
        arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight'])
        arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias'])
        arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight'])
        arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias'])
        arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight'])
        arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias'])
        arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight'])
        arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias'])

    def init_weight(self, cfg, arg_params, aux_params):
        """Build fresh initial values for all new layers and merge them into
        the (presumably pretrained-backbone) ``arg_params`` / ``aux_params``.

        Steps:
          1. Initialize the RPN params shared across FPN levels
             (``self.shared_param_list``): offset convs to zero, others to
             N(0, 0.01); biases to zero.
          2. Delegate to the per-subnet helpers (deformable offsets, R-CNN
             head, FPN layers), which fill scratch dicts ``arg_params2`` /
             ``aux_params2``.
          3. Merge scratch into the caller's dicts.  When
             ``cfg.network.pretrained_weights_are_priority`` is set, a
             freshly-initialized value is copied only if the key is missing
             (or, for arg params, shape-mismatched) — i.e. pretrained values
             win; otherwise the fresh value always overwrites.
        """
        (arg_params2, aux_params2) = ({}, {})
        for name in self.shared_param_list:
            if ('offset' in name):
                # offset convs start at zero so deformation is initially off
                arg_params2[(name + '_weight')] = mx.nd.zeros(shape=self.arg_shape_dict[(name + '_weight')])
            else:
                arg_params2[(name + '_weight')] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[(name + '_weight')])
            arg_params2[(name + '_bias')] = mx.nd.zeros(shape=self.arg_shape_dict[(name + '_bias')])
        self.init_deformable_convnet(cfg, arg_params2, aux_params2)
        self.init_weight_rcnn(cfg, arg_params2, aux_params2)
        self.init_weight_fpn(cfg, arg_params2, aux_params2)
        for k in arg_params2:
            if cfg.network.pretrained_weights_are_priority:
                # keep the pretrained tensor unless it is absent or its shape
                # no longer matches the current network
                if ((k not in arg_params) or (arg_params[k].shape != arg_params2[k].shape)):
                    arg_params[k] = arg_params2[k]
            else:
                arg_params[k] = arg_params2[k]
        for k in aux_params2:
            if cfg.network.pretrained_weights_are_priority:
                # NOTE(review): unlike arg params, aux params are not
                # shape-checked here — presumably intentional; confirm.
                if (k not in aux_params):
                    aux_params[k] = aux_params2[k]
            else:
                aux_params[k] = aux_params2[k]
class resnet_v1_101_fpn_rcnn(Symbol): def __init__(self): '\n Use __init__ to define parameter network needs\n ' self.shared_param_list = ['rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred'] self.shared_param_dict = {} for name in self.shared_param_list: self.shared_param_dict[(name + '_weight')] = mx.sym.Variable((name + '_weight')) self.shared_param_dict[(name + '_bias')] = mx.sym.Variable((name + '_bias')) def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-05): conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True) bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps) scale_conv1 = bn_conv1 conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu') pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max') res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch1 = bn2a_branch1 res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch2a = bn2a_branch2a res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu') res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, 
eps=eps) scale2a_branch2b = bn2a_branch2b res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu') res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch2c = bn2a_branch2c res2a = mx.symbol.broadcast_add(*[scale2a_branch1, scale2a_branch2c], name='res2a') res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu') res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale2b_branch2a = bn2b_branch2a res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu') res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale2b_branch2b = bn2b_branch2b res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu') res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale2b_branch2c = bn2b_branch2c res2b = mx.symbol.broadcast_add(*[res2a_relu, scale2b_branch2c], name='res2b') res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu') res2c_branch2a = 
mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale2c_branch2a = bn2c_branch2a res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu') res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale2c_branch2b = bn2c_branch2b res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu') res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale2c_branch2c = bn2c_branch2c res2c = mx.symbol.broadcast_add(*[res2b_relu, scale2c_branch2c], name='res2c') res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu') res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch1 = bn3a_branch1 res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch2a = bn3a_branch2a res3a_branch2a_relu = 
mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu') res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch2b = bn3a_branch2b res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu') res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch2c = bn3a_branch2c res3a = mx.symbol.broadcast_add(*[scale3a_branch1, scale3a_branch2c], name='res3a') res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu') res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3b1_branch2a = bn3b1_branch2a res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu') res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3b1_branch2b = bn3b1_branch2b res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu') res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 
0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3b1_branch2c = bn3b1_branch2c res3b1 = mx.symbol.broadcast_add(*[res3a_relu, scale3b1_branch2c], name='res3b1') res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu') res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3b2_branch2a = bn3b2_branch2a res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu') res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3b2_branch2b = bn3b2_branch2b res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu') res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3b2_branch2c = bn3b2_branch2c res3b2 = mx.symbol.broadcast_add(*[res3b1_relu, scale3b2_branch2c], name='res3b2') res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu') res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, 
use_global_stats=True, fix_gamma=False, eps=eps) scale3b3_branch2a = bn3b3_branch2a res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu') if with_dpyramid: res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1)) res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, offset=res3b3_branch2b_offset, num_filter=128, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True) else: res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3b3_branch2b = bn3b3_branch2b res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu') res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3b3_branch2c = bn3b3_branch2c res3b3 = mx.symbol.broadcast_add(*[res3b2_relu, scale3b3_branch2c], name='res3b3') res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu') res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch1 = bn4a_branch1 res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), 
stride=(2, 2), no_bias=True) bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch2a = bn4a_branch2a res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu') res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch2b = bn4a_branch2b res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu') res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch2c = bn4a_branch2c res4a = mx.symbol.broadcast_add(*[scale4a_branch1, scale4a_branch2c], name='res4a') res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu') res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b1_branch2a = bn4b1_branch2a res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu') res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b1_branch2b = bn4b1_branch2b 
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu') res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b1_branch2c = bn4b1_branch2c res4b1 = mx.symbol.broadcast_add(*[res4a_relu, scale4b1_branch2c], name='res4b1') res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu') res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b2_branch2a = bn4b2_branch2a res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu') res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b2_branch2b = bn4b2_branch2b res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu') res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b2_branch2c = bn4b2_branch2c res4b2 = mx.symbol.broadcast_add(*[res4b1_relu, scale4b2_branch2c], name='res4b2') res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu') 
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b3_branch2a = bn4b3_branch2a res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu') res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b3_branch2b = bn4b3_branch2b res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu') res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b3_branch2c = bn4b3_branch2c res4b3 = mx.symbol.broadcast_add(*[res4b2_relu, scale4b3_branch2c], name='res4b3') res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu') res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b4_branch2a = bn4b4_branch2a res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu') res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b4_branch2b = 
mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b4_branch2b = bn4b4_branch2b res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu') res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b4_branch2c = bn4b4_branch2c res4b4 = mx.symbol.broadcast_add(*[res4b3_relu, scale4b4_branch2c], name='res4b4') res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu') res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b5_branch2a = bn4b5_branch2a res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu') res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b5_branch2b = bn4b5_branch2b res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu') res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b5_branch2c = bn4b5_branch2c res4b5 = 
mx.symbol.broadcast_add(*[res4b4_relu, scale4b5_branch2c], name='res4b5') res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu') res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b6_branch2a = bn4b6_branch2a res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu') res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b6_branch2b = bn4b6_branch2b res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu') res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b6_branch2c = bn4b6_branch2c res4b6 = mx.symbol.broadcast_add(*[res4b5_relu, scale4b6_branch2c], name='res4b6') res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu') res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b7_branch2a = bn4b7_branch2a res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a, act_type='relu') res4b7_branch2b = 
mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b7_branch2b = bn4b7_branch2b res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu') res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b7_branch2c = bn4b7_branch2c res4b7 = mx.symbol.broadcast_add(*[res4b6_relu, scale4b7_branch2c], name='res4b7') res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu') res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b8_branch2a = bn4b8_branch2a res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu') res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b8_branch2b = bn4b8_branch2b res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu') res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b8_branch2c = 
mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b8_branch2c = bn4b8_branch2c res4b8 = mx.symbol.broadcast_add(*[res4b7_relu, scale4b8_branch2c], name='res4b8') res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu') res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b9_branch2a = bn4b9_branch2a res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu') res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b9_branch2b = bn4b9_branch2b res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu') res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b9_branch2c = bn4b9_branch2c res4b9 = mx.symbol.broadcast_add(*[res4b8_relu, scale4b9_branch2c], name='res4b9') res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu') res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) 
scale4b10_branch2a = bn4b10_branch2a res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu') res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b10_branch2b = bn4b10_branch2b res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu') res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b10_branch2c = bn4b10_branch2c res4b10 = mx.symbol.broadcast_add(*[res4b9_relu, scale4b10_branch2c], name='res4b10') res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu') res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b11_branch2a = bn4b11_branch2a res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu') res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b11_branch2b = bn4b11_branch2b res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', 
data=scale4b11_branch2b, act_type='relu') res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b11_branch2c = bn4b11_branch2c res4b11 = mx.symbol.broadcast_add(*[res4b10_relu, scale4b11_branch2c], name='res4b11') res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu') res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b12_branch2a = bn4b12_branch2a res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu') res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b12_branch2b = bn4b12_branch2b res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu') res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b12_branch2c = bn4b12_branch2c res4b12 = mx.symbol.broadcast_add(*[res4b11_relu, scale4b12_branch2c], name='res4b12') res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu') res4b13_branch2a = 
mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b13_branch2a = bn4b13_branch2a res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu') res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b13_branch2b = bn4b13_branch2b res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu') res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b13_branch2c = bn4b13_branch2c res4b13 = mx.symbol.broadcast_add(*[res4b12_relu, scale4b13_branch2c], name='res4b13') res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu') res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b14_branch2a = bn4b14_branch2a res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu') res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), 
no_bias=True) bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b14_branch2b = bn4b14_branch2b res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu') res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b14_branch2c = bn4b14_branch2c res4b14 = mx.symbol.broadcast_add(*[res4b13_relu, scale4b14_branch2c], name='res4b14') res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu') res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b15_branch2a = bn4b15_branch2a res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu') res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b15_branch2b = bn4b15_branch2b res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu') res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True, 
fix_gamma=False, eps=eps) scale4b15_branch2c = bn4b15_branch2c res4b15 = mx.symbol.broadcast_add(*[res4b14_relu, scale4b15_branch2c], name='res4b15') res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu') res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b16_branch2a = bn4b16_branch2a res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu') res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b16_branch2b = bn4b16_branch2b res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu') res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b16_branch2c = bn4b16_branch2c res4b16 = mx.symbol.broadcast_add(*[res4b15_relu, scale4b16_branch2c], name='res4b16') res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu') res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b17_branch2a = bn4b17_branch2a res4b17_branch2a_relu 
= mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu') res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b17_branch2b = bn4b17_branch2b res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu') res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b17_branch2c = bn4b17_branch2c res4b17 = mx.symbol.broadcast_add(*[res4b16_relu, scale4b17_branch2c], name='res4b17') res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu') res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b18_branch2a = bn4b18_branch2a res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu') res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b18_branch2b = bn4b18_branch2b res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b, act_type='relu') res4b18_branch2c = 
mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b18_branch2c = bn4b18_branch2c res4b18 = mx.symbol.broadcast_add(*[res4b17_relu, scale4b18_branch2c], name='res4b18') res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu') res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b19_branch2a = bn4b19_branch2a res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu') res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b19_branch2b = bn4b19_branch2b res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu') res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b19_branch2c = bn4b19_branch2c res4b19 = mx.symbol.broadcast_add(*[res4b18_relu, scale4b19_branch2c], name='res4b19') res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu') res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, 
num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b20_branch2a = bn4b20_branch2a res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu') res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b20_branch2b = bn4b20_branch2b res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu') res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b20_branch2c = bn4b20_branch2c res4b20 = mx.symbol.broadcast_add(*[res4b19_relu, scale4b20_branch2c], name='res4b20') res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu') res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b21_branch2a = bn4b21_branch2a res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu') res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', 
data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b21_branch2b = bn4b21_branch2b res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu') res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b21_branch2c = bn4b21_branch2c res4b21 = mx.symbol.broadcast_add(*[res4b20_relu, scale4b21_branch2c], name='res4b21') res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu') res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b22_branch2a = bn4b22_branch2a res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu') if with_dpyramid: res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1)) res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, offset=res4b22_branch2b_offset, num_filter=256, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True) else: res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b22_branch2b = bn4b22_branch2b res4b22_branch2b_relu = 
mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu') res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b22_branch2c = bn4b22_branch2c res4b22 = mx.symbol.broadcast_add(*[res4b21_relu, scale4b22_branch2c], name='res4b22') res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu') if with_dilated: res5_stride = (1, 1) res5_dilate = (2, 2) else: res5_stride = (2, 2) res5_dilate = (1, 1) res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True) bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale5a_branch2a = bn5a_branch2a res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu') if with_dconv: res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate) res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True) else: res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True) bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale5a_branch2b = bn5a_branch2b res5a_branch2b_relu = 
mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu') res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale5a_branch2c = bn5a_branch2c res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True) bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale5a_branch1 = bn5a_branch1 res5a = mx.symbol.broadcast_add(*[scale5a_branch1, scale5a_branch2c], name='res5a') res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu') res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale5b_branch2a = bn5b_branch2a res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu') if with_dconv: res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate) res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True) else: res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True) bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', 
data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale5b_branch2b = bn5b_branch2b res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu') res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale5b_branch2c = bn5b_branch2c res5b = mx.symbol.broadcast_add(*[res5a_relu, scale5b_branch2c], name='res5b') res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu') res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale5c_branch2a = bn5c_branch2a res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu') if with_dconv: res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate) res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True) else: res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True) bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale5c_branch2b = bn5c_branch2b res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, 
act_type='relu') res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale5c_branch2c = bn5c_branch2c res5c = mx.symbol.broadcast_add(*[res5b_relu, scale5c_branch2c], name='res5c') res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu') return (res2c_relu, res3b3_relu, res4b22_relu, res5c_relu) def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256): fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1') fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1') fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1') fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1') fpn_p5_upsample = mx.symbol.UpSampling(fpn_p5_1x1, scale=2, sample_type='nearest', name='fpn_p5_upsample') fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_upsample, fpn_p4_1x1], name='fpn_p4_sum') fpn_p4_upsample = mx.symbol.UpSampling(fpn_p4_plus, scale=2, sample_type='nearest', name='fpn_p4_upsample') fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_upsample, fpn_p3_1x1], name='fpn_p3_sum') fpn_p3_upsample = mx.symbol.UpSampling(fpn_p3_plus, scale=2, sample_type='nearest', name='fpn_p3_upsample') fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_upsample, fpn_p2_1x1], name='fpn_p2_sum') fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6') fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5') fpn_p4 = 
mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4') fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3') fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2') return (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6) def get_rpn_subnet(self, data, num_anchors, suffix): rpn_conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512, name=('rpn_conv_' + suffix), weight=self.shared_param_dict['rpn_conv_weight'], bias=self.shared_param_dict['rpn_conv_bias']) rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name=('rpn_relu_' + suffix)) rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=(2 * num_anchors), name=('rpn_cls_score_' + suffix), weight=self.shared_param_dict['rpn_cls_score_weight'], bias=self.shared_param_dict['rpn_cls_score_bias']) rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=(4 * num_anchors), name=('rpn_bbox_pred_' + suffix), weight=self.shared_param_dict['rpn_bbox_pred_weight'], bias=self.shared_param_dict['rpn_bbox_pred_bias']) rpn_cls_score_t1 = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, (- 1), 0), name=('rpn_cls_score_t1_' + suffix)) rpn_cls_score_t2 = mx.sym.Reshape(data=rpn_cls_score_t1, shape=(0, 2, (- 1)), name=('rpn_cls_score_t2_' + suffix)) rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_t1, mode='channel', name=('rpn_cls_prob_' + suffix)) rpn_cls_prob_t = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, (2 * num_anchors), (- 1), 0), name=('rpn_cls_prob_t_' + suffix)) rpn_bbox_pred_t = mx.sym.Reshape(data=rpn_bbox_pred, shape=(0, 0, (- 1)), name=('rpn_bbox_pred_t_' + suffix)) return (rpn_cls_score_t2, rpn_cls_prob_t, rpn_bbox_pred_t, rpn_bbox_pred) def get_symbol(self, cfg, is_train=True): num_classes = 
    def get_symbol(self, cfg, is_train=True):
        """Assemble the full FPN detection symbol.

        Pipeline: backbone -> FPN (P2-P6) -> weight-shared RPN head per
        level -> 'pyramid_proposal' custom op -> 'fpn_roi_pooling' custom
        op -> 2fc detection head.  When ``is_train`` the returned group
        holds the RPN and RCNN classification/bbox losses; otherwise it
        holds (rois, class probabilities, bbox deltas).
        """
        num_classes = cfg.dataset.NUM_CLASSES
        # Class-agnostic regression uses 2 "classes" (bg/fg) for bbox targets.
        num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
        data = mx.sym.Variable(name='data')
        im_info = mx.sym.Variable(name='im_info')
        # Backbone stages C2-C5, then the FPN pyramid P2-P6.
        (res2, res3, res4, res5) = self.get_resnet_backbone(data)
        (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6) = self.get_fpn_feature(res2, res3, res4, res5)
        # Same RPN head (shared weights) applied to every pyramid level.
        (rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2) = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
        (rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3) = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
        (rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4) = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
        (rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5) = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
        (rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6) = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
        # Inputs for the pyramid_proposal custom op, keyed by feature stride.
        rpn_cls_prob_dict = {'rpn_cls_prob_stride64': rpn_prob_p6, 'rpn_cls_prob_stride32': rpn_prob_p5, 'rpn_cls_prob_stride16': rpn_prob_p4, 'rpn_cls_prob_stride8': rpn_prob_p3, 'rpn_cls_prob_stride4': rpn_prob_p2}
        rpn_bbox_pred_dict = {'rpn_bbox_pred_stride64': rpn_bbox_pred_p6, 'rpn_bbox_pred_stride32': rpn_bbox_pred_p5, 'rpn_bbox_pred_stride16': rpn_bbox_pred_p4, 'rpn_bbox_pred_stride8': rpn_bbox_pred_p3, 'rpn_bbox_pred_stride4': rpn_bbox_pred_p2}
        # NOTE: `.items() + .items()` concatenates lists — Python 2 only.
        arg_dict = dict((rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items()))
        if is_train:
            rpn_label = mx.sym.Variable(name='label')
            rpn_bbox_target = mx.sym.Variable(name='bbox_target')
            rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
            gt_boxes = mx.sym.Variable(name='gt_boxes')
            # Concatenate per-level outputs along axis 2 so a single
            # SoftmaxOutput / smooth_l1 covers all pyramid levels.
            rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
            rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
            # ignore_label=-1 skips anchors not sampled by the target layer.
            rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid', use_ignore=True, ignore_label=(- 1), name='rpn_cls_prob')
            rpn_bbox_loss = (rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target)))
            rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=(1.0 / cfg.TRAIN.RPN_BATCH_SIZE))
            aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N, 'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE}
            rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
            gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=((- 1), 5), name='gt_boxes_reshape')
            # Sample proposals into RCNN training rois plus their
            # classification labels and regression targets/weights.
            (rois, label, bbox_target, bbox_weight) = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES, batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
        else:
            # Test time: same proposal op but with TEST NMS settings.
            aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N, 'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE}
            rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
        # Level-aware RoI pooling over P2-P5, then the shared 2fc head.
        roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5, rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling')
        fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
        fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
        fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
        fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
        cls_score = mx.symbol.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes)
        bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=(num_reg_classes * 4))
        if is_train:
            if cfg.TRAIN.ENABLE_OHEM:
                # Online hard example mining: re-weight labels/bbox weights
                # so only the hardest rois contribute to the losses.
                (labels_ohem, bbox_weights_ohem) = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight)
                cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1))
                bbox_loss_ = (bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
                bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM))
                rcnn_label = labels_ohem
            else:
                cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
                bbox_loss_ = (bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
                bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS))
                rcnn_label = label
            # Reshape to a per-image (batch, rois, ...) layout.
            rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1)), name='label_reshape')
            cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
            bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_loss_reshape')
            group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])
        else:
            cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
            cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
            bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_pred_reshape')
            group = mx.sym.Group([rois, cls_prob, bbox_pred])
        self.sym = group
        return group
mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape') bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_pred_reshape') group = mx.sym.Group([rois, cls_prob, bbox_pred]) self.sym = group return group def init_weight_rcnn(self, cfg, arg_params, aux_params): arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight']) arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias']) arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight']) arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias']) arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_weight']) arg_params['cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_bias']) arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight']) arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias']) def init_weight_fpn(self, cfg, arg_params, aux_params): arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight']) arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias']) arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight']) arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias']) arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight']) arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias']) arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight']) arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias']) arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, 
shape=self.arg_shape_dict['fpn_p2_weight']) arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias']) arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight']) arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias']) arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight']) arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias']) arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight']) arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias']) arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight']) arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias']) def init_weight(self, cfg, arg_params, aux_params): for name in self.shared_param_list: arg_params[(name + '_weight')] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[(name + '_weight')]) arg_params[(name + '_bias')] = mx.nd.zeros(shape=self.arg_shape_dict[(name + '_bias')]) self.init_weight_rcnn(cfg, arg_params, aux_params) self.init_weight_fpn(cfg, arg_params, aux_params)
def customize_compiler_for_nvcc(self):
    """Monkey-patch a distutils UnixCCompiler instance so ``.cu`` sources
    are dispatched to nvcc while all other sources keep the default
    compiler.

    Subclassing UnixCCompiler is impractical because
    distutils.sysconfig.customize_compiler must still run on the instance,
    so the instance's ``_compile`` method is replaced in place instead.
    """
    # Teach the compiler to accept CUDA sources at all.
    self.src_extensions.append('.cu')

    # Keep references to the pieces the override below must restore/call.
    saved_compiler_so = self.compiler_so
    original_compile = self._compile

    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        # extra_postargs is expected to be a dict carrying separate flag
        # lists under the 'nvcc' and 'gcc' keys.
        if os.path.splitext(src)[1] == '.cu':
            # Route CUDA sources through nvcc with nvcc-specific flags.
            self.set_executable('compiler_so', CUDA['nvcc'])
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']
        original_compile(obj, src, ext, cc_args, postargs, pp_opts)
        # Restore the default host compiler for the next source file.
        self.compiler_so = saved_compiler_so

    self._compile = _compile
class custom_build_ext(build_ext):
    """build_ext command that patches the compiler for gcc/nvcc dispatch."""

    def build_extensions(self):
        # Inject the .cu/nvcc handling into the compiler instance,
        # then run the standard build.
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)
def find_in_path(name, path):
    """Find a file in a search path.

    :param name: file name to look for (e.g. 'nvcc')
    :param path: os.pathsep-separated search path string (e.g. $PATH)
    :return: absolute path of the first match, or None if not found
    """
    # 'directory' instead of 'dir' -- the original shadowed the builtin.
    for directory in path.split(os.pathsep):
        binpath = pjoin(directory, name)
        if os.path.exists(binpath):
            return os.path.abspath(binpath)
    return None
def locate_cuda():
    """Locate the CUDA environment on the system.

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    Starts by looking for the CUDAHOME env variable. If not found,
    everything is based on finding 'nvcc' in the PATH.

    :raises EnvironmentError: if nvcc cannot be found or a CUDA
        sub-directory does not exist.
    """
    if 'CUDAHOME' in os.environ:
        home = os.environ['CUDAHOME']
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        # Search PATH, with /usr/local/cuda/bin as a last resort.
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be located in your $PATH. '
                                   'Either add it to your path, or set $CUDAHOME')
        home = os.path.dirname(os.path.dirname(nvcc))
    cudaconfig = {'home': home,
                  'nvcc': nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib64')}
    # .items() instead of the Python-2-only .iteritems(): identical result
    # here, and the helper stays usable under Python 3.
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
    return cudaconfig
def customize_compiler_for_nvcc(self):
    """Inject into distutils to customize how the dispatch to gcc/nvcc works.

    Subclassing UnixCCompiler is non-trivial because
    distutils.sysconfig.customize_compiler must still run on the instance,
    so this function patches the given compiler object in place instead.
    """
    # Recognize CUDA sources.
    self.src_extensions.append('.cu')
    # Saved so the per-file override below can restore the default
    # compiler after each source is processed.
    default_compiler_so = self.compiler_so
    super = self._compile  # NOTE(review): shadows the builtin 'super'

    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        # extra_postargs is a dict carrying separate flag lists for
        # 'nvcc' and 'gcc'.
        if os.path.splitext(src)[1] == '.cu':
            # Compile CUDA sources with nvcc and nvcc-specific flags.
            self.set_executable('compiler_so', CUDA['nvcc'])
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']
        super(obj, src, ext, cc_args, postargs, pp_opts)
        # Restore the regular host compiler for subsequent files.
        self.compiler_so = default_compiler_so

    self._compile = _compile
class custom_build_ext(build_ext):
    """build_ext command that patches the compiler for gcc/nvcc dispatch."""

    def build_extensions(self):
        # Patch the compiler instance, then delegate to the normal build.
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)
def find_in_path(name, path):
    """Return the absolute path of *name* found along the os.pathsep-separated
    search *path*, or None when no directory contains it."""
    candidates = (pjoin(directory, name) for directory in path.split(os.pathsep))
    hits = (os.path.abspath(candidate) for candidate in candidates if os.path.exists(candidate))
    # First hit in path order wins, mirroring shell lookup semantics.
    return next(hits, None)
def locate_cuda():
    """Locate the CUDA environment on the system.

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    Starts by looking for the CUDA_PATH env variable. If not found,
    everything is based on finding 'nvcc' in the PATH.

    :raises EnvironmentError: if nvcc cannot be found or a CUDA
        sub-directory does not exist.
    """
    if 'CUDA_PATH' in os.environ:
        home = os.environ['CUDA_PATH']
        print('home = %s\n' % home)
        # nvcc_bin is a module-level name (platform-specific executable
        # name, presumably 'nvcc.exe' on Windows) -- TODO confirm at file top.
        nvcc = pjoin(home, 'bin', nvcc_bin)
    else:
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path(nvcc_bin, os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be located in your $PATH. '
                                   'Either add it to your path, or set $CUDA_PATH')
        home = os.path.dirname(os.path.dirname(nvcc))
        print('home = %s, nvcc = %s\n' % (home, nvcc))
    cudaconfig = {'home': home,
                  'nvcc': nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, lib_dir)}
    # .items() instead of the Python-2-only .iteritems(): identical result
    # here, and the helper stays usable under Python 3.
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
    return cudaconfig
def customize_compiler_for_nvcc(self):
    """Inject into distutils to customize how the dispatch to gcc/nvcc works.

    Instead of subclassing UnixCCompiler (hard to reconcile with
    distutils.sysconfig.customize_compiler), this replaces the instance's
    ``compile`` method so extra_postargs can carry separate 'nvcc' and
    'gcc' flag lists.
    """
    super = self.compile  # NOTE(review): shadows the builtin 'super'

    def compile(sources, output_dir=None, macros=None, include_dirs=None, debug=0,
                extra_preargs=None, extra_postargs=None, depends=None):
        # NOTE(review): only the FIRST source's extension selects the flag
        # set for the whole batch -- assumes all sources passed to one
        # compile() call share a file type; confirm against callers.
        postfix = os.path.splitext(sources[0])[1]
        if postfix == '.cu':
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']
        return super(sources, output_dir, macros, include_dirs, debug,
                     extra_preargs, postargs, depends)

    self.compile = compile
class custom_build_ext(build_ext):
    """build_ext command that patches the compiler for gcc/nvcc dispatch."""

    def build_extensions(self):
        # Patch the compiler instance, then run the standard build.
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)
class CUDA_build_ext(build_ext):
    """
    Custom build_ext command that compiles CUDA files.
    Note that all extension source files will be processed with this compiler.
    """

    def build_extensions(self):
        # Route every source (not just .cu) through nvcc, and hook our own
        # spawn() so the command line can be rewritten per-platform below.
        self.compiler.src_extensions.append('.cu')
        self.compiler.set_executable('compiler_so', 'nvcc')
        self.compiler.set_executable('linker_so', 'nvcc --shared')
        if hasattr(self.compiler, '_c_extensions'):
            self.compiler._c_extensions.append('.cu')  # present on MSVCCompiler
        self.compiler.spawn = self.spawn
        build_ext.build_extensions(self)

    def spawn(self, cmd, search_path=1, verbose=0, dry_run=0):
        """
        Perform any CUDA specific customizations before actually launching
        compile/link etc. commands.
        """
        if (sys.platform == 'darwin' and len(cmd) >= 2 and cmd[0] == 'nvcc' and
                cmd[1] == '--shared' and cmd.count('-arch') > 0):
            # On macOS strip every '-arch xxx' pair: nvcc does not accept
            # the fat-binary arch flags distutils inserts when linking.
            while True:
                try:
                    index = cmd.index('-arch')
                    del cmd[index:index + 2]
                except ValueError:
                    break
        elif self.compiler.compiler_type == 'msvc':
            # Translate the MSVC-style command line emitted by distutils
            # into an equivalent nvcc invocation.
            cmd[:1] = ['nvcc', '--compiler-bindir',
                       (os.path.dirname(find_executable('cl.exe', PATH)) or cmd[0])]
            for idx, c in enumerate(cmd):
                # NOTE(review): this loop mutates cmd (del / slice
                # replacement) while enumerating it, so elements shifting
                # into an already-visited index are skipped -- confirm the
                # expected MSVC command lines tolerate this.
                if c == '/c':
                    cmd[idx] = '-c'
                elif c == '/DLL':
                    cmd[idx] = '--shared'
                elif '-fPIC' in c:
                    del cmd[idx]
                elif c.startswith('/Tc'):
                    cmd[idx] = c[3:]
                elif c.startswith('/Fo'):
                    cmd[idx:idx + 1] = ['-o', c[3:]]
                elif c.startswith('/LIBPATH:'):
                    cmd[idx] = '-L' + c[9:]
                elif c.startswith('/OUT:'):
                    cmd[idx:idx + 1] = ['-o', c[5:]]
                elif c.startswith('/EXPORT:'):
                    del cmd[idx]
                elif c == 'cublas.lib':
                    cmd[idx] = '-lcublas'
            if '--shared' in cmd:
                pass_on = '--linker-options='
                # nvcc links against the static CRT; avoid the clash.
                cmd.append('/NODEFAULTLIB:libcmt.lib')
            else:
                pass_on = '--compiler-options='
            # Forward any remaining slash-options to the host compiler.
            cmd = ([c for c in cmd if c[0] != '/'] +
                   [pass_on + ','.join(c for c in cmd if c[0] == '/')])
        spawn(cmd, search_path, verbose, dry_run)
def get_segmentation_test_batch(segdb, config):
    """
    Build per-image test batches for segmentation.
    :param segdb: list of dicts with at least ['image', 'flipped', 'im_info']
    :param config: the config setting
    :return: (data, label, im_info), each a list with one entry per image
    """
    imgs, seg_cls_gts, segdb = get_segmentation_image(segdb, config)
    im_info = [np.array([entry['im_info']], dtype=np.float32) for entry in segdb]
    data = [{'data': img, 'im_info': info} for img, info in zip(imgs, im_info)]
    label = [{'label': gt} for gt in seg_cls_gts]
    return data, label, im_info
def get_segmentation_train_batch(segdb, config):
    """
    Build a single-image training batch for segmentation.
    :param segdb: one-element list of dicts with ['image', 'flipped', 'im_info']
    :param config: the config setting
    :return: (data, label) dicts for the single image
    """
    assert len(segdb) == 1, 'Single batch only'
    imgs, seg_cls_gts, segdb = get_segmentation_image(segdb, config)
    im_info = np.array([segdb[0]['im_info']], dtype=np.float32)
    data = {'data': imgs[0], 'im_info': im_info}
    label = {'label': seg_cls_gts[0]}
    return data, label
def file_lines_to_list(path):
    """Read a text file and return its lines with surrounding whitespace stripped."""
    with open(path) as handle:
        return [line.strip() for line in handle]
class Pred():
    """A single detection: class id, confidence score, and box corners."""

    def __init__(self, id, conf, left, top, right, bottom):
        self.id = id
        self.conf = conf
        self.left = left
        self.top = top
        self.right = right
        self.bottom = bottom

    def calc_pred_intersection(self, pred):
        """Return the intersection area with another Pred of the same
        class id; 0 when the classes differ or the boxes do not overlap."""
        if self.id != pred.id:
            return 0
        # Overlap extent per axis; an empty extent short-circuits to 0.
        x0, x1 = max(self.left, pred.left), min(self.right, pred.right)
        if x0 > x1:
            return 0
        y0, y1 = max(self.top, pred.top), min(self.bottom, pred.bottom)
        if y0 > y1:
            return 0
        return (x1 - x0) * (y1 - y0)
def plot_preds(img, preds, clr=(255, 0, 255)):
    """Draw each prediction's box and class id onto *img* (loaded from disk
    first when a path string is given) and return the annotated image."""
    if isinstance(img, str):
        img = cv2.imread(img)
    for p in preds:
        top_left = (p.left, p.top)
        cv2.rectangle(img, top_left, (p.right, p.bottom), clr)
        cv2.putText(img, p.id, top_left, cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (255, 255, 255), 2, cv2.LINE_AA)
    return img
def read_predictions(txt_file):
    """Parse a detections file into a list of Pred objects.

    Each line has the semicolon-separated form: id;conf;left;top;right;bottom
    """
    predictions = []
    for line in file_lines_to_list(txt_file):
        id, conf, left, top, right, bottom = line.split(';')
        predictions.append(Pred(id, float(conf), int(left), int(top),
                                int(right), int(bottom)))
    return predictions
class ObjDetStats():
    """Detection-statistics helper built on a pickled ground-truth roidb.

    Loads the roidb from *gt_roidb_fname* and indexes entries by image
    basename so per-image detection .txt files can be matched to ground
    truth.
    """

    def __init__(self, gt_roidb_fname, cat_ords, logger):
        # gt_roidb_fname: path to a cPickle'd list of roidb entry dicts;
        # entries are read with keys 'image', 'boxes', 'gt_classes'
        # (and 'gt_names' for visualization).
        with open(gt_roidb_fname, 'rb') as fid:
            self.roidb = cPickle.load(fid)
        # Index roidb entries by image basename for O(1) lookup.
        self.roidb_ni = {}
        for entry in self.roidb:
            image_path = entry['image']
            im_path, im_name = os.path.split(image_path)
            self.roidb_ni[im_name] = entry
        self.cat_ords = cat_ords  # category ordinals of interest
        self.logger = logger

    def print_bboxes(self, dets_folder, query_images):
        """Render detections plus ground-truth boxes for every detections
        .txt file in *dets_folder* whose image appears in *query_images*."""
        import cv2
        from utils.show_boxes import show_dets_gt_boxes
        for detsName in os.listdir(dets_folder):
            if detsName[-4:] == '.txt':
                detsPath = os.path.join(dets_folder, detsName)
                imgName = detName_2_imgName(detsName)
                entry = self.roidb_ni[imgName]
                imgName = detName_2_imgName(detsName)  # NOTE(review): duplicate of the call above
                for q_im_path in query_images:
                    _, q_im_name = os.path.split(q_im_path)
                    if q_im_name == imgName:
                        im = cv2.imread(q_im_path)
                        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
                        dvisName = imgName_2_dvisName(imgName)
                        q_dets, cat_names = img_dets_CSV_2_A(detsPath, self.cat_ords)
                        show_dets_gt_boxes(im, q_dets, cat_names, entry['boxes'],
                                           entry['gt_names'], scale=1.0, FS=12, LW=1.5,
                                           save_file_path=os.path.join(dets_folder, dvisName))
                        break

    def add_base_stats_inDomain(self, dets_folder, perf_stats, Nclasses=1, ovthresh=0.5):
        """Accumulate per-image detection statistics against ground truth.

        Only ground-truth boxes whose class is in self.cat_ords participate.
        Returns the updated *perf_stats*.
        """
        for detsName in os.listdir(dets_folder):
            if detsName[-4:] == '.txt':
                q_dets, cat_names = img_dets_CSV_2_A(os.path.join(dets_folder, detsName),
                                                     self.cat_ords)
                imgName = detName_2_imgName(detsName)
                entry = self.roidb_ni[imgName]
                # Keep only ground-truth boxes for the categories of interest.
                gt_boxes_test = []
                gt_classes_test = []
                for gt_box, gt_class in zip(entry['boxes'], entry['gt_classes']):
                    if gt_class in self.cat_ords:
                        gt_boxes_test += [gt_box]
                        gt_classes_test += [gt_class]
                gt_classes_test = np.asarray(gt_classes_test)
                gt_boxes_test = np.asarray(gt_boxes_test)
                perf_stats.comp_epi_stats_m(q_dets, gt_boxes_test, gt_classes_test,
                                            self.cat_ords, ovthresh)
        perf_stats.print_perf(self.logger, prefix='_')
        return perf_stats
class PrefetchingIter(mx.io.DataIter):
    """Base class for prefetching iterators. Takes one or more DataIters (
    or any class with "reset" and "next" methods) and combine them with
    prefetching.

    Parameters
    ----------
    iters : DataIter or list of DataIter
        one or more DataIters (or any class with "reset" and "next" methods)
    rename_data : None or list of dict
        i-th element is a renaming map for i-th iter, in the form of
        {'original_name' : 'new_name'}. Should have one entry for each entry
        in iter[i].provide_data
    rename_label : None or list of dict
        Similar to rename_data

    Examples
    --------
    iter = PrefetchingIter([NDArrayIter({'data': X1}), NDArrayIter({'data': X2})],
                           rename_data=[{'data': 'data1'}, {'data': 'data2'}])
    """

    def __init__(self, iters, rename_data=None, rename_label=None):
        super(PrefetchingIter, self).__init__()
        if not isinstance(iters, list):
            iters = [iters]
        self.n_iter = len(iters)
        assert self.n_iter == 1, 'Our prefetching iter only support 1 DataIter'
        self.iters = iters
        self.rename_data = rename_data
        self.rename_label = rename_label
        # batch size = (#data entries) * (first dim of the first data shape)
        self.batch_size = len(self.provide_data) * self.provide_data[0][0][1][0]
        # Producer/consumer handshake: data_ready[i] set => next_batch[i]
        # holds a fetched batch; data_taken[i] set => the consumer has taken
        # it and the thread may fetch the next one.
        self.data_ready = [threading.Event() for i in range(self.n_iter)]
        self.data_taken = [threading.Event() for i in range(self.n_iter)]
        for e in self.data_taken:
            e.set()
        self.started = True
        self.current_batch = [None for _ in range(self.n_iter)]
        self.next_batch = [None for _ in range(self.n_iter)]

        def prefetch_func(self, i):
            """Thread entry"""
            while True:
                self.data_taken[i].wait()
                if not self.started:
                    break
                try:
                    self.next_batch[i] = self.iters[i].next()
                except StopIteration:
                    # None signals exhaustion to iter_next().
                    self.next_batch[i] = None
                self.data_taken[i].clear()
                self.data_ready[i].set()

        # Daemon threads so a blocked prefetcher never prevents exit.
        self.prefetch_threads = [threading.Thread(target=prefetch_func, args=[self, i])
                                 for i in range(self.n_iter)]
        for thread in self.prefetch_threads:
            thread.setDaemon(True)
            thread.start()

    def __del__(self):
        # Wake the prefetch threads so they observe started == False and exit.
        self.started = False
        for e in self.data_taken:
            e.set()
        for thread in self.prefetch_threads:
            thread.join()

    @property
    def provide_data(self):
        """The name and shape of data provided by this iterator"""
        if self.rename_data is None:
            return sum([i.provide_data for i in self.iters], [])
        else:
            return sum([[(DataDesc(r[x.name], x.shape, x.dtype)
                          if isinstance(x, DataDesc)
                          else DataDesc(*x))
                         for x in i.provide_data]
                        for r, i in zip(self.rename_data, self.iters)], [])

    @property
    def provide_label(self):
        """The name and shape of label provided by this iterator"""
        if self.rename_label is None:
            return sum([i.provide_label for i in self.iters], [])
        else:
            return sum([[(DataDesc(r[x.name], x.shape, x.dtype)
                          if isinstance(x, DataDesc)
                          else DataDesc(*x))
                         for x in i.provide_label]
                        for r, i in zip(self.rename_label, self.iters)], [])

    def reset(self):
        # Wait for in-flight prefetches, reset the wrapped iterators,
        # then rearm the handshake events.
        for e in self.data_ready:
            e.wait()
        for i in self.iters:
            i.reset()
        for e in self.data_ready:
            e.clear()
        for e in self.data_taken:
            e.set()

    def iter_next(self):
        # Block until the prefetch thread publishes a batch; None means
        # the underlying iterator is exhausted.
        for e in self.data_ready:
            e.wait()
        if self.next_batch[0] is None:
            return False
        else:
            self.current_batch = self.next_batch[0]
            for e in self.data_ready:
                e.clear()
            for e in self.data_taken:
                e.set()
            return True

    def next(self):
        if self.iter_next():
            return self.current_batch
        else:
            raise StopIteration

    def getdata(self):
        return self.current_batch.data

    def getlabel(self):
        return self.current_batch.label

    def getindex(self):
        return self.current_batch.index

    def getpad(self):
        return self.current_batch.pad
def combine_model(prefix1, epoch1, prefix2, epoch2, prefix_out, epoch_out):
    """Merge two saved checkpoints into one and save the result.

    When both checkpoints define the same parameter name, the second
    checkpoint's value wins (matching the original per-key overwrite order).

    :param prefix1, epoch1: first checkpoint to load
    :param prefix2, epoch2: second checkpoint to load (takes precedence)
    :param prefix_out, epoch_out: prefix/epoch for the combined checkpoint
    """
    args1, auxs1 = load_checkpoint(prefix1, epoch1)
    args2, auxs2 = load_checkpoint(prefix2, epoch2)
    # dict.update preserves "second checkpoint wins" and replaces the
    # Python-2-only `keys() + keys()` concatenation plus redundant
    # per-name membership loops.
    args = dict(args1)
    args.update(args2)
    auxs = dict(auxs1)
    auxs.update(auxs2)
    save_checkpoint(prefix_out, epoch_out, args, auxs)
@mx.init.register
class MyConstant(mx.init.Initializer):
    """Initializer that fills a weight array with a fixed constant *value*."""

    def __init__(self, value):
        # Forward value to the base class so the initializer can be
        # serialized/recreated by name with its argument.
        super(MyConstant, self).__init__(value=value)
        self.value = value

    def _init_weight(self, _, arr):
        # Copy the stored constant into the weight array in place.
        arr[:] = mx.nd.array(self.value)
def create_logger(root_output_path, cfg, image_set):
    """Create the output directory tree and a file logger for a run.

    Layout: <root_output_path>/<cfg basename w/o extension>/<image sets
    joined by '_'>/, with a timestamped log file inside.

    :param root_output_path: root folder for all experiment output
    :param cfg: path to the config file; its basename names the subfolder
    :param image_set: ';'-separated image set names, e.g. 'train;val'
    :return: (logger, final_output_path)
    """
    if not os.path.exists(root_output_path):
        os.makedirs(root_output_path)
    assert os.path.exists(root_output_path), '{} does not exist'.format(root_output_path)

    cfg_name = os.path.basename(cfg).split('.')[0]
    config_output_path = os.path.join(root_output_path, '{}'.format(cfg_name))
    if not os.path.exists(config_output_path):
        os.makedirs(config_output_path)

    # split(';') already yields the list; the original wrapped it in a
    # redundant identity comprehension.
    image_sets = image_set.split(';')
    final_output_path = os.path.join(config_output_path, '{}'.format('_'.join(image_sets)))
    if not os.path.exists(final_output_path):
        os.makedirs(final_output_path)

    log_file = '{}_{}.log'.format(cfg_name, time.strftime('%Y-%m-%d-%H-%M'))
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(filename=os.path.join(final_output_path, log_file), format=head)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    return logger, final_output_path
class UnknownImageFormat(Exception):
    """Raised when an image's format cannot be recognized or decoded."""
    pass
class Image(collections.namedtuple('Image', image_fields)):
    """Lightweight image metadata record whose fields come from the
    module-level ``image_fields`` (the formatters below read width, height,
    file_size, type and path)."""

    def to_str_row(self):
        # Tab-separated one-line summary; tabs inside the path are escaped.
        return ('%d\t%d\t%d\t%s\t%s' % (
            self.width, self.height, self.file_size, self.type,
            self.path.replace('\t', '\\t')))

    def to_str_row_verbose(self):
        # Same as to_str_row, plus the full repr as a trailing '##' field.
        return ('%d\t%d\t%d\t%s\t%s\t##%s' % (
            self.width, self.height, self.file_size, self.type,
            self.path.replace('\t', '\\t'), self))

    def to_str_json(self, indent=None):
        # JSON object built from the namedtuple's field dict.
        return json.dumps(self._asdict(), indent=indent)
def get_image_size(file_path):
    """
    Return (width, height) for a given img file content - no external
    dependencies except the os and struct builtin modules
    """
    meta = get_image_metadata(file_path)
    return meta.width, meta.height
def get_image_size_from_bytesio(input, size):
    """
    Return (width, height) for image data held in a seekable buffer.

    Args:
        input (io.IOBase): io object support read & seek
        size (int): size of buffer in byte
    """
    meta = get_image_metadata_from_bytesio(input, size)
    return meta.width, meta.height
def get_image_metadata(file_path):
    """
    Return an `Image` metadata record for the file at *file_path* - no
    external dependencies except the os and struct builtin modules.

    Args:
        file_path (str): path to an image file

    Returns:
        Image: (path, type, file_size, width, height)
    """
    file_size = os.path.getsize(file_path)
    with io.open(file_path, 'rb') as stream:
        return get_image_metadata_from_bytesio(stream, file_size, file_path)
def get_image_metadata_from_bytesio(input, size, file_path=None):
    """
    Return an `Image` object for a given img file content - no external
    dependencies except the os and struct builtin modules

    Args:
        input (io.IOBase): io object support read & seek
        size (int): size of buffer in byte
        file_path (str): path to an image file

    Returns:
        Image: (path, type, file_size, width, height)

    Raises:
        UnknownImageFormat: when the buffer matches no supported format or
            the matched format's header cannot be decoded.
    """
    height = -1
    width = -1
    # The first 26 bytes are enough to identify every supported format.
    data = input.read(26)
    msg = ' raised while trying to decode as JPEG.'
    if (size >= 10) and (data[:6] in (b'GIF87a', b'GIF89a')):
        # GIF: little-endian 16-bit width/height right after the signature.
        imgtype = GIF
        (w, h) = struct.unpack('<HH', data[6:10])
        width = int(w)
        height = int(h)
    elif ((size >= 24) and data.startswith(b'\x89PNG\r\n\x1a\n') and (data[12:16] == b'IHDR')):
        # Standard PNG: IHDR chunk carries big-endian 32-bit dimensions.
        imgtype = PNG
        (w, h) = struct.unpack('>LL', data[16:24])
        width = int(w)
        height = int(h)
    elif (size >= 16) and data.startswith(b'\x89PNG\r\n\x1a\n'):
        # Fallback for PNG data without IHDR at the expected offset.
        imgtype = PNG
        (w, h) = struct.unpack('>LL', data[8:16])
        width = int(w)
        height = int(h)
    elif (size >= 2) and data.startswith(b'\xff\xd8'):
        # JPEG: walk the marker segments until a SOF0..SOF3 frame header
        # (0xC0-0xC3) yields dimensions; 0xDA (start of scan) ends the walk.
        imgtype = JPEG
        input.seek(0)
        input.read(2)
        b = input.read(1)
        try:
            while b and (ord(b) != 218):
                while ord(b) != 255:
                    b = input.read(1)
                while ord(b) == 255:
                    b = input.read(1)
                if (ord(b) >= 192) and (ord(b) <= 195):
                    input.read(3)
                    (h, w) = struct.unpack('>HH', input.read(4))
                    break
                else:
                    # Skip the rest of this segment (length includes itself).
                    input.read(int(struct.unpack('>H', input.read(2))[0]) - 2)
                b = input.read(1)
            width = int(w)
            height = int(h)
        except struct.error:
            raise UnknownImageFormat('StructError' + msg)
        except ValueError:
            raise UnknownImageFormat('ValueError' + msg)
        except Exception as e:
            raise UnknownImageFormat(e.__class__.__name__ + msg)
    elif (size >= 26) and data.startswith(b'BM'):
        # BMP: the DIB header size selects between the legacy 12-byte header
        # (16-bit dims) and the 40+-byte headers (signed 32-bit dims).
        imgtype = 'BMP'
        headersize = struct.unpack('<I', data[14:18])[0]
        if headersize == 12:
            (w, h) = struct.unpack('<HH', data[18:22])
            width = int(w)
            height = int(h)
        elif headersize >= 40:
            (w, h) = struct.unpack('<ii', data[18:26])
            width = int(w)
            # height is negative for top-down bitmaps.
            height = abs(int(h))
        else:
            raise UnknownImageFormat('Unkown DIB header size:' + str(headersize))
    elif (size >= 8) and (data[:4] in (b'II*\x00', b'MM\x00*')):
        # TIFF: scan IFD entries for ImageWidth (256) / ImageLength (257).
        imgtype = TIFF
        byteOrder = data[:2]
        # 'MM' = big-endian, otherwise ('II') little-endian.
        boChar = '>' if (byteOrder == 'MM') else '<'
        # TIFF field type id -> (byte size, struct format string).
        tiffTypes = {1: (1, (boChar + 'B')), 2: (1, (boChar + 'c')),
                     3: (2, (boChar + 'H')), 4: (4, (boChar + 'L')),
                     5: (8, (boChar + 'LL')), 6: (1, (boChar + 'b')),
                     7: (1, (boChar + 'c')), 8: (2, (boChar + 'h')),
                     9: (4, (boChar + 'l')), 10: (8, (boChar + 'll')),
                     11: (4, (boChar + 'f')), 12: (8, (boChar + 'd'))}
        ifdOffset = struct.unpack((boChar + 'L'), data[4:8])[0]
        try:
            countSize = 2
            input.seek(ifdOffset)
            ec = input.read(countSize)
            ifdEntryCount = struct.unpack((boChar + 'H'), ec)[0]
            ifdEntrySize = 12
            for i in range(ifdEntryCount):
                entryOffset = ifdOffset + countSize + (i * ifdEntrySize)
                input.seek(entryOffset)
                tag = input.read(2)
                tag = struct.unpack((boChar + 'H'), tag)[0]
                if (tag == 256) or (tag == 257):
                    type = input.read(2)
                    type = struct.unpack((boChar + 'H'), type)[0]
                    if type not in tiffTypes:
                        raise UnknownImageFormat('Unkown TIFF field type:' + str(type))
                    typeSize = tiffTypes[type][0]
                    typeChar = tiffTypes[type][1]
                    input.seek(entryOffset + 8)
                    value = input.read(typeSize)
                    value = int(struct.unpack(typeChar, value)[0])
                    if tag == 256:
                        width = value
                    else:
                        height = value
                # Stop as soon as both dimensions are known.
                if (width > -1) and (height > -1):
                    break
        except Exception as e:
            raise UnknownImageFormat(str(e))
    elif size >= 2:
        # ICO: directory entries store 8-bit width/height; only the first
        # image's size is reported.
        imgtype = 'ICO'
        input.seek(0)
        reserved = input.read(2)
        if 0 != struct.unpack('<H', reserved)[0]:
            raise UnknownImageFormat(FILE_UNKNOWN)
        format = input.read(2)
        assert 1 == struct.unpack('<H', format)[0]
        num = input.read(2)
        num = struct.unpack('<H', num)[0]
        if num > 1:
            import warnings
            warnings.warn('ICO File contains more than one image')
        w = input.read(1)
        h = input.read(1)
        width = ord(w)
        height = ord(h)
    else:
        raise UnknownImageFormat(FILE_UNKNOWN)
    return Image(path=file_path, type=imgtype, file_size=size, width=width, height=height)
class Test_get_image_size(unittest.TestCase):
    """Unit tests for the image-size helpers, driven by one PNG fixture."""

    data = [{'path': 'lookmanodeps.png', 'width': 251, 'height': 208,
             'file_size': 22228, 'type': 'PNG'}]

    def setUp(self):
        pass

    def test_get_image_size_from_bytesio(self):
        record = self.data[0]
        with io.open(record['path'], 'rb') as handle:
            raw = handle.read()
        result = get_image_size_from_bytesio(io.BytesIO(raw), len(raw))
        self.assertTrue(result)
        self.assertEqual(result, (record['width'], record['height']))

    def test_get_image_metadata_from_bytesio(self):
        record = self.data[0]
        with io.open(record['path'], 'rb') as handle:
            raw = handle.read()
        result = get_image_metadata_from_bytesio(io.BytesIO(raw), len(raw))
        self.assertTrue(result)
        for field in image_fields:
            # path is None because no file_path argument was supplied.
            expected = None if field == 'path' else record[field]
            self.assertEqual(getattr(result, field), expected)

    def test_get_image_metadata(self):
        record = self.data[0]
        result = get_image_metadata(record['path'])
        self.assertTrue(result)
        for field in image_fields:
            self.assertEqual(getattr(result, field), record[field])

    def test_get_image_metadata__ENOENT_OSError(self):
        with self.assertRaises(OSError):
            get_image_metadata('THIS_DOES_NOT_EXIST')

    def test_get_image_metadata__not_an_image_UnknownImageFormat(self):
        with self.assertRaises(UnknownImageFormat):
            get_image_metadata('README.rst')

    def test_get_image_size(self):
        record = self.data[0]
        result = get_image_size(record['path'])
        self.assertTrue(result)
        self.assertEqual(result, (record['width'], record['height']))

    def tearDown(self):
        pass
def main(argv=None):
    """
    Print image metadata fields for the given file path.

    Keyword Arguments:
        argv (list): commandline arguments (e.g. sys.argv[1:])
    Returns:
        int: zero for OK
    """
    import logging
    import optparse
    import sys

    prs = optparse.OptionParser(
        usage='%prog [-v|--verbose] [--json|--json-indent] <path0> [<pathN>]',
        description='Print metadata for the given image paths (without image library bindings).')
    prs.add_option('--json', dest='json', action='store_true')
    prs.add_option('--json-indent', dest='json_indent', action='store_true')
    prs.add_option('-v', '--verbose', dest='verbose', action='store_true')
    prs.add_option('-q', '--quiet', dest='quiet', action='store_true')
    prs.add_option('-t', '--test', dest='run_tests', action='store_true')

    argv = list(argv) if argv is not None else sys.argv[1:]
    (opts, args) = prs.parse_args(args=argv)

    # Verbosity selection: -v wins over -q; INFO by default.
    loglevel = logging.INFO
    if opts.verbose:
        loglevel = logging.DEBUG
    elif opts.quiet:
        loglevel = logging.ERROR
    logging.basicConfig(level=loglevel)
    log = logging.getLogger()
    log.debug('argv: %r', argv)
    log.debug('opts: %r', opts)
    log.debug('args: %r', args)

    if opts.run_tests:
        # Re-run the module under unittest, forwarding the remaining args.
        import sys
        sys.argv = [sys.argv[0]] + args
        import unittest
        return unittest.main()

    # Select the output formatter implied by the flags (row by default;
    # --json-indent > --json > --verbose).
    output_func = Image.to_str_row
    if opts.json_indent:
        import functools
        output_func = functools.partial(Image.to_str_json, indent=2)
    elif opts.json:
        output_func = Image.to_str_json
    elif opts.verbose:
        output_func = Image.to_str_row_verbose

    EX_OK = 0
    EX_NOT_OK = 2

    if len(args) < 1:
        prs.print_help()
        print('')
        # prs.error() exits the process with usage information.
        prs.error('You must specify one or more paths to image files')

    # Process every path, collecting failures instead of aborting.
    errors = []
    for path_arg in args:
        try:
            img = get_image_metadata(path_arg)
            print(output_func(img))
        except KeyboardInterrupt:
            raise
        except OSError as e:
            log.error((path_arg, e))
            errors.append((path_arg, e))
        except Exception as e:
            log.exception(e)
            errors.append((path_arg, e))
            pass
    if len(errors):
        import pprint
        print('ERRORS', file=sys.stderr)
        print('======', file=sys.stderr)
        print(pprint.pformat(errors, indent=2), file=sys.stderr)
        return EX_NOT_OK
    return EX_OK
def load_checkpoint(prefix, epoch):
    """
    Load model checkpoint from file.
    :param prefix: Prefix of model name.
    :param epoch: Epoch number of model we would like to load.
    :return: (arg_params, aux_params)
        arg_params : dict of str to NDArray
            Model parameter, dict of name to NDArray of net's weights.
        aux_params : dict of str to NDArray
            Model parameter, dict of name to NDArray of net's auxiliary states.
    """
    save_dict = mx.nd.load('%s-%04d.params' % (prefix, epoch))
    arg_params = {}
    aux_params = {}
    # Saved names are prefixed with 'arg:' or 'aux:' -- split once and
    # route each tensor to the matching dict.
    for full_name, array in save_dict.items():
        kind, name = full_name.split(':', 1)
        if kind == 'arg':
            arg_params[name] = array
        elif kind == 'aux':
            aux_params[name] = array
    return arg_params, aux_params
def convert_context(params, ctx):
    """
    Copy every array in *params* onto the given device context.
    :param params: dict of str to NDArray
    :param ctx: the context to convert to
    :return: dict of str of NDArray with context ctx
    """
    return {name: array.as_in_context(ctx) for name, array in params.items()}
def load_param(prefix, epoch, convert=False, ctx=None, process=False):
    """
    Wrapper for load_checkpoint.
    :param prefix: Prefix of model name.
    :param epoch: Epoch number of model we would like to load.
    :param convert: reference model should be converted to GPU NDArray first
    :param ctx: if convert then ctx must be designated.
    :param process: model should drop any test
    :return: (arg_params, aux_params)
    """
    arg_params, aux_params = load_checkpoint(prefix, epoch)
    if convert:
        # Default to CPU when no explicit context was requested.
        if ctx is None:
            ctx = mx.cpu()
        arg_params = convert_context(arg_params, ctx)
        aux_params = convert_context(aux_params, ctx)
    if process:
        # Rename every '*_test' parameter to its deploy-time name;
        # snapshot the keys first since the dict is mutated below.
        for test_key in [k for k in arg_params.keys() if '_test' in k]:
            arg_params[test_key.replace('_test', '')] = arg_params.pop(test_key)
    return arg_params, aux_params
class WarmupMultiFactorScheduler(LRScheduler): 'Reduce learning rate in factor at steps specified in a list\n\n Assume the weight has been updated by n times, then the learning rate will\n be\n\n base_lr * factor^(sum((step/n)<=1)) # step is an array\n\n Parameters\n ----------\n step: list of int\n schedule learning rate after n updates\n factor: float\n the factor for reducing the learning rate\n ' def __init__(self, step, factor=1, warmup=False, warmup_lr=0, warmup_step=0): super(WarmupMultiFactorScheduler, self).__init__() assert (isinstance(step, list) and (len(step) >= 1)) for (i, _step) in enumerate(step): if ((i != 0) and (step[i] <= step[(i - 1)])): raise ValueError('Schedule step must be an increasing integer list') if (_step < 1): raise ValueError('Schedule step must be greater or equal than 1 round') if (factor > 1.0): raise ValueError('Factor must be no more than 1 to make lr reduce') self.step = step self.cur_step_ind = 0 self.factor = factor self.count = 0 self.warmup = warmup self.warmup_lr = warmup_lr self.warmup_step = warmup_step def __call__(self, num_update): '\n Call to schedule current learning rate\n\n Parameters\n ----------\n num_update: int\n the maximal number of updates applied to a weight.\n ' if (self.warmup and (num_update < self.warmup_step)): return self.warmup_lr while (self.cur_step_ind <= (len(self.step) - 1)): if (num_update > self.step[self.cur_step_ind]): self.count = self.step[self.cur_step_ind] self.cur_step_ind += 1 self.base_lr *= self.factor logging.info('Update[%d]: Change learning rate to %0.5e', num_update, self.base_lr) else: return self.base_lr return self.base_lr
def save_checkpoint(prefix, epoch, arg_params, aux_params): "Checkpoint the model data into file.\n :param prefix: Prefix of model name.\n :param epoch: The epoch number of the model.\n :param arg_params: dict of str to NDArray\n Model parameter, dict of name to NDArray of net's weights.\n :param aux_params: dict of str to NDArray\n Model parameter, dict of name to NDArray of net's auxiliary states.\n :return: None\n prefix-epoch.params will be saved for parameters.\n " save_dict = {('arg:%s' % k): v for (k, v) in arg_params.items()} save_dict.update({('aux:%s' % k): v for (k, v) in aux_params.items()}) param_name = ('%s-%04d.params' % (prefix, epoch)) mx.nd.save(param_name, save_dict)
class Symbol(): def __init__(self): self.arg_shape_dict = None self.out_shape_dict = None self.aux_shape_dict = None self.sym = None @property def symbol(self): return self.sym def get_symbol(self, cfg, is_train=True): '\n return a generated symbol, it also need to be assigned to self.sym\n ' raise NotImplementedError() def init_weights(self, cfg, arg_params, aux_params): raise NotImplementedError() def get_msra_std(self, shape): fan_in = float(shape[1]) if (len(shape) > 2): fan_in *= np.prod(shape[2:]) print(np.sqrt((2 / fan_in))) return np.sqrt((2 / fan_in)) def infer_shape(self, data_shape_dict): (arg_shape, out_shape, aux_shape) = self.sym.infer_shape(**data_shape_dict) self.arg_shape_dict = dict(zip(self.sym.list_arguments(), arg_shape)) self.out_shape_dict = dict(zip(self.sym.list_outputs(), out_shape)) self.aux_shape_dict = dict(zip(self.sym.list_auxiliary_states(), aux_shape)) def check_parameter_shapes(self, arg_params, aux_params, data_shape_dict, is_train=True): for k in self.sym.list_arguments(): if ((k in data_shape_dict) or (False if is_train else ('label' in k)) or ('const_eq_' in k) or ('cls_reps' in k)): continue assert (k in arg_params), (k + ' not initialized') assert (arg_params[k].shape == self.arg_shape_dict[k]), ((((('shape inconsistent for ' + k) + ' inferred ') + str(self.arg_shape_dict[k])) + ' provided ') + str(arg_params[k].shape)) for k in self.sym.list_auxiliary_states(): assert (k in aux_params), (k + ' not initialized') assert (aux_params[k].shape == self.aux_shape_dict[k]), ((((('shape inconsistent for ' + k) + ' inferred ') + str(self.aux_shape_dict[k])) + ' provided ') + str(aux_params[k].shape))
def tic(): import time global startTime_for_tictoc startTime_for_tictoc = time.time() return startTime_for_tictoc
def toc(): if ('startTime_for_tictoc' in globals()): endTime = time.time() return (endTime - startTime_for_tictoc) else: return None
def get_dets_class_names(dets_n): for det in dets_n: class_names = [] for prd in det[3]: if (prd in prd2ename.keys()): class_names += [prd2ename[prd]] else: class_names += [prd] det += [class_names] return dets_n
def print_typical_tray_multiview(): imgname_left = os.path.join(data_root, '15849_left.jpg') imgname_top = os.path.join(data_root, '15849_top.jpg') imgname_right = os.path.join(data_root, '15849_right.jpg') img_left = mpimg.imread(imgname_left) img_top = mpimg.imread(imgname_top) img_right = mpimg.imread(imgname_right) fig = plt.figure(1) ff = 3 fig.set_size_inches(((ff * 8.5), ((3 * ff) * 11)), forward=False) plt.subplot(131) plt.imshow(img_left) plt.axis('off') plt.subplot(132) plt.imshow(img_top) plt.axis('off') plt.subplot(133) plt.imshow(img_right) plt.axis('off')
def display_few_shot_examples(): image_set = ['5cadb37d4b967f67d3047964.jpg', '12686073-1-white.jpg', 'lacoste-logo-sweatshirt-white-p12654-72879_image.jpg'] fig = plt.figure(2) ff = 2 fig.set_size_inches(((ff * 8.5), ((3 * ff) * 11)), forward=False) for (cnt, img_basename) in enumerate(image_set): imgname = os.path.join(data_root, img_basename) img = mpimg.imread(imgname) plt.subplot(1, 3, (cnt + 1)) plt.imshow(img) plt.axis('off')
def test_on_query_image(fs_serv, test_img_fname, det_engines): img = cv2.imread(test_img_fname, (cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)) score_thresh = 0.1 dets_n = fs_serv.detect_on_image(img, score_thresh=score_thresh, det_engines=det_engines) dets_n = get_dets_class_names(dets_n) test_img_basename = os.path.basename(test_img_fname) img_file_path = os.path.join(disp_folder, test_img_basename.replace('.jpg', '_disp_n.jpg')) disp_dets(img, dets_n, img_file_path, figure_factor=2.0)
def get_box_proposal(fs_serv, img_path): from show_boxes import show_detsB_boxes q_dets_p = fs_serv.get_box_proposal(img_path) image_basename = os.path.basename(img_path) save_file_path = os.path.join(disp_folder, 'box_prop_{0}'.format(image_basename)) img = cv2.cvtColor(cv2.imread(img_path, (cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)), cv2.COLOR_BGR2RGB) show_detsB_boxes(img, q_dets_p, save_file_path=save_file_path)
def disp_dets2(img, dets, save_file_path): import matplotlib.pyplot as plt fig = plt.figure(1) fig.set_size_inches(((1.4 * 8.5), (1.4 * 11)), forward=False) plt.axis('off') img_d = cv2.cvtColor(copy.deepcopy(img), cv2.COLOR_BGR2RGB) w = 12 for det in dets: bbox = det[0] scores = det[1] labels = det[3] pn = det[4] left = int(bbox[0]) top = int(bbox[1]) right = int(bbox[2]) bottom = int(bbox[3]) if (pn == 'p'): cv2.rectangle(img_d, (left, top), (right, bottom), (255, 0, 0), 4) else: cv2.rectangle(img_d, (left, top), (right, bottom), (0, 255, 0), 4) i = 0 for (s, l) in zip(scores, labels): cv2.rectangle(img_d, (left, (top - w)), ((left + 450), ((top + w) + ((2 * w) * i))), (255, 255, 255), cv2.FILLED) i += 1 i = 0 for (s, l) in zip(scores, labels): text = '{0} - {1:.3f} {2}'.format(l, s, id2name[l]) cv2.putText(img_d, text, (left, (top + ((2 * w) * i))), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color=(0, 0, 0), thickness=2) i += 1 plt.imshow(img_d) fig.savefig(save_file_path)
def test_on_query_image(fs_serv, test_img_fname, score_thresh=0.1, det_engines=1, figure_factor=2.2, FontScale=2.3): img = cv2.imread(test_img_fname, (cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)) dets_n = fs_serv.detect_on_image(img, score_thresh=score_thresh, det_engines=det_engines) test_img_basename = os.path.basename(test_img_fname) img_file_path = os.path.join(disp_folder, test_img_basename.replace('.jpg', '_disp_n.jpg')) disp_dets2(img, dets_n, img_file_path, figure_factor=figure_factor, FontScale=FontScale)
def display_RedHat_examples(): image_set = ['logo1.jpg', 'logo2.jpg', 'logo3.jpg'] fig = plt.figure(2) ff = 2 fig.set_size_inches(((ff * 8.5), ((3 * ff) * 11)), forward=False) enrollment_root = '../data/Logo/logo_usecase_data/RedHat/RedHat_enrollment' for (cnt, img_basename) in enumerate(image_set): imgname = os.path.join(enrollment_root, img_basename) img = mpimg.imread(imgname) plt.subplot(1, 3, (cnt + 1)) plt.imshow(img) plt.axis('off')
def display_lacoste_examples(): image_set = ['5cadb37d4b967f67d3047964.jpg', 'lacoste-logo-sweatshirt-white-p12654-72879_image.jpg', '12686073-1-white.jpg'] fig = plt.figure(2) ff = 2 fig.set_size_inches(((ff * 8.5), ((3 * ff) * 11)), forward=False) enrollment_root = '../data/Logo/logo_usecase_data/lacoste/lacoste_enrollment/images' for (cnt, img_basename) in enumerate(image_set): imgname = os.path.join(enrollment_root, img_basename) img = mpimg.imread(imgname) plt.subplot(1, 3, (cnt + 1)) plt.imshow(img) plt.axis('off')
def display_few_shot_examples(): data_root = '/dccstor/jsdata1/dev/RepMet/notebooks/food_usecase_data' image_set = ['PRDS990000000000000025_0_192_501_589_885_top.jpg', 'PRDS990000000000000024_0_119_137_523_447_top.jpg', 'PRDS990000000000000023_0_118_208_470_612_top.jpg', 'PRDS990000000000000021_0_571_234_923_608_top.jpg'] nrows = 1 ncols = 4 fig = plt.figure(2) ff = 2 fig.set_size_inches(((ff * 8.5), ((3 * ff) * 11)), forward=False) for (cnt, img_basename) in enumerate(image_set): imgname = os.path.join(data_root, img_basename) img = mpimg.imread(imgname) plt.subplot(nrows, ncols, (cnt + 1)) plt.imshow(img) plt.axis('off')
def print_typical_tray_multiview(): imgname_left = os.path.join(data_root, '1007_10000000006530_left.jpg') imgname_top = os.path.join(data_root, '1007_10000000006530_top.jpg') imgname_right = os.path.join(data_root, '1007_10000000006530_right.jpg') img_left = mpimg.imread(imgname_left) img_top = mpimg.imread(imgname_top) img_right = mpimg.imread(imgname_right) fig = plt.figure(1) ff = 3 fig.set_size_inches(((ff * 8.5), ((3 * ff) * 11)), forward=False) plt.subplot(131) plt.imshow(img_left) plt.axis('off') plt.subplot(132) plt.imshow(img_top) plt.axis('off') plt.subplot(133) plt.imshow(img_right) plt.axis('off')
def disp_dets(img, dets, save_file_path): import matplotlib.pyplot as plt fig = plt.figure(1) ff = 2 fig.set_size_inches(((ff * 8.5), (ff * 11)), forward=False) plt.axis('off') img_d = cv2.cvtColor(copy.deepcopy(img), cv2.COLOR_BGR2RGB) for det in dets: bbox = det[0] scores = det[1] labels = det[3] pn = det[4] left = int(bbox[0]) top = int(bbox[1]) right = int(bbox[2]) bottom = int(bbox[3]) if (pn == 'p'): cv2.rectangle(img_d, (left, top), (right, bottom), (255, 0, 0), 4) elif (pn == 'n'): cv2.rectangle(img_d, (left, top), (right, bottom), (0, 255, 0), 4) elif (pn == 'c'): cv2.rectangle(img_d, (left, top), (right, bottom), (0, 0, 255), 4) elif (pn == 'a'): cv2.rectangle(img_d, (left, top), (right, bottom), (0, 255, 255), 4) for det in dets: bbox = det[0] scores = det[1] labels = det[3] pn = det[4] left = int(bbox[0]) top = int(bbox[1]) text = '{0} - {1:.3f}'.format(labels[0], scores[0]) string_len = (len(text) * 13) bk_w = 17 FontScale = 0.6 cv2.rectangle(img_d, ((left - 2), (top - bk_w)), ((left + string_len), (top + bk_w)), (255, 255, 255), cv2.FILLED) cv2.putText(img_d, text, (left, top), cv2.FONT_HERSHEY_TRIPLEX, FontScale, color=(0, 0, 0), thickness=1) if (len(det) == 6): text = det[5][0] string_len = (len(text) * 13) cv2.rectangle(img_d, ((left - 2), ((top - bk_w) + (2 * bk_w))), ((left + string_len), ((top + bk_w) + (2 * bk_w))), (255, 255, 255), cv2.FILLED) cv2.putText(img_d, text, (left, (top + (2 * bk_w))), cv2.FONT_HERSHEY_TRIPLEX, FontScale, color=(0, 0, 0), thickness=1) plt.imshow(img_d) fig.savefig(save_file_path)
def test_on_query_image(fs_serv, test_img_fname, det_engines): prd2ename_fname = '/dccstor/jsdata1/dev/RepMet/data/JES_pilot/all_GT.csv_converted_Feb24_prd2ename.csv' prd2ename_data = np.load(prd2ename_fname) prd2ename = prd2ename_data['prd2ename'] img = cv2.imread(test_img_fname, (cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)) disp_folder = '../notebooks/food_usecase_data' score_thresh = 0.1 dets_n = fs_serv.detect_on_image(img, score_thresh=score_thresh, num_candidates=3, det_engines=det_engines) for det in dets_n: class_names = [] for prd in det[3]: if (prd in prd2ename.keys()): class_names += [prd2ename[prd]] else: class_names += [prd] det += [class_names] test_img_basename = os.path.basename(test_img_fname) img_file_path = os.path.join(disp_folder, test_img_basename.replace('.jpg', '_disp_n.jpg')) det_file_path = os.path.join(disp_folder, test_img_basename.replace('.jpg', '_dets_n.txt')) disp_dets(img, dets_n, img_file_path)
def get_box_proposal(fs_serv, img_path): from show_boxes import show_detsB_boxes output_folder = '../notebooks/food_usecase_data' q_dets_p = fs_serv.get_box_proposal(img_path) image_basename = os.path.basename(img_path) save_file_path = os.path.join(output_folder, 'box_prop_{0}'.format(image_basename)) img = cv2.cvtColor(cv2.imread(img_path, (cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)), cv2.COLOR_BGR2RGB) show_detsB_boxes(img, q_dets_p, save_file_path=save_file_path)
def display_few_shot_examples(): data_root = '/opt/DNN/dataset/retail/retail_test' image_set = ['0de3ec888566edc2.jpg', '2e90529bcb43f44c.jpg', '15a8f82801fe4911.jpg', '34caf4cf9ae0ae92.jpg'] nrows = 1 ncols = np.ceil((len(image_set) / nrows)) print('nrows={0},ncols={1}'.format(nrows, ncols)) fig = plt.figure(2) ff = 2 fig.set_size_inches(((ff * 8.5), ((3 * ff) * 11)), forward=False) for (cnt, img_basename) in enumerate(image_set): imgname = os.path.join(data_root, img_basename) img = mpimg.imread(imgname) plt.subplot(nrows, ncols, (cnt + 1)) plt.imshow(img) plt.axis('off')
def print_typical_tray_multiview(): data_root = '/opt/DNN/dataset/retail/retail_test' imgname_left = os.path.join(data_root, '0de3ec888566edc2.jpg') imgname_top = os.path.join(data_root, '2e90529bcb43f44c.jpg') imgname_right = os.path.join(data_root, '15a8f82801fe4911.jpg') img_left = mpimg.imread(imgname_left) img_top = mpimg.imread(imgname_top) img_right = mpimg.imread(imgname_right) fig = plt.figure(1) ff = 3 fig.set_size_inches(((ff * 8.5), ((3 * ff) * 11)), forward=False) plt.subplot(131) plt.imshow(img_left) plt.axis('off') plt.subplot(132) plt.imshow(img_top) plt.axis('off') plt.subplot(133) plt.imshow(img_right) plt.axis('off')
def oid2coco():
    """Convert OpenImages retail box annotations to a COCO-format JSON file.

    Reads the boxable class-description CSV and the retail box CSV, scans the
    image directory for sizes, writes instances_openimage_train.json and a
    train_cats_split.txt class-name list next to it.

    Fix: dict.keys()/dict.values() return view objects in Python 3, which
    have no .index() method — the old code raised AttributeError on the
    category-id lookup. Both are now materialized as lists once, up front.
    """
    img_dir = '/opt/DNN/dataset/openimage/openimage_train'
    boxes_csv = '/opt/DNN/dataset/openimage/retail.csv'
    json_file = '/opt/DNN/dataset/openimage/annotations/instances_openimage_train.json'
    classes_csv = '/opt/DNN/linkdata/eval_dataset/open_images/metadata/class-descriptions-boxable.csv'
    with open(classes_csv, 'r') as classes_file:
        classes = csv.reader(classes_file)
        class_dict = dict((rows[0], rows[1]) for rows in classes)
    # lists (not dict views): .index() is required below for category ids
    classes_keys = list(class_dict.keys())
    classes_values = list(class_dict.values())
    images, anns, categories = [], [], []
    img_paths = [x for x in glob.glob(img_dir + '/*.jpg')]
    i = 1
    for img_path in sorted(img_paths):
        img = Image.open(img_path)
        width, height = img.size
        _, img_name = os.path.split(img_path)
        images.append({'file_name': img_name, 'id': i, 'height': height, 'width': width})
        i += 1
    ann_index = 1
    i = 0
    with open(boxes_csv, 'r') as boxes:
        lines = csv.reader(boxes)
        file_name_last_line = ''
        for line in lines:
            if line[0] == 'ImageID':
                continue  # header row
            file_name = line[0] + '.jpg'
            full_image_path = os.path.join(img_dir, file_name)
            if os.path.exists(full_image_path) is not True:
                continue
            if file_name != file_name_last_line:
                # new image: advance image id and re-read its dimensions
                i += 1
                img = Image.open(full_image_path)
                width, height = img.size
            # OpenImages stores normalized coords; convert to absolute pixels
            xmin = float(line[4])
            xmax = float(line[5])
            ymin = float(line[6])
            ymax = float(line[7])
            area = int(((xmax - xmin) * width) * ((ymax - ymin) * height))
            bbox = [int(xmin * width), int(ymin * height), int((xmax - xmin) * width), int((ymax - ymin) * height)]
            category = line[2]
            cat_id = classes_keys.index(category)
            anns.append({'segmentation': [], 'area': area, 'iscrowd': 0, 'image_id': i, 'bbox': bbox, 'category_id': cat_id, 'id': ann_index, 'ignore': 0})
            file_name_last_line = file_name
            ann_index += 1
    for cate in classes_values:
        categories.append({'supercategory': 'none', 'id': classes_values.index(cate), 'name': cate})
    data = {'images': images, 'type': 'instances', 'annotations': anns, 'categories': categories}
    with open(json_file, 'w') as outfile:
        json.dump(data, outfile)
    with open(os.path.split(json_file)[0] + '/train_cats_split.txt', 'w') as fid:
        fid.writelines('\n'.join(classes_values))
def show_detsB_boxes(im, dets_B, scale=1.0, save_file_path='temp.png'): fig = plt.figure(1) ff = 1.0 fig.set_size_inches(((ff * 8.5), (ff * 11)), forward=False) plt.cla() plt.axis('off') plt.imshow(im) for det in dets_B: det_row = det[0] cat_name = det[2] bbox = (det_row[:4] * scale) color = (rand(), rand(), rand()) rect = plt.Rectangle((bbox[0], bbox[1]), (bbox[2] - bbox[0]), (bbox[3] - bbox[1]), fill=False, edgecolor=color, linewidth=2.5) plt.gca().add_patch(rect) score = det_row[(- 1)] plt.gca().text(bbox[0], bbox[1], '{:s} {:.3f}'.format(cat_name, score), bbox=dict(facecolor=color, alpha=0.5), fontsize=16, color='white')
def display_few_shot_test(benchmark, query_image): import shutil from FSD_engine import FSD_RepMet as DetectionEngine from config.bench_config import bcfg, update_bench_config import cv2 update_bench_config('../experiments/bench_configs/openimage_3_5_10_1.yaml') (q_dets_B, q_dets_multi_B) = benchmark.det_eng.detect_on_image(query_image, num_candidates=5, cand_ver=1) img = cv2.cvtColor(cv2.imread(query_image, (cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)), cv2.COLOR_BGR2RGB) prob = [item for item in q_dets_B if (item[0][4] > 0.2)] save_file_path = '/opt/DNN/dataset/retail/openimage_3_5_10_1/temp.jpg' show_detsB_boxes(img, prob, save_file_path=save_file_path)
class Data(): def __init__(self, args): kwargs = {} if (not args.cpu): kwargs['collate_fn'] = default_collate kwargs['pin_memory'] = True else: kwargs['collate_fn'] = default_collate kwargs['pin_memory'] = False self.loader_train = None if (not args.test_only): module_train = import_module(('data.' + args.data_train.lower())) trainset = getattr(module_train, args.data_train)(args) self.loader_train = MSDataLoader(args, trainset, batch_size=args.batch_size, shuffle=True, **kwargs) if (args.data_test in ['Set5', 'Set14', 'B100', 'Urban100', 'BSDS300', 'DIV2K']): if (not args.benchmark_noise): module_test = import_module('data.benchmark') testset = getattr(module_test, 'Benchmark')(args, train=False) else: module_test = import_module('data.benchmark_noise') testset = getattr(module_test, 'BenchmarkNoise')(args, train=False) else: module_test = import_module(('data.' + args.data_test.lower())) testset = getattr(module_test, args.data_test)(args, train=False) self.loader_test = MSDataLoader(args, testset, batch_size=1, shuffle=False, **kwargs)
class Benchmark(srdata.SRData): def __init__(self, args, train=True): super(Benchmark, self).__init__(args, train, benchmark=True) def _scan(self): list_hr = [] list_lr = [[] for _ in self.scale] for entry in os.scandir(self.dir_hr): filename = os.path.splitext(entry.name)[0] list_hr.append(os.path.join(self.dir_hr, (filename + self.ext))) for (si, s) in enumerate(self.scale): list_lr[si].append(os.path.join(self.dir_lr, 'X{}/{}x{}{}'.format(s, filename, s, self.ext))) list_hr.sort() for l in list_lr: l.sort() return (list_hr, list_lr) def _set_filesystem(self, dir_data): self.apath = os.path.join(dir_data, self.args.data_test) self.dir_hr = os.path.join(self.apath, 'HR') self.dir_lr = os.path.join(self.apath, 'LR') self.ext = '.png'
class Demo(data.Dataset): def __init__(self, args, train=False): self.args = args self.name = 'Demo' self.scale = args.scale self.idx_scale = 0 self.train = False self.benchmark = False self.filelist = [] for f in os.listdir(args.dir_demo): if ((f.find('.png') >= 0) or (f.find('.jp') >= 0)): self.filelist.append(os.path.join(args.dir_demo, f)) self.filelist.sort() def __getitem__(self, idx): filename = os.path.split(self.filelist[idx])[(- 1)] (filename, _) = os.path.splitext(filename) lr = misc.imread(self.filelist[idx]) lr = common.set_channel([lr], self.args.n_colors)[0] return (common.np2Tensor([lr], self.args.rgb_range)[0], (- 1), filename) def __len__(self): return len(self.filelist) def set_scale(self, idx_scale): self.idx_scale = idx_scale
class DIV2K(srdata.SRData): def __init__(self, args, train=True): super(DIV2K, self).__init__(args, train) self.repeat = (args.test_every // (args.n_train // args.batch_size)) def _scan(self): list_hr = [] list_lr = [[] for _ in self.scale] if self.train: idx_begin = 0 idx_end = self.args.n_train else: idx_begin = self.args.n_train idx_end = (self.args.offset_val + self.args.n_val) for i in range((idx_begin + 1), (idx_end + 1)): filename = '{:0>4}'.format(i) list_hr.append(os.path.join(self.dir_hr, (filename + self.ext))) for (si, s) in enumerate(self.scale): list_lr[si].append(os.path.join(self.dir_lr, 'X{}/{}x{}{}'.format(s, filename, s, self.ext))) return (list_hr, list_lr) def _set_filesystem(self, dir_data): self.apath = ((dir_data + '/DIV2K') + '/train') self.dir_hr = os.path.join(self.apath, 'DIV2K_train_HR') self.dir_lr = os.path.join(self.apath, ('DIV2K_train_LR_' + self.args.manner_of_downsampling)) self.ext = '.png' def _name_hrbin(self): return os.path.join(self.apath, 'bin', '{}_bin_HR.npy'.format(self.split)) def _name_lrbin(self, scale): return os.path.join(self.apath, 'bin', '{}_bin_LR_X{}.npy'.format(self.split, scale)) def __len__(self): if self.train: return (len(self.images_hr) * self.repeat) else: return len(self.images_hr) def _get_index(self, idx): if self.train: return (idx % len(self.images_hr)) else: return idx
def _ms_loop(dataset, index_queue, data_queue, collate_fn, scale, seed, init_fn, worker_id): global _use_shared_memory _use_shared_memory = True _set_worker_signal_handlers() torch.set_num_threads(1) torch.manual_seed(seed) while True: r = index_queue.get() if (r is None): break (idx, batch_indices) = r try: idx_scale = 0 if ((len(scale) > 1) and dataset.train): idx_scale = random.randrange(0, len(scale)) dataset.set_scale(idx_scale) samples = collate_fn([dataset[i] for i in batch_indices]) samples.append(idx_scale) except Exception: data_queue.put((idx, ExceptionWrapper(sys.exc_info()))) else: data_queue.put((idx, samples))
class _MSDataLoaderIter(_DataLoaderIter): def __init__(self, loader): self.dataset = loader.dataset self.scale = loader.scale self.collate_fn = loader.collate_fn self.batch_sampler = loader.batch_sampler self.num_workers = loader.num_workers self.pin_memory = (loader.pin_memory and torch.cuda.is_available()) self.timeout = loader.timeout self.done_event = threading.Event() self.sample_iter = iter(self.batch_sampler) if (self.num_workers > 0): self.worker_init_fn = loader.worker_init_fn self.index_queues = [multiprocessing.Queue() for _ in range(self.num_workers)] self.worker_queue_idx = 0 self.worker_result_queue = multiprocessing.SimpleQueue() self.batches_outstanding = 0 self.worker_pids_set = False self.shutdown = False self.send_idx = 0 self.rcvd_idx = 0 self.reorder_dict = {} base_seed = torch.LongTensor(1).random_()[0] self.workers = [multiprocessing.Process(target=_ms_loop, args=(self.dataset, self.index_queues[i], self.worker_result_queue, self.collate_fn, self.scale, (base_seed + i), self.worker_init_fn, i)) for i in range(self.num_workers)] if (self.pin_memory or (self.timeout > 0)): self.data_queue = queue.Queue() if self.pin_memory: maybe_device_id = torch.cuda.current_device() else: maybe_device_id = None self.worker_manager_thread = threading.Thread(target=_worker_manager_loop, args=(self.worker_result_queue, self.data_queue, self.done_event, self.pin_memory, maybe_device_id)) self.worker_manager_thread.daemon = True self.worker_manager_thread.start() else: self.data_queue = self.worker_result_queue for w in self.workers: w.daemon = True w.start() _update_worker_pids(id(self), tuple((w.pid for w in self.workers))) _set_SIGCHLD_handler() self.worker_pids_set = True for _ in range((2 * self.num_workers)): self._put_indices()
class MSDataLoader(DataLoader): def __init__(self, args, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None, collate_fn=default_collate, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None): super(MSDataLoader, self).__init__(dataset, batch_size=batch_size, shuffle=shuffle, sampler=sampler, batch_sampler=batch_sampler, num_workers=args.n_threads, collate_fn=collate_fn, pin_memory=pin_memory, drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn) self.scale = args.scale def __iter__(self): return _MSDataLoaderIter(self)
class Adversarial(nn.Module):
    """Adversarial (GAN) loss module.

    Each forward() call trains the internal discriminator for gan_k steps on
    the detached fake batch, then returns the generator loss. Supports
    'GAN' (BCE-with-logits), 'WGAN' (weight clipping) and 'WGAN_GP'
    (gradient penalty), selected by substring matching on gan_type.
    """

    def __init__(self, args, gan_type):
        super(Adversarial, self).__init__()
        self.gan_type = gan_type
        self.gan_k = args.gan_k  # discriminator updates per generator update
        self.discriminator = discriminator.Discriminator(args, gan_type)
        if (gan_type != 'WGAN_GP'):
            self.optimizer = utility.make_optimizer(args, self.discriminator)
        else:
            # WGAN-GP uses Adam with beta1=0 per its training recipe
            self.optimizer = optim.Adam(self.discriminator.parameters(), betas=(0, 0.9), eps=1e-08, lr=1e-05)
        self.scheduler = utility.make_scheduler(args, self.optimizer)

    def forward(self, fake, real):
        # detach fake so discriminator updates don't backprop into the generator
        fake_detach = fake.detach()
        self.loss = 0
        for _ in range(self.gan_k):
            self.optimizer.zero_grad()
            d_fake = self.discriminator(fake_detach)
            d_real = self.discriminator(real)
            if (self.gan_type == 'GAN'):
                label_fake = torch.zeros_like(d_fake)
                label_real = torch.ones_like(d_real)
                loss_d = (F.binary_cross_entropy_with_logits(d_fake, label_fake) + F.binary_cross_entropy_with_logits(d_real, label_real))
            elif (self.gan_type.find('WGAN') >= 0):
                # Wasserstein critic loss
                loss_d = (d_fake - d_real).mean()
                if (self.gan_type.find('GP') >= 0):
                    # gradient penalty on random interpolates of real and fake
                    epsilon = torch.rand_like(fake).view((- 1), 1, 1, 1)
                    hat = (fake_detach.mul((1 - epsilon)) + real.mul(epsilon))
                    hat.requires_grad = True
                    d_hat = self.discriminator(hat)
                    gradients = torch.autograd.grad(outputs=d_hat.sum(), inputs=hat, retain_graph=True, create_graph=True, only_inputs=True)[0]
                    gradients = gradients.view(gradients.size(0), (- 1))
                    gradient_norm = gradients.norm(2, dim=1)
                    gradient_penalty = (10 * gradient_norm.sub(1).pow(2).mean())
                    loss_d += gradient_penalty
            self.loss += loss_d.item()
            loss_d.backward()
            self.optimizer.step()
            if (self.gan_type == 'WGAN'):
                # plain WGAN: clip critic weights (Lipschitz constraint)
                for p in self.discriminator.parameters():
                    p.data.clamp_((- 1), 1)
        self.loss /= self.gan_k
        # generator loss: gradients flow through the un-detached fake
        d_fake_for_g = self.discriminator(fake)
        if (self.gan_type == 'GAN'):
            # NOTE(review): label_real is bound inside the loop above and only
            # for gan_type == 'GAN'; this relies on gan_k >= 1 — confirm.
            loss_g = F.binary_cross_entropy_with_logits(d_fake_for_g, label_real)
        elif (self.gan_type.find('WGAN') >= 0):
            loss_g = (- d_fake_for_g.mean())
        return loss_g

    def state_dict(self, *args, **kwargs):
        # flatten discriminator weights and optimizer state into one dict
        # NOTE(review): assumes the two dicts have disjoint keys — a collision
        # would raise TypeError from dict(**a, **b); confirm in practice.
        state_discriminator = self.discriminator.state_dict(*args, **kwargs)
        state_optimizer = self.optimizer.state_dict()
        return dict(**state_discriminator, **state_optimizer)
class Discriminator(nn.Module): def __init__(self, args, gan_type='GAN'): super(Discriminator, self).__init__() in_channels = 3 out_channels = 64 depth = 7 bn = True act = nn.LeakyReLU(negative_slope=0.2, inplace=True) m_features = [common.BasicBlock(args.n_colors, out_channels, 3, bn=bn, act=act)] for i in range(depth): in_channels = out_channels if ((i % 2) == 1): stride = 1 out_channels *= 2 else: stride = 2 m_features.append(common.BasicBlock(in_channels, out_channels, 3, stride=stride, bn=bn, act=act)) self.features = nn.Sequential(*m_features) patch_size = (args.patch_size // (2 ** ((depth + 1) // 2))) m_classifier = [nn.Linear((out_channels * (patch_size ** 2)), 1024), act, nn.Linear(1024, 1)] self.classifier = nn.Sequential(*m_classifier) def forward(self, x): features = self.features(x) output = self.classifier(features.view(features.size(0), (- 1))) return output