# ---------------------------------------------------------------------------
# Symbol-level layer helpers built on MXNet's symbolic API.
# Imports assumed by this module (they were not part of the original snippet):
# ---------------------------------------------------------------------------
import logging
import os
import pickle
import time
from datetime import datetime
from subprocess import call
from types import ModuleType
import xml.etree.ElementTree as ET

import cv2
import mxnet as mx
import numpy as np


def Convolution(data, num_filter, kernel, stride=None, dilate=None, pad=None,
                num_group=1, no_bias=False, weight=None, bias=None, name=None,
                lr_mult=1, reuse=None, **kwargs):
    if reuse is not None:
        assert name is not None
    name = GetLayerName.get('conv') if name is None else name
    stride = (1,) * len(kernel) if stride is None else stride
    dilate = (1,) * len(kernel) if dilate is None else dilate
    if isinstance(pad, str):
        # String padding (e.g. 'same') needs the spatial input size so the
        # explicit padding can be computed and applied before the conv.
        input_size = kwargs.get('input_size', None)
        if input_size is None:
            raise ValueError('`input_size` is needed for padding')
        del kwargs['input_size']
        if isinstance(input_size, int):
            in_size_h = in_size_w = input_size
        else:
            in_size_h, in_size_w = input_size
        ph0, ph1 = padding_helper(in_size_h, kernel[0], stride[0], pad)
        pw0, pw1 = padding_helper(in_size_w, kernel[1], stride[1], pad)
        data = mx.sym.pad(data, mode='constant',
                          pad_width=(0, 0, 0, 0, ph0, ph1, pw0, pw1))
        pad = (0,) * len(kernel)
    else:
        pad = (0,) * len(kernel) if pad is None else pad
    assert len(kwargs) == 0, sorted(kwargs)
    W = get_variable(name + '_weight', lr_mult, reuse) if weight is None else weight
    if no_bias:
        x = mx.sym.Convolution(data, num_filter=num_filter, kernel=kernel,
                               stride=stride, dilate=dilate, pad=pad,
                               num_group=num_group, no_bias=no_bias,
                               name=name if reuse is None else None, weight=W)
    else:
        B = get_variable(name + '_bias', lr_mult, reuse) if bias is None else bias
        x = mx.sym.Convolution(data, num_filter=num_filter, kernel=kernel,
                               stride=stride, dilate=dilate, pad=pad,
                               num_group=num_group, no_bias=no_bias,
                               name=name if reuse is None else None,
                               weight=W, bias=B)
    return x
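# A hedged usage sketch for the string-padding path of Convolution: with
# pad='same' the spatial input size must be passed so the explicit
# pre-padding can be computed. The names and sizes are illustrative, not
# from the original source.
def _example_same_conv():
    data = mx.sym.Variable('data')  # assumed NCHW input, 64x64 spatial
    return Convolution(data, 32, (3, 3), stride=(2, 2), pad='same',
                       input_size=64, name='conv_same')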
def Deconvolution(data, num_filter, kernel, stride=None, dilate=None, pad=None,
                  adj=None, target_shape=None, num_group=1, no_bias=False,
                  weight=None, bias=None, name=None, lr_mult=1, reuse=None):
    if reuse is not None:
        assert name is not None
    name = GetLayerName.get('deconv') if name is None else name
    stride = (1,) * len(kernel) if stride is None else stride
    dilate = (1,) * len(kernel) if dilate is None else dilate
    pad = (0,) * len(kernel) if pad is None else pad
    adj = (0,) * len(kernel) if adj is None else adj
    target_shape = tuple() if target_shape is None else target_shape
    W = get_variable(name + '_weight', lr_mult, reuse) if weight is None else weight
    if no_bias:
        x = mx.sym.Deconvolution(data, num_filter=num_filter, kernel=kernel,
                                 stride=stride, dilate=dilate, pad=pad, adj=adj,
                                 target_shape=target_shape, num_group=num_group,
                                 no_bias=no_bias,
                                 name=name if reuse is None else None, weight=W)
    else:
        B = get_variable(name + '_bias', lr_mult, reuse) if bias is None else bias
        x = mx.sym.Deconvolution(data, num_filter=num_filter, kernel=kernel,
                                 stride=stride, dilate=dilate, pad=pad, adj=adj,
                                 target_shape=target_shape, num_group=num_group,
                                 no_bias=no_bias,
                                 name=name if reuse is None else None,
                                 weight=W, bias=B)
    return x
def FullyConnected(data, num_hidden, flatten=True, no_bias=False, weight=None,
                   bias=None, name=None, lr_mult=1, reuse=None):
    if reuse is not None:
        assert name is not None
    name = GetLayerName.get('fc') if name is None else name
    W = get_variable(name + '_weight', lr_mult, reuse) if weight is None else weight
    if no_bias:
        x = mx.sym.FullyConnected(data, num_hidden=num_hidden, flatten=flatten,
                                  no_bias=no_bias, weight=W,
                                  name=name if reuse is None else None)
    else:
        B = get_variable(name + '_bias', lr_mult, reuse) if bias is None else bias
        x = mx.sym.FullyConnected(data, num_hidden=num_hidden, flatten=flatten,
                                  no_bias=no_bias, weight=W, bias=B,
                                  name=name if reuse is None else None)
    return x
def Relu(data, name=None):
    name = GetLayerName.get('relu') if name is None else name
    return mx.sym.Activation(data, act_type='relu', name=name)


def LeakyRelu(data, slope=0.25, name=None):
    name = GetLayerName.get('leakyRelu') if name is None else name
    return mx.sym.LeakyReLU(data, slope=slope, act_type='leaky', name=name)


def Tanh(data, name=None):
    name = GetLayerName.get('tanh') if name is None else name
    return mx.sym.tanh(data, name=name)


def Swish(data, name=None):
    # Note: the composed symbol x * sigmoid(x) cannot carry a user-supplied
    # name directly, so `name` is reserved here but not applied to the output.
    name = GetLayerName.get('swish') if name is None else name
    return data * mx.sym.sigmoid(data)


def Pooling(data, kernel, stride=None, pad=None, pool_type='max',
            global_pool=False, name=None):
    name = GetLayerName.get('pool') if name is None else name
    stride = kernel if stride is None else stride
    pad = (0,) * len(kernel) if pad is None else pad
    return mx.sym.Pooling(data, kernel=kernel, stride=stride, pad=pad,
                          pool_type=pool_type, global_pool=global_pool,
                          name=name)


def Dropout(data, p, name=None):
    name = GetLayerName.get('drop') if name is None else name
    return mx.sym.Dropout(data, p=p, name=name)
def BatchNorm(data, fix_gamma=False, momentum=0.9, eps=1e-05,
              use_global_stats=False, gamma=None, beta=None, moving_mean=None,
              moving_var=None, name=None, lr_mult=1, reuse=None):
    if reuse is not None:
        assert name is not None
    name = GetLayerName.get('bn') if name is None else name
    gamma = get_variable(name + '_gamma', lr_mult, reuse) if gamma is None else gamma
    beta = get_variable(name + '_beta', lr_mult, reuse) if beta is None else beta
    # Moving statistics always use lr_mult=1: they are updated by the
    # running-average rule, not by gradient descent.
    moving_mean = (get_variable(name + '_moving_mean', 1, reuse)
                   if moving_mean is None else moving_mean)
    moving_var = (get_variable(name + '_moving_var', 1, reuse)
                  if moving_var is None else moving_var)
    return mx.sym.BatchNorm(data, fix_gamma=fix_gamma, momentum=momentum,
                            eps=eps, use_global_stats=use_global_stats,
                            gamma=gamma, beta=beta, moving_mean=moving_mean,
                            moving_var=moving_var,
                            name=name if reuse is None else None)
def InstanceNorm(data, eps=1e-05, gamma=None, beta=None, name=None, lr_mult=1,
                 reuse=None):
    if reuse is not None:
        assert name is not None
    name = GetLayerName.get('in') if name is None else name
    gamma = get_variable(name + '_gamma', lr_mult, reuse) if gamma is None else gamma
    beta = get_variable(name + '_beta', lr_mult, reuse) if beta is None else beta
    return mx.sym.InstanceNorm(data, eps=eps, gamma=gamma, beta=beta,
                               name=name if reuse is None else None)


def Flatten(data, name=None):
    name = GetLayerName.get('flatten') if name is None else name
    return mx.sym.flatten(data, name=name)
# Short aliases used by the composite layers and the VGG builders below.
# They were not part of the original snippet, so these one-line definitions
# are an assumption about how the module is wired up.
Conv = Convolution
Deconv = Deconvolution
FC = FullyConnected
BN = BatchNorm
Pool = Pooling
Drop = Dropout


def ConvRelu(*args, **kwargs):
    x = Conv(*args, **kwargs)
    return Relu(x, x.name + '_relu')


def BNRelu(*args, **kwargs):
    x = BN(*args, **kwargs)
    return Relu(x, x.name + '_relu')


def FCRelu(*args, **kwargs):
    x = FC(*args, **kwargs)
    return Relu(x, x.name + '_relu')


def ConvBNRelu(*args, **kwargs):
    x = Conv(*args, **kwargs)
    x = BN(x, name=x.name + '_bn', lr_mult=kwargs.get('lr_mult', 1),
           reuse=kwargs.get('reuse', None))
    return Relu(x, x.name + '_relu')
def get_variable(name, lr_mult=1, reuse=None):
    if reuse is None:
        return mx.sym.Variable(name, lr_mult=lr_mult)
    # When reusing, fetch the existing symbol of the same name from the given
    # symbol's internals instead of creating a new variable.
    return reuse.get_internals()[name]
class GetLayerName(object):
    """Generates unique default layer names per prefix: 'conv0', 'conv1', ..."""

    _name_count = {}

    @classmethod
    def get(cls, name_prefix):
        cnt = cls._name_count.get(name_prefix, 0)
        cls._name_count[name_prefix] = cnt + 1
        return name_prefix + str(cnt)
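# A small sketch of the naming scheme, assuming the class above: repeated
# calls with the same prefix never clash, which is what lets unnamed layers
# coexist in one graph.
def _example_layer_names():
    first = GetLayerName.get('demo')   # 'demo0'
    second = GetLayerName.get('demo')  # 'demo1'
    return first, second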
def padding_helper(in_size, kernel_size, stride, pad_type='same'):
    pad_type = pad_type.lower()
    if pad_type == 'same':
        # Output size is ceil(in_size / stride); pad just enough so the last
        # window fits, splitting the surplus (right/bottom gets the extra).
        out_size = in_size // stride + int(in_size % stride > 0)
        pad_size = max((out_size - 1) * stride + kernel_size - in_size, 0)
        return pad_size // 2, pad_size - pad_size // 2
    else:
        raise ValueError(pad_type)
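# A minimal check of the 'same' rule above, with assumed example sizes: for
# in_size=7, kernel=3, stride=2 the output is ceil(7/2)=4, needing 2 padding
# pixels split evenly; for in_size=8 only 1 pixel is needed, on the right.
def _example_padding():
    assert padding_helper(7, 3, 2, 'same') == (1, 1)
    assert padding_helper(8, 3, 2, 'same') == (0, 1)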
class OpConstant(mx.operator.CustomOp):
    """Custom op that always outputs a fixed NDArray and has zero gradient."""

    def __init__(self, val):
        self.val = val

    def forward(self, is_train, req, in_data, out_data, aux):
        self.assign(out_data[0], req[0], self.val)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        pass  # a constant has no inputs to propagate gradients to


@mx.operator.register('Constant')
class OpConstantProp(mx.operator.CustomOpProp):
    def __init__(self, val_str, shape_str, type_str='float32'):
        super(OpConstantProp, self).__init__(need_top_grad=False)
        # Custom-op parameters arrive as strings; decode value and shape.
        val = [float(x) for x in val_str.split(',')]
        shape = [int(x) for x in shape_str.split(',')]
        self.val = mx.nd.array(val, dtype=type_str).reshape(shape)

    def list_arguments(self):
        return []

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        return in_shape, [self.val.shape], []

    def infer_type(self, in_type):
        return in_type, [self.val.dtype], []

    def create_operator(self, ctx, shapes, dtypes):
        return OpConstant(self.val.as_in_context(ctx))
def CustomConstantEncoder(value, dtype='float32'):
    if not isinstance(value, np.ndarray):
        if not isinstance(value, (list, tuple)):
            value = [value]
        value = np.array(value, dtype=dtype)
    return (','.join(str(x) for x in value.ravel()),
            ','.join(str(x) for x in value.shape))


def Constant(value, dtype='float32'):
    assert isinstance(dtype, str), dtype
    val, shape = CustomConstantEncoder(value, dtype)
    return mx.sym.Custom(val_str=val, shape_str=shape, type_str=dtype,
                         op_type='Constant')
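# A hedged usage sketch for Constant, assuming the custom op above is
# registered: values round-trip through comma-separated strings, so any
# array-like with a fixed dtype works. Symbol.eval is used here only to
# materialise the constant for inspection.
def _example_constant():
    sym = Constant(np.arange(6, dtype=np.float32).reshape(2, 3))
    return sym.eval(ctx=mx.cpu())[0]  # NDArray of shape (2, 3)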
class BilinearScale(mx.operator.CustomOp):
    """Bilinear resize by a fixed scale, with an autograd-derived backward."""

    def __init__(self, scale):
        self.scale = scale

    def forward(self, is_train, req, in_data, out_data, aux):
        x = in_data[0]
        h, w = x.shape[2:]
        new_h = int((h - 1) * self.scale) + 1
        new_w = int((w - 1) * self.scale) + 1
        # Record the resize under autograd so backward() can replay it.
        x.attach_grad()
        with mx.autograd.record():
            new_x = mx.nd.contrib.BilinearResize2D(x, height=new_h, width=new_w)
        self.new_x = new_x
        self.x = x
        self.assign(out_data[0], req[0], new_x)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        self.new_x.backward(out_grad[0])
        self.assign(in_grad[0], req[0], self.x.grad)


@mx.operator.register('BilinearScale')
class BilinearScaleProp(mx.operator.CustomOpProp):
    def __init__(self, scale):
        super(BilinearScaleProp, self).__init__(need_top_grad=True)
        self.scale = float(scale)

    def infer_shape(self, in_shape):
        n, c, h, w = in_shape[0]
        new_h = int((h - 1) * self.scale) + 1
        new_w = int((w - 1) * self.scale) + 1
        return in_shape, [(n, c, new_h, new_w)], []

    def create_operator(self, ctx, shapes, dtypes):
        return BilinearScale(self.scale)
class BilinearScaleLike(mx.operator.CustomOp):
    """Bilinear-resize the first input to the spatial size of the second."""

    def forward(self, is_train, req, in_data, out_data, aux):
        x, x_ref = in_data
        new_h, new_w = x_ref.shape[2:]
        x.attach_grad()
        with mx.autograd.record():
            new_x = mx.nd.contrib.BilinearResize2D(x, height=new_h, width=new_w)
        self.new_x = new_x
        self.x = x
        self.assign(out_data[0], req[0], new_x)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        self.new_x.backward(out_grad[0])
        in_grad[1][:] = 0  # the reference input only provides a shape
        self.assign(in_grad[0], req[0], self.x.grad)


@mx.operator.register('BilinearScaleLike')
class BilinearScaleLikeProp(mx.operator.CustomOpProp):
    def __init__(self):
        super(BilinearScaleLikeProp, self).__init__(need_top_grad=True)

    def list_arguments(self):
        return ['d1', 'd2']

    def infer_shape(self, in_shape):
        # Output spatial size follows d2; channel count follows d1.
        out_shape = list(in_shape[1])
        out_shape[1] = in_shape[0][1]
        return in_shape, [out_shape], []

    def create_operator(self, ctx, shapes, dtypes):
        return BilinearScaleLike()
class SegmentLoss(mx.operator.CustomOp):
    """Softmax segmentation loss with optional per-sample gradient scaling."""

    def __init__(self, has_grad_scale, onehot_label, grad_scale):
        self.has_grad_scale = has_grad_scale
        self.onehot_label = onehot_label
        self.grad_scale = grad_scale

    def forward(self, is_train, req, in_data, out_data, aux):
        prediction = mx.nd.softmax(in_data[0], axis=1)
        self.assign(out_data[0], req[0], prediction)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        prediction = out_data[0]
        if not self.onehot_label:
            label = mx.nd.one_hot(in_data[1], depth=prediction.shape[1])
            label = label.transpose((0, 3, 1, 2))
        else:
            label = in_data[1]
        if prediction.shape[2] != label.shape[2]:
            # Resize the label to the prediction's resolution, then re-binarise
            # it (pixels whose max probability drops below 0.5 are masked out).
            label = mx.nd.contrib.BilinearResize2D(
                label, height=prediction.shape[2], width=prediction.shape[3])
            label = (mx.nd.one_hot(mx.nd.argmax(label, axis=1),
                                   depth=prediction.shape[1]).transpose((0, 3, 1, 2))
                     * (mx.nd.max(label, axis=1, keepdims=True) > 0.5))
        mask = label.sum(axis=1, keepdims=True)
        num_pixel = mx.nd.maximum(mask.sum() / mask.shape[0], 1)
        grad = (prediction - label) * mask / num_pixel
        if self.has_grad_scale:
            grad_scale = in_data[2].reshape(-1, 1, 1, 1)
            grad = grad * grad_scale
        grad = grad * self.grad_scale
        in_grad[1][:] = 0
        self.assign(in_grad[0], req[0], grad)


@mx.operator.register('SegmentLoss')
class SegmentLossProp(mx.operator.CustomOpProp):
    def __init__(self, has_grad_scale=0, onehot_label=0, grad_scale=1):
        super(SegmentLossProp, self).__init__(need_top_grad=False)
        self.has_grad_scale = int(has_grad_scale) > 0
        self.onehot_label = int(onehot_label) > 0
        self.grad_scale = float(grad_scale)

    def list_arguments(self):
        if self.has_grad_scale:
            return ['data', 'label', 'scale']
        return ['data', 'label']

    def infer_shape(self, in_shape):
        return in_shape, [in_shape[0]], []

    def create_operator(self, ctx, shapes, dtypes):
        return SegmentLoss(self.has_grad_scale, self.onehot_label,
                           self.grad_scale)
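# A hedged sketch of wiring SegmentLoss into a graph via mx.sym.Custom.
# Custom-op keyword parameters are passed as strings internally; the variable
# names and the 21-class head are illustrative, not from the original source.
def _example_segment_loss(num_cls=21):
    data = mx.sym.Variable('data')
    label = mx.sym.Variable('label')
    logits = Convolution(data, num_cls, (1, 1), name='score')
    return mx.sym.Custom(logits, label, onehot_label=0, grad_scale=1,
                         op_type='SegmentLoss', name='seg_loss')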
class MultiSigmoidLoss(mx.operator.CustomOp):
    """Multi-label sigmoid loss: forward outputs per-class probabilities."""

    def __init__(self, grad_scale):
        self.grad_scale = grad_scale

    def forward(self, is_train, req, in_data, out_data, aux):
        logit, label = in_data
        # mx.nd.sigmoid is elementwise and takes no `axis` argument
        # (the original passed axis=1, which is invalid).
        prediction = mx.nd.sigmoid(logit)
        self.assign(out_data[0], req[0], prediction)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        prediction = out_data[0]
        label = in_data[1]
        grad = (prediction - label) * self.grad_scale
        in_grad[1][:] = 0
        self.assign(in_grad[0], req[0], grad)


@mx.operator.register('MultiSigmoidLoss')
class MultiSigmoidLossProp(mx.operator.CustomOpProp):
    def __init__(self, grad_scale=1):
        super(MultiSigmoidLossProp, self).__init__(need_top_grad=False)
        self.grad_scale = float(grad_scale)

    def list_arguments(self):
        return ['data', 'label']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        return in_shape, [in_shape[0]], []

    def create_operator(self, ctx, shapes, dtypes):
        return MultiSigmoidLoss(self.grad_scale)
class MultiSoftmaxLoss(mx.operator.CustomOp):
    def forward(self, is_train, req, in_data, out_data, aux):
        logit, label = in_data
        prediction = mx.nd.softmax(logit, axis=1)
        self.assign(out_data[0], req[0], prediction)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        prediction = out_data[0]
        label = in_data[1]
        grad = prediction - label
        in_grad[1][:] = 0
        self.assign(in_grad[0], req[0], grad)


@mx.operator.register('MultiSoftmaxLoss')
class MultiSoftmaxLossProp(mx.operator.CustomOpProp):
    def __init__(self):
        super(MultiSoftmaxLossProp, self).__init__(need_top_grad=False)

    def list_arguments(self):
        return ['data', 'label']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        return in_shape, [in_shape[0]], []

    def create_operator(self, ctx, shapes, dtypes):
        return MultiSoftmaxLoss()
def vgg16_deeplab(x, name=None, lr_mult=1, reuse=None):
    """VGG-16 backbone with DeepLab modifications: pool4/pool5 keep stride 1
    and conv5 uses dilation 2, so the output stride is 8 instead of 32."""
    name = '' if name is None else name
    x = ConvRelu(x, 64, (3, 3), pad=(1, 1), name=name + 'conv1_1', lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 64, (3, 3), pad=(1, 1), name=name + 'conv1_2', lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1), name=name + 'pool1')
    x = ConvRelu(x, 128, (3, 3), pad=(1, 1), name=name + 'conv2_1', lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 128, (3, 3), pad=(1, 1), name=name + 'conv2_2', lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1), name=name + 'pool2')
    x = ConvRelu(x, 256, (3, 3), pad=(1, 1), name=name + 'conv3_1', lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 256, (3, 3), pad=(1, 1), name=name + 'conv3_2', lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 256, (3, 3), pad=(1, 1), name=name + 'conv3_3', lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1), name=name + 'pool3')
    x = ConvRelu(x, 512, (3, 3), pad=(1, 1), name=name + 'conv4_1', lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 512, (3, 3), pad=(1, 1), name=name + 'conv4_2', lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 512, (3, 3), pad=(1, 1), name=name + 'conv4_3', lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name=name + 'pool4')
    x = ConvRelu(x, 512, (3, 3), dilate=(2, 2), pad=(2, 2), name=name + 'conv5_1', lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 512, (3, 3), dilate=(2, 2), pad=(2, 2), name=name + 'conv5_2', lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 512, (3, 3), dilate=(2, 2), pad=(2, 2), name=name + 'conv5_3', lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name=name + 'pool5')
    x = Pool(x, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name=name + 'pool5a', pool_type='avg')
    return x
def vgg16_largefov(x, num_cls, name=None, lr_mult=10, reuse=None):
    """DeepLab-LargeFOV head: dilated fc6 (rate 12) on the VGG backbone."""
    name = '' if name is None else name
    x = vgg16_deeplab(x, name, lr_mult=1, reuse=reuse)
    x = ConvRelu(x, 1024, (3, 3), dilate=(12, 12), pad=(12, 12), name=name + 'fc6', reuse=reuse)
    x = Drop(x, 0.5, name=name + 'drop6')
    x = ConvRelu(x, 1024, (1, 1), name=name + 'fc7', reuse=reuse)
    x = Drop(x, 0.5, name=name + 'drop7')
    x = Conv(x, num_cls, (1, 1), name=name + 'fc8', lr_mult=lr_mult, reuse=reuse)
    return x


def vgg16_aspp(x, num_cls, name=None, lr_mult=10, reuse=None):
    """DeepLab-ASPP head: four parallel dilated branches, summed at the end."""
    name = '' if name is None else name
    x_backbone = vgg16_deeplab(x, name, lr_mult=1, reuse=reuse)
    x_aspp = []
    for d in (6, 12, 18, 24):
        x = ConvRelu(x_backbone, 1024, (3, 3), dilate=(d, d), pad=(d, d),
                     name=name + 'fc6_aspp%d' % d, reuse=reuse)
        x = Drop(x, 0.5)
        x = ConvRelu(x, 1024, (1, 1), name=name + 'fc7_aspp%d' % d, reuse=reuse)
        x = Drop(x, 0.5)
        x = Conv(x, num_cls, (1, 1), name=name + 'fc8_aspp%d' % d,
                 lr_mult=lr_mult, reuse=reuse)
        x_aspp.append(x)
    return sum(x_aspp)


def vgg16_cam(x, num_cls, name=None, lr_mult=10, reuse=None):
    """CAM-style head: same as LargeFOV but with an undilated 3x3 fc6."""
    name = '' if name is None else name
    x = vgg16_deeplab(x, name, lr_mult=1, reuse=reuse)
    x = ConvRelu(x, 1024, (3, 3), pad=(1, 1), name=name + 'fc6', reuse=reuse)
    x = Drop(x, 0.5, name=name + 'drop6')
    x = ConvRelu(x, 1024, (1, 1), name=name + 'fc7', reuse=reuse)
    x = Drop(x, 0.5, name=name + 'drop7')
    x = Conv(x, num_cls, (1, 1), name=name + 'fc8', lr_mult=lr_mult, reuse=reuse)
    return x
class _VOC_proto(object):
    """PASCAL VOC metadata: category names, colour palette, XML parsing."""

    @staticmethod
    def _get_palette():
        # Standard VOC colour map: each label index is encoded bit-by-bit
        # into the high bits of the R, G and B channels (stored as BGR).
        def bitget(bit, idx):
            return (bit & (1 << idx)) > 0

        cmap = []
        for i in range(256):
            r, g, b = 0, 0, 0
            idx = i
            for j in range(8):
                r = r | (bitget(idx, 0) << (7 - j))
                g = g | (bitget(idx, 1) << (7 - j))
                b = b | (bitget(idx, 2) << (7 - j))
                idx = idx >> 3
            cmap.append((b, g, r))
        return np.array(cmap).astype(np.uint8)

    def __init__(self):
        self.categories = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
                           'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
                           'dog', 'horse', 'motorbike', 'person',
                           'pottedplant', 'sheep', 'sofa', 'train',
                           'tvmonitor']
        self.palette = self._get_palette()

    def name2index(self, name):
        return self.categories.index(name)

    def index2name(self, index):
        return self.categories[index]

    def get_annotation(self, filename, use_diff=False):
        tree = ET.parse(filename)
        root = tree.getroot()
        annotation = []
        tmp_annotation = []
        for obj in root.findall('object'):
            cat = obj.find('name').text
            non_diff = 1 - int(obj.find('difficult').text)
            if use_diff or non_diff:
                annotation.append(self.name2index(cat))
            else:
                tmp_annotation.append(self.name2index(cat))
        annotation = list(set(annotation))
        # Fall back to difficult-only objects if nothing else was found.
        if len(annotation) == 0:
            annotation += list(set(tmp_annotation))
        annotation.sort()
        return annotation
def imwrite(filename, image):
    dirname = os.path.dirname(filename)
    if not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except OSError:
            pass  # another process may have created the directory first
    cv2.imwrite(filename, image)


def npsave(filename, data):
    dirname = os.path.dirname(filename)
    if not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except OSError:
            pass  # another process may have created the directory first
    np.save(filename, data)


def pkldump(filename, data):
    dirname = os.path.dirname(filename)
    if not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except OSError:
            pass  # another process may have created the directory first
    with open(filename, 'wb') as f:
        pickle.dump(data, f)
def imhstack(images, height=None):
    images = as_list(images)
    images = list(map(image2C3, images))
    if height is None:
        height = np.array([img.shape[0] for img in images]).max()
    images = [resize_height(img, height) for img in images]
    if len(images) == 1:
        return images[0]
    # Interleave a 3px white separator between images, then concatenate.
    images = [[img, np.full((height, 3, 3), 255, np.uint8)] for img in images]
    return np.hstack(sum(images, []))


def imvstack(images, width=None):
    images = as_list(images)
    images = list(map(image2C3, images))
    if width is None:
        width = np.array([img.shape[1] for img in images]).max()
    images = [resize_width(img, width) for img in images]
    if len(images) == 1:
        return images[0]
    images = [[img, np.full((3, width, 3), 255, np.uint8)] for img in images]
    return np.vstack(sum(images, []))
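# A minimal sketch of the stacking helpers above, with made-up image sizes:
# mixed grayscale/colour inputs are promoted to 3 channels, resized to a
# common edge, and joined with white separators.
def _example_stack():
    gray = np.zeros((40, 60), np.uint8)
    rgb = np.full((80, 30, 3), 128, np.uint8)
    row = imhstack([gray, rgb])   # both resized to height 80
    return imvstack([row, row])   # both already share a width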
def as_list(data):
    if not isinstance(data, (list, tuple)):
        return [data]
    return list(data)


def image2C3(image):
    if image.ndim == 3:
        return image
    if image.ndim == 2:
        return np.repeat(image[..., np.newaxis], 3, axis=2)
    raise ValueError('image.ndim = {}, invalid image.'.format(image.ndim))


def resize_height(image, height):
    if image.shape[0] == height:
        return image
    h, w = image.shape[:2]
    width = height * w // h  # preserve aspect ratio
    return cv2.resize(image, (width, height))


def resize_width(image, width):
    if image.shape[1] == width:
        return image
    h, w = image.shape[:2]
    height = width * h // w  # preserve aspect ratio
    return cv2.resize(image, (width, height))


def imtext(image, text, space=(3, 3), color=(0, 0, 0), thickness=1,
           fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1.0):
    assert isinstance(text, str), type(text)
    # cv2.getTextSize returns ((width, height), baseline); the baseline of the
    # text should sit at text height + vertical margin (the original indexed
    # the baseline instead of the text height, which clipped the text).
    (text_w, text_h), baseline = cv2.getTextSize(text, fontFace, fontScale,
                                                 thickness)
    image = cv2.putText(image, text, (space[0], text_h + space[1]),
                        fontFace, fontScale, color, thickness)
    return image
def setGPU(gpus):
    """Restrict CUDA to the given GPU ids and return the remapped local ids."""
    len_gpus = len(gpus.split(','))
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    # After masking, the visible devices are renumbered from 0.
    return ','.join(map(str, range(len_gpus)))


def getTime():
    return datetime.now().strftime('%m-%d %H:%M:%S')


class Timer(object):
    curr_record = None
    prev_record = None

    @classmethod
    def record(cls):
        cls.prev_record = cls.curr_record
        cls.curr_record = time.time()

    @classmethod
    def interval(cls):
        if cls.prev_record is None:
            return 0
        return cls.curr_record - cls.prev_record


def wrapColor(string, color):
    headers = {'red': '\x1b[91m', 'green': '\x1b[92m', 'yellow': '\x1b[93m',
               'blue': '\x1b[94m', 'purple': '\x1b[95m', 'cyan': '\x1b[96m',
               'darkcyan': '\x1b[36m', 'bold': '\x1b[1m',
               'underline': '\x1b[4m'}
    try:
        header = headers[color.lower()]
    except KeyError:
        raise ValueError('Unknown color: {}'.format(color))
    return header + string + '\x1b[0m'


def info(logger, msg, color=None):
    msg = '[{}]'.format(getTime()) + msg
    if logger is not None:
        logger.info(msg)
    if color is not None:
        msg = wrapColor(msg, color)
    print(msg)


def summaryArgs(logger, args, color=None):
    if isinstance(args, ModuleType):
        args = vars(args)
    keys = [key for key in args.keys() if key[:2] != '__']
    keys.sort()
    length = max(len(x) for x in keys)
    msg = [('{:<' + str(length) + '}: {}').format(k, args[k]) for k in keys]
    info(logger, '\n' + '\n'.join(msg), color)
def loadParams(filename):
    """Split a saved NDArray dict into arg_params and aux_params
    (entries are stored with 'arg:' / 'aux:' name prefixes)."""
    data = mx.nd.load(filename)
    arg_params, aux_params = {}, {}
    for name, value in data.items():
        if name[:3] == 'arg':
            arg_params[name[4:]] = value
        elif name[:3] == 'aux':
            aux_params[name[4:]] = value
    if len(arg_params) == 0:
        arg_params = None
    if len(aux_params) == 0:
        aux_params = None
    return arg_params, aux_params
class SaveParams(object):
    """Save checkpoints each epoch, keeping only the most recent num_save."""

    def __init__(self, model, snapshot, model_name, num_save=5):
        self.model = model
        self.snapshot = snapshot
        self.model_name = model_name
        self.num_save = num_save
        self.save_params = []

    def save(self, n_epoch):
        self.save_params += [
            os.path.join(self.snapshot,
                         '{}-{:04d}.params'.format(self.model_name, n_epoch)),
            os.path.join(self.snapshot,
                         '{}-{:04d}.states'.format(self.model_name, n_epoch))]
        self.model.save_params(self.save_params[-2])
        self.model.save_optimizer_states(self.save_params[-1])
        if len(self.save_params) > 2 * self.num_save:
            call(['rm', self.save_params[0], self.save_params[1]])
            self.save_params = self.save_params[2:]
        return self.save_params[-2:]

    def __call__(self, n_epoch):
        return self.save(n_epoch)
def getLogger(snapshot, model_name):
    if not os.path.exists(snapshot):
        os.makedirs(snapshot)
    logging.basicConfig(
        filename=os.path.join(snapshot, model_name + '.log'),
        level=logging.INFO)
    return logging.getLogger()
class LrScheduler(object):
    """Learning-rate schedules: 'step', 'poly' or 'ramp' (up/down)."""

    def __init__(self, method, init_lr, kwargs):
        self.method = method
        self.init_lr = init_lr
        if method == 'step':
            self.step_list = kwargs['step_list']
            self.factor = kwargs['factor']
            self.get = self._step
        elif method == 'poly':
            self.num_epoch = kwargs['num_epoch']
            self.power = kwargs['power']
            self.get = self._poly
        elif method == 'ramp':
            self.ramp_up = kwargs['ramp_up']
            self.ramp_down = kwargs['ramp_down']
            self.num_epoch = kwargs['num_epoch']
            self.scale = kwargs['scale']
            self.get = self._ramp
        else:
            raise ValueError(method)

    def _step(self, current_epoch):
        lr = self.init_lr
        step_list = list(self.step_list)
        while len(step_list) > 0 and current_epoch >= step_list[0]:
            lr *= self.factor
            del step_list[0]
        return lr

    def _poly(self, current_epoch):
        return self.init_lr * (1.0 - float(current_epoch) / self.num_epoch) ** self.power

    def _ramp(self, current_epoch):
        if current_epoch < self.ramp_up:
            decay = np.exp(-(1 - float(current_epoch) / self.ramp_up) ** 2
                           * self.scale)
        elif current_epoch > self.num_epoch - self.ramp_down:
            decay = np.exp(-(float(current_epoch + self.ramp_down - self.num_epoch)
                             / self.ramp_down) ** 2 * self.scale)
        else:
            decay = 1.0
        return self.init_lr * decay
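# A minimal usage sketch for LrScheduler with assumed hyper-parameters: the
# 'poly' schedule decays the rate smoothly to zero over num_epoch epochs.
def _example_lr_schedule():
    sched = LrScheduler('poly', 0.01, {'num_epoch': 40, 'power': 0.9})
    return [sched.get(e) for e in (0, 20, 39)]  # 0.01, ~0.0054, ~0.0004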
class GradBuffer(object):
    """Snapshot and re-add a Module's gradients (for gradient accumulation)."""

    def __init__(self, model):
        self.model = model
        self.cache = None

    def write(self):
        if self.cache is None:
            self.cache = [[None if g is None else g.copyto(g.context)
                           for g in g_list]
                          for g_list in self.model._exec_group.grad_arrays]
        else:
            for gs_src, gs_dst in zip(self.model._exec_group.grad_arrays,
                                      self.cache):
                for g_src, g_dst in zip(gs_src, gs_dst):
                    if g_src is None:
                        continue
                    g_src.copyto(g_dst)

    def read_add(self):
        assert self.cache is not None
        for gs_src, gs_dst in zip(self.model._exec_group.grad_arrays,
                                  self.cache):
            for g_src, g_dst in zip(gs_src, gs_dst):
                if g_src is None:
                    continue
                g_src += g_dst
def initNormal(mean, std, name, shape):
    """Initialise a parameter by naming convention: normal weights, zero
    biases/betas/moving means, unit gammas/moving variances."""
    if name.endswith('_weight'):
        return mx.nd.normal(mean, std, shape)
    if name.endswith('_bias'):
        return mx.nd.zeros(shape)
    if name.endswith('_gamma'):
        return mx.nd.ones(shape)
    if name.endswith('_beta'):
        return mx.nd.zeros(shape)
    if name.endswith('_moving_mean'):
        return mx.nd.zeros(shape)
    if name.endswith('_moving_var'):
        return mx.nd.ones(shape)
    raise ValueError('Unknown name type for `{}`'.format(name))
def checkParams(mod, arg_params, aux_params, auto_fix=True,
                initializer=mx.init.Normal(0.01), logger=None):
    """Diagnose extra / missing / shape-mismatched parameters against a bound
    module; optionally prune and re-initialise them."""
    arg_params = {} if arg_params is None else arg_params
    aux_params = {} if aux_params is None else aux_params
    arg_shapes = {name: array[0].shape for name, array in
                  zip(mod._exec_group.param_names, mod._exec_group.param_arrays)}
    aux_shapes = {name: array[0].shape for name, array in
                  zip(mod._exec_group.aux_names, mod._exec_group.aux_arrays)}

    extra_arg_params = [n for n in arg_params if n not in arg_shapes]
    extra_aux_params = [n for n in aux_params if n not in aux_shapes]
    miss_arg_params = [n for n in arg_shapes if n not in arg_params]
    miss_aux_params = [n for n in aux_shapes if n not in aux_params]
    mismatch_arg_params = [n for n in arg_params if n in arg_shapes
                           and arg_shapes[n] != arg_params[n].shape]
    mismatch_aux_params = [n for n in aux_params if n in aux_shapes
                           and aux_shapes[n] != aux_params[n].shape]

    for name in extra_arg_params:
        info(logger, 'Find extra arg_params: {}: given {}'.format(name, arg_params[name].shape), 'red')
    for name in extra_aux_params:
        info(logger, 'Find extra aux_params: {}: given {}'.format(name, aux_params[name].shape), 'red')
    for name in miss_arg_params:
        info(logger, 'Find missing arg_params: {}: target {}'.format(name, arg_shapes[name]), 'red')
    for name in miss_aux_params:
        info(logger, 'Find missing aux_params: {}: target {}'.format(name, aux_shapes[name]), 'red')
    for name in mismatch_arg_params:
        info(logger, 'Find mismatch arg_params: {}: given {}, target {}'.format(name, arg_params[name].shape, arg_shapes[name]), 'red')
    for name in mismatch_aux_params:
        info(logger, 'Find mismatch aux_params: {}: given {}, target {}'.format(name, aux_params[name].shape, aux_shapes[name]), 'red')

    bad = (extra_arg_params + extra_aux_params + miss_arg_params +
           miss_aux_params + mismatch_arg_params + mismatch_aux_params)
    if len(bad) == 0:
        return arg_params, aux_params
    if not auto_fix:
        info(logger, 'Bad params not fixed.', 'red')
        return arg_params, aux_params

    # Drop extra / mismatched entries, then re-initialise anything missing.
    for name in extra_arg_params + mismatch_arg_params:
        del arg_params[name]
    for name in extra_aux_params + mismatch_aux_params:
        del aux_params[name]
    attrs = mod._symbol.attr_dict()
    for name in miss_arg_params + mismatch_arg_params:
        arg_params[name] = mx.nd.zeros(arg_shapes[name])
        try:
            initializer(mx.init.InitDesc(name, attrs.get(name, None)),
                        arg_params[name])
        except ValueError:
            initializer(name, arg_params[name])
    for name in miss_aux_params + mismatch_aux_params:
        aux_params[name] = mx.nd.zeros(aux_shapes[name])
        try:
            initializer(mx.init.InitDesc(name, attrs.get(name, None)),
                        aux_params[name])
        except ValueError:
            initializer(name, aux_params[name])
    info(logger, 'Bad params auto fixed successfully.', 'red')
    return arg_params, aux_params
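# A hedged sketch of the intended load/repair flow: load a checkpoint, let
# checkParams prune extra entries and re-initialise missing or mismatched
# ones, then push the params into the module. The file path is hypothetical.
def _example_restore(mod, logger=None):
    arg_params, aux_params = loadParams('snapshot/model-0010.params')
    arg_params, aux_params = checkParams(mod, arg_params, aux_params,
                                         auto_fix=True, logger=logger)
    mod.set_params(arg_params, aux_params)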
# ---------------------------------------------------------------------------
# Representation-similarity analysis utilities (a separate script in the
# original codebase). Imports assumed by the functions below (they were not
# part of the original snippet):
# ---------------------------------------------------------------------------
import csv
import json
import pathlib
import pickle as pkl

import pandas as pd
import torch
import torch.nn as nn
from torchvision import datasets, models, transforms


def compute_embeddings(dataset: str, architecture: str, seed: int, step: int,
                       layer: int) -> np.ndarray:
    """
    Compute the representations of a layer specified by the arguments and save
    them to a npy file.

    :param dataset: dataset to compute embeddings for
    :param architecture: model weights to load
    :param seed: random seed used in the model pretraining
    :param step: checkpoint during pretraining to use
    :param layer: layer of the model to load
    :return: the embedding (i.e. representation) just computed
    """
    assert dataset in ['ptb_dev', 'mnli_matched', 'mnli_matched_100',
                       'mnli_mismatched', 'hans_evaluation',
                       'hans_evaluation_100']
    datapath = {
        'ptb_dev': PTB_PATH,
        'mnli_matched': MNLI_MATCHED_PATH,
        'mnli_matched_100': MNLI_MATCHED_100_PATH,
        'mnli_mismatched': MNLI_MISMATCHED_PATH,
        'hans_evaluation': HANS_PATH,
        'hans_evaluation_100': HANS_100_PATH,
    }[dataset]

    output_path = get_embedding_folder(dataset, architecture, seed, step, layer)
    json_output = output_path / pathlib.Path('rep.json')
    npy_output = output_path / pathlib.Path('rep.npy')

    if architecture == 'feather':
        bertnumber = str(seed).zfill(2)
        model_path = '{head}/feather/bert_{number}'.format(
            head=BERT_CHECKPOINT_PATH, number=bertnumber)
        command_outline = (
            'python extract_features.py --input_file={data} --output_file={output} '
            '--vocab_file={bertbase}/vocab.txt --bert_config_file={bertbase}/bert_config.json '
            '--init_checkpoint={model}/model.ckpt-36815 --layers={layer} '
            '--max_seq_length=128 --batch_size=8')
        command = command_outline.format(data=datapath, output=str(json_output),
                                         bertbase=BERT_BASE_DIR,
                                         model=model_path, layer=layer)
    else:
        model_path = '{head}/{architecture}/pretrain_seed{seed}step{step}'.format(
            head=EMBEDDING_PATH, architecture=architecture, seed=seed, step=step)
        command_outline = (
            'python extract_features.py --input_file={data} --output_file={output} '
            '--vocab_file={model}/vocab.txt --bert_config_file={model}/bert_config.json '
            '--init_checkpoint={model}/bert_model.ckpt --layers={layer} '
            '--max_seq_length=128 --batch_size=8')
        command = command_outline.format(data=datapath, output=str(json_output),
                                         model=model_path, layer=layer)

    os.system('echo {}'.format(command))
    # A bare os.system('cd ...') has no effect on later calls because each
    # os.system spawns its own shell, so the cd is folded into the extraction
    # command itself.
    os.system('cd {} && {}'.format(BERT_PATH, command))

    representation = []
    with open(json_output) as f:
        for line in f:
            data = json.loads(line)
            for token in data['features']:
                representation.append(token['layers'][0]['values'])
    representation = np.array(representation).T
    print('Saving representations at {}'.format(npy_output))
    np.save(npy_output, representation)
    os.system('rm {}'.format(str(json_output)))
    return representation
def get_filepath(dataset, architecture, seed, step, layer, folder=False):
    """
    Get the filepath for the embedding of interest (used to check whether it
    has already been computed).
    """
    base = os.path.join(EMBEDDING_PATH, dataset, architecture, str(seed),
                        str(step), str(layer))
    return base if folder else os.path.join(base, 'rep.npy')


def get_string_filepath(dataset, architecture, seed, step, layer):
    return '{head}/{dataset}/{architecture}/{seed}/{step}/{layer}'.format(
        head=EMBEDDING_PATH, dataset=dataset, architecture=architecture,
        seed=seed, step=step, layer=layer)
def get_embedding_folderpath(dataset: str, architecture: str, seed: int,
                             step: int) -> pathlib.Path:
    """
    Return the path of the folder containing embedding arrays corresponding to:
    - layers of the model specified by architecture, seed and step
    - inputs from dataset

    Args:
        dataset (str): name of the dataset on which to compute embeddings, e.g. "tiny_imagenet"
        architecture (str): name of the model architecture, e.g. "resnet18"
        seed (int): seed used to train the model
        step (int): number of training steps used to train the model

    Returns:
        pathlib.Path: path to the embedding folder
    """
    path_suffix = f'embeddings/{dataset}/{architecture}/{seed}/{step}/'
    return SCRATCH_PATH / pathlib.Path(path_suffix)


def get_checkpoint_filepath(architecture: str, seed: int,
                            step: int) -> pathlib.Path:
    """
    Return the path to the model checkpoint specified by architecture, seed
    and step.

    Args:
        architecture (str): name of the model architecture, e.g. "resnet18"
        seed (int): seed used to train the model
        step (int): number of training steps used to train the model

    Returns:
        pathlib.Path: path to the model checkpoint
    """
    path_suffix = f'checkpoints/{architecture}/seed_{seed}_step_{step}.pt'
    return DATA_PATH / pathlib.Path(path_suffix)
def initialise_model(architecture: str) -> nn.Module:
    """
    Return an initialised network of a given architecture.
    Currently only works for resnet18, resnet34, resnet50, resnet101, resnet152.

    Args:
        architecture (str): name of the model architecture, e.g. "resnet18"

    Returns:
        nn.Module: initialised network
    """
    assert architecture in ARCHITECTURES
    assert architecture != 'inceptionv1'
    if architecture == 'resnet18':
        blocked_model = models.resnet18(pretrained=True)
    elif architecture == 'resnet34':
        blocked_model = models.resnet34(pretrained=True)
    elif architecture == 'resnet50':
        blocked_model = models.resnet50(pretrained=True)
    elif architecture == 'resnet101':
        blocked_model = models.resnet101(pretrained=True)
    elif architecture == 'resnet152':
        blocked_model = models.resnet152(pretrained=True)
    return blocked_model
def initialise_dataset(dataset: str, sample_size: int, sample_seed: int,
                       normalize=True):
    """
    Return a Dataset object corresponding to a dataset name.

    Args:
        dataset (str): name of dataset
        sample_size (int): number of inputs to subsample
        sample_seed (int): seed to use when subsampling inputs
        normalize (bool): whether to apply ImageNet mean/std normalisation

    Returns:
        (torch.utils.data.Dataset, torch.Tensor): subsampled dataset and the
        indices that were drawn
    """
    dataset_folderpath = DATA_PATH / pathlib.Path('datasets/')
    if dataset == 'tiny_imagenet':
        ds = datasets.ImageFolder(
            root=dataset_folderpath / pathlib.Path('tiny-imagenet-200/val/'),
            transform=transforms.ToTensor())
    if dataset == 'imagenet':
        if normalize:
            # Use a local name so the `normalize` flag is not shadowed.
            norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                        std=[0.229, 0.224, 0.225])
            transform = transforms.Compose([transforms.Resize(256),
                                            transforms.CenterCrop(224),
                                            transforms.ToTensor(), norm])
        else:
            transform = transforms.Compose([transforms.Resize(256),
                                            transforms.CenterCrop(224),
                                            transforms.ToTensor()])
        ds = datasets.ImageFolder(
            root=dataset_folderpath / pathlib.Path('imagenet/val/'),
            transform=transform)
    # Seed the subsampling so the same indices are drawn across models.
    torch.manual_seed(0 if sample_seed is None else sample_seed)
    random_indices = torch.randperm(len(ds))[:sample_size]
    ds = torch.utils.data.Subset(ds, indices=random_indices)
    return ds, random_indices
def get_embedding_folder(dataset, architecture, seed, step, layer):
    suffix = pathlib.Path(f'embeddings/{dataset}/{architecture}/{seed}/{step}/{layer}')
    return resources_path / suffix


def load_embedding(dataset: str, architecture: str, seed: int, step: int,
                   layer: int) -> np.ndarray:
    folder_path = get_embedding_folder(dataset, architecture, seed, step, layer)
    if not os.path.exists(folder_path):
        print('Computing representations for model')
        os.makedirs(folder_path)
        rep = compute_embeddings(dataset, architecture, seed, step, layer)
    else:
        print('Representation already exists...loading...')
        rep = np.load(folder_path / pathlib.Path('rep.npy'))
    return rep
def score_pair_to_csv(rep1_dict: dict, rep2_dict: dict, filename: str,
                      metrics: list) -> None:
    """
    Compute metric distances between two representations and save them to a
    csv file.

    Args:
        rep1_dict (dict): configuration of representation 1, used to load it from disk
        rep2_dict (dict): configuration of representation 2, used to load it from disk
        filename (str): output filename to save results to
        metrics (list): list of metrics to apply, e.g. CCA and/or CKA and/or Procrustes (GLD)
    """
    rep1 = load_embedding(rep1_dict['dataset'], rep1_dict['architecture'],
                          rep1_dict['seed'], rep1_dict['step'],
                          rep1_dict['layer'])
    rep2 = load_embedding(rep2_dict['dataset'], rep2_dict['architecture'],
                          rep2_dict['seed'], rep2_dict['step'],
                          rep2_dict['layer'])
    logging.info(f'representation 1 shape: {rep1.shape}')
    logging.info(f'representation 2 shape: {rep2.shape}')
    results = {'dataset1': rep1_dict['dataset'],
               'architecture1': rep1_dict['architecture'],
               'seed1': rep1_dict['seed'], 'step1': rep1_dict['step'],
               'layer1': rep1_dict['layer'],
               'dataset2': rep2_dict['dataset'],
               'architecture2': rep2_dict['architecture'],
               'seed2': rep2_dict['seed'], 'step2': rep2_dict['step'],
               'layer2': rep2_dict['layer']}
    score_local_pair(rep1=rep1, rep2=rep2, metrics=metrics, filename=filename,
                     metadata=results)
def score_local_pair(rep1: np.ndarray, rep2: np.ndarray, filename: str,
                     metrics: list, metadata: dict = {}) -> None:
    """
    Compute metric distances between two representations (numpy arrays) and
    append the results to a csv file.

    Args:
        rep1 (np.ndarray): representation 1 to compare
        rep2 (np.ndarray): representation 2 to compare
        filename (str): file name for the output csv
        metrics (list): list of metrics to apply
        metadata (dict, optional): metadata for the representations to write to the csv (empty by default)
    """
    # Centre each neuron and normalise each representation to unit Frobenius
    # norm before computing any distance.
    rep1 = rep1 - rep1.mean(axis=1, keepdims=True)
    rep2 = rep2 - rep2.mean(axis=1, keepdims=True)
    rep1 = rep1 / np.linalg.norm(rep1)
    rep2 = rep2 / np.linalg.norm(rep2)
    results = metadata
    if ('PWCCA' in metrics or 'mean_sq_cca_corr' in metrics
            or 'mean_cca_corr' in metrics):
        logging.info('Computing CCA decomposition...')
        (cca_u, cca_rho, cca_vh,
         transformed_rep1, transformed_rep2) = cca_decomp(rep1, rep2)
        if 'PWCCA' in metrics:
            logging.info('Computing PWCCA distance...')
            results['PWCCA'] = pwcca_dist(rep1, cca_rho, transformed_rep1)
        if 'mean_sq_cca_corr' in metrics:
            logging.info('Computing mean squared CCA correlation...')
            results['mean_sq_cca_corr'] = mean_sq_cca_corr(cca_rho)
        if 'mean_cca_corr' in metrics:
            logging.info('Computing mean CCA correlation...')
            results['mean_cca_corr'] = mean_cca_corr(cca_rho)
    if 'CKA' in metrics:
        logging.info('Computing Linear CKA dist...')
        results['CKA'] = lin_cka_dist(rep1, rep2)
    if "CKA'" in metrics:
        logging.info("Computing Linear CKA' dist...")
        results["CKA'"] = lin_cka_prime_dist(rep1, rep2)
    if 'Procrustes' in metrics:
        logging.info('Computing Procrustes (GLD) dist...')
        results['Procrustes'] = procrustes(rep1, rep2)
    with open(filename, mode='a') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=results.keys())
        if csv_file.tell() == 0:
            writer.writeheader()
        writer.writerow(results)
def cca_decomp(A, B):
    """Computes CCA vectors, correlations, and transformed matrices.
    Requires a < n and b < n.
    Args:
        A: np.array of size a x n where a is the number of neurons and n is the dataset size
        B: np.array of size b x n where b is the number of neurons and n is the dataset size
    Returns:
        u: left singular vectors for the inner SVD problem
        s: canonical correlation coefficients
        vh: right singular vectors for the inner SVD problem
        transformed_a: canonical vectors for matrix A, a x n array
        transformed_b: canonical vectors for matrix B, b x n array
    """
    assert A.shape[0] < A.shape[1]
    assert B.shape[0] < B.shape[1]
    # Inverse square roots of the (PSD-clipped) covariance matrices.
    evals_a, evecs_a = np.linalg.eigh(A @ A.T)
    evals_a = (evals_a + np.abs(evals_a)) / 2
    inv_a = np.array([1 / np.sqrt(x) if x > 0 else 0 for x in evals_a])
    evals_b, evecs_b = np.linalg.eigh(B @ B.T)
    evals_b = (evals_b + np.abs(evals_b)) / 2
    inv_b = np.array([1 / np.sqrt(x) if x > 0 else 0 for x in evals_b])
    cov_ab = A @ B.T
    temp = ((evecs_a @ np.diag(inv_a) @ evecs_a.T) @ cov_ab
            @ (evecs_b @ np.diag(inv_b) @ evecs_b.T))
    try:
        u, s, vh = np.linalg.svd(temp)
    except np.linalg.LinAlgError:
        # SVD occasionally fails to converge; rescaling can help numerically.
        u, s, vh = np.linalg.svd(temp * 100)
        s = s / 100
    transformed_a = (u.T @ (evecs_a @ np.diag(inv_a) @ evecs_a.T) @ A).T
    transformed_b = (vh @ (evecs_b @ np.diag(inv_b) @ evecs_b.T) @ B).T
    return u, s, vh, transformed_a, transformed_b
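# A hedged sanity check for cca_decomp, with random data: comparing a
# representation with itself should give canonical correlations of ~1
# (shapes follow the a < n convention required above).
def _example_cca():
    rng = np.random.RandomState(0)
    A = rng.randn(10, 100)
    u, s, vh, ta, tb = cca_decomp(A, A.copy())
    return np.allclose(s, 1.0, atol=1e-5)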
def mean_sq_cca_corr(rho):
    """Compute the mean squared CCA correlation.
    :param rho: canonical correlation coefficients returned by cca_decomp(A, B)
    """
    return np.sum(rho * rho) / len(rho)


def mean_cca_corr(rho):
    """Compute the mean CCA correlation.
    :param rho: canonical correlation coefficients returned by cca_decomp(A, B)
    """
    return np.sum(rho) / len(rho)
def pwcca_dist(A, rho, transformed_a):
    """Computes the projection-weighted CCA distance given the correlation
    coefficients rho and the transformed matrix from running CCA.
    :param A: np.array of size a x n where a is the number of neurons and n is the dataset size
    :param rho: canonical correlation coefficients returned by cca_decomp(A, B)
    :param transformed_a: canonical vectors for A returned by cca_decomp(A, B)
    :return: PWCCA distance
    """
    in_prod = transformed_a.T @ A.T
    weights = np.sum(np.abs(in_prod), axis=1)
    weights = weights / np.sum(weights)
    dim = min(len(weights), len(rho))
    return 1 - np.dot(weights[:dim], rho[:dim])
def lin_cka_dist(A, B):
    """
    Computes the Linear CKA distance between representations A and B.
    """
    similarity = np.linalg.norm(B @ A.T, ord='fro') ** 2
    normalization = (np.linalg.norm(A @ A.T, ord='fro')
                     * np.linalg.norm(B @ B.T, ord='fro'))
    return 1 - similarity / normalization


def lin_cka_prime_dist(A, B):
    """
    Computes the Linear CKA' distance between representations A and B.
    This version is suited to a, b >> n.
    """
    if A.shape[0] > A.shape[1]:
        # With more neurons than samples, the n x n Gram matrices are cheaper.
        At_A = A.T @ A
        Bt_B = B.T @ B
        numerator = np.sum((At_A - Bt_B) ** 2)
        denominator = np.sum(A ** 2) ** 2 + np.sum(B ** 2) ** 2
        return numerator / denominator
    else:
        similarity = np.linalg.norm(B @ A.T, ord='fro') ** 2
        denominator = np.sum(A ** 2) ** 2 + np.sum(B ** 2) ** 2
        return 1 - 2 * similarity / denominator


def procrustes(A, B):
    """
    Computes the Procrustes distance between representations A and B.
    """
    A_sq_frob = np.sum(A ** 2)
    B_sq_frob = np.sum(B ** 2)
    nuc = np.linalg.norm(A @ B.T, ord='nuc')
    return A_sq_frob + B_sq_frob - 2 * nuc
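# A small sketch relating the metrics above: after the centring and unit
# Frobenius normalisation that score_local_pair applies, both the Linear CKA
# distance and the Procrustes distance of a representation to itself are ~0.
def _example_metric_distances():
    rng = np.random.RandomState(0)
    A = rng.randn(10, 100)
    A = A - A.mean(axis=1, keepdims=True)
    A = A / np.linalg.norm(A)
    return lin_cka_dist(A, A.copy()), procrustes(A, A.copy())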
def get_acc_diff(row, scores_df, task_list):
    score_row1 = scores_df.iloc[row['seed1']]
    score_row2 = scores_df.iloc[row['seed2']]
    for task in task_list:
        acc1 = score_row1[task]
        acc2 = score_row2[task]
        row[f'{task}_diff'] = abs(acc1 - acc2)
    return row


def rename_scores(scores_df):
    return scores_df.rename(columns={
        'MNLI dev acc.': 'mnli_dev_acc',
        'Lexical (entailed)': 'lex_ent',
        'Subseq (entailed)': 'sub_ent',
        'Constituent (entailed)': 'const_ent',
        'Lexical (nonent)': 'lex_nonent',
        'Subseq (nonent)': 'sub_nonent',
        'Constituent (nonent)': 'const_nonent',
        'Overall accuracy': 'overall_accuracy'})


def get_full_df(scores_path, dists_path, full_df_path):
    scores_df = pd.read_csv(scores_path)[0:100]
    scores_df = rename_scores(scores_df)
    task_list = list(scores_df.columns[1:9])
    print('got scores_df')
    dists_df = pd.read_csv(dists_path)
    print('got dists_df')
    print('getting full_df, will take a while')
    full_df = dists_df.apply(
        lambda row: get_acc_diff(row, scores_df, task_list), axis=1)
    print('got full_df, saving:')
    full_df.to_csv(full_df_path)
    print('saved')
    return full_df
def feather_sub_df(df, task, ref_depth):
    # Note: `scores_df` is a module-level global in the original script.
    seeds = list(df.seed1.unique())
    accs = [scores_df.iloc[seed][task] for seed in seeds]
    acc_dict = dict(zip(seeds, accs))
    best_seed = max(acc_dict, key=acc_dict.get)
    sub_df = df[(df.layer1 == ref_depth) & (df.layer2 == ref_depth)
                & ((df.seed1 == best_seed) | (df.seed2 == best_seed))]
    return sub_df
def get_probing_accuracy(data_dict, task, seed, depth):
    """
    Average accuracy of the model finetuned with finetuning seed `seed` on
    MNLI when probing layer `depth` on `task`.
    """
    return np.mean(data_dict[task][seed][depth + 1][0][0])


def get_full_df(scores_path, dists_path, full_df_path):
    # Variant of get_full_df for the probing-score analysis; `task_list` is a
    # module-level global in the original script.
    dists_df = pd.read_csv(dists_path)
    print('got dists_df')
    print('adding probing scores to get full_df')
    full_df = dists_df
    data_dict = pkl.load(open(scores_path, 'rb'))
    for task in task_list:
        task_diff_list = []
        for _, row in dists_df.iterrows():
            acc1 = get_probing_accuracy(data_dict, task, row['seed1'],
                                        row['layer1'])
            acc2 = get_probing_accuracy(data_dict, task, row['seed2'],
                                        row['layer2'])
            task_diff_list.append(np.abs(acc1 - acc2))
        full_df[f'{task}_diff'] = np.array(task_diff_list)
    print('got full_df, saving:')
    full_df.to_csv(full_df_path)
    print('saved')
    return full_df
def best_probing_seed(task, ref_depth, list_ref_seeds):
    # `scores_path` is a module-level global in the original script.
    data_dict = pkl.load(open(scores_path, 'rb'))
    list_to_max = [np.mean(data_dict[task][seed][ref_depth + 1][0][0])
                   for seed in list_ref_seeds]
    idx, _ = max(enumerate(list_to_max), key=lambda x: x[1])
    return list_ref_seeds[idx]


def layer_sub_df(df, ref_depth, ref_seed):
    sub_df = df.loc[((df['seed1'] == ref_seed) & (df['layer1'] == ref_depth))
                    | ((df['seed2'] == ref_seed)
                       & (df['layer2'] == ref_depth))].reset_index()
    num_layers = 12
    assert len(sub_df) == num_layers * 10
    return sub_df


def aggregate_rank_corrs(df, task, layer_depths, list_ref_seeds, METRICS,
                         sub_df_fn):
    rho = {metric: [] for metric in METRICS}
    rho_p = {metric: [] for metric in METRICS}
    tau = {metric: [] for metric in METRICS}
    tau_p = {metric: [] for metric in METRICS}
    bad_fracs = {metric: [] for metric in METRICS}
    for ref_depth in layer_depths:
        ref_seed = best_probing_seed(task, ref_depth, list_ref_seeds)
        sub_df = sub_df_fn(df, ref_depth, ref_seed)
        for metric in METRICS:
            (rho_corr, rho_os_p, tau_corr, tau_os_p,
             bad_frac) = get_rank_corrs(sub_df, metric, task)
            rho[metric].append(rho_corr)
            rho_p[metric].append(rho_os_p)
            tau[metric].append(tau_corr)
            tau_p[metric].append(tau_os_p)
            bad_fracs[metric].append(bad_frac)
    return rho, rho_p, tau, tau_p, bad_fracs
def get_acc(data_dict, task, seed, layer, dims, run='average'):
    if run == 'average':
        return np.mean(data_dict[task][seed][layer + 1][dims])
    elif run == 'std':
        return np.std(data_dict[task][seed][layer + 1][dims])
    else:
        return data_dict[task][seed][layer + 1][dims][run]


def get_acc_diff(data_dict, row):
    # `probe_task` is a module-level global in the original script.
    acc1 = get_acc(data_dict, task=probe_task, seed=row['seed1'],
                   layer=row['layer1'], dims=0, run='average')
    acc2 = get_acc(data_dict, task=probe_task, seed=row['seed2'],
                   layer=row['layer2'], dims=row['dims_deleted'],
                   run='average')
    return np.abs(acc1 - acc2)


def get_full_df(scores_path, dists_path, full_df_path):
    # Variant for the PCA-dimension-deletion analysis; REF_SEEDS, LAYERS and
    # probe_task are module-level globals in the original script.
    dists_df = pd.read_csv(dists_path)
    print('got dists_df')
    full_df = pd.DataFrame(dists_df[dists_df['seed1'].isin(REF_SEEDS)
                                    & dists_df['seed2'].isin(REF_SEEDS)
                                    & dists_df['layer1'].isin(LAYERS)
                                    & dists_df['layer2'].isin(LAYERS)])
    print('filtered full_df layers and seeds')
    print('adding probing scores to get full_df')
    data_dict = pkl.load(open(scores_path, 'rb'))
    full_df[f'{probe_task}_diff'] = full_df.apply(
        lambda row: get_acc_diff(data_dict, row), axis=1)
    print('got full_df, saving:')
    full_df.to_csv(full_df_path)
    print('saved')
    return full_df


def pca_sub_df(df, task, ref_depth):
    data_dict = pkl.load(open(scores_path, 'rb'))
    accs = [get_acc(data_dict, probe_task, seed, layer=ref_depth, dims=0,
                    run='average') for seed in REF_SEEDS]
    acc_dict = dict(zip(REF_SEEDS, accs))
    best_seed = max(acc_dict, key=acc_dict.get)
    sub_df = df[(df.layer1 == ref_depth) & (df.layer2 == ref_depth)
                & ((df.seed1 == best_seed) | (df.seed2 == best_seed))]
    return sub_df
def collect_scores(scores_path):
    model2correctness_tensor, data_dict = pkl.load(open(scores_path, 'rb'))
    # Group datapoints by the test set encoded in their guid prefix.
    guid_set = set()
    for datapoint in data_dict:
        guid_set.add(datapoint['guid'].split('-')[0])
    acc_dict = {}
    for test_set in guid_set:
        test_set_idxes = [idx for idx, d in enumerate(data_dict)
                          if d['guid'].split('-')[0] == test_set]
        acc_dict[test_set] = []
        for pretraining_seed in range(1, 11):
            these_seed_accs = [
                np.mean(model2correctness_tensor[pretraining_seed][finetuning_seed][test_set_idxes])
                for finetuning_seed in range(1, 11)]
            acc_dict[test_set].append(these_seed_accs)
        acc_dict[test_set] = np.array(acc_dict[test_set])
    # Extra subset: HANS lexical-overlap non-entailment examples.
    lex_nonent_idxes = [idx for idx, d in enumerate(data_dict)
                        if 'HANS' in d['guid']
                        and d['heuristic'] == 'lexical_overlap'
                        and d['label'] == 'non-entailment']
    acc_dict['lex_nonent'] = []
    for pretraining_seed in range(1, 11):
        these_seed_accs = [
            np.mean(model2correctness_tensor[pretraining_seed][finetuning_seed][lex_nonent_idxes])
            for finetuning_seed in range(1, 11)]
        acc_dict['lex_nonent'].append(these_seed_accs)
    acc_dict['lex_nonent'] = np.array(acc_dict['lex_nonent'])
    guid_set.add('lex_nonent')
    return guid_set, acc_dict


def get_accuracy(acc_dict, stress_test, pretraining_seed, finetuning_seed):
    return acc_dict[stress_test][pretraining_seed][finetuning_seed]


def get_acc_diff(acc_dict, stress_test, pre_seed1, pre_seed2, fine_seed1,
                 fine_seed2):
    avg_acc1 = get_accuracy(acc_dict, stress_test, pre_seed1, fine_seed1)
    avg_acc2 = get_accuracy(acc_dict, stress_test, pre_seed2, fine_seed2)
    return np.abs(avg_acc2 - avg_acc1)